diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000000..e69de29bb2d diff --git a/latest/doctrees/_include/distributions-table.doctree b/latest/doctrees/_include/distributions-table.doctree new file mode 100644 index 00000000000..1f9d8d9ded2 Binary files /dev/null and b/latest/doctrees/_include/distributions-table.doctree differ diff --git a/latest/doctrees/_include/inference_data_opts-table.doctree b/latest/doctrees/_include/inference_data_opts-table.doctree new file mode 100644 index 00000000000..85e485bb254 Binary files /dev/null and b/latest/doctrees/_include/inference_data_opts-table.doctree differ diff --git a/latest/doctrees/_include/inference_io_inheritance_diagrams.doctree b/latest/doctrees/_include/inference_io_inheritance_diagrams.doctree new file mode 100644 index 00000000000..b48ea8b4a84 Binary files /dev/null and b/latest/doctrees/_include/inference_io_inheritance_diagrams.doctree differ diff --git a/latest/doctrees/_include/models-table.doctree b/latest/doctrees/_include/models-table.doctree new file mode 100644 index 00000000000..f101881621a Binary files /dev/null and b/latest/doctrees/_include/models-table.doctree differ diff --git a/latest/doctrees/_include/psd_models-table.doctree b/latest/doctrees/_include/psd_models-table.doctree new file mode 100644 index 00000000000..fd066832c9c Binary files /dev/null and b/latest/doctrees/_include/psd_models-table.doctree differ diff --git a/latest/doctrees/_include/sampler_inheritance_diagrams.doctree b/latest/doctrees/_include/sampler_inheritance_diagrams.doctree new file mode 100644 index 00000000000..f654477de28 Binary files /dev/null and b/latest/doctrees/_include/sampler_inheritance_diagrams.doctree differ diff --git a/latest/doctrees/_include/samplers-table.doctree b/latest/doctrees/_include/samplers-table.doctree new file mode 100644 index 00000000000..bd8a8a35495 Binary files /dev/null and b/latest/doctrees/_include/samplers-table.doctree differ diff --git a/latest/doctrees/_include/transforms-table.doctree b/latest/doctrees/_include/transforms-table.doctree new file mode 100644 index 00000000000..44fec664bad Binary files /dev/null and b/latest/doctrees/_include/transforms-table.doctree differ diff --git a/latest/doctrees/_include/waveform-parameters.doctree b/latest/doctrees/_include/waveform-parameters.doctree new file mode 100644 index 00000000000..f8da31b37e6 Binary files /dev/null and b/latest/doctrees/_include/waveform-parameters.doctree differ diff --git a/latest/doctrees/apps.doctree b/latest/doctrees/apps.doctree new file mode 100644 index 00000000000..0fb6c967910 Binary files /dev/null and b/latest/doctrees/apps.doctree differ diff --git a/latest/doctrees/banksim.doctree b/latest/doctrees/banksim.doctree new file mode 100644 index 00000000000..827451e070c Binary files /dev/null and b/latest/doctrees/banksim.doctree differ diff --git a/latest/doctrees/build_gh_pages.doctree b/latest/doctrees/build_gh_pages.doctree new file mode 100644 index 00000000000..aad805c8d18 Binary files /dev/null and b/latest/doctrees/build_gh_pages.doctree differ diff --git a/latest/doctrees/catalog.doctree b/latest/doctrees/catalog.doctree new file mode 100644 index 00000000000..d052d648c36 Binary files /dev/null and b/latest/doctrees/catalog.doctree differ diff --git a/latest/doctrees/credit.doctree b/latest/doctrees/credit.doctree new file mode 100644 index 00000000000..e437d3e3067 Binary files /dev/null and b/latest/doctrees/credit.doctree differ diff --git a/latest/doctrees/dataquality.doctree 
b/latest/doctrees/dataquality.doctree new file mode 100644 index 00000000000..253c4688113 Binary files /dev/null and b/latest/doctrees/dataquality.doctree differ diff --git a/latest/doctrees/detector.doctree b/latest/doctrees/detector.doctree new file mode 100644 index 00000000000..aa98a71fa05 Binary files /dev/null and b/latest/doctrees/detector.doctree differ diff --git a/latest/doctrees/devs.doctree b/latest/doctrees/devs.doctree new file mode 100644 index 00000000000..0f1d8c24f58 Binary files /dev/null and b/latest/doctrees/devs.doctree differ diff --git a/latest/doctrees/distributions.doctree b/latest/doctrees/distributions.doctree new file mode 100644 index 00000000000..c930d45d841 Binary files /dev/null and b/latest/doctrees/distributions.doctree differ diff --git a/latest/doctrees/docker.doctree b/latest/doctrees/docker.doctree new file mode 100644 index 00000000000..5bf2c73fb96 Binary files /dev/null and b/latest/doctrees/docker.doctree differ diff --git a/latest/doctrees/documentation.doctree b/latest/doctrees/documentation.doctree new file mode 100644 index 00000000000..e4b9040442a Binary files /dev/null and b/latest/doctrees/documentation.doctree differ diff --git a/latest/doctrees/environment.pickle b/latest/doctrees/environment.pickle new file mode 100644 index 00000000000..ca63b2c2e4f Binary files /dev/null and b/latest/doctrees/environment.pickle differ diff --git a/latest/doctrees/extend.doctree b/latest/doctrees/extend.doctree new file mode 100644 index 00000000000..23d9b400d55 Binary files /dev/null and b/latest/doctrees/extend.doctree differ diff --git a/latest/doctrees/faithsim.doctree b/latest/doctrees/faithsim.doctree new file mode 100644 index 00000000000..07f6d7a3f41 Binary files /dev/null and b/latest/doctrees/faithsim.doctree differ diff --git a/latest/doctrees/fft.doctree b/latest/doctrees/fft.doctree new file mode 100644 index 00000000000..a32f4016ca4 Binary files /dev/null and b/latest/doctrees/fft.doctree differ diff --git a/latest/doctrees/filter.doctree b/latest/doctrees/filter.doctree new file mode 100644 index 00000000000..6d709a6b2e2 Binary files /dev/null and b/latest/doctrees/filter.doctree differ diff --git a/latest/doctrees/formats/hdf_format.doctree b/latest/doctrees/formats/hdf_format.doctree new file mode 100644 index 00000000000..4e36670db29 Binary files /dev/null and b/latest/doctrees/formats/hdf_format.doctree differ diff --git a/latest/doctrees/frame.doctree b/latest/doctrees/frame.doctree new file mode 100644 index 00000000000..ce6c1b4d633 Binary files /dev/null and b/latest/doctrees/frame.doctree differ diff --git a/latest/doctrees/genindex.doctree b/latest/doctrees/genindex.doctree new file mode 100644 index 00000000000..5cc44ded439 Binary files /dev/null and b/latest/doctrees/genindex.doctree differ diff --git a/latest/doctrees/gw150914.doctree b/latest/doctrees/gw150914.doctree new file mode 100644 index 00000000000..55586c83b2a Binary files /dev/null and b/latest/doctrees/gw150914.doctree differ diff --git a/latest/doctrees/hwinj.doctree b/latest/doctrees/hwinj.doctree new file mode 100644 index 00000000000..224f82c18a9 Binary files /dev/null and b/latest/doctrees/hwinj.doctree differ diff --git a/latest/doctrees/index.doctree b/latest/doctrees/index.doctree new file mode 100644 index 00000000000..26ae93e3e7f Binary files /dev/null and b/latest/doctrees/index.doctree differ diff --git a/latest/doctrees/inference.doctree b/latest/doctrees/inference.doctree new file mode 100644 index 00000000000..38d1ff56f00 Binary files /dev/null and 
b/latest/doctrees/inference.doctree differ diff --git a/latest/doctrees/inference/examples/analytic.doctree b/latest/doctrees/inference/examples/analytic.doctree new file mode 100644 index 00000000000..318dd41d546 Binary files /dev/null and b/latest/doctrees/inference/examples/analytic.doctree differ diff --git a/latest/doctrees/inference/examples/bbh.doctree b/latest/doctrees/inference/examples/bbh.doctree new file mode 100644 index 00000000000..b96f48b3f2e Binary files /dev/null and b/latest/doctrees/inference/examples/bbh.doctree differ diff --git a/latest/doctrees/inference/examples/gw150914.doctree b/latest/doctrees/inference/examples/gw150914.doctree new file mode 100644 index 00000000000..16d26e9afd1 Binary files /dev/null and b/latest/doctrees/inference/examples/gw150914.doctree differ diff --git a/latest/doctrees/inference/examples/hierarchical.doctree b/latest/doctrees/inference/examples/hierarchical.doctree new file mode 100644 index 00000000000..d22b9d57840 Binary files /dev/null and b/latest/doctrees/inference/examples/hierarchical.doctree differ diff --git a/latest/doctrees/inference/examples/lisa_smbhb_inj_pe.doctree b/latest/doctrees/inference/examples/lisa_smbhb_inj_pe.doctree new file mode 100644 index 00000000000..602ab7350e1 Binary files /dev/null and b/latest/doctrees/inference/examples/lisa_smbhb_inj_pe.doctree differ diff --git a/latest/doctrees/inference/examples/lisa_smbhb_ldc_pe.doctree b/latest/doctrees/inference/examples/lisa_smbhb_ldc_pe.doctree new file mode 100644 index 00000000000..1ca274adb02 Binary files /dev/null and b/latest/doctrees/inference/examples/lisa_smbhb_ldc_pe.doctree differ diff --git a/latest/doctrees/inference/examples/margtime.doctree b/latest/doctrees/inference/examples/margtime.doctree new file mode 100644 index 00000000000..58847787674 Binary files /dev/null and b/latest/doctrees/inference/examples/margtime.doctree differ diff --git a/latest/doctrees/inference/examples/relative.doctree b/latest/doctrees/inference/examples/relative.doctree new file mode 100644 index 00000000000..873be297706 Binary files /dev/null and b/latest/doctrees/inference/examples/relative.doctree differ diff --git a/latest/doctrees/inference/examples/sampler_platter.doctree b/latest/doctrees/inference/examples/sampler_platter.doctree new file mode 100644 index 00000000000..2034060b94f Binary files /dev/null and b/latest/doctrees/inference/examples/sampler_platter.doctree differ diff --git a/latest/doctrees/inference/examples/single.doctree b/latest/doctrees/inference/examples/single.doctree new file mode 100644 index 00000000000..aed6351e770 Binary files /dev/null and b/latest/doctrees/inference/examples/single.doctree differ diff --git a/latest/doctrees/inference/io.doctree b/latest/doctrees/inference/io.doctree new file mode 100644 index 00000000000..d555090d7fa Binary files /dev/null and b/latest/doctrees/inference/io.doctree differ diff --git a/latest/doctrees/inference/models.doctree b/latest/doctrees/inference/models.doctree new file mode 100644 index 00000000000..51c158ebb17 Binary files /dev/null and b/latest/doctrees/inference/models.doctree differ diff --git a/latest/doctrees/inference/sampler_api.doctree b/latest/doctrees/inference/sampler_api.doctree new file mode 100644 index 00000000000..9b6dffe2fc5 Binary files /dev/null and b/latest/doctrees/inference/sampler_api.doctree differ diff --git a/latest/doctrees/inference/viz.doctree b/latest/doctrees/inference/viz.doctree new file mode 100644 index 00000000000..fed17493647 Binary files /dev/null and 
b/latest/doctrees/inference/viz.doctree differ diff --git a/latest/doctrees/install.doctree b/latest/doctrees/install.doctree new file mode 100644 index 00000000000..db82f2e5396 Binary files /dev/null and b/latest/doctrees/install.doctree differ diff --git a/latest/doctrees/install_cuda.doctree b/latest/doctrees/install_cuda.doctree new file mode 100644 index 00000000000..3b5d664fc06 Binary files /dev/null and b/latest/doctrees/install_cuda.doctree differ diff --git a/latest/doctrees/install_lalsuite.doctree b/latest/doctrees/install_lalsuite.doctree new file mode 100644 index 00000000000..a2e965d603f Binary files /dev/null and b/latest/doctrees/install_lalsuite.doctree differ diff --git a/latest/doctrees/install_virtualenv.doctree b/latest/doctrees/install_virtualenv.doctree new file mode 100644 index 00000000000..8fe59f1c51b Binary files /dev/null and b/latest/doctrees/install_virtualenv.doctree differ diff --git a/latest/doctrees/modules.doctree b/latest/doctrees/modules.doctree new file mode 100644 index 00000000000..b6a1b26b8bb Binary files /dev/null and b/latest/doctrees/modules.doctree differ diff --git a/latest/doctrees/noise.doctree b/latest/doctrees/noise.doctree new file mode 100644 index 00000000000..d9ac85dbd32 Binary files /dev/null and b/latest/doctrees/noise.doctree differ diff --git a/latest/doctrees/psd.doctree b/latest/doctrees/psd.doctree new file mode 100644 index 00000000000..7135b59b9f5 Binary files /dev/null and b/latest/doctrees/psd.doctree differ diff --git a/latest/doctrees/pycbc.catalog.doctree b/latest/doctrees/pycbc.catalog.doctree new file mode 100644 index 00000000000..850a627a3f7 Binary files /dev/null and b/latest/doctrees/pycbc.catalog.doctree differ diff --git a/latest/doctrees/pycbc.coordinates.doctree b/latest/doctrees/pycbc.coordinates.doctree new file mode 100644 index 00000000000..adf72774eef Binary files /dev/null and b/latest/doctrees/pycbc.coordinates.doctree differ diff --git a/latest/doctrees/pycbc.distributions.doctree b/latest/doctrees/pycbc.distributions.doctree new file mode 100644 index 00000000000..3bfc636a611 Binary files /dev/null and b/latest/doctrees/pycbc.distributions.doctree differ diff --git a/latest/doctrees/pycbc.doctree b/latest/doctrees/pycbc.doctree new file mode 100644 index 00000000000..7692d7f5442 Binary files /dev/null and b/latest/doctrees/pycbc.doctree differ diff --git a/latest/doctrees/pycbc.events.doctree b/latest/doctrees/pycbc.events.doctree new file mode 100644 index 00000000000..31ec3f49830 Binary files /dev/null and b/latest/doctrees/pycbc.events.doctree differ diff --git a/latest/doctrees/pycbc.fft.doctree b/latest/doctrees/pycbc.fft.doctree new file mode 100644 index 00000000000..4676e6e0e89 Binary files /dev/null and b/latest/doctrees/pycbc.fft.doctree differ diff --git a/latest/doctrees/pycbc.filter.doctree b/latest/doctrees/pycbc.filter.doctree new file mode 100644 index 00000000000..b5797bec643 Binary files /dev/null and b/latest/doctrees/pycbc.filter.doctree differ diff --git a/latest/doctrees/pycbc.frame.doctree b/latest/doctrees/pycbc.frame.doctree new file mode 100644 index 00000000000..0e42c0f84a7 Binary files /dev/null and b/latest/doctrees/pycbc.frame.doctree differ diff --git a/latest/doctrees/pycbc.inference.doctree b/latest/doctrees/pycbc.inference.doctree new file mode 100644 index 00000000000..1b2d5047a96 Binary files /dev/null and b/latest/doctrees/pycbc.inference.doctree differ diff --git a/latest/doctrees/pycbc.inference.io.doctree b/latest/doctrees/pycbc.inference.io.doctree new file mode 
100644 index 00000000000..f74a4b26cdd Binary files /dev/null and b/latest/doctrees/pycbc.inference.io.doctree differ diff --git a/latest/doctrees/pycbc.inference.jump.doctree b/latest/doctrees/pycbc.inference.jump.doctree new file mode 100644 index 00000000000..40037bcc0c2 Binary files /dev/null and b/latest/doctrees/pycbc.inference.jump.doctree differ diff --git a/latest/doctrees/pycbc.inference.models.doctree b/latest/doctrees/pycbc.inference.models.doctree new file mode 100644 index 00000000000..ff9a8d2785b Binary files /dev/null and b/latest/doctrees/pycbc.inference.models.doctree differ diff --git a/latest/doctrees/pycbc.inference.sampler.doctree b/latest/doctrees/pycbc.inference.sampler.doctree new file mode 100644 index 00000000000..96d91bb2545 Binary files /dev/null and b/latest/doctrees/pycbc.inference.sampler.doctree differ diff --git a/latest/doctrees/pycbc.inject.doctree b/latest/doctrees/pycbc.inject.doctree new file mode 100644 index 00000000000..e2f7d63948c Binary files /dev/null and b/latest/doctrees/pycbc.inject.doctree differ diff --git a/latest/doctrees/pycbc.io.doctree b/latest/doctrees/pycbc.io.doctree new file mode 100644 index 00000000000..f7294528ebc Binary files /dev/null and b/latest/doctrees/pycbc.io.doctree differ diff --git a/latest/doctrees/pycbc.live.doctree b/latest/doctrees/pycbc.live.doctree new file mode 100644 index 00000000000..d619108b735 Binary files /dev/null and b/latest/doctrees/pycbc.live.doctree differ diff --git a/latest/doctrees/pycbc.neutron_stars.doctree b/latest/doctrees/pycbc.neutron_stars.doctree new file mode 100644 index 00000000000..dc952711882 Binary files /dev/null and b/latest/doctrees/pycbc.neutron_stars.doctree differ diff --git a/latest/doctrees/pycbc.noise.doctree b/latest/doctrees/pycbc.noise.doctree new file mode 100644 index 00000000000..df250de3bd2 Binary files /dev/null and b/latest/doctrees/pycbc.noise.doctree differ diff --git a/latest/doctrees/pycbc.population.doctree b/latest/doctrees/pycbc.population.doctree new file mode 100644 index 00000000000..29d95bd23a1 Binary files /dev/null and b/latest/doctrees/pycbc.population.doctree differ diff --git a/latest/doctrees/pycbc.psd.doctree b/latest/doctrees/pycbc.psd.doctree new file mode 100644 index 00000000000..278ea2d8d7b Binary files /dev/null and b/latest/doctrees/pycbc.psd.doctree differ diff --git a/latest/doctrees/pycbc.results.doctree b/latest/doctrees/pycbc.results.doctree new file mode 100644 index 00000000000..7dffd5f601c Binary files /dev/null and b/latest/doctrees/pycbc.results.doctree differ diff --git a/latest/doctrees/pycbc.strain.doctree b/latest/doctrees/pycbc.strain.doctree new file mode 100644 index 00000000000..809818168ff Binary files /dev/null and b/latest/doctrees/pycbc.strain.doctree differ diff --git a/latest/doctrees/pycbc.tmpltbank.doctree b/latest/doctrees/pycbc.tmpltbank.doctree new file mode 100644 index 00000000000..f941b9fc85c Binary files /dev/null and b/latest/doctrees/pycbc.tmpltbank.doctree differ diff --git a/latest/doctrees/pycbc.types.doctree b/latest/doctrees/pycbc.types.doctree new file mode 100644 index 00000000000..5b9514b004b Binary files /dev/null and b/latest/doctrees/pycbc.types.doctree differ diff --git a/latest/doctrees/pycbc.vetoes.doctree b/latest/doctrees/pycbc.vetoes.doctree new file mode 100644 index 00000000000..468f8445ec4 Binary files /dev/null and b/latest/doctrees/pycbc.vetoes.doctree differ diff --git a/latest/doctrees/pycbc.waveform.doctree b/latest/doctrees/pycbc.waveform.doctree new file mode 100644 index 
00000000000..854f653b13e Binary files /dev/null and b/latest/doctrees/pycbc.waveform.doctree differ diff --git a/latest/doctrees/pycbc.workflow.doctree b/latest/doctrees/pycbc.workflow.doctree new file mode 100644 index 00000000000..d588c6a9ca5 Binary files /dev/null and b/latest/doctrees/pycbc.workflow.doctree differ diff --git a/latest/doctrees/pycbc_condition_strain.doctree b/latest/doctrees/pycbc_condition_strain.doctree new file mode 100644 index 00000000000..ef6a0c3d4fb Binary files /dev/null and b/latest/doctrees/pycbc_condition_strain.doctree differ diff --git a/latest/doctrees/release.doctree b/latest/doctrees/release.doctree new file mode 100644 index 00000000000..85df69a04d4 Binary files /dev/null and b/latest/doctrees/release.doctree differ diff --git a/latest/doctrees/tmpltbank.doctree b/latest/doctrees/tmpltbank.doctree new file mode 100644 index 00000000000..aa115059986 Binary files /dev/null and b/latest/doctrees/tmpltbank.doctree differ diff --git a/latest/doctrees/tutorials.doctree b/latest/doctrees/tutorials.doctree new file mode 100644 index 00000000000..24a64de9a52 Binary files /dev/null and b/latest/doctrees/tutorials.doctree differ diff --git a/latest/doctrees/upload_to_gracedb.doctree b/latest/doctrees/upload_to_gracedb.doctree new file mode 100644 index 00000000000..fa97d820418 Binary files /dev/null and b/latest/doctrees/upload_to_gracedb.doctree differ diff --git a/latest/doctrees/waveform.doctree b/latest/doctrees/waveform.doctree new file mode 100644 index 00000000000..4ac2c9097ab Binary files /dev/null and b/latest/doctrees/waveform.doctree differ diff --git a/latest/doctrees/waveform_plugin.doctree b/latest/doctrees/waveform_plugin.doctree new file mode 100644 index 00000000000..68492e4ecb9 Binary files /dev/null and b/latest/doctrees/waveform_plugin.doctree differ diff --git a/latest/doctrees/workflow.doctree b/latest/doctrees/workflow.doctree new file mode 100644 index 00000000000..ef79b5fdd64 Binary files /dev/null and b/latest/doctrees/workflow.doctree differ diff --git a/latest/doctrees/workflow/datafind.doctree b/latest/doctrees/workflow/datafind.doctree new file mode 100644 index 00000000000..2ab97306f45 Binary files /dev/null and b/latest/doctrees/workflow/datafind.doctree differ diff --git a/latest/doctrees/workflow/hdf_coincidence.doctree b/latest/doctrees/workflow/hdf_coincidence.doctree new file mode 100644 index 00000000000..ea6845bd316 Binary files /dev/null and b/latest/doctrees/workflow/hdf_coincidence.doctree differ diff --git a/latest/doctrees/workflow/initialization.doctree b/latest/doctrees/workflow/initialization.doctree new file mode 100644 index 00000000000..b43b0397563 Binary files /dev/null and b/latest/doctrees/workflow/initialization.doctree differ diff --git a/latest/doctrees/workflow/injections.doctree b/latest/doctrees/workflow/injections.doctree new file mode 100644 index 00000000000..54d82f243fc Binary files /dev/null and b/latest/doctrees/workflow/injections.doctree differ diff --git a/latest/doctrees/workflow/matched_filter.doctree b/latest/doctrees/workflow/matched_filter.doctree new file mode 100644 index 00000000000..eaf3a74717a Binary files /dev/null and b/latest/doctrees/workflow/matched_filter.doctree differ diff --git a/latest/doctrees/workflow/pycbc_make_inference_inj_workflow.doctree b/latest/doctrees/workflow/pycbc_make_inference_inj_workflow.doctree new file mode 100644 index 00000000000..1d3e17255b6 Binary files /dev/null and b/latest/doctrees/workflow/pycbc_make_inference_inj_workflow.doctree differ diff --git 
a/latest/doctrees/workflow/pycbc_make_inference_workflow.doctree b/latest/doctrees/workflow/pycbc_make_inference_workflow.doctree new file mode 100644 index 00000000000..c01aeed3843 Binary files /dev/null and b/latest/doctrees/workflow/pycbc_make_inference_workflow.doctree differ diff --git a/latest/doctrees/workflow/pycbc_make_offline_search_workflow.doctree b/latest/doctrees/workflow/pycbc_make_offline_search_workflow.doctree new file mode 100644 index 00000000000..71bccd57545 Binary files /dev/null and b/latest/doctrees/workflow/pycbc_make_offline_search_workflow.doctree differ diff --git a/latest/doctrees/workflow/pycbc_make_psd_estimation_workflow.doctree b/latest/doctrees/workflow/pycbc_make_psd_estimation_workflow.doctree new file mode 100644 index 00000000000..ca02c3c475b Binary files /dev/null and b/latest/doctrees/workflow/pycbc_make_psd_estimation_workflow.doctree differ diff --git a/latest/doctrees/workflow/pygrb.doctree b/latest/doctrees/workflow/pygrb.doctree new file mode 100644 index 00000000000..1bede460b64 Binary files /dev/null and b/latest/doctrees/workflow/pygrb.doctree differ diff --git a/latest/doctrees/workflow/segments.doctree b/latest/doctrees/workflow/segments.doctree new file mode 100644 index 00000000000..13bc9bd7b69 Binary files /dev/null and b/latest/doctrees/workflow/segments.doctree differ diff --git a/latest/doctrees/workflow/splittable.doctree b/latest/doctrees/workflow/splittable.doctree new file mode 100644 index 00000000000..0a34ab9f01d Binary files /dev/null and b/latest/doctrees/workflow/splittable.doctree differ diff --git a/latest/doctrees/workflow/template_bank.doctree b/latest/doctrees/workflow/template_bank.doctree new file mode 100644 index 00000000000..acf6db8f0e9 Binary files /dev/null and b/latest/doctrees/workflow/template_bank.doctree differ diff --git a/latest/examples/catalog/data.hires.png b/latest/examples/catalog/data.hires.png new file mode 100644 index 00000000000..7e43ab8a56f Binary files /dev/null and b/latest/examples/catalog/data.hires.png differ diff --git a/latest/examples/catalog/data.pdf b/latest/examples/catalog/data.pdf new file mode 100644 index 00000000000..a916822c57f Binary files /dev/null and b/latest/examples/catalog/data.pdf differ diff --git a/latest/examples/catalog/data.png b/latest/examples/catalog/data.png new file mode 100644 index 00000000000..1a27f45ebf8 Binary files /dev/null and b/latest/examples/catalog/data.png differ diff --git a/latest/examples/catalog/data.py b/latest/examples/catalog/data.py new file mode 100644 index 00000000000..1223852a0f6 --- /dev/null +++ b/latest/examples/catalog/data.py @@ -0,0 +1,25 @@ +import matplotlib.pyplot as pp +import pycbc.catalog + + +m = pycbc.catalog.Merger("GW170817", source='gwtc-1') + +fig, axs = pp.subplots(2, 1, sharex=True, sharey=True) +for ifo, ax in zip(["L1", "H1"], axs): + pp.sca(ax) + pp.title(ifo) + # Retrieve data around the BNS merger + ts = m.strain(ifo).time_slice(m.time - 15, m.time + 6) + + # Whiten the data with a 4s filter + white = ts.whiten(4, 4) + + times, freqs, power = white.qtransform(.01, logfsteps=200, + qrange=(110, 110), + frange=(20, 512)) + pp.pcolormesh(times, freqs, power**0.5, vmax=5) + +pp.yscale('log') +pp.ylabel("Frequency (Hz)") +pp.xlabel("Time (s)") +pp.show() diff --git a/latest/examples/catalog/stat.hires.png b/latest/examples/catalog/stat.hires.png new file mode 100644 index 00000000000..f8f6f4cb6fd Binary files /dev/null and b/latest/examples/catalog/stat.hires.png differ diff --git a/latest/examples/catalog/stat.pdf
b/latest/examples/catalog/stat.pdf new file mode 100644 index 00000000000..a356196388b Binary files /dev/null and b/latest/examples/catalog/stat.pdf differ diff --git a/latest/examples/catalog/stat.png b/latest/examples/catalog/stat.png new file mode 100644 index 00000000000..2e73711aaa9 Binary files /dev/null and b/latest/examples/catalog/stat.png differ diff --git a/latest/examples/catalog/stat.py b/latest/examples/catalog/stat.py new file mode 100644 index 00000000000..7997e34c142 --- /dev/null +++ b/latest/examples/catalog/stat.py @@ -0,0 +1,13 @@ +import matplotlib.pyplot as pp +import pycbc.catalog + + +c = pycbc.catalog.Catalog(source='gwtc-2') +mchirp, elow, ehigh = c.median1d('mchirp', return_errors=True) +spin = c.median1d('chi_eff') + +pp.errorbar(mchirp, spin, xerr=[-elow, ehigh], fmt='o', markersize=7) +pp.xlabel('Chirp Mass') +pp.xscale('log') +pp.ylabel('Effective Spin') +pp.show() diff --git a/latest/examples/dataquality/hwinj.hires.png b/latest/examples/dataquality/hwinj.hires.png new file mode 100644 index 00000000000..ca732afc78b Binary files /dev/null and b/latest/examples/dataquality/hwinj.hires.png differ diff --git a/latest/examples/dataquality/hwinj.pdf b/latest/examples/dataquality/hwinj.pdf new file mode 100644 index 00000000000..685656f38bc Binary files /dev/null and b/latest/examples/dataquality/hwinj.pdf differ diff --git a/latest/examples/dataquality/hwinj.png b/latest/examples/dataquality/hwinj.png new file mode 100644 index 00000000000..4c26399a939 Binary files /dev/null and b/latest/examples/dataquality/hwinj.png differ diff --git a/latest/examples/dataquality/hwinj.py b/latest/examples/dataquality/hwinj.py new file mode 100644 index 00000000000..3c0d2ea5089 --- /dev/null +++ b/latest/examples/dataquality/hwinj.py @@ -0,0 +1,22 @@ +"""This example shows how to determine when a CBC hardware injection is present +in the data from a detector. 
+""" + +import matplotlib.pyplot as pp +from pycbc import dq + + +start_time = 1126051217 +end_time = start_time + 10000000 + +# Get times that the Livingston detector has CBC injections into the data +segs = dq.query_flag('L1', 'CBC_HW_INJ', start_time, end_time) + +pp.figure(figsize=[10, 2]) +for seg in segs: + start, end = seg + pp.axvspan(start, end, color='blue') + +pp.xlabel('Time (s)') +pp.show() + diff --git a/latest/examples/dataquality/on.hires.png b/latest/examples/dataquality/on.hires.png new file mode 100644 index 00000000000..8ddf63b10f0 Binary files /dev/null and b/latest/examples/dataquality/on.hires.png differ diff --git a/latest/examples/dataquality/on.pdf b/latest/examples/dataquality/on.pdf new file mode 100644 index 00000000000..a155612fb54 Binary files /dev/null and b/latest/examples/dataquality/on.pdf differ diff --git a/latest/examples/dataquality/on.png b/latest/examples/dataquality/on.png new file mode 100644 index 00000000000..9b677315443 Binary files /dev/null and b/latest/examples/dataquality/on.png differ diff --git a/latest/examples/dataquality/on.py b/latest/examples/dataquality/on.py new file mode 100644 index 00000000000..b6b2138c3f1 --- /dev/null +++ b/latest/examples/dataquality/on.py @@ -0,0 +1,27 @@ +"""This example shows how to determine when a detector is active.""" + +import matplotlib.pyplot as pp +from pycbc import dq +from pycbc.results import ifo_color + + +start_time = 1126051217 +end_time = start_time + 100000 + +# Get times that the Hanford detector has data +hsegs = dq.query_flag('H1', 'DATA', start_time, end_time) + +# Get times that the Livingston detector has data +lsegs = dq.query_flag('L1', 'DATA', start_time, end_time) + +pp.figure(figsize=[10,2]) +for seg in lsegs: + start, end = seg + pp.axvspan(start, end, color=ifo_color('L1'), ymin=0.1, ymax=0.4) + +for seg in hsegs: + start, end = seg + pp.axvspan(start, end, color=ifo_color('H1'), ymin=0.6, ymax=0.9) + +pp.xlabel('Time (s)') +pp.show() diff --git a/latest/examples/detector/custom.py b/latest/examples/detector/custom.py new file mode 100644 index 00000000000..96b93ac07ac --- /dev/null +++ b/latest/examples/detector/custom.py @@ -0,0 +1,45 @@ +import matplotlib.pyplot as plt +from pycbc.detector import add_detector_on_earth, Detector +import pycbc.psd +import numpy as np + +# Set up potential Cosmic Explorer detector locations + +# 40 km detector +lon = -125 / 180.0 * np.pi +lat = 46 / 180.0 * np.pi +yangle = 100.0 / 180.0 * np.pi +# yangle is the rotation clockwise from pointing north at 0 +# xangle can also be specified and allows for detectors that don't have +# 90 degree opening between arms. 
By default we assume xangle is yangle + pi/2 +add_detector_on_earth("C4", lon, lat, yangle=yangle, + xlength=40000, ylength=40000) + +# 20 km detector +# Arm length is optional, but if provided, you can accurately calculate +# high-frequency corrections if you provide a frequency argument to the +# antenna pattern method +lon = -94 / 180.0 * np.pi +lat = 29 / 180.0 * np.pi +yangle = 160.0 / 180.0 * np.pi +add_detector_on_earth("C2", lon, lat, yangle=yangle, + xlength=20000, ylength=20000) + +ra, dec = np.meshgrid(np.arange(0, np.pi*2.0, .1), + np.arange(-np.pi / 2.0, np.pi / 2.0, .1)) +ra = ra.flatten() +dec = dec.flatten() + +pol = 0 +time = 1e10 + 8000 # A time when ra ~ lines up with lat/lon coordinates + +for d in [Detector("C4"), Detector("C2")]: + fp, fc = d.antenna_pattern(ra, dec, pol, time) + + plt.figure() + plt.subplot(111, projection="mollweide") + ra[ra>np.pi] -= np.pi * 2.0 + plt.scatter(ra, dec, c=fp**2.0 + fc**2.0) + plt.title("Mollweide") + plt.grid(True) + plt.show() diff --git a/latest/examples/detector/custom_00.hires.png b/latest/examples/detector/custom_00.hires.png new file mode 100644 index 00000000000..48b2b5b4b88 Binary files /dev/null and b/latest/examples/detector/custom_00.hires.png differ diff --git a/latest/examples/detector/custom_00.pdf b/latest/examples/detector/custom_00.pdf new file mode 100644 index 00000000000..39b68338c5e Binary files /dev/null and b/latest/examples/detector/custom_00.pdf differ diff --git a/latest/examples/detector/custom_00.png b/latest/examples/detector/custom_00.png new file mode 100644 index 00000000000..6372dc26599 Binary files /dev/null and b/latest/examples/detector/custom_00.png differ diff --git a/latest/examples/detector/custom_01.hires.png b/latest/examples/detector/custom_01.hires.png new file mode 100644 index 00000000000..57c94d7ca46 Binary files /dev/null and b/latest/examples/detector/custom_01.hires.png differ diff --git a/latest/examples/detector/custom_01.pdf b/latest/examples/detector/custom_01.pdf new file mode 100644 index 00000000000..d22504498eb Binary files /dev/null and b/latest/examples/detector/custom_01.pdf differ diff --git a/latest/examples/detector/custom_01.png b/latest/examples/detector/custom_01.png new file mode 100644 index 00000000000..c4171806b74 Binary files /dev/null and b/latest/examples/detector/custom_01.png differ diff --git a/latest/examples/distributions/mass_examples.hires.png b/latest/examples/distributions/mass_examples.hires.png new file mode 100644 index 00000000000..4644ea745a4 Binary files /dev/null and b/latest/examples/distributions/mass_examples.hires.png differ diff --git a/latest/examples/distributions/mass_examples.pdf b/latest/examples/distributions/mass_examples.pdf new file mode 100644 index 00000000000..29a8361d469 Binary files /dev/null and b/latest/examples/distributions/mass_examples.pdf differ diff --git a/latest/examples/distributions/mass_examples.png b/latest/examples/distributions/mass_examples.png new file mode 100644 index 00000000000..1a7f235435d Binary files /dev/null and b/latest/examples/distributions/mass_examples.png differ diff --git a/latest/examples/distributions/mass_examples.py b/latest/examples/distributions/mass_examples.py new file mode 100644 index 00000000000..e1d2b1428e4 --- /dev/null +++ b/latest/examples/distributions/mass_examples.py @@ -0,0 +1,44 @@ +import matplotlib.pyplot as plt +from pycbc import distributions + +# Create a mass distribution object that is uniform between 0.5 and 1.5 +# solar masses.
+mass1_distribution = distributions.Uniform(mass1=(0.5, 1.5)) +# Take 1000000 random variable samples from this uniform mass distribution. +mass1_samples = mass1_distribution.rvs(size=1000000) + +# Draw another distribution that is Gaussian between 0.5 and 1.5 solar masses +# with a mean of 1.2 solar masses and a standard deviation of 0.15 solar +# masses. Gaussian takes the variance as an input so square the standard +# deviation. +variance = 0.15*0.15 +mass2_gaussian = distributions.Gaussian(mass2=(0.5, 1.5), mass2_mean=1.2, + mass2_var=variance) + +# Take 1000000 random variable samples from this Gaussian mass distribution. +mass2_samples = mass2_gaussian.rvs(size=1000000) + +# We can also define a pair of parameters in one distribution, instead of separately. +two_mass_distributions = distributions.Uniform(mass3=(1.6, 3.0), + mass4=(1.6, 3.0)) +two_mass_samples = two_mass_distributions.rvs(size=1000000) + +# Choose 50 bins for the histogram subplots. +n_bins = 50 + +# Plot histograms of samples in subplots +fig, axes = plt.subplots(nrows=2, ncols=2) +ax0, ax1, ax2, ax3 = axes.flat + +ax0.hist(mass1_samples['mass1'], bins = n_bins) +ax1.hist(mass2_samples['mass2'], bins = n_bins) +ax2.hist(two_mass_samples['mass3'], bins = n_bins) +ax3.hist(two_mass_samples['mass4'], bins = n_bins) + +ax0.set_title('Mass 1 samples') +ax1.set_title('Mass 2 samples') +ax2.set_title('Mass 3 samples') +ax3.set_title('Mass 4 samples') + +plt.tight_layout() +plt.show() diff --git a/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.hires.png b/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.hires.png new file mode 100644 index 00000000000..33ab96cbfa7 Binary files /dev/null and b/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.hires.png differ diff --git a/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.pdf b/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.pdf new file mode 100644 index 00000000000..8003b13b722 Binary files /dev/null and b/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.pdf differ diff --git a/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.png b/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.png new file mode 100644 index 00000000000..a6d88906ff2 Binary files /dev/null and b/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.png differ diff --git a/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.py b/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.py new file mode 100644 index 00000000000..698ed39acf9 --- /dev/null +++ b/latest/examples/distributions/mchirp_q_from_uniform_m1m2_example.py @@ -0,0 +1,64 @@ +import matplotlib.pyplot as plt +from pycbc import distributions +from pycbc import conversions +import numpy as np + +# Create a chirp mass and mass ratio distribution object that is uniform +# in mass1 and mass2 +minmc = 5 +maxmc = 60 +mc_distribution = distributions.MchirpfromUniformMass1Mass2(mc=(minmc,maxmc)) +# generate q in a symmetric range [min, 1/min] to make mass1 and mass2 +# symmetric +minq = 1/4 +maxq = 1/minq +q_distribution = distributions.QfromUniformMass1Mass2(q=(minq,maxq)) + +# Take 100000 random variable samples from this chirp mass and mass ratio +# distribution.
+n_size = 100000 +mc_samples = mc_distribution.rvs(size=n_size) +q_samples = q_distribution.rvs(size=n_size) + +# Convert chirp mass and mass ratio to mass1 and mass2 +m1 = conversions.mass1_from_mchirp_q(mc_samples['mc'],q_samples['q']) +m2 = conversions.mass2_from_mchirp_q(mc_samples['mc'],q_samples['q']) + +# Check the 1D marginalization of mchirp and q is consistent with the +# expected analytical formula +n_bins = 200 +xq = np.linspace(minq,maxq,100) +yq = ((1+xq)/(xq**3))**(2/5) +xmc = np.linspace(minmc,maxmc,100) +ymc = xmc + +plt.figure(figsize=(10,10)) +# Plot histograms of samples in subplots +plt.subplot(221) +plt.hist2d(mc_samples['mc'], q_samples['q'], bins=n_bins, cmap='Blues') +plt.xlabel('chirp mass') +plt.ylabel('mass ratio') +plt.colorbar(fraction=.05, pad=0.05,label='number of samples') + +plt.subplot(222) +plt.hist2d(m1, m2, bins=n_bins, cmap='Blues') +plt.xlabel('mass1') +plt.ylabel('mass2') +plt.colorbar(fraction=.05, pad=0.05,label='number of samples') + +plt.subplot(223) +plt.hist(mc_samples['mc'],density=True,bins=100,label='samples') +plt.plot(xmc,ymc*mc_distribution.norm,label='$P(M_c)\propto M_c$') +plt.xlabel('chirp mass') +plt.ylabel('PDF') +plt.legend() + +plt.subplot(224) +plt.hist(q_samples['q'],density=True,bins=n_bins,label='samples') +plt.plot(xq,yq*q_distribution.norm,label='$P(q)\propto((1+q)/q^3)^{2/5}$') +plt.xlabel('mass ratio') +plt.ylabel('PDF') +plt.legend() + +plt.tight_layout() +plt.show() \ No newline at end of file diff --git a/latest/examples/distributions/sampling_from_config_example.hires.png b/latest/examples/distributions/sampling_from_config_example.hires.png new file mode 100644 index 00000000000..2a957fedb1c Binary files /dev/null and b/latest/examples/distributions/sampling_from_config_example.hires.png differ diff --git a/latest/examples/distributions/sampling_from_config_example.pdf b/latest/examples/distributions/sampling_from_config_example.pdf new file mode 100644 index 00000000000..1ecc64e3f68 Binary files /dev/null and b/latest/examples/distributions/sampling_from_config_example.pdf differ diff --git a/latest/examples/distributions/sampling_from_config_example.png b/latest/examples/distributions/sampling_from_config_example.png new file mode 100644 index 00000000000..2a00c96833c Binary files /dev/null and b/latest/examples/distributions/sampling_from_config_example.png differ diff --git a/latest/examples/distributions/sampling_from_config_example.py b/latest/examples/distributions/sampling_from_config_example.py new file mode 100644 index 00000000000..7c377d5655b --- /dev/null +++ b/latest/examples/distributions/sampling_from_config_example.py @@ -0,0 +1,43 @@ +import numpy as np +import matplotlib.pyplot as plt +from pycbc.distributions.utils import draw_samples_from_config + + +# A path to the .ini file. +CONFIG_PATH = "./pycbc_bbh_prior.ini" +random_seed = np.random.randint(low=0, high=2**32-1) + +# Draw a single sample. +sample = draw_samples_from_config( + path=CONFIG_PATH, num=1, seed=random_seed) + +# Print all parameters. +print(sample.fieldnames) +print(sample) +# Print a certain parameter, for example 'mass1'. +print(sample[0]['mass1']) + +# Draw 1000000 samples, and select all values of a certain parameter. 
+n_bins = 50 +samples = draw_samples_from_config( + path=CONFIG_PATH, num=1000000, seed=random_seed) + +fig, axes = plt.subplots(nrows=3, ncols=2) +ax1, ax2, ax3, ax4, ax5, ax6 = axes.flat + +ax1.hist(samples[:]['srcmass1'], bins=n_bins) +ax2.hist(samples[:]['srcmass2'], bins=n_bins) +ax3.hist(samples[:]['comoving_volume'], bins=n_bins) +ax4.hist(samples[:]['redshift'], bins=n_bins) +ax5.hist(samples[:]['distance'], bins=n_bins) +ax6.hist(samples[:]['mass1'], bins=n_bins) + +ax1.set_title('srcmass1') +ax2.set_title('srcmass2') +ax3.set_title('comoving_volume') +ax4.set_title('redshift') +ax5.set_title('distance') +ax6.set_title('mass1 or mass2') + +plt.tight_layout() +plt.show() diff --git a/latest/examples/distributions/spin_examples.hires.png b/latest/examples/distributions/spin_examples.hires.png new file mode 100644 index 00000000000..3f002890d9d Binary files /dev/null and b/latest/examples/distributions/spin_examples.hires.png differ diff --git a/latest/examples/distributions/spin_examples.pdf b/latest/examples/distributions/spin_examples.pdf new file mode 100644 index 00000000000..1e6167350a8 Binary files /dev/null and b/latest/examples/distributions/spin_examples.pdf differ diff --git a/latest/examples/distributions/spin_examples.png b/latest/examples/distributions/spin_examples.png new file mode 100644 index 00000000000..e9f74c93422 Binary files /dev/null and b/latest/examples/distributions/spin_examples.png differ diff --git a/latest/examples/distributions/spin_examples.py b/latest/examples/distributions/spin_examples.py new file mode 100644 index 00000000000..21ae36c9874 --- /dev/null +++ b/latest/examples/distributions/spin_examples.py @@ -0,0 +1,56 @@ +import matplotlib.pyplot as plt +import numpy +import pycbc.coordinates as co +from pycbc import distributions + +# We can choose any bounds between 0 and pi for this distribution but in +# units of pi, so we use between 0 and 1 +theta_low = 0. +theta_high = 1. + +# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi +phi_low = 0. +phi_high = 2. + +# Create a distribution object from distributions.py. Here we are using the +# Uniform Solid Angle function which takes +# theta = polar_bounds(theta_lower_bound to a theta_upper_bound), and then +# phi = azimuthal_bound(phi_lower_bound to a phi_upper_bound). +uniform_solid_angle_distribution = distributions.UniformSolidAngle( + polar_bounds=(theta_low,theta_high), + azimuthal_bounds=(phi_low,phi_high)) + +# Now we can take a random variable sample from that distribution. In this +# case we want 500000 samples. +solid_angle_samples = uniform_solid_angle_distribution.rvs(size=500000) + +# Make spins with unit length for coordinate transformation below. +spin_mag = numpy.ndarray(shape=(500000), dtype=float) + +for i in range(0,500000): + spin_mag[i] = 1. + +# Use the pycbc.coordinates (imported as co) spherical_to_cartesian function to convert +# from spherical polar coordinates to cartesian coordinates +spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag, + solid_angle_samples['phi'], + solid_angle_samples['theta']) + +# Choose 50 bins for the histograms.
+n_bins = 50 + +plt.figure(figsize=(10,10)) +plt.subplot(2, 2, 1) +plt.hist(spinx, bins = n_bins) +plt.title('Spin x samples') + +plt.subplot(2, 2, 2) +plt.hist(spiny, bins = n_bins) +plt.title('Spin y samples') + +plt.subplot(2, 2, 3) +plt.hist(spinz, bins = n_bins) +plt.title('Spin z samples') + +plt.tight_layout() +plt.show() diff --git a/latest/examples/distributions/spin_spatial_distr_example.hires.png b/latest/examples/distributions/spin_spatial_distr_example.hires.png new file mode 100644 index 00000000000..94707cfde2e Binary files /dev/null and b/latest/examples/distributions/spin_spatial_distr_example.hires.png differ diff --git a/latest/examples/distributions/spin_spatial_distr_example.pdf b/latest/examples/distributions/spin_spatial_distr_example.pdf new file mode 100644 index 00000000000..d15ebe3863d Binary files /dev/null and b/latest/examples/distributions/spin_spatial_distr_example.pdf differ diff --git a/latest/examples/distributions/spin_spatial_distr_example.png b/latest/examples/distributions/spin_spatial_distr_example.png new file mode 100644 index 00000000000..d792bc49f33 Binary files /dev/null and b/latest/examples/distributions/spin_spatial_distr_example.png differ diff --git a/latest/examples/distributions/spin_spatial_distr_example.py b/latest/examples/distributions/spin_spatial_distr_example.py new file mode 100644 index 00000000000..b46518c3733 --- /dev/null +++ b/latest/examples/distributions/spin_spatial_distr_example.py @@ -0,0 +1,50 @@ +import numpy +import matplotlib.pyplot as plt +import pycbc.coordinates as co +from pycbc import distributions + +# We can choose any bounds between 0 and pi for this distribution but in units +# of pi so we use between 0 and 1. +theta_low = 0. +theta_high = 1. + +# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi. +phi_low = 0. +phi_high = 2. + +# Create a distribution object from distributions.py +# Here we are using the Uniform Solid Angle function which takes +# theta = polar_bounds(theta_lower_bound to a theta_upper_bound), and then +# phi = azimuthal_bound(phi_lower_bound to a phi_upper_bound). +uniform_solid_angle_distribution = distributions.UniformSolidAngle( + polar_bounds=(theta_low,theta_high), + azimuthal_bounds=(phi_low,phi_high)) + +# Now we can take a random variable sample from that distribution. +# In this case we want 10000 samples. +solid_angle_samples = uniform_solid_angle_distribution.rvs(size=10000) + +# Make a spin magnitude of 1 since the solid angle is only 2 dimensions and we +# need a 3rd dimension for the 3D plot that we make later on. +spin_mag = numpy.ndarray(shape=(10000), dtype=float) + +for i in range(0,10000): + spin_mag[i] = 1. + +# Use the pycbc.coordinates (imported as co) spherical_to_cartesian function to +# convert from spherical polar coordinates to cartesian coordinates. +spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag, + solid_angle_samples['phi'], + solid_angle_samples['theta']) + +# Plot the spherical distribution of spins to make sure that the samples are +# distributed across the surface of a sphere.
+ +fig = plt.figure(figsize=(10,10)) +ax = fig.add_subplot(111, projection='3d') +ax.scatter(spinx, spiny, spinz, s=1) + +ax.set_xlabel('Spin X Axis') +ax.set_ylabel('Spin Y Axis') +ax.set_zlabel('Spin Z Axis') +plt.show() diff --git a/latest/examples/filter/chisq.hires.png b/latest/examples/filter/chisq.hires.png new file mode 100644 index 00000000000..537e4dc3a33 Binary files /dev/null and b/latest/examples/filter/chisq.hires.png differ diff --git a/latest/examples/filter/chisq.pdf b/latest/examples/filter/chisq.pdf new file mode 100644 index 00000000000..ec9e25c4d42 Binary files /dev/null and b/latest/examples/filter/chisq.pdf differ diff --git a/latest/examples/filter/chisq.png b/latest/examples/filter/chisq.png new file mode 100644 index 00000000000..f46be4b136b Binary files /dev/null and b/latest/examples/filter/chisq.png differ diff --git a/latest/examples/filter/chisq.py b/latest/examples/filter/chisq.py new file mode 100644 index 00000000000..05e18eaf4f5 --- /dev/null +++ b/latest/examples/filter/chisq.py @@ -0,0 +1,41 @@ +"""This example shows how to calculate the chi^2 discriminator described in +https://arxiv.org/abs/gr-qc/0405045, also known as the "power chi^2" or "Allen +chi^2" discriminator. +""" + +import matplotlib.pyplot as pp +import pycbc.noise +import pycbc.psd +import pycbc.waveform +import pycbc.vetoes + + +# Generate some noise with an advanced ligo psd +flow = 30.0 +delta_f = 1.0 / 16 +flen = int(2048 / delta_f) + 1 +psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow) + +# Generate 16 seconds of noise at 4096 Hz +delta_t = 1.0 / 4096 +tsamples = int(16 / delta_t) +strain = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127) +stilde = strain.to_frequencyseries() + +# Calculate the power chisq time series +hp, hc = pycbc.waveform.get_fd_waveform(approximant='IMRPhenomD', + mass1=25, mass2=25, + f_lower=flow, delta_f=stilde.delta_f) + +hp.resize(len(stilde)) +num_bins = 16 +chisq = pycbc.vetoes.power_chisq(hp, stilde, num_bins, psd, + low_frequency_cutoff=flow) + +# convert to a reduced chisq +chisq /= (num_bins * 2) - 2 + +pp.plot(chisq.sample_times, chisq) +pp.ylabel('$\chi^2_r$') +pp.xlabel('time (s)') +pp.show() diff --git a/latest/examples/filter/fir.py b/latest/examples/filter/fir.py new file mode 100644 index 00000000000..aeb07812fdd --- /dev/null +++ b/latest/examples/filter/fir.py @@ -0,0 +1,19 @@ +# Apply an FIR filter. The algorithm is written for high performance, so if you +# have a large number of taps, it will resort to an FFT-based implementation +# under the hood. +import pycbc.types +import pycbc.filter.resample + +# Reference time series +ts = pycbc.types.TimeSeries([-1, 1, -1, 1, -1], delta_t=1.0) + +# May also be a numpy array +coeff = pycbc.types.Array([1.0, 0, 1.0]) + +ts_filtered = pycbc.filter.resample.lfilter(coeff, ts) + +# If you want a zero-phase filter, provide a symmetric set of coefficients. +# The time delay will be compensated for.
+ +ts_filtered2 = pycbc.filter.resample.fir_zero_filter(coeff, ts) + diff --git a/latest/examples/filter/pass.hires.png b/latest/examples/filter/pass.hires.png new file mode 100644 index 00000000000..51f58817f37 Binary files /dev/null and b/latest/examples/filter/pass.hires.png differ diff --git a/latest/examples/filter/pass.pdf b/latest/examples/filter/pass.pdf new file mode 100644 index 00000000000..5bda1dd37df Binary files /dev/null and b/latest/examples/filter/pass.pdf differ diff --git a/latest/examples/filter/pass.png b/latest/examples/filter/pass.png new file mode 100644 index 00000000000..35cd68c456b Binary files /dev/null and b/latest/examples/filter/pass.png differ diff --git a/latest/examples/filter/pass.py b/latest/examples/filter/pass.py new file mode 100644 index 00000000000..c291cb68ed4 --- /dev/null +++ b/latest/examples/filter/pass.py @@ -0,0 +1,30 @@ +import matplotlib.pyplot as pp +import pycbc.noise +import pycbc.psd +import pycbc.filter + + +# Generate some noise with an advanced ligo psd +flow = 5.0 +delta_f = 1.0 / 16 +flen = int(2048 / delta_f) + 1 +psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow) + +# Generate 1 seconds of noise at 4096 Hz +delta_t = 1.0 / 4096 +tsamples = int(1 / delta_t) +ts = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127) +pp.plot(ts.sample_times, ts, label='Original') + +# Suppress the low frequencies below 30 Hz +ts = pycbc.filter.highpass(ts, 30.0) +pp.plot(ts.sample_times, ts, label='Highpassed') + +# Suppress the high frequencies +ts = pycbc.filter.lowpass_fir(ts, 1000.0, 8) +pp.plot(ts.sample_times, ts, label='Highpassed + Lowpassed') + +pp.legend() +pp.ylabel('Strain') +pp.xlabel('Time (s)') +pp.show() diff --git a/latest/examples/filter/snr.hires.png b/latest/examples/filter/snr.hires.png new file mode 100644 index 00000000000..14678b5adb9 Binary files /dev/null and b/latest/examples/filter/snr.hires.png differ diff --git a/latest/examples/filter/snr.pdf b/latest/examples/filter/snr.pdf new file mode 100644 index 00000000000..44986986a25 Binary files /dev/null and b/latest/examples/filter/snr.pdf differ diff --git a/latest/examples/filter/snr.png b/latest/examples/filter/snr.png new file mode 100644 index 00000000000..1d8b52893e7 Binary files /dev/null and b/latest/examples/filter/snr.png differ diff --git a/latest/examples/filter/snr.py b/latest/examples/filter/snr.py new file mode 100644 index 00000000000..987d7406ca8 --- /dev/null +++ b/latest/examples/filter/snr.py @@ -0,0 +1,33 @@ +import matplotlib.pyplot as pp +import pycbc.noise +import pycbc.psd +import pycbc.filter +import pycbc.waveform + + +# Generate some noise with an advanced ligo psd +flow = 30.0 +delta_f = 1.0 / 16 +flen = int(2048 / delta_f) + 1 +psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow) + +# Generate 16 seconds of noise at 4096 Hz +delta_t = 1.0 / 4096 +tsamples = int(16 / delta_t) +strain = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127) +stilde = strain.to_frequencyseries() + +# Use a waveform as a matched filter +hp, hc = pycbc.waveform.get_fd_waveform(approximant='IMRPhenomD', + mass1=25, mass2=25, + f_lower=flow, delta_f=stilde.delta_f) + +hp.resize(len(stilde)) +snr = pycbc.filter.matched_filter(hp, stilde, psd=psd, + low_frequency_cutoff=flow) + + +pp.plot(snr.sample_times, abs(snr)) +pp.ylabel('signal-to-noise ratio') +pp.xlabel('time (s)') +pp.show() diff --git a/latest/examples/gw150914/gw150914_h1_snr.hires.png b/latest/examples/gw150914/gw150914_h1_snr.hires.png new file mode 100644 index 
00000000000..2e25ce6e4de Binary files /dev/null and b/latest/examples/gw150914/gw150914_h1_snr.hires.png differ diff --git a/latest/examples/gw150914/gw150914_h1_snr.pdf b/latest/examples/gw150914/gw150914_h1_snr.pdf new file mode 100644 index 00000000000..d30a376d3c5 Binary files /dev/null and b/latest/examples/gw150914/gw150914_h1_snr.pdf differ diff --git a/latest/examples/gw150914/gw150914_h1_snr.png b/latest/examples/gw150914/gw150914_h1_snr.png new file mode 100644 index 00000000000..0657795f32f Binary files /dev/null and b/latest/examples/gw150914/gw150914_h1_snr.png differ diff --git a/latest/examples/gw150914/gw150914_h1_snr.py b/latest/examples/gw150914/gw150914_h1_snr.py new file mode 100644 index 00000000000..2360e702827 --- /dev/null +++ b/latest/examples/gw150914/gw150914_h1_snr.py @@ -0,0 +1,33 @@ +import matplotlib.pyplot as pp +from urllib.request import urlretrieve +from pycbc.frame import read_frame +from pycbc.filter import highpass_fir, matched_filter +from pycbc.waveform import get_fd_waveform +from pycbc.psd import welch, interpolate + + +# Read data and remove low frequency content +fname = 'H-H1_LOSC_4_V2-1126259446-32.gwf' +url = "https://www.gwosc.org/GW150914data/" + fname +urlretrieve(url, filename=fname) +h1 = read_frame('H-H1_LOSC_4_V2-1126259446-32.gwf', 'H1:LOSC-STRAIN') +h1 = highpass_fir(h1, 15, 8) + +# Calculate the noise spectrum +psd = interpolate(welch(h1), 1.0 / h1.duration) + +# Generate a template to filter with +hp, hc = get_fd_waveform(approximant="IMRPhenomD", mass1=40, mass2=32, + f_lower=20, delta_f=1.0/h1.duration) +hp.resize(len(h1) // 2 + 1) + +# Calculate the complex (two-phase SNR) +snr = matched_filter(hp, h1, psd=psd, low_frequency_cutoff=20.0) + +# Remove regions corrupted by filter wraparound +snr = snr[len(snr) // 4: len(snr) * 3 // 4] + +pp.plot(snr.sample_times, abs(snr)) +pp.ylabel('signal-to-noise') +pp.xlabel('GPS Time (s)') +pp.show() diff --git a/latest/examples/gw150914/gw150914_shape.hires.png b/latest/examples/gw150914/gw150914_shape.hires.png new file mode 100644 index 00000000000..ffd52091133 Binary files /dev/null and b/latest/examples/gw150914/gw150914_shape.hires.png differ diff --git a/latest/examples/gw150914/gw150914_shape.pdf b/latest/examples/gw150914/gw150914_shape.pdf new file mode 100644 index 00000000000..e55167418d5 Binary files /dev/null and b/latest/examples/gw150914/gw150914_shape.pdf differ diff --git a/latest/examples/gw150914/gw150914_shape.png b/latest/examples/gw150914/gw150914_shape.png new file mode 100644 index 00000000000..825d3a7e9e2 Binary files /dev/null and b/latest/examples/gw150914/gw150914_shape.png differ diff --git a/latest/examples/gw150914/gw150914_shape.py b/latest/examples/gw150914/gw150914_shape.py new file mode 100644 index 00000000000..ca2dfb4ff5a --- /dev/null +++ b/latest/examples/gw150914/gw150914_shape.py @@ -0,0 +1,35 @@ +import matplotlib.pyplot as pp +from pycbc.filter import highpass_fir, lowpass_fir +from pycbc.psd import welch, interpolate +from pycbc.catalog import Merger + + +for ifo in ['H1', 'L1']: + # Read data and remove low frequency content + h1 = Merger("GW150914").strain(ifo) + h1 = highpass_fir(h1, 15, 8) + + # Calculate the noise spectrum + psd = interpolate(welch(h1), 1.0 / h1.duration) + + # whiten + white_strain = (h1.to_frequencyseries() / psd ** 0.5).to_timeseries() + + # remove some of the high and low + smooth = highpass_fir(white_strain, 35, 8) + smooth = lowpass_fir(smooth, 300, 8) + + # time shift and flip L1 + if ifo == 'L1': + smooth *= -1 + 
smooth.roll(int(.007 / smooth.delta_t)) + + pp.plot(smooth.sample_times, smooth, label=ifo) + +pp.legend() +pp.xlim(1126259462.21, 1126259462.45) +pp.ylim(-150, 150) +pp.ylabel('Smoothed-Whitened Strain') +pp.grid() +pp.xlabel('GPS Time (s)') +pp.show() diff --git a/latest/examples/noise/timeseries.hires.png b/latest/examples/noise/timeseries.hires.png new file mode 100644 index 00000000000..ea4c9429f38 Binary files /dev/null and b/latest/examples/noise/timeseries.hires.png differ diff --git a/latest/examples/noise/timeseries.pdf b/latest/examples/noise/timeseries.pdf new file mode 100644 index 00000000000..b4a066fe923 Binary files /dev/null and b/latest/examples/noise/timeseries.pdf differ diff --git a/latest/examples/noise/timeseries.png b/latest/examples/noise/timeseries.png new file mode 100644 index 00000000000..20daa635681 Binary files /dev/null and b/latest/examples/noise/timeseries.png differ diff --git a/latest/examples/noise/timeseries.py b/latest/examples/noise/timeseries.py new file mode 100644 index 00000000000..015f211baed --- /dev/null +++ b/latest/examples/noise/timeseries.py @@ -0,0 +1,20 @@ +import matplotlib.pyplot as pp +import pycbc.noise +import pycbc.psd + + +# The color of the noise matches a PSD which you provide +flow = 30.0 +delta_f = 1.0 / 16 +flen = int(2048 / delta_f) + 1 +psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow) + +# Generate 32 seconds of noise at 4096 Hz +delta_t = 1.0 / 4096 +tsamples = int(32 / delta_t) +ts = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127) + +pp.plot(ts.sample_times, ts) +pp.ylabel('Strain') +pp.xlabel('Time (s)') +pp.show() diff --git a/latest/examples/psd/analytic.hires.png b/latest/examples/psd/analytic.hires.png new file mode 100644 index 00000000000..f20b03d0d52 Binary files /dev/null and b/latest/examples/psd/analytic.hires.png differ diff --git a/latest/examples/psd/analytic.pdf b/latest/examples/psd/analytic.pdf new file mode 100644 index 00000000000..65eb17ce53e Binary files /dev/null and b/latest/examples/psd/analytic.pdf differ diff --git a/latest/examples/psd/analytic.png b/latest/examples/psd/analytic.png new file mode 100644 index 00000000000..40579c9e38b Binary files /dev/null and b/latest/examples/psd/analytic.png differ diff --git a/latest/examples/psd/analytic.py b/latest/examples/psd/analytic.py new file mode 100644 index 00000000000..f9ae5eba50a --- /dev/null +++ b/latest/examples/psd/analytic.py @@ -0,0 +1,21 @@ +import matplotlib.pyplot as pp +import pycbc.psd + + +# List the available analytic psds +print(pycbc.psd.get_lalsim_psd_list()) + +delta_f = 1.0 / 4 +flen = int(1024 / delta_f) +low_frequency_cutoff = 30.0 + +# One can either call the psd generator by name +p1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, low_frequency_cutoff) + +# or by using the name as a string. 
+p2 = pycbc.psd.from_string('aLIGOZeroDetLowPower', flen, delta_f, low_frequency_cutoff) + +pp.plot(p1.sample_frequencies, p1, label='HighPower') +pp.plot(p2.sample_frequencies, p2, label='LowPower') +pp.legend() +pp.show() diff --git a/latest/examples/psd/estimate.hires.png b/latest/examples/psd/estimate.hires.png new file mode 100644 index 00000000000..36dc7995ed2 Binary files /dev/null and b/latest/examples/psd/estimate.hires.png differ diff --git a/latest/examples/psd/estimate.pdf b/latest/examples/psd/estimate.pdf new file mode 100644 index 00000000000..585b2031cbc Binary files /dev/null and b/latest/examples/psd/estimate.pdf differ diff --git a/latest/examples/psd/estimate.png b/latest/examples/psd/estimate.png new file mode 100644 index 00000000000..7dc0f493842 Binary files /dev/null and b/latest/examples/psd/estimate.png differ diff --git a/latest/examples/psd/estimate.py b/latest/examples/psd/estimate.py new file mode 100644 index 00000000000..6c328650f09 --- /dev/null +++ b/latest/examples/psd/estimate.py @@ -0,0 +1,31 @@ +import matplotlib.pyplot as pp +import pycbc.noise +import pycbc.psd + + +# generate some colored gaussian noise +flow = 30.0 +delta_f = 1.0 / 16 +flen = int(2048 / delta_f) + 1 +psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow) + +### Generate 128 seconds of noise at 4096 Hz +delta_t = 1.0 / 4096 +tsamples = int(128 / delta_t) +ts = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127) + +# Estimate the PSD +# We'll choose 4 seconds PSD samples that are overlapped 50 % +seg_len = int(4 / delta_t) +seg_stride = int(seg_len / 2) +estimated_psd = pycbc.psd.welch(ts, + seg_len=seg_len, + seg_stride=seg_stride) + +pp.loglog(estimated_psd.sample_frequencies, estimated_psd, label='estimate') +pp.loglog(psd.sample_frequencies, psd, linewidth=3, label='known psd') +pp.xlim(xmin=flow, xmax=2000) +pp.ylim(1e-48, 1e-45) +pp.legend() +pp.grid() +pp.show() diff --git a/latest/examples/psd/read.hires.png b/latest/examples/psd/read.hires.png new file mode 100644 index 00000000000..50b37e6dfe0 Binary files /dev/null and b/latest/examples/psd/read.hires.png differ diff --git a/latest/examples/psd/read.pdf b/latest/examples/psd/read.pdf new file mode 100644 index 00000000000..9ce3a9e646d Binary files /dev/null and b/latest/examples/psd/read.pdf differ diff --git a/latest/examples/psd/read.png b/latest/examples/psd/read.png new file mode 100644 index 00000000000..7ec0ecb275b Binary files /dev/null and b/latest/examples/psd/read.png differ diff --git a/latest/examples/psd/read.py b/latest/examples/psd/read.py new file mode 100644 index 00000000000..ee303b293f0 --- /dev/null +++ b/latest/examples/psd/read.py @@ -0,0 +1,26 @@ +import matplotlib.pyplot as pp +import pycbc.psd +import pycbc.types + + +filename = 'example_psd.txt' + +# The PSD will be interpolated to the requested frequency spacing +delta_f = 1.0 / 4 +length = int(1024 / delta_f) +low_frequency_cutoff = 30.0 +psd = pycbc.psd.from_txt(filename, length, delta_f, + low_frequency_cutoff, is_asd_file=False) +pp.loglog(psd.sample_frequencies, psd, label='interpolated') + +# The PSD will be read in without modification +psd = pycbc.types.load_frequencyseries('./example_psd.txt') +pp.loglog(psd.sample_frequencies, psd, label='raw') + +pp.xlim(xmin=30, xmax=1000) +pp.legend() +pp.xlabel('Hz') +pp.show() + +# Save a psd to file, several formats are supported (.txt, .hdf, .npy) +psd.save('tmp_psd.txt') diff --git a/latest/examples/waveform/add_waveform.py b/latest/examples/waveform/add_waveform.py new file mode 
100644 index 00000000000..4768c85649b --- /dev/null +++ b/latest/examples/waveform/add_waveform.py @@ -0,0 +1,48 @@ +import numpy +import matplotlib.pyplot as pp +import pycbc.waveform +from pycbc.types import TimeSeries + + +def test_waveform(**args): + flow = args['f_lower'] # Required parameter + dt = args['delta_t'] # Required parameter + fpeak = args['fpeak'] # A new parameter for my model + + t = numpy.arange(0, 10, dt) + f = t/t.max() * (fpeak - flow) + flow + a = t + + wf = numpy.exp(2.0j * numpy.pi * f * t) * a + + # Return product should be a pycbc time series in this case for + # each GW polarization + # + # + # Note that by convention, the time at 0 is a fiducial reference. + # For CBC waveforms, this would be set to where the merger occurs + offset = - len(t) * dt + wf = TimeSeries(wf, delta_t=dt, epoch=offset) + return wf.real(), wf.imag() + + +# This tells pycbc about our new waveform so we can call it from standard +# pycbc functions. If this were a frequency-domain model, select 'frequency' +# instead of 'time' to this function call. +pycbc.waveform.add_custom_waveform('test', test_waveform, 'time', force=True) + +# Let's plot what our new waveform looks like +hp, hc = pycbc.waveform.get_td_waveform(approximant="test", + f_lower=20, fpeak=50, + delta_t=1.0/4096) +pp.figure(0) +pp.plot(hp.sample_times, hp) +pp.xlabel('Time (s)') + +pp.figure(1) +hf = hp.to_frequencyseries() +pp.plot(hf.sample_frequencies, hf.real()) +pp.xlabel('Frequency (Hz)') +pp.xscale('log') +pp.xlim(20, 100) +pp.show() diff --git a/latest/examples/waveform/add_waveform_00.hires.png b/latest/examples/waveform/add_waveform_00.hires.png new file mode 100644 index 00000000000..b3520f44f31 Binary files /dev/null and b/latest/examples/waveform/add_waveform_00.hires.png differ diff --git a/latest/examples/waveform/add_waveform_00.pdf b/latest/examples/waveform/add_waveform_00.pdf new file mode 100644 index 00000000000..87dbfd8aaf1 Binary files /dev/null and b/latest/examples/waveform/add_waveform_00.pdf differ diff --git a/latest/examples/waveform/add_waveform_00.png b/latest/examples/waveform/add_waveform_00.png new file mode 100644 index 00000000000..0eac1fe6632 Binary files /dev/null and b/latest/examples/waveform/add_waveform_00.png differ diff --git a/latest/examples/waveform/add_waveform_01.hires.png b/latest/examples/waveform/add_waveform_01.hires.png new file mode 100644 index 00000000000..52f94ad1288 Binary files /dev/null and b/latest/examples/waveform/add_waveform_01.hires.png differ diff --git a/latest/examples/waveform/add_waveform_01.pdf b/latest/examples/waveform/add_waveform_01.pdf new file mode 100644 index 00000000000..e7495f8abba Binary files /dev/null and b/latest/examples/waveform/add_waveform_01.pdf differ diff --git a/latest/examples/waveform/add_waveform_01.png b/latest/examples/waveform/add_waveform_01.png new file mode 100644 index 00000000000..ca11646b879 Binary files /dev/null and b/latest/examples/waveform/add_waveform_01.png differ diff --git a/latest/examples/waveform/higher_modes.hires.png b/latest/examples/waveform/higher_modes.hires.png new file mode 100644 index 00000000000..3c3e64b9a67 Binary files /dev/null and b/latest/examples/waveform/higher_modes.hires.png differ diff --git a/latest/examples/waveform/higher_modes.pdf b/latest/examples/waveform/higher_modes.pdf new file mode 100644 index 00000000000..558e7a044fd Binary files /dev/null and b/latest/examples/waveform/higher_modes.pdf differ diff --git a/latest/examples/waveform/higher_modes.png 
b/latest/examples/waveform/higher_modes.png new file mode 100644 index 00000000000..c395bdc6472 Binary files /dev/null and b/latest/examples/waveform/higher_modes.png differ diff --git a/latest/examples/waveform/higher_modes.py b/latest/examples/waveform/higher_modes.py new file mode 100644 index 00000000000..9c6db4ad084 --- /dev/null +++ b/latest/examples/waveform/higher_modes.py @@ -0,0 +1,42 @@ +import matplotlib.pyplot as pp +from pycbc.waveform import get_td_waveform + +# Let's plot what our new waveform looks like +pp.figure() + +# You can select sets of modes or individual modes using the 'mode_array'. +# The standard format is to provide a list of (l, m) modes; however, +# a string format is also provided to aid population from config files, +# e.g. "22 33" is also acceptable to select these two modes. +# "None" will result in the waveform returning its default, which is usually +# to return all implemented modes. +for mode_select in [None, + [(2, 2), (3, 3)], # Select two modes at once + [(2, 2)], + [(2, 1)], + [(3, 2)], + [(4, 4)], + ]: + hp, hc = get_td_waveform(approximant="IMRPhenomXPHM", + mass1=7, + mass2=40, + f_lower=20.0, + mode_array=mode_select, + inclination=1.0, + delta_t=1.0/4096) + + + + if mode_select is None: + label = 'Full Waveform' + a = hp.max() # peak of the full waveform, used to normalize the modes below + else: + label = "l, m = " + ' '.join([f"{l}, {m}" for l, m in mode_select]) + + (hp / a).plot(label=label) + +pp.xlim(-1, 0.05) +pp.legend() +pp.xlabel('Time [s]') +pp.ylabel('Relative Strain') +pp.show() diff --git a/latest/examples/waveform/plot_detwaveform.hires.png b/latest/examples/waveform/plot_detwaveform.hires.png new file mode 100644 index 00000000000..4629fcfef87 Binary files /dev/null and b/latest/examples/waveform/plot_detwaveform.hires.png differ diff --git a/latest/examples/waveform/plot_detwaveform.pdf b/latest/examples/waveform/plot_detwaveform.pdf new file mode 100644 index 00000000000..5cc3c5043f8 Binary files /dev/null and b/latest/examples/waveform/plot_detwaveform.pdf differ diff --git a/latest/examples/waveform/plot_detwaveform.png b/latest/examples/waveform/plot_detwaveform.png new file mode 100644 index 00000000000..764005e4c17 Binary files /dev/null and b/latest/examples/waveform/plot_detwaveform.png differ diff --git a/latest/examples/waveform/plot_detwaveform.py b/latest/examples/waveform/plot_detwaveform.py new file mode 100644 index 00000000000..8ae99d2681e --- /dev/null +++ b/latest/examples/waveform/plot_detwaveform.py @@ -0,0 +1,44 @@ +import matplotlib.pyplot as pp +from pycbc.waveform import get_td_waveform +from pycbc.detector import Detector + + +apx = 'SEOBNRv4' +# NOTE: Inclination runs from 0 to pi, with poles at 0 and pi +# coa_phase runs from 0 to 2 pi. +hp, hc = get_td_waveform(approximant=apx, + mass1=10, + mass2=10, + spin1z=0.9, + spin2z=0.4, + inclination=1.23, + coa_phase=2.45, + delta_t=1.0/4096, + f_lower=40) + +det_h1 = Detector('H1') +det_l1 = Detector('L1') +det_v1 = Detector('V1') + +# Choose a GPS end time, sky location, and polarization phase for the merger +# NOTE: Right ascension and polarization phase run from 0 to 2pi +# Declination runs from pi/2. to -pi/2 with the poles at pi/2. and -pi/2.
+end_time = 1192529720 +declination = 0.65 +right_ascension = 4.67 +polarization = 2.34 +hp.start_time += end_time +hc.start_time += end_time + +signal_h1 = det_h1.project_wave(hp, hc, right_ascension, declination, polarization) +signal_l1 = det_l1.project_wave(hp, hc, right_ascension, declination, polarization) +signal_v1 = det_v1.project_wave(hp, hc, right_ascension, declination, polarization) + +pp.plot(signal_h1.sample_times, signal_h1, label='H1') +pp.plot(signal_l1.sample_times, signal_l1, label='L1') +pp.plot(signal_v1.sample_times, signal_v1, label='V1') + +pp.ylabel('Strain') +pp.xlabel('Time (s)') +pp.legend() +pp.show() diff --git a/latest/examples/waveform/plot_fd_td.hires.png b/latest/examples/waveform/plot_fd_td.hires.png new file mode 100644 index 00000000000..b6d29b24879 Binary files /dev/null and b/latest/examples/waveform/plot_fd_td.hires.png differ diff --git a/latest/examples/waveform/plot_fd_td.pdf b/latest/examples/waveform/plot_fd_td.pdf new file mode 100644 index 00000000000..025d1b6ee8c Binary files /dev/null and b/latest/examples/waveform/plot_fd_td.pdf differ diff --git a/latest/examples/waveform/plot_fd_td.png b/latest/examples/waveform/plot_fd_td.png new file mode 100644 index 00000000000..090b362824a Binary files /dev/null and b/latest/examples/waveform/plot_fd_td.png differ diff --git a/latest/examples/waveform/plot_fd_td.py b/latest/examples/waveform/plot_fd_td.py new file mode 100644 index 00000000000..912d557a7aa --- /dev/null +++ b/latest/examples/waveform/plot_fd_td.py @@ -0,0 +1,30 @@ +"""Plot a time domain and Fourier domain waveform together in the time domain. +Note that without special cleanup the Fourier domain waveform will exhibit +the Gibb's phenomenon. (http://en.wikipedia.org/wiki/Gibbs_phenomenon) +""" + +import matplotlib.pyplot as pp +from pycbc import types, fft, waveform + + +# Get a time domain waveform +hp, hc = waveform.get_td_waveform(approximant='SEOBNRv4', + mass1=6, mass2=6, delta_t=1.0/4096, f_lower=40) + +# Get a frequency domain waveform +sptilde, sctilde = waveform. 
get_fd_waveform(approximant="TaylorF2", + mass1=6, mass2=6, delta_f=1.0/4, f_lower=40) + +# Inverse FFT it to the time domain +tlen = int(1.0 / hp.delta_t / sptilde.delta_f) +sptilde.resize(tlen // 2 + 1) +sp = types.TimeSeries(types.zeros(tlen), delta_t=hp.delta_t) +fft.ifft(sptilde, sp) + +pp.plot(sp.sample_times, sp, label="TaylorF2 (IFFT)") +pp.plot(hp.sample_times, hp, label='SEOBNRv4') + +pp.ylabel('Strain') +pp.xlabel('Time (s)') +pp.legend() +pp.show() diff --git a/latest/examples/waveform/plot_freq.hires.png b/latest/examples/waveform/plot_freq.hires.png new file mode 100644 index 00000000000..6d20b4f23a4 Binary files /dev/null and b/latest/examples/waveform/plot_freq.hires.png differ diff --git a/latest/examples/waveform/plot_freq.pdf b/latest/examples/waveform/plot_freq.pdf new file mode 100644 index 00000000000..241c846d4bc Binary files /dev/null and b/latest/examples/waveform/plot_freq.pdf differ diff --git a/latest/examples/waveform/plot_freq.png b/latest/examples/waveform/plot_freq.png new file mode 100644 index 00000000000..593dcde2d45 Binary files /dev/null and b/latest/examples/waveform/plot_freq.png differ diff --git a/latest/examples/waveform/plot_freq.py b/latest/examples/waveform/plot_freq.py new file mode 100644 index 00000000000..70995ecb7f2 --- /dev/null +++ b/latest/examples/waveform/plot_freq.py @@ -0,0 +1,21 @@ +import matplotlib.pyplot as pp +from pycbc import waveform + + +for phase_order in [2, 3, 4, 5, 6, 7]: + hp, hc = waveform.get_td_waveform(approximant='SpinTaylorT4', + mass1=10, mass2=10, + phase_order=phase_order, + delta_t=1.0/4096, + f_lower=100) + + hp, hc = hp.trim_zeros(), hc.trim_zeros() + amp = waveform.utils.amplitude_from_polarizations(hp, hc) + f = waveform.utils.frequency_from_polarizations(hp, hc) + + pp.plot(f.sample_times, f, label="PN Order = %s" % phase_order) + +pp.ylabel('Frequency (Hz)') +pp.xlabel('Time (s)') +pp.legend(loc='upper left') +pp.show() diff --git a/latest/examples/waveform/plot_phase.hires.png b/latest/examples/waveform/plot_phase.hires.png new file mode 100644 index 00000000000..9ef6c65ca11 Binary files /dev/null and b/latest/examples/waveform/plot_phase.hires.png differ diff --git a/latest/examples/waveform/plot_phase.pdf b/latest/examples/waveform/plot_phase.pdf new file mode 100644 index 00000000000..59d7a35e2a9 Binary files /dev/null and b/latest/examples/waveform/plot_phase.pdf differ diff --git a/latest/examples/waveform/plot_phase.png b/latest/examples/waveform/plot_phase.png new file mode 100644 index 00000000000..877dcc19003 Binary files /dev/null and b/latest/examples/waveform/plot_phase.png differ diff --git a/latest/examples/waveform/plot_phase.py b/latest/examples/waveform/plot_phase.py new file mode 100644 index 00000000000..6ad52556197 --- /dev/null +++ b/latest/examples/waveform/plot_phase.py @@ -0,0 +1,21 @@ +import matplotlib.pyplot as pp +from pycbc import waveform + + +for apx in ['SEOBNRv4', 'TaylorT4', 'IMRPhenomB']: + hp, hc = waveform.get_td_waveform(approximant=apx, + mass1=10, + mass2=10, + delta_t=1.0/4096, + f_lower=40) + + hp, hc = hp.trim_zeros(), hc.trim_zeros() + amp = waveform.utils.amplitude_from_polarizations(hp, hc) + phase = waveform.utils.phase_from_polarizations(hp, hc) + + pp.plot(phase, amp, label=apx) + +pp.ylabel('GW Strain Amplitude') +pp.xlabel('GW Phase (radians)') +pp.legend(loc='upper left') +pp.show() diff --git a/latest/examples/waveform/plot_waveform.hires.png b/latest/examples/waveform/plot_waveform.hires.png new file mode 100644 index 00000000000..94d66ce770c Binary files
/dev/null and b/latest/examples/waveform/plot_waveform.hires.png differ diff --git a/latest/examples/waveform/plot_waveform.pdf b/latest/examples/waveform/plot_waveform.pdf new file mode 100644 index 00000000000..c793ea5cb9a Binary files /dev/null and b/latest/examples/waveform/plot_waveform.pdf differ diff --git a/latest/examples/waveform/plot_waveform.png b/latest/examples/waveform/plot_waveform.png new file mode 100644 index 00000000000..33009d7e504 Binary files /dev/null and b/latest/examples/waveform/plot_waveform.png differ diff --git a/latest/examples/waveform/plot_waveform.py b/latest/examples/waveform/plot_waveform.py new file mode 100644 index 00000000000..396c8014c6d --- /dev/null +++ b/latest/examples/waveform/plot_waveform.py @@ -0,0 +1,18 @@ +import matplotlib.pyplot as pp +from pycbc.waveform import get_td_waveform + + +for apx in ['SEOBNRv4', 'IMRPhenomD']: + hp, hc = get_td_waveform(approximant=apx, + mass1=10, + mass2=10, + spin1z=0.9, + delta_t=1.0/4096, + f_lower=40) + + pp.plot(hp.sample_times, hp, label=apx) + +pp.ylabel('Strain') +pp.xlabel('Time (s)') +pp.legend() +pp.show() diff --git a/latest/html/_downloads/04a120ac069d6bd3d64d157a34dce221/snr.py b/latest/html/_downloads/04a120ac069d6bd3d64d157a34dce221/snr.py new file mode 100644 index 00000000000..987d7406ca8 --- /dev/null +++ b/latest/html/_downloads/04a120ac069d6bd3d64d157a34dce221/snr.py @@ -0,0 +1,33 @@ +import matplotlib.pyplot as pp +import pycbc.noise +import pycbc.psd +import pycbc.filter +import pycbc.waveform + + +# Generate some noise with an advanced ligo psd +flow = 30.0 +delta_f = 1.0 / 16 +flen = int(2048 / delta_f) + 1 +psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow) + +# Generate 16 seconds of noise at 4096 Hz +delta_t = 1.0 / 4096 +tsamples = int(16 / delta_t) +strain = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127) +stilde = strain.to_frequencyseries() + +# Use a waveform as a matched filter +hp, hc = pycbc.waveform.get_fd_waveform(approximant='IMRPhenomD', + mass1=25, mass2=25, + f_lower=flow, delta_f=stilde.delta_f) + +hp.resize(len(stilde)) +snr = pycbc.filter.matched_filter(hp, stilde, psd=psd, + low_frequency_cutoff=flow) + + +pp.plot(snr.sample_times, abs(snr)) +pp.ylabel('signal-to-noise ratio') +pp.xlabel('time (s)') +pp.show() diff --git a/latest/html/_downloads/05845f8d3bf9d32d657f354cc407d9b2/spin_spatial_distr_example.png b/latest/html/_downloads/05845f8d3bf9d32d657f354cc407d9b2/spin_spatial_distr_example.png new file mode 100644 index 00000000000..d792bc49f33 Binary files /dev/null and b/latest/html/_downloads/05845f8d3bf9d32d657f354cc407d9b2/spin_spatial_distr_example.png differ diff --git a/latest/html/_downloads/0790a055dbce70298e91733425a6514e/on.py b/latest/html/_downloads/0790a055dbce70298e91733425a6514e/on.py new file mode 100644 index 00000000000..b6b2138c3f1 --- /dev/null +++ b/latest/html/_downloads/0790a055dbce70298e91733425a6514e/on.py @@ -0,0 +1,27 @@ +"""This example shows how to determine when a detector is active.""" + +import matplotlib.pyplot as pp +from pycbc import dq +from pycbc.results import ifo_color + + +start_time = 1126051217 +end_time = start_time + 100000 + +# Get times that the Hanford detector has data +hsegs = dq.query_flag('H1', 'DATA', start_time, end_time) + +# Get times that the Livingston detector has data +lsegs = dq.query_flag('L1', 'DATA', start_time, end_time) + +pp.figure(figsize=[10,2]) +for seg in lsegs: + start, end = seg + pp.axvspan(start, end, color=ifo_color('L1'), ymin=0.1, ymax=0.4) + +for seg in hsegs: + 
start, end = seg + pp.axvspan(start, end, color=ifo_color('H1'), ymin=0.6, ymax=0.9) + +pp.xlabel('Time (s)') +pp.show() diff --git a/latest/html/_downloads/091595c508e0c42e0ba19195b18adb84/event1_inj.ini b/latest/html/_downloads/091595c508e0c42e0ba19195b18adb84/event1_inj.ini new file mode 100644 index 00000000000..ecf746a2d84 --- /dev/null +++ b/latest/html/_downloads/091595c508e0c42e0ba19195b18adb84/event1_inj.ini @@ -0,0 +1,27 @@ +# A GW150914-like injection, with approximately the same time and location. + +[variable_params] + +[static_params] +approximant = IMRPhenomD +tc = 1126259462.430 +srcmass1 = 35 +srcmass2 = 35 +distance = 500 +ra = 2.2 +dec = -1.2 +inclination = 0 +polarization = 0 +f_ref = 18 +f_lower = 18 +taper = start + +[waveform_transforms-mass1] +name = custom +inputs = srcmass1, distance +mass1 = srcmass1 * (1+redshift(distance)) + +[waveform_transforms-mass2] +name = custom +inputs = srcmass2, distance +mass2 = srcmass2 * (1+redshift(distance)) diff --git a/latest/html/_downloads/09a181e2efab61de2a500856035f084d/timeseries.py b/latest/html/_downloads/09a181e2efab61de2a500856035f084d/timeseries.py new file mode 100644 index 00000000000..015f211baed --- /dev/null +++ b/latest/html/_downloads/09a181e2efab61de2a500856035f084d/timeseries.py @@ -0,0 +1,20 @@ +import matplotlib.pyplot as pp +import pycbc.noise +import pycbc.psd + + +# The color of the noise matches a PSD which you provide +flow = 30.0 +delta_f = 1.0 / 16 +flen = int(2048 / delta_f) + 1 +psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow) + +# Generate 32 seconds of noise at 4096 Hz +delta_t = 1.0 / 4096 +tsamples = int(32 / delta_t) +ts = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127) + +pp.plot(ts.sample_times, ts) +pp.ylabel('Strain') +pp.xlabel('Time (s)') +pp.show() diff --git a/latest/html/_downloads/09fe48e222843938c8c0ed47251231a6/spin_spatial_distr_example.pdf b/latest/html/_downloads/09fe48e222843938c8c0ed47251231a6/spin_spatial_distr_example.pdf new file mode 100644 index 00000000000..d15ebe3863d Binary files /dev/null and b/latest/html/_downloads/09fe48e222843938c8c0ed47251231a6/spin_spatial_distr_example.pdf differ diff --git a/latest/html/_downloads/0ade72095df83e64d5349edd2104d032/plot_waveform.py b/latest/html/_downloads/0ade72095df83e64d5349edd2104d032/plot_waveform.py new file mode 100644 index 00000000000..396c8014c6d --- /dev/null +++ b/latest/html/_downloads/0ade72095df83e64d5349edd2104d032/plot_waveform.py @@ -0,0 +1,18 @@ +import matplotlib.pyplot as pp +from pycbc.waveform import get_td_waveform + + +for apx in ['SEOBNRv4', 'IMRPhenomD']: + hp, hc = get_td_waveform(approximant=apx, + mass1=10, + mass2=10, + spin1z=0.9, + delta_t=1.0/4096, + f_lower=40) + + pp.plot(hp.sample_times, hp, label=apx) + +pp.ylabel('Strain') +pp.xlabel('Time (s)') +pp.legend() +pp.show() diff --git a/latest/html/_downloads/0c6c8a415913fc391f95c26c7a12130c/snr.hires.png b/latest/html/_downloads/0c6c8a415913fc391f95c26c7a12130c/snr.hires.png new file mode 100644 index 00000000000..14678b5adb9 Binary files /dev/null and b/latest/html/_downloads/0c6c8a415913fc391f95c26c7a12130c/snr.hires.png differ diff --git a/latest/html/_downloads/0d094eafc8b12aa3ffcee3d3f06e4722/timeseries.hires.png b/latest/html/_downloads/0d094eafc8b12aa3ffcee3d3f06e4722/timeseries.hires.png new file mode 100644 index 00000000000..ea4c9429f38 Binary files /dev/null and b/latest/html/_downloads/0d094eafc8b12aa3ffcee3d3f06e4722/timeseries.hires.png differ diff --git 
a/latest/html/_downloads/0ec990a46902a476429a6f3e921fd9e4/custom_00.hires.png b/latest/html/_downloads/0ec990a46902a476429a6f3e921fd9e4/custom_00.hires.png new file mode 100644 index 00000000000..48b2b5b4b88 Binary files /dev/null and b/latest/html/_downloads/0ec990a46902a476429a6f3e921fd9e4/custom_00.hires.png differ diff --git a/latest/html/_downloads/0f03f488991ef25e6a2110bbea97d3e3/custom.py b/latest/html/_downloads/0f03f488991ef25e6a2110bbea97d3e3/custom.py new file mode 100644 index 00000000000..96b93ac07ac --- /dev/null +++ b/latest/html/_downloads/0f03f488991ef25e6a2110bbea97d3e3/custom.py @@ -0,0 +1,45 @@ +import matplotlib.pyplot as plt +from pycbc.detector import add_detector_on_earth, Detector +import pycbc.psd +import numpy as np + +# Set up potential Cosmic Explorer detector locations + +# 40 km detector +lon = -125 / 180.0 * np.pi +lat = 46 / 180.0 * np.pi +yangle = 100.0 / 180.0 * np.pi +# yangle is the rotation clockwise from pointing north at 0 +# xangle can also be specified and allows for detectors that don't have a +# 90 degree opening between the arms. By default we assume xangle is yangle + pi/2 +add_detector_on_earth("C4", lon, lat, yangle=yangle, + xlength=40000, ylength=40000) + +# 20 km detector +# Arm length is optional, but if provided, you can accurately calculate +# high-frequency corrections by passing a frequency argument to the +# antenna pattern method +lon = -94 / 180.0 * np.pi +lat = 29 / 180.0 * np.pi +yangle = 160.0 / 180.0 * np.pi +add_detector_on_earth("C2", lon, lat, yangle=yangle, + xlength=20000, ylength=20000) + +ra, dec = np.meshgrid(np.arange(0, np.pi*2.0, .1), + np.arange(-np.pi / 2.0, np.pi / 2.0, .1)) +ra = ra.flatten() +dec = dec.flatten() + +pol = 0 +time = 1e10 + 8000 # A time when ra roughly lines up with lat/lon coordinates + +for d in [Detector("C4"), Detector("C2")]: + fp, fc = d.antenna_pattern(ra, dec, pol, time) + + plt.figure() + plt.subplot(111, projection="mollweide") + ra[ra>np.pi] -= np.pi * 2.0 + plt.scatter(ra, dec, c=fp**2.0 + fc**2.0) + plt.title("Mollweide") + plt.grid(True) + plt.show() diff --git a/latest/html/_downloads/0fd41f1f8236d85bd2e6fa3106697fe7/marginalized_phase.ini b/latest/html/_downloads/0fd41f1f8236d85bd2e6fa3106697fe7/marginalized_phase.ini new file mode 100644 index 00000000000..aca374167d3 --- /dev/null +++ b/latest/html/_downloads/0fd41f1f8236d85bd2e6fa3106697fe7/marginalized_phase.ini @@ -0,0 +1,44 @@ +;============================================================================== +; +; Gaussian noise model with marginalized phase +; +;============================================================================== +; +; This provides settings for the marginalized Gaussian noise model. The +; low-frequency-cutoff of 20 Hz is adequate for the O1-O3 LIGO and Virgo +; detectors. Change as appropriate for future detectors. +; +; The check-for-valid-times and shift-psd-times-to-valid options mean that +; the model will check to see if there are any data quality flags on during +; the requested analysis times (determined by the [data] section). If there +; are any flags on during the analysis times for a detector, that detector +; will automatically be removed from the analysis (an error will be raised if +; all detectors are removed). If you do not want this to happen, uncomment +; the err-on-missing-detectors option. In that case, an error will be raised +; if any detectors have a data quality flag on.
The shift-psd-times-to-valid +; argument will cause the times used for estimating the PSD (determined by +; the psd-start|end-time arguments in the [data] section) to be shifted left +; or right to avoid any data quality flags. A shift up to +/- dT/2 will be +; tried, where dT is the difference between psd-end-time and psd-start-time. +; If no valid data can be found even with the maximum shift, the detector +; will be removed from the analysis. To check for valid-times, the dq-* options +; are used in the strain model. See +; http://pycbc.org/pycbc/latest/html/inference.html#setting-data for details. +; +; The ignore-failed-waveforms option tells the model to treat points in +; parameter space that cause the waveform generator to fail as having 0 +; likelihood. This may be necessary for newer precessing models, in which +; the entire parameter space has not been fully tested. Note, however, that +; in that case you will not be able to tell if some parameters have been ruled +; out because the data disfavors them, or because the model failed. For this +; reason it is best to let the code raise an error (i.e., leave the option +; commented out), and only ignore these errors once you are confident you know +; the reason. +; +[model] +name = marginalized_phase +low-frequency-cutoff = 20 +check-for-valid-times = +shift-psd-times-to-valid = +;err-on-missing-detectors = +;ignore-failed-waveforms = diff --git a/latest/html/_downloads/10ea8f0d73c2ddf7a7ebba1e433fdffc/spin_spatial_distr_example.py b/latest/html/_downloads/10ea8f0d73c2ddf7a7ebba1e433fdffc/spin_spatial_distr_example.py new file mode 100644 index 00000000000..b46518c3733 --- /dev/null +++ b/latest/html/_downloads/10ea8f0d73c2ddf7a7ebba1e433fdffc/spin_spatial_distr_example.py @@ -0,0 +1,50 @@ +import numpy +import matplotlib.pyplot as plt +import pycbc.coordinates as co +from pycbc import distributions + +# We can choose any bounds between 0 and pi for this distribution, but in units +# of pi, so we use between 0 and 1. +theta_low = 0. +theta_high = 1. + +# Units of pi for the bounds of the azimuthal angle, which goes from 0 to 2 pi. +phi_low = 0. +phi_high = 2. + +# Create a distribution object from distributions.py. +# Here we are using the Uniform Solid Angle function which takes +# theta = polar_bounds(theta_lower_bound to a theta_upper_bound), and then +# phi = azimuthal_bound(phi_lower_bound to a phi_upper_bound). +uniform_solid_angle_distribution = distributions.UniformSolidAngle( + polar_bounds=(theta_low,theta_high), + azimuthal_bounds=(phi_low,phi_high)) + +# Now we can take a random variable sample from that distribution. +# In this case we want 10000 samples. +solid_angle_samples = uniform_solid_angle_distribution.rvs(size=10000) + +# Make a spin 1 magnitude array, since the solid angle is only 2 dimensions and we need a +# 3rd dimension for the 3D plot that we make later on. +spin_mag = numpy.ndarray(shape=(10000), dtype=float) + +for i in range(0,10000): + spin_mag[i] = 1. + +# Use the pycbc.coordinates (imported as co) spherical_to_cartesian function to +# convert from spherical polar coordinates to cartesian coordinates. +spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag, + solid_angle_samples['phi'], + solid_angle_samples['theta']) + +# Plot the spherical distribution of spins to make sure that they are +# distributed across the surface of a sphere.
+ +fig = plt.figure(figsize=(10,10)) +ax = fig.add_subplot(111, projection='3d') +ax.scatter(spinx, spiny, spinz, s=1) + +ax.set_xlabel('Spin X Axis') +ax.set_ylabel('Spin Y Axis') +ax.set_zlabel('Spin Z Axis') +plt.show() diff --git a/latest/html/_downloads/148ab970ade4e6a65d1bca1aa5baf8a4/analytic.hires.png b/latest/html/_downloads/148ab970ade4e6a65d1bca1aa5baf8a4/analytic.hires.png new file mode 100644 index 00000000000..f20b03d0d52 Binary files /dev/null and b/latest/html/_downloads/148ab970ade4e6a65d1bca1aa5baf8a4/analytic.hires.png differ diff --git a/latest/html/_downloads/168313a00888f0f3fca2de438a7001d0/higher_modes.png b/latest/html/_downloads/168313a00888f0f3fca2de438a7001d0/higher_modes.png new file mode 100644 index 00000000000..c395bdc6472 Binary files /dev/null and b/latest/html/_downloads/168313a00888f0f3fca2de438a7001d0/higher_modes.png differ diff --git a/latest/html/_downloads/168a0acfc61247bbc8f9f5585ab38af7/estimate.py b/latest/html/_downloads/168a0acfc61247bbc8f9f5585ab38af7/estimate.py new file mode 100644 index 00000000000..6c328650f09 --- /dev/null +++ b/latest/html/_downloads/168a0acfc61247bbc8f9f5585ab38af7/estimate.py @@ -0,0 +1,31 @@ +import matplotlib.pyplot as pp +import pycbc.noise +import pycbc.psd + + +# generate some colored gaussian noise +flow = 30.0 +delta_f = 1.0 / 16 +flen = int(2048 / delta_f) + 1 +psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow) + +### Generate 128 seconds of noise at 4096 Hz +delta_t = 1.0 / 4096 +tsamples = int(128 / delta_t) +ts = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127) + +# Estimate the PSD +# We'll choose 4 seconds PSD samples that are overlapped 50 % +seg_len = int(4 / delta_t) +seg_stride = int(seg_len / 2) +estimated_psd = pycbc.psd.welch(ts, + seg_len=seg_len, + seg_stride=seg_stride) + +pp.loglog(estimated_psd.sample_frequencies, estimated_psd, label='estimate') +pp.loglog(psd.sample_frequencies, psd, linewidth=3, label='known psd') +pp.xlim(xmin=flow, xmax=2000) +pp.ylim(1e-48, 1e-45) +pp.legend() +pp.grid() +pp.show() diff --git a/latest/html/_downloads/16b7a236703a517c53a01173cc2017aa/plot.sh b/latest/html/_downloads/16b7a236703a517c53a01173cc2017aa/plot.sh new file mode 100644 index 00000000000..89e13edb3f3 --- /dev/null +++ b/latest/html/_downloads/16b7a236703a517c53a01173cc2017aa/plot.sh @@ -0,0 +1,9 @@ +#!/bin/sh +pycbc_inference_plot_posterior --verbose \ + --input-file normal2d.hdf \ + --output-file posterior-normal2d.png \ + --plot-scatter \ + --plot-contours \ + --plot-marginal \ + --z-arg 'loglikelihood:$\log p(h|\vartheta)$' \ + --iteration -1 diff --git a/latest/html/_downloads/17017417929c18b256c527a86855b0d6/spin_examples.py b/latest/html/_downloads/17017417929c18b256c527a86855b0d6/spin_examples.py new file mode 100644 index 00000000000..21ae36c9874 --- /dev/null +++ b/latest/html/_downloads/17017417929c18b256c527a86855b0d6/spin_examples.py @@ -0,0 +1,56 @@ +import matplotlib.pyplot as plt +import numpy +import pycbc.coordinates as co +from pycbc import distributions + +# We can choose any bounds between 0 and pi for this distribution but in +# units of pi so we use between 0 and 1 +theta_low = 0. +theta_high = 1. + +# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi +phi_low = 0. +phi_high = 2. + +# Create a distribution object from distributions.py. 
Here we are using the +# Uniform Solid Angle function which takes +# theta = polar_bounds(theta_lower_bound to a theta_upper_bound), and then +# phi = azimuthal_ bound(phi_lower_bound to a phi_upper_bound). +uniform_solid_angle_distribution = distributions.UniformSolidAngle( + polar_bounds=(theta_low,theta_high), + azimuthal_bounds=(phi_low,phi_high)) + +# Now we can take a random variable sample from that distribution. In this +# case we want 50000 samples. +solid_angle_samples = uniform_solid_angle_distribution.rvs(size=500000) + +# Make spins with unit length for coordinate transformation below. +spin_mag = numpy.ndarray(shape=(500000), dtype=float) + +for i in range(0,500000): + spin_mag[i] = 1. + +# Use the pycbc.coordinates as co spherical_to_cartesian function to convert +# from spherical polar coordinates to cartesian coordinates +spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag, + solid_angle_samples['phi'], + solid_angle_samples['theta']) + +# Choose 50 bins for the histograms. +n_bins = 50 + +plt.figure(figsize=(10,10)) +plt.subplot(2, 2, 1) +plt.hist(spinx, bins = n_bins) +plt.title('Spin x samples') + +plt.subplot(2, 2, 2) +plt.hist(spiny, bins = n_bins) +plt.title('Spin y samples') + +plt.subplot(2, 2, 3) +plt.hist(spinz, bins = n_bins) +plt.title('Spin z samples') + +plt.tight_layout() +plt.show() diff --git a/latest/html/_downloads/1804f9c9926d99639541c44054a1fa50/gw150914_h1_snr.py b/latest/html/_downloads/1804f9c9926d99639541c44054a1fa50/gw150914_h1_snr.py new file mode 100644 index 00000000000..2360e702827 --- /dev/null +++ b/latest/html/_downloads/1804f9c9926d99639541c44054a1fa50/gw150914_h1_snr.py @@ -0,0 +1,33 @@ +import matplotlib.pyplot as pp +from urllib.request import urlretrieve +from pycbc.frame import read_frame +from pycbc.filter import highpass_fir, matched_filter +from pycbc.waveform import get_fd_waveform +from pycbc.psd import welch, interpolate + + +# Read data and remove low frequency content +fname = 'H-H1_LOSC_4_V2-1126259446-32.gwf' +url = "https://www.gwosc.org/GW150914data/" + fname +urlretrieve(url, filename=fname) +h1 = read_frame('H-H1_LOSC_4_V2-1126259446-32.gwf', 'H1:LOSC-STRAIN') +h1 = highpass_fir(h1, 15, 8) + +# Calculate the noise spectrum +psd = interpolate(welch(h1), 1.0 / h1.duration) + +# Generate a template to filter with +hp, hc = get_fd_waveform(approximant="IMRPhenomD", mass1=40, mass2=32, + f_lower=20, delta_f=1.0/h1.duration) +hp.resize(len(h1) // 2 + 1) + +# Calculate the complex (two-phase SNR) +snr = matched_filter(hp, h1, psd=psd, low_frequency_cutoff=20.0) + +# Remove regions corrupted by filter wraparound +snr = snr[len(snr) // 4: len(snr) * 3 // 4] + +pp.plot(snr.sample_times, abs(snr)) +pp.ylabel('signal-to-noise') +pp.xlabel('GPS Time (s)') +pp.show() diff --git a/latest/html/_downloads/196073b0e0a8bead1a5e6f3aa00a5e04/snr.png b/latest/html/_downloads/196073b0e0a8bead1a5e6f3aa00a5e04/snr.png new file mode 100644 index 00000000000..1d8b52893e7 Binary files /dev/null and b/latest/html/_downloads/196073b0e0a8bead1a5e6f3aa00a5e04/snr.png differ diff --git a/latest/html/_downloads/1a208e6695b80fac3547b309ccca5363/stat.hires.png b/latest/html/_downloads/1a208e6695b80fac3547b309ccca5363/stat.hires.png new file mode 100644 index 00000000000..f8f6f4cb6fd Binary files /dev/null and b/latest/html/_downloads/1a208e6695b80fac3547b309ccca5363/stat.hires.png differ diff --git a/latest/html/_downloads/1a8c7a97487fee58ca6907fc6aec21ea/plot_detwaveform.py b/latest/html/_downloads/1a8c7a97487fee58ca6907fc6aec21ea/plot_detwaveform.py 
new file mode 100644 index 00000000000..8ae99d2681e --- /dev/null +++ b/latest/html/_downloads/1a8c7a97487fee58ca6907fc6aec21ea/plot_detwaveform.py @@ -0,0 +1,44 @@ +import matplotlib.pyplot as pp +from pycbc.waveform import get_td_waveform +from pycbc.detector import Detector + + +apx = 'SEOBNRv4' +# NOTE: Inclination runs from 0 to pi, with poles at 0 and pi +# coa_phase runs from 0 to 2 pi. +hp, hc = get_td_waveform(approximant=apx, + mass1=10, + mass2=10, + spin1z=0.9, + spin2z=0.4, + inclination=1.23, + coa_phase=2.45, + delta_t=1.0/4096, + f_lower=40) + +det_h1 = Detector('H1') +det_l1 = Detector('L1') +det_v1 = Detector('V1') + +# Choose a GPS end time, sky location, and polarization phase for the merger +# NOTE: Right ascension and polarization phase runs from 0 to 2pi +# Declination runs from pi/2. to -pi/2 with the poles at pi/2. and -pi/2. +end_time = 1192529720 +declination = 0.65 +right_ascension = 4.67 +polarization = 2.34 +hp.start_time += end_time +hc.start_time += end_time + +signal_h1 = det_h1.project_wave(hp, hc, right_ascension, declination, polarization) +signal_l1 = det_l1.project_wave(hp, hc, right_ascension, declination, polarization) +signal_v1 = det_v1.project_wave(hp, hc, right_ascension, declination, polarization) + +pp.plot(signal_h1.sample_times, signal_h1, label='H1') +pp.plot(signal_l1.sample_times, signal_l1, label='L1') +pp.plot(signal_v1.sample_times, signal_v1, label='V1') + +pp.ylabel('Strain') +pp.xlabel('Time (s)') +pp.legend() +pp.show() diff --git a/latest/html/_downloads/1a99a1fe8ff20313709e6b638df8abb1/margtime.ini b/latest/html/_downloads/1a99a1fe8ff20313709e6b638df8abb1/margtime.ini new file mode 100644 index 00000000000..fd17c0e7e5a --- /dev/null +++ b/latest/html/_downloads/1a99a1fe8ff20313709e6b638df8abb1/margtime.ini @@ -0,0 +1,108 @@ + +[model] +name = marginalized_time +low-frequency-cutoff = 30.0 + +# This is the sample rate used for the model and marginalization +sample_rate = 4096 + +marginalize_vector_params = tc, ra, dec, polarization +marginalize_vector_samples = 2000 + +; You shouldn't use phase marginalization if the approximant has +; higher-order modes +marginalize_phase = True + +marginalize_distance = True +marginalize_distance_param = distance +marginalize_distance_interpolator = True +marginalize_distance_snr_range = 5, 50 +marginalize_distance_density = 100, 100 +marginalize_distance_samples = 1000 + +[data] +instruments = H1 L1 +trigger-time = 1126259462.43 +; See the documentation at +; http://pycbc.org/pycbc/latest/html/inference.html#simulated-bbh-example +; for details on the following settings: +analysis-start-time = -6 +analysis-end-time = 2 +psd-estimation = median-mean +psd-start-time = -256 +psd-end-time = 256 +psd-inverse-length = 8 +psd-segment-length = 8 +psd-segment-stride = 4 +; The frame files must be downloaded from GWOSC before running. Here, we +; assume that the files have been downloaded to the same directory. Adjust +; the file path as necessary if not. +frame-files = H1:H-H1_GWOSC_4KHZ_R1-1126257415-4096.gwf L1:L-L1_GWOSC_4KHZ_R1-1126257415-4096.gwf +channel-name = H1:GWOSC-4KHZ_R1_STRAIN L1:GWOSC-4KHZ_R1_STRAIN +sample-rate = 2048 +; We'll use a high-pass filter so as not to get numerical errors from the large +; amplitude low frequency noise. 
Here we use 15 Hz, which is safely below the +; low frequency cutoff of our likelihood integral (20 Hz) +strain-high-pass = 15 +; The pad-data argument is for the high-pass filter: 8s are added to the +; beginning/end of the analysis/psd times when the data is loaded. After the +; high pass filter is applied, the additional time is discarded. This pad is +; *in addition to* the time added to the analysis start/end time for the PSD +; inverse length. Since it is discarded before the data is transformed for the +; likelihood integral, it has little affect on the run time. +pad-data = 8 + +[sampler] +name = dynesty +dlogz = 1.0 +nlive = 500 + +[variable_params] +; waveform parameters that will vary in MCMC +mass1 = +mass2 = +inclination = +distance = +polarization = +ra = +dec = +tc = + +[static_params] +; waveform parameters that will not change in MCMC +approximant = IMRPhenomD +f_lower = 20 + +[prior-mass1] +name = uniform +min-mass1 = 15 +max-mass1 = 50 + +[prior-mass2] +name = uniform +min-mass2 = 15 +max-mass2 = 50 + +[prior-ra] +name = uniform_angle + +[prior-dec] +name = cos_angle + +[prior-tc] +#; coalescence time prior +name = uniform +min-tc = 1126259462.35 +max-tc = 1126259462.45 + +[prior-distance] +#; following gives a uniform in volume +name = uniform_radius +min-distance = 100 +max-distance = 1000 + +[prior-polarization] +name = uniform_angle + +[prior-inclination] +name = sin_angle diff --git a/latest/html/_downloads/1bcf4f46ede00892b26eaecdcbecb898/injection_smbhb.sh b/latest/html/_downloads/1bcf4f46ede00892b26eaecdcbecb898/injection_smbhb.sh new file mode 100644 index 00000000000..6c9e5b47c7a --- /dev/null +++ b/latest/html/_downloads/1bcf4f46ede00892b26eaecdcbecb898/injection_smbhb.sh @@ -0,0 +1,10 @@ +#!/bin/sh +pycbc_create_injections --verbose \ + --config-files injection_smbhb.ini \ + --ninjections 1 \ + --seed 10 \ + --output-file injection_smbhb.hdf \ + --variable-params-section variable_params \ + --static-params-section static_params \ + --dist-section prior \ + --force diff --git a/latest/html/_downloads/1ea0da9e1c660534f3126d3190f72090/plot.sh b/latest/html/_downloads/1ea0da9e1c660534f3126d3190f72090/plot.sh new file mode 100644 index 00000000000..ed099b24eca --- /dev/null +++ b/latest/html/_downloads/1ea0da9e1c660534f3126d3190f72090/plot.sh @@ -0,0 +1,4 @@ +pycbc_inference_plot_posterior \ +--input-file single.hdf \ +--output-file single.png \ +--z-arg snr diff --git a/latest/html/_downloads/1eb58ba7243dac08da42b48e64be8d77/run.sh b/latest/html/_downloads/1eb58ba7243dac08da42b48e64be8d77/run.sh new file mode 100644 index 00000000000..966f4ed69f9 --- /dev/null +++ b/latest/html/_downloads/1eb58ba7243dac08da42b48e64be8d77/run.sh @@ -0,0 +1,13 @@ +#!/bin/sh +export OMP_NUM_THREADS=1 +pycbc_inference \ +--config-files `dirname "$0"`/lisa_smbhb_relbin.ini \ +--output-file lisa_smbhb_inj_pe.hdf \ +--force \ +--nprocesses 1 \ +--fft-backends fftw \ +--verbose + +# PLEASE NOTE: This example is currently forcing a FFTW backend because MKL +# seems to fail for FFT lengths > 2^24. This is fine for most LIGO +# applications, but an issue for most LISA applications. 
diff --git a/latest/html/_downloads/2523563fd823dd07217350f2227a2e0d/make_injections.sh b/latest/html/_downloads/2523563fd823dd07217350f2227a2e0d/make_injections.sh new file mode 100644 index 00000000000..7b5221603be --- /dev/null +++ b/latest/html/_downloads/2523563fd823dd07217350f2227a2e0d/make_injections.sh @@ -0,0 +1,9 @@ +pycbc_create_injections --verbose --force \ + --ninjections 1 \ + --config-file event1_inj.ini \ + --output-file event1_inj.hdf + +pycbc_create_injections --verbose --force \ + --ninjections 1 \ + --config-file event2_inj.ini \ + --output-file event2_inj.hdf diff --git a/latest/html/_downloads/25805a47a94e7ee8924a3b12278dc62e/on.png b/latest/html/_downloads/25805a47a94e7ee8924a3b12278dc62e/on.png new file mode 100644 index 00000000000..9b677315443 Binary files /dev/null and b/latest/html/_downloads/25805a47a94e7ee8924a3b12278dc62e/on.png differ diff --git a/latest/html/_downloads/25cc739bc011bad399e9baaaa6bb7c70/normal2d.ini b/latest/html/_downloads/25cc739bc011bad399e9baaaa6bb7c70/normal2d.ini new file mode 100644 index 00000000000..c6330504a12 --- /dev/null +++ b/latest/html/_downloads/25cc739bc011bad399e9baaaa6bb7c70/normal2d.ini @@ -0,0 +1,21 @@ +[model] +name = test_normal + +[sampler] +name = emcee +nwalkers = 5000 +niterations = 100 + +[variable_params] +x = +y = + +[prior-x] +name = uniform +min-x = -10 +max-x = 10 + +[prior-y] +name = uniform +min-y = -10 +max-y = 10 diff --git a/latest/html/_downloads/267e8aab61a1d267928dca4f8588fe03/plot.sh b/latest/html/_downloads/267e8aab61a1d267928dca4f8588fe03/plot.sh new file mode 100644 index 00000000000..935e43106e2 --- /dev/null +++ b/latest/html/_downloads/267e8aab61a1d267928dca4f8588fe03/plot.sh @@ -0,0 +1,13 @@ +pycbc_inference_plot_posterior \ + --input-file lisa_smbhb_ldc_pe.hdf \ + --output-file lisa_smbhb_mass_tc_0.png \ + --z-arg snr --plot-scatter --plot-marginal \ + --plot-contours --contour-color black \ + --parameters \ + 'mass1_from_mchirp_q(mchirp,q)':mass1 \ + 'mass2_from_mchirp_q(mchirp,q)':mass2 \ + tc \ + --expected-parameters \ + 'mass1_from_mchirp_q(mchirp,q)':1015522.4376 \ + 'mass2_from_mchirp_q(mchirp,q)':796849.1091 \ + tc:4799624.274911478 \ diff --git a/latest/html/_downloads/2958ccfdeabca74c596999997d897130/hwinj.pdf b/latest/html/_downloads/2958ccfdeabca74c596999997d897130/hwinj.pdf new file mode 100644 index 00000000000..685656f38bc Binary files /dev/null and b/latest/html/_downloads/2958ccfdeabca74c596999997d897130/hwinj.pdf differ diff --git a/latest/html/_downloads/2c118961f6835a9f39d1ed3179e5f8cf/spin_examples.hires.png b/latest/html/_downloads/2c118961f6835a9f39d1ed3179e5f8cf/spin_examples.hires.png new file mode 100644 index 00000000000..3f002890d9d Binary files /dev/null and b/latest/html/_downloads/2c118961f6835a9f39d1ed3179e5f8cf/spin_examples.hires.png differ diff --git a/latest/html/_downloads/2c4c00a374e743238ba1025c90d0f7f8/mchirp_q_from_uniform_m1m2_example.hires.png b/latest/html/_downloads/2c4c00a374e743238ba1025c90d0f7f8/mchirp_q_from_uniform_m1m2_example.hires.png new file mode 100644 index 00000000000..33ab96cbfa7 Binary files /dev/null and b/latest/html/_downloads/2c4c00a374e743238ba1025c90d0f7f8/mchirp_q_from_uniform_m1m2_example.hires.png differ diff --git a/latest/html/_downloads/2cc28387b202d412095a5cc9d519a7a0/pass.png b/latest/html/_downloads/2cc28387b202d412095a5cc9d519a7a0/pass.png new file mode 100644 index 00000000000..35cd68c456b Binary files /dev/null and b/latest/html/_downloads/2cc28387b202d412095a5cc9d519a7a0/pass.png differ diff --git 
a/latest/html/_downloads/2df0dea54aa32af1d188acbf81a3a738/plot.sh b/latest/html/_downloads/2df0dea54aa32af1d188acbf81a3a738/plot.sh new file mode 100644 index 00000000000..b3ba86c4d83 --- /dev/null +++ b/latest/html/_downloads/2df0dea54aa32af1d188acbf81a3a738/plot.sh @@ -0,0 +1,14 @@ +#!/bin/sh +pycbc_inference_plot_posterior \ + --input-file lisa_smbhb_inj_pe.hdf \ + --output-file lisa_smbhb_mass_tc.png \ + --z-arg snr --plot-scatter --plot-marginal \ + --plot-contours --contour-color black \ + --parameters \ + 'mass1_from_mchirp_q(mchirp,q)':mass1 \ + 'mass2_from_mchirp_q(mchirp,q)':mass2 \ + tc \ + --expected-parameters \ + 'mass1_from_mchirp_q(mchirp,q)':1015522.4376 \ + 'mass2_from_mchirp_q(mchirp,q)':796849.1091 \ + tc:4799624.274911478 \ diff --git a/latest/html/_downloads/31cb13748e932f47734db0fb008fdb2b/hwinj.hires.png b/latest/html/_downloads/31cb13748e932f47734db0fb008fdb2b/hwinj.hires.png new file mode 100644 index 00000000000..ca732afc78b Binary files /dev/null and b/latest/html/_downloads/31cb13748e932f47734db0fb008fdb2b/hwinj.hires.png differ diff --git a/latest/html/_downloads/3279a6bca0987ce771242f64df62149d/custom_01.hires.png b/latest/html/_downloads/3279a6bca0987ce771242f64df62149d/custom_01.hires.png new file mode 100644 index 00000000000..57c94d7ca46 Binary files /dev/null and b/latest/html/_downloads/3279a6bca0987ce771242f64df62149d/custom_01.hires.png differ diff --git a/latest/html/_downloads/34b738a7a8bc972a787d891767c676d1/mass_examples.png b/latest/html/_downloads/34b738a7a8bc972a787d891767c676d1/mass_examples.png new file mode 100644 index 00000000000..1a7f235435d Binary files /dev/null and b/latest/html/_downloads/34b738a7a8bc972a787d891767c676d1/mass_examples.png differ diff --git a/latest/html/_downloads/37334d71e56635960fe1d1e52c4ec96f/model-event2_relbin.ini b/latest/html/_downloads/37334d71e56635960fe1d1e52c4ec96f/model-event2_relbin.ini new file mode 100644 index 00000000000..f578c795097 --- /dev/null +++ b/latest/html/_downloads/37334d71e56635960fe1d1e52c4ec96f/model-event2_relbin.ini @@ -0,0 +1,11 @@ +[event2__model] +name = relative +low-frequency-cutoff = ${event1__model|low-frequency-cutoff} +high-frequency-cutoff = ${event1__model|high-frequency-cutoff} +epsilon = ${event1__model|epsilon} +mass1_ref = ${event1__model|mass1_ref} +mass2_ref = ${event1__model|mass2_ref} +tc_ref = FROM_INJECTION:tc +ra_ref = FROM_INJECTION:ra +dec_ref = FROM_INJECTION:dec + diff --git a/latest/html/_downloads/38956967dd9b9b4dc9cd8d616ea21c75/pycbc_bbh_prior.ini b/latest/html/_downloads/38956967dd9b9b4dc9cd8d616ea21c75/pycbc_bbh_prior.ini new file mode 100644 index 00000000000..057455ffef5 --- /dev/null +++ b/latest/html/_downloads/38956967dd9b9b4dc9cd8d616ea21c75/pycbc_bbh_prior.ini @@ -0,0 +1,126 @@ +[variable_params] +srcmass1 = +srcmass2 = +spin1_a = +spin1_azimuthal = +spin1_polar = +spin2_a = +spin2_azimuthal = +spin2_polar = +comoving_volume = +inclination = +polarization = +ra = +dec = +coa_phase = + +[static_params] +approximant = IMRPhenomPv2 +f_lower = 1 +f_ref = 1 + + +;----------------------------------------------------------------------------- +; +; Intrinsic parameters +; +;----------------------------------------------------------------------------- + +[prior-srcmass1] +name = uniform +min-srcmass1 = 10 +max-srcmass1 = 80 + +[prior-srcmass2] +name = uniform +min-srcmass2 = 10 +max-srcmass2 = 80 + +[prior-spin1_a] +name = uniform +min-spin1_a = 0.0 +max-spin1_a = 0.99 + +[prior-spin1_polar+spin1_azimuthal] +name = uniform_solidangle +polar-angle = 
spin1_polar +azimuthal-angle = spin1_azimuthal + +[prior-spin2_a] +name = uniform +min-spin2_a = 0.0 +max-spin2_a = 0.99 + +[prior-spin2_polar+spin2_azimuthal] +name = uniform_solidangle +polar-angle = spin2_polar +azimuthal-angle = spin2_azimuthal + +[waveform_transforms-spin1x+spin1y+spin1z] +name = spherical_to_cartesian +x = spin1x +y = spin1y +z = spin1z +radial = spin1_a +polar = spin1_polar +azimuthal = spin1_azimuthal + +[waveform_transforms-spin2x+spin2y+spin2z] +name = spherical_to_cartesian +x = spin2x +y = spin2y +z = spin2z +radial = spin2_a +polar = spin2_polar +azimuthal = spin2_azimuthal + +;----------------------------------------------------------------------------- +; +; Extrinsic parameters +; +;----------------------------------------------------------------------------- + +[prior-inclination] +name = sin_angle + +[prior-coa_phase] +name = uniform_angle + +[prior-ra+dec] +name = uniform_sky + +[prior-polarization] +name = uniform_angle + +[prior-comoving_volume] +name = uniform +; These limits correspond to luminosity distances of ~[10, 1500) Mpc. Change +; if you are analyzing detections which are more than ~1Gpc away. +min-comoving_volume = 5e3 +max-comoving_volume = 9e9 + +; The following [waveform_transforms] sections convert the comoving volume +; to luminosity distance and the source masses to detector frame masses. +; The latter is done by calculating redshift from the comoving volume first. +; The order that transforms need to be applied is figured out automatically by +; the code, so it doesn't matter what order we put them here, as long as we +; provide transforms for all intermediate steps. +[waveform_transforms-redshift] +name = custom +inputs = comoving_volume +redshift = redshift_from_comoving_volume(comoving_volume) + +[waveform_transforms-distance] +name = custom +inputs = comoving_volume +distance = distance_from_comoving_volume(comoving_volume) + +[waveform_transforms-mass1] +name = custom +inputs = srcmass1, redshift +mass1 = srcmass1 * (1 + redshift) + +[waveform_transforms-mass2] +name = custom +inputs = srcmass2, redshift +mass2 = srcmass2 * (1 + redshift) diff --git a/latest/html/_downloads/3a1f7917f6e1935db1d12e6c55f86853/plot_phase.png b/latest/html/_downloads/3a1f7917f6e1935db1d12e6c55f86853/plot_phase.png new file mode 100644 index 00000000000..877dcc19003 Binary files /dev/null and b/latest/html/_downloads/3a1f7917f6e1935db1d12e6c55f86853/plot_phase.png differ diff --git a/latest/html/_downloads/3d460bdfaf79d8effeeb12da41dc4f51/make_movie.sh b/latest/html/_downloads/3d460bdfaf79d8effeeb12da41dc4f51/make_movie.sh new file mode 100644 index 00000000000..1faf0f4bbcc --- /dev/null +++ b/latest/html/_downloads/3d460bdfaf79d8effeeb12da41dc4f51/make_movie.sh @@ -0,0 +1,12 @@ +#!/bin/sh +pycbc_inference_plot_movie --verbose \ + --nprocesses 4 \ + --input-file normal2d.hdf \ + --output-prefix frames-normal2d \ + --movie-file normal2d_mcmc_evolution.mp4 \ + --cleanup \ + --plot-scatter \ + --plot-contours \ + --plot-marginal \ + --z-arg 'loglikelihood:$\log p(h|\vartheta)$' \ + --frame-step 1 diff --git a/latest/html/_downloads/3e78a0a1d11204a9c9bbb1180523a859/analytic.png b/latest/html/_downloads/3e78a0a1d11204a9c9bbb1180523a859/analytic.png new file mode 100644 index 00000000000..40579c9e38b Binary files /dev/null and b/latest/html/_downloads/3e78a0a1d11204a9c9bbb1180523a859/analytic.png differ diff --git a/latest/html/_downloads/41e148c8ed8b064280f1ab0de3143c4a/snr.pdf b/latest/html/_downloads/41e148c8ed8b064280f1ab0de3143c4a/snr.pdf new file mode 
100644 index 00000000000..44986986a25 Binary files /dev/null and b/latest/html/_downloads/41e148c8ed8b064280f1ab0de3143c4a/snr.pdf differ diff --git a/latest/html/_downloads/472f1250f8a99fec37c79775ac7f2005/fir.py b/latest/html/_downloads/472f1250f8a99fec37c79775ac7f2005/fir.py new file mode 100644 index 00000000000..aeb07812fdd --- /dev/null +++ b/latest/html/_downloads/472f1250f8a99fec37c79775ac7f2005/fir.py @@ -0,0 +1,19 @@ +# Apply an FIR filter. The algorithm is written for high performance so if you +# have a large number of taps, it will resort to a FFT based implementation +# under the hood. +import pycbc.types +import pycbc.filter.resample + +# Reference time series +ts = pycbc.types.TimeSeries([-1, 1, -1, 1, -1], delta_t=1.0) + +# May also be a numpy array +coeff = pycbc.types.Array([1.0, 0, 1.0]) + +ts_filtered = pycbc.filter.resample.lfilter(coeff, ts) + +# If you want to have a zero phase filter provide a symmetric set of coefficients +# The time delay will be compensated for. + +ts_filtered2 = pycbc.filter.resample.fir_zero_filter(coeff, ts) + diff --git a/latest/html/_downloads/48fcb3929c3394b6314094d743085ae0/spin_spatial_distr_example.hires.png b/latest/html/_downloads/48fcb3929c3394b6314094d743085ae0/spin_spatial_distr_example.hires.png new file mode 100644 index 00000000000..94707cfde2e Binary files /dev/null and b/latest/html/_downloads/48fcb3929c3394b6314094d743085ae0/spin_spatial_distr_example.hires.png differ diff --git a/latest/html/_downloads/4b7195fa1e22931dc48ec01704063862/create_workflow.sh b/latest/html/_downloads/4b7195fa1e22931dc48ec01704063862/create_workflow.sh new file mode 100644 index 00000000000..3bbf79974bc --- /dev/null +++ b/latest/html/_downloads/4b7195fa1e22931dc48ec01704063862/create_workflow.sh @@ -0,0 +1,16 @@ +set -e + +WORKFLOW_NAME=inference-gw150914_gw170814 +# Set the HTML_DIR to point to your public html page. This is where the results +# page will be written. 
+HTML_DIR='' +if [ "${HTML_DIR}" == '' ]; then + echo "Please set an HTML_DIR" + exit 1 +fi +SEED=978241 +pycbc_make_inference_workflow \ + --seed ${SEED} \ + --config-files workflow_config.ini events.ini \ + --workflow-name ${WORKFLOW_NAME} \ + --config-overrides results_page:output-path:${HTML_DIR}/${WORKFLOW_NAME} diff --git a/latest/html/_downloads/4bc4ffdccbc6b88dbf6929a727492118/data.hires.png b/latest/html/_downloads/4bc4ffdccbc6b88dbf6929a727492118/data.hires.png new file mode 100644 index 00000000000..7e43ab8a56f Binary files /dev/null and b/latest/html/_downloads/4bc4ffdccbc6b88dbf6929a727492118/data.hires.png differ diff --git a/latest/html/_downloads/4c1dc660f9df4f24f548d214caa1b3d1/make_injection.sh b/latest/html/_downloads/4c1dc660f9df4f24f548d214caa1b3d1/make_injection.sh new file mode 100644 index 00000000000..e4532701022 --- /dev/null +++ b/latest/html/_downloads/4c1dc660f9df4f24f548d214caa1b3d1/make_injection.sh @@ -0,0 +1,10 @@ +#!/bin/sh +pycbc_create_injections --verbose \ + --config-files injection.ini \ + --ninjections 1 \ + --seed 10 \ + --output-file injection.hdf \ + --variable-params-section variable_params \ + --static-params-section static_params \ + --dist-section prior \ + --force diff --git a/latest/html/_downloads/524f46f309c6a5e099509f216d9dc4e9/analytic.py b/latest/html/_downloads/524f46f309c6a5e099509f216d9dc4e9/analytic.py new file mode 100644 index 00000000000..f9ae5eba50a --- /dev/null +++ b/latest/html/_downloads/524f46f309c6a5e099509f216d9dc4e9/analytic.py @@ -0,0 +1,21 @@ +import matplotlib.pyplot as pp +import pycbc.psd + + +# List the available analytic psds +print(pycbc.psd.get_lalsim_psd_list()) + +delta_f = 1.0 / 4 +flen = int(1024 / delta_f) +low_frequency_cutoff = 30.0 + +# One can either call the psd generator by name +p1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, low_frequency_cutoff) + +# or by using the name as a string. +p2 = pycbc.psd.from_string('aLIGOZeroDetLowPower', flen, delta_f, low_frequency_cutoff) + +pp.plot(p1.sample_frequencies, p1, label='HighPower') +pp.plot(p2.sample_frequencies, p2, label='LowPower') +pp.legend() +pp.show() diff --git a/latest/html/_downloads/5308da9784fcffea8dc8006461bf0e95/sampling_from_config_example.hires.png b/latest/html/_downloads/5308da9784fcffea8dc8006461bf0e95/sampling_from_config_example.hires.png new file mode 100644 index 00000000000..2a957fedb1c Binary files /dev/null and b/latest/html/_downloads/5308da9784fcffea8dc8006461bf0e95/sampling_from_config_example.hires.png differ diff --git a/latest/html/_downloads/53ae15772e158f8a14196904c7bfdd50/gw150914_h1_snr.hires.png b/latest/html/_downloads/53ae15772e158f8a14196904c7bfdd50/gw150914_h1_snr.hires.png new file mode 100644 index 00000000000..2e25ce6e4de Binary files /dev/null and b/latest/html/_downloads/53ae15772e158f8a14196904c7bfdd50/gw150914_h1_snr.hires.png differ diff --git a/latest/html/_downloads/5764f3018b9c87223974259998419897/create_inj_workflow.sh b/latest/html/_downloads/5764f3018b9c87223974259998419897/create_inj_workflow.sh new file mode 100644 index 00000000000..a69f183dc1c --- /dev/null +++ b/latest/html/_downloads/5764f3018b9c87223974259998419897/create_inj_workflow.sh @@ -0,0 +1,20 @@ +set -e + +WORKFLOW_NAME=bbh_injections-dynesty +# Set the HTML_DIR to point to your public html page. This is where the results +# page will be written. +HTML_DIR='' +if [ "${HTML_DIR}" == '' ]; then + echo "Please set an HTML_DIR" + exit 1 +fi +SEED=983124 +# Set the number of injections to create. 
For a full PP test, we suggest using +# 100. +NINJ=10 +pycbc_make_inference_inj_workflow \ + --seed ${SEED} \ + --num-injections ${NINJ} \ + --config-files workflow_config.ini injections_config.ini \ + --workflow-name ${WORKFLOW_NAME} \ + --config-overrides results_page:output-path:${HTML_DIR}/${WORKFLOW_NAME} diff --git a/latest/html/_downloads/579bb478d5b214a0f304469206a3e8cb/lisa_smbhb_relbin.ini b/latest/html/_downloads/579bb478d5b214a0f304469206a3e8cb/lisa_smbhb_relbin.ini new file mode 100644 index 00000000000..0733a18cac9 --- /dev/null +++ b/latest/html/_downloads/579bb478d5b214a0f304469206a3e8cb/lisa_smbhb_relbin.ini @@ -0,0 +1,79 @@ +[data] +instruments = LISA_A LISA_E LISA_T +trigger-time = 4800021.15572853 +analysis-start-time = -4800021 +analysis-end-time = 26735978 +pad-data = 0 +sample-rate = 0.2 +psd-file = LISA_A:A_psd.txt LISA_E:E_psd.txt LISA_T:T_psd.txt +frame-files = LISA_A:A_TDI_v2.gwf LISA_E:E_TDI_v2.gwf LISA_T:T_TDI_v2.gwf +channel-name = LISA_A:LA:LA LISA_E:LE:LE LISA_T:LT:LT + +[model] +name = relative +low-frequency-cutoff = 0.0001 +high-frequency-cutoff = 1e-2 +epsilon = 0.01 +mass1_ref = 1015522.4376 +mass2_ref = 796849.1091 +mchirp_ref = 781969.693924104 +q_ref = 1.2744225048415756 +tc_ref = 4799624.274911478 +distance_ref = 17758.367941273442 +spin1z_ref = 0.597755394865021 +spin2z_ref = 0.36905807298613247 +inclination_ref = 1.5970175301911231 + +[variable_params] +mchirp = +q = +tc = + +[static_params] +; LDC-Sangria uses TDI-1.5 +tdi = 1.5 +ref_frame = LISA +approximant = BBHX_PhenomD +coa_phase = 4.275929308696054 +eclipticlongitude = 5.4431083771985165 +eclipticlatitude = -1.2734504596198182 +polarization = 0.22558110042980073 +spin1z = 0.597755394865021 +spin2z = 0.36905807298613247 +distance = 17758.367941273442 +inclination = 1.5970175301911231 +t_obs_start = 31536000 +f_lower = 1e-4 +; Put LISA behind the Earth by ~20 degrees.
+t_offset = 7365189.431698299 + +[prior-mchirp] +name = uniform +min-mchirp = 703772.7245316936 +max-mchirp = 860166.6633165143 + +[prior-q] +name = uniform +min-q = 1.1469802543574181 +max-q = 1.401864755325733 + +[prior-tc] +name = uniform +min-tc = 4798221.15572853 +max-tc = 4801821.15572853 + +[waveform_transforms-mass1+mass2] +name = mchirp_q_to_mass1_mass2 + +[sampler] +name = dynesty +dlogz = 0.1 +nlive = 150 + +; NOTE: While this example doesn't sample in polarization, if doing this we +; recommend the following transformation, and then sampling in this coordinate +; +; [waveform_transforms-polarization] +; name = custom +; inputs = better_pol, eclipticlongitude +; polarization = better_pol + eclipticlongitude diff --git a/latest/html/_downloads/57f2754f3c667a72e8d0a0039b619755/plot_fd_td.pdf b/latest/html/_downloads/57f2754f3c667a72e8d0a0039b619755/plot_fd_td.pdf new file mode 100644 index 00000000000..025d1b6ee8c Binary files /dev/null and b/latest/html/_downloads/57f2754f3c667a72e8d0a0039b619755/plot_fd_td.pdf differ diff --git a/latest/html/_downloads/580c5d22556c2d170e7bd639531e9c6c/sampling_from_config_example.png b/latest/html/_downloads/580c5d22556c2d170e7bd639531e9c6c/sampling_from_config_example.png new file mode 100644 index 00000000000..2a00c96833c Binary files /dev/null and b/latest/html/_downloads/580c5d22556c2d170e7bd639531e9c6c/sampling_from_config_example.png differ diff --git a/latest/html/_downloads/5aa9cd1937d058f7eb06602fe48d0228/mass_examples.hires.png b/latest/html/_downloads/5aa9cd1937d058f7eb06602fe48d0228/mass_examples.hires.png new file mode 100644 index 00000000000..4644ea745a4 Binary files /dev/null and b/latest/html/_downloads/5aa9cd1937d058f7eb06602fe48d0228/mass_examples.hires.png differ diff --git a/latest/html/_downloads/634bf102355a1f22d546cb0f7eacca7e/stat.py b/latest/html/_downloads/634bf102355a1f22d546cb0f7eacca7e/stat.py new file mode 100644 index 00000000000..7997e34c142 --- /dev/null +++ b/latest/html/_downloads/634bf102355a1f22d546cb0f7eacca7e/stat.py @@ -0,0 +1,13 @@ +import matplotlib.pyplot as pp +import pycbc.catalog + + +c = pycbc.catalog.Catalog(source='gwtc-2') +mchirp, elow, ehigh = c.median1d('mchirp', return_errors=True) +spin = c.median1d('chi_eff') + +pp.errorbar(mchirp, spin, xerr=[-elow, ehigh], fmt='o', markersize=7) +pp.xlabel('Chirp Mass') +pp.xscale('log') +pp.ylabel('Effective Spin') +pp.show() diff --git a/latest/html/_downloads/66a2739b160c03de5085d31a82c8e4c7/mchirp_q_from_uniform_m1m2_example.pdf b/latest/html/_downloads/66a2739b160c03de5085d31a82c8e4c7/mchirp_q_from_uniform_m1m2_example.pdf new file mode 100644 index 00000000000..8003b13b722 Binary files /dev/null and b/latest/html/_downloads/66a2739b160c03de5085d31a82c8e4c7/mchirp_q_from_uniform_m1m2_example.pdf differ diff --git a/latest/html/_downloads/6afeaabe42f3f7fbbbc9d4ae50027fa9/emcee_pt-gw150914_like.ini b/latest/html/_downloads/6afeaabe42f3f7fbbbc9d4ae50027fa9/emcee_pt-gw150914_like.ini new file mode 100644 index 00000000000..0dd33a79ae9 --- /dev/null +++ b/latest/html/_downloads/6afeaabe42f3f7fbbbc9d4ae50027fa9/emcee_pt-gw150914_like.ini @@ -0,0 +1,23 @@ +[sampler] +name = emcee_pt +nwalkers = 200 +ntemps = 20 +effective-nsamples = 1000 +checkpoint-interval = 2000 +max-samples-per-chain = 1000 + +[sampler-burn_in] +burn-in-test = nacl & max_posterior + +; +; Sampling transforms +; +[sampling_params] +; parameters on the left will be sampled in +; parametes on the right +mass1, mass2 : mchirp, q + +[sampling_transforms-mchirp+q] +; inputs mass1, mass2 +; 
outputs mchirp, q +name = mass1_mass2_to_mchirp_q diff --git a/latest/html/_downloads/6df47f9ba98451a9703534bd10c52abf/chisq.pdf b/latest/html/_downloads/6df47f9ba98451a9703534bd10c52abf/chisq.pdf new file mode 100644 index 00000000000..ec9e25c4d42 Binary files /dev/null and b/latest/html/_downloads/6df47f9ba98451a9703534bd10c52abf/chisq.pdf differ diff --git a/latest/html/_downloads/714de35ed3e315227db7afe4501cc42a/mchirp_q_from_uniform_m1m2_example.py b/latest/html/_downloads/714de35ed3e315227db7afe4501cc42a/mchirp_q_from_uniform_m1m2_example.py new file mode 100644 index 00000000000..698ed39acf9 --- /dev/null +++ b/latest/html/_downloads/714de35ed3e315227db7afe4501cc42a/mchirp_q_from_uniform_m1m2_example.py @@ -0,0 +1,64 @@ +import matplotlib.pyplot as plt +from pycbc import distributions +from pycbc import conversions +import numpy as np + +# Create chirp mass and mass ratio distribution object that is uniform +# in mass1 and mass2 +minmc = 5 +maxmc = 60 +mc_distribution = distributions.MchirpfromUniformMass1Mass2(mc=(minmc,maxmc)) +# generate q in a symmetric range [min, 1/min] to make mass1 and mass2 +# symmetric +minq = 1/4 +maxq = 1/minq +q_distribution = distributions.QfromUniformMass1Mass2(q=(minq,maxq)) + +# Take 100000 random variable samples from this chirp mass and mass ratio +# distribution. +n_size = 100000 +mc_samples = mc_distribution.rvs(size=n_size) +q_samples = q_distribution.rvs(size=n_size) + +# Convert chirp mass and mass ratio to mass1 and mass2 +m1 = conversions.mass1_from_mchirp_q(mc_samples['mc'],q_samples['q']) +m2 = conversions.mass2_from_mchirp_q(mc_samples['mc'],q_samples['q']) + +# Check the 1D marginalization of mchirp and q is consistent with the +# expected analytical formula +n_bins = 200 +xq = np.linspace(minq,maxq,100) +yq = ((1+xq)/(xq**3))**(2/5) +xmc = np.linspace(minmc,maxmc,100) +ymc = xmc + +plt.figure(figsize=(10,10)) +# Plot histograms of samples in subplots +plt.subplot(221) +plt.hist2d(mc_samples['mc'], q_samples['q'], bins=n_bins, cmap='Blues') +plt.xlabel('chirp mass') +plt.ylabel('mass ratio') +plt.colorbar(fraction=.05, pad=0.05,label='number of samples') + +plt.subplot(222) +plt.hist2d(m1, m2, bins=n_bins, cmap='Blues') +plt.xlabel('mass1') +plt.ylabel('mass2') +plt.colorbar(fraction=.05, pad=0.05,label='number of samples') + +plt.subplot(223) +plt.hist(mc_samples['mc'],density=True,bins=100,label='samples') +plt.plot(xmc,ymc*mc_distribution.norm,label='$P(M_c)\propto M_c$') +plt.xlabel('chirp mass') +plt.ylabel('PDF') +plt.legend() + +plt.subplot(224) +plt.hist(q_samples['q'],density=True,bins=n_bins,label='samples') +plt.plot(xq,yq*q_distribution.norm,label='$P(q)\propto((1+q)/q^3)^{2/5}$') +plt.xlabel('mass ratio') +plt.ylabel('PDF') +plt.legend() + +plt.tight_layout() +plt.show() \ No newline at end of file diff --git a/latest/html/_downloads/72f653000524b9d236f157c5ff389f12/run.sh b/latest/html/_downloads/72f653000524b9d236f157c5ff389f12/run.sh new file mode 100644 index 00000000000..1fd15f68b0a --- /dev/null +++ b/latest/html/_downloads/72f653000524b9d236f157c5ff389f12/run.sh @@ -0,0 +1,7 @@ +pycbc_inference \ +--config-file `dirname "$0"`/relative.ini \ +--nprocesses=1 \ +--output-file relative.hdf \ +--seed 0 \ +--force \ +--verbose diff --git a/latest/html/_downloads/732422e550aa0c31768c26d29d5c890c/o1.ini b/latest/html/_downloads/732422e550aa0c31768c26d29d5c890c/o1.ini new file mode 100644 index 00000000000..64924323f48 --- /dev/null +++ b/latest/html/_downloads/732422e550aa0c31768c26d29d5c890c/o1.ini @@ -0,0 +1,35 @@ 
+;============================================================================== +; +; Settings for analyzing O1 data +; +;============================================================================== +; +; This provides standard settings for analyzing H1 and L1 data in O1. +; It uses "OVERRIDE" for parameters that are event-specific. Replace OVERRIDE +; either by editing this file, or using the config-override option in +; pycbc_inference. +[data] +instruments = H1 L1 +trigger-time = OVERRIDE +analysis-start-time = OVERRIDE +analysis-end-time = OVERRIDE +psd-estimation = median-mean +psd-start-time = -256 +psd-end-time = 256 +psd-inverse-length = 8 +psd-segment-length = 8 +psd-segment-stride = 4 +; If you are running on the Atlas cluster, an LDG cluster, or any computer +; with a ligo-data-server, you can use the frame-type argument to automatically +; locate the frame files containing the data. If you are not +; running on one of those computers, download the necessary data from GWOSC +; (gwosc.org), remove the frame-type argument, and uncomment +; frame-files, pointing the latter to the files you downloaded. +;frame-files = H1:/PATH/TO/DOWNLOADED/H1FRAME.gwf L1:/PATH/TO/DOWNLOADED/L1FRAME.gwf +frame-type = H1:H1_LOSC_16_V1 L1:L1_LOSC_16_V1 +channel-name = H1:GWOSC-16KHZ_R1_STRAIN L1:GWOSC-16KHZ_R1_STRAIN +; A sample rate of 2048 is sufficient for BBH. If you are analyzing a BNS or +; NSBH, change to 4096. +sample-rate = 2048 +strain-high-pass = 15 +pad-data = 8 diff --git a/latest/html/_downloads/756240f62f9fd8b6941073ae6fe3e5dc/plot_fd_td.py b/latest/html/_downloads/756240f62f9fd8b6941073ae6fe3e5dc/plot_fd_td.py new file mode 100644 index 00000000000..912d557a7aa --- /dev/null +++ b/latest/html/_downloads/756240f62f9fd8b6941073ae6fe3e5dc/plot_fd_td.py @@ -0,0 +1,30 @@ +"""Plot a time domain and Fourier domain waveform together in the time domain. +Note that without special cleanup the Fourier domain waveform will exhibit +the Gibbs phenomenon. (http://en.wikipedia.org/wiki/Gibbs_phenomenon) +""" + +import matplotlib.pyplot as pp +from pycbc import types, fft, waveform + + +# Get a time domain waveform +hp, hc = waveform.get_td_waveform(approximant='SEOBNRv4', + mass1=6, mass2=6, delta_t=1.0/4096, f_lower=40) + +# Get a frequency domain waveform +sptilde, sctilde = waveform.
get_fd_waveform(approximant="TaylorF2", + mass1=6, mass2=6, delta_f=1.0/4, f_lower=40) + +# Inverse FFT it to the time domain +tlen = int(1.0 / hp.delta_t / sptilde.delta_f) +sptilde.resize(tlen//2 + 1) +sp = types.TimeSeries(types.zeros(tlen), delta_t=hp.delta_t) +fft.ifft(sptilde, sp) + +pp.plot(sp.sample_times, sp, label="TaylorF2 (IFFT)") +pp.plot(hp.sample_times, hp, label='SEOBNRv4') + +pp.ylabel('Strain') +pp.xlabel('Time (s)') +pp.legend() +pp.show() diff --git a/latest/html/_downloads/75e4294f476c985202b4218289acb60d/plot_freq.py b/latest/html/_downloads/75e4294f476c985202b4218289acb60d/plot_freq.py new file mode 100644 index 00000000000..70995ecb7f2 --- /dev/null +++ b/latest/html/_downloads/75e4294f476c985202b4218289acb60d/plot_freq.py @@ -0,0 +1,21 @@ +import matplotlib.pyplot as pp +from pycbc import waveform + + +for phase_order in [2, 3, 4, 5, 6, 7]: + hp, hc = waveform.get_td_waveform(approximant='SpinTaylorT4', + mass1=10, mass2=10, + phase_order=phase_order, + delta_t=1.0/4096, + f_lower=100) + + hp, hc = hp.trim_zeros(), hc.trim_zeros() + amp = waveform.utils.amplitude_from_polarizations(hp, hc) + f = waveform.utils.frequency_from_polarizations(hp, hc) + + pp.plot(f.sample_times, f, label="PN Order = %s" % phase_order) + +pp.ylabel('Frequency (Hz)') +pp.xlabel('Time (s)') +pp.legend(loc='upper left') +pp.show() diff --git a/latest/html/_downloads/760e775b293160b7541f556c441151f5/workflow_config.ini b/latest/html/_downloads/760e775b293160b7541f556c441151f5/workflow_config.ini new file mode 100644 index 00000000000..8cd46031c40 --- /dev/null +++ b/latest/html/_downloads/760e775b293160b7541f556c441151f5/workflow_config.ini @@ -0,0 +1,192 @@ +[workflow] +; basic information used by the workflow generator +file-retention-level = all_triggers +; The start/end times here are just used for file naming. They can be set +; to anything -- they aren't used for anything, and have no effect on the +; analysis. The actual analysis times used are set by the [data] section in +; the configuration files given to pycbc_inference (specified in the events +; config file). +start-time = 1126259200 +end-time = 1126259600 + +[workflow-ifos] +; The ifos listed here are just used for file naming; it doesn't matter if +; they are not consistent with the actual detectors analyzed. +h1 = +l1 = +v1 = + +[extract_posterior] +; Here, we'll ensure that the output parameters are such that mass1 >= mass2 +; (and associated spins), change comoving volume into redshift and distance, +; add mchirp, q, chi_eff, and chi_p to the posterior files.
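+; Each entry below has the form 'expression:name': the expression may apply
+; conversion functions to the sampled parameters, and 'name' is the label the
+; result is given in the posterior file. The trailing '*' keeps every
+; remaining parameter unchanged.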
+parameters = 'primary_mass(srcmass1, srcmass2):srcmass1' + 'secondary_mass(srcmass1, srcmass2):srcmass2' + 'primary_spin(srcmass1, srcmass2, spin1_a, spin2_a):spin1_a' + 'primary_spin(srcmass1, srcmass2, spin1_azimuthal, spin2_azimuthal):spin1_azimuthal' + 'primary_spin(srcmass1, srcmass2, spin1_polar, spin2_polar):spin1_polar' + 'secondary_spin(srcmass1, srcmass2, spin1_a, spin2_a):spin2_a' + 'secondary_spin(srcmass1, srcmass2, spin1_azimuthal, spin2_azimuthal):spin2_azimuthal' + 'secondary_spin(srcmass1, srcmass2, spin1_polar, spin2_polar):spin2_polar' + 'mchirp_from_mass1_mass2(srcmass1, srcmass2):srcmchirp' + 'chi_eff_from_spherical(srcmass1, srcmass2, spin1_a, spin1_polar, spin2_a, spin2_polar):chi_eff' + 'chi_p_from_spherical(srcmass1, srcmass2, spin1_a, spin1_azimuthal, spin1_polar, spin2_a, spin2_azimuthal, spin2_polar):chi_p' + 'redshift_from_comoving_volume(comoving_volume):redshift' + 'distance_from_comoving_volume(comoving_volume):distance' + '*' +force = + +[workflow-summary_table] +; Parameters that will be printed in the summary table. +; These must be from the set specified in extract_posterior. +table-params = srcmass1 srcmass2 + srcmchirp 'q_from_mass1_mass2(srcmass1, srcmass2):q' + chi_eff chi_p + ra dec delta_tc + distance redshift + 'snr_from_loglr(loglikelihood-lognl):SNR' +; The additional metadata will be printed below the table. We can print +; anything that is in the posterior files' attrs. +print-metadata = 'trigger_time:$t_0$' 'analyzed_detectors:Detectors' + +[workflow-summary_plots] +; Parameter posteriors that will plotted on the summary page. +; These must be from the set specified in extract_posterior. +; Each plot-group corresponds to a single plot that will be plot on the +; summary page. Generally, these should be limited to 1 or 2 dimensions +; (although this is not enforced); larger corner plots can be put in the +; Posteriors page. The plots for those are set by the [workflow-plot_params] +; section (see below). +; The settings for the posterior plots created here are read from the +; [plot_posterior_summary] section. +plot-group-mass1_mass2 = srcmass1 srcmass2 +plot-group-inc_distance = inclination distance +plot-group-chip_chieff = chi_p chi_eff +; Notice that we are not including ra and dec here. The sky map is +; created by [plot_skymap]. + +[workflow-plot_params] +; Parameter posteriors that will plotted on the "Posteriors" page. +; These must be from the set specified in extract_posterior. +; Each plot-group corresponds to a single plot that will be plot on the +; page. Since the events are split into their own sub-pages, it's ok to make +; large corner plots here (although too large and it will be hard to make +; out what each parameter is doing). +; The settings for the posterior plots created here are read from the +; [plot_posterior] section. +; Since we plotted source-frame masses on the summary page, here we'll +; plot detector-frame masses. 
+plot-group-masses = 'srcmass1*(1+redshift):mass1' + 'srcmass2*(1+redshift):mass2' + 'srcmchirp*(1+redshift):mchirp' + 'q_from_mass1_mass2(srcmass1, srcmass2):q' +plot-group-spins = spin1_a spin2_a + spin1_azimuthal spin2_azimuthal + spin1_polar spin2_polar + chi_eff chi_p +plot-group-extrinsic = ra dec delta_tc polarization inclination distance redshift + +[executables] +; paths to executables to use in workflow +inference = ${which:run_pycbc_inference} +extract_posterior = ${which:pycbc_inference_extract_samples} +plot_posterior = ${which:pycbc_inference_plot_posterior} +plot_posterior_summary = ${which:pycbc_inference_plot_posterior} +plot_prior = ${which:pycbc_inference_plot_prior} +table_summary = ${which:pycbc_inference_table_summary} +create_fits_file = ${which:pycbc_inference_create_fits} +plot_skymap = ${which:pycbc_inference_plot_skymap} +plot_spectrum = ${which:pycbc_plot_psd_file} +results_page = ${which:pycbc_make_html_page} +page_versioning = ${which:pycbc_page_versioning} +; diagnostic plots: at the moment, there are none for Dynesty + +[pegasus_profile] +; +MaxRunTimeHours is needed for running on the ATLAS cluster; comment out +; if your cluster does not need this. +condor|+MaxRunTimeHours = 1 + +[pegasus_profile-inference] +condor|request_memory = 40G +; +MaxRunTimeHours is needed for running on the ATLAS cluster; comment out +; if your cluster does not need this. +condor|+MaxRunTimeHours = 10 +condor|request_cpus = ${inference|nprocesses} + +[pegasus_profile-plot_prior] +condor|request_memory = 4G + +[pegasus_profile-plot_skymap] +condor|request_memory = 4G + +[pegasus_profile-plot_posterior] +condor|request_memory = 4G + +[pegasus_profile-plot_posterior_summary] +condor|request_memory = 4G + +[pegasus_profile-plot_samples] +condor|request_memory = 4G + +[inference] +; Command line options for pycbc_inference. +verbose = +; Set the nprocesses to the number of cores you want each job to use. The +; value you use is cluster dependent. +nprocesses = 64 + +[plot_posterior_summary] +; These are the command line options that will be passed to +; pycbc_inference_plot_posterior for creating the posterior plots on the +; summary page. These settings will cause density plots to be made. +plot-contours = +plot-marginal = +plot-density = +density-cmap = Blues +contour-color = black + +[plot_posterior] +; These are the command line options that will be passed to +; pycbc_inference_plot_posterior for creating the posterior plots on the +; posteriors page. These settings will cause scatter plots to be made showing +; each point in the posterior, colored by the matched-filter SNR. +plot-contours = +plot-marginal = +plot-scatter = +z-arg = snr + +[create_fits_file] +; These are the settings for creating a fits file, which is used to produce +; the skymaps. This program needs ligo.skymap to be installed. +; The maxpts option limits the number of points in the posterior that are used +; to create the skymap. This is mostly for speeding up run time. Comment out +; to use all points. +maxpts = 1000 +; Since the posterior file stores delta_tc, we need to tell the fits +; file how to calculate tc +tc = 'trigger_time+delta_tc' + +[plot_skymap] +; These are settings for creating the skymap. This program requires +; ligo.skymap to be installed. Here, we're just setting the colormap to be +; the same as the posterior density plots, above. +colormap = ${plot_posterior_summary|density-cmap} + +[plot_prior] +; This sets command-line options to use for the plot prior function. 
These +; plots are on the "priors" page. The default (giving no options) is to +; plot all of the variable params. + +[table_summary] +; This sets command-line options for the table on the summary page. You +; should not need to set anything here. + +[plot_spectrum] +; This sets command-line options for the ASD plots on the detector sensitivity +; page. The dyn-range-factor needs to be set to 1. +dyn-range-factor = 1 + +[results_page] +; This sets settings for creating the results page. You may want to change +; the analysis title, to make it more descriptive. +analysis-title = "Inference results with dynesty" diff --git a/latest/html/_downloads/765f02e6453de47aaccb177ac06221f4/custom_00.pdf b/latest/html/_downloads/765f02e6453de47aaccb177ac06221f4/custom_00.pdf new file mode 100644 index 00000000000..39b68338c5e Binary files /dev/null and b/latest/html/_downloads/765f02e6453de47aaccb177ac06221f4/custom_00.pdf differ diff --git a/latest/html/_downloads/780a9b7de700f57e47f071a0546c9e4a/run.sh b/latest/html/_downloads/780a9b7de700f57e47f071a0546c9e4a/run.sh new file mode 100644 index 00000000000..91530155990 --- /dev/null +++ b/latest/html/_downloads/780a9b7de700f57e47f071a0546c9e4a/run.sh @@ -0,0 +1,7 @@ +pycbc_inference \ + --config-files model.ini model-event1_relbin.ini model-event2_relbin.ini prior.ini data.ini dynesty.ini \ + --nprocesses 8 \ + --output-file hierarchical.hdf \ + --seed 10 \ + --force \ + --verbose diff --git a/latest/html/_downloads/79afe3bbaf5ada4e3ae6fdd90673b7cc/add_waveform_01.png b/latest/html/_downloads/79afe3bbaf5ada4e3ae6fdd90673b7cc/add_waveform_01.png new file mode 100644 index 00000000000..ca11646b879 Binary files /dev/null and b/latest/html/_downloads/79afe3bbaf5ada4e3ae6fdd90673b7cc/add_waveform_01.png differ diff --git a/latest/html/_downloads/7d8462d7a88e3e0c56afa064af258d16/estimate.png b/latest/html/_downloads/7d8462d7a88e3e0c56afa064af258d16/estimate.png new file mode 100644 index 00000000000..7dc0f493842 Binary files /dev/null and b/latest/html/_downloads/7d8462d7a88e3e0c56afa064af258d16/estimate.png differ diff --git a/latest/html/_downloads/7ddb9eb469843013650edee52e109a37/spin_examples.pdf b/latest/html/_downloads/7ddb9eb469843013650edee52e109a37/spin_examples.pdf new file mode 100644 index 00000000000..1e6167350a8 Binary files /dev/null and b/latest/html/_downloads/7ddb9eb469843013650edee52e109a37/spin_examples.pdf differ diff --git a/latest/html/_downloads/7f2d876d705ae9b0d2cb84a9e52901b3/run_marg.sh b/latest/html/_downloads/7f2d876d705ae9b0d2cb84a9e52901b3/run_marg.sh new file mode 100644 index 00000000000..089a203a364 --- /dev/null +++ b/latest/html/_downloads/7f2d876d705ae9b0d2cb84a9e52901b3/run_marg.sh @@ -0,0 +1,30 @@ +pycbc_inference \ +--config-file `dirname "$0"`/single.ini \ +--nprocesses=1 \ +--output-file single_marg.hdf \ +--seed 0 \ +--force \ +--verbose + +pycbc_inference_plot_posterior \ +--input-file single_marg.hdf \ +--output-file single_marg.png \ +--z-arg snr --vmin 31.85 --vmax 32.15 + +# This reconstructs any marginalized parameters +# and would be optional if you don't need them or +# have sampled over all parameters directly (see single.ini) +pycbc_inference_model_stats \ +--input-file single_marg.hdf \ +--output-file single_demarg.hdf \ +--nprocesses 1 \ +--reconstruct-parameters \ +--force \ +--verbose + +pycbc_inference_plot_posterior \ +--input-file single_demarg.hdf \ +--output-file single_demarg.png \ +--parameters distance inclination polarization coa_phase tc ra dec \ +--z-arg snr --vmin 31.85 --vmax 32.15 \ + 
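+
+# (Optional) A minimal sketch for inspecting the de-marginalized posterior,
+# assuming the usual pycbc_inference HDF layout with samples stored under a
+# 'samples' group; adjust the file and parameter names to your case:
+# python -c "import h5py; print(list(h5py.File('single_demarg.hdf', 'r')['samples'].keys()))"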
diff --git a/latest/html/_downloads/7f8d58c639fb4cc275959516ff119eec/injections_config.ini b/latest/html/_downloads/7f8d58c639fb4cc275959516ff119eec/injections_config.ini new file mode 100644 index 00000000000..b9aa23d756e --- /dev/null +++ b/latest/html/_downloads/7f8d58c639fb4cc275959516ff119eec/injections_config.ini @@ -0,0 +1,122 @@ +[workflow-inference] +; The inference configuration file(s) and any overrides to use for all of the +; injections. +; If no injection file is provided on the command line, injections will be +; drawn from the prior specified in the inference config file +config-files = bbh-uniform_comoving_volume.ini + marginalized_phase.ini + dynesty.ini + data.ini +; As with events sections in the standard workflow, you can specify +; config-overrides for the above file(s). Here, we will change the prior from +; uniform in comoving volume to uniform in the log10 of the comoving volume. +; We'll do this so as to get a distribution of injections with appreciable +; SNR. (A uniform in comoving volume prior leads to most of the injections +; having SNR < 7, and so we mostly end up seeing prior-in, prior-out.) +config-overrides = prior-comoving_volume:name:uniform_log10 +; Optionally, you may also specify the number of times to run inference on +; each injection by setting nruns. Each run will use different random seeds, +; and samples from the runs will be combined into a single posterior file for +; each injection. Not setting this is equivalent to nruns = 1 +;nruns = 1 + +; For the injection workflow, we need to add an executable to create the +; injections. Optionally, we may also add executables to perform +; percentile-percentile (pp) tests. +[executables] +create_injections = ${which:pycbc_create_injections} +; Executables for percentile-percentile test. These are optional. If you do +; not include them in this section, no PP test will be done. If you do +; include them, all 3 must be included. +pp_table_summary = ${which:pycbc_inference_pp_table_summary} +plot_pp = ${which:pycbc_inference_plot_pp} +inj_recovery = ${which:pycbc_inference_plot_inj_recovery} +; We do not need to provide any of the other executables since they are +; specified in workflow_config.ini. When this file is combined with +; workflow_config.ini, these options are automatically added to the +; [executables] section in that file. + + +[workflow-pp_test] +; Since we have included the executables to make the PP plots, we need to +; provide this section. +; The pp-params option specifies what parameters to perform the percentile- +; percentile test on. If you do not provide anything, all parameters +; in the posterior file will be used (that is set by the parameters +; argument in the [extract_posterior] section in workflow_config.ini). A +; p-value of p-values will be calculated for all parameters and reported +; in the summary table on the Percentile-Percentile table of the results +; page. We therefore do not want to include all parameters in the posterior +; file, since we have added parameters that are derived from the others in +; [extract_posterior] section. For this reason, we manually list all the +; parameters we want to do the pp test on here: +pp-params = delta_tc srcmass1 srcmass2 spin1_a spin1_azimuthal spin1_polar + spin2_a spin2_azimuthal spin2_polar distance inclination + polarization ra dec +; In order to do the PP test, the code needs to know what parameters in the +; posterior correspond to which injection parameters. 
Since we have applied +; some functions to the samples parameters when creating the posterior file +; (again, refer to the [extract_posterior] section in workflow_config.ini), +; the mapping between posterior parameters and injection parameters is no +; longer 1:1. To tell the code how to map from the injection parameters to +; the posterior parameters, we provide the following injection-samples-map. +; We can just copy most of the parameters argument from the [extract_posterior] +; section for this (we can't just do a reference because the wildcard (*) that +; is there is not understood by the injection-samples-map option). +injection-samples-map = 'primary_mass(srcmass1, srcmass2):srcmass1' + 'secondary_mass(srcmass1, srcmass2):srcmass2' + 'primary_spin(srcmass1, srcmass2, spin1_a, spin2_a):spin1_a' + 'primary_spin(srcmass1, srcmass2, spin1_azimuthal, spin2_azimuthal):spin1_azimuthal' + 'primary_spin(srcmass1, srcmass2, spin1_polar, spin2_polar):spin1_polar' + 'secondary_spin(srcmass1, srcmass2, spin1_a, spin2_a):spin2_a' + 'secondary_spin(srcmass1, srcmass2, spin1_azimuthal, spin2_azimuthal):spin2_azimuthal' + 'secondary_spin(srcmass1, srcmass2, spin1_polar, spin2_polar):spin2_polar' + 'mchirp_from_mass1_mass2(srcmass1, srcmass2):srcmchirp' + 'chi_eff_from_spherical(srcmass1, srcmass2, spin1_a, spin1_polar, spin2_a, spin2_polar):chi_eff' + 'chi_p_from_spherical(srcmass1, srcmass2, spin1_a, spin1_azimuthal, spin1_polar, spin2_a, spin2_azimuthal, spin2_polar):chi_p' + 'redshift_from_comoving_volume(comoving_volume):redshift' + 'distance_from_comoving_volume(comoving_volume):distance' +; Notice that we can provide more parameters to the injection-samples-map than +; what we will be using in the PP test. This is fine; extra parameters are +; just ignored. By providing all of the parameters here, we can re-use this +; argument for the posterior plots (see below). + +[create_injections] +; Options for the create_injections executable. Do not provide a config file +; nor the number of injections to create here. The inference +; config file is used for generating injections, and the number is determined +; by the command-line options given to pycbc_make_inference_inj_workflow. + +[plot_posterior_summary] +; Adding plot-injection-parameters will cause a red line to be plotted on +; the posterior plots showing the injection parameters. +plot-injection-parameters = +; In order for the red line to be plotted in the right place, we have to +; provide an injection samples map. We can just use what was used in the +; workflow-pp_test section. +injection-samples-map = ${workflow-pp_test|injection-samples-map} +; We do not need to provide any arguments, as the rest are set in +; workflow_config.ini. + +[plot_posterior] +; Do the same for the full corner plots. +plot-injection-parameters = +injection-samples-map = ${workflow-pp_test|injection-samples-map} +; We do not need to provide any arguments, as the rest are set in +; workflow_config.ini.
+ +[pp_table_summary] +; command line options for percentile-percentile table summary +; do not provide parameters or injection-samples map here, as that is read +; from the [workflow-pp_test] section + +[plot_pp] +; command line options for percentile-percentile plot +; do not provide parameters or injection-samples map here, as that is read +; from the [workflow-pp_test] section + +[inj_recovery] +; command line options for injection recovery plots +; do not provide parameters or injection-samples map here, as that is read +; from the [workflow-pp_test] section + diff --git a/latest/html/_downloads/7fa21a05f250d2884f77bcb07370ba50/gw150914_shape.png b/latest/html/_downloads/7fa21a05f250d2884f77bcb07370ba50/gw150914_shape.png new file mode 100644 index 00000000000..825d3a7e9e2 Binary files /dev/null and b/latest/html/_downloads/7fa21a05f250d2884f77bcb07370ba50/gw150914_shape.png differ diff --git a/latest/html/_downloads/81c6031e0f6e3a150cefb716f43d66a6/gw150914_h1_snr.pdf b/latest/html/_downloads/81c6031e0f6e3a150cefb716f43d66a6/gw150914_h1_snr.pdf new file mode 100644 index 00000000000..d30a376d3c5 Binary files /dev/null and b/latest/html/_downloads/81c6031e0f6e3a150cefb716f43d66a6/gw150914_h1_snr.pdf differ diff --git a/latest/html/_downloads/85838b72b4fb049f7accd4b3a2cf3c35/add_waveform_00.pdf b/latest/html/_downloads/85838b72b4fb049f7accd4b3a2cf3c35/add_waveform_00.pdf new file mode 100644 index 00000000000..87dbfd8aaf1 Binary files /dev/null and b/latest/html/_downloads/85838b72b4fb049f7accd4b3a2cf3c35/add_waveform_00.pdf differ diff --git a/latest/html/_downloads/8841a51870154a7d3f36163052c5483b/estimate.pdf b/latest/html/_downloads/8841a51870154a7d3f36163052c5483b/estimate.pdf new file mode 100644 index 00000000000..585b2031cbc Binary files /dev/null and b/latest/html/_downloads/8841a51870154a7d3f36163052c5483b/estimate.pdf differ diff --git a/latest/html/_downloads/8a7ade99cc07f34f40c79d2fdebcd9b8/on.hires.png b/latest/html/_downloads/8a7ade99cc07f34f40c79d2fdebcd9b8/on.hires.png new file mode 100644 index 00000000000..8ddf63b10f0 Binary files /dev/null and b/latest/html/_downloads/8a7ade99cc07f34f40c79d2fdebcd9b8/on.hires.png differ diff --git a/latest/html/_downloads/8c739005a11bef7b653130c71ad2b224/single_simple.ini b/latest/html/_downloads/8c739005a11bef7b653130c71ad2b224/single_simple.ini new file mode 100644 index 00000000000..16d71b95651 --- /dev/null +++ b/latest/html/_downloads/8c739005a11bef7b653130c71ad2b224/single_simple.ini @@ -0,0 +1,66 @@ +[model] +name = single_template + +#; This model precalculates the SNR time series at a fixed rate. 
+#; If you need a higher time resolution, this may be increased +sample_rate = 32768 +low-frequency-cutoff = 30.0 + +[data] +instruments = H1 L1 V1 +analysis-start-time = 1187008482 +analysis-end-time = 1187008892 +psd-estimation = median +psd-segment-length = 16 +psd-segment-stride = 8 +psd-inverse-length = 16 +pad-data = 8 +channel-name = H1:LOSC-STRAIN L1:LOSC-STRAIN V1:LOSC-STRAIN +frame-files = H1:H-H1_LOSC_CLN_4_V1-1187007040-2048.gwf L1:L-L1_LOSC_CLN_4_V1-1187007040-2048.gwf V1:V-V1_LOSC_CLN_4_V1-1187007040-2048.gwf +strain-high-pass = 15 +sample-rate = 2048 + +[sampler] +name = dynesty +sample = rwalk +bound = multi +dlogz = 0.1 +nlive = 200 +checkpoint_time_interval = 100 +maxcall = 10000 + +[variable_params] +; waveform parameters that will vary in MCMC +tc = +distance = +inclination = + +[static_params] +; waveform parameters that will not change in MCMC +approximant = TaylorF2 +f_lower = 30 +mass1 = 1.3757 +mass2 = 1.3757 + +#; we'll choose not to sample over these, but you could +polarization = 0 +ra = 3.44615914 +dec = -0.40808407 + +#; You could also set additional parameters if your waveform model supports / requires it. +; spin1z = 0 + +[prior-tc] +; coalescence time prior +name = uniform +min-tc = 1187008882.4 +max-tc = 1187008882.5 + +[prior-distance] +#; following gives a uniform in volume +name = uniform_radius +min-distance = 10 +max-distance = 60 + +[prior-inclination] +name = sin_angle diff --git a/latest/html/_downloads/8f20384f6c459c412dc9ff2082139a93/data.png b/latest/html/_downloads/8f20384f6c459c412dc9ff2082139a93/data.png new file mode 100644 index 00000000000..1a27f45ebf8 Binary files /dev/null and b/latest/html/_downloads/8f20384f6c459c412dc9ff2082139a93/data.png differ diff --git a/latest/html/_downloads/8f9fbac86ecf6bfffd6095e5279ae16e/read.hires.png b/latest/html/_downloads/8f9fbac86ecf6bfffd6095e5279ae16e/read.hires.png new file mode 100644 index 00000000000..50b37e6dfe0 Binary files /dev/null and b/latest/html/_downloads/8f9fbac86ecf6bfffd6095e5279ae16e/read.hires.png differ diff --git a/latest/html/_downloads/9061a8eb75c5dd7d1a0d35a3589cbc7f/plot_phase.hires.png b/latest/html/_downloads/9061a8eb75c5dd7d1a0d35a3589cbc7f/plot_phase.hires.png new file mode 100644 index 00000000000..9ef6c65ca11 Binary files /dev/null and b/latest/html/_downloads/9061a8eb75c5dd7d1a0d35a3589cbc7f/plot_phase.hires.png differ diff --git a/latest/html/_downloads/91178aedab5fcd6cf839ce24c5cdbb9f/custom_01.png b/latest/html/_downloads/91178aedab5fcd6cf839ce24c5cdbb9f/custom_01.png new file mode 100644 index 00000000000..c4171806b74 Binary files /dev/null and b/latest/html/_downloads/91178aedab5fcd6cf839ce24c5cdbb9f/custom_01.png differ diff --git a/latest/html/_downloads/92505b33321d111bbd97283e6b3d31d1/add_waveform_00.hires.png b/latest/html/_downloads/92505b33321d111bbd97283e6b3d31d1/add_waveform_00.hires.png new file mode 100644 index 00000000000..b3520f44f31 Binary files /dev/null and b/latest/html/_downloads/92505b33321d111bbd97283e6b3d31d1/add_waveform_00.hires.png differ diff --git a/latest/html/_downloads/934fabb608786362d316cbea04e6cd73/data.ini b/latest/html/_downloads/934fabb608786362d316cbea04e6cd73/data.ini new file mode 100644 index 00000000000..1992c840f65 --- /dev/null +++ b/latest/html/_downloads/934fabb608786362d316cbea04e6cd73/data.ini @@ -0,0 +1,41 @@ +# We'll inject the simulated signals into fake noise generated with the aLIGO +# design sensitivity. We need two separate data sections, one for each event. 
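+# Note that the analysis and psd start/end times below are interpreted
+# relative to the trigger-time, so analysis-start-time = -6 and
+# analysis-end-time = 2 select data from 6 s before to 2 s after the trigger.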
+[event1__data] +instruments = H1 L1 +trigger-time = 1126259462.430 +analysis-start-time = -6 +analysis-end-time = 2 +sample-rate = 2048 +fake-strain = H1:aLIGOaLIGODesignSensitivityT1800044 L1:aLIGOaLIGODesignSensitivityT1800044 +fake-strain-seed = H1:237 L1:82 +psd-estimation = median-mean +psd-inverse-length = 8 +psd-segment-length = 8 +psd-segment-stride = 4 +psd-start-time = -256 +psd-end-time = 256 +channel-name = H1:STRAIN L1:STRAIN +injection-file = event1_inj.hdf +strain-high-pass = 15 +pad-data = 8 + +# For event2, we'll reuse many of the same settings, just changing the seed, +# injection file, and the trigger time. +[event2__data] +instruments = ${event1__data|instruments} +trigger-time = 1126859462.0 +analysis-start-time = ${event1__data|analysis-start-time} +analysis-end-time = ${event1__data|analysis-end-time} +sample-rate = ${event1__data|sample-rate} +fake-strain = ${event1__data|fake-strain} +fake-strain-seed = H1:918 L1:6610 +psd-estimation = ${event1__data|psd-estimation} +psd-inverse-length = ${event1__data|psd-inverse-length} +psd-segment-length = ${event1__data|psd-segment-length} +psd-segment-stride = ${event1__data|psd-segment-stride} +psd-start-time = ${event1__data|psd-start-time} +psd-end-time = ${event1__data|psd-end-time} +channel-name = ${event1__data|channel-name} +injection-file = event2_inj.hdf +strain-high-pass = ${event1__data|strain-high-pass} +pad-data = ${event1__data|pad-data} diff --git a/latest/html/_downloads/9390221efa9b25cc56943ebdb7dcca2d/plot_detwaveform.pdf b/latest/html/_downloads/9390221efa9b25cc56943ebdb7dcca2d/plot_detwaveform.pdf new file mode 100644 index 00000000000..5cc3c5043f8 Binary files /dev/null and b/latest/html/_downloads/9390221efa9b25cc56943ebdb7dcca2d/plot_detwaveform.pdf differ diff --git a/latest/html/_downloads/970b616a4c3afcd372b110a34c727039/plot_detwaveform.png b/latest/html/_downloads/970b616a4c3afcd372b110a34c727039/plot_detwaveform.png new file mode 100644 index 00000000000..764005e4c17 Binary files /dev/null and b/latest/html/_downloads/970b616a4c3afcd372b110a34c727039/plot_detwaveform.png differ diff --git a/latest/html/_downloads/983c45d5b32e9369a8daa7a1a4f7f856/prior.ini b/latest/html/_downloads/983c45d5b32e9369a8daa7a1a4f7f856/prior.ini new file mode 100644 index 00000000000..24bcb596987 --- /dev/null +++ b/latest/html/_downloads/983c45d5b32e9369a8daa7a1a4f7f856/prior.ini @@ -0,0 +1,89 @@ +[variable_params] +# common parameters +srcmchirp = +q = +# parameters unique to event1 +event1__delta_tc = +event1__ra = +event1__dec = +# parameters unique to event2 +event2__delta_tc = +# We'll make event2's sky location be dependent on +# event1. We'll do that by making the variable +# parameter be a deviation from event1's sky location. +# The ra and dec for event2 will be set with a +# transform, below. +event2__dra = +event2__ddec = + +[static_params] +approximant = IMRPhenomD +f_lower = 20 +f_ref = 20 +distance = 500 +inclination = 0 +polarization = 0 + +# Prior bounds taken from 4-OGC +[prior-srcmchirp] +name = mchirp_from_uniform_mass1_mass2 +min-srcmchirp = 23 +max-srcmchirp = 42 + +[prior-q] +name = q_from_uniform_mass1_mass2 +min-q = 1. +max-q = 4. 
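+
+# The factor of (1 + 0.105) in the transform below presumably redshifts the
+# source-frame masses to the detector frame, consistent with the fixed
+# distance of 500 Mpc in [static_params].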
+ +[waveform_transforms-mass1+mass2] +name = custom +inputs = srcmchirp, q +mass1 = mass1_from_mchirp_q(srcmchirp,q) * (1 + 0.105) +mass2 = mass2_from_mchirp_q(srcmchirp,q) * (1 + 0.105) + +[prior-event1__delta_tc] +name = uniform +min-event1__delta_tc = -0.1 +max-event1__delta_tc = 0.1 + +[prior-event2__delta_tc] +name = uniform +min-event2__delta_tc = -0.1 +max-event2__delta_tc = 0.1 + +# Note that the output of waveform transforms that output parameters specific +# to a particular sub-model must include that model's label in the parameter +# name, just as the variable and static params do. +[waveform_transforms-event1__tc] +name = custom +inputs = event1__delta_tc +event1__tc = ${event1__data|trigger-time} + event1__delta_tc + +[waveform_transforms-event2__tc] +name = custom +inputs = event2__delta_tc +event2__tc = ${event2__data|trigger-time} + event2__delta_tc + +# We'll use a uniform prior for the sky location of event1... +[prior-event1__ra+event1__dec] +name = uniform_sky +azimuthal-angle = event1__ra +polar-angle = event1__dec + +# ...and a Gaussian centered on event1 for event2. +[prior-event2__dra+event2__ddec] +name = gaussian +event2__dra_mean = 0 +event2__dra_var = 1 +event2__ddec_mean = 0 +event2__ddec_var = 1 + +[waveform_transforms-event2__ra+event2__dec] +name = custom +inputs = event1__ra, event1__dec, event2__dra, event2__ddec +event2__ra = event1__ra + event2__dra +event2__dec = event1__dec + event2__ddec + +[constraint-event2dec] +name = custom +constraint_arg = (event1__dec + event2__ddec >= -pi/2) & (event1__dec + event2__ddec <= pi/2) diff --git a/latest/html/_downloads/98de0a772d36c2bd0875e57ad394a322/run.sh b/latest/html/_downloads/98de0a772d36c2bd0875e57ad394a322/run.sh new file mode 100644 index 00000000000..3d9a7f32a73 --- /dev/null +++ b/latest/html/_downloads/98de0a772d36c2bd0875e57ad394a322/run.sh @@ -0,0 +1,27 @@ +OMP_NUM_THREADS=1 pycbc_inference \ +--config-file `dirname "$0"`/margtime.ini \ +--nprocesses 1 \ +--processing-scheme mkl \ +--output-file marg_150914.hdf \ +--seed 0 \ +--force \ +--verbose + +# This reconstructs any marginalized parameters +OMP_NUM_THREADS=1 pycbc_inference_model_stats \ +--input-file marg_150914.hdf \ +--output-file demarg_150914.hdf \ +--nprocesses 1 \ +--reconstruct-parameters \ +--force \ +--verbose + +pycbc_inference_plot_posterior \ +--input-file demarg_150914.hdf \ +--output-file demarg_150914.png \ +--parameters \ + "primary_mass(mass1, mass2) / (1 + redshift(distance)):srcmass1" \ + "secondary_mass(mass1, mass2) / (1 + redshift(distance)):srcmass2" \ + ra dec tc inclination coa_phase polarization distance \ +--vmin 23.2 \ +--z-arg snr diff --git a/latest/html/_downloads/9a265b71429311540a4aac24d9610fa7/pass.py b/latest/html/_downloads/9a265b71429311540a4aac24d9610fa7/pass.py new file mode 100644 index 00000000000..c291cb68ed4 --- /dev/null +++ b/latest/html/_downloads/9a265b71429311540a4aac24d9610fa7/pass.py @@ -0,0 +1,30 @@ +import matplotlib.pyplot as pp +import pycbc.noise +import pycbc.psd +import pycbc.filter + + +# Generate some noise with an advanced ligo psd +flow = 5.0 +delta_f = 1.0 / 16 +flen = int(2048 / delta_f) + 1 +psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow) + +# Generate 1 seconds of noise at 4096 Hz +delta_t = 1.0 / 4096 +tsamples = int(1 / delta_t) +ts = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127) +pp.plot(ts.sample_times, ts, label='Original') + +# Suppress the low frequencies below 30 Hz +ts = pycbc.filter.highpass(ts, 30.0) +pp.plot(ts.sample_times, ts, 
label='Highpassed') + +# Suppress the high frequencies +ts = pycbc.filter.lowpass_fir(ts, 1000.0, 8) +pp.plot(ts.sample_times, ts, label='Highpassed + Lowpassed') + +pp.legend() +pp.ylabel('Strain') +pp.xlabel('Time (s)') +pp.show() diff --git a/latest/html/_downloads/9b919663a4f1f3f6fd54fc52ea34d906/pass.hires.png b/latest/html/_downloads/9b919663a4f1f3f6fd54fc52ea34d906/pass.hires.png new file mode 100644 index 00000000000..51f58817f37 Binary files /dev/null and b/latest/html/_downloads/9b919663a4f1f3f6fd54fc52ea34d906/pass.hires.png differ diff --git a/latest/html/_downloads/9d2c9f96f635ab4003fe6886ccc1a23d/stat.png b/latest/html/_downloads/9d2c9f96f635ab4003fe6886ccc1a23d/stat.png new file mode 100644 index 00000000000..2e73711aaa9 Binary files /dev/null and b/latest/html/_downloads/9d2c9f96f635ab4003fe6886ccc1a23d/stat.png differ diff --git a/latest/html/_downloads/a032293bba876fad3d899b68ce4891b2/advanced_plot.py b/latest/html/_downloads/a032293bba876fad3d899b68ce4891b2/advanced_plot.py new file mode 100644 index 00000000000..b3c9a898988 --- /dev/null +++ b/latest/html/_downloads/a032293bba876fad3d899b68ce4891b2/advanced_plot.py @@ -0,0 +1,64 @@ +import subprocess +import pickle +import numpy as np +from pycbc.conversions import q_from_mass1_mass2, mchirp_from_mass1_mass2 + + +def spin_ldc2pycbc(mag, pol): + return mag*np.cos(pol) + +def plt(index): + + with open('./MBHB_params_v2_LISA_frame.pkl', 'rb') as f: + params_true_all = pickle.load(f) + + p_index = index + params_true = params_true_all[p_index] + print(params_true) + + modes = [(2,2)] + + q = q_from_mass1_mass2(params_true['Mass1'], params_true['Mass2']) + mchirp = mchirp_from_mass1_mass2(params_true['Mass1'],params_true['Mass2']) + + params = {'approximant': 'BBHX_PhenomD', + 'mass1': params_true['Mass1'], + 'mass2': params_true['Mass2'], + 'inclination': params_true['Inclination'], + 'tc_lisa': params_true['CoalescenceTime_LISA'], + 'polarization_lisa': params_true['Polarization_LISA'], + 'spin1z': spin_ldc2pycbc(params_true['Spin1'], params_true['PolarAngleOfSpin1']), + 'spin2z': spin_ldc2pycbc(params_true['Spin2'], params_true['PolarAngleOfSpin2']), + 'coa_phase': params_true['PhaseAtCoalescence'], + 'distance': params_true['Distance'], + 'eclipticlatitude_lisa': params_true['EclipticLatitude_LISA'], + 'eclipticlongitude_lisa': params_true['EclipticLongitude_LISA'], + 'mchirp': mchirp, + 'q': q, + 'mode_array': modes + } + + plot_code = f""" + pycbc_inference_plot_posterior \ + --input-file lisa_smbhb_ldc_pe.hdf \ + --output-file lisa_smbhb_mass_tc_{p_index}.png \ + --z-arg snr --plot-scatter --plot-marginal \ + --plot-contours --contour-color black \ + --parameters \ + mass1_from_mchirp_q(mchirp,q):mass1 \ + mass2_from_mchirp_q(mchirp,q):mass2 \ + tc \ + --expected-parameters \ + mass1_from_mchirp_q(mchirp,q):{params['mass1']} \ + mass2_from_mchirp_q(mchirp,q):{params['mass2']} \ + tc:{params['tc_lisa']} \ + """ + return plot_code + +# The index of first SMBHB in LDC Sangria (0-14) is 0. 
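+# To make plots for more injections, extend this list, e.g. p = list(range(15))
+# to cover all 15 events.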
+p = [0] + +for i in p: + process = subprocess.Popen(plt(i).split(), stdout=subprocess.PIPE) + output, error = process.communicate() + print('rel{} image created'.format(i)) diff --git a/latest/html/_downloads/a17ff9da6a718d5bf3338f800293a453/mchirp_q_from_uniform_m1m2_example.png b/latest/html/_downloads/a17ff9da6a718d5bf3338f800293a453/mchirp_q_from_uniform_m1m2_example.png new file mode 100644 index 00000000000..a6d88906ff2 Binary files /dev/null and b/latest/html/_downloads/a17ff9da6a718d5bf3338f800293a453/mchirp_q_from_uniform_m1m2_example.png differ diff --git a/latest/html/_downloads/a1877608f232a719343d00812d4a6a73/pass.pdf b/latest/html/_downloads/a1877608f232a719343d00812d4a6a73/pass.pdf new file mode 100644 index 00000000000..5bda1dd37df Binary files /dev/null and b/latest/html/_downloads/a1877608f232a719343d00812d4a6a73/pass.pdf differ diff --git a/latest/html/_downloads/a1e83afccc8df7dd2af9da87099a64c9/gw150914_h1_snr.png b/latest/html/_downloads/a1e83afccc8df7dd2af9da87099a64c9/gw150914_h1_snr.png new file mode 100644 index 00000000000..0657795f32f Binary files /dev/null and b/latest/html/_downloads/a1e83afccc8df7dd2af9da87099a64c9/gw150914_h1_snr.png differ diff --git a/latest/html/_downloads/a1fefb41acc88e3dd2fd71a8673a6a73/run.sh b/latest/html/_downloads/a1fefb41acc88e3dd2fd71a8673a6a73/run.sh new file mode 100644 index 00000000000..31bc9e82dfd --- /dev/null +++ b/latest/html/_downloads/a1fefb41acc88e3dd2fd71a8673a6a73/run.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +# sampler parameters +PRIOR_CONFIG=gw150914_like.ini +DATA_CONFIG=data.ini +SAMPLER_CONFIG=emcee_pt-gw150914_like.ini +OUTPUT_PATH=inference.hdf + +# the following sets the number of cores to use; adjust as needed to +# your computer's capabilities +NPROCS=10 + +# run sampler +# Running with OMP_NUM_THREADS=1 stops lalsimulation +# from spawning multiple jobs that would otherwise be used +# by pycbc_inference and cause a reduced runtime. 
+OMP_NUM_THREADS=1 \ +pycbc_inference --verbose \ + --seed 12 \ + --config-file ${PRIOR_CONFIG} ${DATA_CONFIG} ${SAMPLER_CONFIG} \ + --output-file ${OUTPUT_PATH} \ + --nprocesses ${NPROCS} \ + --force diff --git a/latest/html/_downloads/a32b063245d40146934e009a78b2e9e3/plot_detwaveform.hires.png b/latest/html/_downloads/a32b063245d40146934e009a78b2e9e3/plot_detwaveform.hires.png new file mode 100644 index 00000000000..4629fcfef87 Binary files /dev/null and b/latest/html/_downloads/a32b063245d40146934e009a78b2e9e3/plot_detwaveform.hires.png differ diff --git a/latest/html/_downloads/a370f2d3d86bf49e84425cc59758ae86/plot_waveform.hires.png b/latest/html/_downloads/a370f2d3d86bf49e84425cc59758ae86/plot_waveform.hires.png new file mode 100644 index 00000000000..94d66ce770c Binary files /dev/null and b/latest/html/_downloads/a370f2d3d86bf49e84425cc59758ae86/plot_waveform.hires.png differ diff --git a/latest/html/_downloads/a3c7e3c9469d27473ba132ee67a423f0/plot.sh b/latest/html/_downloads/a3c7e3c9469d27473ba132ee67a423f0/plot.sh new file mode 100644 index 00000000000..e3c5bc88737 --- /dev/null +++ b/latest/html/_downloads/a3c7e3c9469d27473ba132ee67a423f0/plot.sh @@ -0,0 +1,4 @@ +pycbc_inference_plot_posterior \ +--input-file relative.hdf \ +--output-file relative.png \ +--z-arg snr diff --git a/latest/html/_downloads/a4a29c7b648dd581082f2d751006902f/gw150914_like.ini b/latest/html/_downloads/a4a29c7b648dd581082f2d751006902f/gw150914_like.ini new file mode 100644 index 00000000000..2e634af3e4d --- /dev/null +++ b/latest/html/_downloads/a4a29c7b648dd581082f2d751006902f/gw150914_like.ini @@ -0,0 +1,116 @@ +[model] +name = gaussian_noise +low-frequency-cutoff = 20.0 + +[variable_params] +; waveform parameters that will vary in MCMC +delta_tc = +mass1 = +mass2 = +spin1_a = +spin1_azimuthal = +spin1_polar = +spin2_a = +spin2_azimuthal = +spin2_polar = +distance = +coa_phase = +inclination = +polarization = +ra = +dec = + +[static_params] +; waveform parameters that will not change in MCMC +approximant = IMRPhenomPv2 +f_lower = 20 +f_ref = 20 +; we'll set the tc by using the trigger time in the data +; section of the config file + delta_tc +trigger_time = ${data|trigger-time} + +[prior-delta_tc] +; coalescence time prior +name = uniform +min-delta_tc = -0.1 +max-delta_tc = 0.1 + +[waveform_transforms-tc] +; we need to provide tc to the waveform generator +name = custom +inputs = delta_tc +tc = ${data|trigger-time} + delta_tc + +[prior-mass1] +name = uniform +min-mass1 = 10. +max-mass1 = 80. + +[prior-mass2] +name = uniform +min-mass2 = 10. +max-mass2 = 80. + +[prior-spin1_a] +name = uniform +min-spin1_a = 0.0 +max-spin1_a = 0.99 + +[prior-spin1_polar+spin1_azimuthal] +name = uniform_solidangle +polar-angle = spin1_polar +azimuthal-angle = spin1_azimuthal + +[prior-spin2_a] +name = uniform +min-spin2_a = 0.0 +max-spin2_a = 0.99 + +[prior-spin2_polar+spin2_azimuthal] +name = uniform_solidangle +polar-angle = spin2_polar +azimuthal-angle = spin2_azimuthal + +; The waveform generator expects spins to be in cartesian coordinates, with +; names spin(1|2)(x|y|z). We therefore need to provide a waveform transform +; that converts the spherical coordinates that we have defined the spin prior +; in to cartesian coordinates. 
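+; (Here spherical to Cartesian follows the usual convention, e.g.
+; spin1x = spin1_a * sin(spin1_polar) * cos(spin1_azimuthal),
+; spin1y = spin1_a * sin(spin1_polar) * sin(spin1_azimuthal),
+; spin1z = spin1_a * cos(spin1_polar), and likewise for spin2.)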
+[waveform_transforms-spin1x+spin1y+spin1z] +name = spherical_to_cartesian +x = spin1x +y = spin1y +z = spin1z +radial = spin1_a +polar = spin1_polar +azimuthal = spin1_azimuthal + +[waveform_transforms-spin2x+spin2y+spin2z] +name = spherical_to_cartesian +x = spin2x +y = spin2y +z = spin2z +radial = spin2_a +polar = spin2_polar +azimuthal = spin2_azimuthal + +[prior-distance] +; following gives a uniform volume prior +name = uniform_radius +min-distance = 10 +max-distance = 1000 + +[prior-coa_phase] +; coalescence phase prior +name = uniform_angle + +[prior-inclination] +; inclination prior +name = sin_angle + +[prior-ra+dec] +; sky position prior +name = uniform_sky + +[prior-polarization] +; polarization prior +name = uniform_angle diff --git a/latest/html/_downloads/a5278cc329f7b3f6ddc70b1494c95a97/timeseries.png b/latest/html/_downloads/a5278cc329f7b3f6ddc70b1494c95a97/timeseries.png new file mode 100644 index 00000000000..20daa635681 Binary files /dev/null and b/latest/html/_downloads/a5278cc329f7b3f6ddc70b1494c95a97/timeseries.png differ diff --git a/latest/html/_downloads/a8e2421ad41e50761a7c391b997dd5b4/chisq.hires.png b/latest/html/_downloads/a8e2421ad41e50761a7c391b997dd5b4/chisq.hires.png new file mode 100644 index 00000000000..537e4dc3a33 Binary files /dev/null and b/latest/html/_downloads/a8e2421ad41e50761a7c391b997dd5b4/chisq.hires.png differ diff --git a/latest/html/_downloads/a9c7975e4229dddf16539c8bb067843b/higher_modes.pdf b/latest/html/_downloads/a9c7975e4229dddf16539c8bb067843b/higher_modes.pdf new file mode 100644 index 00000000000..558e7a044fd Binary files /dev/null and b/latest/html/_downloads/a9c7975e4229dddf16539c8bb067843b/higher_modes.pdf differ diff --git a/latest/html/_downloads/aa5e3bb7d69ddd7d7edcfd42a19348ba/on.pdf b/latest/html/_downloads/aa5e3bb7d69ddd7d7edcfd42a19348ba/on.pdf new file mode 100644 index 00000000000..a155612fb54 Binary files /dev/null and b/latest/html/_downloads/aa5e3bb7d69ddd7d7edcfd42a19348ba/on.pdf differ diff --git a/latest/html/_downloads/adabcee75346fe062f34af90d0fa1bf0/mass_examples.pdf b/latest/html/_downloads/adabcee75346fe062f34af90d0fa1bf0/mass_examples.pdf new file mode 100644 index 00000000000..29a8361d469 Binary files /dev/null and b/latest/html/_downloads/adabcee75346fe062f34af90d0fa1bf0/mass_examples.pdf differ diff --git a/latest/html/_downloads/afadbefa604476a881ca7ecd4c8fac1a/plot_fd_td.hires.png b/latest/html/_downloads/afadbefa604476a881ca7ecd4c8fac1a/plot_fd_td.hires.png new file mode 100644 index 00000000000..b6d29b24879 Binary files /dev/null and b/latest/html/_downloads/afadbefa604476a881ca7ecd4c8fac1a/plot_fd_td.hires.png differ diff --git a/latest/html/_downloads/b041c338cf11d8b2d1eacdfc20b68586/run.sh b/latest/html/_downloads/b041c338cf11d8b2d1eacdfc20b68586/run.sh new file mode 100644 index 00000000000..ec8d962a52c --- /dev/null +++ b/latest/html/_downloads/b041c338cf11d8b2d1eacdfc20b68586/run.sh @@ -0,0 +1,7 @@ +#!/bin/sh +pycbc_inference --verbose \ + --config-files normal2d.ini \ + --output-file normal2d.hdf \ + --nprocesses 2 \ + --seed 10 \ + --force diff --git a/latest/html/_downloads/b13d523dd860aa810518be58c4d0f0af/model-event1_relbin.ini b/latest/html/_downloads/b13d523dd860aa810518be58c4d0f0af/model-event1_relbin.ini new file mode 100644 index 00000000000..439e78075e9 --- /dev/null +++ b/latest/html/_downloads/b13d523dd860aa810518be58c4d0f0af/model-event1_relbin.ini @@ -0,0 +1,10 @@ +[event1__model] +name = relative +low-frequency-cutoff = 18.0 +high-frequency-cutoff = 1024.0 +epsilon = 0.005 
+mass1_ref = 35 +mass2_ref = 35 +tc_ref = FROM_INJECTION:tc +ra_ref = FROM_INJECTION:ra +dec_ref = FROM_INJECTION:dec diff --git a/latest/html/_downloads/b2cf9d55af524aa250078baa06b28957/get.sh b/latest/html/_downloads/b2cf9d55af524aa250078baa06b28957/get.sh new file mode 100644 index 00000000000..4d7962c408b --- /dev/null +++ b/latest/html/_downloads/b2cf9d55af524aa250078baa06b28957/get.sh @@ -0,0 +1,14 @@ +set -e + +# Download each file only if it is not already present. +for channel in A E T +do + strain_file=${channel}_TDI_v2.gwf + test -f ${strain_file} || curl -LO --show-error --silent https://zenodo.org/record/7497853/files/${strain_file} + + psd_file=${channel}_psd.txt + test -f ${psd_file} || curl -LO --show-error --silent https://zenodo.org/record/7497853/files/${psd_file} +done + +params_file=MBHB_params_v2_LISA_frame.pkl +test -f ${params_file} || curl -LO --show-error --silent https://zenodo.org/record/7497853/files/${params_file} diff --git a/latest/html/_downloads/b47605ece9341b3cd4f5daede64f1016/plot_freq.hires.png b/latest/html/_downloads/b47605ece9341b3cd4f5daede64f1016/plot_freq.hires.png new file mode 100644 index 00000000000..6d20b4f23a4 Binary files /dev/null and b/latest/html/_downloads/b47605ece9341b3cd4f5daede64f1016/plot_freq.hires.png differ diff --git a/latest/html/_downloads/b4a89d69026a1bf9afa043c9a8234b83/relative.ini b/latest/html/_downloads/b4a89d69026a1bf9afa043c9a8234b83/relative.ini new file mode 100644 index 00000000000..b607966069b --- /dev/null +++ b/latest/html/_downloads/b4a89d69026a1bf9afa043c9a8234b83/relative.ini @@ -0,0 +1,85 @@ +[model] +name = relative +low-frequency-cutoff = 30.0 +high-frequency-cutoff = 1024.0 +epsilon = 0.03 +mass1_ref = 1.3757 +mass2_ref = 1.3757 +tc_ref = 1187008882.42 + +[data] +instruments = H1 L1 V1 +analysis-start-time = 1187008482 +analysis-end-time = 1187008892 +psd-estimation = median +psd-segment-length = 16 +psd-segment-stride = 8 +psd-inverse-length = 16 +pad-data = 8 +channel-name = H1:LOSC-STRAIN L1:LOSC-STRAIN V1:LOSC-STRAIN +frame-files = H1:H-H1_LOSC_CLN_4_V1-1187007040-2048.gwf L1:L-L1_LOSC_CLN_4_V1-1187007040-2048.gwf V1:V-V1_LOSC_CLN_4_V1-1187007040-2048.gwf +strain-high-pass = 15 +sample-rate = 2048 + +[sampler] +name = emcee_pt +ntemps = 4 +nwalkers = 100 +niterations = 300 + +[sampler-burn_in] +burn-in-test = min_iterations +min-iterations = 100 + +[variable_params] +; waveform parameters that will vary in MCMC +tc = +distance = +inclination = +mchirp = +eta = + +[static_params] +; waveform parameters that will not change in MCMC +approximant = TaylorF2 +f_lower = 30 + +#; we'll choose not to sample over these, but you could +polarization = 0.0 +ra = 3.44615914 +dec = -0.40808407 + +#; You could also set additional parameters if your waveform model supports / requires it.
+; spin1z = 0 + +[prior-mchirp] +; chirp mass prior +name = uniform +min-mchirp = 1.1876 +max-mchirp = 1.2076 + +[prior-eta] +; symmetric mass ratio prior +name = uniform +min-eta = 0.23 +max-eta = 0.25 + +[prior-tc] +; coalescence time prior +name = uniform +min-tc = 1187008882.4 +max-tc = 1187008882.5 + +[prior-distance] +#; following gives a uniform in volume +name = uniform_radius +min-distance = 10 +max-distance = 60 + +[prior-inclination] +name = sin_angle + +[waveform_transforms-mass1+mass2] +; transform from mchirp, eta to mass1, mass2 for waveform generation +name = mchirp_eta_to_mass1_mass2 + diff --git a/latest/html/_downloads/b57d20df612b39fc74ac65832572dcf0/hwinj.py b/latest/html/_downloads/b57d20df612b39fc74ac65832572dcf0/hwinj.py new file mode 100644 index 00000000000..3c0d2ea5089 --- /dev/null +++ b/latest/html/_downloads/b57d20df612b39fc74ac65832572dcf0/hwinj.py @@ -0,0 +1,22 @@ +"""This example shows how to determine when a CBC hardware injection is present +in the data from a detector. +""" + +import matplotlib.pyplot as pp +from pycbc import dq + + +start_time = 1126051217 +end_time = start_time + 10000000 + +# Get times that the Livingston detector has CBC injections into the data +segs = dq.query_flag('L1', 'CBC_HW_INJ', start_time, end_time) + +pp.figure(figsize=[10, 2]) +for seg in segs: + start, end = seg + pp.axvspan(start, end, color='blue') + +pp.xlabel('Time (s)') +pp.show() + diff --git a/latest/html/_downloads/b6127b2d5e1b58372aed0948232f3cc3/o2.ini b/latest/html/_downloads/b6127b2d5e1b58372aed0948232f3cc3/o2.ini new file mode 100644 index 00000000000..873e5034149 --- /dev/null +++ b/latest/html/_downloads/b6127b2d5e1b58372aed0948232f3cc3/o2.ini @@ -0,0 +1,36 @@ +;============================================================================== +; +; Settings for analyzing O2 data +; +;============================================================================== +; +; This provides standard settings for analyzing H1, L1, and V1 data in O2. +; It uses "OVERRIDE" for parameters that event-specific. Replace OVERRIDE +; either by editing this file, or using the config-override option in +; pycbc_inference. +; +[data] +instruments = H1 L1 V1 +trigger-time = OVERRIDE +analysis-start-time = OVERRIDE +analysis-end-time = OVERRIDE +psd-estimation = median-mean +psd-start-time = -256 +psd-end-time = 256 +psd-inverse-length = 8 +psd-segment-length = 8 +psd-segment-stride = 4 +; If you are running on the Atlas cluster, an LDG cluster, or any computer +; with a ligo-data-server, you can use the frame-type argument to automatically +; locate the location of the frame files containing the data. If you are not +; running on one of those computers, download the necessary data from GWOSC +; (gwosc.org), remove the frame-type argument, and uncomment +; frame-files, pointing the latter to the files you downloaded. +frame-type = H1:H1_GWOSC_O2_16KHZ_R1 L1:L1_GWOSC_O2_16KHZ_R1 V1:V1_GWOSC_O2_16KHZ_R1 +;frame-files = H1:/PATH/TO/DOWNLOADED/H1FRAME.gwf L1:/PATH/TO/DOWNLOADED/L1FRAME.gwf V1:/PATH/TO/DOWNLOADED/V1FRAME.gwf +channel-name = H1:GWOSC-16KHZ_R1_STRAIN L1:GWOSC-16KHZ_R1_STRAIN V1:GWOSC-16KHZ_R1_STRAIN +; A sample rate of 2048 is sufficient for BBH. If you are analyzing a BNS or +; NSBH, change to 4096. 
+sample-rate = 2048 +strain-high-pass = 15 +pad-data = 8 diff --git a/latest/html/_downloads/b9a3044c41b67e20301e970f7dbcdc78/sampling_from_config_example.pdf b/latest/html/_downloads/b9a3044c41b67e20301e970f7dbcdc78/sampling_from_config_example.pdf new file mode 100644 index 00000000000..1ecc64e3f68 Binary files /dev/null and b/latest/html/_downloads/b9a3044c41b67e20301e970f7dbcdc78/sampling_from_config_example.pdf differ diff --git a/latest/html/_downloads/ba3e77b56722b3347718ce2474cc659a/dynesty.ini b/latest/html/_downloads/ba3e77b56722b3347718ce2474cc659a/dynesty.ini new file mode 100644 index 00000000000..54a99d34d13 --- /dev/null +++ b/latest/html/_downloads/ba3e77b56722b3347718ce2474cc659a/dynesty.ini @@ -0,0 +1,12 @@ +;============================================================================== +; +; Dynesty settings +; +;============================================================================== +; +; The following provides standard settings for the dynesty sampler. +; +[sampler] +name = dynesty +dlogz = 0.1 +nlive = 2000 diff --git a/latest/html/_downloads/bbff9a69bbf714aa6cd5d5e5e20aac0f/injection.ini b/latest/html/_downloads/bbff9a69bbf714aa6cd5d5e5e20aac0f/injection.ini new file mode 100644 index 00000000000..7b8f3b56648 --- /dev/null +++ b/latest/html/_downloads/bbff9a69bbf714aa6cd5d5e5e20aac0f/injection.ini @@ -0,0 +1,16 @@ +[variable_params] + +[static_params] +tc = 1126259462.420 +mass1 = 37 +mass2 = 32 +ra = 2.2 +dec = -1.25 +inclination = 2.5 +coa_phase = 1.5 +polarization = 1.75 +distance = 100 +f_ref = 20 +f_lower = 18 +approximant = IMRPhenomPv2 +taper = start diff --git a/latest/html/_downloads/bc1aa6de022bab0a0aeb9b67b32f7af8/events.ini b/latest/html/_downloads/bc1aa6de022bab0a0aeb9b67b32f7af8/events.ini new file mode 100644 index 00000000000..d0febdaf045 --- /dev/null +++ b/latest/html/_downloads/bc1aa6de022bab0a0aeb9b67b32f7af8/events.ini @@ -0,0 +1,23 @@ +[event-gw150914] +label = GW150914+09:50:45UTC +config-files = bbh-uniform_comoving_volume.ini + marginalized_phase.ini + emcee_pt-srcmasses_comoving_volume.ini + o1.ini +config-overrides = data:trigger-time:1126259462.413 + data:analysis-start-time:-6 + data:analysis-end-time:2 +; We'll run inference twice to double the number of independent samples +nruns = 2 + +[event-gw170814] +label = GW170814+10:30:43UTC +config-files = bbh-uniform_comoving_volume.ini + marginalized_phase.ini + emcee_pt-srcmasses_comoving_volume.ini + o2.ini +config-overrides = data:trigger-time:1186741861.533 + data:analysis-start-time:-6 + data:analysis-end-time:2 +; We'll run inference twice to double the number of independent samples +nruns = 2 diff --git a/latest/html/_downloads/bc7f2dc504b78724f4c2ec690f470d0c/run.sh b/latest/html/_downloads/bc7f2dc504b78724f4c2ec690f470d0c/run.sh new file mode 100644 index 00000000000..26c9706253c --- /dev/null +++ b/latest/html/_downloads/bc7f2dc504b78724f4c2ec690f470d0c/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh +export OMP_NUM_THREADS=1 +pycbc_inference \ +--config-files `dirname "$0"`/lisa_smbhb_relbin.ini \ +--output-file lisa_smbhb_ldc_pe.hdf \ +--nprocesses 1 \ +--force \ +--verbose diff --git a/latest/html/_downloads/bd31c32757b3607607fd2e349c669b07/gw150914_shape.hires.png b/latest/html/_downloads/bd31c32757b3607607fd2e349c669b07/gw150914_shape.hires.png new file mode 100644 index 00000000000..ffd52091133 Binary files /dev/null and b/latest/html/_downloads/bd31c32757b3607607fd2e349c669b07/gw150914_shape.hires.png differ diff --git 
a/latest/html/_downloads/bd97f978ca347604cfc56ed2f0179357/custom_00.png b/latest/html/_downloads/bd97f978ca347604cfc56ed2f0179357/custom_00.png new file mode 100644 index 00000000000..6372dc26599 Binary files /dev/null and b/latest/html/_downloads/bd97f978ca347604cfc56ed2f0179357/custom_00.png differ diff --git a/latest/html/_downloads/be267a7c70e9075914224ec7b04c45f6/plot_fd_td.png b/latest/html/_downloads/be267a7c70e9075914224ec7b04c45f6/plot_fd_td.png new file mode 100644 index 00000000000..090b362824a Binary files /dev/null and b/latest/html/_downloads/be267a7c70e9075914224ec7b04c45f6/plot_fd_td.png differ diff --git a/latest/html/_downloads/c14825ac3203509867c15b155aceeaaf/chisq.png b/latest/html/_downloads/c14825ac3203509867c15b155aceeaaf/chisq.png new file mode 100644 index 00000000000..f46be4b136b Binary files /dev/null and b/latest/html/_downloads/c14825ac3203509867c15b155aceeaaf/chisq.png differ diff --git a/latest/html/_downloads/c44b46be252aa7dde9cade6d77bd2006/hwinj.png b/latest/html/_downloads/c44b46be252aa7dde9cade6d77bd2006/hwinj.png new file mode 100644 index 00000000000..4c26399a939 Binary files /dev/null and b/latest/html/_downloads/c44b46be252aa7dde9cade6d77bd2006/hwinj.png differ diff --git a/latest/html/_downloads/c52aede3bdd122cab25845b644d071cc/data.ini b/latest/html/_downloads/c52aede3bdd122cab25845b644d071cc/data.ini new file mode 100644 index 00000000000..73d04609457 --- /dev/null +++ b/latest/html/_downloads/c52aede3bdd122cab25845b644d071cc/data.ini @@ -0,0 +1,34 @@ +[data] +instruments = H1 L1 +trigger-time = 1126259462.42 +analysis-start-time = -6 +analysis-end-time = 2 +; strain settings +sample-rate = 2048 +fake-strain = H1:aLIGOaLIGODesignSensitivityT1800044 L1:aLIGOaLIGODesignSensitivityT1800044 +fake-strain-seed = H1:44 L1:45 +; psd settings +psd-estimation = median-mean +psd-inverse-length = 8 +psd-segment-length = 8 +psd-segment-stride = 4 +psd-start-time = -256 +psd-end-time = 256 +; even though we're making fake strain, the strain +; module requires a channel to be provided, so we'll +; just make one up +channel-name = H1:STRAIN L1:STRAIN +; Providing an injection file will cause a simulated +; signal to be added to the data +injection-file = injection.hdf +; We'll use a high-pass filter so as not to get numerical errors from the large +; amplitude low frequency noise. Here we use 15 Hz, which is safely below the +; low frequency cutoff of our likelihood integral (20 Hz) +strain-high-pass = 15 +; The pad-data argument is for the high-pass filter: 8s are added to the +; beginning/end of the analysis/psd times when the data is loaded. After the +; high pass filter is applied, the additional time is discarded. This pad is +; *in addition to* the time added to the analysis start/end time for the PSD +; inverse length. Since it is discarded before the data is transformed for the +; likelihood integral, it has little affect on the run time. 
+pad-data = 8 diff --git a/latest/html/_downloads/c56e0fd2fba714b66390581568758862/add_waveform_01.pdf b/latest/html/_downloads/c56e0fd2fba714b66390581568758862/add_waveform_01.pdf new file mode 100644 index 00000000000..e7495f8abba Binary files /dev/null and b/latest/html/_downloads/c56e0fd2fba714b66390581568758862/add_waveform_01.pdf differ diff --git a/latest/html/_downloads/c80593d8e985069830e2d7b113c0c9b2/add_waveform.py b/latest/html/_downloads/c80593d8e985069830e2d7b113c0c9b2/add_waveform.py new file mode 100644 index 00000000000..4768c85649b --- /dev/null +++ b/latest/html/_downloads/c80593d8e985069830e2d7b113c0c9b2/add_waveform.py @@ -0,0 +1,48 @@ +import numpy +import matplotlib.pyplot as pp +import pycbc.waveform +from pycbc.types import TimeSeries + + +def test_waveform(**args): + flow = args['f_lower'] # Required parameter + dt = args['delta_t'] # Required parameter + fpeak = args['fpeak'] # A new parameter for my model + + t = numpy.arange(0, 10, dt) + f = t/t.max() * (fpeak - flow) + flow + a = t + + wf = numpy.exp(2.0j * numpy.pi * f * t) * a + + # Return product should be a pycbc time series in this case for + # each GW polarization + # + # + # Note that by convention, the time at 0 is a fiducial reference. + # For CBC waveforms, this would be set to where the merger occurs + offset = - len(t) * dt + wf = TimeSeries(wf, delta_t=dt, epoch=offset) + return wf.real(), wf.imag() + + +# This tells pycbc about our new waveform so we can call it from standard +# pycbc functions. If this were a frequency-domain model, select 'frequency' +# instead of 'time' to this function call. +pycbc.waveform.add_custom_waveform('test', test_waveform, 'time', force=True) + +# Let's plot what our new waveform looks like +hp, hc = pycbc.waveform.get_td_waveform(approximant="test", + f_lower=20, fpeak=50, + delta_t=1.0/4096) +pp.figure(0) +pp.plot(hp.sample_times, hp) +pp.xlabel('Time (s)') + +pp.figure(1) +hf = hp.to_frequencyseries() +pp.plot(hf.sample_frequencies, hf.real()) +pp.xlabel('Frequency (Hz)') +pp.xscale('log') +pp.xlim(20, 100) +pp.show() diff --git a/latest/html/_downloads/cbb584ab88c70c6cae5ca5cb5c36bbb6/custom_01.pdf b/latest/html/_downloads/cbb584ab88c70c6cae5ca5cb5c36bbb6/custom_01.pdf new file mode 100644 index 00000000000..d22504498eb Binary files /dev/null and b/latest/html/_downloads/cbb584ab88c70c6cae5ca5cb5c36bbb6/custom_01.pdf differ diff --git a/latest/html/_downloads/cbfb2ea8108d59acf9ebe75239a40269/plot_waveform.png b/latest/html/_downloads/cbfb2ea8108d59acf9ebe75239a40269/plot_waveform.png new file mode 100644 index 00000000000..33009d7e504 Binary files /dev/null and b/latest/html/_downloads/cbfb2ea8108d59acf9ebe75239a40269/plot_waveform.png differ diff --git a/latest/html/_downloads/cd42dcf19f5f79be8c513e654b62f25e/higher_modes.hires.png b/latest/html/_downloads/cd42dcf19f5f79be8c513e654b62f25e/higher_modes.hires.png new file mode 100644 index 00000000000..3c3e64b9a67 Binary files /dev/null and b/latest/html/_downloads/cd42dcf19f5f79be8c513e654b62f25e/higher_modes.hires.png differ diff --git a/latest/html/_downloads/ce004edee3d0a35c37cbd214a293a231/injection_smbhb.ini b/latest/html/_downloads/ce004edee3d0a35c37cbd214a293a231/injection_smbhb.ini new file mode 100644 index 00000000000..82cb0108e74 --- /dev/null +++ b/latest/html/_downloads/ce004edee3d0a35c37cbd214a293a231/injection_smbhb.ini @@ -0,0 +1,28 @@ +[variable_params] + +[static_params] +; This assumes all those values are in LISA frame. 
+; You can set "ref_frame = SSB", but then you should also add it to +; "static_params" section in PE .ini file. +ref_frame = LISA +approximant = BBHX_PhenomD +; You can use "1.5" or "2.0" for TDI. +; Please use the same TDI version for PSD and static_params in the PE .ini file. +tdi = 1.5 +mass1 = 1015522.4376 +mass2 = 796849.1091 +spin1z = 0.597755394865021 +spin2z = 0.36905807298613247 +distance = 17758.367941273442 +inclination = 1.5970175301911231 +coa_phase = 4.275929308696054 +eclipticlongitude = 5.4431083771985165 +eclipticlatitude = -1.2734504596198182 +polarization = 0.22558110042980073 +tc = 4799624.274911478 +t_obs_start = 31536000 +; Put LISA behind the Earth by ~20 degrees. +t_offset = 7365189.431698299 +f_lower = 1e-4 +f_ref = 1e-4 +f_final = 0.1 diff --git a/latest/html/_downloads/ce7407033baf800f8cf99443cdab10ca/run.sh b/latest/html/_downloads/ce7407033baf800f8cf99443cdab10ca/run.sh new file mode 100644 index 00000000000..3a63115625b --- /dev/null +++ b/latest/html/_downloads/ce7407033baf800f8cf99443cdab10ca/run.sh @@ -0,0 +1,7 @@ +pycbc_inference \ +--config-file `dirname "$0"`/single_simple.ini \ +--nprocesses=1 \ +--output-file single.hdf \ +--seed 0 \ +--force \ +--verbose diff --git a/latest/html/_downloads/ceac8ff1e783fd7ea14687de9bb43077/data.py b/latest/html/_downloads/ceac8ff1e783fd7ea14687de9bb43077/data.py new file mode 100644 index 00000000000..1223852a0f6 --- /dev/null +++ b/latest/html/_downloads/ceac8ff1e783fd7ea14687de9bb43077/data.py @@ -0,0 +1,25 @@ +import matplotlib.pyplot as pp +import pycbc.catalog + + +m = pycbc.catalog.Merger("GW170817", source='gwtc-1') + +fig, axs = pp.subplots(2, 1, sharex=True, sharey=True) +for ifo, ax in zip(["L1", "H1"], axs): + pp.sca(ax) + pp.title(ifo) + # Retreive data around the BNS merger + ts = m.strain(ifo).time_slice(m.time - 15, m.time + 6) + + # Whiten the data with a 4s filter + white = ts.whiten(4, 4) + + times, freqs, power = white.qtransform(.01, logfsteps=200, + qrange=(110, 110), + frange=(20, 512)) + pp.pcolormesh(times, freqs, power**0.5, vmax=5) + +pp.yscale('log') +pp.ylabel("Frequency (Hz)") +pp.xlabel("Time (s)") +pp.show() diff --git a/latest/html/_downloads/ced57b28fbb0c1bd45582785eb34f1de/plot_phase.py b/latest/html/_downloads/ced57b28fbb0c1bd45582785eb34f1de/plot_phase.py new file mode 100644 index 00000000000..6ad52556197 --- /dev/null +++ b/latest/html/_downloads/ced57b28fbb0c1bd45582785eb34f1de/plot_phase.py @@ -0,0 +1,21 @@ +import matplotlib.pyplot as pp +from pycbc import waveform + + +for apx in ['SEOBNRv4', 'TaylorT4', 'IMRPhenomB']: + hp, hc = waveform.get_td_waveform(approximant=apx, + mass1=10, + mass2=10, + delta_t=1.0/4096, + f_lower=40) + + hp, hc = hp.trim_zeros(), hc.trim_zeros() + amp = waveform.utils.amplitude_from_polarizations(hp, hc) + phase = waveform.utils.phase_from_polarizations(hp, hc) + + pp.plot(phase, amp, label=apx) + +pp.ylabel('GW Strain Amplitude') +pp.xlabel('GW Phase (radians)') +pp.legend(loc='upper left') +pp.show() diff --git a/latest/html/_downloads/cf27a3cf7c72c6d8629dfb8614154a86/read.png b/latest/html/_downloads/cf27a3cf7c72c6d8629dfb8614154a86/read.png new file mode 100644 index 00000000000..7ec0ecb275b Binary files /dev/null and b/latest/html/_downloads/cf27a3cf7c72c6d8629dfb8614154a86/read.png differ diff --git a/latest/html/_downloads/d205631f1625389c6b67350c2db9a26e/bbh-uniform_comoving_volume.ini b/latest/html/_downloads/d205631f1625389c6b67350c2db9a26e/bbh-uniform_comoving_volume.ini new file mode 100644 index 00000000000..35d1fdc4682 --- /dev/null 
+++ b/latest/html/_downloads/d205631f1625389c6b67350c2db9a26e/bbh-uniform_comoving_volume.ini @@ -0,0 +1,197 @@ +;============================================================================== +; +; Standard BBH Prior +; +;============================================================================== +; +; This configuration file provides a standard prior for binary-black holes +; (BBH). It uses a uniform prior on *source* masses, along with a uniform +; prior in comoving volume. Waveform transforms are included to convert the +; source masses into detector-frame masses using a standard cosmology +; (Planck 2015). The minimum and maximum volumes used correspond to a +; luminosity distances of ~10Mpc and ~1.5Gpc, respectively. It can therefore +; be used with BBH in O1-O2. To use for future detectors, simply change the +; volume limits. +; +; The coa_phase is not varied, so this has to be used with a model that +; marginalizes the phase automatically (e.g. the mariginalized_phase or relbin +; models). If you are not using a model that marginalizes the phase, uncomment +; the coa_phase in the [variable_params], along with the [prior-coa_phase] +; section. +; +; The mass range used is 10-80, and so is fine for GW150914-like BBH. For +; more lower-mass BBH, the prior range should be decreased. Keep in mind +; that lowering the mass prior increases the duration of the longest waveform +; admitted by the prior (meaning that you may need to change your +; analysis-start-time in your data section if you do that). +; +; The starting frequency of the waveform approximant is set to 20Hz (the +; f_lower and f_ref settings in the [static_params]). This is OK to use +; for the O1-O3 LIGO and Virgo detectors. With this lower-frequency cutoff +; and the lower-bound of the mass prior of 10, the longest waveform that may +; be generated is ~6s. Suggested analysis-start and end-time settings are -6 +; and 2 (with respect to the trigger-time), respectively. +; +; You may wish to lower the lower frequency cutoff for future detectors, +; in which the PSD has better lower-frequency performance. +; Keep in mind that decreasing the lower-frequency cutoff will make the +; waveforms have longer duration in the time domain, and so the analysis +; start time will need to be adjusted. +; +; No [data], [model], or [sampler] sections are provided here. This should be +; in used in tandem with additional configuration files that provide those +; sections. + +[variable_params] +delta_tc = +; Note that we call the masses srcmass[X]. This is because the waveform +; generator assumes that parameters called mass[X] are detector-frame masses. +; We therefore need to call the source masses something different; we choose +; "srcmass" here, but they could be called anything. In the waveform transforms +; sections below, we convert these to detector-frame masses. +srcmass1 = +srcmass2 = +spin1_a = +spin1_azimuthal = +spin1_polar = +spin2_a = +spin2_azimuthal = +spin2_polar = +comoving_volume = +inclination = +polarization = +ra = +dec = +; Uncomment this if you are not using a model that marginalizes over phase. +;coa_phase = + +[static_params] +approximant = IMRPhenomPv2 +f_lower = 20 +f_ref = 20 +; The trigger time is used with delta_tc to get the coalescence time tc. We'll +; get the trigger time from the data section (provided in a separate file). 
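+; The ${section|option} syntax interpolates a value from another section of the
+; combined configuration; here it picks up the trigger-time from the [data] section.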
+trigger_time = ${data|trigger-time} + +;----------------------------------------------------------------------------- +; +; Intrinsic parameters +; +;----------------------------------------------------------------------------- + +[prior-srcmass1] +name = uniform +min-srcmass1 = 10 +max-srcmass1 = 80 + +[prior-srcmass2] +name = uniform +min-srcmass2 = 10 +max-srcmass2 = 80 + +[prior-spin1_a] +name = uniform +min-spin1_a = 0.0 +max-spin1_a = 0.99 + +[prior-spin1_polar+spin1_azimuthal] +name = uniform_solidangle +polar-angle = spin1_polar +azimuthal-angle = spin1_azimuthal + +[prior-spin2_a] +name = uniform +min-spin2_a = 0.0 +max-spin2_a = 0.99 + +[prior-spin2_polar+spin2_azimuthal] +name = uniform_solidangle +polar-angle = spin2_polar +azimuthal-angle = spin2_azimuthal + +; The waveform generator expects spins to be in cartesian coordinates, with +; names spin(1|2)(x|y|z). We therefore need to provide a waveform transform +; that converts the spherical coordinates that we have defined the spin prior +; in to cartesian coordinates. +[waveform_transforms-spin1x+spin1y+spin1z] +name = spherical_to_cartesian +x = spin1x +y = spin1y +z = spin1z +radial = spin1_a +polar = spin1_polar +azimuthal = spin1_azimuthal + +[waveform_transforms-spin2x+spin2y+spin2z] +name = spherical_to_cartesian +x = spin2x +y = spin2y +z = spin2z +radial = spin2_a +polar = spin2_polar +azimuthal = spin2_azimuthal + +;----------------------------------------------------------------------------- +; +; Extrinsic parameters +; +;----------------------------------------------------------------------------- + +[prior-delta_tc] +name = uniform +; We'll use +/-0.1s around the estimated coalescence (trigger) time. +min-delta_tc = -0.1 +max-delta_tc = 0.1 + +[waveform_transforms-tc] +; The waveform generator needs tc, which we calculate here. +name = custom +inputs = trigger_time, delta_tc +tc = trigger_time + delta_tc + +[prior-inclination] +name = sin_angle + +; Uncomment this section if you are not using a model that marginalizes over +; the phase. +;[prior-coa_phase] +;name = uniform_angle + +[prior-ra+dec] +name = uniform_sky + +[prior-polarization] +name = uniform_angle + +[prior-comoving_volume] +name = uniform +; These limits correspond to luminosity distances of ~[10, 1500) Mpc. Change +; if you are analyzing detections which are more than ~1Gpc away. +min-comoving_volume = 5e3 +max-comoving_volume = 9e9 + +; The following [waveform_transforms] sections convert the comoving volume +; to luminosity distance and the source masses to detector frame masses. +; The latter is done by calculating redshift from the comoving volume first. +; The order that transforms need to be applied is figured out automatically by +; the code, so it doesn't matter what order we put them here, as long as we +; provide transforms for all intermediate steps. 
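The chain of transforms that follows can be sanity-checked directly in Python. The sketch below is illustrative only: it assumes the conversion functions named in this file (redshift_from_comoving_volume, distance_from_comoving_volume) are importable from pycbc.cosmology, and it uses an arbitrary comoving volume inside the prior range above.

    from pycbc import cosmology

    vc = 1.0e9  # comoving volume in Mpc^3, arbitrary value within [5e3, 9e9)
    z = cosmology.redshift_from_comoving_volume(vc)
    dl = cosmology.distance_from_comoving_volume(vc)  # luminosity distance in Mpc
    # Detector-frame masses are the redshifted source-frame masses
    mass1 = 35.0 * (1 + z)
    mass2 = 30.0 * (1 + z)
    print(z, dl, mass1, mass2)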
+[waveform_transforms-redshift] +name = custom +inputs = comoving_volume +redshift = redshift_from_comoving_volume(comoving_volume) + +[waveform_transforms-distance] +name = custom +inputs = comoving_volume +distance = distance_from_comoving_volume(comoving_volume) + +[waveform_transforms-mass1] +name = custom +inputs = srcmass1, redshift +mass1 = srcmass1 * (1 + redshift) + +[waveform_transforms-mass2] +name = custom +inputs = srcmass2, redshift +mass2 = srcmass2 * (1 + redshift) diff --git a/latest/html/_downloads/d2774010069f78f3f44c074dd73b758d/plot_phase.pdf b/latest/html/_downloads/d2774010069f78f3f44c074dd73b758d/plot_phase.pdf new file mode 100644 index 00000000000..59d7a35e2a9 Binary files /dev/null and b/latest/html/_downloads/d2774010069f78f3f44c074dd73b758d/plot_phase.pdf differ diff --git a/latest/html/_downloads/d4bdaaa72e2a7116a73576bd6a9a5499/workflow_config.ini b/latest/html/_downloads/d4bdaaa72e2a7116a73576bd6a9a5499/workflow_config.ini new file mode 100644 index 00000000000..efefb03584f --- /dev/null +++ b/latest/html/_downloads/d4bdaaa72e2a7116a73576bd6a9a5499/workflow_config.ini @@ -0,0 +1,206 @@ +[workflow] +; basic information used by the workflow generator +file-retention-level = all_triggers +; The start/end times here are just used for file naming. They can be set +; to anything -- they aren't used for anything, and have no effect on the +; analysis. The actual analysis times used are set by the [data] section in +; the configuration files given to pycbc_inference (specified in the events +; config file). +start-time = 1126259200 +end-time = 1126259600 + +[workflow-ifos] +; The ifos listed here are just used for file naming, it doesn't matter if +; they are not consistent with the actual detectors analyzed. +h1 = +l1 = +v1 = + +[extract_posterior] +; Here, we'll ensure that the output parameters are such that mass1 >= mass2 +; (and associated spins), change comoving volume into redshift and distance, +; add mchirp, q, chi_eff, and chi_p to the posterior files. +parameters = 'primary_mass(srcmass1, srcmass2):srcmass1' + 'secondary_mass(srcmass1, srcmass2):srcmass2' + 'primary_spin(srcmass1, srcmass2, spin1_a, spin2_a):spin1_a' + 'primary_spin(srcmass1, srcmass2, spin1_azimuthal, spin2_azimuthal):spin1_azimuthal' + 'primary_spin(srcmass1, srcmass2, spin1_polar, spin2_polar):spin1_polar' + 'secondary_spin(srcmass1, srcmass2, spin1_a, spin2_a):spin2_a' + 'secondary_spin(srcmass1, srcmass2, spin1_azimuthal, spin2_azimuthal):spin2_azimuthal' + 'secondary_spin(srcmass1, srcmass2, spin1_polar, spin2_polar):spin2_polar' + 'mchirp_from_mass1_mass2(srcmass1, srcmass2):srcmchirp' + 'chi_eff_from_spherical(srcmass1, srcmass2, spin1_a, spin1_polar, spin2_a, spin2_polar):chi_eff' + 'chi_p_from_spherical(srcmass1, srcmass2, spin1_a, spin1_azimuthal, spin1_polar, spin2_a, spin2_azimuthal, spin2_polar):chi_p' + 'redshift_from_comoving_volume(comoving_volume):redshift' + 'distance_from_comoving_volume(comoving_volume):distance' + '*' +force = + +[workflow-summary_table] +; Parameters that will be printed in the summary table. +; These must be from the set specified in extract_posterior. +table-params = srcmass1 srcmass2 + srcmchirp 'q_from_mass1_mass2(srcmass1, srcmass2):q' + chi_eff chi_p + ra dec delta_tc + distance redshift + 'snr_from_loglr(loglikelihood-lognl):SNR' +; The additional metadata will be printed below the table. We can print +; anything that is in the posterior files' attrs. 
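+; Each entry below has the form attribute:label; the label may use LaTeX (e.g. $t_0$).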
+print-metadata = 'trigger_time:$t_0$' 'analyzed_detectors:Detectors' + +[workflow-summary_plots] +; Parameter posteriors that will plotted on the summary page. +; These must be from the set specified in extract_posterior. +; Each plot-group corresponds to a single plot that will be plot on the +; summary page. Generally, these should be limited to 1 or 2 dimensions +; (although this is not enforced); larger corner plots can be put in the +; Posteriors page. The plots for those are set by the [workflow-plot_params] +; section (see below). +; The settings for the posterior plots created here are read from the +; [plot_posterior_summary] section. +plot-group-mass1_mass2 = srcmass1 srcmass2 +plot-group-inc_distance = inclination distance +plot-group-chip_chieff = chi_p chi_eff +; Notice that we are not including ra and dec here. The sky map is +; created by [plot_skymap]. + +[workflow-plot_params] +; Parameter posteriors that will plotted on the "Posteriors" page. +; These must be from the set specified in extract_posterior. +; Each plot-group corresponds to a single plot that will be plot on the +; page. Since the events are split into their own sub-pages, it's ok to make +; large corner plots here (although too large and it will be hard to make +; out what each parameter is doing). +; The settings for the posterior plots created here are read from the +; [plot_posterior] section. +; Since we plotted source-frame masses on the summary page, here we'll +; plot detector-frame masses. +plot-group-masses = 'srcmass1*(1+redshift):mass1' + 'srcmass2*(1+redshift):mass2' + 'srcmchirp*(1+redshift):mchirp' + 'q_from_mass1_mass2(srcmass1, srcmass2):q' +plot-group-spins = spin1_a spin2_a + spin1_azimuthal spin2_azimuthal + spin1_polar spin2_polar + chi_eff chi_p +plot-group-extrinsic = ra dec delta_tc polarization inclination distance redshift + +[executables] +; paths to executables to use in workflow +inference = ${which:run_pycbc_inference} +extract_posterior = ${which:pycbc_inference_extract_samples} +plot_posterior = ${which:pycbc_inference_plot_posterior} +plot_posterior_summary = ${which:pycbc_inference_plot_posterior} +plot_prior = ${which:pycbc_inference_plot_prior} +table_summary = ${which:pycbc_inference_table_summary} +create_fits_file = ${which:pycbc_inference_create_fits} +plot_skymap = ${which:pycbc_inference_plot_skymap} +plot_spectrum = ${which:pycbc_plot_psd_file} +results_page = ${which:pycbc_make_html_page} +; diagnostic plots +plot_acceptance_rate = ${which:pycbc_inference_plot_acceptance_rate} +plot_samples = ${which:pycbc_inference_plot_samples} + +[pegasus_profile] +; +MaxRunTimeHours is needed for running on the ATLAS cluster; comment out +; if your cluster does not need this. +condor|+MaxRunTimeHours = 1 + +[pegasus_profile-inference] +condor|request_memory = 40G +; +MaxRunTimeHours is needed for running on the ATLAS cluster; comment out +; if your cluster does not need this. +condor|+MaxRunTimeHours = 10 +condor|request_cpus = ${inference|nprocesses} + +[pegasus_profile-plot_prior] +condor|request_memory = 4G + +[pegasus_profile-plot_skymap] +condor|request_memory = 4G + +[pegasus_profile-plot_posterior] +condor|request_memory = 4G + +[pegasus_profile-plot_posterior_summary] +condor|request_memory = 4G + +[pegasus_profile-plot_samples] +condor|request_memory = 4G + +[inference] +; Command line options for pycbc_inference. +verbose = +; Set the nprocesses to the number of cores you want each job to use. The +; value you use is cluster dependent. 
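+; The [pegasus_profile-inference] section above requests this many CPUs per job via
+; ${inference|nprocesses}, so the two settings stay in sync automatically.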
+nprocesses = 32 + +[plot_posterior_summary] +; These are the command line options that will be passed to +; pycbc_inference_plot_posterior for creating the posterior plots on the +; summary page. These settings will cause density plots to be made. +plot-contours = +plot-marginal = +plot-density = +density-cmap = Blues +contour-color = black + +[plot_posterior] +; These are the command line options that will be passed to +; pycbc_inference_plot_posterior for creating the posterior plots on the +; posteriors page. These settings will cause scatter plots to be made showing +; each point in the posterior, colored by the matched-filter SNR. +plot-contours = +plot-marginal = +plot-scatter = +z-arg = snr + +[create_fits_file] +; These are the settings for creating a fits file, which is used to produce +; the skymaps. This program needs ligo.skymap to be installed. +; The maxpts option limits the number of points in the posterior that are used +; to create the skymap. This is mostly for speeding up run time. Comment out +; to use all points. +maxpts = 1000 +; Since the posterior file stores delta_tc, we need to tell the fits +; file how to calculate tc +tc = 'trigger_time+delta_tc' + +[plot_skymap] +; These are settings for creating the skymap. This program requires +; ligo.skymap to be installed. Here, we're just setting the colormap to be +; the same as the posterior density plots, above. +colormap = ${plot_posterior_summary|density-cmap} + +[plot_prior] +; This sets command-line options to use for the plot prior function. These +; plots are on the "priors" page. The default (giving no options) is to +; plot all of the variable params. + +[table_summary] +; This sets command-line options for the table on the summary page. You +; should not need to set anything here. + +[plot_spectrum] +; This sets command-line options for the ASD plots on the detector sensitivity +; page. The dyn-range-factor needs to be set to 1. +dyn-range-factor = 1 + +[plot_acceptance_rate] +; This sets command-line options for the acceptance rate diagnostic plots. +; This should only be used for MCMC samplers. You do not need to set anything +; here for this plot. + +[plot_samples] +; This sets command-line options for the plot of samples chains. +; This should only be used for MCMC samplers. Here, we are telling it to plot +; all chains, and to show every single iteration. +chains = all +thin-start = 0 +thin-interval = 1 + +[results_page] +; This sets settings for creating the results page. You may want to change +; the analysis title, to make it more descriptive. 
+analysis-title = "Inference results" diff --git a/latest/html/_downloads/d5d7ccdf3b1a148dc72903c494f37265/dynesty.ini b/latest/html/_downloads/d5d7ccdf3b1a148dc72903c494f37265/dynesty.ini new file mode 100644 index 00000000000..d149d2dfb8d --- /dev/null +++ b/latest/html/_downloads/d5d7ccdf3b1a148dc72903c494f37265/dynesty.ini @@ -0,0 +1,6 @@ +[sampler] +name = dynesty +dlogz = 0.1 +nlive = 2000 +checkpoint_time_interval = 1800 +maxcall = 10000 diff --git a/latest/html/_downloads/d6e2eeb650c67fb5aee2416467791fd4/higher_modes.py b/latest/html/_downloads/d6e2eeb650c67fb5aee2416467791fd4/higher_modes.py new file mode 100644 index 00000000000..9c6db4ad084 --- /dev/null +++ b/latest/html/_downloads/d6e2eeb650c67fb5aee2416467791fd4/higher_modes.py @@ -0,0 +1,42 @@ +import matplotlib.pyplot as pp +from pycbc.waveform import get_td_waveform + +# Let's plot what our new waveform looks like +pp.figure() + +# You can select sets of modes or individual modes using the 'mode_array' +# The standard format is to provide a list of (l, m) modes, however +# a string format is also provided to aid use in population from config files. +# e.g. "22 33" is also acceptable to select these two modes. +# "None" will result in the waveform return its default which is usually +# to return all implemented modes. +for mode_select in [None, + [(2, 2), (3, 3)], # Select two modes at once + [(2, 2)], + [(2, 1)], + [(3, 2)], + [(4, 4)], + ]: + hp, hc = get_td_waveform(approximant="IMRPhenomXPHM", + mass1=7, + mass2=40, + f_lower=20.0, + mode_array=mode_select, + inclination = 1.0, + delta_t=1.0/4096) + + + + if mode_select is None: + label = 'Full Waveform' + a = hp.max() + else: + label = "l, m = " + ' '.join([f"{l}, {m}" for l, m in mode_select]) + + (hp / a).plot(label=label) + +pp.xlim(-1, 0.05) +pp.legend() +pp.xlabel('Time [s]') +pp.ylabel('Relative Strain') +pp.show() diff --git a/latest/html/_downloads/d73d3e210099e1aaf36612ca66d2a703/single.ini b/latest/html/_downloads/d73d3e210099e1aaf36612ca66d2a703/single.ini new file mode 100644 index 00000000000..f26e781a206 --- /dev/null +++ b/latest/html/_downloads/d73d3e210099e1aaf36612ca66d2a703/single.ini @@ -0,0 +1,102 @@ + +[model] +name = single_template + +#; This model precalculates the SNR time series at a fixed rate. +#; If you need a higher time resolution, this may be increased +sample_rate = 8192 +low-frequency-cutoff = 30.0 + +marginalize_vector_params = tc, ra, dec, polarization +marginalize_vector_samples = 1000 + +# These lines enable the marginalization to only use information +# around SNR peaks for drawing possible sky / time positions. 
Remove +# if you want to use the entire prior range as possible +peak_lock_snr = 4.0 +peak_min_snr = 4.0 + +marginalize_phase = True + +marginalize_distance = True +marginalize_distance_param = distance +marginalize_distance_interpolator = True +marginalize_distance_snr_range = 5, 50 +marginalize_distance_density = 200, 200 +marginalize_distance_samples = 1000 + +[data] +instruments = H1 L1 V1 +analysis-start-time = 1187008482 +analysis-end-time = 1187008892 +psd-estimation = median +psd-segment-length = 16 +psd-segment-stride = 8 +psd-inverse-length = 16 +pad-data = 8 +channel-name = H1:LOSC-STRAIN L1:LOSC-STRAIN V1:LOSC-STRAIN +frame-files = H1:H-H1_LOSC_CLN_4_V1-1187007040-2048.gwf L1:L-L1_LOSC_CLN_4_V1-1187007040-2048.gwf V1:V-V1_LOSC_CLN_4_V1-1187007040-2048.gwf +strain-high-pass = 15 +sample-rate = 2048 + +[sampler] +name = dynesty +dlogz = 0.5 +nlive = 100 + +[variable_params] +; waveform parameters that will vary in MCMC +#coa_phase = +distance = +polarization = +inclination = +ra = +dec = +tc = + +[static_params] +; waveform parameters that will not change in MCMC +approximant = TaylorF2 +f_lower = 30 +mass1 = 1.3757 +mass2 = 1.3757 + +#; we'll choose not to sample over these, but you could +#polarization = 0 +#ra = 3.44615914 +#dec = -0.40808407 + +#tc = 1187008882.42825 + +#; You could also set additional parameters if your waveform model supports / requires it. +; spin1z = 0 + +#[prior-coa_phase] +#name = uniform_angle + +#[prior-ra+dec] +#name = uniform_sky + +[prior-ra] +name = uniform_angle + +[prior-dec] +name = cos_angle + +[prior-tc] +#; coalescence time prior +name = uniform +min-tc = 1187008882.4 +max-tc = 1187008882.5 + +[prior-distance] +#; following gives a uniform in volume +name = uniform_radius +min-distance = 10 +max-distance = 60 + +[prior-polarization] +name = uniform_angle + +[prior-inclination] +name = sin_angle diff --git a/latest/html/_downloads/d826f63b8efa7e8c4f54808a773fa653/mass_examples.py b/latest/html/_downloads/d826f63b8efa7e8c4f54808a773fa653/mass_examples.py new file mode 100644 index 00000000000..e1d2b1428e4 --- /dev/null +++ b/latest/html/_downloads/d826f63b8efa7e8c4f54808a773fa653/mass_examples.py @@ -0,0 +1,44 @@ +import matplotlib.pyplot as plt +from pycbc import distributions + +# Create a mass distribution object that is uniform between 0.5 and 1.5 +# solar masses. +mass1_distribution = distributions.Uniform(mass1=(0.5, 1.5)) +# Take 100000 random variable samples from this uniform mass distribution. +mass1_samples = mass1_distribution.rvs(size=1000000) + +# Draw another distribution that is Gaussian between 0.5 and 1.5 solar masses +# with a mean of 1.2 solar masses and a standard deviation of 0.15 solar +# masses. Gaussian takes the variance as an input so square the standard +# deviation. +variance = 0.15*0.15 +mass2_gaussian = distributions.Gaussian(mass2=(0.5, 1.5), mass2_mean=1.2, + mass2_var=variance) + +# Take 100000 random variable samples from this gaussian mass distribution. +mass2_samples = mass2_gaussian.rvs(size=1000000) + +# We can make pairs of distributions together, instead of apart. +two_mass_distributions = distributions.Uniform(mass3=(1.6, 3.0), + mass4=(1.6, 3.0)) +two_mass_samples = two_mass_distributions.rvs(size=1000000) + +# Choose 50 bins for the histogram subplots. 
+n_bins = 50 + +# Plot histograms of samples in subplots +fig, axes = plt.subplots(nrows=2, ncols=2) +ax0, ax1, ax2, ax3, = axes.flat + +ax0.hist(mass1_samples['mass1'], bins = n_bins) +ax1.hist(mass2_samples['mass2'], bins = n_bins) +ax2.hist(two_mass_samples['mass3'], bins = n_bins) +ax3.hist(two_mass_samples['mass4'], bins = n_bins) + +ax0.set_title('Mass 1 samples') +ax1.set_title('Mass 2 samples') +ax2.set_title('Mass 3 samples') +ax3.set_title('Mass 4 samples') + +plt.tight_layout() +plt.show() diff --git a/latest/html/_downloads/e19b84e28414cda9681f639913c7cf44/timeseries.pdf b/latest/html/_downloads/e19b84e28414cda9681f639913c7cf44/timeseries.pdf new file mode 100644 index 00000000000..b4a066fe923 Binary files /dev/null and b/latest/html/_downloads/e19b84e28414cda9681f639913c7cf44/timeseries.pdf differ diff --git a/latest/html/_downloads/e2c054c8cee13788fef09c1fbc3132a6/chisq.py b/latest/html/_downloads/e2c054c8cee13788fef09c1fbc3132a6/chisq.py new file mode 100644 index 00000000000..05e18eaf4f5 --- /dev/null +++ b/latest/html/_downloads/e2c054c8cee13788fef09c1fbc3132a6/chisq.py @@ -0,0 +1,41 @@ +"""This example shows how to calculate the chi^2 discriminator described in +https://arxiv.org/abs/gr-qc/0405045, also known as the "power chi^2" or "Allen +chi^2" discriminator. +""" + +import matplotlib.pyplot as pp +import pycbc.noise +import pycbc.psd +import pycbc.waveform +import pycbc.vetoes + + +# Generate some noise with an advanced ligo psd +flow = 30.0 +delta_f = 1.0 / 16 +flen = int(2048 / delta_f) + 1 +psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow) + +# Generate 16 seconds of noise at 4096 Hz +delta_t = 1.0 / 4096 +tsamples = int(16 / delta_t) +strain = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127) +stilde = strain.to_frequencyseries() + +# Calculate the power chisq time series +hp, hc = pycbc.waveform.get_fd_waveform(approximant='IMRPhenomD', + mass1=25, mass2=25, + f_lower=flow, delta_f=stilde.delta_f) + +hp.resize(len(stilde)) +num_bins = 16 +chisq = pycbc.vetoes.power_chisq(hp, stilde, num_bins, psd, + low_frequency_cutoff=flow) + +# convert to a reduced chisq +chisq /= (num_bins * 2) - 2 + +pp.plot(chisq.sample_times, chisq) +pp.ylabel('$\chi^2_r$') +pp.xlabel('time (s)') +pp.show() diff --git a/latest/html/_downloads/e33463c8a4653160bed2230433b7d91b/events.ini b/latest/html/_downloads/e33463c8a4653160bed2230433b7d91b/events.ini new file mode 100644 index 00000000000..f5c47020f92 --- /dev/null +++ b/latest/html/_downloads/e33463c8a4653160bed2230433b7d91b/events.ini @@ -0,0 +1,24 @@ +[event-gw150914] +label = GW150914+09:50:45UTC +config-files = bbh-uniform_comoving_volume.ini + marginalized_phase.ini + dynesty.ini + o1.ini +config-overrides = data:trigger-time:1126259462.413 + data:analysis-start-time:-8 + data:analysis-end-time:2 +; We can run multiple instances of inference to accumulate more samples by +; setting nruns. This is useful for emcee_pt. However, dynesty generally +; produces enough samples in a single run, so we'll leave this commented out. +; (The default is to do 1 run.) 
+;nruns = 2 + +[event-gw170814] +label = GW170814+10:30:43UTC +config-files = bbh-uniform_comoving_volume.ini + marginalized_phase.ini + dynesty.ini + o2.ini +config-overrides = data:trigger-time:1186741861.533 + data:analysis-start-time:-8 + data:analysis-end-time:2 diff --git a/latest/html/_downloads/e3ccf002aa53cbbf3323984129e98e33/sampling_from_config_example.py b/latest/html/_downloads/e3ccf002aa53cbbf3323984129e98e33/sampling_from_config_example.py new file mode 100644 index 00000000000..7c377d5655b --- /dev/null +++ b/latest/html/_downloads/e3ccf002aa53cbbf3323984129e98e33/sampling_from_config_example.py @@ -0,0 +1,43 @@ +import numpy as np +import matplotlib.pyplot as plt +from pycbc.distributions.utils import draw_samples_from_config + + +# A path to the .ini file. +CONFIG_PATH = "./pycbc_bbh_prior.ini" +random_seed = np.random.randint(low=0, high=2**32-1) + +# Draw a single sample. +sample = draw_samples_from_config( + path=CONFIG_PATH, num=1, seed=random_seed) + +# Print all parameters. +print(sample.fieldnames) +print(sample) +# Print a certain parameter, for example 'mass1'. +print(sample[0]['mass1']) + +# Draw 1000000 samples, and select all values of a certain parameter. +n_bins = 50 +samples = draw_samples_from_config( + path=CONFIG_PATH, num=1000000, seed=random_seed) + +fig, axes = plt.subplots(nrows=3, ncols=2) +ax1, ax2, ax3, ax4, ax5, ax6 = axes.flat + +ax1.hist(samples[:]['srcmass1'], bins=n_bins) +ax2.hist(samples[:]['srcmass2'], bins=n_bins) +ax3.hist(samples[:]['comoving_volume'], bins=n_bins) +ax4.hist(samples[:]['redshift'], bins=n_bins) +ax5.hist(samples[:]['distance'], bins=n_bins) +ax6.hist(samples[:]['mass1'], bins=n_bins) + +ax1.set_title('srcmass1') +ax2.set_title('srcmass2') +ax3.set_title('comoving_volume') +ax4.set_title('redshift') +ax5.set_title('distance') +ax6.set_title('mass1 or mass2') + +plt.tight_layout() +plt.show() diff --git a/latest/html/_downloads/e4fb10637416fd9054296bbe56d69231/lisa_smbhb_relbin.ini b/latest/html/_downloads/e4fb10637416fd9054296bbe56d69231/lisa_smbhb_relbin.ini new file mode 100644 index 00000000000..9c0e3f65efa --- /dev/null +++ b/latest/html/_downloads/e4fb10637416fd9054296bbe56d69231/lisa_smbhb_relbin.ini @@ -0,0 +1,90 @@ +[data] +instruments = LISA_A LISA_E LISA_T +trigger-time = 4800021.15572853 +analysis-start-time = -4800021 +analysis-end-time = 26735979 +pad-data = 0 +sample-rate = 0.2 +fake-strain = LISA_A:analytical_psd_lisa_tdi_AE LISA_E:analytical_psd_lisa_tdi_AE LISA_T:analytical_psd_lisa_tdi_T +; fake-strain-extra-args = LISA_A:len_arm:2.5e9 LISA_A:acc_noise_level:2.4e-15 LISA_A:oms_noise_level:7.9e-12 LISA_A:tdi:1.5 LISA_E:len_arm:2.5e9 LISA_E:acc_noise_level:2.4e-15 LISA_E:oms_noise_level:7.9e-12 LISA_E:tdi:1.5 LISA_T:len_arm:2.5e9 LISA_T:acc_noise_level:2.4e-15 LISA_T:oms_noise_level:7.9e-12 LISA_T:tdi:1.5 +fake-strain-extra-args = len_arm:2.5e9 acc_noise_level:2.4e-15 oms_noise_level:7.9e-12 tdi:1.5 +fake-strain-seed = LISA_A:100 LISA_E:150 LISA_T:200 +fake-strain-flow = 0.0001 +fake-strain-sample-rate = 0.2 +fake-strain-filter-duration = 31536000 +psd-estimation = median-mean +psd-inverse-length = 267840 +invpsd-trunc-method = hann +psd-segment-length = 267840 +psd-segment-stride = 133920 +psd-start-time = -4800021 +psd-end-time = 26735979 +channel-name = LISA_A:LISA_A LISA_E:LISA_E LISA_T:LISA_T +injection-file = injection_smbhb.hdf + +[model] +name = relative +low-frequency-cutoff = 0.0001 +high-frequency-cutoff = 0.1 +epsilon = 0.01 +mass1_ref = 1015522.4376 +mass2_ref = 796849.1091 +tc_ref = 
4799624.274911478 +spin1z_ref = 0.597755394865021 +spin2z_ref = 0.36905807298613247 + +[variable_params] +mchirp = +q = +tc = + +[static_params] +; Change it to "ref_frame = SSB", if you use SSB frame in injection file. +ref_frame = LISA +approximant = BBHX_PhenomD +; You can use "1.5" or "2.0" for TDI. +; Please use the same TDI version for PSD and injection file. +tdi = 1.5 +coa_phase = 4.275929308696054 +eclipticlongitude = 5.4431083771985165 +eclipticlatitude = -1.2734504596198182 +polarization = 0.22558110042980073 +spin1z = 0.597755394865021 +spin2z = 0.36905807298613247 +distance = 17758.367941273442 +inclination = 1.5970175301911231 +t_obs_start = 31536000 +f_lower = 1e-4 +; Put LISA behind the Earth by ~20 degrees. +t_offset = 7365189.431698299 + +[prior-mchirp] +name = uniform +min-mchirp = 703772.7245316936 +max-mchirp = 860166.6633165143 + +[prior-q] +name = uniform +min-q = 1.1469802543574181 +max-q = 1.401864755325733 + +[prior-tc] +name = uniform +min-tc = 4798221.15572853 +max-tc = 4801821.15572853 + +[waveform_transforms-mass1+mass2] +name = mchirp_q_to_mass1_mass2 + +[sampler] +name = dynesty +dlogz = 0.1 +nlive = 150 + +; NOTE: While this example doesn't sample in polarization, if doing this we +; recommend the following transformation, and then sampling in this coordinate +; +; [waveform_transforms-polarization] +; name = custom +; inputs = better_pol, eclipticlongitude +; polarization = better_pol + eclipticlongitude diff --git a/latest/html/_downloads/e5a0a5c62180e637160d7fe908b15dbd/stat.pdf b/latest/html/_downloads/e5a0a5c62180e637160d7fe908b15dbd/stat.pdf new file mode 100644 index 00000000000..a356196388b Binary files /dev/null and b/latest/html/_downloads/e5a0a5c62180e637160d7fe908b15dbd/stat.pdf differ diff --git a/latest/html/_downloads/e624742bd0b6fac4d6f692fa2364acc1/gw150914_shape.pdf b/latest/html/_downloads/e624742bd0b6fac4d6f692fa2364acc1/gw150914_shape.pdf new file mode 100644 index 00000000000..e55167418d5 Binary files /dev/null and b/latest/html/_downloads/e624742bd0b6fac4d6f692fa2364acc1/gw150914_shape.pdf differ diff --git a/latest/html/_downloads/e6e757065100efe0ec250cc14a832a1a/spin_examples.png b/latest/html/_downloads/e6e757065100efe0ec250cc14a832a1a/spin_examples.png new file mode 100644 index 00000000000..e9f74c93422 Binary files /dev/null and b/latest/html/_downloads/e6e757065100efe0ec250cc14a832a1a/spin_examples.png differ diff --git a/latest/html/_downloads/e898125cf7aee08e9696e6389c6a8f4b/gw150914_h1_chirp.wav b/latest/html/_downloads/e898125cf7aee08e9696e6389c6a8f4b/gw150914_h1_chirp.wav new file mode 100644 index 00000000000..25a2193c54a Binary files /dev/null and b/latest/html/_downloads/e898125cf7aee08e9696e6389c6a8f4b/gw150914_h1_chirp.wav differ diff --git a/latest/html/_downloads/e9a79c2f7e52d454ca6e8f46c16628d7/event2_inj.ini b/latest/html/_downloads/e9a79c2f7e52d454ca6e8f46c16628d7/event2_inj.ini new file mode 100644 index 00000000000..f8ccd5269f9 --- /dev/null +++ b/latest/html/_downloads/e9a79c2f7e52d454ca6e8f46c16628d7/event2_inj.ini @@ -0,0 +1,30 @@ +# A "lensed" version of event1. This has the same parameters except for the sky +# location and time: the event happens ~one week later, with a slightly +# different sky location. The sky location and time were picked arbitrarily; +# no attempt was made at making sure these are actually physically possible. + +[variable_params] + +[static_params] +approximant = IMRPhenomD +tc = 1126859462.0 +srcmass1 = 35 +srcmass2 = 35 +distance = 500 +ra = 2.0 +dec = -1. 
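+# The parameters from here on are unchanged from event1, as noted above.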
+inclination = 0 +polarization = 0 +f_ref = 18 +f_lower = 18 +taper = start + +[waveform_transforms-mass1] +name = custom +inputs = srcmass1, distance +mass1 = srcmass1 * (1+redshift(distance)) + +[waveform_transforms-mass2] +name = custom +inputs = srcmass2, distance +mass2 = srcmass2 * (1+redshift(distance)) diff --git a/latest/html/_downloads/ea7cfe5da95cad9e92f2da94b44a2c9f/analytic.pdf b/latest/html/_downloads/ea7cfe5da95cad9e92f2da94b44a2c9f/analytic.pdf new file mode 100644 index 00000000000..65eb17ce53e Binary files /dev/null and b/latest/html/_downloads/ea7cfe5da95cad9e92f2da94b44a2c9f/analytic.pdf differ diff --git a/latest/html/_downloads/ea8b62d728fdb3bb0fe6d478a9a254b7/add_waveform_00.png b/latest/html/_downloads/ea8b62d728fdb3bb0fe6d478a9a254b7/add_waveform_00.png new file mode 100644 index 00000000000..0eac1fe6632 Binary files /dev/null and b/latest/html/_downloads/ea8b62d728fdb3bb0fe6d478a9a254b7/add_waveform_00.png differ diff --git a/latest/html/_downloads/ebeb2b3479033c4a3b3361caf8180638/plot_freq.pdf b/latest/html/_downloads/ebeb2b3479033c4a3b3361caf8180638/plot_freq.pdf new file mode 100644 index 00000000000..241c846d4bc Binary files /dev/null and b/latest/html/_downloads/ebeb2b3479033c4a3b3361caf8180638/plot_freq.pdf differ diff --git a/latest/html/_downloads/ec56f84d4e01b1c82083a56c0d8a5123/plot_waveform.pdf b/latest/html/_downloads/ec56f84d4e01b1c82083a56c0d8a5123/plot_waveform.pdf new file mode 100644 index 00000000000..c793ea5cb9a Binary files /dev/null and b/latest/html/_downloads/ec56f84d4e01b1c82083a56c0d8a5123/plot_waveform.pdf differ diff --git a/latest/html/_downloads/ee23b70d8131236cc3dd84ced4d22d73/gw150914_shape.py b/latest/html/_downloads/ee23b70d8131236cc3dd84ced4d22d73/gw150914_shape.py new file mode 100644 index 00000000000..ca2dfb4ff5a --- /dev/null +++ b/latest/html/_downloads/ee23b70d8131236cc3dd84ced4d22d73/gw150914_shape.py @@ -0,0 +1,35 @@ +import matplotlib.pyplot as pp +from pycbc.filter import highpass_fir, lowpass_fir +from pycbc.psd import welch, interpolate +from pycbc.catalog import Merger + + +for ifo in ['H1', 'L1']: + # Read data and remove low frequency content + h1 = Merger("GW150914").strain(ifo) + h1 = highpass_fir(h1, 15, 8) + + # Calculate the noise spectrum + psd = interpolate(welch(h1), 1.0 / h1.duration) + + # whiten + white_strain = (h1.to_frequencyseries() / psd ** 0.5).to_timeseries() + + # remove some of the high and low + smooth = highpass_fir(white_strain, 35, 8) + smooth = lowpass_fir(smooth, 300, 8) + + # time shift and flip L1 + if ifo == 'L1': + smooth *= -1 + smooth.roll(int(.007 / smooth.delta_t)) + + pp.plot(smooth.sample_times, smooth, label=ifo) + +pp.legend() +pp.xlim(1126259462.21, 1126259462.45) +pp.ylim(-150, 150) +pp.ylabel('Smoothed-Whitened Strain') +pp.grid() +pp.xlabel('GPS Time (s)') +pp.show() diff --git a/latest/html/_downloads/ee3f63cbeb84eeada66caea40b503142/read.py b/latest/html/_downloads/ee3f63cbeb84eeada66caea40b503142/read.py new file mode 100644 index 00000000000..ee303b293f0 --- /dev/null +++ b/latest/html/_downloads/ee3f63cbeb84eeada66caea40b503142/read.py @@ -0,0 +1,26 @@ +import matplotlib.pyplot as pp +import pycbc.psd +import pycbc.types + + +filename = 'example_psd.txt' + +# The PSD will be interpolated to the requested frequency spacing +delta_f = 1.0 / 4 +length = int(1024 / delta_f) +low_frequency_cutoff = 30.0 +psd = pycbc.psd.from_txt(filename, length, delta_f, + low_frequency_cutoff, is_asd_file=False) +pp.loglog(psd.sample_frequencies, psd, label='interpolated') + +# The PSD 
will be read in without modification +psd = pycbc.types.load_frequencyseries('./example_psd.txt') +pp.loglog(psd.sample_frequencies, psd, label='raw') + +pp.xlim(xmin=30, xmax=1000) +pp.legend() +pp.xlabel('Hz') +pp.show() + +# Save a psd to file, several formats are supported (.txt, .hdf, .npy) +psd.save('tmp_psd.txt') diff --git a/latest/html/_downloads/ee4f0142804bbd323df33620a7cdcff7/model.ini b/latest/html/_downloads/ee4f0142804bbd323df33620a7cdcff7/model.ini new file mode 100644 index 00000000000..27262f6505c --- /dev/null +++ b/latest/html/_downloads/ee4f0142804bbd323df33620a7cdcff7/model.ini @@ -0,0 +1,3 @@ +[model] +name = hierarchical +submodels = event1 event2 diff --git a/latest/html/_downloads/f054669a6fcd6624e0c07865da3978e7/plot_freq.png b/latest/html/_downloads/f054669a6fcd6624e0c07865da3978e7/plot_freq.png new file mode 100644 index 00000000000..593dcde2d45 Binary files /dev/null and b/latest/html/_downloads/f054669a6fcd6624e0c07865da3978e7/plot_freq.png differ diff --git a/latest/html/_downloads/f1d51bd458b4ecaf82da2987bcb1760d/run.sh b/latest/html/_downloads/f1d51bd458b4ecaf82da2987bcb1760d/run.sh new file mode 100644 index 00000000000..52398b98fc0 --- /dev/null +++ b/latest/html/_downloads/f1d51bd458b4ecaf82da2987bcb1760d/run.sh @@ -0,0 +1,24 @@ +#!/bin/sh + +# configuration files +PRIOR_CONFIG=gw150914_like.ini +DATA_CONFIG=data.ini +SAMPLER_CONFIG=emcee_pt-gw150914_like.ini + +OUTPUT_PATH=inference.hdf + +# the following sets the number of cores to use; adjust as needed to +# your computer's capabilities +NPROCS=10 + +# run sampler +# Running with OMP_NUM_THREADS=1 stops lalsimulation +# from spawning multiple jobs that would otherwise be used +# by pycbc_inference and cause a reduced runtime. +OMP_NUM_THREADS=1 \ +pycbc_inference --verbose \ + --seed 1897234 \ + --config-file ${PRIOR_CONFIG} ${DATA_CONFIG} ${SAMPLER_CONFIG} \ + --output-file ${OUTPUT_PATH} \ + --nprocesses ${NPROCS} \ + --force diff --git a/latest/html/_downloads/f2923c4f5e653f716f9a3d1d2efa38b6/add_waveform_01.hires.png b/latest/html/_downloads/f2923c4f5e653f716f9a3d1d2efa38b6/add_waveform_01.hires.png new file mode 100644 index 00000000000..52f94ad1288 Binary files /dev/null and b/latest/html/_downloads/f2923c4f5e653f716f9a3d1d2efa38b6/add_waveform_01.hires.png differ diff --git a/latest/html/_downloads/f3f76b9b3839c05a31fb5109042593f5/estimate.hires.png b/latest/html/_downloads/f3f76b9b3839c05a31fb5109042593f5/estimate.hires.png new file mode 100644 index 00000000000..36dc7995ed2 Binary files /dev/null and b/latest/html/_downloads/f3f76b9b3839c05a31fb5109042593f5/estimate.hires.png differ diff --git a/latest/html/_downloads/f67f0956310c0d7a705b20b9bf7d5a91/data.ini b/latest/html/_downloads/f67f0956310c0d7a705b20b9bf7d5a91/data.ini new file mode 100644 index 00000000000..f69c64648c1 --- /dev/null +++ b/latest/html/_downloads/f67f0956310c0d7a705b20b9bf7d5a91/data.ini @@ -0,0 +1,32 @@ +[data] +instruments = H1 L1 +trigger-time = 1126259462.43 +; See the documentation at +; http://pycbc.org/pycbc/latest/html/inference.html#simulated-bbh-example +; for details on the following settings: +analysis-start-time = -6 +analysis-end-time = 2 +psd-estimation = median-mean +psd-start-time = -256 +psd-end-time = 256 +psd-inverse-length = 8 +psd-segment-length = 8 +psd-segment-stride = 4 +; The frame files must be downloaded from GWOSC before running. Here, we +; assume that the files have been downloaded to the same directory. Adjust +; the file path as necessary if not. 
+frame-files = H1:H-H1_GWOSC_16KHZ_R1-1126257415-4096.gwf L1:L-L1_GWOSC_16KHZ_R1-1126257415-4096.gwf +channel-name = H1:GWOSC-16KHZ_R1_STRAIN L1:GWOSC-16KHZ_R1_STRAIN +; this will cause the data to be resampled to 2048 Hz: +sample-rate = 2048 +; We'll use a high-pass filter so as not to get numerical errors from the large +; amplitude low frequency noise. Here we use 15 Hz, which is safely below the +; low frequency cutoff of our likelihood integral (20 Hz) +strain-high-pass = 15 +; The pad-data argument is for the high-pass filter: 8s are added to the +; beginning/end of the analysis/psd times when the data is loaded. After the +; high pass filter is applied, the additional time is discarded. This pad is +; *in addition to* the time added to the analysis start/end time for the PSD +; inverse length. Since it is discarded before the data is transformed for the +; likelihood integral, it has little affect on the run time. +pad-data = 8 diff --git a/latest/html/_downloads/f85b80623cf43b9a861ca00942418ed6/data.pdf b/latest/html/_downloads/f85b80623cf43b9a861ca00942418ed6/data.pdf new file mode 100644 index 00000000000..a916822c57f Binary files /dev/null and b/latest/html/_downloads/f85b80623cf43b9a861ca00942418ed6/data.pdf differ diff --git a/latest/html/_downloads/fa1c075b7b79213b2035eed419618a10/emcee_pt-srcmasses_comoving_volume.ini b/latest/html/_downloads/fa1c075b7b79213b2035eed419618a10/emcee_pt-srcmasses_comoving_volume.ini new file mode 100644 index 00000000000..91c7106910a --- /dev/null +++ b/latest/html/_downloads/fa1c075b7b79213b2035eed419618a10/emcee_pt-srcmasses_comoving_volume.ini @@ -0,0 +1,39 @@ +;============================================================================== +; +; Emcee PT settings for CBC, comoving volume +; +;============================================================================== +; +; The following provides standard settings for emcee_pt when analying a +; combact binary merger. This assumes that the prior is specified in terms +; of the source masses (srcmass1, srcmass2) and comoving volume +; (comoving_volume). To speed up convergence, the source masses are sampled in +; chirp mass and mass ratio, and the comoving volume is sampled in the log. +; +; We set the number of effective samples to 1500 because we've found that +; emcee_pt struggles to acquire more than ~8 independent samples per walker. 
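+; With the 200 walkers configured below, ~8 independent samples per walker gives
+; roughly 1600 samples, comfortably above the 1500 effective-sample target.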
+; +[sampler] +name = emcee_pt +nwalkers = 200 +ntemps = 20 +effective-nsamples = 1500 +checkpoint-interval = 2000 +max-samples-per-chain = 1000 + +[sampler-burn_in] +burn-in-test = nacl & max_posterior + +[sampling_params] +srcmass1, srcmass2 = mchirp, q +comoving_volume = logv + +[sampling_transforms-mchirp+q] +name = mass1_mass2_to_mchirp_q +mass1_param = srcmass1 +mass2_param = srcmass2 + +[sampling_transforms-logv] +name = log +inputvar = comoving_volume +outputvar = logv diff --git a/latest/html/_downloads/fefae50936f9a7dc0e125dc7871795dd/read.pdf b/latest/html/_downloads/fefae50936f9a7dc0e125dc7871795dd/read.pdf new file mode 100644 index 00000000000..9ce3a9e646d Binary files /dev/null and b/latest/html/_downloads/fefae50936f9a7dc0e125dc7871795dd/read.pdf differ diff --git a/latest/html/_images/add_waveform_00.png b/latest/html/_images/add_waveform_00.png new file mode 100644 index 00000000000..0eac1fe6632 Binary files /dev/null and b/latest/html/_images/add_waveform_00.png differ diff --git a/latest/html/_images/add_waveform_01.png b/latest/html/_images/add_waveform_01.png new file mode 100644 index 00000000000..ca11646b879 Binary files /dev/null and b/latest/html/_images/add_waveform_01.png differ diff --git a/latest/html/_images/analytic.png b/latest/html/_images/analytic.png new file mode 100644 index 00000000000..40579c9e38b Binary files /dev/null and b/latest/html/_images/analytic.png differ diff --git a/latest/html/_images/chisq.png b/latest/html/_images/chisq.png new file mode 100644 index 00000000000..f46be4b136b Binary files /dev/null and b/latest/html/_images/chisq.png differ diff --git a/latest/html/_images/custom_00.png b/latest/html/_images/custom_00.png new file mode 100644 index 00000000000..6372dc26599 Binary files /dev/null and b/latest/html/_images/custom_00.png differ diff --git a/latest/html/_images/custom_01.png b/latest/html/_images/custom_01.png new file mode 100644 index 00000000000..c4171806b74 Binary files /dev/null and b/latest/html/_images/custom_01.png differ diff --git a/latest/html/_images/data.png b/latest/html/_images/data.png new file mode 100644 index 00000000000..1a27f45ebf8 Binary files /dev/null and b/latest/html/_images/data.png differ diff --git a/latest/html/_images/demarg_150914.png b/latest/html/_images/demarg_150914.png new file mode 100644 index 00000000000..29b6f6ff2b0 Binary files /dev/null and b/latest/html/_images/demarg_150914.png differ diff --git a/latest/html/_images/estimate.png b/latest/html/_images/estimate.png new file mode 100644 index 00000000000..7dc0f493842 Binary files /dev/null and b/latest/html/_images/estimate.png differ diff --git a/latest/html/_images/gw150914_h1_snr.png b/latest/html/_images/gw150914_h1_snr.png new file mode 100644 index 00000000000..0657795f32f Binary files /dev/null and b/latest/html/_images/gw150914_h1_snr.png differ diff --git a/latest/html/_images/gw150914_shape.png b/latest/html/_images/gw150914_shape.png new file mode 100644 index 00000000000..825d3a7e9e2 Binary files /dev/null and b/latest/html/_images/gw150914_shape.png differ diff --git a/latest/html/_images/higher_modes.png b/latest/html/_images/higher_modes.png new file mode 100644 index 00000000000..c395bdc6472 Binary files /dev/null and b/latest/html/_images/higher_modes.png differ diff --git a/latest/html/_images/hwinj.png b/latest/html/_images/hwinj.png new file mode 100644 index 00000000000..4c26399a939 Binary files /dev/null and b/latest/html/_images/hwinj.png differ diff --git 
a/latest/html/_images/inheritance-0c4a0e40ac775ec4ce1f562374ca8503d2a3d8bc.png b/latest/html/_images/inheritance-0c4a0e40ac775ec4ce1f562374ca8503d2a3d8bc.png new file mode 100644 index 00000000000..52981b1fcde Binary files /dev/null and b/latest/html/_images/inheritance-0c4a0e40ac775ec4ce1f562374ca8503d2a3d8bc.png differ diff --git a/latest/html/_images/inheritance-0c4a0e40ac775ec4ce1f562374ca8503d2a3d8bc.png.map b/latest/html/_images/inheritance-0c4a0e40ac775ec4ce1f562374ca8503d2a3d8bc.png.map new file mode 100644 index 00000000000..d3bfa4b54ba --- /dev/null +++ b/latest/html/_images/inheritance-0c4a0e40ac775ec4ce1f562374ca8503d2a3d8bc.png.map @@ -0,0 +1,6 @@ + + + + + + diff --git a/latest/html/_images/inheritance-135f539250b007c7cd68a043f4c6a5a395286031.png b/latest/html/_images/inheritance-135f539250b007c7cd68a043f4c6a5a395286031.png new file mode 100644 index 00000000000..2bc9a413c81 Binary files /dev/null and b/latest/html/_images/inheritance-135f539250b007c7cd68a043f4c6a5a395286031.png differ diff --git a/latest/html/_images/inheritance-135f539250b007c7cd68a043f4c6a5a395286031.png.map b/latest/html/_images/inheritance-135f539250b007c7cd68a043f4c6a5a395286031.png.map new file mode 100644 index 00000000000..16ef3f7785c --- /dev/null +++ b/latest/html/_images/inheritance-135f539250b007c7cd68a043f4c6a5a395286031.png.map @@ -0,0 +1,6 @@ + + + + + + diff --git a/latest/html/_images/inheritance-17f55d9c515cd42679023ba476ed6c5fdfd14c8e.png b/latest/html/_images/inheritance-17f55d9c515cd42679023ba476ed6c5fdfd14c8e.png new file mode 100644 index 00000000000..362285d7788 Binary files /dev/null and b/latest/html/_images/inheritance-17f55d9c515cd42679023ba476ed6c5fdfd14c8e.png differ diff --git a/latest/html/_images/inheritance-17f55d9c515cd42679023ba476ed6c5fdfd14c8e.png.map b/latest/html/_images/inheritance-17f55d9c515cd42679023ba476ed6c5fdfd14c8e.png.map new file mode 100644 index 00000000000..0297b33016b --- /dev/null +++ b/latest/html/_images/inheritance-17f55d9c515cd42679023ba476ed6c5fdfd14c8e.png.map @@ -0,0 +1,6 @@ + + + + + + diff --git a/latest/html/_images/inheritance-194142140183ce5ecc64f61a3dac99da83710ea4.png b/latest/html/_images/inheritance-194142140183ce5ecc64f61a3dac99da83710ea4.png new file mode 100644 index 00000000000..55fd0e59394 Binary files /dev/null and b/latest/html/_images/inheritance-194142140183ce5ecc64f61a3dac99da83710ea4.png differ diff --git a/latest/html/_images/inheritance-194142140183ce5ecc64f61a3dac99da83710ea4.png.map b/latest/html/_images/inheritance-194142140183ce5ecc64f61a3dac99da83710ea4.png.map new file mode 100644 index 00000000000..a72273605a4 --- /dev/null +++ b/latest/html/_images/inheritance-194142140183ce5ecc64f61a3dac99da83710ea4.png.map @@ -0,0 +1,4 @@ + + + + diff --git a/latest/html/_images/inheritance-285ecc445818d1aeeece3abc3c5c4547e88fac62.png b/latest/html/_images/inheritance-285ecc445818d1aeeece3abc3c5c4547e88fac62.png new file mode 100644 index 00000000000..60e8dd7aee9 Binary files /dev/null and b/latest/html/_images/inheritance-285ecc445818d1aeeece3abc3c5c4547e88fac62.png differ diff --git a/latest/html/_images/inheritance-285ecc445818d1aeeece3abc3c5c4547e88fac62.png.map b/latest/html/_images/inheritance-285ecc445818d1aeeece3abc3c5c4547e88fac62.png.map new file mode 100644 index 00000000000..69c2c6358a2 --- /dev/null +++ b/latest/html/_images/inheritance-285ecc445818d1aeeece3abc3c5c4547e88fac62.png.map @@ -0,0 +1,4 @@ + + + + diff --git a/latest/html/_images/inheritance-2dbbdf7af028f87f4d54856f8f0b14e55238af8e.png 
b/latest/html/_images/inheritance-2dbbdf7af028f87f4d54856f8f0b14e55238af8e.png new file mode 100644 index 00000000000..8376921d5b0 Binary files /dev/null and b/latest/html/_images/inheritance-2dbbdf7af028f87f4d54856f8f0b14e55238af8e.png differ diff --git a/latest/html/_images/inheritance-2dbbdf7af028f87f4d54856f8f0b14e55238af8e.png.map b/latest/html/_images/inheritance-2dbbdf7af028f87f4d54856f8f0b14e55238af8e.png.map new file mode 100644 index 00000000000..5400f7a9027 --- /dev/null +++ b/latest/html/_images/inheritance-2dbbdf7af028f87f4d54856f8f0b14e55238af8e.png.map @@ -0,0 +1,4 @@ + + + + diff --git a/latest/html/_images/inheritance-35c3ce1c26527d3150a094dca0a606ce9e45e76e.png b/latest/html/_images/inheritance-35c3ce1c26527d3150a094dca0a606ce9e45e76e.png new file mode 100644 index 00000000000..bdec8e74c06 Binary files /dev/null and b/latest/html/_images/inheritance-35c3ce1c26527d3150a094dca0a606ce9e45e76e.png differ diff --git a/latest/html/_images/inheritance-35c3ce1c26527d3150a094dca0a606ce9e45e76e.png.map b/latest/html/_images/inheritance-35c3ce1c26527d3150a094dca0a606ce9e45e76e.png.map new file mode 100644 index 00000000000..ea8f434e41e --- /dev/null +++ b/latest/html/_images/inheritance-35c3ce1c26527d3150a094dca0a606ce9e45e76e.png.map @@ -0,0 +1,7 @@ + + + + + + + diff --git a/latest/html/_images/inheritance-43f4b6a2bc6e7389b171ecf5cef3c86e2f6ce959.png b/latest/html/_images/inheritance-43f4b6a2bc6e7389b171ecf5cef3c86e2f6ce959.png new file mode 100644 index 00000000000..2e0df857c11 Binary files /dev/null and b/latest/html/_images/inheritance-43f4b6a2bc6e7389b171ecf5cef3c86e2f6ce959.png differ diff --git a/latest/html/_images/inheritance-43f4b6a2bc6e7389b171ecf5cef3c86e2f6ce959.png.map b/latest/html/_images/inheritance-43f4b6a2bc6e7389b171ecf5cef3c86e2f6ce959.png.map new file mode 100644 index 00000000000..f2a877666d5 --- /dev/null +++ b/latest/html/_images/inheritance-43f4b6a2bc6e7389b171ecf5cef3c86e2f6ce959.png.map @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/latest/html/_images/inheritance-47e9a8870a3789f8706f5c886a423d5d9cd15af7.png b/latest/html/_images/inheritance-47e9a8870a3789f8706f5c886a423d5d9cd15af7.png new file mode 100644 index 00000000000..a643dbea023 Binary files /dev/null and b/latest/html/_images/inheritance-47e9a8870a3789f8706f5c886a423d5d9cd15af7.png differ diff --git a/latest/html/_images/inheritance-47e9a8870a3789f8706f5c886a423d5d9cd15af7.png.map b/latest/html/_images/inheritance-47e9a8870a3789f8706f5c886a423d5d9cd15af7.png.map new file mode 100644 index 00000000000..08c53d6f1b5 --- /dev/null +++ b/latest/html/_images/inheritance-47e9a8870a3789f8706f5c886a423d5d9cd15af7.png.map @@ -0,0 +1,6 @@ + + + + + + diff --git a/latest/html/_images/inheritance-49bf9d266524be009859206f5dfcb5cb0ed9cdfa.png b/latest/html/_images/inheritance-49bf9d266524be009859206f5dfcb5cb0ed9cdfa.png new file mode 100644 index 00000000000..1a293a81b33 Binary files /dev/null and b/latest/html/_images/inheritance-49bf9d266524be009859206f5dfcb5cb0ed9cdfa.png differ diff --git a/latest/html/_images/inheritance-49bf9d266524be009859206f5dfcb5cb0ed9cdfa.png.map b/latest/html/_images/inheritance-49bf9d266524be009859206f5dfcb5cb0ed9cdfa.png.map new file mode 100644 index 00000000000..75f717f56b6 --- /dev/null +++ b/latest/html/_images/inheritance-49bf9d266524be009859206f5dfcb5cb0ed9cdfa.png.map @@ -0,0 +1,5 @@ + + + + + diff --git a/latest/html/_images/inheritance-63302b4969d4d61c46d7555583d98ae7d696b4b7.png b/latest/html/_images/inheritance-63302b4969d4d61c46d7555583d98ae7d696b4b7.png new file mode 
100644 index 00000000000..d27a0a6d17b Binary files /dev/null and b/latest/html/_images/inheritance-63302b4969d4d61c46d7555583d98ae7d696b4b7.png differ diff --git a/latest/html/_images/inheritance-63302b4969d4d61c46d7555583d98ae7d696b4b7.png.map b/latest/html/_images/inheritance-63302b4969d4d61c46d7555583d98ae7d696b4b7.png.map new file mode 100644 index 00000000000..056486dbfec --- /dev/null +++ b/latest/html/_images/inheritance-63302b4969d4d61c46d7555583d98ae7d696b4b7.png.map @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/latest/html/_images/inheritance-6638e2798cac2aaf6f9b72b697ad1d63a472305d.png b/latest/html/_images/inheritance-6638e2798cac2aaf6f9b72b697ad1d63a472305d.png new file mode 100644 index 00000000000..92c2269cafc Binary files /dev/null and b/latest/html/_images/inheritance-6638e2798cac2aaf6f9b72b697ad1d63a472305d.png differ diff --git a/latest/html/_images/inheritance-6638e2798cac2aaf6f9b72b697ad1d63a472305d.png.map b/latest/html/_images/inheritance-6638e2798cac2aaf6f9b72b697ad1d63a472305d.png.map new file mode 100644 index 00000000000..2d014afe08e --- /dev/null +++ b/latest/html/_images/inheritance-6638e2798cac2aaf6f9b72b697ad1d63a472305d.png.map @@ -0,0 +1,4 @@ + + + + diff --git a/latest/html/_images/inheritance-7016cbadcaca2216268e651bc22c6cc468e2c081.png b/latest/html/_images/inheritance-7016cbadcaca2216268e651bc22c6cc468e2c081.png new file mode 100644 index 00000000000..63002635bb7 Binary files /dev/null and b/latest/html/_images/inheritance-7016cbadcaca2216268e651bc22c6cc468e2c081.png differ diff --git a/latest/html/_images/inheritance-7016cbadcaca2216268e651bc22c6cc468e2c081.png.map b/latest/html/_images/inheritance-7016cbadcaca2216268e651bc22c6cc468e2c081.png.map new file mode 100644 index 00000000000..9b6a1a5fe2d --- /dev/null +++ b/latest/html/_images/inheritance-7016cbadcaca2216268e651bc22c6cc468e2c081.png.map @@ -0,0 +1,4 @@ + + + + diff --git a/latest/html/_images/inheritance-743b05add17dd9b5254278a435367b6e5139d519.png b/latest/html/_images/inheritance-743b05add17dd9b5254278a435367b6e5139d519.png new file mode 100644 index 00000000000..fa217f36c6b Binary files /dev/null and b/latest/html/_images/inheritance-743b05add17dd9b5254278a435367b6e5139d519.png differ diff --git a/latest/html/_images/inheritance-743b05add17dd9b5254278a435367b6e5139d519.png.map b/latest/html/_images/inheritance-743b05add17dd9b5254278a435367b6e5139d519.png.map new file mode 100644 index 00000000000..c51e459ad15 --- /dev/null +++ b/latest/html/_images/inheritance-743b05add17dd9b5254278a435367b6e5139d519.png.map @@ -0,0 +1,7 @@ + + + + + + + diff --git a/latest/html/_images/inheritance-78570aa48e06cb52d7ec24aed7bb46a73945c110.png b/latest/html/_images/inheritance-78570aa48e06cb52d7ec24aed7bb46a73945c110.png new file mode 100644 index 00000000000..1b949b9f28e Binary files /dev/null and b/latest/html/_images/inheritance-78570aa48e06cb52d7ec24aed7bb46a73945c110.png differ diff --git a/latest/html/_images/inheritance-78570aa48e06cb52d7ec24aed7bb46a73945c110.png.map b/latest/html/_images/inheritance-78570aa48e06cb52d7ec24aed7bb46a73945c110.png.map new file mode 100644 index 00000000000..b47bd69b331 --- /dev/null +++ b/latest/html/_images/inheritance-78570aa48e06cb52d7ec24aed7bb46a73945c110.png.map @@ -0,0 +1,4 @@ + + + + diff --git a/latest/html/_images/inheritance-8276920748f64014b7aa631c03c502281f415da7.png b/latest/html/_images/inheritance-8276920748f64014b7aa631c03c502281f415da7.png new file mode 100644 index 00000000000..af8b9f677c7 Binary files /dev/null and 
b/latest/html/_images/inheritance-8276920748f64014b7aa631c03c502281f415da7.png differ diff --git a/latest/html/_images/inheritance-8276920748f64014b7aa631c03c502281f415da7.png.map b/latest/html/_images/inheritance-8276920748f64014b7aa631c03c502281f415da7.png.map new file mode 100644 index 00000000000..da41472f8f9 --- /dev/null +++ b/latest/html/_images/inheritance-8276920748f64014b7aa631c03c502281f415da7.png.map @@ -0,0 +1,6 @@ + + + + + + diff --git a/latest/html/_images/inheritance-94e55b939af5288e9179bf63f02bd6bf41fbd6c4.png b/latest/html/_images/inheritance-94e55b939af5288e9179bf63f02bd6bf41fbd6c4.png new file mode 100644 index 00000000000..cbbdcf9d576 Binary files /dev/null and b/latest/html/_images/inheritance-94e55b939af5288e9179bf63f02bd6bf41fbd6c4.png differ diff --git a/latest/html/_images/inheritance-94e55b939af5288e9179bf63f02bd6bf41fbd6c4.png.map b/latest/html/_images/inheritance-94e55b939af5288e9179bf63f02bd6bf41fbd6c4.png.map new file mode 100644 index 00000000000..c376ae18250 --- /dev/null +++ b/latest/html/_images/inheritance-94e55b939af5288e9179bf63f02bd6bf41fbd6c4.png.map @@ -0,0 +1,5 @@ + + + + + diff --git a/latest/html/_images/inheritance-99282dff716eebff9e975291ab8cacf7a57da3f2.png b/latest/html/_images/inheritance-99282dff716eebff9e975291ab8cacf7a57da3f2.png new file mode 100644 index 00000000000..1e91aef2fa0 Binary files /dev/null and b/latest/html/_images/inheritance-99282dff716eebff9e975291ab8cacf7a57da3f2.png differ diff --git a/latest/html/_images/inheritance-99282dff716eebff9e975291ab8cacf7a57da3f2.png.map b/latest/html/_images/inheritance-99282dff716eebff9e975291ab8cacf7a57da3f2.png.map new file mode 100644 index 00000000000..d84ecb907ba --- /dev/null +++ b/latest/html/_images/inheritance-99282dff716eebff9e975291ab8cacf7a57da3f2.png.map @@ -0,0 +1,4 @@ + + + + diff --git a/latest/html/_images/inheritance-a84ff498e884301e72320ab52ba9b4720c0eb3df.png b/latest/html/_images/inheritance-a84ff498e884301e72320ab52ba9b4720c0eb3df.png new file mode 100644 index 00000000000..f44f24c27f6 Binary files /dev/null and b/latest/html/_images/inheritance-a84ff498e884301e72320ab52ba9b4720c0eb3df.png differ diff --git a/latest/html/_images/inheritance-a84ff498e884301e72320ab52ba9b4720c0eb3df.png.map b/latest/html/_images/inheritance-a84ff498e884301e72320ab52ba9b4720c0eb3df.png.map new file mode 100644 index 00000000000..e43bf42e6da --- /dev/null +++ b/latest/html/_images/inheritance-a84ff498e884301e72320ab52ba9b4720c0eb3df.png.map @@ -0,0 +1,6 @@ + + + + + + diff --git a/latest/html/_images/inheritance-b45a2a7b4e9089b0aca33e3a71666b42d5efc66b.png b/latest/html/_images/inheritance-b45a2a7b4e9089b0aca33e3a71666b42d5efc66b.png new file mode 100644 index 00000000000..21571165847 Binary files /dev/null and b/latest/html/_images/inheritance-b45a2a7b4e9089b0aca33e3a71666b42d5efc66b.png differ diff --git a/latest/html/_images/inheritance-b45a2a7b4e9089b0aca33e3a71666b42d5efc66b.png.map b/latest/html/_images/inheritance-b45a2a7b4e9089b0aca33e3a71666b42d5efc66b.png.map new file mode 100644 index 00000000000..2f502c71ad9 --- /dev/null +++ b/latest/html/_images/inheritance-b45a2a7b4e9089b0aca33e3a71666b42d5efc66b.png.map @@ -0,0 +1,4 @@ + + + + diff --git a/latest/html/_images/inheritance-b68e0a9c17d78f5f2ee20392e60422d624410547.png b/latest/html/_images/inheritance-b68e0a9c17d78f5f2ee20392e60422d624410547.png new file mode 100644 index 00000000000..ddf94b283d1 Binary files /dev/null and b/latest/html/_images/inheritance-b68e0a9c17d78f5f2ee20392e60422d624410547.png differ diff --git 
a/latest/html/_images/inheritance-b68e0a9c17d78f5f2ee20392e60422d624410547.png.map b/latest/html/_images/inheritance-b68e0a9c17d78f5f2ee20392e60422d624410547.png.map new file mode 100644 index 00000000000..443238d1b90 --- /dev/null +++ b/latest/html/_images/inheritance-b68e0a9c17d78f5f2ee20392e60422d624410547.png.map @@ -0,0 +1,7 @@ + + + + + + + diff --git a/latest/html/_images/inheritance-b908626d1c769862e42661303d8091f5d10d9efd.png b/latest/html/_images/inheritance-b908626d1c769862e42661303d8091f5d10d9efd.png new file mode 100644 index 00000000000..413d5c16d34 Binary files /dev/null and b/latest/html/_images/inheritance-b908626d1c769862e42661303d8091f5d10d9efd.png differ diff --git a/latest/html/_images/inheritance-b908626d1c769862e42661303d8091f5d10d9efd.png.map b/latest/html/_images/inheritance-b908626d1c769862e42661303d8091f5d10d9efd.png.map new file mode 100644 index 00000000000..169867a5fd7 --- /dev/null +++ b/latest/html/_images/inheritance-b908626d1c769862e42661303d8091f5d10d9efd.png.map @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/latest/html/_images/inheritance-f2176d6359b35b4d27d2748ff99a69262668ba62.png b/latest/html/_images/inheritance-f2176d6359b35b4d27d2748ff99a69262668ba62.png new file mode 100644 index 00000000000..70d5bbba31b Binary files /dev/null and b/latest/html/_images/inheritance-f2176d6359b35b4d27d2748ff99a69262668ba62.png differ diff --git a/latest/html/_images/inheritance-f2176d6359b35b4d27d2748ff99a69262668ba62.png.map b/latest/html/_images/inheritance-f2176d6359b35b4d27d2748ff99a69262668ba62.png.map new file mode 100644 index 00000000000..9f15194af3e --- /dev/null +++ b/latest/html/_images/inheritance-f2176d6359b35b4d27d2748ff99a69262668ba62.png.map @@ -0,0 +1,5 @@ + + + + + diff --git a/latest/html/_images/inheritance-f381ddcd26e21142bb4ab078486879a1522fbcd7.png b/latest/html/_images/inheritance-f381ddcd26e21142bb4ab078486879a1522fbcd7.png new file mode 100644 index 00000000000..b275ca9c3b9 Binary files /dev/null and b/latest/html/_images/inheritance-f381ddcd26e21142bb4ab078486879a1522fbcd7.png differ diff --git a/latest/html/_images/inheritance-f381ddcd26e21142bb4ab078486879a1522fbcd7.png.map b/latest/html/_images/inheritance-f381ddcd26e21142bb4ab078486879a1522fbcd7.png.map new file mode 100644 index 00000000000..5531b164588 --- /dev/null +++ b/latest/html/_images/inheritance-f381ddcd26e21142bb4ab078486879a1522fbcd7.png.map @@ -0,0 +1,7 @@ + + + + + + + diff --git a/latest/html/_images/lisa_smbhb_mass_tc.png b/latest/html/_images/lisa_smbhb_mass_tc.png new file mode 100644 index 00000000000..ec179920542 Binary files /dev/null and b/latest/html/_images/lisa_smbhb_mass_tc.png differ diff --git a/latest/html/_images/lisa_smbhb_mass_tc_0.png b/latest/html/_images/lisa_smbhb_mass_tc_0.png new file mode 100644 index 00000000000..02155e09278 Binary files /dev/null and b/latest/html/_images/lisa_smbhb_mass_tc_0.png differ diff --git a/latest/html/_images/mass_examples.png b/latest/html/_images/mass_examples.png new file mode 100644 index 00000000000..1a7f235435d Binary files /dev/null and b/latest/html/_images/mass_examples.png differ diff --git a/latest/html/_images/mchirp_q_from_uniform_m1m2_example.png b/latest/html/_images/mchirp_q_from_uniform_m1m2_example.png new file mode 100644 index 00000000000..a6d88906ff2 Binary files /dev/null and b/latest/html/_images/mchirp_q_from_uniform_m1m2_example.png differ diff --git a/latest/html/_images/on.png b/latest/html/_images/on.png new file mode 100644 index 00000000000..9b677315443 Binary files /dev/null and 
b/latest/html/_images/on.png differ diff --git a/latest/html/_images/pass.png b/latest/html/_images/pass.png new file mode 100644 index 00000000000..35cd68c456b Binary files /dev/null and b/latest/html/_images/pass.png differ diff --git a/latest/html/_images/plot_detwaveform.png b/latest/html/_images/plot_detwaveform.png new file mode 100644 index 00000000000..764005e4c17 Binary files /dev/null and b/latest/html/_images/plot_detwaveform.png differ diff --git a/latest/html/_images/plot_fd_td.png b/latest/html/_images/plot_fd_td.png new file mode 100644 index 00000000000..090b362824a Binary files /dev/null and b/latest/html/_images/plot_fd_td.png differ diff --git a/latest/html/_images/plot_freq.png b/latest/html/_images/plot_freq.png new file mode 100644 index 00000000000..593dcde2d45 Binary files /dev/null and b/latest/html/_images/plot_freq.png differ diff --git a/latest/html/_images/plot_phase.png b/latest/html/_images/plot_phase.png new file mode 100644 index 00000000000..877dcc19003 Binary files /dev/null and b/latest/html/_images/plot_phase.png differ diff --git a/latest/html/_images/plot_waveform.png b/latest/html/_images/plot_waveform.png new file mode 100644 index 00000000000..33009d7e504 Binary files /dev/null and b/latest/html/_images/plot_waveform.png differ diff --git a/latest/html/_images/posterior-normal2d.png b/latest/html/_images/posterior-normal2d.png new file mode 100644 index 00000000000..a5e8f8b2791 Binary files /dev/null and b/latest/html/_images/posterior-normal2d.png differ diff --git a/latest/html/_images/read.png b/latest/html/_images/read.png new file mode 100644 index 00000000000..7ec0ecb275b Binary files /dev/null and b/latest/html/_images/read.png differ diff --git a/latest/html/_images/relative.png b/latest/html/_images/relative.png new file mode 100644 index 00000000000..efdab8e6932 Binary files /dev/null and b/latest/html/_images/relative.png differ diff --git a/latest/html/_images/sample.png b/latest/html/_images/sample.png new file mode 100644 index 00000000000..9e6285c103e Binary files /dev/null and b/latest/html/_images/sample.png differ diff --git a/latest/html/_images/sampling_from_config_example.png b/latest/html/_images/sampling_from_config_example.png new file mode 100644 index 00000000000..2a00c96833c Binary files /dev/null and b/latest/html/_images/sampling_from_config_example.png differ diff --git a/latest/html/_images/single.png b/latest/html/_images/single.png new file mode 100644 index 00000000000..01be2017d20 Binary files /dev/null and b/latest/html/_images/single.png differ diff --git a/latest/html/_images/single_demarg.png b/latest/html/_images/single_demarg.png new file mode 100644 index 00000000000..07f9da0cf57 Binary files /dev/null and b/latest/html/_images/single_demarg.png differ diff --git a/latest/html/_images/single_instant.png b/latest/html/_images/single_instant.png new file mode 100644 index 00000000000..44fe51efc78 Binary files /dev/null and b/latest/html/_images/single_instant.png differ diff --git a/latest/html/_images/single_marg.png b/latest/html/_images/single_marg.png new file mode 100644 index 00000000000..446f8a03012 Binary files /dev/null and b/latest/html/_images/single_marg.png differ diff --git a/latest/html/_images/snr.png b/latest/html/_images/snr.png new file mode 100644 index 00000000000..1d8b52893e7 Binary files /dev/null and b/latest/html/_images/snr.png differ diff --git a/latest/html/_images/spin_examples.png b/latest/html/_images/spin_examples.png new file mode 100644 index 00000000000..e9f74c93422 Binary 
files /dev/null and b/latest/html/_images/spin_examples.png differ diff --git a/latest/html/_images/spin_spatial_distr_example.png b/latest/html/_images/spin_spatial_distr_example.png new file mode 100644 index 00000000000..d792bc49f33 Binary files /dev/null and b/latest/html/_images/spin_spatial_distr_example.png differ diff --git a/latest/html/_images/stat.png b/latest/html/_images/stat.png new file mode 100644 index 00000000000..2e73711aaa9 Binary files /dev/null and b/latest/html/_images/stat.png differ diff --git a/latest/html/_images/timeseries.png b/latest/html/_images/timeseries.png new file mode 100644 index 00000000000..20daa635681 Binary files /dev/null and b/latest/html/_images/timeseries.png differ diff --git a/latest/html/_images/workflow_planning.png b/latest/html/_images/workflow_planning.png new file mode 100644 index 00000000000..06daf9aafe0 Binary files /dev/null and b/latest/html/_images/workflow_planning.png differ diff --git a/latest/html/_include/distributions-table.html b/latest/html/_include/distributions-table.html new file mode 100644 index 00000000000..31be01a2240 --- /dev/null +++ b/latest/html/_include/distributions-table.html @@ -0,0 +1,195 @@ + + + + + + + <no title> — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_include/inference_data_opts-table.html b/latest/html/_include/inference_data_opts-table.html new file mode 100644 index 00000000000..81a92b22856 --- /dev/null +++ b/latest/html/_include/inference_data_opts-table.html @@ -0,0 +1,496 @@ + + + + + + + <no title> — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Name

Syntax

Description

instruments

INSTRUMENTS [INSTRUMENTS …]

Instruments to analyze, e.g. H1 L1.

trigger-time

TRIGGER_TIME

Reference GPS time (at geocenter) from which the +(analysis|psd)-(start|end)-time options are measured. +The integer seconds will be used. Default is 0; i.e., +if not provided, the analysis and psd times should be +in GPS seconds.

analysis-start-time

IFO:TIME [IFO:TIME …]

The start time to use for the analysis, measured with +respect to the trigger-time. If psd-inverse-length is +provided, the given start time will be padded by half +that length to account for wrap-around effects.

analysis-end-time

IFO:TIME [IFO:TIME …]

The end time to use for the analysis, measured with +respect to the trigger-time. If psd-inverse-length is +provided, the given end time will be padded by half +that length to account for wrap-around effects.

psd-start-time

IFO:TIME [IFO:TIME …]

Start time to use for PSD estimation, measured with +respect to the trigger-time.

psd-end-time

IFO:TIME [IFO:TIME …]

End time to use for PSD estimation, measured with +respect to the trigger-time.

data-conditioning-low-freq

IFO:FLOW [IFO:FLOW …]

Low frequency cutoff of the data. Needed for PSD +estimation and when creating fake strain. If not +provided, will use the model’s low-frequency-cutoff.

Options to select the method of PSD generation: +The options psd-model, psd-file, asd-file, and psd-estimation are +mutually exclusive.

psd-model

IFO:MODEL [IFO:MODEL …]

Get PSD from given analytical model. Choose from any +available PSD model.

psd-extra-args

DETECTOR:PARAM:VALUE +[DETECTOR:PARAM:VALUE …]

(optional) Extra arguments passed to the PSD models.

psd-file

IFO:FILE [IFO:FILE …]

Get PSD using given PSD ASCII file

asd-file

IFO:FILE [IFO:FILE …]

Get PSD using given ASD ASCII file

psd-estimation

IFO:FILE [IFO:FILE …]

Measure PSD from the data, using given average method. +Choose from mean, median or median-mean.

psd-segment-length

IFO:LENGTH [IFO:LENGTH …]

(Required for psd-estimation) The segment length for +PSD estimation (s)

psd-segment-stride

IFO:STRIDE [IFO:STRIDE …]

(Required for psd-estimation) The separation between +consecutive segments (s)

psd-num-segments

IFO:NUM [IFO:NUM …]

(Optional, used only with psd-estimation). If given, +PSDs will be estimated using only this number of +segments. If more data is given than needed to make +this number of segments, then the excess data will not be +used in the PSD estimate. If not enough data is given, +the code will fail.

psd-inverse-length

IFO:LENGTH [IFO:LENGTH …]

(Optional) The maximum length of the impulse response +of the overwhitening filter (s)

invpsd-trunc-method

{hann}

(Optional) What truncation method to use when applying +psd-inverse-length. If not provided, a hard truncation +will be used.

psd-output

IFO:FILE [IFO:FILE …]

(Optional) Write PSD to specified file

psdvar-segment

SECONDS

Length of segment when calculating the PSD +variability.

psdvar-short-segment

SECONDS

Length of short segment for outliers removal in PSD +variability calculation.

psdvar-long-segment

SECONDS

Length of long segment when calculating the PSD +variability.

psdvar-psd-duration

SECONDS

Duration of short segments for PSD estimation.

psdvar-psd-stride

SECONDS

Separation between PSD estimation segments.

psdvar-low-freq

HERTZ

Minimum frequency to consider in strain bandpass.

psdvar-high-freq

HERTZ

Maximum frequency to consider in strain bandpass.

Options for obtaining h(t): +These options are used for generating h(t) either by reading from a file +or by generating it. This is only needed if the PSD is to be estimated +from the data, i.e. if the psd-estimation option is given. This group +supports reading from multiple ifos simultaneously.

strain-high-pass

IFO:FREQUENCY [IFO:FREQUENCY …]

High pass frequency

strain-low-pass

IFO:FREQUENCY [IFO:FREQUENCY …]

Low pass frequency

pad-data

IFO:LENGTH [IFO:LENGTH …]

Extra padding to remove highpass corruption (integer +seconds, default 8)

taper-data

IFO:LENGTH [IFO:LENGTH …]

Taper ends of data to zero using the supplied length +as a window (integer seconds)

sample-rate

IFO:RATE [IFO:RATE …]

The sample rate to use for h(t) generation (integer +Hz).

channel-name

IFO:CHANNEL [IFO:CHANNEL …]

The channel containing the gravitational strain data

frame-cache

IFO:FRAME_CACHE [IFO:FRAME_CACHE +…]

Cache file containing the frame locations.

frame-files

IFO:FRAME_FILES [IFO:FRAME_FILES +…]

list of frame files

hdf-store

IFO:HDF_STORE_FILE +[IFO:HDF_STORE_FILE …]

Store of time series data in hdf format

frame-type

IFO:FRAME_TYPE [IFO:FRAME_TYPE +…]

(optional) Replaces frame-files. Use datafind to get +the needed frame file(s) of this type.

frame-sieve

IFO:FRAME_SIEVE [IFO:FRAME_SIEVE +…]

(optional), Only use frame files where the URL matches +the regular expression given.

fake-strain

IFO:CHOICE [IFO:CHOICE …]

Name of model PSD for generating fake gaussian noise. +Choose from any available PSD model, or zeroNoise.

fake-strain-extra-args

DETECTOR:PARAM:VALUE +[DETECTOR:PARAM:VALUE …]

(optional) Extra arguments passed to the PSD models.

fake-strain-seed

IFO:SEED [IFO:SEED …]

Seed value for the generation of fake colored gaussian +noise

fake-strain-from-file

IFO:FILE [IFO:FILE …]

File containing ASD for generating fake noise from it.

fake-strain-flow

FAKE_STRAIN_FLOW [FAKE_STRAIN_FLOW +…]

Low frequency cutoff of the fake strain

fake-strain-filter-duration

FAKE_STRAIN_FILTER_DURATION +[FAKE_STRAIN_FILTER_DURATION …]

Duration in seconds of the fake data coloring filter

fake-strain-sample-rate

FAKE_STRAIN_SAMPLE_RATE +[FAKE_STRAIN_SAMPLE_RATE …]

Sample rate of the fake data generation

injection-file

IFO:FILE [IFO:FILE …]

(optional) Injection file containing parameters of CBC +signals to be added to the strain

sgburst-injection-file

IFO:FILE [IFO:FILE …]

(optional) Injection file containing parameters of +sine-Gaussian burst signals to add to the strain

injection-scale-factor

IFO:VAL [IFO:VAL …]

Divide injections by this factor before adding to the +strain data

injection-sample-rate

IFO:VAL [IFO:VAL …]

Sample rate to use for injections (integer Hz). +Typically similar to the strain data sample rate. If +not provided, the strain sample rate will be used

injection-f-ref

IFO:VALUE [IFO:VALUE …]

Reference frequency in Hz for creating CBC injections +from an XML file

injection-f-final

IFO:VALUE [IFO:VALUE …]

Override the f_final field of a CBC XML injection file +(frequency in Hz)

gating-file

IFO:FILE [IFO:FILE …]

(optional) Text file of gating segments to apply. +Format of each line (units s) : gps_time +zeros_half_width pad_half_width

autogating-threshold

IFO:SIGMA [IFO:SIGMA …]

If given, find and gate glitches producing a deviation +larger than SIGMA in the whitened strain time series

autogating-max-iterations

SIGMA

If given, iteratively apply autogating

autogating-cluster

IFO:SECONDS [IFO:SECONDS …]

Length of clustering window for detecting glitches for +autogating.

autogating-width

IFO:SECONDS [IFO:SECONDS …]

Half-width of the gating window.

autogating-taper

IFO:SECONDS [IFO:SECONDS …]

Taper the strain before and after each gating window +over a duration of SECONDS.

autogating-pad

IFO:SECONDS [IFO:SECONDS …]

Ignore the given length of whitened strain at the ends +of a segment, to avoid filters ringing.

gating-method

{hard,taper,paint} +[{hard,taper,paint} …]

Choose the method for gating. Default: taper

normalize-strain

IFO:VALUE [IFO:VALUE …]

(optional) Divide frame data by constant.

zpk-z

IFO:VALUE [IFO:VALUE …]

(optional) Zero-pole-gain (zpk) filter strain. A list +of zeros for transfer function

zpk-p

IFO:VALUE [IFO:VALUE …]

(optional) Zero-pole-gain (zpk) filter strain. A list +of poles for transfer function

zpk-k

IFO:VALUE [IFO:VALUE …]

(optional) Zero-pole-gain (zpk) filter strain. +Transfer function gain

Options for gating data:

gate

IFO:CENTRALTIME:HALFDUR:TAPERDUR +[IFO:CENTRALTIME:HALFDUR:TAPERDUR +…]

Apply one or more gates to the data before filtering.

gate-overwhitened

Overwhiten data first, then apply the gates specified +in gate. Overwhitening allows for sharper tapers to +be used, since lines are not blurred.

psd-gate

IFO:CENTRALTIME:HALFDUR:TAPERDUR +[IFO:CENTRALTIME:HALFDUR:TAPERDUR +…]

Apply one or more gates to the data used for computing +the PSD. Gates are applied prior to FFT-ing the data +for PSD estimation.

Options for querying data quality (DQ):

dq-segment-name

DQ_SEGMENT_NAME

The status flag to query for data quality. Default is +“DATA”.

dq-source

{any,GWOSC,dqsegdb}

Where to look for DQ information. If “any” (the +default), GWOSC will be tried first, then dqsegdb.

dq-server

DQ_SERVER

The server to use for dqsegdb.

veto-definer

VETO_DEFINER

Path to a veto definer file that defines groups of +flags, which themselves define a set of DQ segments.

+ + +
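The options above are the same ones accepted in the [data] section of a pycbc_inference configuration file (compare the data.ini example earlier in this changeset). As a rough, hedged sketch of what the main settings correspond to in Python, assuming the GWOSC frame file from that data.ini example has been downloaded locally (this uses standard pycbc.frame / pycbc.filter / pycbc.psd utilities for illustration; it is not the exact code path pycbc_inference takes):

    import pycbc.frame
    import pycbc.filter
    import pycbc.psd

    # channel-name / frame-files: read the strain from a local GWOSC frame file
    strain = pycbc.frame.read_frame('H-H1_GWOSC_16KHZ_R1-1126257415-4096.gwf',
                                    'H1:GWOSC-16KHZ_R1_STRAIN')

    # strain-high-pass = 15 and sample-rate = 2048
    strain = pycbc.filter.highpass(strain, 15.0)
    strain = pycbc.filter.resample_to_delta_t(strain, 1.0 / 2048)

    # psd-segment-length = 8 s, psd-segment-stride = 4 s (converted to samples)
    seg_len = int(8 / strain.delta_t)
    seg_stride = int(4 / strain.delta_t)
    psd = pycbc.psd.welch(strain, seg_len=seg_len, seg_stride=seg_stride)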
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_include/inference_io_inheritance_diagrams.html b/latest/html/_include/inference_io_inheritance_diagrams.html new file mode 100644 index 00000000000..2288115a4e6 --- /dev/null +++ b/latest/html/_include/inference_io_inheritance_diagrams.html @@ -0,0 +1,262 @@ + + + + + + + <no title> — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
    +
  • cpnest_file:

  • +
+
Inheritance diagram of pycbc.inference.io.cpnest.CPNestFile
+ + + + +
+

+
+
    +
  • dynesty_file:

  • +
+
Inheritance diagram of pycbc.inference.io.dynesty.DynestyFile
+ + + + + + +
+

+
+
    +
  • emcee_file:

  • +
+
Inheritance diagram of pycbc.inference.io.emcee.EmceeFile
+ + + + + + +
+

+
+
    +
  • emcee_pt_file:

  • +
+
Inheritance diagram of pycbc.inference.io.emcee_pt.EmceePTFile
+ + + + + + + +
+

+
+
    +
  • epsie_file:

  • +
+
Inheritance diagram of pycbc.inference.io.epsie.EpsieFile
+ + + + + + + +
+

+
+
    +
  • multinest_file:

  • +
+
Inheritance diagram of pycbc.inference.io.multinest.MultinestFile
+ + + + +
+

+
+
    +
  • nessai_file:

  • +
+
Inheritance diagram of pycbc.inference.io.nessai.NessaiFile
+ + + + + + +
+

+
+
    +
  • posterior_file:

  • +
+
Inheritance diagram of pycbc.inference.io.posterior.PosteriorFile
+ + + +
+

+
+
    +
  • ptemcee_file:

  • +
+
Inheritance diagram of pycbc.inference.io.ptemcee.PTEmceeFile
+ + + + + + + +
+

+
+
    +
  • snowline_file:

  • +
+
Inheritance diagram of pycbc.inference.io.snowline.SnowlineFile
+ + + + +
+

+
+
    +
  • ultranest_file:

  • +
+
Inheritance diagram of pycbc.inference.io.ultranest.UltranestFile
+ + + + + +
+

+
+ + +
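These file classes are rarely constructed by hand; a results file written by pycbc_inference is normally opened with the generic loader, which picks the matching handler class from the file's metadata. A minimal sketch, assuming a file named inference.hdf produced by an emcee_pt run with the srcmass parameters used in the configuration above (the file and parameter names are carried over from that example, not mandated by the API):

    from pycbc.inference import io

    # loadfile inspects the file and returns the appropriate *File handler,
    # e.g. EmceePTFile for an emcee_pt run
    fp = io.loadfile('inference.hdf', 'r')
    samples = fp.read_samples(['srcmass1', 'srcmass2'])
    print(samples['srcmass1'].mean())
    fp.close()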
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_include/models-table.html b/latest/html/_include/models-table.html new file mode 100644 index 00000000000..c6dec8dcc36 --- /dev/null +++ b/latest/html/_include/models-table.html @@ -0,0 +1,201 @@ + + + + + + + <no title> — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Name

Class

'brute_lisa_sky_modes_marginalize'

pycbc.inference.models.brute_marg.BruteLISASkyModesMarginalize

'brute_parallel_gaussian_marginalize'

pycbc.inference.models.brute_marg.BruteParallelGaussianMarginalize

'gated_gaussian_margpol'

pycbc.inference.models.gated_gaussian_noise.GatedGaussianMargPol

'gated_gaussian_noise'

pycbc.inference.models.gated_gaussian_noise.GatedGaussianNoise

'gaussian_noise'

pycbc.inference.models.gaussian_noise.GaussianNoise

'hierarchical'

pycbc.inference.models.hierarchical.HierarchicalModel

'joint_primary_marginalized'

pycbc.inference.models.hierarchical.JointPrimaryMarginalizedModel

'marginalized_hmpolphase'

pycbc.inference.models.marginalized_gaussian_noise.MarginalizedHMPolPhase

'marginalized_phase'

pycbc.inference.models.marginalized_gaussian_noise.MarginalizedPhaseGaussianNoise

'marginalized_polarization'

pycbc.inference.models.marginalized_gaussian_noise.MarginalizedPolarization

'marginalized_time'

pycbc.inference.models.marginalized_gaussian_noise.MarginalizedTime

'multi_signal'

pycbc.inference.models.hierarchical.MultiSignalModel

'relative'

pycbc.inference.models.relbin.Relative

'relative_time'

pycbc.inference.models.relbin.RelativeTime

'relative_time_dom'

pycbc.inference.models.relbin.RelativeTimeDom

'single_template'

pycbc.inference.models.single_template.SingleTemplate

'test_eggbox'

pycbc.inference.models.analytic.TestEggbox

'test_normal'

pycbc.inference.models.analytic.TestNormal

'test_posterior'

pycbc.inference.models.analytic.TestPosterior

'test_prior'

pycbc.inference.models.analytic.TestPrior

'test_rosenbrock'

pycbc.inference.models.analytic.TestRosenbrock

'test_volcano'

pycbc.inference.models.analytic.TestVolcano

+ + +
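The names in the left column are the values accepted by the name option of a [model] section (for instance, name = hierarchical in the model.ini example earlier selects pycbc.inference.models.hierarchical.HierarchicalModel). The analytic test models can also be exercised directly from Python; a minimal sketch, assuming the TestNormal constructor takes the list of variable parameter names (the parameters x and y are arbitrary illustrative names):

    from pycbc.inference.models.analytic import TestNormal

    # a 2D unit normal over the (arbitrary) parameters 'x' and 'y'
    model = TestNormal(['x', 'y'])
    model.update(x=0.1, y=-0.2)
    print(model.loglikelihood)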
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_include/psd_models-table.html b/latest/html/_include/psd_models-table.html new file mode 100644 index 00000000000..814e823fbf8 --- /dev/null +++ b/latest/html/_include/psd_models-table.html @@ -0,0 +1,417 @@ + + + + + + + <no title> — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Name

Function

AdVBNSOptimizedSensitivityP1200087

pycbc.psd.analytical.AdVBNSOptimizedSensitivityP1200087()

AdVDesignSensitivityP1200087

pycbc.psd.analytical.AdVDesignSensitivityP1200087()

AdVEarlyHighSensitivityP1200087

pycbc.psd.analytical.AdVEarlyHighSensitivityP1200087()

AdVEarlyLowSensitivityP1200087

pycbc.psd.analytical.AdVEarlyLowSensitivityP1200087()

AdVLateHighSensitivityP1200087

pycbc.psd.analytical.AdVLateHighSensitivityP1200087()

AdVLateLowSensitivityP1200087

pycbc.psd.analytical.AdVLateLowSensitivityP1200087()

AdVMidHighSensitivityP1200087

pycbc.psd.analytical.AdVMidHighSensitivityP1200087()

AdVMidLowSensitivityP1200087

pycbc.psd.analytical.AdVMidLowSensitivityP1200087()

AdVO3LowT1800545

pycbc.psd.analytical.AdVO3LowT1800545()

AdVO4IntermediateT1800545

pycbc.psd.analytical.AdVO4IntermediateT1800545()

AdVO4T1800545

pycbc.psd.analytical.AdVO4T1800545()

AdvVirgo

pycbc.psd.analytical.AdvVirgo()

CosmicExplorerP1600143

pycbc.psd.analytical.CosmicExplorerP1600143()

CosmicExplorerPessimisticP1600143

pycbc.psd.analytical.CosmicExplorerPessimisticP1600143()

CosmicExplorerWidebandP1600143

pycbc.psd.analytical.CosmicExplorerWidebandP1600143()

EinsteinTelescopeP1600143

pycbc.psd.analytical.EinsteinTelescopeP1600143()

GEOHF

pycbc.psd.analytical.GEOHF()

GEO

pycbc.psd.analytical.GEO()

KAGRA128MpcT1800545

pycbc.psd.analytical.KAGRA128MpcT1800545()

KAGRA25MpcT1800545

pycbc.psd.analytical.KAGRA25MpcT1800545()

KAGRA80MpcT1800545

pycbc.psd.analytical.KAGRA80MpcT1800545()

KAGRADesignSensitivityT1600593

pycbc.psd.analytical.KAGRADesignSensitivityT1600593()

KAGRAEarlySensitivityT1600593

pycbc.psd.analytical.KAGRAEarlySensitivityT1600593()

KAGRALateSensitivityT1600593

pycbc.psd.analytical.KAGRALateSensitivityT1600593()

KAGRAMidSensitivityT1600593

pycbc.psd.analytical.KAGRAMidSensitivityT1600593()

KAGRAOpeningSensitivityT1600593

pycbc.psd.analytical.KAGRAOpeningSensitivityT1600593()

KAGRA

pycbc.psd.analytical.KAGRA()

TAMA

pycbc.psd.analytical.TAMA()

Virgo

pycbc.psd.analytical.Virgo()

aLIGO140MpcT1800545

pycbc.psd.analytical.aLIGO140MpcT1800545()

aLIGO175MpcT1800545

pycbc.psd.analytical.aLIGO175MpcT1800545()

aLIGOAPlusDesignSensitivityT1800042

pycbc.psd.analytical.aLIGOAPlusDesignSensitivityT1800042()

aLIGOAdVO3LowT1800545

pycbc.psd.analytical.aLIGOAdVO3LowT1800545()

aLIGOAdVO4IntermediateT1800545

pycbc.psd.analytical.aLIGOAdVO4IntermediateT1800545()

aLIGOAdVO4T1800545

pycbc.psd.analytical.aLIGOAdVO4T1800545()

aLIGOBHBH20DegGWINC

pycbc.psd.analytical.aLIGOBHBH20DegGWINC()

aLIGOBHBH20Deg

pycbc.psd.analytical.aLIGOBHBH20Deg()

aLIGOBNSOptimizedSensitivityP1200087

pycbc.psd.analytical.aLIGOBNSOptimizedSensitivityP1200087()

aLIGODesignSensitivityP1200087

pycbc.psd.analytical.aLIGODesignSensitivityP1200087()

aLIGODesignSensitivityT1800044

pycbc.psd.analytical.aLIGODesignSensitivityT1800044()

aLIGOEarlyHighSensitivityP1200087

pycbc.psd.analytical.aLIGOEarlyHighSensitivityP1200087()

aLIGOEarlyLowSensitivityP1200087

pycbc.psd.analytical.aLIGOEarlyLowSensitivityP1200087()

aLIGOHighFrequencyGWINC

pycbc.psd.analytical.aLIGOHighFrequencyGWINC()

aLIGOHighFrequency

pycbc.psd.analytical.aLIGOHighFrequency()

aLIGOKAGRA128MpcT1800545

pycbc.psd.analytical.aLIGOKAGRA128MpcT1800545()

aLIGOKAGRA25MpcT1800545

pycbc.psd.analytical.aLIGOKAGRA25MpcT1800545()

aLIGOKAGRA80MpcT1800545

pycbc.psd.analytical.aLIGOKAGRA80MpcT1800545()

aLIGOLateHighSensitivityP1200087

pycbc.psd.analytical.aLIGOLateHighSensitivityP1200087()

aLIGOLateLowSensitivityP1200087

pycbc.psd.analytical.aLIGOLateLowSensitivityP1200087()

aLIGOMidHighSensitivityP1200087

pycbc.psd.analytical.aLIGOMidHighSensitivityP1200087()

aLIGOMidLowSensitivityP1200087

pycbc.psd.analytical.aLIGOMidLowSensitivityP1200087()

aLIGONSNSOptGWINC

pycbc.psd.analytical.aLIGONSNSOptGWINC()

aLIGONSNSOpt

pycbc.psd.analytical.aLIGONSNSOpt()

aLIGONoSRMHighPower

pycbc.psd.analytical.aLIGONoSRMHighPower()

aLIGONoSRMLowPowerGWINC

pycbc.psd.analytical.aLIGONoSRMLowPowerGWINC()

aLIGONoSRMLowPower

pycbc.psd.analytical.aLIGONoSRMLowPower()

aLIGOO3LowT1800545

pycbc.psd.analytical.aLIGOO3LowT1800545()

aLIGOQuantumBHBH20Deg

pycbc.psd.analytical.aLIGOQuantumBHBH20Deg()

aLIGOQuantumHighFrequency

pycbc.psd.analytical.aLIGOQuantumHighFrequency()

aLIGOQuantumNSNSOpt

pycbc.psd.analytical.aLIGOQuantumNSNSOpt()

aLIGOQuantumNoSRMHighPower

pycbc.psd.analytical.aLIGOQuantumNoSRMHighPower()

aLIGOQuantumNoSRMLowPower

pycbc.psd.analytical.aLIGOQuantumNoSRMLowPower()

aLIGOQuantumZeroDetHighPower

pycbc.psd.analytical.aLIGOQuantumZeroDetHighPower()

aLIGOQuantumZeroDetLowPower

pycbc.psd.analytical.aLIGOQuantumZeroDetLowPower()

aLIGOThermal

pycbc.psd.analytical.aLIGOThermal()

aLIGOZeroDetHighPowerGWINC

pycbc.psd.analytical.aLIGOZeroDetHighPowerGWINC()

aLIGOZeroDetHighPower

pycbc.psd.analytical.aLIGOZeroDetHighPower()

aLIGOZeroDetLowPowerGWINC

pycbc.psd.analytical.aLIGOZeroDetLowPowerGWINC()

aLIGOZeroDetLowPower

pycbc.psd.analytical.aLIGOZeroDetLowPower()

aLIGOaLIGO140MpcT1800545

pycbc.psd.analytical.aLIGOaLIGO140MpcT1800545()

aLIGOaLIGO175MpcT1800545

pycbc.psd.analytical.aLIGOaLIGO175MpcT1800545()

aLIGOaLIGODesignSensitivityT1800044

pycbc.psd.analytical.aLIGOaLIGODesignSensitivityT1800044()

aLIGOaLIGOO3LowT1800545

pycbc.psd.analytical.aLIGOaLIGOO3LowT1800545()

analytical_psd_lisa_tdi_AE_confusion

pycbc.psd.analytical_space.analytical_psd_lisa_tdi_AE_confusion()

analytical_psd_lisa_tdi_AE

pycbc.psd.analytical_space.analytical_psd_lisa_tdi_AE()

analytical_psd_lisa_tdi_T

pycbc.psd.analytical_space.analytical_psd_lisa_tdi_T()

analytical_psd_lisa_tdi_XYZ

pycbc.psd.analytical_space.analytical_psd_lisa_tdi_XYZ()

analytical_psd_taiji_tdi_AE_confusion

pycbc.psd.analytical_space.analytical_psd_taiji_tdi_AE_confusion()

analytical_psd_taiji_tdi_AE

pycbc.psd.analytical_space.analytical_psd_taiji_tdi_AE()

analytical_psd_taiji_tdi_T

pycbc.psd.analytical_space.analytical_psd_taiji_tdi_T()

analytical_psd_taiji_tdi_XYZ

pycbc.psd.analytical_space.analytical_psd_taiji_tdi_XYZ()

analytical_psd_tianqin_tdi_AE_confusion

pycbc.psd.analytical_space.analytical_psd_tianqin_tdi_AE_confusion()

analytical_psd_tianqin_tdi_AE

pycbc.psd.analytical_space.analytical_psd_tianqin_tdi_AE()

analytical_psd_tianqin_tdi_T

pycbc.psd.analytical_space.analytical_psd_tianqin_tdi_T()

analytical_psd_tianqin_tdi_XYZ

pycbc.psd.analytical_space.analytical_psd_tianqin_tdi_XYZ()

eLIGOModel

pycbc.psd.analytical.eLIGOModel()

eLIGOShot

pycbc.psd.analytical.eLIGOShot()

flat_unity

pycbc.psd.analytical.flat_unity()

iLIGOModel

pycbc.psd.analytical.iLIGOModel()

iLIGOSRD

pycbc.psd.analytical.iLIGOSRD()

iLIGOSeismic

pycbc.psd.analytical.iLIGOSeismic()

iLIGOShot

pycbc.psd.analytical.iLIGOShot()

iLIGOThermal

pycbc.psd.analytical.iLIGOThermal()

sh_transformed_psd_lisa_tdi_XYZ

pycbc.psd.analytical_space.sh_transformed_psd_lisa_tdi_XYZ()

+ + +
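Each name in this table can be given to the psd-model (or fake-strain) option listed above, or called directly as a function of the frequency-series length, frequency resolution, and low-frequency cutoff. A short sketch with arbitrary illustrative values for the resolution and cutoff:

    from pycbc.psd.analytical import aLIGOZeroDetHighPower, from_string

    delta_f = 1.0 / 16
    flen = int(1024 / delta_f) + 1
    flow = 20.0

    # call the analytic model directly ...
    psd = aLIGOZeroDetHighPower(flen, delta_f, flow)
    # ... or look it up by its string name
    psd2 = from_string('aLIGOZeroDetHighPower', flen, delta_f, flow)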
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_include/sampler_inheritance_diagrams.html b/latest/html/_include/sampler_inheritance_diagrams.html new file mode 100644 index 00000000000..2f3cce41fe0 --- /dev/null +++ b/latest/html/_include/sampler_inheritance_diagrams.html @@ -0,0 +1,255 @@ + + + + + + + <no title> — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
    +
  • cpnest:

  • +
+
Inheritance diagram of pycbc.inference.sampler.cpnest.CPNestSampler
+ + +
+

+
+
    +
  • dummy:

  • +
+
Inheritance diagram of pycbc.inference.sampler.dummy.DummySampler
+ + + +
+

+
+
    +
  • dynesty:

  • +
+
Inheritance diagram of pycbc.inference.sampler.dynesty.DynestySampler
+ + + +
+

+
+
    +
  • emcee:

  • +
+
Inheritance diagram of pycbc.inference.sampler.emcee.EmceeEnsembleSampler
+ + + + + +
+

+
+
    +
  • emcee_pt:

  • +
+
Inheritance diagram of pycbc.inference.sampler.emcee_pt.EmceePTSampler
+ + + + + + +
+

+
+
    +
  • epsie:

  • +
+
Inheritance diagram of pycbc.inference.sampler.epsie.EpsieSampler
+ + + + + +
+

+
+
    +
  • multinest:

  • +
+
Inheritance diagram of pycbc.inference.sampler.multinest.MultinestSampler
+ + + +
+

+
+
    +
  • nessai:

  • +
+
Inheritance diagram of pycbc.inference.sampler.nessai.NessaiSampler
+ + + +
+

+
+
    +
  • ptemcee:

  • +
+
Inheritance diagram of pycbc.inference.sampler.ptemcee.PTEmceeSampler
+ + + + + +
+

+
+
    +
  • refine:

  • +
+
Inheritance diagram of pycbc.inference.sampler.refine.RefineSampler
+ + + + +
+

+
+
    +
  • snowline:

  • +
+
Inheritance diagram of pycbc.inference.sampler.snowline.SnowlineSampler
+ + + +
+

+
+
    +
  • ultranest:

  • +
+
Inheritance diagram of pycbc.inference.sampler.ultranest.UltranestSampler
+ + + +
+

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_include/samplers-table.html b/latest/html/_include/samplers-table.html new file mode 100644 index 00000000000..bb381d56cc9 --- /dev/null +++ b/latest/html/_include/samplers-table.html @@ -0,0 +1,171 @@ + + + + + + + <no title> — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_include/transforms-table.html b/latest/html/_include/transforms-table.html new file mode 100644 index 00000000000..4595ae80bf0 --- /dev/null +++ b/latest/html/_include/transforms-table.html @@ -0,0 +1,234 @@ + + + + + + + <no title> — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Name

Class

'align_total_spin'

pycbc.transforms.AlignTotalSpin

'aligned_mass_spin_to_cartesian_spin'

pycbc.transforms.AlignedMassSpinToCartesianSpin

'cartesian_spin_1_to_spherical_spin_1'

pycbc.transforms.CartesianSpin1ToSphericalSpin1

'cartesian_spin_2_to_spherical_spin_2'

pycbc.transforms.CartesianSpin2ToSphericalSpin2

'cartesian_spin_to_aligned_mass_spin'

pycbc.transforms.CartesianSpinToAlignedMassSpin

'cartesian_spin_to_chi_p'

pycbc.transforms.CartesianSpinToChiP

'cartesian_spin_to_precession_mass_spin'

pycbc.transforms.CartesianSpinToPrecessionMassSpin

'cartesian_to_spherical'

pycbc.transforms.CartesianToSpherical

'chirp_distance_to_distance'

pycbc.transforms.ChirpDistanceToDistance

'custom'

pycbc.transforms.CustomTransform

'custom_multi'

pycbc.transforms.CustomTransformMultiOutputs

'distance_to_chirp_distance'

pycbc.transforms.DistanceToChirpDistance

'distance_to_redshift'

pycbc.transforms.DistanceToRedshift

'exponent'

pycbc.transforms.Exponent

'geo_to_lisa'

pycbc.transforms.GEOToLISA

'geo_to_ssb'

pycbc.transforms.GEOToSSB

'lambda_from_multiple_tov_files'

pycbc.transforms.LambdaFromMultipleTOVFiles

'lambda_from_tov_file'

pycbc.transforms.LambdaFromTOVFile

'lisa_to_geo'

pycbc.transforms.LISAToGEO

'lisa_to_ssb'

pycbc.transforms.LISAToSSB

'log'

pycbc.transforms.Log

'logistic'

pycbc.transforms.Logistic

'logit'

pycbc.transforms.Logit

'mass1_mass2_to_mchirp_eta'

pycbc.transforms.Mass1Mass2ToMchirpEta

'mass1_mass2_to_mchirp_q'

pycbc.transforms.Mass1Mass2ToMchirpQ

'mchirp_eta_to_mass1_mass2'

pycbc.transforms.MchirpEtaToMass1Mass2

'mchirp_q_to_mass1_mass2'

pycbc.transforms.MchirpQToMass1Mass2

'precession_mass_spin_to_cartesian_spin'

pycbc.transforms.PrecessionMassSpinToCartesianSpin

'spherical_spin_1_to_cartesian_spin_1'

pycbc.transforms.SphericalSpin1ToCartesianSpin1

'spherical_spin_2_to_cartesian_spin_2'

pycbc.transforms.SphericalSpin2ToCartesianSpin2

'spherical_to_cartesian'

pycbc.transforms.SphericalToCartesian

'ssb_to_geo'

pycbc.transforms.SSBToGEO

'ssb_to_lisa'

pycbc.transforms.SSBToLISA

+ + +
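These classes are what the name option of a [sampling_transforms-*] (or [waveform_transforms-*]) section resolves to; mass1_mass2_to_mchirp_q, for example, is the transform selected in the emcee_pt configuration earlier in this changeset. A hedged sketch of calling one directly, reusing the srcmass parameter names from that configuration (the keyword arguments and the .transform call pattern are assumptions for illustration):

    from pycbc.transforms import Mass1Mass2ToMchirpQ

    # same parameter names as the [sampling_transforms-mchirp+q] section above
    t = Mass1Mass2ToMchirpQ(mass1_param='srcmass1', mass2_param='srcmass2')
    out = t.transform({'srcmass1': 36.0, 'srcmass2': 29.0})
    print(out['mchirp'], out['q'])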
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_include/waveform-parameters.html b/latest/html/_include/waveform-parameters.html new file mode 100644 index 00000000000..9fdc21f01bc --- /dev/null +++ b/latest/html/_include/waveform-parameters.html @@ -0,0 +1,330 @@ + + + + + + + <no title> — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Parameter

Description

'mass1'

The mass of the first component object in the binary (in solar masses).

'mass2'

The mass of the second component object in the binary (in solar masses).

'spin1x'

The x component of the first binary component’s dimensionless spin.

'spin1y'

The y component of the first binary component’s dimensionless spin.

'spin1z'

The z component of the first binary component’s dimensionless spin.

'spin2x'

The x component of the second binary component’s dimensionless spin.

'spin2y'

The y component of the second binary component’s dimensionless spin.

'spin2z'

The z component of the second binary component’s dimensionless spin.

'eccentricity'

Eccentricity.

'lambda1'

The dimensionless tidal deformability parameter of object 1.

'lambda2'

The dimensionless tidal deformability parameter of object 2.

'dquad_mon1'

Quadrupole-monopole parameter / m_1^5 -1.

'dquad_mon2'

Quadrupole-monopole parameter / m_2^5 -1.

'lambda_octu1'

The octupolar tidal deformability parameter of object 1.

'lambda_octu2'

The octupolar tidal deformability parameter of object 2.

'quadfmode1'

The quadrupolar f-mode angular frequency of object 1.

'quadfmode2'

The quadrupolar f-mode angular frequency of object 2.

'octufmode1'

The octupolar f-mode angular frequency of object 1.

'octufmode2'

The octupolar f-mode angular frequency of object 2.

'dchi0'

0PN testingGR parameter.

'dchi1'

0.5PN testingGR parameter.

'dchi2'

1PN testingGR parameter.

'dchi3'

1.5PN testingGR parameter.

'dchi4'

2PN testingGR parameter.

'dchi5'

2.5PN testingGR parameter.

'dchi5l'

2.5PN logarithm testingGR parameter.

'dchi6'

3PN testingGR parameter.

'dchi6l'

3PN logarithm testingGR parameter.

'dchi7'

3.5PN testingGR parameter.

'dalpha1'

Merger-ringdown testingGR parameter.

'dalpha2'

Merger-ringdown testingGR parameter.

'dalpha3'

Merger-ringdown testingGR parameter.

'dalpha4'

Merger-ringdown testingGR parameter.

'dalpha5'

Merger-ringdown testingGR parameter.

'dbeta1'

Intermediate testingGR parameter.

'dbeta2'

Intermediate testingGR parameter.

'dbeta3'

Intermediate testingGR parameter.

'distance'

Luminosity distance to the binary (in Mpc).

'coa_phase'

Coalescence phase of the binary (in rad).

'inclination'

Inclination (rad), defined as the angle between the orbital angular momentum L and the line-of-sight at the reference frequency.

'long_asc_nodes'

Longitude of ascending nodes axis (rad).

'mean_per_ano'

Mean anomaly of the periastron (rad).

'delta_t'

The time step used to generate the waveform (in s).

'f_lower'

The starting frequency of the waveform (in Hz).

'approximant'

A string that indicates the chosen approximant.

'f_ref'

The reference frequency.

'phase_order'

The pN order of the orbital phase. The default of -1 indicates that all implemented orders are used.

'spin_order'

The pN order of the spin corrections. The default of -1 indicates that all implemented orders are used.

'tidal_order'

The pN order of the tidal corrections. The default of -1 indicates that all implemented orders are used.

'amplitude_order'

The pN order of the amplitude. The default of -1 indicates that all implemented orders are used.

'eccentricity_order'

The pN order of the eccentricity corrections. The default of -1 indicates that all implemented orders are used.

'frame_axis'

Allows choosing the frame axis: one of orbital_l, view, or total_j.

'modes_choice'

Allows selecting which modes to turn on, among orbital_l, view, and total_j.

'side_bands'

Flag for generating sidebands.

'mode_array'

Choose which (l,m) modes to include when generating a waveform. Only available if the approximant supports this feature. By default pass None and let lalsimulation use its default behaviour. Example: mode_array = [ [2,2], [2,-2] ]

'numrel_data'

Sets the NR flags; only needed for NR waveforms.

'delta_f'

The frequency step used to generate the waveform (in Hz).

'f_final'

The ending frequency of the waveform. The default (0) indicates that the choice is made by the respective approximant.

'f_final_func'

Use the given frequency function to compute f_final based on the parameters of the waveform.

'tc'

Coalescence time (in s): the time at which the gravitational-wave signal reaches the origin of the chosen coordinate system.

'ra'

Right ascension (rad).

'dec'

Declination (rad).

'polarization'

Polarization angle (rad) in the chosen coordinate system.

'eclipticlatitude'

Ecliptic latitude in SSB/LISA coordinates.

'eclipticlongitude'

Ecliptic longitude in SSB/LISA coordinates.
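For orientation, a minimal sketch of how a few of the parameters above are passed to the waveform generator; the approximant name and all numerical values below are illustrative assumptions, not recommended settings.

>>> from pycbc.waveform import get_td_waveform
>>> # Plus/cross polarizations for an illustrative 30+30 solar-mass binary,
>>> # sampled at 4096 Hz and started from 20 Hz.
>>> hp, hc = get_td_waveform(approximant='IMRPhenomD',
...                          mass1=30., mass2=30.,
...                          spin1z=0., spin2z=0.,
...                          distance=400.,
...                          f_lower=20., delta_t=1.0/4096)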

+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/index.html b/latest/html/_modules/index.html new file mode 100644 index 00000000000..b51ec3be28d --- /dev/null +++ b/latest/html/_modules/index.html @@ -0,0 +1,374 @@ + + + + + + Overview: module code — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
+
+
+
+
+ +

All modules for which code is available

+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc.html b/latest/html/_modules/pycbc.html new file mode 100644 index 00000000000..5d83f2ba475 --- /dev/null +++ b/latest/html/_modules/pycbc.html @@ -0,0 +1,387 @@ + + + + + + pycbc — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc
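Before the listing, a minimal sketch of how the logging helpers defined in this module are typically wired into a script; the command-line value passed to parse_args is illustrative.

>>> import argparse
>>> import pycbc
>>> parser = argparse.ArgumentParser()
>>> pycbc.add_common_pycbc_options(parser)   # adds -v/--verbose and --version
>>> opts = parser.parse_args(['-vv'])        # two -v flags request debug output
>>> pycbc.init_logging(opts.verbose)         # sets the root logger to DEBUG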

+# Copyright (C) 2012  Alex Nitz, Josh Willis
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""PyCBC contains a toolkit for CBC gravitational wave analysis
+"""
+import subprocess, os, sys, signal, warnings
+
+# Filter annoying Cython warnings that serve no good purpose.
+warnings.filterwarnings("ignore", message="numpy.dtype size changed")
+warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
+import logging
+import random
+import string
+import importlib.util
+import importlib.machinery
+from datetime import datetime as dt
+
+try:
+    # This will fail when pycbc is imported during the build process,
+    # before version.py has been generated.
+    from .version import git_hash
+    from .version import version as pycbc_version
+    from .version import PyCBCVersionAction
+except:
+    git_hash = 'none'
+    pycbc_version = 'none'
+    PyCBCVersionAction = None
+
+__version__ = pycbc_version
+
+
+
+[docs] +class LogFormatter(logging.Formatter): + """ + Format the logging appropriately + This will return the log time in the ISO 6801 standard, + but with millisecond precision + https://en.wikipedia.org/wiki/ISO_8601 + e.g. 2022-11-18T09:53:01.554+00:00 + """ + converter = dt.fromtimestamp + +
+[docs] + def formatTime(self, record, datefmt=None): + ct = self.converter(record.created).astimezone() + t = ct.strftime("%Y-%m-%dT%H:%M:%S") + s = f"{t}.{int(record.msecs):03d}" + timezone = ct.strftime('%z') + timezone_colon = f"{timezone[:-2]}:{timezone[-2:]}" + s += timezone_colon + return s
+
+ + + +
+[docs] +def add_common_pycbc_options(parser): + """ + Common utility to add standard options to each PyCBC executable. + + Parameters + ---------- + parser : argparse.ArgumentParser + The argument parser to which the options will be added + """ + group = parser.add_argument_group( + title="PyCBC common options", + description="Common options for PyCBC executables.", + ) + group.add_argument( + '-v', + '--verbose', + action='count', + default=0, + help=( + 'Add verbosity to logging. Adding the option ' + 'multiple times makes logging progressively ' + 'more verbose, e.g. --verbose or -v provides ' + 'logging at the info level, but -vv or ' + '--verbose --verbose provides debug logging.' + ) + ) + group.add_argument( + '--version', + action=PyCBCVersionAction, + )
+ + + +
+[docs] +def init_logging(verbose=False, default_level=0, to_file=None, + format='%(asctime)s %(levelname)s : %(message)s'): + """Common utility for setting up logging in PyCBC. + + Installs a signal handler such that verbosity can be activated at + run-time by sending a SIGUSR1 to the process. + + Parameters + ---------- + verbose : bool or int, optional + What level to set the verbosity level to. Accepts either a boolean + or an integer representing the level to set. If True/False will set to + ``logging.INFO``/``logging.WARN``. For higher logging levels, pass + an integer representing the level to set. (1 = INFO, 2 = DEBUG). + default_level : int, optional + The default level, to be added to any verbose option if it is an + integer, or set to this value if it is None or False + to_file : filepath + Set up logging to a file instead of the stderr. File will be + overwritten if it already exists. + format : str, optional + The format to use for logging messages. + """ + def sig_handler(signum, frame): + logger = logging.getLogger() + log_level = logger.level + if log_level == logging.DEBUG: + log_level = logging.WARN + else: + log_level = logging.DEBUG + logging.warning('Got signal %d, setting log level to %d', + signum, log_level) + logger.setLevel(log_level) + + signal.signal(signal.SIGUSR1, sig_handler) + + # See https://docs.python.org/3/library/logging.html#levels + # for log level definitions + logger = logging.getLogger() + verbose_int = default_level if verbose is None \ + else int(verbose) + default_level + logger.setLevel(logging.WARNING - verbose_int * 10) # Initial setting + if to_file is not None: + handler = logging.FileHandler(to_file, mode='w') + else: + handler = logging.StreamHandler() + logger.addHandler(handler) + handler.setFormatter(LogFormatter(fmt=format))
+ + + +
+[docs] +def makedir(path): + """ + Make the analysis directory path and any parent directories that don't + already exist. Will do nothing if path already exists. + """ + if path is not None and not os.path.exists(path): + os.makedirs(path)
+ + + +# PyCBC-Specific Constants + +# Set the value we want any aligned memory calls to use +# N.B.: *Not* all pycbc memory will be aligned to multiples +# of this value + +PYCBC_ALIGNMENT = 32 + +# Dynamic range factor: a large constant for rescaling +# GW strains. This is 2**69 rounded to 17 sig.fig. + +DYN_RANGE_FAC = 5.9029581035870565e+20 + +# String used to separate parameters in configuration file section headers. +# This is used by the distributions and transforms modules +VARARGS_DELIM = '+' + +# Check for optional components of the PyCBC Package +try: + # This is a crude check to make sure that the driver is installed + try: + loaded_modules = subprocess.Popen(['lsmod'], stdout=subprocess.PIPE).communicate()[0] + loaded_modules = loaded_modules.decode() + if 'nvidia' not in loaded_modules: + raise ImportError("nvidia driver may not be installed correctly") + except OSError: + pass + + # Check that pycuda is installed and can talk to the driver + import pycuda.driver as _pycudadrv + + HAVE_CUDA=True +except ImportError: + HAVE_CUDA=False + +# Check for MKL capability +try: + import pycbc.fft.mkl + HAVE_MKL=True +except (ImportError, OSError): + HAVE_MKL=False + +# Check for openmp suppport, currently we pressume it exists, unless on +# platforms (mac) that are silly and don't use the standard gcc. +if sys.platform == 'darwin': + HAVE_OMP = False +else: + HAVE_OMP = True + +# https://pynative.com/python-generate-random-string/ +
+[docs] +def random_string(stringLength=10): + """Generate a random string of fixed length """ + letters = string.ascii_lowercase + return ''.join(random.choice(letters) for i in range(stringLength))
+ + +
+[docs] +def gps_now(): + """Return the current GPS time as a float using Astropy. + """ + from astropy.time import Time + + return float(Time.now().gps)
+ + +# This is needed as a backwards compatibility. The function was removed in +# python 3.12. +
+[docs] +def load_source(modname, filename): + loader = importlib.machinery.SourceFileLoader(modname, filename) + spec = importlib.util.spec_from_file_location(modname, filename, + loader=loader) + module = importlib.util.module_from_spec(spec) + # The module is always executed and not cached in sys.modules. + # Uncomment the following line to cache the module. + # sys.modules[module.__name__] = module + loader.exec_module(module) + return module
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/bin_utils.html b/latest/html/_modules/pycbc/bin_utils.html new file mode 100644 index 00000000000..00238015bbe --- /dev/null +++ b/latest/html/_modules/pycbc/bin_utils.html @@ -0,0 +1,966 @@ + + + + + + pycbc.bin_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.bin_utils
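Before the listing, a minimal sketch of the binning utilities defined here; the bin layout and coordinates are illustrative.

>>> from pycbc.bin_utils import LinearBins, NDBins, BinnedArray
>>> bins = NDBins((LinearBins(0., 10., 5),))   # five equal-width bins on [0, 10]
>>> counts = BinnedArray(bins)
>>> counts[3.2,] += 1     # real-valued coordinates map to bin indices (here bin 1)
>>> counts[7.9,] += 1     # bin 3
>>> counts.argmax()       # centre of the first bin holding the maximum count
(3.0,)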

+from bisect import bisect_right
+try:
+    from fpconst import PosInf, NegInf
+except ImportError:
+    # fpconst is not part of the standard library and might not be available
+    PosInf = float("+inf")
+    NegInf = float("-inf")
+import numpy
+import math
+import logging
+
+logger = logging.getLogger('pycbc.bin_utils')
+
+
+
+[docs] +class Bins(object): + + """ + Parent class for 1-dimensional binnings. + + Not intended to be used directly, but to be subclassed for use in real + bins classes. + """ + def __init__(self, minv, maxv, n): + """ + Initialize a Bins instance. The three arguments are the + minimum and maximum of the values spanned by the bins, and + the number of bins to place between them. Subclasses may + require additional arguments, or different arguments + altogether. + """ + # convenience code to do some common initialization and + # input checking + if not isinstance(n, int): + raise TypeError(n) + if n < 1: + raise ValueError(n) + if maxv <= minv: + raise ValueError((minv, maxv)) + self.minv = minv + self.maxv = maxv + self.n = n + + def __len__(self): + return self.n + + def __getitem__(self, x): + """ + Convert a co-ordinate to a bin index. The co-ordinate can + be a single value, or a Python slice instance describing a + range of values. If a single value is given, it is mapped + to the bin index corresponding to that value. If a slice + is given, it is converted to a slice whose lower bound is + the index of the bin in which the slice's lower bound + falls, and whose upper bound is 1 greater than the index of + the bin in which the slice's upper bound falls. Steps are + not supported in slices. + """ + if isinstance(x, slice): + if x.step is not None: + raise NotImplementedError("step not supported: %s" % repr(x)) + return slice(self[x.start] if x.start is not None + else 0, self[x.stop] + 1 if x.stop is not None + else len(self)) + raise NotImplementedError + + def __iter__(self): + """ + If __iter__ does not exist, Python uses __getitem__ with + range(0) as input to define iteration. This is nonsensical + for bin objects, so explicitly unsupport iteration. + """ + raise NotImplementedError + +
+[docs] + def lower(self): + """ + Return an array containing the locations of the lower + boundaries of the bins. + """ + raise NotImplementedError
+ + +
+[docs] + def centres(self): + """ + Return an array containing the locations of the bin + centres. + """ + raise NotImplementedError
+ + +
+[docs] + def upper(self): + """ + Return an array containing the locations of the upper + boundaries of the bins. + """ + raise NotImplementedError
+
+ + + +
+[docs] +class IrregularBins(Bins): + + """ + Bins with arbitrary, irregular spacing. We only require strict + monotonicity of the bin boundaries. N boundaries define N-1 bins. + + Example: + + >>> x = IrregularBins([0.0, 11.0, 15.0, numpy.inf]) + >>> len(x) + 3 + >>> x[1] + 0 + >>> x[1.5] + 0 + >>> x[13] + 1 + >>> x[25] + 2 + >>> x[4:17] + slice(0, 3, None) + >>> IrregularBins([0.0, 15.0, 11.0]) + Traceback (most recent call last): + ... + ValueError: non-monotonic boundaries provided + >>> y = IrregularBins([0.0, 11.0, 15.0, numpy.inf]) + >>> x == y + True + """ + def __init__(self, boundaries): + """ + Initialize a set of custom bins with the bin boundaries. + This includes all left edges plus the right edge. The + boundaries must be monotonic and there must be at least two + elements. + """ + # check pre-conditions + if len(boundaries) < 2: + raise ValueError("less than two boundaries provided") + boundaries = tuple(boundaries) + if any(a > b for a, b in zip(boundaries[:-1], boundaries[1:])): + raise ValueError("non-monotonic boundaries provided") + + self.boundaries = boundaries + self.n = len(boundaries) - 1 + self.minv = boundaries[0] + self.maxv = boundaries[-1] + + def __getitem__(self, x): + if isinstance(x, slice): + return super(IrregularBins, self).__getitem__(x) + if self.minv <= x < self.maxv: + return bisect_right(self.boundaries, x) - 1 + # special measure-zero edge case + if x == self.maxv: + return len(self.boundaries) - 2 + raise IndexError(x) + +
+[docs] + def lower(self): + return numpy.array(self.boundaries[:-1])
+ + +
+[docs] + def upper(self): + return numpy.array(self.boundaries[1:])
+ + +
+[docs] + def centres(self): + return (self.lower() + self.upper()) / 2.0
+
+ + + +
+[docs] +class LinearBins(Bins): + + """ + Linearly-spaced bins. There are n bins of equal size, the first + bin starts on the lower bound and the last bin ends on the upper + bound inclusively. + + Example: + + >>> x = LinearBins(1.0, 25.0, 3) + >>> x.lower() + array([ 1., 9., 17.]) + >>> x.upper() + array([ 9., 17., 25.]) + >>> x.centres() + array([ 5., 13., 21.]) + >>> x[1] + 0 + >>> x[1.5] + 0 + >>> x[10] + 1 + >>> x[25] + 2 + >>> x[0:27] + Traceback (most recent call last): + ... + IndexError: 0 + >>> x[1:25] + slice(0, 3, None) + >>> x[:25] + slice(0, 3, None) + >>> x[10:16.9] + slice(1, 2, None) + >>> x[10:17] + slice(1, 3, None) + >>> x[10:] + slice(1, 3, None) + """ + def __init__(self, minv, maxv, n): + super(LinearBins, self).__init__(minv, maxv, n) + self.delta = float(maxv - minv) / n + + def __getitem__(self, x): + if isinstance(x, slice): + return super(LinearBins, self).__getitem__(x) + if self.minv <= x < self.maxv: + return int(math.floor((x - self.minv) / self.delta)) + if x == self.maxv: + # special "measure zero" corner case + return len(self) - 1 + raise IndexError(x) + +
+[docs] + def lower(self): + return numpy.linspace(self.minv, self.maxv - self.delta, len(self))
+ + +
+[docs] + def centres(self): + return numpy.linspace(self.minv + self.delta / 2., + self.maxv - self.delta / 2., len(self))
+ + +
+[docs] + def upper(self): + return numpy.linspace(self.minv + self.delta, self.maxv, len(self))
+
+ + + +
+[docs] +class LinearPlusOverflowBins(Bins): + + """ + Linearly-spaced bins with overflow at the edges. + + There are n-2 bins of equal size. The bin 1 starts on the lower bound and + bin n-2 ends on the upper bound. Bins 0 and n-1 are overflow going from + -infinity to the lower bound and from the upper bound to +infinity + respectively. Must have n >= 3. + + Example: + + >>> x = LinearPlusOverflowBins(1.0, 25.0, 5) + >>> x.centres() + array([-inf, 5., 13., 21., inf]) + >>> x.lower() + array([-inf, 1., 9., 17., 25.]) + >>> x.upper() + array([ 1., 9., 17., 25., inf]) + >>> x[float("-inf")] + 0 + >>> x[0] + 0 + >>> x[1] + 1 + >>> x[10] + 2 + >>> x[24.99999999] + 3 + >>> x[25] + 4 + >>> x[100] + 4 + >>> x[float("+inf")] + 4 + >>> x[float("-inf"):9] + slice(0, 3, None) + >>> x[9:float("+inf")] + slice(2, 5, None) + """ + def __init__(self, minv, maxv, n): + if n < 3: + raise ValueError("n must be >= 3") + super(LinearPlusOverflowBins, self).__init__(minv, maxv, n) + self.delta = float(maxv - minv) / (n - 2) + + def __getitem__(self, x): + if isinstance(x, slice): + return super(LinearPlusOverflowBins, self).__getitem__(x) + if self.minv <= x < self.maxv: + return int(math.floor((x - self.minv) / self.delta)) + 1 + if x >= self.maxv: + # +infinity overflow bin + return len(self) - 1 + if x < self.minv: + # -infinity overflow bin + return 0 + raise IndexError(x) + +
+[docs] + def lower(self): + return numpy.concatenate( + (numpy.array([NegInf]), + self.minv + self.delta * numpy.arange(len(self) - 2), + numpy.array([self.maxv])) + )
+ + +
+[docs] + def centres(self): + return numpy.concatenate( + (numpy.array([NegInf]), + self.minv + self.delta * (numpy.arange(len(self) - 2) + 0.5), + numpy.array([PosInf])) + )
+ + +
+[docs] + def upper(self): + return numpy.concatenate( + (numpy.array([self.minv]), + self.minv + self.delta * (numpy.arange(len(self) - 2) + 1), + numpy.array([PosInf])) + )
+
+ + + +
+[docs] +class LogarithmicBins(Bins): + + """ + Logarithmically-spaced bins. + + There are n bins, each of whose upper and lower bounds differ by the same + factor. The first bin starts on the lower bound, and the last bin ends on + the upper bound inclusively. + + Example: + + >>> x = LogarithmicBins(1.0, 25.0, 3) + >>> x[1] + 0 + >>> x[5] + 1 + >>> x[25] + 2 + """ + def __init__(self, minv, maxv, n): + super(LogarithmicBins, self).__init__(minv, maxv, n) + self.delta = (math.log(maxv) - math.log(minv)) / n + + def __getitem__(self, x): + if isinstance(x, slice): + return super(LogarithmicBins, self).__getitem__(x) + if self.minv <= x < self.maxv: + return int(math.floor((math.log(x) - math.log(self.minv)) / + self.delta)) + if x == self.maxv: + # special "measure zero" corner case + return len(self) - 1 + raise IndexError(x) + +
+[docs] + def lower(self): + return numpy.exp( + numpy.linspace(math.log(self.minv), math.log(self.maxv) - + self.delta, len(self)) + )
+ + +
+[docs] + def centres(self): + return numpy.exp( + numpy.linspace(math.log(self.minv), math.log(self.maxv) - + self.delta, len(self)) + self.delta / 2. + )
+ + +
+[docs] + def upper(self): + return numpy.exp( + numpy.linspace(math.log(self.minv) + self.delta, + math.log(self.maxv), len(self)) + )
+
+ + + +
+[docs] +class LogarithmicPlusOverflowBins(Bins): + + """ + Logarithmically-spaced bins plus one bin at each end that goes to + zero and positive infinity respectively. There are n-2 bins each + of whose upper and lower bounds differ by the same factor. Bin 1 + starts on the lower bound, and bin n-2 ends on the upper bound + inclusively. Bins 0 and n-1 are overflow bins extending from 0 to + the lower bound and from the upper bound to +infinity respectively. + Must have n >= 3. + + Example: + + >>> x = LogarithmicPlusOverflowBins(1.0, 25.0, 5) + >>> x[0] + 0 + >>> x[1] + 1 + >>> x[5] + 2 + >>> x[24.999] + 3 + >>> x[25] + 4 + >>> x[100] + 4 + >>> x.lower() + array([ 0. , 1. , 2.92401774, 8.54987973, 25. ]) + >>> x.upper() + array([ 1. , 2.92401774, 8.54987973, 25. , inf]) + >>> x.centres() + array([ 0. , 1.70997595, 5. , 14.62008869, inf]) + """ + def __init__(self, minv, maxv, n): + if n < 3: + raise ValueError("n must be >= 3") + super(LogarithmicPlusOverflowBins, self).__init__(minv, maxv, n) + self.delta = (math.log(maxv) - math.log(minv)) / (n - 2) + + def __getitem__(self, x): + if isinstance(x, slice): + return super(LogarithmicPlusOverflowBins, self).__getitem__(x) + if self.minv <= x < self.maxv: + return 1 + int(math.floor((math.log(x) - math.log(self.minv)) / + self.delta)) + if x >= self.maxv: + # infinity overflow bin + return len(self) - 1 + if x < self.minv: + # zero overflow bin + return 0 + raise IndexError(x) + +
+[docs] + def lower(self): + return numpy.concatenate( + (numpy.array([0.]), + numpy.exp(numpy.linspace(math.log(self.minv), math.log(self.maxv), + len(self) - 1)) + ) + )
+ + +
+[docs] + def centres(self): + return numpy.concatenate( + (numpy.array([0.]), + numpy.exp(numpy.linspace(math.log(self.minv), math.log(self.maxv) - + self.delta, len(self) - 2) + self.delta / 2.), + numpy.array([PosInf]) + ) + )
+ + +
+[docs] + def upper(self): + return numpy.concatenate( + (numpy.exp(numpy.linspace(math.log(self.minv), math.log(self.maxv), + len(self) - 1)), + numpy.array([PosInf]) + ) + )
+
+ + + +
+[docs] +class NDBins(tuple): + + """ + Multi-dimensional co-ordinate binning. An instance of this object + is used to convert a tuple of co-ordinates into a tuple of bin + indices. This can be used to allow the contents of an array object + to be accessed with real-valued coordinates. + + NDBins is a subclass of the tuple builtin, and is initialized with + an iterable of instances of subclasses of Bins. Each Bins subclass + instance describes the binning to apply in the corresponding + co-ordinate direction, and the number of them sets the dimensions + of the binning. + + Example: + + >>> x = NDBins((LinearBins(1, 25, 3), LogarithmicBins(1, 25, 3))) + >>> x[1, 1] + (0, 0) + >>> x[1.5, 1] + (0, 0) + >>> x[10, 1] + (1, 0) + >>> x[1, 5] + (0, 1) + >>> x[1, 1:5] + (0, slice(0, 2, None)) + >>> x.centres() + (array([ 5., 13., 21.]), array([ 1.70997595, 5. , 14.62008869])) + + Note that the co-ordinates to be converted must be a tuple, even if + it is only a 1-dimensional co-ordinate. + """ + def __new__(cls, *args): + new = tuple.__new__(cls, *args) + new.minv = tuple(b.minv for b in new) + new.maxv = tuple(b.maxv for b in new) + new.shape = tuple(len(b) for b in new) + return new + + def __getitem__(self, coords): + """ + When coords is a tuple, it is interpreted as an + N-dimensional co-ordinate which is converted to an N-tuple + of bin indices by the Bins instances in this object. + Otherwise coords is interpeted as an index into the tuple, + and the corresponding Bins instance is returned. + + Example: + + >>> x = NDBins((LinearBins(1, 25, 3), LogarithmicBins(1, 25, 3))) + >>> x[1, 1] + (0, 0) + >>> type(x[1]) + <class 'pylal.rate.LogarithmicBins'> + + When used to convert co-ordinates to bin indices, each + co-ordinate can be anything the corresponding Bins instance + will accept. Note that the co-ordinates to be converted + must be a tuple, even if it is only a 1-dimensional + co-ordinate. + """ + if isinstance(coords, tuple): + if len(coords) != len(self): + raise ValueError("dimension mismatch") + return tuple(map(lambda b, c: b[c], self, coords)) + else: + return tuple.__getitem__(self, coords) + +
+[docs] + def lower(self): + """ + Return a tuple of arrays, where each array contains the + locations of the lower boundaries of the bins in the + corresponding dimension. + """ + return tuple(b.lower() for b in self)
+ + +
+[docs] + def centres(self): + """ + Return a tuple of arrays, where each array contains the + locations of the bin centres for the corresponding + dimension. + """ + return tuple(b.centres() for b in self)
+ + +
+[docs] + def upper(self): + """ + Return a tuple of arrays, where each array contains the + locations of the upper boundaries of the bins in the + corresponding dimension. + """ + return tuple(b.upper() for b in self)
+
+ + + +
+[docs] +class BinnedArray(object): + + """ + A convenience wrapper, using the NDBins class to provide access to + the elements of an array object. Technical reasons preclude + providing a subclass of the array object, so the array data is made + available as the "array" attribute of this class. + + Examples: + + Note that even for 1 dimensional arrays the index must be a tuple. + + >>> x = BinnedArray(NDBins((LinearBins(0, 10, 5),))) + >>> x.array + array([ 0., 0., 0., 0., 0.]) + >>> x[0,] += 1 + >>> x[0.5,] += 1 + >>> x.array + array([ 2., 0., 0., 0., 0.]) + >>> x.argmax() + (1.0,) + + Note the relationship between the binning limits, the bin centres, + and the co-ordinates of the BinnedArray + + >>> x = BinnedArray(NDBins((LinearBins(-0.5, 1.5, 2), \ + LinearBins(-0.5, 1.5, 2)))) + >>> x.bins.centres() + (array([ 0., 1.]), array([ 0., 1.])) + >>> x[0, 0] = 0 + >>> x[0, 1] = 1 + >>> x[1, 0] = 2 + >>> x[1, 1] = 4 + >>> x.array + array([[ 0., 1.], + [ 2., 4.]]) + >>> x[0, 0] + 0.0 + >>> x[0, 1] + 1.0 + >>> x[1, 0] + 2.0 + >>> x[1, 1] + 4.0 + >>> x.argmin() + (0.0, 0.0) + >>> x.argmax() + (1.0, 1.0) + """ + def __init__(self, bins, array=None, dtype="double"): + self.bins = bins + if array is None: + self.array = numpy.zeros(bins.shape, dtype=dtype) + else: + if array.shape != bins.shape: + raise ValueError("input array and input bins must have the " + "same shape") + self.array = array + + def __getitem__(self, coords): + return self.array[self.bins[coords]] + + def __setitem__(self, coords, val): + self.array[self.bins[coords]] = val + + def __len__(self): + return len(self.array) + +
+[docs] + def copy(self): + """ + Return a copy of the BinnedArray. The .bins attribute is + shared with the original. + """ + return type(self)(self.bins, self.array.copy())
+ + +
+[docs] + def centres(self): + """ + Return a tuple of arrays containing the bin centres for + each dimension. + """ + return self.bins.centres()
+ + +
+[docs] + def argmin(self): + """ + Return the co-ordinates of the bin centre containing the + minimum value. Same as numpy.argmin(), converting the + indexes to bin co-ordinates. + """ + return tuple(centres[index] for centres, index in + zip(self.centres(), numpy.unravel_index(self.array.argmin(), + self.array.shape)))
+ + +
+[docs] + def argmax(self): + """ + Return the co-ordinates of the bin centre containing the + maximum value. Same as numpy.argmax(), converting the + indexes to bin co-ordinates. + """ + return tuple(centres[index] for centres, index in + zip(self.centres(), numpy.unravel_index(self.array.argmax(), + self.array.shape)))
+ + +
+[docs] + def logregularize(self, epsilon=2**-1074): + """ + Find bins <= 0, and set them to epsilon, This has the + effect of allowing the logarithm of the array to be + evaluated without error. + """ + self.array[self.array <= 0] = epsilon + return self
+
+ + + +
+[docs] +class BinnedRatios(object): + + """ + Like BinnedArray, but provides a numerator array and a denominator + array. The incnumerator() method increments a bin in the numerator + by the given weight, and the incdenominator() method increments a + bin in the denominator by the given weight. There are no methods + provided for setting or decrementing either, but the they are + accessible as the numerator and denominator attributes, which are + both BinnedArray objects. + """ + def __init__(self, bins, dtype="double"): + self.numerator = BinnedArray(bins, dtype=dtype) + self.denominator = BinnedArray(bins, dtype=dtype) + + def __getitem__(self, coords): + return self.numerator[coords] / self.denominator[coords] + +
+[docs] + def bins(self): + return self.numerator.bins
+ + +
+[docs] + def incnumerator(self, coords, weight=1): + """ + Add weight to the numerator bin at coords. + """ + self.numerator[coords] += weight
+ + +
+[docs] + def incdenominator(self, coords, weight=1): + """ + Add weight to the denominator bin at coords. + """ + self.denominator[coords] += weight
+ + +
+[docs] + def ratio(self): + """ + Compute and return the array of ratios. + """ + return self.numerator.array / self.denominator.array
+ + +
+[docs] + def regularize(self): + """ + Find bins in the denominator that are 0, and set them to 1. + Presumably the corresponding bin in the numerator is also + 0, so this has the effect of allowing the ratio array to be + evaluated without error, returning zeros in those bins that + have had no weight added to them. + """ + self.denominator.array[self.denominator.array == 0] = 1 + return self
+ + +
+[docs] + def logregularize(self, epsilon=2**-1074): + """ + Find bins in the denominator that are 0, and set them to 1, + while setting the corresponding bin in the numerator to + float epsilon. This has the effect of allowing the + logarithm of the ratio array to be evaluated without error. + """ + self.numerator.array[self.denominator.array == 0] = epsilon + self.denominator.array[self.denominator.array == 0] = 1 + return self
+ + +
+[docs] + def centres(self): + """ + Return a tuple of arrays containing the bin centres for + each dimension. + """ + return self.numerator.bins.centres()
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/boundaries.html b/latest/html/_modules/pycbc/boundaries.html new file mode 100644 index 00000000000..8cc96ad6785 --- /dev/null +++ b/latest/html/_modules/pycbc/boundaries.html @@ -0,0 +1,600 @@ + + + + + + pycbc.boundaries — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.boundaries
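Before the listing, a minimal sketch of the Bounds class defined here; the interval limits and test values are illustrative.

>>> from pycbc.boundaries import Bounds
>>> b = Bounds(0., 2., cyclic=True)   # right-open, cyclic interval [0, 2)
>>> 1.5 in b
True
>>> b.apply_conditions(2.5)           # wrapped back into the interval
0.5
>>> r = Bounds(0., 1., btype_max='reflected')
>>> r.apply_conditions(1.25)          # reflected off the upper bound
0.75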

+# Copyright (C) 2016  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This modules provides utilities for manipulating parameter boundaries. Namely,
+classes are offered that will map values to a specified domain using either
+cyclic boundaries or reflected boundaries.
+"""
+
+import numpy
+import logging
+
+logger = logging.getLogger('pycbc.boundaries')
+
+
+class _Bound(float):
+    """Adds methods to float for boundary comparisons."""
+
+    name = None
+
+    def larger(self, other):
+        """A function to determine whether or not `other` is larger
+        than the bound. This raises a NotImplementedError; classes that
+        inherit from this must define it.
+        """
+        raise NotImplementedError("larger function not set")
+
+    def smaller(self, other):
+        """A function to determine whether or not `other` is smaller
+        than the bound. This raises a NotImplementedError; classes that
+        inherit from this must define it.
+        """
+        raise NotImplementedError("smaller function not set")
+
+
+
+[docs] +class OpenBound(_Bound): + """Sets larger and smaller functions to be `>` and `<`, respectively.""" + + name = 'open' + +
+[docs] + def larger(self, other): + """Returns True if `other` is `>`, False otherwise""" + return self > other
+ + +
+[docs] + def smaller(self, other): + """Returns True if `other` is `<`, False otherwise.""" + return self < other
+
+ + + +
+[docs] +class ClosedBound(_Bound): + """Sets larger and smaller functions to be `>=` and `<=`, respectively.""" + + name = 'closed' + +
+[docs] + def larger(self, other): + return self >= other
+ + +
+[docs] + def smaller(self, other): + return self <= other
+
+ + + +
+[docs] +class ReflectedBound(ClosedBound): + """Inherits from `ClosedBound`, adding reflection functions.""" + + name = 'reflected' + +
+[docs] + def reflect(self, value): + return 2*self - value
+ + +
+[docs] + def reflect_left(self, value): + """Only reflects the value if is > self.""" + if value > self: + value = self.reflect(value) + return value
+ + +
+[docs] + def reflect_right(self, value): + """Only reflects the value if is < self.""" + if value < self: + value = self.reflect(value) + return value
+
+ + + +boundary_types = { + OpenBound.name: OpenBound, + ClosedBound.name: ClosedBound, + ReflectedBound.name: ReflectedBound +} + + +# +# Helper functions for applying conditions to boundaries +# + +
+[docs] +def apply_cyclic(value, bounds): + """Given a value, applies cyclic boundary conditions between the minimum + and maximum bounds. + + Parameters + ---------- + value : float + The value to apply the cyclic conditions to. + bounds : Bounds instance + Boundaries to use for applying cyclic conditions. + + Returns + ------- + float + The value after the cyclic bounds are applied. + """ + return (value - bounds._min) %(bounds._max - bounds._min) + bounds._min
+ + +
+[docs] +def reflect_well(value, bounds): + """Given some boundaries, reflects the value until it falls within both + boundaries. This is done iteratively, reflecting left off of the + `boundaries.max`, then right off of the `boundaries.min`, etc. + + Parameters + ---------- + value : float + The value to apply the reflected boundaries to. + bounds : Bounds instance + Boundaries to reflect between. Both `bounds.min` and `bounds.max` must + be instances of `ReflectedBound`, otherwise an AttributeError is + raised. + + Returns + ------- + float + The value after being reflected between the two bounds. + """ + while value not in bounds: + value = bounds._max.reflect_left(value) + value = bounds._min.reflect_right(value) + return value
+ + + +def _pass(value): + """Just return the given value.""" + return value + + +# +# Bounds class +# + +
+[docs] +class Bounds(object): + """Creates and stores bounds using the given values. + + The type of boundaries used can be set using the `btype_(min|max)` + parameters. These arguments set what kind of boundary is used at the + minimum and maximum bounds. Specifically, if `btype_min` (`btype_max`) is + set to: + + * "open": the minimum (maximum) boundary will be an instance of + `OpenBound`. This means that a value must be `>` (`<`) the bound + for it to be considered within the bounds. + * "closed": the minimum (maximum) boundary will be an instance of + `ClosedBound`. This means that a value must be `>=` (`<=`) the bound + for it to be considered within the bounds. + * "reflected": the minimum (maximum) boundary will be an isntance of + `ReflectedBound`. This means that a value will be reflected to the + right (left) if `apply_conditions` is used on the value. For more + details see `apply_conditions`. + + If the `cyclic` keyword is set to True, then `apply_conditions` will cause + values to be wrapped around to the minimum (maximum) bound if the value + is > (<=) the maximum (minimum) bound. For more details see + `apply_conditions`. + + Values can be checked whether or not they occur within the bounds using + `in`; e.g., `6 in bounds`. This is done without applying any boundary + conditions. To apply conditions, then check whether the value is in + bounds, use the `contains_conditioned` method. + + The default is for the minimum bound to be "closed" and the maximum bound + to be "open", i.e., a right-open interval. + + Parameters + ---------- + min_bound : {-numpy.inf, float} + The value of the lower bound. Default is `-inf`. + max_bound : {numpy.inf, float} + The value of the upper bound. Default is `inf`. + btype_min : {'closed', string} + The type of the lower bound; options are "closed", "open", or + "reflected". Default is "closed". + btype_min : {'open', string} + The type of the lower bound; options are "closed", "open", or + "reflected". Default is "open". + cyclic : {False, bool} + Whether or not to make the bounds cyclic; default is False. If True, + both the minimum and maximum bounds must be finite. + + Examples + -------- + Create a right-open interval between -1 and 1 and test whether various + values are within them: + >>> bounds = Bounds(-1., 1.) 
+ >>> -1 in bounds + True + >>> 0 in bounds + True + >>> 1 in bounds + False + + Create an open interval between -1 and 1 and test the same values: + >>> bounds = Bounds(-1, 1, btype_min="open") + >>> -1 in bounds + False + >>> 0 in bounds + True + >>> 1 in bounds + False + + Create cyclic bounds between -1 and 1 and plot the effect of conditioning + on points between -10 and 10: + >>> bounds = Bounds(-1, 1, cyclic=True) + >>> x = numpy.linspace(-10, 10, num=1000) + >>> conditioned_x = bounds.apply_conditions(x) + >>> fig = pyplot.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, x, c='b', lw=2, label='input') + >>> ax.plot(conditioned_x, x, c='r', lw=1) + >>> ax.vlines([-1., 1.], x.min(), x.max(), color='k', linestyle='--') + >>> ax.set_title('cyclic bounds between x=-1,1') + >>> fig.show() + + Create a reflected bound at -1 and plot the effect of conditioning: + >>> bounds = Bounds(-1, 1, btype_min='reflected') + >>> x = numpy.linspace(-10, 10, num=1000) + >>> conditioned_x = bounds.apply_conditions(x) + >>> fig = pyplot.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, x, c='b', lw=2, label='input') + >>> ax.plot(conditioned_x, x, c='r', lw=1) + >>> ax.vlines([-1., 1.], x.min(), x.max(), color='k', linestyle='--') + >>> ax.set_title('reflected right at x=-1') + >>> fig.show() + + Create a reflected bound at 1 and plot the effect of conditioning: + >>> bounds = Bounds(-1, 1, btype_max='reflected') + >>> x = numpy.linspace(-10, 10, num=1000) + >>> conditioned_x = bounds.apply_conditions(x) + >>> fig = pyplot.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, x, c='b', lw=2, label='input') + >>> ax.plot(conditioned_x, x, c='r', lw=1) + >>> ax.vlines([-1., 1.], x.min(), x.max(), color='k', linestyle='--') + >>> ax.set_title('reflected left at x=1') + >>> fig.show() + + Create reflected bounds at -1 and 1 and plot the effect of conditioning: + >>> bounds = Bounds(-1, 1, btype_min='reflected', btype_max='reflected') + >>> x = numpy.linspace(-10, 10, num=1000) + >>> conditioned_x = bounds.apply_conditions(x) + >>> fig = pyplot.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, x, c='b', lw=2, label='input') + >>> ax.plot(conditioned_x, x, c='r', lw=1) + >>> ax.vlines([-1., 1.], x.min(), x.max(), color='k', linestyle='--') + >>> ax.set_title('reflected betewen x=-1,1') + >>> fig.show() + """ + + def __init__(self, min_bound=-numpy.inf, max_bound=numpy.inf, + btype_min='closed', btype_max='open', cyclic=False): + # check boundary values + if min_bound >= max_bound: + raise ValueError("min_bound must be < max_bound") + if cyclic and not ( + numpy.isfinite(min_bound) and numpy.isfinite(max_bound)): + raise ValueError("if using cyclic, min and max bounds must both " + "be finite") + # store bounds + try: + self._min = boundary_types[btype_min](min_bound) + except KeyError: + raise ValueError("unrecognized btype_min {}".format(btype_min)) + try: + self._max = boundary_types[btype_max](max_bound) + except KeyError: + raise ValueError("unrecognized btype_max {}".format(btype_max)) + # store cyclic conditions + self._cyclic = bool(cyclic) + # store reflection conditions; we'll vectorize them here so that they + # can be used with arrays + if self._min.name == 'reflected' and self._max.name == 'reflected': + self._reflect = numpy.vectorize(self._reflect_well) + self.reflected = 'well' + elif self._min.name == 'reflected': + self._reflect = numpy.vectorize(self._min.reflect_right) + self.reflected = 'min' + elif self._max.name == 'reflected': + self._reflect = 
numpy.vectorize(self._max.reflect_left) + self.reflected = 'max' + else: + self._reflect = _pass + self.reflected = False + + def __repr__(self): + return str(self.__class__)[:-1] + " " + " ".join( + map(str, ["min", self._min, "max", self._max, + "cyclic", self._cyclic])) + ">" + + @property + def min(self): + """_bounds instance: The minimum bound """ + return self._min + + @property + def max(self): + """_bounds instance: The maximum bound """ + return self._max + + @property + def cyclic(self): + """bool: Whether the bounds are cyclic or not. + """ + return self._cyclic + + def __getitem__(self, ii): + if ii == 0: + return self._min + elif ii == 1: + return self._max + else: + raise IndexError("index {} out of range".format(ii)) + + def __abs__(self): + return abs(self._max - self._min) + + def __contains__(self, value): + return self._min.smaller(value) & self._max.larger(value) + + def _reflect_well(self, value): + """Thin wrapper around `reflect_well` that passes self as the `bounds`. + """ + return reflect_well(value, self) + + def _apply_cyclic(self, value): + """Thin wrapper around `apply_cyclic` that passes self as the `bounds`. + """ + return apply_cyclic(value, self) + +
+[docs] + def apply_conditions(self, value): + """Applies any boundary conditions to the given value. + + The value is manipulated according based on the following conditions: + + * If `self.cyclic` is True then `value` is wrapped around to the + minimum (maximum) bound if `value` is `>= self.max` (`< self.min`) + bound. For example, if the minimum and maximum bounds are `0, 2*pi` + and `value = 5*pi`, then the returned value will be `pi`. + * If `self.min` is a reflected boundary then `value` will be + reflected to the right if it is `< self.min`. For example, if + `self.min = 10` and `value = 3`, then the returned value will be + 17. + * If `self.max` is a reflected boundary then `value` will be + reflected to the left if it is `> self.max`. For example, if + `self.max = 20` and `value = 27`, then the returned value will be + 13. + * If `self.min` and `self.max` are both reflected boundaries, then + `value` will be reflected between the two boundaries until it + falls within the bounds. The first reflection occurs off of the + maximum boundary. For example, if `self.min = 10`, `self.max = + 20`, and `value = 42`, the returned value will be 18 ( the first + reflection yields -2, the second 22, and the last 18). + * If neither bounds are reflected and cyclic is False, then the + value is just returned as-is. + + Parameters + ---------- + value : float + The value to apply the conditions to. + + Returns + ------- + float + The value after the conditions are applied; see above for details. + """ + retval = value + if self._cyclic: + retval = apply_cyclic(value, self) + retval = self._reflect(retval) + if isinstance(retval, numpy.ndarray) and retval.size == 1: + try: + retval = retval[0] + except IndexError: + retval = float(retval) + return retval
+ + +
+[docs] + def contains_conditioned(self, value): + """Runs `apply_conditions` on the given value before testing whether it + is in bounds. Note that if `cyclic` is True, or both bounds + are reflected, than this will always return True. + + Parameters + ---------- + value : float + The value to test. + + Returns + ------- + bool + Whether or not the value is within the bounds after the boundary + conditions are applied. + """ + return self.apply_conditions(value) in self
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/catalog.html b/latest/html/_modules/pycbc/catalog.html new file mode 100644 index 00000000000..aca3343d23a --- /dev/null +++ b/latest/html/_modules/pycbc/catalog.html @@ -0,0 +1,375 @@ + + + + + + pycbc.catalog — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.catalog
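Before the listing, a minimal sketch of querying the catalog; note that constructing a Merger or Catalog downloads event metadata (and, for strain, data files) from GWOSC over the network.

>>> from pycbc.catalog import Merger, Catalog
>>> m = Merger('GW150914')                   # fetch metadata for one event
>>> mc, low, high = m.median1d('mchirp', return_errors=True)  # median and 90% bounds
>>> strain = m.strain('H1')                  # 32 s of H1 strain around the event
>>> c = Catalog(source='gwtc-3')
>>> nevents = len(c)                         # number of mergers in the catalog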

+# Copyright (C) 2017  Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" This package provides information about LIGO/Virgo detections of
+compact binary mergers
+"""
+import logging
+import numpy
+
+logger = logging.getLogger('pycbc.catalog')
+
+_aliases = {}
+_aliases['mchirp'] = 'chirp_mass_source'
+_aliases['mass1'] = 'mass_1_source'
+_aliases['mass2'] = 'mass_2_source'
+_aliases['snr'] = 'network_matched_filter_snr'
+_aliases['z'] = _aliases['redshift'] = 'redshift'
+_aliases['distance'] = 'luminosity_distance'
+
+
+[docs] +def find_event_in_catalog(name, source=None): + """ Get event data from a catalog + + Parameters + ---------- + name: str + Name of event to retrieve from catalog + source: str, None + If not provided, look through each catalog until event is found. + If provided, only check that specific catalog. + """ + from .catalog import list_catalogs, get_source + + if source is None: + catalogs = list_catalogs() + else: + catalogs = [source] + + for catalog in catalogs: + try: + return get_source(catalog)[name] + except KeyError: + # Try common name + data = get_source(catalog) + for mname in data: + cname = data[mname]['commonName'] + if cname.upper() == name.upper(): + name = mname + data = data[name] + return data + else: + raise ValueError(f'Did not find merger matching name: {name}')
+ + +
+[docs] +class Merger(object): + """Informaton about a specific compact binary merger""" + def __init__(self, name, source=None): + """ Return the information of a merger + + Parameters + ---------- + name: str + The name (GW prefixed date) of the merger event. + """ + self.data = find_event_in_catalog(name, source=source) + + # Set some basic params from the dataset + for key in self.data: + setattr(self, '_raw_' + key, self.data[key]) + + for key in _aliases: + setattr(self, key, self.data[_aliases[key]]) + + self.common_name = self.data['commonName'] + self.time = self.data['GPS'] + self.frame = 'source' + +
+[docs] + def median1d(self, name, return_errors=False): + """ Return median 1d marginalized parameters + + Parameters + ---------- + name: str + The name of the parameter requested + return_errors: Optional, {bool, False} + If true, return a second and third parameter that represents the + lower and upper 90% error on the parameter. + + Returns + ------- + param: float or tuple + The requested parameter + """ + if name in _aliases: + name = _aliases[name] + + try: + if return_errors: + mid = self.data[name] + high = self.data[name + '_upper'] + low = self.data[name + '_lower'] + return (mid, low, high) + else: + return self.data[name] + except KeyError as e: + print(e) + raise RuntimeError("Cannot get parameter {}".format(name))
+ + +
+[docs] + def strain(self, ifo, duration=32, sample_rate=4096): + """ Return strain around the event + + Currently this will return the strain around the event in the smallest + format available. Selection of other data is not yet available. + + Parameters + ---------- + ifo: str + The name of the observatory you want strain for. Ex. H1, L1, V1 + + Returns + ------- + strain: pycbc.types.TimeSeries + Strain around the event. + """ + from pycbc.io import get_file + from pycbc.frame import read_frame + + for fdict in self.data['strain']: + if (fdict['detector'] == ifo and fdict['duration'] == duration and + fdict['sampling_rate'] == sample_rate and + fdict['format'] == 'gwf'): + url = fdict['url'] + break + else: + raise ValueError('no strain data is available as requested ' + 'for ' + self.common_name) + + ver = url.split('/')[-1].split('-')[1].split('_')[-1] + sampling_map = {4096: "4KHZ", + 16384: "16KHZ"} + channel = "{}:GWOSC-{}_{}_STRAIN".format( + ifo, sampling_map[sample_rate], ver) + + filename = get_file(url, cache=True) + return read_frame(str(filename), str(channel))
+
+ + + +
+[docs] +class Catalog(object): + """Manage a set of binary mergers""" + + def __init__(self, source='gwtc-3'): + """ Return the set of detected mergers + + The set of detected mergers. At some point this may have some selection + abilities. + """ + from . import catalog + + self.data = catalog.get_source(source=source) + self.mergers = {name: Merger(name, + source=source) for name in self.data} + self.names = self.mergers.keys() + + def __len__(self): + return len(self.mergers) + + def __getitem__(self, key): + try: + return self.mergers[key] + except KeyError: + # Try common name + for m in self.mergers: + if key == self.mergers[m].common_name: + break + else: + raise ValueError('Did not find merger matching' + ' name: {}'.format(key)) + return self.mergers[m] + + def __setitem__(self, key, value): + self.mergers[key] = value + + def __delitem__(self, key): + del self.mergers[key] + + def __iter__(self): + return iter(self.mergers) + +
+[docs] + def median1d(self, param, return_errors=False): + """ Return median 1d marginalized parameters + + Parameters + ---------- + name: str + The name of the parameter requested + return_errors: Optional, {bool, False} + If true, return a second and third parameter that represents the + lower and upper 90% error on the parameter. + + Returns + ------- + param: nump.ndarray or tuple + The requested parameter + """ + v = [self.mergers[m].median1d(param, return_errors=return_errors) + for m in self.mergers] + if return_errors: + value, merror, perror = zip(*v) + return numpy.array(value), numpy.array(merror), numpy.array(perror) + else: + return numpy.array(v)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/catalog/catalog.html b/latest/html/_modules/pycbc/catalog/catalog.html new file mode 100644 index 00000000000..7575e4a55bd --- /dev/null +++ b/latest/html/_modules/pycbc/catalog/catalog.html @@ -0,0 +1,223 @@ + + + + + + pycbc.catalog.catalog — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.catalog.catalog
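Before the listing, a minimal sketch of the catalog-lookup helpers defined here; both calls download JSON from gwosc.org, and the catalog names shown in the comments are examples only.

>>> from pycbc.catalog.catalog import list_catalogs, get_source
>>> cats = list_catalogs()          # e.g. ['GWTC-1-confident', 'GWTC-2', ...]
>>> events = get_source('gwtc-3')   # alias resolved to the GWTC-3 confident catalog
>>> n = len(events)                 # events keyed by their GWOSC identifiers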

+# Copyright (C) 2017  Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" This modules contains information about the announced LIGO/Virgo
+compact binary mergers
+"""
+import logging
+import json
+
+from pycbc.io import get_file
+
+logger = logging.getLogger('pycbc.catalog.catalog')
+
+# For the time being all quantities are the 1-d median value
+# FIXME with posteriors when available and we can just post-process that
+
+# LVC catalogs
+base_lvc_url = "https://www.gwosc.org/eventapi/jsonfull/{}/"
+
+
+[docs] +def lvk_catalogs(): + _catalog_source = "https://gwosc.org/eventapi/json/" + catalog_list = json.load(open(get_file(_catalog_source), 'r')) + return catalog_list
+ + +
+[docs] +def populate_catalogs(): + """ Refresh set of known catalogs + """ + global _catalogs + if _catalogs is None: + # update the LVK catalog information + _catalogs = {cname: 'LVK' for cname in lvk_catalogs().keys()}
+ + +_catalogs = None + +# add some aliases +_aliases = {} +_aliases['gwtc-1'] = 'GWTC-1-confident' +_aliases['gwtc-2'] = 'GWTC-2' +_aliases['gwtc-2.1'] = 'GWTC-2.1-confident' +_aliases['gwtc-3'] = 'GWTC-3-confident' + +
+[docs] +def list_catalogs(): + """Return a list of possible GW catalogs to query""" + populate_catalogs() + return list(_catalogs.keys())
+ + +
+[docs] +def get_source(source): + """Get the source data for a particular GW catalog + """ + populate_catalogs() + + if source in _aliases: + source = _aliases[source] + + if source in _catalogs: + catalog_type = _catalogs[source] + if catalog_type == 'LVK': + fname = get_file(base_lvc_url.format(source), cache=True) + data = json.load(open(fname, 'r')) + else: + raise ValueError('Unkown catalog source {}'.format(source)) + return data['events']
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/conversions.html b/latest/html/_modules/pycbc/conversions.html new file mode 100644 index 00000000000..6c668dc5578 --- /dev/null +++ b/latest/html/_modules/pycbc/conversions.html @@ -0,0 +1,2208 @@ + + + + + + pycbc.conversions — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.conversions
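Before the listing, a minimal sketch of the mass conversions defined below (importing the module assumes lal/lalsimulation are installed); the input masses are illustrative.

>>> from pycbc import conversions
>>> mc = conversions.mchirp_from_mass1_mass2(30., 30.)  # chirp mass, ~26.1 solar masses
>>> conversions.q_from_mass1_mass2(30., 20.)            # mass ratio, defined to be >= 1
1.5
>>> conversions.eta_from_mass1_mass2(30., 20.)          # symmetric mass ratio
0.24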

+# Copyright (C) 2017  Collin Capano, Christopher M. Biwer, Duncan Brown,
+# and Steven Reyes
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides a library of functions that calculate waveform parameters
+from other parameters. All exposed functions in this module's namespace return
+one parameter given a set of inputs.
+"""
+
+import copy
+import numpy
+import logging
+
+import lal
+
+from pycbc.detector import Detector
+import pycbc.cosmology
+from pycbc import neutron_stars as ns
+
+from .coordinates import (
+    spherical_to_cartesian as _spherical_to_cartesian,
+    cartesian_to_spherical as _cartesian_to_spherical)
+
+pykerr = pycbc.libutils.import_optional('pykerr')
+lalsim = pycbc.libutils.import_optional('lalsimulation')
+
+logger = logging.getLogger('pycbc.conversions')
+
+#
+# =============================================================================
+#
+#                           Helper functions
+#
+# =============================================================================
+#
+def ensurearray(*args):
+    """Apply numpy's broadcast rules to the given arguments.
+
+    This will ensure that all of the arguments are numpy arrays and that they
+    all have the same shape. See ``numpy.broadcast_arrays`` for more details.
+
+    It also returns a boolean indicating whether any of the inputs were
+    originally arrays.
+
+    Parameters
+    ----------
+    *args :
+        The arguments to check.
+
+    Returns
+    -------
+    list :
+        A list with length ``N+1`` where ``N`` is the number of given
+        arguments. The first N values are the input arguments as ``ndarrays``s.
+        The last value is a boolean indicating whether any of the
+        inputs was an array.
+    """
+    input_is_array = any(isinstance(arg, numpy.ndarray) for arg in args)
+    args = list(numpy.broadcast_arrays(*args))
+    args.append(input_is_array)
+    return tuple(args)
+
+
+def formatreturn(arg, input_is_array=False):
+    """If the given argument is a numpy array with shape (1,), just returns
+    that value."""
+    if not input_is_array and arg.size == 1:
+        arg = arg.item()
+    return arg
+
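The ensurearray/formatreturn pair above is the broadcasting idiom used by most functions in this module: promote every input to a common-shape array, compute element-wise, then collapse back to a scalar when all inputs were scalars. A minimal sketch of that pattern (illustrative only; ensurearray and formatreturn are module-level helpers rather than part of the public __all__ list):

    import numpy
    from pycbc.conversions import ensurearray, formatreturn

    def demo_mtotal(mass1, mass2):
        # promote the inputs to broadcast arrays, remembering whether any was an array
        mass1, mass2, input_is_array = ensurearray(mass1, mass2)
        mtotal = mass1 + mass2
        # collapse back to a plain float when both inputs were scalars
        return formatreturn(mtotal, input_is_array)

    demo_mtotal(1.4, 1.4)                       # -> 2.8 (a plain float)
    demo_mtotal(numpy.array([1.4, 30.0]), 1.4)  # -> array([ 2.8, 31.4])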
+#
+# =============================================================================
+#
+#                           Fundamental conversions
+#
+# =============================================================================
+#
+
+def sec_to_year(sec):
+    """ Converts number of seconds to number of years """
+    return sec / lal.YRJUL_SI
+
+
+#
+# =============================================================================
+#
+#                           CBC mass functions
+#
+# =============================================================================
+#
+
+[docs] +def primary_mass(mass1, mass2): + """Returns the larger of mass1 and mass2 (p = primary).""" + mass1, mass2, input_is_array = ensurearray(mass1, mass2) + if mass1.shape != mass2.shape: + raise ValueError("mass1 and mass2 must have same shape") + mp = copy.copy(mass1) + mask = mass1 < mass2 + mp[mask] = mass2[mask] + return formatreturn(mp, input_is_array)
+ + + +
+[docs] +def secondary_mass(mass1, mass2): + """Returns the smaller of mass1 and mass2 (s = secondary).""" + mass1, mass2, input_is_array = ensurearray(mass1, mass2) + if mass1.shape != mass2.shape: + raise ValueError("mass1 and mass2 must have same shape") + ms = copy.copy(mass2) + mask = mass1 < mass2 + ms[mask] = mass1[mask] + return formatreturn(ms, input_is_array)
+ + + +
+[docs] +def mtotal_from_mass1_mass2(mass1, mass2): + """Returns the total mass from mass1 and mass2.""" + return mass1 + mass2
+ + + +
+[docs] +def q_from_mass1_mass2(mass1, mass2): + """Returns the mass ratio m1/m2, where m1 >= m2.""" + return primary_mass(mass1, mass2) / secondary_mass(mass1, mass2)
+ + + +
+[docs] +def invq_from_mass1_mass2(mass1, mass2): + """Returns the inverse mass ratio m2/m1, where m1 >= m2.""" + return secondary_mass(mass1, mass2) / primary_mass(mass1, mass2)
+ + + +
+[docs] +def eta_from_mass1_mass2(mass1, mass2): + """Returns the symmetric mass ratio from mass1 and mass2.""" + return mass1*mass2 / (mass1 + mass2)**2.
+ + + +
+[docs] +def mchirp_from_mass1_mass2(mass1, mass2): + """Returns the chirp mass from mass1 and mass2.""" + return eta_from_mass1_mass2(mass1, mass2)**(3./5) * (mass1 + mass2)
+ + + +
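As a quick numerical check of the two functions above (a minimal sketch with illustrative values, assuming a working PyCBC installation):

    from pycbc.conversions import eta_from_mass1_mass2, mchirp_from_mass1_mass2

    m1, m2 = 1.4, 1.4
    print(eta_from_mass1_mass2(m1, m2))     # 0.25 for an equal-mass binary
    print(mchirp_from_mass1_mass2(m1, m2))  # ~1.219 solar masses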
+[docs] +def mass1_from_mtotal_q(mtotal, q): + """Returns a component mass from the given total mass and mass ratio. + + If the mass ratio q is >= 1, the returned mass will be the primary + (heavier) mass. If q < 1, the returned mass will be the secondary + (lighter) mass. + """ + return q*mtotal / (1. + q)
+ + + +
+[docs] +def mass2_from_mtotal_q(mtotal, q): + """Returns a component mass from the given total mass and mass ratio. + + If the mass ratio q is >= 1, the returned mass will be the secondary + (lighter) mass. If q < 1, the returned mass will be the primary (heavier) + mass. + """ + return mtotal / (1. + q)
+ + + +
+[docs] +def mass1_from_mtotal_eta(mtotal, eta): + """Returns the primary mass from the total mass and symmetric mass + ratio. + """ + return 0.5 * mtotal * (1.0 + (1.0 - 4.0 * eta)**0.5)
+ + + +
+[docs] +def mass2_from_mtotal_eta(mtotal, eta): + """Returns the secondary mass from the total mass and symmetric mass + ratio. + """ + return 0.5 * mtotal * (1.0 - (1.0 - 4.0 * eta)**0.5)
+ + + +
+[docs] +def mtotal_from_mchirp_eta(mchirp, eta): + """Returns the total mass from the chirp mass and symmetric mass ratio. + """ + return mchirp / eta**(3./5.)
+ + + +
+[docs] +def mass1_from_mchirp_eta(mchirp, eta): + """Returns the primary mass from the chirp mass and symmetric mass ratio. + """ + mtotal = mtotal_from_mchirp_eta(mchirp, eta) + return mass1_from_mtotal_eta(mtotal, eta)
+ + + +
+[docs] +def mass2_from_mchirp_eta(mchirp, eta): + """Returns the secondary mass from the chirp mass and symmetric mass ratio. + """ + mtotal = mtotal_from_mchirp_eta(mchirp, eta) + return mass2_from_mtotal_eta(mtotal, eta)
+ + + +def _mass2_from_mchirp_mass1(mchirp, mass1): + r"""Returns the secondary mass from the chirp mass and primary mass. + + As this is a cubic equation this requires finding the roots and returning + the one that is real. Basically it can be shown that: + + .. math:: + m_2^3 - a(m_2 + m_1) = 0, + + where + + .. math:: + a = \frac{\mathcal{M}^5}{m_1^3}. + + This has 3 solutions but only one will be real. + """ + a = mchirp**5 / mass1**3 + roots = numpy.roots([1, 0, -a, -a * mass1]) + # Find the real one + real_root = roots[(abs(roots - roots.real)).argmin()] + return real_root.real + +mass2_from_mchirp_mass1 = numpy.vectorize(_mass2_from_mchirp_mass1) + + +def _mass_from_knownmass_eta(known_mass, eta, known_is_secondary=False, + force_real=True): + r"""Returns the other component mass given one of the component masses + and the symmetric mass ratio. + + This requires finding the roots of the quadratic equation: + + .. math:: + \eta m_2^2 + (2\eta - 1)m_1 m_2 + \eta m_1^2 = 0. + + This has two solutions which correspond to :math:`m_1` being the heavier + mass or it being the lighter mass. By default, `known_mass` is assumed to + be the heavier (primary) mass, and the smaller solution is returned. Use + the `other_is_secondary` to invert. + + Parameters + ---------- + known_mass : float + The known component mass. + eta : float + The symmetric mass ratio. + known_is_secondary : {False, bool} + Whether the known component mass is the primary or the secondary. If + True, `known_mass` is assumed to be the secondary (lighter) mass and + the larger solution is returned. Otherwise, the smaller solution is + returned. Default is False. + force_real : {True, bool} + Force the returned mass to be real. + + Returns + ------- + float + The other component mass. + """ + roots = numpy.roots([eta, (2*eta - 1) * known_mass, eta * known_mass**2.]) + if force_real: + roots = numpy.real(roots) + if known_is_secondary: + return roots[roots.argmax()] + else: + return roots[roots.argmin()] + +mass_from_knownmass_eta = numpy.vectorize(_mass_from_knownmass_eta) + + +
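The cubic solution in _mass2_from_mchirp_mass1 is easiest to sanity-check by round-tripping through mchirp_from_mass1_mass2; the vectorized wrapper mass2_from_mchirp_mass1 is the name the module exports. A hedged sketch with arbitrary masses:

    import numpy
    from pycbc.conversions import mchirp_from_mass1_mass2, mass2_from_mchirp_mass1

    m1, m2 = 1.6, 1.3
    mc = mchirp_from_mass1_mass2(m1, m2)
    m2_recovered = mass2_from_mchirp_mass1(mc, m1)
    # the recovered secondary mass should match the input to numerical precision
    print(numpy.isclose(float(m2_recovered), m2))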
+[docs] +def mass2_from_mass1_eta(mass1, eta, force_real=True): + """Returns the secondary mass from the primary mass and symmetric mass + ratio. + """ + return mass_from_knownmass_eta(mass1, eta, known_is_secondary=False, + force_real=force_real)
+ + + +
+[docs] +def mass1_from_mass2_eta(mass2, eta, force_real=True): + """Returns the primary mass from the secondary mass and symmetric mass + ratio. + """ + return mass_from_knownmass_eta(mass2, eta, known_is_secondary=True, + force_real=force_real)
+ + + +
+[docs] +def eta_from_q(q): + r"""Returns the symmetric mass ratio from the given mass ratio. + + This is given by: + + .. math:: + \eta = \frac{q}{(1+q)^2}. + + Note that the mass ratio may be either < 1 or > 1. + """ + return q / (1. + q)**2
+ + + +
+[docs] +def mass1_from_mchirp_q(mchirp, q): + """Returns the primary mass from the given chirp mass and mass ratio.""" + mass1 = q**(2./5.) * (1.0 + q)**(1./5.) * mchirp + return mass1
+ + + +
+[docs] +def mass2_from_mchirp_q(mchirp, q): + """Returns the secondary mass from the given chirp mass and mass ratio.""" + mass2 = q**(-3./5.) * (1.0 + q)**(1./5.) * mchirp + return mass2
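For example, recovering the component masses from a chirp mass and mass ratio and checking the round trip (illustrative values):

    from pycbc.conversions import (mass1_from_mchirp_q, mass2_from_mchirp_q,
                                   mchirp_from_mass1_mass2, q_from_mass1_mass2)

    mc, q = 1.219, 1.5
    m1 = mass1_from_mchirp_q(mc, q)
    m2 = mass2_from_mchirp_q(mc, q)
    # should print values close to (1.219, 1.5)
    print(mchirp_from_mass1_mass2(m1, m2), q_from_mass1_mass2(m1, m2))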
+ + + +def _a0(f_lower): + """Used in calculating chirp times: see Cokelaer, arxiv.org:0706.4437 + appendix 1, also lalinspiral/python/sbank/tau0tau3.py. + """ + return 5. / (256. * (numpy.pi * f_lower)**(8./3.)) + + +def _a3(f_lower): + """Another parameter used for chirp times""" + return numpy.pi / (8. * (numpy.pi * f_lower)**(5./3.)) + + +
+[docs] +def tau0_from_mtotal_eta(mtotal, eta, f_lower): + r"""Returns :math:`\tau_0` from the total mass, symmetric mass ratio, and + the given frequency. + """ + # convert to seconds + mtotal = mtotal * lal.MTSUN_SI + # formulae from arxiv.org:0706.4437 + return _a0(f_lower) / (mtotal**(5./3.) * eta)
+ + + +
+[docs] +def tau0_from_mchirp(mchirp, f_lower): + r"""Returns :math:`\tau_0` from the chirp mass and the given frequency. + """ + # convert to seconds + mchirp = mchirp * lal.MTSUN_SI + # formulae from arxiv.org:0706.4437 + return _a0(f_lower) / mchirp ** (5./3.)
+ + + +
+[docs] +def tau3_from_mtotal_eta(mtotal, eta, f_lower): + r"""Returns :math:`\tau_3` from the total mass, symmetric mass ratio, and + the given frequency. + """ + # convert to seconds + mtotal = mtotal * lal.MTSUN_SI + # formulae from arxiv.org:0706.4437 + return _a3(f_lower) / (mtotal**(2./3.) * eta)
+ + + +
+[docs] +def tau0_from_mass1_mass2(mass1, mass2, f_lower): + r"""Returns :math:`\tau_0` from the component masses and given frequency. + """ + mtotal = mass1 + mass2 + eta = eta_from_mass1_mass2(mass1, mass2) + return tau0_from_mtotal_eta(mtotal, eta, f_lower)
+ + + +
+[docs] +def tau3_from_mass1_mass2(mass1, mass2, f_lower): + r"""Returns :math:`\tau_3` from the component masses and given frequency. + """ + mtotal = mass1 + mass2 + eta = eta_from_mass1_mass2(mass1, mass2) + return tau3_from_mtotal_eta(mtotal, eta, f_lower)
+ + + +
+[docs] +def mchirp_from_tau0(tau0, f_lower): + r"""Returns chirp mass from :math:`\tau_0` and the given frequency. + """ + mchirp = (_a0(f_lower) / tau0) ** (3./5.) # in seconds + # convert back to solar mass units + return mchirp / lal.MTSUN_SI
+ + + +
+[docs] +def mtotal_from_tau0_tau3(tau0, tau3, f_lower, + in_seconds=False): + r"""Returns total mass from :math:`\tau_0, \tau_3`.""" + mtotal = (tau3 / _a3(f_lower)) / (tau0 / _a0(f_lower)) + if not in_seconds: + # convert back to solar mass units + mtotal /= lal.MTSUN_SI + return mtotal
+ + + +
+[docs] +def eta_from_tau0_tau3(tau0, tau3, f_lower): + r"""Returns symmetric mass ratio from :math:`\tau_0, \tau_3`.""" + mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower, + in_seconds=True) + eta = mtotal**(-2./3.) * (_a3(f_lower) / tau3) + return eta
+ + + +
+[docs] +def mass1_from_tau0_tau3(tau0, tau3, f_lower): + r"""Returns the primary mass from the given :math:`\tau_0, \tau_3`.""" + mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower) + eta = eta_from_tau0_tau3(tau0, tau3, f_lower) + return mass1_from_mtotal_eta(mtotal, eta)
+ + + +
+[docs] +def mass2_from_tau0_tau3(tau0, tau3, f_lower): + r"""Returns the secondary mass from the given :math:`\tau_0, \tau_3`.""" + mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower) + eta = eta_from_tau0_tau3(tau0, tau3, f_lower) + return mass2_from_mtotal_eta(mtotal, eta)
+ + + +
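The chirp-time functions above are mutual inverses of the component-mass functions, so a round trip makes an easy consistency check (a sketch; the masses and the 20 Hz lower frequency are illustrative):

    import numpy
    from pycbc.conversions import (tau0_from_mass1_mass2, tau3_from_mass1_mass2,
                                   mass1_from_tau0_tau3, mass2_from_tau0_tau3)

    m1, m2, f_lower = 10.0, 5.0, 20.0
    tau0 = tau0_from_mass1_mass2(m1, m2, f_lower)
    tau3 = tau3_from_mass1_mass2(m1, m2, f_lower)
    print(numpy.isclose(mass1_from_tau0_tau3(tau0, tau3, f_lower), m1))
    print(numpy.isclose(mass2_from_tau0_tau3(tau0, tau3, f_lower), m2))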
+[docs] +def lambda_tilde(mass1, mass2, lambda1, lambda2): + """ The effective lambda parameter + + The mass-weighted dominant effective lambda parameter defined in + https://journals.aps.org/prd/pdf/10.1103/PhysRevD.91.043002 + """ + m1, m2, lambda1, lambda2, input_is_array = ensurearray( + mass1, mass2, lambda1, lambda2) + lsum = lambda1 + lambda2 + ldiff, _ = ensurearray(lambda1 - lambda2) + mask = m1 < m2 + ldiff[mask] = -ldiff[mask] + eta = eta_from_mass1_mass2(m1, m2) + p1 = lsum * (1 + 7. * eta - 31 * eta ** 2.0) + p2 = (1 - 4 * eta)**0.5 * (1 + 9 * eta - 11 * eta ** 2.0) * ldiff + return formatreturn(8.0 / 13.0 * (p1 + p2), input_is_array)
+ + +
+[docs] +def delta_lambda_tilde(mass1, mass2, lambda1, lambda2): + """ Delta lambda tilde parameter defined as + equation 15 in + https://journals.aps.org/prd/pdf/10.1103/PhysRevD.91.043002 + """ + m1, m2, lambda1, lambda2, input_is_array = ensurearray( + mass1, mass2, lambda1, lambda2) + lsum = lambda1 + lambda2 + ldiff, _ = ensurearray(lambda1 - lambda2) + mask = m1 < m2 + ldiff[mask] = -ldiff[mask] + eta = eta_from_mass1_mass2(m1, m2) + p1 = numpy.sqrt(1 - 4 * eta) * ( + 1 - (13272 / 1319) * eta + + (8944 / 1319) * eta ** 2 + ) * lsum + p2 = ( + 1 - (15910 / 1319) * eta + + (32850 / 1319) * eta ** 2 + + (3380 / 1319) * eta ** 3 + ) * ldiff + return formatreturn(1 / 2 * (p1 + p2), input_is_array)
+ + +
+[docs] +def lambda1_from_delta_lambda_tilde_lambda_tilde(delta_lambda_tilde, + lambda_tilde, + mass1, + mass2): + """ Returns lambda1 parameter by using delta lambda tilde, + lambda tilde, mass1, and mass2. + """ + m1, m2, delta_lambda_tilde, lambda_tilde, input_is_array = ensurearray( + mass1, mass2, delta_lambda_tilde, lambda_tilde) + eta = eta_from_mass1_mass2(m1, m2) + p1 = 1 + 7.0*eta - 31*eta**2.0 + p2 = (1 - 4*eta)**0.5 * (1 + 9*eta - 11*eta**2.0) + p3 = (1 - 4*eta)**0.5 * (1 - 13272/1319*eta + 8944/1319*eta**2) + p4 = 1 - (15910/1319)*eta + (32850/1319)*eta**2 + (3380/1319)*eta**3 + amp = 1/((p1*p4)-(p2*p3)) + l_tilde_lambda1 = 13/16 * (p3-p4) * lambda_tilde + l_delta_tilde_lambda1 = (p1-p2) * delta_lambda_tilde + lambda1 = formatreturn( + amp * (l_delta_tilde_lambda1 - l_tilde_lambda1), + input_is_array + ) + return lambda1
+ + +
+[docs] +def lambda2_from_delta_lambda_tilde_lambda_tilde( + delta_lambda_tilde, + lambda_tilde, + mass1, + mass2): + """ Returns lambda2 parameter by using delta lambda tilde, + lambda tilde, mass1, and mass2. + """ + m1, m2, delta_lambda_tilde, lambda_tilde, input_is_array = ensurearray( + mass1, mass2, delta_lambda_tilde, lambda_tilde) + eta = eta_from_mass1_mass2(m1, m2) + p1 = 1 + 7.0*eta - 31*eta**2.0 + p2 = (1 - 4*eta)**0.5 * (1 + 9*eta - 11*eta**2.0) + p3 = (1 - 4*eta)**0.5 * (1 - 13272/1319*eta + 8944/1319*eta**2) + p4 = 1 - (15910/1319)*eta + (32850/1319)*eta**2 + (3380/1319)*eta**3 + amp = 1/((p1*p4)-(p2*p3)) + l_tilde_lambda2 = 13/16 * (p3+p4) * lambda_tilde + l_delta_tilde_lambda2 = (p1+p2) * delta_lambda_tilde + lambda2 = formatreturn( + amp * (l_tilde_lambda2 - l_delta_tilde_lambda2), + input_is_array + ) + return lambda2
+ + +
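Since the two functions above invert the (lambda_tilde, delta_lambda_tilde) pair, the tidal parameters can be round-tripped once the component masses are fixed. A sketch with arbitrary values (the heavier object is labelled 1, matching the convention of the forward functions):

    import numpy
    from pycbc.conversions import (lambda_tilde, delta_lambda_tilde,
                                   lambda1_from_delta_lambda_tilde_lambda_tilde,
                                   lambda2_from_delta_lambda_tilde_lambda_tilde)

    m1, m2, lam1, lam2 = 1.6, 1.3, 200.0, 450.0
    lt = lambda_tilde(m1, m2, lam1, lam2)
    dlt = delta_lambda_tilde(m1, m2, lam1, lam2)
    l1 = lambda1_from_delta_lambda_tilde_lambda_tilde(dlt, lt, m1, m2)
    l2 = lambda2_from_delta_lambda_tilde_lambda_tilde(dlt, lt, m1, m2)
    print(numpy.isclose(l1, lam1), numpy.isclose(l2, lam2))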
+[docs] +def lambda_from_mass_tov_file(mass, tov_file, distance=0.): + """Return the lambda parameter(s) corresponding to the input mass(es) + interpolating from the mass-Lambda data for a particular EOS read in from + an ASCII file. + """ + data = numpy.loadtxt(tov_file) + mass_from_file = data[:, 0] + lambda_from_file = data[:, 1] + mass_src = mass/(1.0 + pycbc.cosmology.redshift(distance)) + lambdav = numpy.interp(mass_src, mass_from_file, lambda_from_file) + return lambdav
+ + + +def ensure_obj1_is_primary(mass1, mass2, *params): + """ + Enforce that the object labelled as 1 is the primary. + + Parameters + ---------- + mass1 : float, numpy.array + Mass values labelled as 1. + mass2 : float, numpy.array + Mass values labelled as 2. + *params : + The binary parameters to be swapped around when mass1 < mass2. + The list must have length 2N and it must be organized so that + params[i] and params[i+1] are the same kind of quantity, but + for object 1 and object 2, respectively. + E.g., spin1z, spin2z, lambda1, lambda2. + + Returns + ------- + list : + A list containing mass1, mass2, and params as arrays, with the + elements re-arranged so that object 1 is the primary. + """ + # Check params are 2N + if len(params) % 2 != 0: + raise ValueError("params must be 2N floats or arrays") + input_properties, input_is_array = ensurearray((mass1, mass2)+params) + # Check inputs are all the same length + shapes = [par.shape for par in input_properties] + if len(set(shapes)) != 1: + raise ValueError("Individual masses and params must have same shape") + # What needs to be swapped + mask = mass1 < mass2 + # Output container + output_properties = [] + for i in numpy.arange(0, len(shapes), 2): + # primary (p) + p = copy.copy(input_properties[i]) + # secondary (s) + s = copy.copy(input_properties[i+1]) + # Swap + p[mask] = input_properties[i+1][mask] + s[mask] = input_properties[i][mask] + # Format and include in output object + output_properties.append(formatreturn(p, input_is_array)) + output_properties.append(formatreturn(s, input_is_array)) + # Release output + return output_properties + + +
+[docs] +def remnant_mass_from_mass1_mass2_spherical_spin_eos( + mass1, mass2, spin1_a=0.0, spin1_polar=0.0, eos='2H', + spin2_a=0.0, spin2_polar=0.0, swap_companions=False, + ns_bh_mass_boundary=None, extrapolate=False): + """ + Function that determines the remnant disk mass of an NS-BH system + using the fit to numerical-relativity results discussed in + Foucart, Hinderer & Nissanke, PRD 98, 081501(R) (2018). + The BH spin may be misaligned with the orbital angular momentum. + In such cases the ISSO is approximated following the approach of + Stone, Loeb & Berger, PRD 87, 084053 (2013), which was originally + devised for a previous NS-BH remnant mass fit of + Foucart, PRD 86, 124007 (2012). + Note: The NS spin does not play any role in this fit! + + Parameters + ----------- + mass1 : float + The mass of the black hole, in solar masses. + mass2 : float + The mass of the neutron star, in solar masses. + spin1_a : float, optional + The dimensionless magnitude of the spin of mass1. Default = 0. + spin1_polar : float, optional + The tilt angle of the spin of mass1. Default = 0 (aligned w L). + eos : str, optional + Name of the equation of state being adopted. Default is '2H'. + spin2_a : float, optional + The dimensionless magnitude of the spin of mass2. Default = 0. + spin2_polar : float, optional + The tilt angle of the spin of mass2. Default = 0 (aligned w L). + swap_companions : boolean, optional + If mass2 > mass1, swap mass and spin of object 1 and 2 prior + to applying the fitting formula (otherwise fail). Default is False. + ns_bh_mass_boundary : float, optional + If mass2 is greater than this value, the neutron star is effectively + treated as a black hole and the returned value is 0. For consistency + with the eos, set this to the maximum mass allowed by the eos; set + a lower value for a more stringent cut. Default is None. + extrapolate : boolean, optional + Invoke extrapolation of NS baryonic mass and NS compactness in + scipy.interpolate.interp1d at low masses. If ns_bh_mass_boundary is + provided, it is applied at high masses, otherwise the equation of + state prescribes the maximum possible mass2. Default is False. + + Returns + ---------- + remnant_mass: float + The remnant mass in solar masses + """ + mass1, mass2, spin1_a, spin1_polar, spin2_a, spin2_polar, \ + input_is_array = \ + ensurearray(mass1, mass2, spin1_a, spin1_polar, spin2_a, spin2_polar) + # mass1 must be greater than mass2: swap the properties of 1 and 2 or fail + if swap_companions: + mass1, mass2, spin1_a, spin2_a, spin1_polar, spin2_polar = \ + ensure_obj1_is_primary(mass1, mass2, spin1_a, spin2_a, + spin1_polar, spin2_polar) + else: + try: + if any(mass2 > mass1) and input_is_array: + raise ValueError(f'Require mass1 >= mass2') + except TypeError: + if mass2 > mass1 and not input_is_array: + raise ValueError(f'Require mass1 >= mass2. {mass1} < {mass2}') + eta = eta_from_mass1_mass2(mass1, mass2) + # If a maximum NS mass is not provided, accept all values and + # let the EOS handle this (in ns.initialize_eos) + if ns_bh_mass_boundary is None: + mask = numpy.ones(ensurearray(mass2)[0].size, dtype=bool) + # Otherwise perform the calculation only for small enough NS masses... 
+ else: + mask = mass2 <= ns_bh_mass_boundary + # ...and return 0's otherwise + remnant_mass = numpy.zeros(ensurearray(mass2)[0].size) + ns_compactness, ns_b_mass = ns.initialize_eos(mass2[mask], eos, + extrapolate=extrapolate) + remnant_mass[mask] = ns.foucart18( + eta[mask], ns_compactness, ns_b_mass, + spin1_a[mask], spin1_polar[mask]) + return formatreturn(remnant_mass, input_is_array)
+ + + +
+[docs] +def remnant_mass_from_mass1_mass2_cartesian_spin_eos( + mass1, mass2, spin1x=0.0, spin1y=0.0, spin1z=0.0, eos='2H', + spin2x=0.0, spin2y=0.0, spin2z=0.0, swap_companions=False, + ns_bh_mass_boundary=None, extrapolate=False): + """ + Function that determines the remnant disk mass of an NS-BH system + using the fit to numerical-relativity results discussed in + Foucart, Hinderer & Nissanke, PRD 98, 081501(R) (2018). + The BH spin may be misaligned with the orbital angular momentum. + In such cases the ISSO is approximated following the approach of + Stone, Loeb & Berger, PRD 87, 084053 (2013), which was originally + devised for a previous NS-BH remnant mass fit of + Foucart, PRD 86, 124007 (2012). + Note: NS spin is assumed to be 0! + + Parameters + ----------- + mass1 : float + The mass of the black hole, in solar masses. + mass2 : float + The mass of the neutron star, in solar masses. + spin1x : float, optional + The dimensionless x-component of the spin of mass1. Default = 0. + spin1y : float, optional + The dimensionless y-component of the spin of mass1. Default = 0. + spin1z : float, optional + The dimensionless z-component of the spin of mass1. Default = 0. + eos: str, optional + Name of the equation of state being adopted. Default is '2H'. + spin2x : float, optional + The dimensionless x-component of the spin of mass2. Default = 0. + spin2y : float, optional + The dimensionless y-component of the spin of mass2. Default = 0. + spin2z : float, optional + The dimensionless z-component of the spin of mass2. Default = 0. + swap_companions : boolean, optional + If mass2 > mass1, swap mass and spin of object 1 and 2 prior + to applying the fitting formula (otherwise fail). Default is False. + ns_bh_mass_boundary : float, optional + If mass2 is greater than this value, the neutron star is effectively + treated as a black hole and the returned value is 0. For consistency + with the eos, set this to the maximum mass allowed by the eos; set + a lower value for a more stringent cut. Default is None. + extrapolate : boolean, optional + Invoke extrapolation of NS baryonic mass and NS compactness in + scipy.interpolate.interp1d at low masses. If ns_bh_mass_boundary is + provided, it is applied at high masses, otherwise the equation of + state prescribes the maximum possible mass2. Default is False. + + Returns + ---------- + remnant_mass: float + The remnant mass in solar masses + """ + spin1_a, _, spin1_polar = _cartesian_to_spherical(spin1x, spin1y, spin1z) + if swap_companions: + spin2_a, _, spin2_polar = _cartesian_to_spherical(spin2x, + spin2y, spin2z) + else: + size = ensurearray(spin1_a)[0].size + spin2_a = numpy.zeros(size) + spin2_polar = numpy.zeros(size) + return remnant_mass_from_mass1_mass2_spherical_spin_eos( + mass1, mass2, spin1_a=spin1_a, spin1_polar=spin1_polar, eos=eos, + spin2_a=spin2_a, spin2_polar=spin2_polar, + swap_companions=swap_companions, + ns_bh_mass_boundary=ns_bh_mass_boundary, extrapolate=extrapolate)
+ + + +# +# ============================================================================= +# +# CBC spin functions +# +# ============================================================================= +# +
+[docs] +def chi_eff(mass1, mass2, spin1z, spin2z): + """Returns the effective spin from mass1, mass2, spin1z, and spin2z.""" + return (spin1z * mass1 + spin2z * mass2) / (mass1 + mass2)
+ + + +
+[docs] +def chi_a(mass1, mass2, spin1z, spin2z): + """ Returns the aligned mass-weighted spin difference from mass1, mass2, + spin1z, and spin2z. + """ + return (spin2z * mass2 - spin1z * mass1) / (mass2 + mass1)
+ + + +
+[docs] +def chi_p(mass1, mass2, spin1x, spin1y, spin2x, spin2y): + """Returns the effective precession spin from mass1, mass2, spin1x, + spin1y, spin2x, and spin2y. + """ + xi1 = secondary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y) + xi2 = primary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y) + return chi_p_from_xi1_xi2(xi1, xi2)
+ + + +
+[docs] +def phi_a(mass1, mass2, spin1x, spin1y, spin2x, spin2y): + """ Returns the angle between the in-plane perpendicular spins.""" + phi1 = phi_from_spinx_spiny(primary_spin(mass1, mass2, spin1x, spin2x), + primary_spin(mass1, mass2, spin1y, spin2y)) + phi2 = phi_from_spinx_spiny(secondary_spin(mass1, mass2, spin1x, spin2x), + secondary_spin(mass1, mass2, spin1y, spin2y)) + return (phi1 - phi2) % (2 * numpy.pi)
+ + + +
+[docs] +def phi_s(spin1x, spin1y, spin2x, spin2y): + """ Returns the sum of the in-plane perpendicular spins.""" + phi1 = phi_from_spinx_spiny(spin1x, spin1y) + phi2 = phi_from_spinx_spiny(spin2x, spin2y) + return (phi1 + phi2) % (2 * numpy.pi)
+ + + +
+[docs] +def chi_eff_from_spherical(mass1, mass2, spin1_a, spin1_polar, + spin2_a, spin2_polar): + """Returns the effective spin using spins in spherical coordinates.""" + spin1z = spin1_a * numpy.cos(spin1_polar) + spin2z = spin2_a * numpy.cos(spin2_polar) + return chi_eff(mass1, mass2, spin1z, spin2z)
+ + + +
+[docs] +def chi_p_from_spherical(mass1, mass2, spin1_a, spin1_azimuthal, spin1_polar, + spin2_a, spin2_azimuthal, spin2_polar): + """Returns the effective precession spin using spins in spherical + coordinates. + """ + spin1x, spin1y, _ = _spherical_to_cartesian( + spin1_a, spin1_azimuthal, spin1_polar) + spin2x, spin2y, _ = _spherical_to_cartesian( + spin2_a, spin2_azimuthal, spin2_polar) + return chi_p(mass1, mass2, spin1x, spin1y, spin2x, spin2y)
+ + + +
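For instance, the aligned and precessing effective spins of an unequal-mass binary (illustrative numbers only):

    from pycbc.conversions import chi_eff, chi_p

    m1, m2 = 30.0, 20.0
    s1x, s1y, s1z = 0.3, 0.0, 0.5
    s2x, s2y, s2z = 0.0, 0.1, -0.2
    print(chi_eff(m1, m2, s1z, s2z))          # (0.5*30 - 0.2*20) / 50 = 0.22
    print(chi_p(m1, m2, s1x, s1y, s2x, s2y))  # 0.3 here; the primary's in-plane spin dominates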
+[docs] +def primary_spin(mass1, mass2, spin1, spin2): + """Returns the dimensionless spin of the primary mass.""" + mass1, mass2, spin1, spin2, input_is_array = ensurearray( + mass1, mass2, spin1, spin2) + sp = copy.copy(spin1) + mask = mass1 < mass2 + sp[mask] = spin2[mask] + return formatreturn(sp, input_is_array)
+ + + +
+[docs] +def secondary_spin(mass1, mass2, spin1, spin2): + """Returns the dimensionless spin of the secondary mass.""" + mass1, mass2, spin1, spin2, input_is_array = ensurearray( + mass1, mass2, spin1, spin2) + ss = copy.copy(spin2) + mask = mass1 < mass2 + ss[mask] = spin1[mask] + return formatreturn(ss, input_is_array)
+ + + +
+[docs] +def primary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y): + """Returns the effective precession spin argument for the larger mass. + """ + spinx = primary_spin(mass1, mass2, spin1x, spin2x) + spiny = primary_spin(mass1, mass2, spin1y, spin2y) + return chi_perp_from_spinx_spiny(spinx, spiny)
+ + + +
+[docs] +def secondary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y): + """Returns the effective precession spin argument for the smaller mass. + """ + spinx = secondary_spin(mass1, mass2, spin1x, spin2x) + spiny = secondary_spin(mass1, mass2, spin1y, spin2y) + return xi2_from_mass1_mass2_spin2x_spin2y(mass1, mass2, spinx, spiny)
+ + + +
+[docs] +def xi1_from_spin1x_spin1y(spin1x, spin1y): + """Returns the effective precession spin argument for the larger mass. + This function assumes it's given spins of the primary mass. + """ + return chi_perp_from_spinx_spiny(spin1x, spin1y)
+ + + +
+[docs] +def xi2_from_mass1_mass2_spin2x_spin2y(mass1, mass2, spin2x, spin2y): + """Returns the effective precession spin argument for the smaller mass. + This function assumes it's given spins of the secondary mass. + """ + q = q_from_mass1_mass2(mass1, mass2) + a1 = 2 + 3 * q / 2 + a2 = 2 + 3 / (2 * q) + return a1 / (q**2 * a2) * chi_perp_from_spinx_spiny(spin2x, spin2y)
+ + + +
+[docs] +def chi_perp_from_spinx_spiny(spinx, spiny): + """Returns the in-plane spin from the x/y components of the spin. + """ + return numpy.sqrt(spinx**2 + spiny**2)
+ + + +
+[docs] +def chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2): + """Returns the in-plane spin from mass1, mass2, and xi2 for the + secondary mass. + """ + q = q_from_mass1_mass2(mass1, mass2) + a1 = 2 + 3 * q / 2 + a2 = 2 + 3 / (2 * q) + return q**2 * a2 / a1 * xi2
+ + + +
+[docs] +def chi_p_from_xi1_xi2(xi1, xi2): + """Returns effective precession spin from xi1 and xi2. + """ + xi1, xi2, input_is_array = ensurearray(xi1, xi2) + chi_p = copy.copy(xi1) + mask = xi1 < xi2 + chi_p[mask] = xi2[mask] + return formatreturn(chi_p, input_is_array)
+ + + +
+[docs] +def phi1_from_phi_a_phi_s(phi_a, phi_s): + """Returns the angle between the x-component axis and the in-plane + spin for the primary mass from phi_s and phi_a. + """ + return (phi_s + phi_a) / 2.0
+ + + +
+[docs] +def phi2_from_phi_a_phi_s(phi_a, phi_s): + """Returns the angle between the x-component axis and the in-plane + spin for the secondary mass from phi_s and phi_a. + """ + return (phi_s - phi_a) / 2.0
+ + + +
+[docs] +def phi_from_spinx_spiny(spinx, spiny): + """Returns the angle between the x-component axis and the in-plane spin. + """ + phi = numpy.arctan2(spiny, spinx) + return phi % (2 * numpy.pi)
+ + + +
+[docs] +def spin1z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2, chi_eff, chi_a): + """Returns spin1z. + """ + return (mass1 + mass2) / (2.0 * mass1) * (chi_eff - chi_a)
+ + + +
+[docs] +def spin2z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2, chi_eff, chi_a): + """Returns spin2z. + """ + return (mass1 + mass2) / (2.0 * mass2) * (chi_eff + chi_a)
+ + + +
+[docs] +def spin1x_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s): + """Returns x-component spin for primary mass. + """ + phi1 = phi1_from_phi_a_phi_s(phi_a, phi_s) + return xi1 * numpy.cos(phi1)
+ + + +
+[docs] +def spin1y_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s): + """Returns y-component spin for primary mass. + """ + phi1 = phi1_from_phi_a_phi_s(phi_s, phi_a) + return xi1 * numpy.sin(phi1)
+ + + +
+[docs] +def spin2x_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2, xi2, phi_a, phi_s): + """Returns x-component spin for secondary mass. + """ + chi_perp = chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2) + phi2 = phi2_from_phi_a_phi_s(phi_a, phi_s) + return chi_perp * numpy.cos(phi2)
+ + + +
+[docs] +def spin2y_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2, xi2, phi_a, phi_s): + """Returns y-component spin for secondary mass. + """ + chi_perp = chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2) + phi2 = phi2_from_phi_a_phi_s(phi_a, phi_s) + return chi_perp * numpy.sin(phi2)
+ + + +
+[docs] +def dquadmon_from_lambda(lambdav): + r"""Return the quadrupole moment of a neutron star given its lambda + + We use the relations defined here. https://arxiv.org/pdf/1302.4499.pdf. + Note that the convention we use is that: + + .. math:: + + \mathrm{dquadmon} = \bar{Q} - 1. + + Where :math:`\bar{Q}` (dimensionless) is the reduced quadrupole moment. + """ + ll = numpy.log(lambdav) + ai = .194 + bi = .0936 + ci = 0.0474 + di = -4.21 * 10**-3.0 + ei = 1.23 * 10**-4.0 + ln_quad_moment = ai + bi*ll + ci*ll**2.0 + di*ll**3.0 + ei*ll**4.0 + return numpy.exp(ln_quad_moment) - 1
+ + + +
+[docs] +def spin_from_pulsar_freq(mass, radius, freq): + """Returns the dimensionless spin of a pulsar. + + Assumes the pulsar is a solid sphere when computing the moment of inertia. + + Parameters + ---------- + mass : float + The mass of the pulsar, in solar masses. + radius : float + The assumed radius of the pulsar, in kilometers. + freq : float + The spin frequency of the pulsar, in Hz. + """ + omega = 2 * numpy.pi * freq + mt = mass * lal.MTSUN_SI + mominert = (2/5.) * mt * (radius * 1000 / lal.C_SI)**2 + return mominert * omega / mt**2
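As a worked example of the formula above, with round, purely illustrative pulsar parameters:

    from pycbc.conversions import spin_from_pulsar_freq

    # a 1.4 solar-mass pulsar with a 12 km radius spinning at 300 Hz
    print(spin_from_pulsar_freq(1.4, 12.0, 300.0))   # ~0.18 (dimensionless)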
+ + + +# +# ============================================================================= +# +# Extrinsic parameter functions +# +# ============================================================================= +# +
+[docs] +def chirp_distance(dist, mchirp, ref_mass=1.4): + """Returns the chirp distance given the luminosity distance and chirp mass. + """ + return dist * (2.**(-1./5) * ref_mass / mchirp)**(5./6)
+ + + +def distance_from_chirp_distance_mchirp(chirp_distance, mchirp, ref_mass=1.4): + """Returns the luminosity distance given a chirp distance and chirp mass. + """ + return chirp_distance * (2.**(-1./5) * ref_mass / mchirp)**(-5./6) + + +_detector_cache = {} +
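chirp_distance and distance_from_chirp_distance_mchirp are inverses, and for the reference 1.4 + 1.4 binary the chirp distance equals the luminosity distance. A quick sketch (distance units are whatever the input uses):

    import numpy
    from pycbc.conversions import (chirp_distance, mchirp_from_mass1_mass2,
                                   distance_from_chirp_distance_mchirp)

    mc = mchirp_from_mass1_mass2(1.4, 1.4)
    dchirp = chirp_distance(400.0, mc)                      # equals 400 for this mchirp
    dist = distance_from_chirp_distance_mchirp(dchirp, mc)  # recovers the input distance
    print(numpy.isclose(dist, 400.0))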
+[docs] +def det_tc(detector_name, ra, dec, tc, ref_frame='geocentric', relative=False): + """Returns the coalescence time of a signal in the given detector. + + Parameters + ---------- + detector_name : string + The name of the detector, e.g., 'H1'. + ra : float + The right ascension of the signal, in radians. + dec : float + The declination of the signal, in radians. + tc : float + The GPS time of the coalescence of the signal in the `ref_frame`. + ref_frame : {'geocentric', string} + The reference frame that the given coalescence time is defined in. + May specify 'geocentric', or a detector name; default is 'geocentric'. + + Returns + ------- + float : + The GPS time of the coalescence in detector `detector_name`. + """ + ref_time = tc + if relative: + tc = 0 + + if ref_frame == detector_name: + return tc + if detector_name not in _detector_cache: + _detector_cache[detector_name] = Detector(detector_name) + detector = _detector_cache[detector_name] + if ref_frame == 'geocentric': + return tc + detector.time_delay_from_earth_center(ra, dec, ref_time) + else: + other = Detector(ref_frame) + return tc + detector.time_delay_from_detector(other, ra, dec, ref_time)
+ + +def optimal_orientation_from_detector(detector_name, tc): + """ Low-level function to be called from _optimal_dec_from_detector + and _optimal_ra_from_detector""" + + d = Detector(detector_name) + ra, dec = d.optimal_orientation(tc) + return ra, dec + +
+[docs] +def optimal_dec_from_detector(detector_name, tc): + """For a given detector and GPS time, return the optimal orientation + (directly overhead of the detector) in declination. + + Parameters + ---------- + detector_name : string + The name of the detector, e.g., 'H1'. + tc : float + The GPS time at which to compute the optimal orientation. + + Returns + ------- + float : + The optimal declination, in radians. + """ + return optimal_orientation_from_detector(detector_name, tc)[1]
+ + +
+[docs] +def optimal_ra_from_detector(detector_name, tc): + """For a given detector and GPS time, return the optimal orientation + (directly overhead of the detector) in right ascension. + + Parameters + ---------- + detector_name : string + The name of the detector, e.g., 'H1'. + tc : float + The GPS time at which to compute the optimal orientation. + + Returns + ------- + float : + The optimal right ascension, in radians. + """ + return optimal_orientation_from_detector(detector_name, tc)[0]
+ + + +# +# ============================================================================= +# +# Likelihood statistic parameter functions +# +# ============================================================================= +# +
+[docs] +def snr_from_loglr(loglr): + """Returns SNR computed from the given log likelihood ratio(s). This is + defined as `sqrt(2*loglr)`. If the log likelihood ratio is < 0, returns 0. + + Parameters + ---------- + loglr : array or float + The log likelihood ratio(s) to evaluate. + + Returns + ------- + array or float + The SNRs computed from the log likelihood ratios. + """ + singleval = isinstance(loglr, float) + if singleval: + loglr = numpy.array([loglr]) + # temporarily quiet sqrt(-1) warnings + with numpy.errstate(invalid="ignore"): + snrs = numpy.sqrt(2*loglr) + snrs[numpy.isnan(snrs)] = 0. + if singleval: + snrs = snrs[0] + return snrs
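For example (a minimal sketch; the values are arbitrary):

    import numpy
    from pycbc.conversions import snr_from_loglr

    print(snr_from_loglr(8.0))                       # sqrt(2 * 8) = 4.0
    print(snr_from_loglr(numpy.array([8.0, -1.0])))  # a negative log likelihood ratio maps to 0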
+ + +# +# ============================================================================= +# +# BH Ringdown functions +# +# ============================================================================= +# + + +def get_lm_f0tau(mass, spin, l, m, n=0, which='both'): + """Return the f0 and the tau for one or more overtones of an l, m mode. + + Parameters + ---------- + mass : float or array + Mass of the black hole (in solar masses). + spin : float or array + Dimensionless spin of the final black hole. + l : int or array + l-index of the harmonic. + m : int or array + m-index of the harmonic. + n : int or array + Overtone(s) to generate, where n=0 is the fundamental mode. + Default is 0. + which : {'both', 'f0', 'tau'}, optional + What to return; 'both' returns both frequency and tau, 'f0' just + frequency, 'tau' just tau. Default is 'both'. + + Returns + ------- + f0 : float or array + Returned if ``which`` is 'both' or 'f0'. + The frequency of the QNM(s), in Hz. + tau : float or array + Returned if ``which`` is 'both' or 'tau'. + The damping time of the QNM(s), in seconds. + """ + # convert to arrays + mass, spin, l, m, n, input_is_array = ensurearray( + mass, spin, l, m, n) + # we'll ravel the arrays so we can evaluate each parameter combination + # one at a a time + getf0 = which == 'both' or which == 'f0' + gettau = which == 'both' or which == 'tau' + out = [] + if getf0: + f0s = pykerr.qnmfreq(mass, spin, l, m, n) + out.append(formatreturn(f0s, input_is_array)) + if gettau: + taus = pykerr.qnmtau(mass, spin, l, m, n) + out.append(formatreturn(taus, input_is_array)) + if not (getf0 and gettau): + out = out[0] + return out + + +def get_lm_f0tau_allmodes(mass, spin, modes): + """Returns a dictionary of all of the frequencies and damping times for the + requested modes. + + Parameters + ---------- + mass : float or array + Mass of the black hole (in solar masses). + spin : float or array + Dimensionless spin of the final black hole. + modes : list of str + The modes to get. Each string in the list should be formatted + 'lmN', where l (m) is the l (m) index of the harmonic and N is the + number of overtones to generate (note, N is not the index of the + overtone). + + Returns + ------- + f0 : dict + Dictionary mapping the modes to the frequencies. The dictionary keys + are 'lmn' string, where l (m) is the l (m) index of the harmonic and + n is the index of the overtone. For example, '220' is the l = m = 2 + mode and the 0th overtone. + tau : dict + Dictionary mapping the modes to the damping times. The keys are the + same as ``f0``. + """ + f0, tau = {}, {} + for lmn in modes: + key = '{}{}{}' + l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2]) + for n in range(nmodes): + tmp_f0, tmp_tau = get_lm_f0tau(mass, spin, l, m, n) + f0[key.format(l, abs(m), n)] = tmp_f0 + tau[key.format(l, abs(m), n)] = tmp_tau + return f0, tau + + +
+[docs] +def freq_from_final_mass_spin(final_mass, final_spin, l=2, m=2, n=0): + """Returns QNM frequency for the given mass and spin and mode. + + Parameters + ---------- + final_mass : float or array + Mass of the black hole (in solar masses). + final_spin : float or array + Dimensionless spin of the final black hole. + l : int or array, optional + l-index of the harmonic. Default is 2. + m : int or array, optional + m-index of the harmonic. Default is 2. + n : int or array + Overtone(s) to generate, where n=0 is the fundamental mode. + Default is 0. + + Returns + ------- + float or array + The frequency of the QNM(s), in Hz. + """ + return get_lm_f0tau(final_mass, final_spin, l, m, n=n, which='f0')
+ + + +
+[docs] +def tau_from_final_mass_spin(final_mass, final_spin, l=2, m=2, n=0): + """Returns QNM damping time for the given mass and spin and mode. + + Parameters + ---------- + final_mass : float or array + Mass of the black hole (in solar masses). + final_spin : float or array + Dimensionless spin of the final black hole. + l : int or array, optional + l-index of the harmonic. Default is 2. + m : int or array, optional + m-index of the harmonic. Default is 2. + n : int or array + Overtone(s) to generate, where n=0 is the fundamental mode. + Default is 0. + + Returns + ------- + float or array + The damping time of the QNM(s), in seconds. + """ + return get_lm_f0tau(final_mass, final_spin, l, m, n=n, which='tau')
+ + + +# The following are from Table VIII, IX, X of Berti et al., +# PRD 73 064030, arXiv:gr-qc/0512160 (2006). +# Keys are l,m (only n=0 supported). Constants are for converting from +# frequency and damping time to mass and spin. +_berti_spin_constants = { + (2, 2): (0.7, 1.4187, -0.4990), + (2, 1): (-0.3, 2.3561, -0.2277), + (3, 3): (0.9, 2.343, -0.4810), + (4, 4): (1.1929, 3.1191, -0.4825), + } + +_berti_mass_constants = { + (2, 2): (1.5251, -1.1568, 0.1292), + (2, 1): (0.6, -0.2339, 0.4175), + (3, 3): (1.8956, -1.3043, 0.1818), + (4, 4): (2.3, -1.5056, 0.2244), + } + + +
+[docs] +def final_spin_from_f0_tau(f0, tau, l=2, m=2): + """Returns the final spin based on the given frequency and damping time. + + .. note:: + Currently, only (l,m) = (2,2), (3,3), (4,4), (2,1) are supported. + Any other indices will raise a ``KeyError``. + + Parameters + ---------- + f0 : float or array + Frequency of the QNM (in Hz). + tau : float or array + Damping time of the QNM (in seconds). + l : int, optional + l-index of the harmonic. Default is 2. + m : int, optional + m-index of the harmonic. Default is 2. + + Returns + ------- + float or array + The spin of the final black hole. If the combination of frequency + and damping times give an unphysical result, ``numpy.nan`` will be + returned. + """ + f0, tau, input_is_array = ensurearray(f0, tau) + # from Berti et al. 2006 + a, b, c = _berti_spin_constants[l,m] + origshape = f0.shape + # flatten inputs for storing results + f0 = f0.ravel() + tau = tau.ravel() + spins = numpy.zeros(f0.size) + for ii in range(spins.size): + Q = f0[ii] * tau[ii] * numpy.pi + try: + s = 1. - ((Q-a)/b)**(1./c) + except ValueError: + s = numpy.nan + spins[ii] = s + spins = spins.reshape(origshape) + return formatreturn(spins, input_is_array)
+ + + +
+[docs] +def final_mass_from_f0_tau(f0, tau, l=2, m=2): + """Returns the final mass (in solar masses) based on the given frequency + and damping time. + + .. note:: + Currently, only (l,m) = (2,2), (3,3), (4,4), (2,1) are supported. + Any other indices will raise a ``KeyError``. + + Parameters + ---------- + f0 : float or array + Frequency of the QNM (in Hz). + tau : float or array + Damping time of the QNM (in seconds). + l : int, optional + l-index of the harmonic. Default is 2. + m : int, optional + m-index of the harmonic. Default is 2. + + Returns + ------- + float or array + The mass of the final black hole. If the combination of frequency + and damping times give an unphysical result, ``numpy.nan`` will be + returned. + """ + # from Berti et al. 2006 + spin = final_spin_from_f0_tau(f0, tau, l=l, m=m) + a, b, c = _berti_mass_constants[l,m] + return (a + b*(1-spin)**c)/(2*numpy.pi*f0*lal.MTSUN_SI)
+ + +
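A sketch of how these fits are typically combined, turning a measured (f0, tau) pair for the fundamental (2,2) mode into a remnant mass and spin; the numbers are purely illustrative, and going back from mass and spin to frequency needs the optional pykerr package:

    from pycbc.conversions import final_mass_from_f0_tau, final_spin_from_f0_tau

    f0, tau = 250.0, 0.004   # Hz and seconds; roughly GW150914-like scales
    mass = final_mass_from_f0_tau(f0, tau, l=2, m=2)
    spin = final_spin_from_f0_tau(f0, tau, l=2, m=2)
    print(mass, spin)
    # freq_from_final_mass_spin(mass, spin, l=2, m=2) would map back, if pykerr is installed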
+[docs] +def freqlmn_from_other_lmn(f0, tau, current_l, current_m, new_l, new_m): + """Returns the QNM frequency (in Hz) of a chosen new (l,m) mode from the + given current (l,m) mode. + + Parameters + ---------- + f0 : float or array + Frequency of the current QNM (in Hz). + tau : float or array + Damping time of the current QNM (in seconds). + current_l : int, optional + l-index of the current QNM. + current_m : int, optional + m-index of the current QNM. + new_l : int, optional + l-index of the new QNM to convert to. + new_m : int, optional + m-index of the new QNM to convert to. + + Returns + ------- + float or array + The frequency of the new (l, m) QNM mode. If the combination of + frequency and damping time provided for the current (l, m) QNM mode + correspond to an unphysical Kerr black hole mass and/or spin, + ``numpy.nan`` will be returned. + """ + mass = final_mass_from_f0_tau(f0, tau, l=current_l, m=current_m) + spin = final_spin_from_f0_tau(f0, tau, l=current_l, m=current_m) + mass, spin, input_is_array = ensurearray(mass, spin) + + mass[mass < 0] = numpy.nan + spin[numpy.abs(spin) > 0.9996] = numpy.nan + + new_f0 = freq_from_final_mass_spin(mass, spin, l=new_l, m=new_m) + return formatreturn(new_f0, input_is_array)
+ + + +
+[docs] +def taulmn_from_other_lmn(f0, tau, current_l, current_m, new_l, new_m): + """Returns the QNM damping time (in seconds) of a chosen new (l,m) mode + from the given current (l,m) mode. + + Parameters + ---------- + f0 : float or array + Frequency of the current QNM (in Hz). + tau : float or array + Damping time of the current QNM (in seconds). + current_l : int, optional + l-index of the current QNM. + current_m : int, optional + m-index of the current QNM. + new_l : int, optional + l-index of the new QNM to convert to. + new_m : int, optional + m-index of the new QNM to convert to. + + Returns + ------- + float or array + The damping time of the new (l, m) QNM mode. If the combination of + frequency and damping time provided for the current (l, m) QNM mode + correspond to an unphysical Kerr black hole mass and/or spin, + ``numpy.nan`` will be returned. + """ + mass = final_mass_from_f0_tau(f0, tau, l=current_l, m=current_m) + spin = final_spin_from_f0_tau(f0, tau, l=current_l, m=current_m) + mass, spin, input_is_array = ensurearray(mass, spin) + + mass[mass < 0] = numpy.nan + spin[numpy.abs(spin) > 0.9996] = numpy.nan + + new_tau = tau_from_final_mass_spin(mass, spin, l=new_l, m=new_m) + return formatreturn(new_tau, input_is_array)
+ + +def get_final_from_initial(mass1, mass2, spin1x=0., spin1y=0., spin1z=0., + spin2x=0., spin2y=0., spin2z=0., + approximant='SEOBNRv4PHM', f_ref=-1): + """Estimates the final mass and spin from the given initial parameters. + + This uses the fits used by either the NRSur7dq4 or EOBNR models for + converting from initial parameters to final, depending on the + ``approximant`` argument. + + Parameters + ---------- + mass1 : float + The mass of one of the components, in solar masses. + mass2 : float + The mass of the other component, in solar masses. + spin1x : float, optional + The dimensionless x-component of the spin of mass1. Default is 0. + spin1y : float, optional + The dimensionless y-component of the spin of mass1. Default is 0. + spin1z : float, optional + The dimensionless z-component of the spin of mass1. Default is 0. + spin2x : float, optional + The dimensionless x-component of the spin of mass2. Default is 0. + spin2y : float, optional + The dimensionless y-component of the spin of mass2. Default is 0. + spin2z : float, optional + The dimensionless z-component of the spin of mass2. Default is 0. + approximant : str, optional + The waveform approximant to use for the fit function. If "NRSur7dq4", + the NRSur7dq4Remnant fit in lalsimulation will be used. If "SEOBNRv4", + the ``XLALSimIMREOBFinalMassSpin`` function in lalsimulation will be + used. Otherwise, ``XLALSimIMREOBFinalMassSpinPrec`` from lalsimulation + will be used, with the approximant name passed as the approximant + in that function ("SEOBNRv4PHM" will work with this function). + Default is "SEOBNRv4PHM". + f_ref : float, optional + The reference frequency for the spins. Only used by the NRSur7dq4 + fit. Default (-1) will use the default reference frequency for the + approximant. + + Returns + ------- + final_mass : float + The final mass, in solar masses. + final_spin : float + The dimensionless final spin. + """ + args = (mass1, mass2, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z) + args = ensurearray(*args) + input_is_array = args[-1] + origshape = args[0].shape + # flatten inputs for storing results + args = [a.ravel() for a in args[:-1]] + mass1, mass2, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z = args + final_mass = numpy.full(mass1.shape, numpy.nan) + final_spin = numpy.full(mass1.shape, numpy.nan) + for ii in range(final_mass.size): + m1 = float(mass1[ii]) + m2 = float(mass2[ii]) + spin1 = list(map(float, [spin1x[ii], spin1y[ii], spin1z[ii]])) + spin2 = list(map(float, [spin2x[ii], spin2y[ii], spin2z[ii]])) + if approximant == 'NRSur7dq4': + from lalsimulation import nrfits + try: + res = nrfits.eval_nrfit(m1*lal.MSUN_SI, + m2*lal.MSUN_SI, + spin1, spin2, 'NRSur7dq4Remnant', + ['FinalMass', 'FinalSpin'], + f_ref=f_ref) + except RuntimeError: + continue + final_mass[ii] = res['FinalMass'][0] / lal.MSUN_SI + sf = res['FinalSpin'] + final_spin[ii] = (sf**2).sum()**0.5 + if sf[-1] < 0: + final_spin[ii] *= -1 + elif approximant == 'SEOBNRv4': + _, fm, fs = lalsim.SimIMREOBFinalMassSpin( + m1, m2, spin1, spin2, getattr(lalsim, approximant)) + final_mass[ii] = fm * (m1 + m2) + final_spin[ii] = fs + else: + _, fm, fs = lalsim.SimIMREOBFinalMassSpinPrec( + m1, m2, spin1, spin2, getattr(lalsim, approximant)) + final_mass[ii] = fm * (m1 + m2) + final_spin[ii] = fs + final_mass = final_mass.reshape(origshape) + final_spin = final_spin.reshape(origshape) + return (formatreturn(final_mass, input_is_array), + formatreturn(final_spin, input_is_array)) + + +
+[docs] +def final_mass_from_initial(mass1, mass2, spin1x=0., spin1y=0., spin1z=0., + spin2x=0., spin2y=0., spin2z=0., + approximant='SEOBNRv4PHM', f_ref=-1): + """Estimates the final mass from the given initial parameters. + + This uses the fits used by either the NRSur7dq4 or EOBNR models for + converting from initial parameters to final, depending on the + ``approximant`` argument. + + Parameters + ---------- + mass1 : float + The mass of one of the components, in solar masses. + mass2 : float + The mass of the other component, in solar masses. + spin1x : float, optional + The dimensionless x-component of the spin of mass1. Default is 0. + spin1y : float, optional + The dimensionless y-component of the spin of mass1. Default is 0. + spin1z : float, optional + The dimensionless z-component of the spin of mass1. Default is 0. + spin2x : float, optional + The dimensionless x-component of the spin of mass2. Default is 0. + spin2y : float, optional + The dimensionless y-component of the spin of mass2. Default is 0. + spin2z : float, optional + The dimensionless z-component of the spin of mass2. Default is 0. + approximant : str, optional + The waveform approximant to use for the fit function. If "NRSur7dq4", + the NRSur7dq4Remnant fit in lalsimulation will be used. If "SEOBNRv4", + the ``XLALSimIMREOBFinalMassSpin`` function in lalsimulation will be + used. Otherwise, ``XLALSimIMREOBFinalMassSpinPrec`` from lalsimulation + will be used, with the approximant name passed as the approximant + in that function ("SEOBNRv4PHM" will work with this function). + Default is "SEOBNRv4PHM". + f_ref : float, optional + The reference frequency for the spins. Only used by the NRSur7dq4 + fit. Default (-1) will use the default reference frequency for the + approximant. + + Returns + ------- + float + The final mass, in solar masses. + """ + return get_final_from_initial(mass1, mass2, spin1x, spin1y, spin1z, + spin2x, spin2y, spin2z, approximant, + f_ref=f_ref)[0]
+ + + +
+[docs] +def final_spin_from_initial(mass1, mass2, spin1x=0., spin1y=0., spin1z=0., + spin2x=0., spin2y=0., spin2z=0., + approximant='SEOBNRv4PHM', f_ref=-1): + """Estimates the final spin from the given initial parameters. + + This uses the fits used by either the NRSur7dq4 or EOBNR models for + converting from initial parameters to final, depending on the + ``approximant`` argument. + + Parameters + ---------- + mass1 : float + The mass of one of the components, in solar masses. + mass2 : float + The mass of the other component, in solar masses. + spin1x : float, optional + The dimensionless x-component of the spin of mass1. Default is 0. + spin1y : float, optional + The dimensionless y-component of the spin of mass1. Default is 0. + spin1z : float, optional + The dimensionless z-component of the spin of mass1. Default is 0. + spin2x : float, optional + The dimensionless x-component of the spin of mass2. Default is 0. + spin2y : float, optional + The dimensionless y-component of the spin of mass2. Default is 0. + spin2z : float, optional + The dimensionless z-component of the spin of mass2. Default is 0. + approximant : str, optional + The waveform approximant to use for the fit function. If "NRSur7dq4", + the NRSur7dq4Remnant fit in lalsimulation will be used. If "SEOBNRv4", + the ``XLALSimIMREOBFinalMassSpin`` function in lalsimulation will be + used. Otherwise, ``XLALSimIMREOBFinalMassSpinPrec`` from lalsimulation + will be used, with the approximant name passed as the approximant + in that function ("SEOBNRv4PHM" will work with this function). + Default is "SEOBNRv4PHM". + f_ref : float, optional + The reference frequency for the spins. Only used by the NRSur7dq4 + fit. Default (-1) will use the default reference frequency for the + approximant. + + Returns + ------- + float + The dimensionless final spin. + """ + return get_final_from_initial(mass1, mass2, spin1x, spin1y, spin1z, + spin2x, spin2y, spin2z, approximant, + f_ref=f_ref)[1]
+ + + +# +# ============================================================================= +# +# post-Newtonian functions +# +# ============================================================================= +# + +def velocity_to_frequency(v, M): + """ Calculate the gravitational-wave frequency from the + total mass and invariant velocity. + + Parameters + ---------- + v : float + Invariant velocity + M : float + Binary total mass + + Returns + ------- + f : float + Gravitational-wave frequency + """ + return v**(3.0) / (M * lal.MTSUN_SI * lal.PI) + +def frequency_to_velocity(f, M): + """ Calculate the invariant velocity from the total + mass and gravitational-wave frequency. + + Parameters + ---------- + f: float + Gravitational-wave frequency + M: float + Binary total mass + + Returns + ------- + v : float or numpy.array + Invariant velocity + """ + return (lal.PI * M * lal.MTSUN_SI * f)**(1.0/3.0) + + +def f_schwarzchild_isco(M): + """ + Innermost stable circular orbit (ISCO) for a test particle + orbiting a Schwarzschild black hole + + Parameters + ---------- + M : float or numpy.array + Total mass in solar mass units + + Returns + ------- + f : float or numpy.array + Frequency in Hz + """ + return velocity_to_frequency((1.0/6.0)**(0.5), M) + + +# +# ============================================================================ +# +# p-g mode non-linear tide functions +# +# ============================================================================ +# + +def nltides_coefs(amplitude, n, m1, m2): + """Calculate the coefficents needed to compute the + shift in t(f) and phi(f) due to non-linear tides. + + Parameters + ---------- + amplitude: float + Amplitude of effect + n: float + Growth dependence of effect + m1: float + Mass of component 1 + m2: float + Mass of component 2 + + Returns + ------- + f_ref : float + Reference frequency used to define A and n + t_of_f_factor: float + The constant factor needed to compute t(f) + phi_of_f_factor: float + The constant factor needed to compute phi(f) + """ + + # Use 100.0 Hz as a reference frequency + f_ref = 100.0 + + # Calculate chirp mass + mc = mchirp_from_mass1_mass2(m1, m2) + mc *= lal.lal.MSUN_SI + + # Calculate constants in phasing + a = (96./5.) * \ + (lal.lal.G_SI * lal.lal.PI * mc * f_ref / lal.lal.C_SI**3.)**(5./3.) + b = 6. * amplitude + t_of_f_factor = -1./(lal.lal.PI*f_ref) * b/(a*a * (n-4.)) + phi_of_f_factor = -2.*b / (a*a * (n-3.)) + + return f_ref, t_of_f_factor, phi_of_f_factor + + +def nltides_gw_phase_difference(f, f0, amplitude, n, m1, m2): + """Calculate the gravitational-wave phase shift bwtween + f and f_coalescence = infinity due to non-linear tides. + To compute the phase shift between e.g. f_low and f_isco, + call this function twice and compute the difference. + + Parameters + ---------- + f: float or numpy.array + Frequency from which to compute phase + f0: float or numpy.array + Frequency that NL effects switch on + amplitude: float or numpy.array + Amplitude of effect + n: float or numpy.array + Growth dependence of effect + m1: float or numpy.array + Mass of component 1 + m2: float or numpy.array + Mass of component 2 + + Returns + ------- + delta_phi: float or numpy.array + Phase in radians + """ + f, f0, amplitude, n, m1, m2, input_is_array = ensurearray( + f, f0, amplitude, n, m1, m2) + + delta_phi = numpy.zeros(m1.shape) + + f_ref, _, phi_of_f_factor = nltides_coefs(amplitude, n, m1, m2) + + mask = f <= f0 + delta_phi[mask] = - phi_of_f_factor[mask] * (f0[mask]/f_ref)**(n[mask]-3.) 
+ + mask = f > f0 + delta_phi[mask] = - phi_of_f_factor[mask] * (f[mask]/f_ref)**(n[mask]-3.) + + return formatreturn(delta_phi, input_is_array) + + +
+[docs] +def nltides_gw_phase_diff_isco(f_low, f0, amplitude, n, m1, m2): + """Calculate the gravitational-wave phase shift between + f_low and f_isco due to non-linear tides. + + Parameters + ---------- + f_low: float + Frequency from which to compute phase. If the other + arguments are passed as numpy arrays then the value + of f_low is duplicated for all elements in the array + f0: float or numpy.array + Frequency that NL effects switch on + amplitude: float or numpy.array + Amplitude of effect + n: float or numpy.array + Growth dependence of effect + m1: float or numpy.array + Mass of component 1 + m2: float or numpy.array + Mass of component 2 + + Returns + ------- + delta_phi: float or numpy.array + Phase in radians + """ + f0, amplitude, n, m1, m2, input_is_array = ensurearray( + f0, amplitude, n, m1, m2) + + f_low = numpy.zeros(m1.shape) + f_low + + phi_l = nltides_gw_phase_difference( + f_low, f0, amplitude, n, m1, m2) + + f_isco = f_schwarzchild_isco(m1+m2) + + phi_i = nltides_gw_phase_difference( + f_isco, f0, amplitude, n, m1, m2) + + return formatreturn(phi_i - phi_l, input_is_array)
+ + + +__all__ = ['dquadmon_from_lambda', 'lambda_tilde', + 'lambda_from_mass_tov_file', 'primary_mass', + 'secondary_mass', 'mtotal_from_mass1_mass2', + 'q_from_mass1_mass2', 'invq_from_mass1_mass2', + 'eta_from_mass1_mass2', 'mchirp_from_mass1_mass2', + 'mass1_from_mtotal_q', 'mass2_from_mtotal_q', + 'mass1_from_mtotal_eta', 'mass2_from_mtotal_eta', + 'mtotal_from_mchirp_eta', 'mass1_from_mchirp_eta', + 'mass2_from_mchirp_eta', 'mass2_from_mchirp_mass1', + 'mass_from_knownmass_eta', 'mass2_from_mass1_eta', + 'mass1_from_mass2_eta', 'eta_from_q', 'mass1_from_mchirp_q', + 'mass2_from_mchirp_q', 'tau0_from_mtotal_eta', + 'tau3_from_mtotal_eta', 'tau0_from_mass1_mass2', + 'tau0_from_mchirp', 'mchirp_from_tau0', + 'tau3_from_mass1_mass2', 'mtotal_from_tau0_tau3', + 'eta_from_tau0_tau3', 'mass1_from_tau0_tau3', + 'mass2_from_tau0_tau3', 'primary_spin', 'secondary_spin', + 'chi_eff', 'chi_a', 'chi_p', 'phi_a', 'phi_s', + 'primary_xi', 'secondary_xi', + 'xi1_from_spin1x_spin1y', 'xi2_from_mass1_mass2_spin2x_spin2y', + 'chi_perp_from_spinx_spiny', 'chi_perp_from_mass1_mass2_xi2', + 'chi_p_from_xi1_xi2', 'phi_from_spinx_spiny', + 'phi1_from_phi_a_phi_s', 'phi2_from_phi_a_phi_s', + 'spin1z_from_mass1_mass2_chi_eff_chi_a', + 'spin2z_from_mass1_mass2_chi_eff_chi_a', + 'spin1x_from_xi1_phi_a_phi_s', 'spin1y_from_xi1_phi_a_phi_s', + 'spin2x_from_mass1_mass2_xi2_phi_a_phi_s', + 'spin2y_from_mass1_mass2_xi2_phi_a_phi_s', + 'chirp_distance', 'det_tc', 'snr_from_loglr', + 'freq_from_final_mass_spin', 'tau_from_final_mass_spin', + 'final_spin_from_f0_tau', 'final_mass_from_f0_tau', + 'final_mass_from_initial', 'final_spin_from_initial', + 'optimal_dec_from_detector', 'optimal_ra_from_detector', + 'chi_eff_from_spherical', 'chi_p_from_spherical', + 'nltides_gw_phase_diff_isco', 'spin_from_pulsar_freq', + 'freqlmn_from_other_lmn', 'taulmn_from_other_lmn', + 'remnant_mass_from_mass1_mass2_spherical_spin_eos', + 'remnant_mass_from_mass1_mass2_cartesian_spin_eos', + 'lambda1_from_delta_lambda_tilde_lambda_tilde', + 'lambda2_from_delta_lambda_tilde_lambda_tilde', + 'delta_lambda_tilde' + ] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/coordinates/base.html b/latest/html/_modules/pycbc/coordinates/base.html new file mode 100644 index 00000000000..5f5d7df3732 --- /dev/null +++ b/latest/html/_modules/pycbc/coordinates/base.html @@ -0,0 +1,305 @@ + + + + + + pycbc.coordinates.base — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.coordinates.base

+# Copyright (C) 2016 Christopher M. Biwer
+#
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+Base coordinate transformations. This module provides transformations
+between Cartesian and spherical coordinates.
+"""
+import logging
+import numpy
+
+logger = logging.getLogger('pycbc.coordinates.base')
+
+
+
+[docs] +def cartesian_to_spherical_rho(x, y, z): + """ Calculates the magnitude in spherical coordinates from Cartesian + coordinates. + + Parameters + ---------- + x : {numpy.array, float} + X-coordinate. + y : {numpy.array, float} + Y-coordinate. + z : {numpy.array, float} + Z-coordinate. + + Returns + ------- + rho : {numpy.array, float} + The radial amplitude. + """ + return numpy.sqrt(x**2 + y**2 + z**2)
+ + + +
+[docs] +def cartesian_to_spherical_azimuthal(x, y): + """ Calculates the azimuthal angle in spherical coordinates from Cartesian + coordinates. The azimuthal angle is in [0,2*pi]. + + Parameters + ---------- + x : {numpy.array, float} + X-coordinate. + y : {numpy.array, float} + Y-coordinate. + + Returns + ------- + phi : {numpy.array, float} + The azimuthal angle. + """ + y = float(y) if isinstance(y, int) else y + phi = numpy.arctan2(y, x) + return phi % (2 * numpy.pi)
+ + + +
+[docs] +def cartesian_to_spherical_polar(x, y, z): + """ Calculates the polar angle in spherical coordinates from Cartesian + coordinates. The polar angle is in [0,pi]. + + Parameters + ---------- + x : {numpy.array, float} + X-coordinate. + y : {numpy.array, float} + Y-coordinate. + z : {numpy.array, float} + Z-coordinate. + + Returns + ------- + theta : {numpy.array, float} + The polar angle. + """ + rho = cartesian_to_spherical_rho(x, y, z) + if numpy.isscalar(rho): + return numpy.arccos(z / rho) if rho else 0.0 + else: + return numpy.arccos(numpy.divide(z, rho, out=numpy.ones_like(z), + where=rho != 0))
+ + + +
+[docs] +def cartesian_to_spherical(x, y, z): + """ Maps cartesian coordinates (x,y,z) to spherical coordinates + (rho,phi,theta) where phi is in [0,2*pi] and theta is in [0,pi]. + + Parameters + ---------- + x : {numpy.array, float} + X-coordinate. + y : {numpy.array, float} + Y-coordinate. + z : {numpy.array, float} + Z-coordinate. + + Returns + ------- + rho : {numpy.array, float} + The radial amplitude. + phi : {numpy.array, float} + The azimuthal angle. + theta : {numpy.array, float} + The polar angle. + """ + rho = cartesian_to_spherical_rho(x, y, z) + phi = cartesian_to_spherical_azimuthal(x, y) + theta = cartesian_to_spherical_polar(x, y, z) + return rho, phi, theta
+ + + +
+[docs] +def spherical_to_cartesian(rho, phi, theta): + """ Maps spherical coordinates (rho,phi,theta) to cartesian coordinates + (x,y,z) where phi is in [0,2*pi] and theta is in [0,pi]. + + Parameters + ---------- + rho : {numpy.array, float} + The radial amplitude. + phi : {numpy.array, float} + The azimuthal angle. + theta : {numpy.array, float} + The polar angle. + + Returns + ------- + x : {numpy.array, float} + X-coordinate. + y : {numpy.array, float} + Y-coordinate. + z : {numpy.array, float} + Z-coordinate. + """ + x = rho * numpy.cos(phi) * numpy.sin(theta) + y = rho * numpy.sin(phi) * numpy.sin(theta) + z = rho * numpy.cos(theta) + return x, y, z
+ + + +__all__ = ['cartesian_to_spherical_rho', 'cartesian_to_spherical_azimuthal', + 'cartesian_to_spherical_polar', 'cartesian_to_spherical', + 'spherical_to_cartesian', + ] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/coordinates/space.html b/latest/html/_modules/pycbc/coordinates/space.html new file mode 100644 index 00000000000..ffe3611a9d4 --- /dev/null +++ b/latest/html/_modules/pycbc/coordinates/space.html @@ -0,0 +1,1131 @@ + + + + + + pycbc.coordinates.space — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.coordinates.space

+# Copyright (C) 2023  Shichao Wu, Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides coordinate transformations related to space-borne
+detectors, such as coordinate transformations between space-borne detectors
+and ground-based detectors. Note that the LISA orbit currently used in this
+module is circular; it will need to be replaced by a more realistic and
+general orbit model in the near future.
+"""
+
+import logging
+import numpy as np
+
+from scipy.spatial.transform import Rotation
+from scipy.optimize import fsolve
+from astropy import units
+from astropy.constants import c, au
+from astropy.time import Time
+from astropy.coordinates import BarycentricMeanEcliptic, PrecessedGeocentric
+from astropy.coordinates import get_body_barycentric
+from astropy.coordinates import SkyCoord
+from astropy.coordinates.builtin_frames import ecliptic_transforms
+
+logger = logging.getLogger('pycbc.coordinates.space')
+
+# This constant makes sure LISA is behind the Earth by 19-23 degrees.
+# Making this a stand-alone constant will also make it callable by
+# the waveform plugin and PE config file. In the unit of 's'.
+TIME_OFFSET_20_DEGREES = 7365189.431698299
+
+# "rotation_matrix_ssb_to_lisa" and "lisa_position_ssb" should be
+# more general for other detectors in the near future.
+
+
+
+[docs] +def rotation_matrix_ssb_to_lisa(alpha): + """ The rotation matrix (of frame basis) from SSB frame to LISA frame. + This function assumes the angle between LISA plane and the ecliptic + is 60 degrees, and the period of LISA's self-rotation and orbital + revolution is both one year. + + Parameters + ---------- + alpha : float + The angular displacement of LISA in SSB frame. + In the unit of 'radian'. + + Returns + ------- + r_total : numpy.array + A 3x3 rotation matrix from SSB frame to LISA frame. + """ + r = Rotation.from_rotvec([ + [0, 0, alpha], + [0, -np.pi/3, 0], + [0, 0, -alpha] + ]).as_matrix() + r_total = np.array(r[0]) @ np.array(r[1]) @ np.array(r[2]) + + return r_total
+ + + +
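A quick sanity-check sketch for the matrix returned above (alpha is an arbitrary illustrative angle; the import path assumes pycbc.coordinates.space):

    import numpy as np
    from pycbc.coordinates.space import rotation_matrix_ssb_to_lisa

    r = rotation_matrix_ssb_to_lisa(alpha=0.5)
    # A proper rotation matrix: orthonormal, with unit determinant.
    assert np.allclose(r @ r.T, np.eye(3))
    assert np.isclose(np.linalg.det(r), 1.0)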
+[docs] +def lisa_position_ssb(t_lisa, t0=TIME_OFFSET_20_DEGREES): + """ Calculating the position vector and angular displacement of LISA + in the SSB frame, at a given time. This function assumes LISA's barycenter + is orbiting around a circular orbit within the ecliptic behind the Earth. + The period of it is one year. + + Parameters + ---------- + t_lisa : float + The time when a GW signal arrives at the origin of LISA frame, + or any other time you want. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + + Returns + ------- + (p, alpha) : tuple + p : numpy.array + The position vector of LISA in the SSB frame. In the unit of 'm'. + alpha : float + The angular displacement of LISA in the SSB frame. + In the unit of 'radian'. + """ + OMEGA_0 = 1.99098659277e-7 + R_ORBIT = au.value + alpha = np.mod(OMEGA_0 * (t_lisa + t0), 2*np.pi) + p = np.array([[R_ORBIT * np.cos(alpha)], + [R_ORBIT * np.sin(alpha)], + [0]], dtype=object) + return (p, alpha)
+ + + +
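A small sketch of the orbit model above: the guiding centre sits on a circle of radius 1 au in the ecliptic plane (the GPS-like time is illustrative):

    import numpy as np
    from astropy.constants import au
    from pycbc.coordinates.space import lisa_position_ssb

    p, alpha = lisa_position_ssb(1126259462.4)
    x, y, z = (float(v) for v in p.ravel())
    assert np.isclose(np.sqrt(x**2 + y**2 + z**2), au.value)  # 1 au orbital radius
    assert 0.0 <= alpha < 2 * np.pi                           # wrapped angular phase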
+[docs] +def localization_to_propagation_vector(longitude, latitude, + use_astropy=True, frame=None): + """ Converting the sky localization to the corresponding + propagation unit vector of a GW signal. + + Parameters + ---------- + longitude : float + The longitude, in the unit of 'radian'. + latitude : float + The latitude, in the unit of 'radian'. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + frame : astropy.coordinates + The frame from astropy.coordinates if use_astropy is True, + the default is None. + + Returns + ------- + [[x], [y], [z]] : numpy.array + The propagation unit vector of that GW signal. + """ + if use_astropy: + x = -frame.cartesian.x.value + y = -frame.cartesian.y.value + z = -frame.cartesian.z.value + else: + x = -np.cos(latitude) * np.cos(longitude) + y = -np.cos(latitude) * np.sin(longitude) + z = -np.sin(latitude) + v = np.array([[x], [y], [z]]) + + return v / np.linalg.norm(v)
+ + + +
+[docs] +def propagation_vector_to_localization(k, use_astropy=True, frame=None): + """ Converting the propagation unit vector to the corresponding + sky localization of a GW signal. + + Parameters + ---------- + k : numpy.array + The propagation unit vector of a GW signal. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + frame : astropy.coordinates + The frame from astropy.coordinates if use_astropy is True, + the default is None. + + Returns + ------- + (longitude, latitude) : tuple + The sky localization of that GW signal. + """ + if use_astropy: + try: + longitude = frame.lon.rad + latitude = frame.lat.rad + except AttributeError: + longitude = frame.ra.rad + latitude = frame.dec.rad + else: + # latitude already within [-pi/2, pi/2] + latitude = np.float64(np.arcsin(-k[2])) + longitude = np.float64(np.arctan2(-k[1]/np.cos(latitude), + -k[0]/np.cos(latitude))) + # longitude should within [0, 2*pi) + longitude = np.mod(longitude, 2*np.pi) + + return (longitude, latitude)
+ + + +
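A round-trip sketch of the two direction conversions above, using the analytic (use_astropy=False) path; the sky location is illustrative:

    import numpy as np
    from pycbc.coordinates.space import (localization_to_propagation_vector,
                                         propagation_vector_to_localization)

    lon, lat = 1.2, 0.4   # ecliptic longitude/latitude in radians
    k = localization_to_propagation_vector(lon, lat, use_astropy=False)
    assert np.isclose(np.linalg.norm(k), 1.0)        # unit propagation vector
    lon2, lat2 = propagation_vector_to_localization(k, use_astropy=False)
    assert np.allclose([lon2, lat2], [lon, lat])     # localization is recovered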
+[docs] +def polarization_newframe(polarization, k, rotation_matrix, use_astropy=True, + old_frame=None, new_frame=None): + """ Converting a polarization angle from a frame to a new frame + by using rotation matrix method. + + Parameters + ---------- + polarization : float + The polarization angle in the old frame, in the unit of 'radian'. + k : numpy.array + The propagation unit vector of a GW signal in the old frame. + rotation_matrix : numpy.array + The rotation matrix (of frame basis) from the old frame to + the new frame. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + old_frame : astropy.coordinates + The frame from astropy.coordinates if use_astropy is True, + the default is None. + new_frame : astropy.coordinates + The frame from astropy.coordinates if use_astropy is True, + the default is None. The new frame for the new polarization + angle. + + Returns + ------- + polarization_new_frame : float + The polarization angle in the new frame of that GW signal. + """ + longitude, _ = propagation_vector_to_localization( + k, use_astropy, old_frame) + u = np.array([[np.sin(longitude)], [-np.cos(longitude)], [0]]) + rotation_vector = polarization * k + rotation_polarization = Rotation.from_rotvec(rotation_vector.T[0]) + p = rotation_polarization.apply(u.T[0]).reshape(3, 1) + p_newframe = rotation_matrix.T @ p + k_newframe = rotation_matrix.T @ k + longitude_newframe, latitude_newframe = \ + propagation_vector_to_localization(k_newframe, use_astropy, new_frame) + u_newframe = np.array([[np.sin(longitude_newframe)], + [-np.cos(longitude_newframe)], [0]]) + v_newframe = np.array([ + [-np.sin(latitude_newframe) * np.cos(longitude_newframe)], + [-np.sin(latitude_newframe) * np.sin(longitude_newframe)], + [np.cos(latitude_newframe)]]) + p_dot_u_newframe = np.vdot(p_newframe, u_newframe) + p_dot_v_newframe = np.vdot(p_newframe, v_newframe) + polarization_new_frame = np.arctan2(p_dot_v_newframe, p_dot_u_newframe) + polarization_new_frame = np.mod(polarization_new_frame, 2*np.pi) + # avoid the round error + if polarization_new_frame == 2*np.pi: + polarization_new_frame = 0 + + return polarization_new_frame
+ + + +
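A minimal sketch of how the function above can be combined with the helpers defined earlier to carry a polarization angle from the SSB frame into the LISA frame (all angle values are illustrative):

    import numpy as np
    from pycbc.coordinates.space import (localization_to_propagation_vector,
                                         polarization_newframe,
                                         rotation_matrix_ssb_to_lisa)

    k_ssb = localization_to_propagation_vector(1.2, 0.4, use_astropy=False)
    rot = rotation_matrix_ssb_to_lisa(0.5)
    psi_lisa = polarization_newframe(0.7, k_ssb, rot, use_astropy=False)
    assert 0.0 <= psi_lisa < 2 * np.pi   # result is wrapped into [0, 2*pi)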
+[docs] +def t_lisa_from_ssb(t_ssb, longitude_ssb, latitude_ssb, + t0=TIME_OFFSET_20_DEGREES): + """ Calculating the time when a GW signal arrives at the barycenter + of LISA, by using the time and sky localization in SSB frame. + + Parameters + ---------- + t_ssb : float + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + + Returns + ------- + t_lisa : float + The time when a GW signal arrives at the origin of LISA frame. + """ + k = localization_to_propagation_vector( + longitude_ssb, latitude_ssb, use_astropy=False) + + def equation(t_lisa): + # LISA is moving, when GW arrives at LISA center, + # time is t_lisa, not t_ssb. + p = lisa_position_ssb(t_lisa, t0)[0] + return t_lisa - t_ssb - np.vdot(k, p) / c.value + + return fsolve(equation, t_ssb)[0]
+ + + +
+[docs] +def t_ssb_from_t_lisa(t_lisa, longitude_ssb, latitude_ssb, + t0=TIME_OFFSET_20_DEGREES): + """ Calculating the time when a GW signal arrives at the barycenter + of SSB, by using the time in LISA frame and sky localization in SSB frame. + + Parameters + ---------- + t_lisa : float + The time when a GW signal arrives at the origin of LISA frame. + In the unit of 's'. + longitude_ssb : float + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + + Returns + ------- + t_ssb : float + The time when a GW signal arrives at the origin of SSB frame. + """ + k = localization_to_propagation_vector( + longitude_ssb, latitude_ssb, use_astropy=False) + # LISA is moving, when GW arrives at LISA center, + # time is t_lisa, not t_ssb. + p = lisa_position_ssb(t_lisa, t0)[0] + + def equation(t_ssb): + return t_lisa - t_ssb - np.vdot(k, p) / c.value + + return fsolve(equation, t_lisa)[0]
+ + + +
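A sketch of the two time conversions above; the GPS time and sky location are illustrative. The LISA-SSB offset is bounded by the light travel time across 1 au (about 500 s), and applying the two functions in sequence should recover the original time up to the root-finder tolerance:

    from pycbc.coordinates.space import t_lisa_from_ssb, t_ssb_from_t_lisa

    t_ssb = 1126259462.4          # illustrative GPS time
    lon, lat = 1.2, 0.4           # SSB-frame ecliptic sky location in radians
    t_lisa = t_lisa_from_ssb(t_ssb, lon, lat)
    assert abs(t_lisa - t_ssb) < 600.0            # within ~1 au light travel time
    t_back = t_ssb_from_t_lisa(t_lisa, lon, lat)  # ~t_ssb, up to solver tolerance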
+[docs] +def ssb_to_lisa(t_ssb, longitude_ssb, latitude_ssb, polarization_ssb, + t0=TIME_OFFSET_20_DEGREES): + """ Converting the arrive time, the sky localization, and the polarization + from the SSB frame to the LISA frame. + + Parameters + ---------- + t_ssb : float or numpy.array + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float or numpy.array + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float or numpy.array + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + polarization_ssb : float or numpy.array + The polarization angle of a GW signal in SSB frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + + Returns + ------- + (t_lisa, longitude_lisa, latitude_lisa, polarization_lisa) : tuple + t_lisa : float or numpy.array + The time when a GW signal arrives at the origin of LISA frame. + In the unit of 's'. + longitude_lisa : float or numpy.array + The longitude of a GW signal in LISA frame, in the unit of 'radian'. + latitude_lisa : float or numpy.array + The latitude of a GW signal in LISA frame, in the unit of 'radian'. + polarization_lisa : float or numpy.array + The polarization angle of a GW signal in LISA frame. + In the unit of 'radian'. + """ + if not isinstance(t_ssb, np.ndarray): + t_ssb = np.array([t_ssb]) + if not isinstance(longitude_ssb, np.ndarray): + longitude_ssb = np.array([longitude_ssb]) + if not isinstance(latitude_ssb, np.ndarray): + latitude_ssb = np.array([latitude_ssb]) + if not isinstance(polarization_ssb, np.ndarray): + polarization_ssb = np.array([polarization_ssb]) + num = len(t_ssb) + t_lisa, longitude_lisa = np.zeros(num), np.zeros(num) + latitude_lisa, polarization_lisa = np.zeros(num), np.zeros(num) + + for i in range(num): + if longitude_ssb[i] < 0 or longitude_ssb[i] >= 2*np.pi: + raise ValueError("Longitude should within [0, 2*pi).") + if latitude_ssb[i] < -np.pi/2 or latitude_ssb[i] > np.pi/2: + raise ValueError("Latitude should within [-pi/2, pi/2].") + if polarization_ssb[i] < 0 or polarization_ssb[i] >= 2*np.pi: + raise ValueError("Polarization angle should within [0, 2*pi).") + t_lisa[i] = t_lisa_from_ssb(t_ssb[i], longitude_ssb[i], + latitude_ssb[i], t0) + k_ssb = localization_to_propagation_vector( + longitude_ssb[i], latitude_ssb[i], use_astropy=False) + # Although t_lisa calculated above using the corrected LISA position + # vector by adding t0, it corresponds to the true t_ssb, not t_ssb+t0, + # we need to include t0 again to correct LISA position. + alpha = lisa_position_ssb(t_lisa[i], t0)[1] + rotation_matrix_lisa = rotation_matrix_ssb_to_lisa(alpha) + k_lisa = rotation_matrix_lisa.T @ k_ssb + longitude_lisa[i], latitude_lisa[i] = \ + propagation_vector_to_localization(k_lisa, use_astropy=False) + polarization_lisa[i] = polarization_newframe( + polarization_ssb[i], k_ssb, rotation_matrix_lisa, + use_astropy=False) + + if num == 1: + params_lisa = (t_lisa[0], longitude_lisa[0], + latitude_lisa[0], polarization_lisa[0]) + else: + params_lisa = (t_lisa, longitude_lisa, + latitude_lisa, polarization_lisa) + + return params_lisa
+ + + +
+[docs] +def lisa_to_ssb(t_lisa, longitude_lisa, latitude_lisa, polarization_lisa, + t0=TIME_OFFSET_20_DEGREES): + """ Converting the arrive time, the sky localization, and the polarization + from the LISA frame to the SSB frame. + + Parameters + ---------- + t_lisa : float or numpy.array + The time when a GW signal arrives at the origin of LISA frame. + In the unit of 's'. + longitude_lisa : float or numpy.array + The longitude of a GW signal in LISA frame, in the unit of 'radian'. + latitude_lisa : float or numpy.array + The latitude of a GW signal in LISA frame, in the unit of 'radian'. + polarization_lisa : float or numpy.array + The polarization angle of a GW signal in LISA frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + + Returns + ------- + (t_ssb, longitude_ssb, latitude_ssb, polarization_ssb) : tuple + t_ssb : float or numpy.array + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float or numpy.array + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float or numpy.array + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + polarization_ssb : float or numpy.array + The polarization angle of a GW signal in SSB frame. + In the unit of 'radian'. + """ + if not isinstance(t_lisa, np.ndarray): + t_lisa = np.array([t_lisa]) + if not isinstance(longitude_lisa, np.ndarray): + longitude_lisa = np.array([longitude_lisa]) + if not isinstance(latitude_lisa, np.ndarray): + latitude_lisa = np.array([latitude_lisa]) + if not isinstance(polarization_lisa, np.ndarray): + polarization_lisa = np.array([polarization_lisa]) + num = len(t_lisa) + t_ssb, longitude_ssb = np.zeros(num), np.zeros(num) + latitude_ssb, polarization_ssb = np.zeros(num), np.zeros(num) + + for i in range(num): + if longitude_lisa[i] < 0 or longitude_lisa[i] >= 2*np.pi: + raise ValueError("Longitude should within [0, 2*pi).") + if latitude_lisa[i] < -np.pi/2 or latitude_lisa[i] > np.pi/2: + raise ValueError("Latitude should within [-pi/2, pi/2].") + if polarization_lisa[i] < 0 or polarization_lisa[i] >= 2*np.pi: + raise ValueError("Polarization angle should within [0, 2*pi).") + k_lisa = localization_to_propagation_vector( + longitude_lisa[i], latitude_lisa[i], use_astropy=False) + alpha = lisa_position_ssb(t_lisa[i], t0)[1] + rotation_matrix_lisa = rotation_matrix_ssb_to_lisa(alpha) + k_ssb = rotation_matrix_lisa @ k_lisa + longitude_ssb[i], latitude_ssb[i] = \ + propagation_vector_to_localization(k_ssb, use_astropy=False) + t_ssb[i] = t_ssb_from_t_lisa(t_lisa[i], longitude_ssb[i], + latitude_ssb[i], t0) + polarization_ssb[i] = polarization_newframe( + polarization_lisa[i], k_lisa, rotation_matrix_lisa.T, + use_astropy=False) + + if num == 1: + params_ssb = (t_ssb[0], longitude_ssb[0], + latitude_ssb[0], polarization_ssb[0]) + else: + params_ssb = (t_ssb, longitude_ssb, + latitude_ssb, polarization_ssb) + + return params_ssb
+ + + +
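A round-trip sketch of the two frame conversions above (the time, sky location and polarization are illustrative and lie inside the ranges the functions validate):

    from pycbc.coordinates.space import ssb_to_lisa, lisa_to_ssb

    params_ssb = (1126259462.4, 1.2, 0.4, 0.7)   # t, longitude, latitude, psi
    t_lisa, lon_lisa, lat_lisa, psi_lisa = ssb_to_lisa(*params_ssb)
    # Mapping back should recover params_ssb up to the fsolve tolerance.
    recovered = lisa_to_ssb(t_lisa, lon_lisa, lat_lisa, psi_lisa)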
+[docs] +def rotation_matrix_ssb_to_geo(epsilon=np.deg2rad(23.439281)): + """ The rotation matrix (of frame basis) from SSB frame to + geocentric frame. + + Parameters + ---------- + epsilon : float + The Earth's axial tilt (obliquity), in the unit of 'radian'. + + Returns + ------- + r : numpy.array + A 3x3 rotation matrix from SSB frame to geocentric frame. + """ + r = Rotation.from_rotvec([ + [-epsilon, 0, 0] + ]).as_matrix() + + return np.array(r[0])
+ + + +
+[docs] +def earth_position_ssb(t_geo): + """ Calculating the position vector and angular displacement of the Earth + in the SSB frame, at a given time. By using Astropy. + + Parameters + ---------- + t_geo : float + The time when a GW signal arrives at the origin of geocentric frame, + or any other time you want. + + Returns + ------- + (p, alpha) : tuple + p : numpy.array + The position vector of the Earth in the SSB frame. In the unit of 'm'. + alpha : float + The angular displacement of the Earth in the SSB frame. + In the unit of 'radian'. + """ + t = Time(t_geo, format='gps') + pos = get_body_barycentric('earth', t) + # BarycentricMeanEcliptic doesn't have obstime attribute, + # it's a good inertial frame, but ICRS is not. + icrs_coord = SkyCoord(pos, frame='icrs', obstime=t) + bme_coord = icrs_coord.transform_to( + BarycentricMeanEcliptic(equinox='J2000')) + x = bme_coord.cartesian.x.to(units.m).value + y = bme_coord.cartesian.y.to(units.m).value + z = bme_coord.cartesian.z.to(units.m).value + p = np.array([[x], [y], [z]]) + alpha = bme_coord.lon.rad + + return (p, alpha)
+ + + +
+[docs] +def t_geo_from_ssb(t_ssb, longitude_ssb, latitude_ssb, + use_astropy=True, frame=None): + """ Calculating the time when a GW signal arrives at the barycenter + of the Earth, by using the time and sky localization in SSB frame. + + Parameters + ---------- + t_ssb : float + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + + Returns + ------- + t_geo : float + The time when a GW signal arrives at the origin of geocentric frame. + """ + k = localization_to_propagation_vector( + longitude_ssb, latitude_ssb, use_astropy, frame) + + def equation(t_geo): + # Earth is moving, when GW arrives at Earth center, + # time is t_geo, not t_ssb. + p = earth_position_ssb(t_geo)[0] + return t_geo - t_ssb - np.vdot(k, p) / c.value + + return fsolve(equation, t_ssb)[0]
+ + + +
+[docs] +def t_ssb_from_t_geo(t_geo, longitude_ssb, latitude_ssb, + use_astropy=True, frame=None): + """ Calculating the time when a GW signal arrives at the barycenter + of SSB, by using the time in geocentric frame and sky localization + in SSB frame. + + Parameters + ---------- + t_geo : float + The time when a GW signal arrives at the origin of geocentric frame. + In the unit of 's'. + longitude_ssb : float + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + + Returns + ------- + t_ssb : float + The time when a GW signal arrives at the origin of SSB frame. + """ + k = localization_to_propagation_vector( + longitude_ssb, latitude_ssb, use_astropy, frame) + # Earth is moving, when GW arrives at Earth center, + # time is t_geo, not t_ssb. + p = earth_position_ssb(t_geo)[0] + + def equation(t_ssb): + return t_geo - t_ssb - np.vdot(k, p) / c.value + + return fsolve(equation, t_geo)[0]
+ + + +
+[docs] +def ssb_to_geo(t_ssb, longitude_ssb, latitude_ssb, polarization_ssb, + use_astropy=True): + """ Converting the arrive time, the sky localization, and the polarization + from the SSB frame to the geocentric frame. + + Parameters + ---------- + t_ssb : float or numpy.array + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float or numpy.array + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float or numpy.array + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + polarization_ssb : float or numpy.array + The polarization angle of a GW signal in SSB frame. + In the unit of 'radian'. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + + Returns + ------- + (t_geo, longitude_geo, latitude_geo, polarization_geo) : tuple + t_geo : float or numpy.array + The time when a GW signal arrives at the origin of geocentric frame. + In the unit of 's'. + longitude_geo : float or numpy.array + The longitude of a GW signal in geocentric frame. + In the unit of 'radian'. + latitude_geo : float or numpy.array + The latitude of a GW signal in geocentric frame. + In the unit of 'radian'. + polarization_geo : float or numpy.array + The polarization angle of a GW signal in geocentric frame. + In the unit of 'radian'. + """ + if not isinstance(t_ssb, np.ndarray): + t_ssb = np.array([t_ssb]) + if not isinstance(longitude_ssb, np.ndarray): + longitude_ssb = np.array([longitude_ssb]) + if not isinstance(latitude_ssb, np.ndarray): + latitude_ssb = np.array([latitude_ssb]) + if not isinstance(polarization_ssb, np.ndarray): + polarization_ssb = np.array([polarization_ssb]) + num = len(t_ssb) + t_geo = np.full(num, np.nan) + longitude_geo = np.full(num, np.nan) + latitude_geo = np.full(num, np.nan) + polarization_geo = np.full(num, np.nan) + + for i in range(num): + if longitude_ssb[i] < 0 or longitude_ssb[i] >= 2*np.pi: + raise ValueError("Longitude should within [0, 2*pi).") + if latitude_ssb[i] < -np.pi/2 or latitude_ssb[i] > np.pi/2: + raise ValueError("Latitude should within [-pi/2, pi/2].") + if polarization_ssb[i] < 0 or polarization_ssb[i] >= 2*np.pi: + raise ValueError("Polarization angle should within [0, 2*pi).") + + if use_astropy: + # BarycentricMeanEcliptic doesn't have obstime attribute, + # it's a good inertial frame, but PrecessedGeocentric is not. 
+ bme_coord = BarycentricMeanEcliptic( + lon=longitude_ssb[i]*units.radian, + lat=latitude_ssb[i]*units.radian, + equinox='J2000') + t_geo[i] = t_geo_from_ssb(t_ssb[i], longitude_ssb[i], + latitude_ssb[i], use_astropy, bme_coord) + geo_sky = bme_coord.transform_to(PrecessedGeocentric( + equinox='J2000', obstime=Time(t_geo[i], format='gps'))) + longitude_geo[i] = geo_sky.ra.rad + latitude_geo[i] = geo_sky.dec.rad + k_geo = localization_to_propagation_vector( + longitude_geo[i], latitude_geo[i], + use_astropy, geo_sky) + k_ssb = localization_to_propagation_vector( + None, None, use_astropy, bme_coord) + rotation_matrix_geo = \ + ecliptic_transforms.icrs_to_baryecliptic( + from_coo=None, + to_frame=BarycentricMeanEcliptic(equinox='J2000')) + polarization_geo[i] = polarization_newframe( + polarization_ssb[i], k_ssb, + rotation_matrix_geo, use_astropy, + old_frame=bme_coord, + new_frame=geo_sky) + else: + t_geo[i] = t_geo_from_ssb(t_ssb[i], longitude_ssb[i], + latitude_ssb[i], use_astropy) + rotation_matrix_geo = rotation_matrix_ssb_to_geo() + k_ssb = localization_to_propagation_vector( + longitude_ssb[i], latitude_ssb[i], + use_astropy) + k_geo = rotation_matrix_geo.T @ k_ssb + longitude_geo[i], latitude_geo[i] = \ + propagation_vector_to_localization(k_geo, use_astropy) + polarization_geo[i] = polarization_newframe( + polarization_ssb[i], k_ssb, + rotation_matrix_geo, use_astropy) + + # As mentioned in LDC manual, the p,q vectors are opposite between + # LDC and LAL conventions, see Sec 4.1.5 in <LISA-LCST-SGS-MAN-001>. + polarization_geo[i] = np.mod(polarization_geo[i]+np.pi, 2*np.pi) + + if num == 1: + params_geo = (t_geo[0], longitude_geo[0], + latitude_geo[0], polarization_geo[0]) + else: + params_geo = (t_geo, longitude_geo, + latitude_geo, polarization_geo) + + return params_geo
+ + + +
+[docs] +def geo_to_ssb(t_geo, longitude_geo, latitude_geo, polarization_geo, + use_astropy=True): + """ Converting the arrive time, the sky localization, and the polarization + from the geocentric frame to the SSB frame. + + Parameters + ---------- + t_geo : float or numpy.array + The time when a GW signal arrives at the origin of geocentric frame. + In the unit of 's'. + longitude_geo : float or numpy.array + The longitude of a GW signal in geocentric frame. + In the unit of 'radian'. + latitude_geo : float or numpy.array + The latitude of a GW signal in geocentric frame. + In the unit of 'radian'. + polarization_geo : float or numpy.array + The polarization angle of a GW signal in geocentric frame. + In the unit of 'radian'. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + + Returns + ------- + (t_ssb, longitude_ssb, latitude_ssb, polarization_ssb) : tuple + t_ssb : float or numpy.array + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float or numpy.array + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float or numpy.array + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + polarization_ssb : float or numpy.array + The polarization angle of a GW signal in SSB frame. + In the unit of 'radian'. + """ + if not isinstance(t_geo, np.ndarray): + t_geo = np.array([t_geo]) + if not isinstance(longitude_geo, np.ndarray): + longitude_geo = np.array([longitude_geo]) + if not isinstance(latitude_geo, np.ndarray): + latitude_geo = np.array([latitude_geo]) + if not isinstance(polarization_geo, np.ndarray): + polarization_geo = np.array([polarization_geo]) + num = len(t_geo) + t_ssb = np.full(num, np.nan) + longitude_ssb = np.full(num, np.nan) + latitude_ssb = np.full(num, np.nan) + polarization_ssb = np.full(num, np.nan) + + for i in range(num): + if longitude_geo[i] < 0 or longitude_geo[i] >= 2*np.pi: + raise ValueError("Longitude should within [0, 2*pi).") + if latitude_geo[i] < -np.pi/2 or latitude_geo[i] > np.pi/2: + raise ValueError("Latitude should within [-pi/2, pi/2].") + if polarization_geo[i] < 0 or polarization_geo[i] >= 2*np.pi: + raise ValueError("Polarization angle should within [0, 2*pi).") + + if use_astropy: + # BarycentricMeanEcliptic doesn't have obstime attribute, + # it's a good inertial frame, but PrecessedGeocentric is not. 
+ geo_coord = PrecessedGeocentric( + ra=longitude_geo[i]*units.radian, + dec=latitude_geo[i]*units.radian, + equinox='J2000', + obstime=Time(t_geo[i], format='gps')) + ssb_sky = geo_coord.transform_to( + BarycentricMeanEcliptic(equinox='J2000')) + longitude_ssb[i] = ssb_sky.lon.rad + latitude_ssb[i] = ssb_sky.lat.rad + k_ssb = localization_to_propagation_vector( + longitude_ssb[i], latitude_ssb[i], + use_astropy, ssb_sky) + k_geo = localization_to_propagation_vector( + None, None, use_astropy, geo_coord) + rotation_matrix_geo = \ + ecliptic_transforms.icrs_to_baryecliptic( + from_coo=None, + to_frame=BarycentricMeanEcliptic(equinox='J2000')) + t_ssb[i] = t_ssb_from_t_geo(t_geo[i], longitude_ssb[i], + latitude_ssb[i], use_astropy, + ssb_sky) + polarization_ssb[i] = polarization_newframe( + polarization_geo[i], k_geo, + rotation_matrix_geo.T, + use_astropy, + old_frame=geo_coord, + new_frame=ssb_sky) + else: + rotation_matrix_geo = rotation_matrix_ssb_to_geo() + k_geo = localization_to_propagation_vector( + longitude_geo[i], latitude_geo[i], use_astropy) + k_ssb = rotation_matrix_geo @ k_geo + longitude_ssb[i], latitude_ssb[i] = \ + propagation_vector_to_localization(k_ssb, use_astropy) + t_ssb[i] = t_ssb_from_t_geo(t_geo[i], longitude_ssb[i], + latitude_ssb[i], use_astropy) + polarization_ssb[i] = polarization_newframe( + polarization_geo[i], k_geo, + rotation_matrix_geo.T, use_astropy) + + # As mentioned in LDC manual, the p,q vectors are opposite between + # LDC and LAL conventions, see Sec 4.1.5 in <LISA-LCST-SGS-MAN-001>. + polarization_ssb[i] = np.mod(polarization_ssb[i]-np.pi, 2*np.pi) + + if num == 1: + params_ssb = (t_ssb[0], longitude_ssb[0], + latitude_ssb[0], polarization_ssb[0]) + else: + params_ssb = (t_ssb, longitude_ssb, + latitude_ssb, polarization_ssb) + + return params_ssb
+ + + +
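A matching sketch for the SSB/geocentric pair above. Using use_astropy=False exercises the analytic rotation about the ecliptic (the Earth's position still comes from astropy's built-in ephemeris); the parameter values are illustrative:

    from pycbc.coordinates.space import ssb_to_geo, geo_to_ssb

    t_geo, ra_geo, dec_geo, psi_geo = ssb_to_geo(
        1126259462.4, 1.2, 0.4, 0.7, use_astropy=False)
    # Converting back should recover the SSB-frame inputs up to solver tolerance.
    params_ssb = geo_to_ssb(t_geo, ra_geo, dec_geo, psi_geo, use_astropy=False)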
+[docs] +def lisa_to_geo(t_lisa, longitude_lisa, latitude_lisa, polarization_lisa, + t0=TIME_OFFSET_20_DEGREES, use_astropy=True): + """ Converting the arrive time, the sky localization, and the polarization + from the LISA frame to the geocentric frame. + + Parameters + ---------- + t_lisa : float or numpy.array + The time when a GW signal arrives at the origin of LISA frame. + In the unit of 's'. + longitude_lisa : float or numpy.array + The longitude of a GW signal in LISA frame, in the unit of 'radian'. + latitude_lisa : float or numpy.array + The latitude of a GW signal in LISA frame, in the unit of 'radian'. + polarization_lisa : float or numpy.array + The polarization angle of a GW signal in LISA frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + + Returns + ------- + (t_geo, longitude_geo, latitude_geo, polarization_geo) : tuple + t_geo : float or numpy.array + The time when a GW signal arrives at the origin of geocentric frame. + In the unit of 's'. + longitude_geo : float or numpy.array + The ecliptic longitude of a GW signal in geocentric frame. + In the unit of 'radian'. + latitude_geo : float or numpy.array + The ecliptic latitude of a GW signal in geocentric frame. + In the unit of 'radian'. + polarization_geo : float or numpy.array + The polarization angle of a GW signal in geocentric frame. + In the unit of 'radian'. + """ + t_ssb, longitude_ssb, latitude_ssb, polarization_ssb = lisa_to_ssb( + t_lisa, longitude_lisa, latitude_lisa, polarization_lisa, t0) + t_geo, longitude_geo, latitude_geo, polarization_geo = ssb_to_geo( + t_ssb, longitude_ssb, latitude_ssb, polarization_ssb, use_astropy) + + return (t_geo, longitude_geo, latitude_geo, polarization_geo)
+ + + +
+[docs] +def geo_to_lisa(t_geo, longitude_geo, latitude_geo, polarization_geo, + t0=TIME_OFFSET_20_DEGREES, use_astropy=True): + """ Converting the arrive time, the sky localization, and the polarization + from the geocentric frame to the LISA frame. + + Parameters + ---------- + t_geo : float or numpy.array + The time when a GW signal arrives at the origin of geocentric frame. + In the unit of 's'. + longitude_geo : float or numpy.array + The longitude of a GW signal in geocentric frame. + In the unit of 'radian'. + latitude_geo : float or numpy.array + The latitude of a GW signal in geocentric frame. + In the unit of 'radian'. + polarization_geo : float or numpy.array + The polarization angle of a GW signal in geocentric frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + + Returns + ------- + (t_lisa, longitude_lisa, latitude_lisa, polarization_lisa) : tuple + t_lisa : float or numpy.array + The time when a GW signal arrives at the origin of LISA frame. + In the unit of 's'. + longitude_lisa : float or numpy.array + The longitude of a GW signal in LISA frame, in the unit of 'radian'. + latitude_lisa : float or numpy.array + The latitude of a GW signal in LISA frame, in the unit of 'radian'. + polarization_geo : float or numpy.array + The polarization angle of a GW signal in LISA frame. + In the unit of 'radian'. + """ + t_ssb, longitude_ssb, latitude_ssb, polarization_ssb = geo_to_ssb( + t_geo, longitude_geo, latitude_geo, polarization_geo, use_astropy) + t_lisa, longitude_lisa, latitude_lisa, polarization_lisa = ssb_to_lisa( + t_ssb, longitude_ssb, latitude_ssb, polarization_ssb, t0) + + return (t_lisa, longitude_lisa, latitude_lisa, polarization_lisa)
+ + + +__all__ = ['TIME_OFFSET_20_DEGREES', + 'localization_to_propagation_vector', + 'propagation_vector_to_localization', 'polarization_newframe', + 't_lisa_from_ssb', 't_ssb_from_t_lisa', + 'ssb_to_lisa', 'lisa_to_ssb', + 'rotation_matrix_ssb_to_lisa', 'rotation_matrix_ssb_to_geo', + 'lisa_position_ssb', 'earth_position_ssb', + 't_geo_from_ssb', 't_ssb_from_t_geo', 'ssb_to_geo', 'geo_to_ssb', + 'lisa_to_geo', 'geo_to_lisa', + ] +
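Finally, a sketch chaining the geocentric and LISA frames directly; both helpers route through the SSB frame internally. With the default use_astropy=True this relies on astropy's PrecessedGeocentric/BarycentricMeanEcliptic machinery; the inputs are illustrative:

    from pycbc.coordinates.space import geo_to_lisa, lisa_to_geo

    t_lisa, lon_lisa, lat_lisa, psi_lisa = geo_to_lisa(1126259462.4, 1.2, 0.4, 0.7)
    t_geo, lon_geo, lat_geo, psi_geo = lisa_to_geo(
        t_lisa, lon_lisa, lat_lisa, psi_lisa)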
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/cosmology.html b/latest/html/_modules/pycbc/cosmology.html new file mode 100644 index 00000000000..05172da4978 --- /dev/null +++ b/latest/html/_modules/pycbc/cosmology.html @@ -0,0 +1,713 @@ + + + + + + pycbc.cosmology — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.cosmology

+# Copyright (C) 2017  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides functions for computing cosmological quantities, such as
+redshift. This is mostly a wrapper around ``astropy.cosmology``.
+
+Note: in all functions, ``distance`` is shorthand for ``luminosity_distance``.
+Any other distance measure is explicitly named; e.g., ``comoving_distance``.
+"""
+
+import logging
+import numpy
+from scipy import interpolate, integrate
+import astropy.cosmology
+from astropy import units
+from astropy.cosmology.core import CosmologyError
+from astropy.cosmology import parameters
+import pycbc.conversions
+
+logger = logging.getLogger('pycbc.cosmology')
+
+DEFAULT_COSMOLOGY = 'Planck15'
+
+
+def get_cosmology(cosmology=None, **kwargs):
+    r"""Gets an astropy cosmology class.
+
+    Parameters
+    ----------
+    cosmology : str or astropy.cosmology.FlatLambdaCDM, optional
+        The name of the cosmology to use. For the list of options, see
+        :py:attr:`astropy.cosmology.parameters.available`. If None, and no
+        other keyword arguments are provided, will default to
+        :py:attr:`DEFAULT_COSMOLOGY`. If an instance of
+        :py:class:`astropy.cosmology.FlatLambdaCDM`, will just return that.
+    \**kwargs :
+        If any other keyword arguments are provided they will be passed to
+        :py:attr:`astropy.cosmology.FlatLambdaCDM` to create a custom
+        cosmology.
+
+    Returns
+    -------
+    astropy.cosmology.FlatLambdaCDM
+        The cosmology to use.
+
+    Examples
+    --------
+    Use the default:
+
+    >>> from pycbc.cosmology import get_cosmology
+    >>> get_cosmology()
+    FlatLambdaCDM(name="Planck15", H0=67.7 km / (Mpc s), Om0=0.307,
+                  Tcmb0=2.725 K, Neff=3.05, m_nu=[0.   0.   0.06] eV,
+                  Ob0=0.0486)
+
+    Use properties measured by WMAP instead:
+
+    >>> get_cosmology("WMAP9")
+    FlatLambdaCDM(name="WMAP9", H0=69.3 km / (Mpc s), Om0=0.286, Tcmb0=2.725 K,
+                  Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.0463)
+
+    Create your own cosmology (see :py:class:`astropy.cosmology.FlatLambdaCDM`
+    for details on the default values used):
+
+    >>> get_cosmology(H0=70., Om0=0.3)
+    FlatLambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Tcmb0=0 K, Neff=3.04, m_nu=None,
+                  Ob0=None)
+
+    """
+    if kwargs and cosmology is not None:
+        raise ValueError("if providing custom cosmological parameters, do "
+                         "not provide a `cosmology` argument")
+    if isinstance(cosmology, astropy.cosmology.FlatLambdaCDM):
+        # just return
+        return cosmology
+    if kwargs:
+        cosmology = astropy.cosmology.FlatLambdaCDM(**kwargs)
+    else:
+        if cosmology is None:
+            cosmology = DEFAULT_COSMOLOGY
+        if cosmology not in parameters.available:
+            raise ValueError("unrecognized cosmology {}".format(cosmology))
+        cosmology = getattr(astropy.cosmology, cosmology)
+    return cosmology
+
+
+def z_at_value(func, fval, unit, zmax=1000., **kwargs):
+    r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays.
+
+    Getting a z for a cosmological quantity involves numerically inverting
+    ``func``. The ``zmax`` argument sets how large of a z to guess (see
+    :py:func:`astropy.cosmology.z_at_value` for details). If a z is larger than
+    ``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If that still
+    is not large enough, will just return ``numpy.inf``.
+
+    Parameters
+    ----------
+    func : function or method
+        A function that takes redshift as input.
+    fval : float
+        The value of ``func(z)``.
+    unit : astropy.unit
+        The unit of ``fval``.
+    zmax : float, optional
+        The initial maximum search limit for ``z``. Default is 1000.
+    \**kwargs :
+        All other keyword arguments are passed to
+        :py:func:`astropy.cosmology.z_at_value`.
+
+    Returns
+    -------
+    float
+        The redshift at the requested values.
+    """
+    fval, input_is_array = pycbc.conversions.ensurearray(fval)
+    # make sure fval is at least 1D
+    if fval.size == 1 and fval.ndim == 0:
+        fval = fval.reshape(1)
+    zs = numpy.zeros(fval.shape, dtype=float)  # the output array
+    if 'method' not in kwargs:
+        # workaround for https://github.com/astropy/astropy/issues/14249
+        # FIXME remove when fixed in astropy/scipy
+        kwargs['method'] = 'bounded'
+    for (ii, val) in enumerate(fval):
+        try:
+            zs[ii] = astropy.cosmology.z_at_value(func, val*unit, zmax=zmax,
+                                                  **kwargs)
+        except CosmologyError:
+            if ii == len(zs)-1:
+                # If zs[ii] is less than but very close to zmax (i.e., it is
+                # effectively the last element in [zmin, zmax]), `z_at_value`
+                # also raises a CosmologyError (see
+                # https://docs.astropy.org/en/stable/api/astropy.cosmology.z_at_value.html).
+                # To avoid bumping up zmax, set zs[ii] to the previous value,
+                # assuming that `func` is smooth.
+                zs[ii] = zs[ii-1]
+            else:
+                # we'll get this if the z was larger than zmax; in that
+                # case we'll try bumping up zmax later to get a value
+                zs[ii] = numpy.inf
+    # check if there were any zs > zmax
+    replacemask = numpy.isinf(zs)
+    # try bumping up zmax to get a result
+    if replacemask.any():
+        # we'll keep bumping up the maxz until we can get a result
+        counter = 0  # to prevent running forever
+        while replacemask.any():
+            kwargs['zmin'] = zmax
+            zmax = 10 * zmax
+            idx = numpy.where(replacemask)
+            for ii in idx:
+                val = fval[ii]
+                try:
+                    zs[ii] = astropy.cosmology.z_at_value(
+                        func, val*unit, zmax=zmax, **kwargs)
+                    replacemask[ii] = False
+                except CosmologyError:
+                    # didn't work, try on next loop
+                    pass
+            counter += 1
+            if counter == 5:
+                # give up and warn the user
+                logger.warning("One or more values correspond to a "
+                               "redshift > {0:.1e}. The redshift for these "
+                               "have been set to inf. If you would like "
+                               "better precision, call God.".format(zmax))
+                break
+    return pycbc.conversions.formatreturn(zs, input_is_array)
+
+
+def _redshift(distance, **kwargs):
+    r"""Uses astropy to get redshift from the given luminosity distance.
+
+    Parameters
+    ----------
+    distance : float
+        The luminosity distance, in Mpc.
+    \**kwargs :
+        All other keyword args are passed to :py:func:`get_cosmology` to
+        select a cosmology. If none provided, will use
+        :py:attr:`DEFAULT_COSMOLOGY`.
+
+    Returns
+    -------
+    float :
+        The redshift corresponding to the given luminosity distance.
+    """
+    cosmology = get_cosmology(**kwargs)
+    return z_at_value(cosmology.luminosity_distance, distance, units.Mpc)
+
+
+class DistToZ(object):
+    r"""Interpolates luminosity distance as a function of redshift to allow for
+    fast conversion.
+
+    The :mod:`astropy.cosmology` module provides methods for converting any
+    cosmological parameter (like luminosity distance) to redshift. This can be
+    very slow when operating on a large array, as it involves numerically
+    inverting :math:`z(D)` (where :math:`D` is the luminosity distance). This
+    class speeds that up by pre-interpolating :math:`D(z)`. It works by setting
+    up a dense grid of redshifts, then using linear interpolation to find the
+    inverse function.  The interpolation uses a grid linear in z for z < 1, and
+    log in z for ``default_maxz`` > z > 1. This interpolator is set up the first
+    time `get_redshift` is called.  If a distance is requested that results in
+    a z > ``default_maxz``, the class falls back to calling astropy directly.
+
+    Instances of this class can be called like a function on luminosity
+    distances, which will return the corresponding redshifts.
+
+    Parameters
+    ----------
+    default_maxz : float, optional
+        The maximum z to interpolate up to before falling back to calling
+        astropy directly. Default is 1000.
+    numpoints : int, optional
+        The number of points to use in the linear interpolation between 0 to 1
+        and 1 to ``default_maxz``. Default is 10000.
+    \**kwargs :
+        All other keyword args are passed to :py:func:`get_cosmology` to
+        select a cosmology. If none provided, will use
+        :py:attr:`DEFAULT_COSMOLOGY`.
+    """
+    def __init__(self, default_maxz=1000., numpoints=10000, **kwargs):
+        self.numpoints = int(numpoints)
+        self.default_maxz = default_maxz
+        self.cosmology = get_cosmology(**kwargs)
+        # the interpolating functions; we'll set them to None for now, then set
+        # them up when get_redshift is first called
+        self.nearby_d2z = None
+        self.faraway_d2z = None
+        self.default_maxdist = None
+
+    def setup_interpolant(self):
+        """Initializes the z(d) interpolation."""
+        # for computing nearby (z < 1) redshifts
+        zs = numpy.linspace(0., 1., num=self.numpoints)
+        ds = self.cosmology.luminosity_distance(zs).value
+        self.nearby_d2z = interpolate.interp1d(ds, zs, kind='linear',
+                                                bounds_error=False)
+        # for computing far away (z > 1) redshifts
+        zs = numpy.logspace(0, numpy.log10(self.default_maxz),
+                            num=self.numpoints)
+        ds = self.cosmology.luminosity_distance(zs).value
+        self.faraway_d2z = interpolate.interp1d(ds, zs, kind='linear',
+                                                 bounds_error=False)
+        # store the default maximum distance
+        self.default_maxdist = ds.max()
+
+    def get_redshift(self, dist):
+        """Returns the redshift for the given distance.
+        """
+        dist, input_is_array = pycbc.conversions.ensurearray(dist)
+        try:
+            zs = self.nearby_d2z(dist)
+        except TypeError:
+            # interpolant hasn't been setup yet
+            self.setup_interpolant()
+            zs = self.nearby_d2z(dist)
+        # if any points had redshifts beyond the nearby range, they will have nans;
+        # replace using the faraway interpolation
+        replacemask = numpy.isnan(zs)
+        if replacemask.any():
+            zs[replacemask] = self.faraway_d2z(dist[replacemask])
+            replacemask = numpy.isnan(zs)
+        # if we still have nans, it means that some distances are beyond our
+        # furthest default; fall back to using astropy
+        if replacemask.any():
+            # well... check that the distance is positive and finite first
+            if not ((dist > 0.).all() and numpy.isfinite(dist).all()):
+                raise ValueError("distance must be finite and > 0")
+            zs[replacemask] = _redshift(dist[replacemask],
+                                        cosmology=self.cosmology)
+        return pycbc.conversions.formatreturn(zs, input_is_array)
+
+    def __call__(self, dist):
+        return self.get_redshift(dist)
+
+
+# set up D(z) interpolating classes for the standard cosmologies
+_d2zs = {_c: DistToZ(cosmology=_c)
+         for _c in parameters.available}
+
+
+
+[docs] +def redshift(distance, **kwargs): + r"""Returns the redshift associated with the given luminosity distance. + + If the requested cosmology is one of the pre-defined ones in + :py:attr:`astropy.cosmology.parameters.available`, :py:class:`DistToZ` is + used to provide a fast interpolation. This takes a few seconds to setup + on the first call. + + Parameters + ---------- + distance : float + The luminosity distance, in Mpc. + \**kwargs : + All other keyword args are passed to :py:func:`get_cosmology` to + select a cosmology. If none provided, will use + :py:attr:`DEFAULT_COSMOLOGY`. + + Returns + ------- + float : + The redshift corresponding to the given distance. + """ + cosmology = get_cosmology(**kwargs) + try: + z = _d2zs[cosmology.name](distance) + except KeyError: + # not a standard cosmology, call the redshift function + z = _redshift(distance, cosmology=cosmology) + return z
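A short usage sketch (the distances are illustrative; the first call on a named cosmology pays the one-off cost of building the interpolant):

    from pycbc.cosmology import redshift

    z = redshift(100.)                           # ~0.02 for 100 Mpc with Planck15
    z_wmap = redshift(100., cosmology='WMAP9')   # any named astropy cosmology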
+ + + +class ComovingVolInterpolator(object): + r"""Interpolates comoving volume to distance or redshift. + + The :mod:`astropy.cosmology` module provides methods for converting any + cosmological parameter (like luminosity distance) to redshift. This can be + very slow when operating on a large array, as it involves numerically + inverting :math:`z(D)` (where :math:`D` is the luminosity distance). This + class speeds that up by pre-interpolating :math:`D(z)`. It works by setting + up a dense grid of redshifts, then using linear interpolation to find the + inverse function. The interpolation uses a grid linear in z for z < 1, and + log in z for ``default_maxz`` > z > 1. This interpolater is setup the first + time `get_redshift` is called. If a distance is requested that results in + a z > ``default_maxz``, the class falls back to calling astropy directly. + + Instances of this class can be called like a function on luminosity + distances, which will return the corresponding redshifts. + + Parameters + ---------- + parameter : {'luminosity_distance', 'redshift'} + What parameter to interpolate. + default_maxz : float, optional + The maximum z to interpolate up to before falling back to calling + astropy directly. Default is 10. + numpoints : int, optional + The number of points to use in the linear interpolation between 0 to 1 + and 1 to ``default_maxz``. Default is 1000. + vol_func: function, optional + Optionally set how the volume is calculated by providing a function + \**kwargs : + All other keyword args are passed to :py:func:`get_cosmology` to + select a cosmology. If none provided, will use + :py:attr:`DEFAULT_COSMOLOGY`. + """ + def __init__(self, parameter, default_maxz=10., numpoints=1000, + vol_func=None, **kwargs): + self.parameter = parameter + self.numpoints = int(numpoints) + self.default_maxz = default_maxz + self.cosmology = get_cosmology(**kwargs) + # the interpolating functions; we'll set them to None for now, then set + # them up when get_redshift is first called + self.nearby_interp = None + self.faraway_interp = None + self.default_maxvol = None + if vol_func is not None: + self.vol_func = vol_func + else: + self.vol_func = self.cosmology.comoving_volume + self.vol_units = self.vol_func(0.5).unit + + def _create_interpolant(self, minz, maxz): + minlogv = numpy.log(self.vol_func(minz).value) + maxlogv = numpy.log(self.vol_func(maxz).value) + logvs = numpy.linspace(minlogv, maxlogv, num=self.numpoints) + + zs = z_at_value(self.vol_func, numpy.exp(logvs), self.vol_units, maxz) + + if self.parameter != 'redshift': + ys = cosmological_quantity_from_redshift(zs, self.parameter) + else: + ys = zs + + return interpolate.interp1d(logvs, ys, kind='linear', + bounds_error=False) + + def setup_interpolant(self): + """Initializes the z(d) interpolation.""" + # get VC bounds + # for computing nearby (z < 1) redshifts + minz = 0.001 + maxz = 1. + self.nearby_interp = self._create_interpolant(minz, maxz) + # for computing far away (z > 1) redshifts + minz = 1. + maxz = self.default_maxz + self.faraway_interp = self._create_interpolant(minz, maxz) + # store the default maximum volume + self.default_maxvol = numpy.log(self.vol_func(maxz).value) + + def get_value_from_logv(self, logv): + """Returns the redshift for the given distance. 
+ """ + logv, input_is_array = pycbc.conversions.ensurearray(logv) + try: + vals = self.nearby_interp(logv) + except TypeError: + # interpolant hasn't been setup yet + self.setup_interpolant() + vals = self.nearby_interp(logv) + # if any points had red shifts beyond the nearby, will have nans; + # replace using the faraway interpolation + replacemask = numpy.isnan(vals) + if replacemask.any(): + vals[replacemask] = self.faraway_interp(logv[replacemask]) + replacemask = numpy.isnan(vals) + # if we still have nans, means that some distances are beyond our + # furthest default; fall back to using astropy + if replacemask.any(): + # well... check that the logv is finite first + if not numpy.isfinite(logv).all(): + raise ValueError("comoving volume must be finite and > 0") + zs = z_at_value(self.vol_func, + numpy.exp(logv[replacemask]), self.vol_units) + if self.parameter == 'redshift': + vals[replacemask] = zs + else: + vals[replacemask] = \ + getattr(self.cosmology, self.parameter)(zs).value + return pycbc.conversions.formatreturn(vals, input_is_array) + + def get_value(self, volume): + return self.get_value_from_logv(numpy.log(volume)) + + def __call__(self, volume): + return self.get_value(volume) + + +# set up D(z) interpolating classes for the standard cosmologies +_v2ds = {_c: ComovingVolInterpolator('luminosity_distance', cosmology=_c) + for _c in parameters.available} + +_v2zs = {_c: ComovingVolInterpolator('redshift', cosmology=_c) + for _c in parameters.available} + + +
+[docs] +def redshift_from_comoving_volume(vc, interp=True, **kwargs): + r"""Returns the redshift from the given comoving volume. + + Parameters + ---------- + vc : float + The comoving volume, in units of cubed Mpc. + interp : bool, optional + If true, this will setup an interpolator between redshift and comoving + volume the first time this function is called. This is useful when + making many successive calls to this function (and is necessary when + using this function in a transform when doing parameter estimation). + However, setting up the interpolator the first time takes O(10)s of + seconds. If you will only be making a single call to this function, or + will only run it on an array with < ~100000 elements, it is faster to + not use the interpolator (i.e., set ``interp=False``). Default is + ``True``. + \**kwargs : + All other keyword args are passed to :py:func:`get_cosmology` to + select a cosmology. If none provided, will use + :py:attr:`DEFAULT_COSMOLOGY`. + + Returns + ------- + float : + The redshift at the given comoving volume. + """ + cosmology = get_cosmology(**kwargs) + lookup = _v2zs if interp else {} + try: + z = lookup[cosmology.name](vc) + except KeyError: + # not using interp or not a standard cosmology, + # call the redshift function directly + z = z_at_value(cosmology.comoving_volume, vc, units.Mpc**3) + return z
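A short sketch of calling the function above, not part of the module source; the volumes are placeholder values in Mpc^3 and the default cosmology is assumed.

# Hypothetical example: convert comoving volume to redshift.
import numpy
from pycbc.cosmology import redshift_from_comoving_volume

# single value: skipping the interpolator avoids the one-time setup cost
z = redshift_from_comoving_volume(1e9, interp=False)

# many values (e.g. inside a sampler transform): the interpolator pays off
zs = redshift_from_comoving_volume(numpy.linspace(1e6, 1e9, 100000), interp=True)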
+ + + +
+[docs] +def distance_from_comoving_volume(vc, interp=True, **kwargs): + r"""Returns the luminosity distance from the given comoving volume. + + Parameters + ---------- + vc : float + The comoving volume, in units of cubed Mpc. + interp : bool, optional + If true, this will setup an interpolator between distance and comoving + volume the first time this function is called. This is useful when + making many successive calls to this function (such as when using this + function in a transform for parameter estimation). However, setting up + the interpolator the first time takes O(10)s of seconds. If you will + only be making a single call to this function, or will only run it on + an array with < ~100000 elements, it is faster to not use the + interpolator (i.e., set ``interp=False``). Default is ``True``. + \**kwargs : + All other keyword args are passed to :py:func:`get_cosmology` to + select a cosmology. If none provided, will use + :py:attr:`DEFAULT_COSMOLOGY`. + + Returns + ------- + float : + The luminosity distance at the given comoving volume. + """ + cosmology = get_cosmology(**kwargs) + lookup = _v2ds if interp else {} + try: + dist = lookup[cosmology.name](vc) + except KeyError: + # not using interp or not a standard cosmology, + # call the redshift function directly + z = z_at_value(cosmology.comoving_volume, vc, units.Mpc**3) + dist = cosmology.luminosity_distance(z).value + return dist
+ + + +
+[docs] +def cosmological_quantity_from_redshift(z, quantity, strip_unit=True, + **kwargs): + r"""Returns the value of a cosmological quantity (e.g., age) at a redshift. + + Parameters + ---------- + z : float + The redshift. + quantity : str + The name of the quantity to get. The name may be any attribute of + :py:class:`astropy.cosmology.FlatLambdaCDM`. + strip_unit : bool, optional + Just return the value of the quantity, sans units. Default is True. + \**kwargs : + All other keyword args are passed to :py:func:`get_cosmology` to + select a cosmology. If none provided, will use + :py:attr:`DEFAULT_COSMOLOGY`. + + Returns + ------- + float or astropy.units.quantity : + The value of the quantity at the requested value. If ``strip_unit`` is + ``True``, will return the value. Otherwise, will return the value with + units. + """ + cosmology = get_cosmology(**kwargs) + val = getattr(cosmology, quantity)(z) + if strip_unit: + val = val.value + return val
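A brief usage sketch of the function above, not part of the module source; 'age' and 'luminosity_distance' are standard attributes of the astropy cosmology object, and the redshift value is illustrative.

# Hypothetical example: look up cosmological quantities at z = 1 using the
# default cosmology.
from pycbc.cosmology import cosmological_quantity_from_redshift

age_gyr = cosmological_quantity_from_redshift(1.0, 'age')                  # Gyr
dl_mpc = cosmological_quantity_from_redshift(1.0, 'luminosity_distance')   # Mpc
with_units = cosmological_quantity_from_redshift(1.0, 'age', strip_unit=False)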
+ + + +__all__ = ['redshift', 'redshift_from_comoving_volume', + 'distance_from_comoving_volume', + 'cosmological_quantity_from_redshift', + ] +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/detector.html b/latest/html/_modules/pycbc/detector.html new file mode 100644 index 00000000000..027b746199c --- /dev/null +++ b/latest/html/_modules/pycbc/detector.html @@ -0,0 +1,995 @@ + + + + + + pycbc.detector — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.detector

+# -*- coding: UTF-8 -*-
+
+# Copyright (C) 2012  Alex Nitz
+#
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""This module provides utilities for calculating detector responses and timing
+between observatories.
+"""
+import os
+import logging
+import numpy as np
+from numpy import cos, sin, pi
+
+import lal
+from astropy.time import Time
+from astropy import constants, coordinates, units
+from astropy.coordinates.matrix_utilities import rotation_matrix
+from astropy.units.si import sday, meter
+
+import pycbc.libutils
+from pycbc.types import TimeSeries
+from pycbc.types.config import InterpolatingConfigParser
+
+logger = logging.getLogger('pycbc.detector')
+
+# Response functions are modelled after those in lalsuite and as also
+# presented in https://arxiv.org/pdf/gr-qc/0008066.pdf
+
+
+[docs] +def gmst_accurate(gps_time): + gmst = Time(gps_time, format='gps', scale='utc', + location=(0, 0)).sidereal_time('mean').rad + return gmst
+ + +
+[docs] +def get_available_detectors(): + """ List the available detectors """ + dets = list(_ground_detectors.keys()) + for pfx, name in get_available_lal_detectors(): + dets += [pfx] + return dets
+ + +
+[docs] +def get_available_lal_detectors(): + """Return a list of detectors known to the currently sourced lalsuite. + This function queries lalsuite for the detectors it knows about. + Detectors are identified by a two-character prefix, e.g. 'K1', + and also by a longer, clearer name, e.g. KAGRA. This function returns + both. As LAL does not really expose this functionality, we have to make some + assumptions about how this information is stored in LAL. Therefore, while + we hope this function works correctly, it may need updating in the future; + it would be better if LAL exposed this information directly. + """ + ld = lal.__dict__ + known_lal_names = [j for j in ld.keys() if "DETECTOR_PREFIX" in j] + known_prefixes = [ld[k] for k in known_lal_names] + known_names = [ld[k.replace('PREFIX', 'NAME')] for k in known_lal_names] + return list(zip(known_prefixes, known_names))
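A quick sketch of listing detectors with the two functions above; not part of the module source, and the printed values in the comments are only examples.

# Hypothetical example: list the detectors PyCBC knows about. The prefixes
# come from both the custom _ground_detectors registry and lalsuite.
from pycbc.detector import get_available_detectors, get_available_lal_detectors

print(get_available_detectors())        # e.g. ['H1', 'L1', 'V1', ...]
for prefix, name in get_available_lal_detectors():
    print(prefix, name)                 # two-character prefix and longer name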
+ + +_ground_detectors = {} + +
+[docs] +def add_detector_on_earth(name, longitude, latitude, + yangle=0, xangle=None, height=0, + xlength=4000, ylength=4000): + """ Add a new detector on the earth + + Parameters + ---------- + + name: str + two-letter name to identify the detector + longitude: float + Longitude in radians using geodetic coordinates of the detector + latitude: float + Latitude in radians using geodetic coordinates of the detector + yangle: float + Azimuthal angle of the y-arm (angle drawn from pointing north) + xangle: float + Azimuthal angle of the x-arm (angle drawn from point north). If not set + we assume a right angle detector following the right-hand rule. + height: float + The height in meters of the detector above the standard + reference ellipsoidal earth + """ + if xangle is None: + # assume right angle detector if no separate xarm direction given + xangle = yangle + np.pi / 2.0 + + # Rotation matrix to move detector to correct orientation + rm1 = rotation_matrix(longitude * units.rad, 'z') + rm2 = rotation_matrix((np.pi / 2.0 - latitude) * units.rad, 'y') + rm = np.matmul(rm2, rm1) + + # Calculate response in earth centered coordinates + # by rotation of response in coordinates aligned + # with the detector arms + resps = [] + vecs = [] + for angle in [yangle, xangle]: + a, b = cos(2 * angle), sin(2 * angle) + resp = np.array([[-a, b, 0], [b, a, 0], [0, 0, 0]]) + + # apply rotation + resp = np.matmul(resp, rm) + resp = np.matmul(rm.T, resp) / 4.0 + resps.append(resp) + + vec = np.matmul(rm.T, np.array([-np.cos(angle), np.sin(angle), 0])) + vecs.append(vec) + + full_resp = (resps[0] - resps[1]) + loc = coordinates.EarthLocation.from_geodetic(longitude * units.rad, + latitude * units.rad, + height=height*units.meter) + loc = np.array([loc.x.value, loc.y.value, loc.z.value]) + _ground_detectors[name] = {'location': loc, + 'response': full_resp, + 'xresp': resps[1], + 'yresp': resps[0], + 'xvec': vecs[1], + 'yvec': vecs[0], + 'yangle': yangle, + 'xangle': xangle, + 'height': height, + 'xaltitude': 0.0, + 'yaltitude': 0.0, + 'ylength': ylength, + 'xlength': xlength, + }
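The sketch below registers a fictitious detector with ``add_detector_on_earth``; it is not part of the module source and the coordinates are made up. Longitude and latitude are in radians and the y-arm azimuth is measured from north, as described in the docstring above.

# Hypothetical example: register a made-up 4 km detector called 'X1'.
import numpy as np
from pycbc.detector import add_detector_on_earth, Detector

add_detector_on_earth('X1',
                      longitude=0.3,      # radians
                      latitude=0.9,       # radians
                      yangle=np.pi / 4,   # y-arm azimuth from north
                      height=100.0)       # meters above the reference ellipsoid

x1 = Detector('X1')   # the new detector can now be used like any other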
+ + +# Notation matches +# Eq 4 of https://link.aps.org/accepted/10.1103/PhysRevD.96.084004 +
+[docs] +def single_arm_frequency_response(f, n, arm_length): + """ The relative amplitude factor of the arm response due to + signal delay. This is relevant where the long-wavelength + approximation no longer applies. + """ + n = np.clip(n, -0.999, 0.999) + phase = arm_length / constants.c.value * 2.0j * np.pi * f + a = 1.0 / 4.0 / phase + b = (1 - np.exp(-phase * (1 - n))) / (1 - n) + c = np.exp(-2.0 * phase) * (1 - np.exp(phase * (1 + n))) / (1 + n) + return a * (b - c) * 2.0 # We'll make this relative to the static resp
+ + +
+[docs] +def load_detector_config(config_files): + """ Add custom detectors from a configuration file + + Parameters + ---------- + config_files: str or list of strs + The config file(s) which specify new detectors + """ + methods = {'earth_normal': (add_detector_on_earth, + ['longitude', 'latitude'])} + conf = InterpolatingConfigParser(config_files) + dets = conf.get_subsections('detector') + for det in dets: + kwds = dict(conf.items('detector-{}'.format(det))) + try: + method, arg_names = methods[kwds.pop('method')] + except KeyError: + raise ValueError("Missing or unknown method, " + "options are {}".format(methods.keys())) + for k in kwds: + kwds[k] = float(kwds[k]) + try: + args = [kwds.pop(arg) for arg in arg_names] + except KeyError as e: + raise ValueError("missing required detector arguments;" + " {} are required".format(arg_names)) + method(det.upper(), *args, **kwds)
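A sketch of the configuration format implied by the code above: detectors live in ``[detector-<name>]`` sections, ``method = earth_normal`` selects ``add_detector_on_earth``, and the remaining options are parsed as floats. This is not part of the module source; the file name and coordinate values are placeholders.

# Hypothetical example: define a custom detector in a config file and load it.
from pycbc.detector import load_detector_config, Detector

config_text = """
[detector-x2]
method = earth_normal
longitude = 0.3
latitude = 0.9
yangle = 0.785
height = 50
"""
with open('custom_detectors.ini', 'w') as f:
    f.write(config_text)

load_detector_config(['custom_detectors.ini'])
x2 = Detector('X2')   # section name 'x2' is upper-cased on registration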
+ + + +# autoload detector config files +if 'PYCBC_DETECTOR_CONFIG' in os.environ: + load_detector_config(os.environ['PYCBC_DETECTOR_CONFIG'].split(':')) + + +
+[docs] +class Detector(object): + """A gravitational wave detector + """ + def __init__(self, detector_name, reference_time=1126259462.0): + """ Create class representing a gravitational-wave detector + Parameters + ---------- + detector_name: str + The two-character detector string, e.g. H1, L1, V1, K1, I1 + reference_time: float + Default is time of GW150914. In this case, the earth's rotation + will be estimated from a reference time. If 'None', we will + calculate the time for each requested gps time explicitly + using a slower but higher-precision method. + """ + self.name = str(detector_name) + + lal_detectors = [pfx for pfx, name in get_available_lal_detectors()] + if detector_name in _ground_detectors: + self.info = _ground_detectors[detector_name] + self.response = self.info['response'] + self.location = self.info['location'] + elif detector_name in lal_detectors: + lalsim = pycbc.libutils.import_optional('lalsimulation') + self._lal = lalsim.DetectorPrefixToLALDetector(self.name) + self.response = self._lal.response + self.location = self._lal.location + else: + raise ValueError("Unknown detector {}".format(detector_name)) + + loc = coordinates.EarthLocation(self.location[0], + self.location[1], + self.location[2], + unit=meter) + self.latitude = loc.lat.rad + self.longitude = loc.lon.rad + + self.reference_time = reference_time + self.sday = None + self.gmst_reference = None +
+[docs] + def set_gmst_reference(self): + if self.reference_time is not None: + self.sday = float(sday.si.scale) + self.gmst_reference = gmst_accurate(self.reference_time) + else: + raise RuntimeError("Can't get accurate sidereal time without GPS " + "reference time!")
+ + +
+[docs] + def lal(self): + """ Return lal data type detector instance """ + if hasattr(self, '_lal'): + return self._lal + else: + import lal + d = lal.FrDetector() + d.vertexLongitudeRadians = self.longitude + d.vertexLatitudeRadians = self.latitude + d.vertexElevation = self.info['height'] + d.xArmAzimuthRadians = self.info['xangle'] + d.yArmAzimuthRadians = self.info['yangle'] + d.yArmAltitudeRadians = self.info['yaltitude'] + d.xArmAltitudeRadians = self.info['xaltitude'] + + # This is somewhat abused by lalsimulation at the moment + # to determine a filter kernel size. We set this only so that the + # value gets a similar number of samples as other detectors; + # it is used for nothing else + d.yArmMidpoint = 4000.0 + + x = lal.Detector() + r = lal.CreateDetector(x, d, lal.LALDETECTORTYPE_IFODIFF) + self._lal = r + return r
+ + +
+[docs] + def gmst_estimate(self, gps_time): + if self.reference_time is None: + return gmst_accurate(gps_time) + + if self.gmst_reference is None: + self.set_gmst_reference() + dphase = (gps_time - self.reference_time) / self.sday * (2.0 * np.pi) + gmst = (self.gmst_reference + dphase) % (2.0 * np.pi) + return gmst
+ + +
+[docs] + def light_travel_time_to_detector(self, det): + """ Return the light travel time from this detector + Parameters + ---------- + det: Detector + The other detector to determine the light travel time to. + Returns + ------- + time: float + The light travel time in seconds + """ + d = self.location - det.location + return float(d.dot(d)**0.5 / constants.c.value)
+ + +
+[docs] + def antenna_pattern(self, right_ascension, declination, polarization, t_gps, + frequency=0, + polarization_type='tensor'): + """Return the detector response. + + Parameters + ---------- + right_ascension: float or numpy.ndarray + The right ascension of the source + declination: float or numpy.ndarray + The declination of the source + polarization: float or numpy.ndarray + The polarization angle of the source + polarization_type: string flag: Tensor, Vector or Scalar + The gravitational wave polarizations. Default: 'Tensor' + + Returns + ------- + fplus(default) or fx or fb : float or numpy.ndarray + The plus or vector-x or breathing polarization factor for this sky location / orientation + fcross(default) or fy or fl : float or numpy.ndarray + The cross or vector-y or longitudnal polarization factor for this sky location / orientation + """ + if isinstance(t_gps, lal.LIGOTimeGPS): + t_gps = float(t_gps) + gha = self.gmst_estimate(t_gps) - right_ascension + + cosgha = cos(gha) + singha = sin(gha) + cosdec = cos(declination) + sindec = sin(declination) + cospsi = cos(polarization) + sinpsi = sin(polarization) + + if frequency: + e0 = cosdec * cosgha + e1 = cosdec * -singha + e2 = sin(declination) + nhat = np.array([e0, e1, e2], dtype=object) + + nx = nhat.dot(self.info['xvec']) + ny = nhat.dot(self.info['yvec']) + + rx = single_arm_frequency_response(frequency, nx, + self.info['xlength']) + ry = single_arm_frequency_response(frequency, ny, + self.info['ylength']) + resp = ry * self.info['yresp'] - rx * self.info['xresp'] + ttype = np.complex128 + else: + resp = self.response + ttype = np.float64 + + x0 = -cospsi * singha - sinpsi * cosgha * sindec + x1 = -cospsi * cosgha + sinpsi * singha * sindec + x2 = sinpsi * cosdec + + x = np.array([x0, x1, x2], dtype=object) + dx = resp.dot(x) + + y0 = sinpsi * singha - cospsi * cosgha * sindec + y1 = sinpsi * cosgha + cospsi * singha * sindec + y2 = cospsi * cosdec + + y = np.array([y0, y1, y2], dtype=object) + dy = resp.dot(y) + + if polarization_type != 'tensor': + z0 = -cosdec * cosgha + z1 = cosdec * singha + z2 = -sindec + z = np.array([z0, z1, z2], dtype=object) + dz = resp.dot(z) + + if polarization_type == 'tensor': + if hasattr(dx, 'shape'): + fplus = (x * dx - y * dy).sum(axis=0).astype(ttype) + fcross = (x * dy + y * dx).sum(axis=0).astype(ttype) + else: + fplus = (x * dx - y * dy).sum() + fcross = (x * dy + y * dx).sum() + return fplus, fcross + + elif polarization_type == 'vector': + if hasattr(dx, 'shape'): + fx = (z * dx + x * dz).sum(axis=0).astype(ttype) + fy = (z * dy + y * dz).sum(axis=0).astype(ttype) + else: + fx = (z * dx + x * dz).sum() + fy = (z * dy + y * dz).sum() + + return fx, fy + + elif polarization_type == 'scalar': + if hasattr(dx, 'shape'): + fb = (x * dx + y * dy).sum(axis=0).astype(ttype) + fl = (z * dz).sum(axis=0) + else: + fb = (x * dx + y * dy).sum() + fl = (z * dz).sum() + return fb, fl
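A short sketch of calling ``antenna_pattern`` on a standard detector; not part of the module source, with sky location, polarization angle, and GPS time chosen only for illustration.

# Hypothetical example: F+ and Fx for LIGO-Hanford at one sky location.
from pycbc.detector import Detector

h1 = Detector('H1')
ra, dec, pol, t_gps = 1.7, -1.1, 0.2, 1126259462.0   # radians / GPS seconds
fp, fc = h1.antenna_pattern(ra, dec, pol, t_gps)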
+ + +
+[docs] + def time_delay_from_earth_center(self, right_ascension, declination, t_gps): + """Return the time delay from the earth center + """ + return self.time_delay_from_location(np.array([0, 0, 0]), + right_ascension, + declination, + t_gps)
+ + +
+[docs] + def time_delay_from_location(self, other_location, right_ascension, + declination, t_gps): + """Return the time delay from the given location to this detector for + a signal with the given sky location. + In other words, return `t1 - t2` where `t1` is the + arrival time in this detector and `t2` is the arrival time at the + other location. + + Parameters + ---------- + other_location : numpy.ndarray + The earth-centered Cartesian position (in m) to compute the delay from. + right_ascension : float + The right ascension (in rad) of the signal. + declination : float + The declination (in rad) of the signal. + t_gps : float + The GPS time (in s) of the signal. + + Returns + ------- + float + The arrival time difference between the two locations. + """ + ra_angle = self.gmst_estimate(t_gps) - right_ascension + cosd = cos(declination) + + e0 = cosd * cos(ra_angle) + e1 = cosd * -sin(ra_angle) + e2 = sin(declination) + + ehat = np.array([e0, e1, e2], dtype=object) + dx = other_location - self.location + return dx.dot(ehat).astype(np.float64) / constants.c.value
+ + +
+[docs] + def time_delay_from_detector(self, other_detector, right_ascension, + declination, t_gps): + """Return the time delay from the given detector to this detector for a signal with + the given sky location; i.e. return `t1 - t2` where `t1` is the + arrival time in this detector and `t2` is the arrival time in the + other detector. Note that this would return the same value as + `time_delay_from_earth_center` if `other_detector` were geocentric. + Parameters + ---------- + other_detector : detector.Detector + A detector instance. + right_ascension : float + The right ascension (in rad) of the signal. + declination : float + The declination (in rad) of the signal. + t_gps : float + The GPS time (in s) of the signal. + Returns + ------- + float + The arrival time difference between the detectors. + """ + return self.time_delay_from_location(other_detector.location, + right_ascension, + declination, + t_gps)
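A minimal sketch of the timing methods above; not part of the module source, with illustrative sky location and GPS time.

# Hypothetical example: arrival-time differences between sites for one source.
from pycbc.detector import Detector

h1, l1 = Detector('H1'), Detector('L1')
ra, dec, t_gps = 1.7, -1.1, 1126259462.0

dt_hl = h1.time_delay_from_detector(l1, ra, dec, t_gps)   # t_H1 - t_L1, seconds
dt_geo = h1.time_delay_from_earth_center(ra, dec, t_gps)  # relative to geocenter
max_dt = h1.light_travel_time_to_detector(l1)             # upper bound on |dt_hl|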
+ + +
+[docs] + def project_wave(self, hp, hc, ra, dec, polarization, + method='lal', + reference_time=None): + """Return the strain of a waveform as measured by the detector. + Apply the time shift for the given detector relative to the assumed + geocentric frame and apply the antenna patterns to the plus and cross + polarizations. + + Parameters + ---------- + hp: pycbc.types.TimeSeries + Plus polarization of the GW + hc: pycbc.types.TimeSeries + Cross polarization of the GW + ra: float + Right ascension of source location + dec: float + Declination of source location + polarization: float + Polarization angle of the source + method: {'lal', 'constant', 'vary_polarization'} + The method to use for projecting the polarizations into the + detector frame. Default is 'lal'. + reference_time: float, Optional + The time to use as a reference for some methods of projection. + Used by 'constant' and 'vary_polarization' methods. Uses average + time if not provided. + """ + # The most robust and feature-rich method, which includes + # time-changing antenna patterns and doppler shifts due to the + # earth's rotation and orbit + if method == 'lal': + import lalsimulation + h_lal = lalsimulation.SimDetectorStrainREAL8TimeSeries( + hp.astype(np.float64).lal(), hc.astype(np.float64).lal(), + ra, dec, polarization, self.lal()) + ts = TimeSeries( + h_lal.data.data, delta_t=h_lal.deltaT, epoch=h_lal.epoch, + dtype=np.float64, copy=False) + + # 'constant' assumes a fixed orientation relative to the source over the + # duration of the signal, accurate for short-duration signals + # 'vary_polarization' applies only the time-changing orientation + # but no doppler corrections + elif method in ['constant', 'vary_polarization']: + if reference_time is not None: + rtime = reference_time + else: + # In many cases, one should set the reference time if using + # this method as we don't know where the signal is within + # the given time series. If not provided, we'll choose + # the midpoint time. + rtime = (float(hp.end_time) + float(hp.start_time)) / 2.0 + + if method == 'constant': + time = rtime + elif method == 'vary_polarization': + if (not isinstance(hp, TimeSeries) or + not isinstance(hc, TimeSeries)): + raise TypeError('Waveform polarizations must be given' + ' as time series for this method') + + # this is more granular than needed, may be optimized later + # assume earth rotation in the ~30 ms needed to go from the earth + # center to the detector is completely negligible. + time = hp.sample_times.numpy() + + fp, fc = self.antenna_pattern(ra, dec, polarization, time) + dt = self.time_delay_from_earth_center(ra, dec, rtime) + ts = fp * hp + fc * hc + ts.start_time = float(ts.start_time) + dt + + # add in only the correction for the time variance in the polarization + # due to the earth's rotation, no doppler correction applied + else: + raise ValueError("Unknown projection method {}".format(method)) + return ts
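The following sketch projects a plus/cross waveform onto a detector using the method above; it is not part of the module source. It assumes a working lalsimulation installation for the default 'lal' method, and the waveform parameters and GPS epoch are placeholders.

# Hypothetical example: project hp/hc onto the H1 detector.
from pycbc.waveform import get_td_waveform
from pycbc.detector import Detector

hp, hc = get_td_waveform(approximant='IMRPhenomD', mass1=30, mass2=30,
                         delta_t=1.0 / 4096, f_lower=20.0)
hp.start_time += 1126259462.0   # place the signal at some GPS epoch
hc.start_time += 1126259462.0

h1 = Detector('H1')
strain = h1.project_wave(hp, hc, ra=1.7, dec=-1.1, polarization=0.2)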
+ + +
+[docs] + def optimal_orientation(self, t_gps): + """Return the optimal orientation in right ascension and declination + for a given GPS time. + + Parameters + ---------- + t_gps: float + Time in gps seconds + + Returns + ------- + ra: float + Right ascension that is optimally oriented for the detector + dec: float + Declination that is optimally oriented for the detector + """ + ra = self.longitude + (self.gmst_estimate(t_gps) % (2.0*np.pi)) + dec = self.latitude + return ra, dec
+ + +
+[docs] + def get_icrs_pos(self): + """ Transforms GCRS frame to ICRS frame + + Returns + ---------- + loc: numpy.ndarray shape (3,1) units: AU + ICRS coordinates in cartesian system + """ + loc = self.location + loc = coordinates.SkyCoord(x=loc[0], y=loc[1], z=loc[2], unit=units.m, + frame='gcrs', representation_type='cartesian').transform_to('icrs') + loc.representation_type = 'cartesian' + conv = np.float32(((loc.x.unit/units.AU).decompose()).to_string()) + loc = np.array([np.float32(loc.x), np.float32(loc.y), + np.float32(loc.z)])*conv + return loc
+ + +
+[docs] + def effective_distance(self, distance, ra, dec, pol, time, inclination): + """ Distance scaled to account for amplitude factors + + The effective distance of the source. This scales the distance so that + the amplitude is equal to a source which is optimally oriented with + respect to the detector. For fixed detector-frame intrinsic parameters + this is a measure of the expected signal strength. + + Parameters + ---------- + distance: float + Source luminosity distance in megaparsecs + ra: float + The right ascension in radians + dec: float + The declination in radians + pol: float + Polarization angle of the gravitational wave in radians + time: float + GPS time in seconds + inclination: + The inclination of the binary's orbital plane + + Returns + ------- + eff_dist: float + The effective distance of the source + """ + fp, fc = self.antenna_pattern(ra, dec, pol, time) + ic = np.cos(inclination) + ip = 0.5 * (1. + ic * ic) + scale = ((fp * ip) ** 2.0 + (fc * ic) ** 2.0) ** 0.5 + return distance / scale
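A small sketch of the effective-distance calculation above; not part of the module source, with all source parameters chosen only for illustration.

# Hypothetical example: effective distance of a source at 100 Mpc as seen by H1.
import numpy as np
from pycbc.detector import Detector

h1 = Detector('H1')
d_eff = h1.effective_distance(distance=100.0, ra=1.7, dec=-1.1, pol=0.2,
                              time=1126259462.0, inclination=np.pi / 3)
# d_eff >= 100.0: it equals the true distance only for an optimally
# oriented, overhead source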
+
+ + +
+[docs] +def overhead_antenna_pattern(right_ascension, declination, polarization): + """Return the antenna pattern factors F+ and Fx as a function of sky + location and polarization angle for a hypothetical interferometer located + at the north pole. Angles are in radians. Declinations of ±π/2 correspond + to the normal to the detector plane (i.e. overhead and underneath) while + the point with zero right ascension and declination is the direction + of one of the interferometer arms. + Parameters + ---------- + right_ascension: float + declination: float + polarization: float + Returns + ------- + f_plus: float + f_cross: float + """ + # convert from declination coordinate to polar (angle measured from the north axis) + theta = np.pi / 2.0 - declination + + f_plus = - (1.0/2.0) * (1.0 + cos(theta)*cos(theta)) * \ + cos(2.0 * right_ascension) * cos(2.0 * polarization) - \ + cos(theta) * sin(2.0*right_ascension) * sin(2.0 * polarization) + + f_cross = (1.0/2.0) * (1.0 + cos(theta)*cos(theta)) * \ + cos(2.0 * right_ascension) * sin(2.0 * polarization) - \ + cos(theta) * sin(2.0*right_ascension) * cos(2.0 * polarization) + + return f_plus, f_cross
+ + + +""" LISA class """ + + +
+[docs] +class LISA(object): + """For LISA detector + """ + def __init__(self): + None + +
+[docs] + def get_pos(self, ref_time): + """Return the position of the LISA detector for a given reference time + Parameters + ---------- + ref_time : numpy.ScalarType + + Returns + ------- + location : numpy.ndarray of shape (3,3) + Returns the position of all 3 satellites with each row + corresponding to a single axis. + """ + ref_time = Time(val=ref_time, format='gps', scale='utc').jyear + n = np.array(range(1, 4)) + kappa, _lambda_ = 0, 0 + alpha = 2. * np.pi * ref_time/1 + kappa + beta_n = (n - 1) * 2.0 * pi / 3.0 + _lambda_ + a, L = 1., 0.03342293561 + e = L/(2. * a * np.sqrt(3)) + + x = a*cos(alpha) + a*e*(sin(alpha)*cos(alpha)*sin(beta_n) - (1 + sin(alpha)**2)*cos(beta_n)) + y = a*sin(alpha) + a*e*(sin(alpha)*cos(alpha)*cos(beta_n) - (1 + cos(alpha)**2)*sin(beta_n)) + z = -np.sqrt(3)*a*e*cos(alpha - beta_n) + self.location = np.array([x, y, z]) + + return self.location
+ + +
+[docs] + def get_gcrs_pos(self, location): + """ Transforms ICRS frame to GCRS frame + + Parameters + ---------- + loc : numpy.ndarray shape (3,1) units: AU + Cartesian Coordinates of the location + in ICRS frame + + Returns + ---------- + loc : numpy.ndarray shape (3,1) units: meters + GCRS coordinates in cartesian system + """ + loc = location + loc = coordinates.SkyCoord(x=loc[0], y=loc[1], z=loc[2], unit=units.AU, + frame='icrs', representation_type='cartesian').transform_to('gcrs') + loc.representation_type = 'cartesian' + conv = np.float32(((loc.x.unit/units.m).decompose()).to_string()) + loc = np.array([np.float32(loc.x), np.float32(loc.y), + np.float32(loc.z)])*conv + return loc
+ + +
+[docs] + def time_delay_from_location(self, other_location, right_ascension, + declination, t_gps): + """Return the time delay from the LISA detector to detector for + a signal with the given sky location. In other words return + `t1 - t2` where `t1` is the arrival time in this detector and + `t2` is the arrival time in the other location. Units(AU) + + Parameters + ---------- + other_location : numpy.ndarray of coordinates in ICRS frame + A detector instance. + right_ascension : float + The right ascension (in rad) of the signal. + declination : float + The declination (in rad) of the signal. + t_gps : float + The GPS time (in s) of the signal. + + Returns + ------- + numpy.ndarray + The arrival time difference between the detectors. + """ + dx = self.location - other_location + cosd = cos(declination) + e0 = cosd * cos(right_ascension) + e1 = cosd * -sin(right_ascension) + e2 = sin(declination) + ehat = np.array([e0, e1, e2]) + return dx.dot(ehat) / constants.c.value
+ + +
+[docs] + def time_delay_from_detector(self, det, right_ascension, + declination, t_gps): + """Return the time delay from the LISA detector for a signal with + the given sky location in ICRS frame; i.e. return `t1 - t2` where + `t1` is the arrival time in this detector and `t2` is the arrival + time in the other detector. + + Parameters + ---------- + det : str + The name of the other (ground-based) detector. + right_ascension : float + The right ascension (in rad) of the signal. + declination : float + The declination (in rad) of the signal. + t_gps : float + The GPS time (in s) of the signal. + + Returns + ------- + numpy.ndarray + The arrival time difference between the detectors. + """ + loc = Detector(det, t_gps).get_icrs_pos() + return self.time_delay_from_location(loc, right_ascension, + declination, t_gps)
+ + +
+[docs] + def time_delay_from_earth_center(self, right_ascension, declination, t_gps): + """Return the time delay from the earth center in ICRS frame + """ + t_gps = Time(val=t_gps, format='gps', scale='utc') + earth = coordinates.get_body('earth', t_gps, + location=None).transform_to('icrs') + earth.representation_type = 'cartesian' + return self.time_delay_from_location( + np.array([np.float32(earth.x), np.float32(earth.y), + np.float32(earth.z)]), right_ascension, + declination, t_gps)
+
+ + + +
+[docs] +def ppdets(ifos, separator=', '): + """Pretty-print a list (or set) of detectors: return a string listing + the given detectors alphabetically and separated by the given string + (comma by default). + """ + if ifos: + return separator.join(sorted(ifos)) + return 'no detectors'
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions.html b/latest/html/_modules/pycbc/distributions.html new file mode 100644 index 00000000000..537cba53252 --- /dev/null +++ b/latest/html/_modules/pycbc/distributions.html @@ -0,0 +1,359 @@ + + + + + + pycbc.distributions — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.distributions

+# Copyright (C)  2016  Collin Capano, Christopher M. Biwer, Alex Nitz,
+#                2021  Yifan Wang, Shichao Wu
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This modules provides classes and functions for drawing and calculating the
+probability density function of distributions.
+"""
+# imports needed for functions below
+import configparser as _ConfigParser
+from pycbc.distributions import constraints
+from pycbc import VARARGS_DELIM as _VARARGS_DELIM
+
+# Promote some classes/functions to the distributions name space
+from pycbc.distributions.utils import draw_samples_from_config
+from pycbc.distributions.angular import UniformAngle, SinAngle, CosAngle, \
+                                        UniformSolidAngle
+from pycbc.distributions.arbitrary import Arbitrary, FromFile
+from pycbc.distributions.gaussian import Gaussian
+from pycbc.distributions.power_law import UniformPowerLaw, UniformRadius
+from pycbc.distributions.sky_location import UniformSky, FisherSky
+from pycbc.distributions.uniform import Uniform
+from pycbc.distributions.uniform_log import UniformLog10
+from pycbc.distributions.spins import IndependentChiPChiEff
+from pycbc.distributions.qnm import UniformF0Tau
+from pycbc.distributions.joint import JointDistribution
+from pycbc.distributions.external import External, DistributionFunctionFromFile
+from pycbc.distributions.fixedsamples import FixedSamples
+from pycbc.distributions.mass import MchirpfromUniformMass1Mass2, \
+                                     QfromUniformMass1Mass2
+
+# a dict of all available distributions
+distribs = {
+    IndependentChiPChiEff.name : IndependentChiPChiEff,
+    Arbitrary.name : Arbitrary,
+    FromFile.name : FromFile,
+    Gaussian.name : Gaussian,
+    UniformPowerLaw.name : UniformPowerLaw,
+    UniformRadius.name : UniformRadius,
+    Uniform.name : Uniform,
+    UniformAngle.name : UniformAngle,
+    CosAngle.name : CosAngle,
+    SinAngle.name : SinAngle,
+    UniformSolidAngle.name : UniformSolidAngle,
+    UniformSky.name : UniformSky,
+    UniformLog10.name : UniformLog10,
+    UniformF0Tau.name : UniformF0Tau,
+    External.name: External,
+    DistributionFunctionFromFile.name: DistributionFunctionFromFile,
+    FixedSamples.name: FixedSamples,
+    MchirpfromUniformMass1Mass2.name: MchirpfromUniformMass1Mass2,
+    QfromUniformMass1Mass2.name: QfromUniformMass1Mass2,
+    FisherSky.name: FisherSky
+}
+
+
+[docs] +def read_distributions_from_config(cp, section="prior"): + """Returns a list of PyCBC distribution instances for a section in the + given configuration file. + + Parameters + ---------- + cp : WorkflowConfigParser + An open config file to read. + section : {"prior", string} + Prefix on section names from which to retrieve the distributions. + + Returns + ------- + list + A list of the parsed distributions. + """ + dists = [] + variable_args = [] + for subsection in cp.get_subsections(section): + name = cp.get_opt_tag(section, "name", subsection) + dist = distribs[name].from_config(cp, section, subsection) + if set(dist.params).isdisjoint(variable_args): + dists.append(dist) + variable_args += dist.params + else: + raise ValueError("Same parameter in more than one distribution.") + return dists
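A sketch of reading priors from a config file with the function above; not part of the module source. It assumes ``pycbc.workflow.WorkflowConfigParser`` accepts a list of file paths, and the file name, parameter name, and bounds are placeholders.

# Hypothetical example: parse [prior-*] sections into distribution objects.
# Assumes a config file like:
#
#   [prior-mass1]
#   name = uniform
#   min-mass1 = 10
#   max-mass1 = 80
#
from pycbc.workflow import WorkflowConfigParser
from pycbc import distributions

cp = WorkflowConfigParser(['prior.ini'])
dists = distributions.read_distributions_from_config(cp, section='prior')
joint = distributions.JointDistribution(['mass1'], *dists)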
+ + + +def _convert_liststring_to_list(lstring): + """Checks if an argument of the configuration file is a string of a list + and returns the corresponding list (of strings). + + The argument is considered to be a list if it starts with '[' and ends + with ']'. List elements should be comma separated. For example, passing + `'[foo bar, cat]'` will result in `['foo bar', 'cat']` being returned. If + the argument does not start and end with '[' and ']', the argument will + just be returned as is. + """ + if lstring[0]=='[' and lstring[-1]==']': + lstring = [str(lstring[1:-1].split(',')[n].strip().strip("'")) + for n in range(len(lstring[1:-1].split(',')))] + return lstring + + +
+[docs] +def read_params_from_config(cp, prior_section='prior', + vargs_section='variable_params', + sargs_section='static_params'): + """Loads static and variable parameters from a configuration file. + + Parameters + ---------- + cp : WorkflowConfigParser + An open config parser to read from. + prior_section : str, optional + Check that priors exist in the given section. Default is 'prior.' + vargs_section : str, optional + The section to get the parameters that will be varied/need priors + defined for them. Default is 'variable_params'. + sargs_section : str, optional + The section to get the parameters that will remain fixed. Default is + 'static_params'. + + Returns + ------- + variable_args : list + The names of the parameters to vary in the PE run. + static_args : dict + Dictionary of names -> values giving the parameters to keep fixed. + """ + # sanity check that each parameter in [variable_params] has a prior section + variable_args = cp.options(vargs_section) + subsections = cp.get_subsections(prior_section) + tags = set([p for tag in subsections for p in tag.split('+')]) + missing_prior = set(variable_args) - tags + if any(missing_prior): + raise KeyError("You are missing a priors section in the config file " + "for parameter(s): {}".format(', '.join(missing_prior))) + # sanity check that each parameter with a priors section is in + # [variable_args] + missing_variable = tags - set(variable_args) + if any(missing_variable): + raise KeyError("Prior section found for parameter(s) {} but not " + "listed as variable parameter(s)." + .format(', '.join(missing_variable))) + # get static args + try: + static_args = dict([(key, cp.get_opt_tags(sargs_section, key, [])) + for key in cp.options(sargs_section)]) + except _ConfigParser.NoSectionError: + static_args = {} + # sanity check that each parameter in [variable_args] + # is not repeated in [static_args] + for arg in variable_args: + if arg in static_args: + raise KeyError("Parameter {} found both in static_args and in " + "variable_args sections.".format(arg)) + # try converting values to float + for key in static_args: + val = static_args[key] + try: + # the following will raise a ValueError if it cannot be cast to + # float (as we would expect for string arguments) + static_args[key] = float(val) + except ValueError: + # try converting to a list of strings; this function will just + # return val if it does not begin (end) with [ (]) + static_args[key] = _convert_liststring_to_list(val) + return variable_args, static_args
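A sketch of the configuration layout the function above expects (variable, static, and prior sections), again not part of the module source; the file name and parameter values are placeholders and the parser construction is assumed as in the previous example.

# Hypothetical example: read variable and static parameters from a config
# file containing sections like:
#
#   [variable_params]
#   mass1 =
#   mass2 =
#
#   [static_params]
#   approximant = IMRPhenomD
#   f_lower = 20
#
#   [prior-mass1]
#   name = uniform
#   min-mass1 = 10
#   max-mass1 = 80
#
#   [prior-mass2]
#   name = uniform
#   min-mass2 = 10
#   max-mass2 = 80
#
from pycbc.workflow import WorkflowConfigParser
from pycbc import distributions

cp = WorkflowConfigParser(['inference.ini'])
variable_params, static_params = distributions.read_params_from_config(cp)
# static_params values are cast to float where possible, e.g. f_lower -> 20.0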
+ + + +
+[docs] +def read_constraints_from_config(cp, transforms=None, static_args=None, + constraint_section='constraint'): + """Loads parameter constraints from a configuration file. + + Parameters + ---------- + cp : WorkflowConfigParser + An open config parser to read from. + transforms : list, optional + List of transforms to apply to parameters before applying constraints. + static_args : dict, optional + Dictionary of static parameters and their values to be applied + to constraints. + constraint_section : str, optional + The section to get the constraints from. Default is 'constraint'. + + Returns + ------- + list + List of ``Constraint`` objects. Empty if no constraints were provided. + """ + cons = [] + for subsection in cp.get_subsections(constraint_section): + name = cp.get_opt_tag(constraint_section, "name", subsection) + constraint_arg = cp.get_opt_tag( + constraint_section, "constraint_arg", subsection) + # get any other keyword arguments + kwargs = {} + section = constraint_section + "-" + subsection + extra_opts = [key for key in cp.options(section) + if key not in ["name", "constraint_arg"]] + for key in extra_opts: + val = cp.get(section, key) + if key == "required_parameters": + val = val.split(_VARARGS_DELIM) + else: + try: + val = float(val) + except ValueError: + pass + kwargs[key] = val + cons.append(constraints.constraints[name]( + constraint_arg, static_args=static_args, transforms=transforms, + **kwargs)) + + return cons
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions/angular.html b/latest/html/_modules/pycbc/distributions/angular.html new file mode 100644 index 00000000000..9dc31e5d4a7 --- /dev/null +++ b/latest/html/_modules/pycbc/distributions/angular.html @@ -0,0 +1,663 @@ + + + + + + pycbc.distributions.angular — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.distributions.angular

+# Copyright (C) 2016  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This modules provides classes for evaluating angular distributions.
+"""
+import logging
+from configparser import Error
+import numpy
+
+from pycbc import VARARGS_DELIM
+from pycbc import boundaries
+from pycbc.distributions import bounded
+from pycbc.distributions import uniform
+
+logger = logging.getLogger('pycbc.distributions.angular')
+
+
+
+[docs] +class UniformAngle(uniform.Uniform): + """A uniform distribution in which the dependent variable is between + `[0,2pi)`. + + The domain of the distribution may optionally be made cyclic using the + `cyclic_domain` parameter. + + Bounds may be provided to limit the range for which the pdf has support. + If provided, the parameter bounds are in radians. + + Parameters + ---------- + cyclic_domain : {False, bool} + If True, cyclic bounds on [0, 2pi) are applied to all values when + evaluating the pdf. This is done prior to any additional bounds + specified for a parameter are applied. Default is False. + \**params : + The keyword arguments should provide the names of parameters and + (optionally) their corresponding bounds, as either + `boundaries.Bounds` instances or tuples. The bounds must be + in [0,2PI). These are converted to radians for storage. None may also + be passed; in that case, the domain bounds will be used. + + Notes + ------ + For more information, see Uniform. + """ + name = 'uniform_angle' + + _domainbounds = (0, 2*numpy.pi) + + def __init__(self, cyclic_domain=False, **params): + # _domain is a bounds instance used to apply cyclic conditions; this is + # applied first, before any bounds specified in the initialization + # are used + self._domain = boundaries.Bounds(self._domainbounds[0], + self._domainbounds[1], cyclic=cyclic_domain) + + for p,bnds in params.items(): + if bnds is None: + bnds = self._domain + elif isinstance(bnds, boundaries.Bounds): + # convert to radians + bnds._min = bnds._min.__class__(bnds._min) + bnds._max = bnds._max.__class__(bnds._max) + else: + # create a Bounds instance from the given tuple + bnds = boundaries.Bounds(bnds[0], bnds[1]) + # check that the bounds are in the domain + if bnds.min < self._domain.min or bnds.max > self._domain.max: + raise ValueError("bounds must be in [{x},{y}); " + "got [{a},{b})".format(x=self._domain.min, + y=self._domain.max, a=bnds.min, + b=bnds.max)) + + # update + params[p] = bnds + super(UniformAngle, self).__init__(**params) + + @property + def domain(self): + """Returns the domain of the distribution.""" + return self._domain + +
+[docs] + def apply_boundary_conditions(self, **kwargs): + """Maps values to be in [0, 2pi) (the domain) first, before applying + any additional boundary conditions. + + Parameters + ---------- + \**kwargs : + The keyword args should be the name of a parameter and value to + apply its boundary conditions to. The arguments need not include + all of the parameters in self. + + Returns + ------- + dict + A dictionary of the parameter names and the conditioned values. + """ + # map values to be within the domain + kwargs = dict([[p, self._domain.apply_conditions(val)] + for p,val in kwargs.items() if p in self._bounds]) + # now apply additional conditions + return super(UniformAngle, self).apply_boundary_conditions(**kwargs)
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + """Returns a distribution based on a configuration file. + + The parameters for the distribution are retrieved from the section + titled "[`section`-`variable_args`]" in the config file. By default, + only the name of the distribution (`uniform_angle`) needs to be + specified. This will results in a uniform prior on `[0, 2pi)`. To + make the domain cyclic, add `cyclic_domain =`. To specify boundaries + that are not `[0, 2pi)`, add `(min|max)-var` arguments, where `var` + is the name of the variable. + + For example, this will initialize a variable called `theta` with a + uniform distribution on `[0, 2pi)` without cyclic boundaries: + + .. code-block:: ini + + [{section}-theta] + name = uniform_angle + + This will make the domain cyclic on `[0, 2pi)`: + + .. code-block:: ini + + [{section}-theta] + name = uniform_angle + cyclic_domain = + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the distribution + options. + section : str + Name of the section in the configuration file. + variable_args : str + The names of the parameters for this distribution, separated by + ``VARARGS_DELIM``. These must appear in the "tag" part + of the section header. + + Returns + ------- + UniformAngle + A distribution instance from the pycbc.inference.prior module. + """ + # we'll retrieve the setting for cyclic_domain directly + additional_opts = {'cyclic_domain': cp.has_option_tag(section, + 'cyclic_domain', variable_args)} + return bounded.bounded_from_config(cls, cp, section, variable_args, + bounds_required=False, + additional_opts=additional_opts)
+
+ + + +
+[docs] +class SinAngle(UniformAngle): + r"""A sine distribution; the pdf of each parameter `\theta` is given by: + + ..math:: + p(\theta) = \frac{\sin \theta}{\cos\theta_0 - \cos\theta_1}, \theta_0 \leq \theta < \theta_1, + + and 0 otherwise. Here, :math:`\theta_0, \theta_1` are the bounds of the + parameter. + + The domain of this distribution is `[0, pi]`. This is accomplished by + putting hard boundaries at `[0, pi]`. Bounds may be provided to further + limit the range for which the pdf has support. As with `UniformAngle`, + these are initialized in radians. + + Parameters + ---------- + \**params : + The keyword arguments should provide the names of parameters and + (optionally) their corresponding bounds, as either + `boundaries.Bounds` instances or tuples. The bounds must be + in [0,PI]. These are converted to radians for storage. None may also + be passed; in that case, the domain bounds will be used. + """ + name = 'sin_angle' + _func = numpy.cos + _dfunc = numpy.sin + _arcfunc = numpy.arccos + _domainbounds = (0, numpy.pi) + + def __init__(self, **params): + super(SinAngle, self).__init__(**params) + # replace the domain + self._domain = boundaries.Bounds(self._domainbounds[0], + self._domainbounds[1], btype_min='closed', btype_max='closed', + cyclic=False) + self._lognorm = -sum([numpy.log( + abs(self._func(bnd[1]) - self._func(bnd[0]))) \ + for bnd in self._bounds.values()]) + self._norm = numpy.exp(self._lognorm) + + def _cdfinv_param(self, arg, value): + """Return inverse of cdf for mapping unit interval to parameter bounds. + """ + scale = (numpy.cos(self._bounds[arg][0]) + - numpy.cos(self._bounds[arg][1])) + offset = 1. + numpy.cos(self._bounds[arg][1]) / scale + new_value = numpy.arccos(-scale * (value - offset)) + return new_value + + def _pdf(self, **kwargs): + """Returns the pdf at the given values. The keyword arguments must + contain all of parameters in self's params. Unrecognized arguments are + ignored. + """ + if kwargs not in self: + return 0. + return self._norm * \ + self._dfunc(numpy.array([kwargs[p] for p in self._params])).prod() + + + def _logpdf(self, **kwargs): + """Returns the log of the pdf at the given values. The keyword + arguments must contain all of parameters in self's params. Unrecognized + arguments are ignored. + """ + if kwargs not in self: + return -numpy.inf + return self._lognorm + \ + numpy.log(self._dfunc( + numpy.array([kwargs[p] for p in self._params]))).sum()
+ + + +
+[docs] +class CosAngle(SinAngle): + r"""A cosine distribution. This is the same thing as a sine distribution, + but with the domain shifted to `[-pi/2, pi/2]`. See SinAngle for more + details. + + Parameters + ---------- + \**params : + The keyword arguments should provide the names of parameters and + (optionally) their corresponding bounds, as either + `boundaries.Bounds` instances or tuples. The bounds must be + in [-PI/2, PI/2]. + """ + name = 'cos_angle' + _func = numpy.sin + _dfunc = numpy.cos + _arcfunc = numpy.arcsin + _domainbounds = (-numpy.pi/2, numpy.pi/2) + + def _cdfinv_param(self, param, value): + a = self._bounds[param][0] + b = self._bounds[param][1] + scale = numpy.sin(b) - numpy.sin(a) + offset = 1. - numpy.sin(b)/(numpy.sin(b) - numpy.sin(a)) + new_value = numpy.arcsin((value - offset) * scale) + return new_value
+ + + +
+[docs] +class UniformSolidAngle(bounded.BoundedDist): + """A distribution that is uniform in the solid angle of a sphere. The names + of the two angluar parameters can be specified on initalization. + + Parameters + ---------- + polar_angle : {'theta', str} + The name of the polar angle. + azimuthal_angle : {'phi', str} + The name of the azimuthal angle. + polar_bounds : {None, tuple} + Limit the polar angle to the given bounds. If None provided, the polar + angle will vary from 0 (the north pole) to pi (the south pole). The + bounds should be specified as factors of pi. For example, to limit + the distribution to the northern hemisphere, set + `polar_bounds=(0,0.5)`. + azimuthal_bounds : {None, tuple} + Limit the azimuthal angle to the given bounds. If None provided, the + azimuthal angle will vary from 0 to 2pi. The + bounds should be specified as factors of pi. For example, to limit + the distribution to the one hemisphere, set `azimuthal_bounds=(0,1)`. + azimuthal_cyclic_domain : {False, bool} + Make the domain of the azimuthal angle be cyclic; i.e., azimuthal + values are constrained to be in [0, 2pi) using cyclic boundaries prior + to applying any other boundary conditions and prior to evaluating the + pdf. Default is False. + """ + name = 'uniform_solidangle' + _polardistcls = SinAngle + _azimuthaldistcls = UniformAngle + _default_polar_angle = 'theta' + _default_azimuthal_angle = 'phi' + + def __init__(self, polar_angle=None, azimuthal_angle=None, + polar_bounds=None, azimuthal_bounds=None, + azimuthal_cyclic_domain=False): + if polar_angle is None: + polar_angle = self._default_polar_angle + if azimuthal_angle is None: + azimuthal_angle = self._default_azimuthal_angle + self._polardist = self._polardistcls(**{ + polar_angle: polar_bounds}) + self._azimuthaldist = self._azimuthaldistcls(**{ + azimuthal_angle: azimuthal_bounds, + 'cyclic_domain': azimuthal_cyclic_domain}) + self._polar_angle = polar_angle + self._azimuthal_angle = azimuthal_angle + self._bounds = self._polardist.bounds.copy() + self._bounds.update(self._azimuthaldist.bounds) + self._params = sorted(self._bounds.keys()) + + @property + def bounds(self): + """dict: The bounds on each angle. The keys are the names of the polar + and azimuthal angles, the values are the minimum and maximum of each, + in radians. For example, if the distribution was initialized with + `polar_angle='theta', polar_bounds=(0,0.5)` then the bounds will have + `'theta': 0, 1.5707963267948966` as an entry.""" + return self._bounds + + @property + def polar_angle(self): + """str: The name of the polar angle.""" + return self._polar_angle + + @property + def azimuthal_angle(self): + """str: The name of the azimuthal angle.""" + return self._azimuthal_angle + + def _cdfinv_param(self, param, value): + """ Return the cdfinv for a single given parameter """ + if param == self.polar_angle: + return self._polardist._cdfinv_param(param, value) + elif param == self.azimuthal_angle: + return self._azimuthaldist._cdfinv_param(param, value) + +
+[docs] + def apply_boundary_conditions(self, **kwargs): + """Maps the given values to be within the domain of the azimuthal and + polar angles, before applying any other boundary conditions. + + Parameters + ---------- + \**kwargs : + The keyword args must include values for both the azimuthal and + polar angle, using the names they were initilialized with. For + example, if `polar_angle='theta'` and `azimuthal_angle=`phi`, then + the keyword args must be `theta={val1}, phi={val2}`. + + Returns + ------- + dict + A dictionary of the parameter names and the conditioned values. + """ + polarval = kwargs[self._polar_angle] + azval = kwargs[self._azimuthal_angle] + # constrain each angle to its domain + polarval = self._polardist._domain.apply_conditions(polarval) + azval = self._azimuthaldist._domain.apply_conditions(azval) + # apply any other boundary conditions + polarval = self._bounds[self._polar_angle].apply_conditions(polarval) + azval = self._bounds[self._azimuthal_angle].apply_conditions(azval) + return {self._polar_angle: polarval, self._azimuthal_angle: azval}
+ + + + def _pdf(self, **kwargs): + """ + Returns the pdf at the given angles. + + Parameters + ---------- + \**kwargs: + The keyword arguments should specify the value for each angle, + using the names of the polar and azimuthal angles as the keywords. + Unrecognized arguments are ignored. + + Returns + ------- + float + The value of the pdf at the given values. + """ + return self._polardist._pdf(**kwargs) * \ + self._azimuthaldist._pdf(**kwargs) + + + def _logpdf(self, **kwargs): + """ + Returns the logpdf at the given angles. + + Parameters + ---------- + \**kwargs: + The keyword arguments should specify the value for each angle, + using the names of the polar and azimuthal angles as the keywords. + Unrecognized arguments are ignored. + + Returns + ------- + float + The value of the pdf at the given values. + """ + return self._polardist._logpdf(**kwargs) +\ + self._azimuthaldist._logpdf(**kwargs) + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + """Returns a distribution based on a configuration file. + + The section must have the names of the polar and azimuthal angles in + the tag part of the section header. For example: + + .. code-block:: ini + + [prior-theta+phi] + name = uniform_solidangle + + If nothing else is provided, the default names and bounds of the polar + and azimuthal angles will be used. To specify a different name for + each angle, set the `polar-angle` and `azimuthal-angle` attributes. For + example: + + .. code-block:: ini + + [prior-foo+bar] + name = uniform_solidangle + polar-angle = foo + azimuthal-angle = bar + + Note that the names of the variable args in the tag part of the section + name must match the names of the polar and azimuthal angles. + + Bounds may also be specified for each angle, as factors of pi. For + example: + + .. code-block:: ini + + [prior-theta+phi] + polar-angle = theta + azimuthal-angle = phi + min-theta = 0 + max-theta = 0.5 + + This will return a distribution that is uniform in the upper + hemisphere. + + By default, the domain of the azimuthal angle is `[0, 2pi)`. To make + this domain cyclic, add `azimuthal_cyclic_domain =`. + + Parameters + ---------- + cp : ConfigParser instance + The config file. + section : str + The name of the section. + variable_args : str + The names of the parameters for this distribution, separated by + ``VARARGS_DELIM``. These must appear in the "tag" part + of the section header. + + Returns + ------- + UniformSolidAngle + A distribution instance from the pycbc.inference.prior module. + """ + tag = variable_args + variable_args = variable_args.split(VARARGS_DELIM) + + # get the variables that correspond to the polar/azimuthal angles + try: + polar_angle = cp.get_opt_tag(section, 'polar-angle', tag) + except Error: + polar_angle = cls._default_polar_angle + try: + azimuthal_angle = cp.get_opt_tag(section, 'azimuthal-angle', tag) + except Error: + azimuthal_angle = cls._default_azimuthal_angle + + if polar_angle not in variable_args: + raise Error("polar-angle %s is not one of the variable args (%s)"%( + polar_angle, ', '.join(variable_args))) + if azimuthal_angle not in variable_args: + raise Error("azimuthal-angle %s is not one of the variable args "%( + azimuthal_angle) + "(%s)"%(', '.join(variable_args))) + + # get the bounds, if provided + polar_bounds = bounded.get_param_bounds_from_config( + cp, section, tag, + polar_angle) + azimuthal_bounds = bounded.get_param_bounds_from_config( + cp, section, tag, + azimuthal_angle) + + # see if the a cyclic domain is desired for the azimuthal angle + azimuthal_cyclic_domain = cp.has_option_tag(section, + 'azimuthal_cyclic_domain', tag) + + return cls(polar_angle=polar_angle, azimuthal_angle=azimuthal_angle, + polar_bounds=polar_bounds, + azimuthal_bounds=azimuthal_bounds, + azimuthal_cyclic_domain=azimuthal_cyclic_domain)
+
+ + + +__all__ = ['UniformAngle', 'SinAngle', 'CosAngle', 'UniformSolidAngle'] +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions/arbitrary.html b/latest/html/_modules/pycbc/distributions/arbitrary.html new file mode 100644 index 00000000000..f74af50106d --- /dev/null +++ b/latest/html/_modules/pycbc/distributions/arbitrary.html @@ -0,0 +1,491 @@ + + + + + + pycbc.distributions.arbitrary — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.distributions.arbitrary

+# Copyright (C) 2016 Miriam Cabero Mueller, Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module provides classes for evaluating arbitrary distributions from
+a file.
+"""
+import logging
+import numpy
+import scipy.stats
+
+from pycbc.distributions import bounded
+import pycbc.transforms
+from pycbc.io.hdf import HFile
+
+logger = logging.getLogger('pycbc.distributions.arbitrary')
+
+
+[docs] +class Arbitrary(bounded.BoundedDist): + r"""A distribution constructed from a set of parameter values using a kde. + Bounds may be optionally provided to limit the range. + + Parameters + ---------- + bounds : dict, optional + Independent bounds on one or more parameters may be provided to limit + the range of the kde. + bandwidth : str, optional + Set the bandwidth method for the KDE. See + :py:func:`scipy.stats.gaussian_kde` for details. Default is "scott". + \**params : + The keyword arguments should provide the names of the parameters and + a list of their parameter values. If multiple parameters are provided, + a single kde will be produced with dimension equal to the number of + parameters. + """ + name = 'arbitrary' + + def __init__(self, bounds=None, bandwidth="scott", **kwargs): + # initialize the bounds + if bounds is None: + bounds = {} + bounds.update({p: None for p in kwargs if p not in bounds}) + super(Arbitrary, self).__init__(**bounds) + # check that all parameters specified in bounds have samples + if set(self.params) != set(kwargs.keys()): + raise ValueError("Must provide samples for all parameters given " + "in the bounds dictionary") + # if bounds are provided use logit transform to move the points + # to +/- inifinity + self._transforms = {} + self._tparams = {} + for param,bnds in self.bounds.items(): + if numpy.isfinite(bnds[1] - bnds[0]): + tparam = 'logit'+param + samples = kwargs[param] + t = pycbc.transforms.Logit(param, tparam, domain=bnds) + self._transforms[tparam] = t + self._tparams[param] = tparam + # remove any sample points that fall out side of the bounds + outside = bnds.__contains__(samples) + if outside.any(): + samples = samples[outside] + # transform the sample points + kwargs[param] = t.transform({param: samples})[tparam] + elif not (~numpy.isfinite(bnds[0]) and ~numpy.isfinite(bnds[1])): + raise ValueError("if specifying bounds, both bounds must " + "be finite") + # build the kde + self._kde = self.get_kde_from_arrays(*[kwargs[p] for p in self.params]) + self.set_bandwidth(bandwidth) + + @property + def params(self): + return self._params + + @property + def kde(self): + return self._kde + + def _pdf(self, **kwargs): + """Returns the pdf at the given values. The keyword arguments must + contain all of parameters in self's params. Unrecognized arguments are + ignored. + """ + for p in self._params: + if p not in kwargs.keys(): + raise ValueError('Missing parameter {} to construct pdf.' + .format(p)) + if kwargs in self: + # transform into the kde space + jacobian = 1. + for param, tparam in self._tparams.items(): + t = self._transforms[tparam] + try: + samples = t.transform({param: kwargs[param]}) + except ValueError as e: + # can get a value error if the value is exactly == to + # the bounds, in which case, just return 0. + if kwargs[param] in self.bounds[param]: + return 0. + else: + raise ValueError(e) + kwargs[param] = samples[tparam] + # update the jacobian for the transform; if p is the pdf + # in the params frame (the one we want) and p' is the pdf + # in the transformed frame (the one that's calculated) then: + # p = J * p', where J is the Jacobian of going from p to p' + jacobian *= t.jacobian(samples) + # for scipy < 0.15.0, gaussian_kde.pdf = gaussian_kde.evaluate + this_pdf = jacobian * self._kde.evaluate([kwargs[p] + for p in self._params]) + if len(this_pdf) == 1: + return float(this_pdf) + else: + return this_pdf + else: + return 0. + + def _logpdf(self, **kwargs): + """Returns the log of the pdf at the given values. 
The keyword + arguments must contain all of parameters in self's params. + Unrecognized arguments are ignored. + """ + if kwargs not in self: + return -numpy.inf + else: + return numpy.log(self._pdf(**kwargs)) + +
+[docs] + def set_bandwidth(self, set_bw="scott"): + self._kde.set_bandwidth(set_bw)
+[docs] + def rvs(self, size=1, param=None): + """Gives a set of random values drawn from the kde. + + Parameters + ---------- + size : {1, int} + The number of values to generate; default is 1. + param : {None, string} + If provided, will just return values for the given parameter. + Otherwise, returns random values for each parameter. + + Returns + ------- + structured array + The random values in a numpy structured array. If a param was + specified, the array will only have an element corresponding to the + given parameter. Otherwise, the array will have an element for each + parameter in self's params. + """ + if param is not None: + dtype = [(param, float)] + else: + dtype = [(p, float) for p in self.params] + size = int(size) + arr = numpy.zeros(size, dtype=dtype) + draws = self._kde.resample(size) + draws = {param: draws[ii,:] for ii,param in enumerate(self.params)} + for (param,_) in dtype: + try: + # transform back to param space + tparam = self._tparams[param] + tdraws = {tparam: draws[param]} + draws[param] = self._transforms[tparam].inverse_transform( + tdraws)[param] + except KeyError: + pass + arr[param] = draws[param] + return arr
+[docs] + @staticmethod + def get_kde_from_arrays(*arrays): + """Constructs a KDE from the given arrays. + + \*arrays : + Each argument should be a 1D numpy array to construct the kde from. + The resulting KDE will have dimension given by the number of + parameters. + """ + return scipy.stats.gaussian_kde(numpy.vstack(arrays))
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + """Raises a NotImplementedError; to load from a config file, use + `FromFile`. + """ + raise NotImplementedError("This class does not support loading from a " + "config file. Use `FromFile` instead.")
+[docs] +class FromFile(Arbitrary): + r"""A distribution that reads the values of the parameter(s) from an hdf + file, computes the kde to construct the pdf, and draws random variables + from it. + + Parameters + ---------- + filename : str + The path to an hdf file containing the values of the parameters that + want to be used to construct the distribution. Each parameter should + be a separate dataset in the hdf file, and all datasets should have + the same size. For example, to give a prior for mass1 and mass2 from + file f, f['mass1'] and f['mass2'] contain the n values for each + parameter. + datagroup : str, optional + The name of the group to look in for the samples. For example, if + ``datagroup = 'samples'``, then parameter ``param`` will be retrived + from ``f['samples'][param]``. If none provided (the default) the data + sets will be assumed to be in the top level directory of the file. + \**params : + The keyword arguments should provide the names of the parameters to be + read from the file and (optionally) their bounds. If no parameters are + provided, it will use all the parameters found in the file. To provide + bounds, specify e.g. mass1=[10,100]. Otherwise, mass1=None. + + Attributes + ---------- + norm : float + The normalization of the multi-dimensional pdf. + lognorm : float + The log of the normalization. + kde : + The kde obtained from the values in the file. + """ + name = 'fromfile' + def __init__(self, filename=None, datagroup=None, **params): + if filename is None: + raise ValueError('A file must be specified for this distribution.') + self._filename = filename + self.datagroup = datagroup + # Get the parameter names to pass to get_kde_from_file + if len(params) == 0: + ps = None + else: + ps = list(params.keys()) + param_vals, bw = self.get_arrays_from_file(filename, params=ps) + super(FromFile, self).__init__(bounds=params, bandwidth=bw, + **param_vals) + + @property + def filename(self): + """str: The path to the file containing values for the parameter(s). + """ + return self._filename + +
+[docs] + def get_arrays_from_file(self, params_file, params=None): + """Reads the values of one or more parameters from an hdf file and + returns as a dictionary. + + Parameters + ---------- + params_file : str + The hdf file that contains the values of the parameters. + params : {None, list} + If provided, will just retrieve the given parameter names. + + Returns + ------- + dict + A dictionary of the parameters mapping `param_name -> array`. + """ + try: + f = HFile(params_file, 'r') + except: + raise ValueError('File not found.') + if self.datagroup is not None: + get = f[self.datagroup] + else: + get = f + if params is not None: + if not isinstance(params, list): + params = [params] + for p in params: + if p not in get.keys(): + raise ValueError('Parameter {} is not in {}' + .format(p, params_file)) + else: + params = [str(k) for k in get.keys()] + params_values = {p: get[p][()] for p in params} + try: + bandwidth = f.attrs["bandwidth"] + except KeyError: + bandwidth = "scott" + + f.close() + return params_values, bandwidth
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + """Returns a distribution based on a configuration file. + + The parameters + for the distribution are retrieved from the section titled + "[`section`-`variable_args`]" in the config file. + + The file to construct the distribution from must be provided by setting + `filename`. Boundary arguments can be provided in the same way as + described in `get_param_bounds_from_config`. + + .. code-block:: ini + + [{section}-{tag}] + name = fromfile + filename = ra_prior.hdf + min-ra = 0 + max-ra = 6.28 + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the distribution + options. + section : str + Name of the section in the configuration file. + variable_args : str + The names of the parameters for this distribution, separated by + `prior.VARARGS_DELIM`. These must appear in the "tag" part + of the section header. + + Returns + ------- + BoundedDist + A distribution instance from the pycbc.inference.prior module. + """ + return bounded.bounded_from_config(cls, cp, section, variable_args, + bounds_required=False)
+ + +__all__ = ['Arbitrary', 'FromFile'] +
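As a rough usage sketch (not part of the module listing above), the `Arbitrary` class can be handed raw sample arrays directly; the Gaussian KDE built in `__init__` then backs both `pdf` and `rvs`. The parameter name `x` and the sample values are invented for illustration:

    >>> import numpy
    >>> from pycbc.distributions.arbitrary import Arbitrary
    >>> samples = numpy.random.normal(loc=0., scale=1., size=5000)
    >>> dist = Arbitrary(x=samples)          # unbounded 1D KDE over parameter 'x'
    >>> dist.pdf(x=0.)                       # KDE density estimate, roughly 0.4 here
    >>> draws = dist.rvs(size=10)            # structured array with an 'x' field
    >>> draws['x'].shape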
\ No newline at end of file
diff --git a/latest/html/_modules/pycbc/distributions/bounded.html b/latest/html/_modules/pycbc/distributions/bounded.html
new file mode 100644
index 00000000000..7655799c0c1
--- /dev/null
+++ b/latest/html/_modules/pycbc/distributions/bounded.html
@@ -0,0 +1,514 @@
+ pycbc.distributions.bounded — PyCBC 2.5.dev2 documentation

Source code for pycbc.distributions.bounded

+# Copyright (C) 2016  Collin Capano, Christopher M. Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module provides classes for evaluating distributions with bounds.
+"""
+import logging
+import warnings
+from configparser import Error
+import numpy
+
+from pycbc import boundaries
+from pycbc import VARARGS_DELIM
+
+logger = logging.getLogger('pycbc.distributions.bounded')
+
+#
+#   Distributions for priors
+#
+
+[docs] +def get_param_bounds_from_config(cp, section, tag, param): + """Gets bounds for the given parameter from a section in a config file. + + Minimum and maximum values for bounds are specified by adding + `min-{param}` and `max-{param}` options, where `{param}` is the name of + the parameter. The types of boundary (open, closed, or reflected) to create + may also be specified by adding options `btype-min-{param}` and + `btype-max-{param}`. Cyclic conditions can be adding option + `cyclic-{param}`. If no `btype` arguments are provided, the + left bound will be closed and the right open. + + For example, the following will create right-open bounds for parameter + `foo`: + + .. code-block:: ini + + [{section}-{tag}] + min-foo = -1 + max-foo = 1 + + This would make the boundaries cyclic: + + .. code-block:: ini + + [{section}-{tag}] + min-foo = -1 + max-foo = 1 + cyclic-foo = + + For more details on boundary types and their meaning, see + `boundaries.Bounds`. + + If the parameter is not found in the section will just return None (in + this case, all `btype` and `cyclic` arguments are ignored for that + parameter). If bounds are specified, both a minimum and maximum must be + provided, else a Value or Type Error will be raised. + + Parameters + ---------- + cp : ConfigParser instance + The config file. + section : str + The name of the section. + tag : str + Any tag in the section name. The full section name searched for in + the config file is `{section}(-{tag})`. + param : str + The name of the parameter to retrieve bounds for. + + Returns + ------- + bounds : {Bounds instance | None} + If bounds were provided, a `boundaries.Bounds` instance + representing the bounds. Otherwise, `None`. + """ + try: + minbnd = float(cp.get_opt_tag(section, 'min-'+param, tag)) + except Error: + minbnd = None + try: + maxbnd = float(cp.get_opt_tag(section, 'max-'+param, tag)) + except Error: + maxbnd = None + if minbnd is None and maxbnd is None: + bnds = None + elif minbnd is None or maxbnd is None: + raise ValueError("if specifying bounds for %s, " %(param) + + "you must provide both a minimum and a maximum") + else: + bndargs = {'min_bound': minbnd, 'max_bound': maxbnd} + # try to get any other conditions, if provided + try: + minbtype = cp.get_opt_tag(section, 'btype-min-{}'.format(param), + tag) + except Error: + minbtype = 'closed' + try: + maxbtype = cp.get_opt_tag(section, 'btype-max-{}'.format(param), + tag) + except Error: + maxbtype = 'open' + bndargs.update({'btype_min': minbtype, 'btype_max': maxbtype}) + cyclic = cp.has_option_tag(section, 'cyclic-{}'.format(param), tag) + bndargs.update({'cyclic': cyclic}) + bnds = boundaries.Bounds(**bndargs) + return bnds
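To make the mapping concrete: for a section containing `min-foo = -1` and `max-foo = 1` with no `btype` or `cyclic` options, the function above ends up constructing (roughly) the following `Bounds` object. This is a sketch of the equivalent direct call, not output copied from PyCBC:

    >>> from pycbc import boundaries
    >>> bnds = boundaries.Bounds(min_bound=-1., max_bound=1.,
    ...                          btype_min='closed', btype_max='open',
    ...                          cyclic=False)
    >>> bnds.min, bnds.max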
+[docs] +def bounded_from_config(cls, cp, section, variable_args, + bounds_required=False, additional_opts=None): + """Returns a bounded distribution based on a configuration file. The + parameters for the distribution are retrieved from the section titled + "[`section`-`variable_args`]" in the config file. + + Parameters + ---------- + cls : pycbc.prior class + The class to initialize with. + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the distribution + options. + section : str + Name of the section in the configuration file. + variable_args : str + The names of the parameters for this distribution, separated by + `prior.VARARGS_DELIM`. These must appear in the "tag" part + of the section header. + bounds_required : {False, bool} + If True, raise a ValueError if a min and max are not provided for + every parameter. Otherwise, the prior will be initialized with the + parameter set to None. Even if bounds are not required, a + ValueError will be raised if only one bound is provided; i.e., + either both bounds need to provided or no bounds. + additional_opts : {None, dict} + Provide additional options to be passed to the distribution class; + should be a dictionary specifying option -> value. If an option is + provided that also exists in the config file, the value provided will + be used instead of being read from the file. + + Returns + ------- + cls + An instance of the given class. + """ + tag = variable_args + variable_args = variable_args.split(VARARGS_DELIM) + + if additional_opts is None: + additional_opts = {} + + # list of args that are used to construct distribution + special_args = ["name"] + \ + ['min-{}'.format(arg) for arg in variable_args] + \ + ['max-{}'.format(arg) for arg in variable_args] + \ + ['btype-min-{}'.format(arg) for arg in variable_args] + \ + ['btype-max-{}'.format(arg) for arg in variable_args] + \ + ['cyclic-{}'.format(arg) for arg in variable_args] + \ + list(additional_opts.keys()) + + # get a dict with bounds as value + dist_args = {} + for param in variable_args: + bounds = get_param_bounds_from_config(cp, section, tag, param) + if bounds_required and bounds is None: + raise ValueError("min and/or max missing for parameter %s"%( + param)) + dist_args[param] = bounds + + # add any additional options that user put in that section + for key in cp.options("-".join([section, tag])): + + # ignore options that are already included + if key in special_args: + continue + + # check if option can be cast as a float + val = cp.get_opt_tag(section, key, tag) + try: + val = float(val) + except ValueError: + pass + + # add option + dist_args.update({key:val}) + + dist_args.update(additional_opts) + + # construction distribution and add to list + return cls(**dist_args)
+[docs] +class BoundedDist(object): + """ + A generic class for storing common properties of distributions in which + each parameter has a minimum and maximum value. + + Parameters + ---------- + \**params : + The keyword arguments should provide the names of parameters and their + corresponding bounds, as either tuples or a `boundaries.Bounds` + instance. + """ + def __init__(self, **params): + # convert input bounds to Bounds class, if necessary + for param,bnds in params.items(): + if bnds is None: + params[param] = boundaries.Bounds() + elif not isinstance(bnds, boundaries.Bounds): + params[param] = boundaries.Bounds(bnds[0], bnds[1]) + # warn the user about reflected boundaries + if isinstance(bnds, boundaries.Bounds) and ( + bnds.min.name == 'reflected' or + bnds.max.name == 'reflected'): + warnings.warn("Param {} has one or more ".format(param) + + "reflected boundaries. Reflected boundaries " + "can cause issues when used in an MCMC.") + self._bounds = params + self._params = sorted(list(params.keys())) + + @property + def params(self): + """list of strings: The list of parameter names.""" + return self._params + + @property + def bounds(self): + """dict: A dictionary of the parameter names and their bounds.""" + return self._bounds + + def __contains__(self, params): + try: + return all(self._bounds[p].contains_conditioned(params[p]) + for p in self._params) + except KeyError: + raise ValueError("must provide all parameters [%s]" %( + ', '.join(self._params))) + +
+[docs] + def apply_boundary_conditions(self, **kwargs): + """Applies any boundary conditions to the given values (e.g., applying + cyclic conditions, and/or reflecting values off of boundaries). This + is done by running `apply_conditions` of each bounds in self on the + corresponding value. See `boundaries.Bounds.apply_conditions` for + details. + + Parameters + ---------- + \**kwargs : + The keyword args should be the name of a parameter and value to + apply its boundary conditions to. The arguments need not include + all of the parameters in self. Any unrecognized arguments are + ignored. + + Returns + ------- + dict + A dictionary of the parameter names and the conditioned values. + """ + return dict([[p, self._bounds[p].apply_conditions(val)] + for p,val in kwargs.items() if p in self._bounds])
+[docs] + def pdf(self, **kwargs): + """Returns the pdf at the given values. The keyword arguments must + contain all of parameters in self's params. Unrecognized arguments are + ignored. Any boundary conditions are applied to the values before the + pdf is evaluated. + """ + return self._pdf(**self.apply_boundary_conditions(**kwargs))
+ + + def _pdf(self, **kwargs): + """The underlying pdf function called by `self.pdf`. This must be set + by any class that inherits from this class. Otherwise, a + `NotImplementedError` is raised. + """ + raise NotImplementedError("pdf function not set") + +
+[docs] + def logpdf(self, **kwargs): + """Returns the log of the pdf at the given values. The keyword + arguments must contain all of parameters in self's params. + Unrecognized arguments are ignored. Any boundary conditions are + applied to the values before the pdf is evaluated. + """ + return self._logpdf(**self.apply_boundary_conditions(**kwargs))
+ + + def _logpdf(self, **kwargs): + """The underlying log pdf function called by `self.logpdf`. This must + be set by any class that inherits from this class. Otherwise, a + `NotImplementedError` is raised. + """ + raise NotImplementedError("pdf function not set") + + __call__ = logpdf + + def _cdfinv_param(self, param, value): + """Return the cdfinv for a single given parameter """ + raise NotImplementedError("inverse cdf not set") + +
+[docs] + def cdfinv(self, **kwds): + """Return the inverse cdf to map the unit interval to parameter bounds. + You must provide a keyword for every parameter. + """ + updated = {} + for param in self.params: + updated[param] = self._cdfinv_param(param, kwds[param]) + return updated
+[docs] + def rvs(self, size=1, **kwds): + "Draw random value" + dtype = [(p, float) for p in self.params] + arr = numpy.zeros(size, dtype=dtype) + draw = {} + for param in self.params: + draw[param] = numpy.random.uniform(0, 1, size=size) + exp = self.cdfinv(**draw) + for param in self.params: + arr[param] = exp[param] + return arr
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args, bounds_required=False): + """Returns a distribution based on a configuration file. The parameters + for the distribution are retrieved from the section titled + "[`section`-`variable_args`]" in the config file. + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the distribution + options. + section : str + Name of the section in the configuration file. + variable_args : str + The names of the parameters for this distribution, separated by + `prior.VARARGS_DELIM`. These must appear in the "tag" part + of the section header. + bounds_required : {False, bool} + If True, raise a ValueError if a min and max are not provided for + every parameter. Otherwise, the prior will be initialized with the + parameter set to None. Even if bounds are not required, a + ValueError will be raised if only one bound is provided; i.e., + either both bounds need to provided or no bounds. + + Returns + ------- + BoundedDist + A distribution instance from the pycbc.distribution subpackage. + """ + return bounded_from_config(cls, cp, section, variable_args, + bounds_required=bounds_required)
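The `_pdf`, `_logpdf`, and `_cdfinv_param` hooks above are what a concrete subclass must supply. The following hypothetical, minimal subclass (the name `MyUniform` and its flat pdf are invented for illustration) shows how the inherited `pdf`, `logpdf`, and `rvs` then work:

    >>> import numpy
    >>> from pycbc.distributions.bounded import BoundedDist
    >>> class MyUniform(BoundedDist):
    ...     name = 'my_uniform'
    ...     def _pdf(self, **kwargs):
    ...         if kwargs not in self:
    ...             return 0.
    ...         # flat density: one over the product of the bound widths
    ...         widths = [b - a for a, b in (self.bounds[p] for p in self.params)]
    ...         return 1. / numpy.prod(widths)
    ...     def _logpdf(self, **kwargs):
    ...         p = self._pdf(**kwargs)
    ...         return numpy.log(p) if p > 0 else -numpy.inf
    ...     def _cdfinv_param(self, param, value):
    ...         a, b = self.bounds[param]
    ...         return a + value * (b - a)
    >>> d = MyUniform(x=(0., 10.))
    >>> d.logpdf(x=3.)                 # log(1/10)
    >>> d.rvs(size=5)['x']             # inherited rvs uses _cdfinv_param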
\ No newline at end of file
diff --git a/latest/html/_modules/pycbc/distributions/constraints.html b/latest/html/_modules/pycbc/distributions/constraints.html
new file mode 100644
index 00000000000..e1b1cc3b6e8
--- /dev/null
+++ b/latest/html/_modules/pycbc/distributions/constraints.html
@@ -0,0 +1,262 @@
+ pycbc.distributions.constraints — PyCBC 2.5.dev2 documentation

Source code for pycbc.distributions.constraints

+# Copyright (C) 2017 Christopher M. Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module provides classes for evaluating multi-dimensional constraints.
+"""
+import logging
+import re
+import scipy.spatial
+import numpy
+
+from pycbc import transforms
+from pycbc.io import record, HFile
+
+logger = logging.getLogger('pycbc.distributions.constraints')
+
+
+
+[docs] +class Constraint(object): + """Creates a constraint that evaluates to True if parameters obey + the constraint and False if they do not. + """ + name = "custom" + + def __init__(self, constraint_arg, static_args=None, transforms=None, + **kwargs): + static_args = ( + {} if static_args is None + else dict(sorted( + static_args.items(), key=lambda x: len(x[0]), reverse=True)) + ) + for arg, val in static_args.items(): + swp = f"'{val}'" if isinstance(val, str) else str(val) + # Substitute static arg name for value if it appears in the + # constraint_arg string at the beginning of a word and is not + # followed by an underscore or equals sign. + # This ensures that static_args that are also kwargs in function calls are + # handled correctly, i.e., the kwarg is not touched while its value is replaced + # with the static_arg value. + constraint_arg = re.sub( + r'\b{}(?!\_|\=)'.format(arg), swp, constraint_arg) + self.constraint_arg = constraint_arg + self.transforms = transforms + for kwarg in kwargs.keys(): + setattr(self, kwarg, kwargs[kwarg]) + + def __call__(self, params): + """Evaluates constraint. + """ + # cast to FieldArray + if isinstance(params, dict): + params = record.FieldArray.from_kwargs(**params) + elif not isinstance(params, record.FieldArray): + raise ValueError("params must be dict or FieldArray instance") + + # try to evaluate; this will assume that all of the needed parameters + # for the constraint exists in params + try: + out = self._constraint(params) + except NameError: + # one or more needed parameters don't exist; try applying the + # transforms + params = transforms.apply_transforms(params, self.transforms) \ + if self.transforms else params + out = self._constraint(params) + if isinstance(out, record.FieldArray): + out = out.item() if params.size == 1 else out + return out + + def _constraint(self, params): + """ Evaluates constraint function. + """ + return params[self.constraint_arg]
+[docs] +class SupernovaeConvexHull(Constraint): + """Pre defined constraint for core-collapse waveforms that checks + whether a given set of coefficients lie within the convex hull of + the coefficients of the principal component basis vectors. + """ + name = "supernovae_convex_hull" + required_parameters = ["coeff_0", "coeff_1"] + + def __init__(self, constraint_arg, transforms=None, **kwargs): + super(SupernovaeConvexHull, + self).__init__(constraint_arg, transforms=transforms, **kwargs) + + if 'principal_components_file' in kwargs: + pc_filename = kwargs['principal_components_file'] + hull_dimention = numpy.array(kwargs['hull_dimention']) + self.hull_dimention = int(hull_dimention) + pc_file = HFile(pc_filename, 'r') + pc_coefficients = numpy.array(pc_file.get('coefficients')) + pc_file.close() + hull_points = [] + for dim in range(self.hull_dimention): + hull_points.append(pc_coefficients[:, dim]) + hull_points = numpy.array(hull_points).T + pc_coeffs_hull = scipy.spatial.Delaunay(hull_points) + self._hull = pc_coeffs_hull + + def _constraint(self, params): + + output_array = [] + points = numpy.array([params["coeff_0"], + params["coeff_1"], + params["coeff_2"]]) + for coeff_index in range(len(params["coeff_0"])): + point = points[:, coeff_index][:self.hull_dimention] + output_array.append(self._hull.find_simplex(point) >= 0) + return numpy.array(output_array)
+ + + +# list of all constraints +constraints = { + Constraint.name : Constraint, + SupernovaeConvexHull.name : SupernovaeConvexHull, +} +
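A rough sketch of using the `Constraint` class directly (the parameter names and the mass cut are invented): the constraint string is evaluated against a `FieldArray`, so it can be any expression of the parameter fields.

    >>> import numpy
    >>> from pycbc.distributions.constraints import Constraint
    >>> con = Constraint('mass1 + mass2 < 30')
    >>> params = {'mass1': numpy.array([10., 25.]),
    ...           'mass2': numpy.array([10., 25.])}
    >>> con(params)          # expect something like array([ True, False])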
\ No newline at end of file
diff --git a/latest/html/_modules/pycbc/distributions/external.html b/latest/html/_modules/pycbc/distributions/external.html
new file mode 100644
index 00000000000..ebe31abcb09
--- /dev/null
+++ b/latest/html/_modules/pycbc/distributions/external.html
@@ -0,0 +1,384 @@
+ pycbc.distributions.external — PyCBC 2.5.dev2 documentation

Source code for pycbc.distributions.external

+# Copyright (C) 2020 Alexander Nitz, 2022 Shichao Wu
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module provides classes for evaluating PDF, logPDF, CDF and inverse CDF
+from external arbitrary distributions, and drawing samples from them.
+"""
+import logging
+import importlib
+import numpy as np
+
+import scipy.integrate as scipy_integrate
+import scipy.interpolate as scipy_interpolate
+
+from pycbc import VARARGS_DELIM
+
+logger = logging.getLogger('pycbc.distributions.external')
+
+
+
+[docs] +class External(object): + """ Distribution defined by external cdfinv and logpdf functions + + To add to an inference configuration file: + + .. code-block:: ini + + [prior-param1+param2] + name = external + module = custom_mod + logpdf = custom_function_name + cdfinv = custom_function_name2 + + Parameters + ---------- + params : list + list of parameter names + custom_mod : module + module from which logpdf and cdfinv functions can be imported + logpdf : function + function which returns the logpdf + cdfinv : function + function which applies the invcdf + + Examples + -------- + To instantate by hand and example of function format. You must provide + the logpdf function, and you may either provide the rvs or cdfinv function. + If the cdfinv is provided, but not the rvs, the random values will + be calculated using the cdfinv function. + + >>> import numpy + >>> params = ['x', 'y'] + >>> def logpdf(x=None, y=None): + ... p = numpy.ones(len(x)) + ... return p + >>> + >>> def cdfinv(**kwds): + ... return kwds + >>> e = External(['x', 'y'], logpdf, cdfinv=cdfinv) + >>> e.rvs(size=10) + """ + name = "external" + + def __init__(self, params=None, logpdf=None, + rvs=None, cdfinv=None, **kwds): + self.params = params + self.logpdf = logpdf + self.cdfinv = cdfinv + self._rvs = rvs + + if not (rvs or cdfinv): + raise ValueError("Must provide either rvs or cdfinv") + +
+[docs] + def rvs(self, size=1, **kwds): + "Draw random value" + if self._rvs: + return self._rvs(size=size) + samples = {param: np.random.uniform(0, 1, size=size) + for param in self.params} + return self.cdfinv(**samples)
+[docs] + def apply_boundary_conditions(self, **params): + return params
+ + + def __call__(self, **kwds): + return self.logpdf(**kwds) + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + tag = variable_args + params = variable_args.split(VARARGS_DELIM) + modulestr = cp.get_opt_tag(section, 'module', tag) + mod = importlib.import_module(modulestr) + + logpdfstr = cp.get_opt_tag(section, 'logpdf', tag) + logpdf = getattr(mod, logpdfstr) + + cdfinv = rvs = None + if cp.has_option_tag(section, 'cdfinv', tag): + cdfinvstr = cp.get_opt_tag(section, 'cdfinv', tag) + cdfinv = getattr(mod, cdfinvstr) + + if cp.has_option_tag(section, 'rvs', tag): + rvsstr = cp.get_opt_tag(section, 'rvs', tag) + rvs = getattr(mod, rvsstr) + + return cls(params=params, logpdf=logpdf, rvs=rvs, cdfinv=cdfinv)
+[docs] +class DistributionFunctionFromFile(External): + r"""Evaluating PDF, logPDF, CDF and inverse CDF from the external + density function. + + To add to an inference configuration file: + + .. code-block:: ini + + [prior-param1] + name = external_func_fromfile + file_path = spin.txt + column_index = 1 + + Parameters + ---------- + params : list + list of parameter names + file_path: str + The path of the external density function's .txt file. + column_index: int + The column index of the density distribution. By default, the first + should be the values of a certain parameter, such as "mass", other + columns should be the corresponding density values (as a function of + that parameter). If you add the name of the parameter in the first + row, please add the '#' at the beginning. + \**kwargs : + All other keyword args are passed to `scipy.integrate.quad` to control + the numerical accuracy of the inverse CDF. + If not be provided, will use the default values in `self.__init__`. + + Notes + ----- + This class is different from `pycbc.distributions.arbitrary.FromFile`, + which needs samples from the hdf file to construct the PDF by using KDE. + This class reads in any continuous functions of the parameter. + """ + name = "external_func_fromfile" + + def __init__(self, params=None, file_path=None, + column_index=None, **kwargs): + super().__init__(cdfinv=self._cdfinv, logpdf=self.logpdf) + self.params = params + self.data = np.loadtxt(fname=file_path, unpack=True, comments='#') + self.column_index = int(column_index) + self.epsabs = kwargs.get('epsabs', 1.49e-05) + self.epsrel = kwargs.get('epsrel', 1.49e-05) + self.x_list = np.linspace(self.data[0][0], self.data[0][-1], 1000) + self.interp = {'pdf': callable, 'cdf': callable, 'cdfinv': callable} + if not file_path: + raise ValueError("Must provide the path to density function file.") + +
+[docs] + def logpdf(self, **kwargs): + x = kwargs.pop(self.params[0]) + return self._logpdf(x, **kwargs)
+ + + def _pdf(self, x010, **kwargs): + """Calculate and interpolate the PDF by using the given density + function, then return the corresponding value at the given x.""" + if self.interp['pdf'] == callable: + func_unnorm = scipy_interpolate.interp1d( + self.data[0], self.data[self.column_index]) + norm_const = scipy_integrate.quad( + func_unnorm, self.data[0][0], self.data[0][-1], + epsabs=self.epsabs, epsrel=self.epsrel, limit=500, + **kwargs)[0] + self.interp['pdf'] = scipy_interpolate.interp1d( + self.data[0], self.data[self.column_index]/norm_const, + bounds_error=False, fill_value=0) + pdf_val = np.float64(self.interp['pdf'](x010)) + return pdf_val + + def _logpdf(self, x010, **kwargs): + """Calculate the logPDF by calling `pdf` function.""" + z = np.log(self._pdf(x010, **kwargs)) + return z + + def _cdf(self, x, **kwargs): + """Calculate and interpolate the CDF, then return the corresponding + value at the given x.""" + if self.interp['cdf'] == callable: + cdf_list = [] + for x_val in self.x_list: + cdf_x = scipy_integrate.quad( + self._pdf, self.data[0][0], x_val, epsabs=self.epsabs, + epsrel=self.epsrel, limit=500, **kwargs)[0] + cdf_list.append(cdf_x) + self.interp['cdf'] = \ + scipy_interpolate.interp1d(self.x_list, cdf_list) + cdf_val = np.float64(self.interp['cdf'](x)) + return cdf_val + + def _cdfinv(self, **kwargs): + """Calculate and interpolate the inverse CDF, then return the + corresponding parameter value at the given CDF value.""" + if self.interp['cdfinv'] == callable: + cdf_list = [] + for x_value in self.x_list: + cdf_list.append(self._cdf(x_value)) + self.interp['cdfinv'] = \ + scipy_interpolate.interp1d(cdf_list, self.x_list) + cdfinv_val = {self.params[0]: np.float64( + self.interp['cdfinv'](kwargs[self.params[0]]))} + return cdfinv_val + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + tag = variable_args + params = variable_args.split(VARARGS_DELIM) + file_path = cp.get_opt_tag(section, 'file_path', tag) + column_index = cp.get_opt_tag(section, 'column_index', tag) + return cls(params=params, file_path=file_path, + column_index=column_index)
+ + + +__all__ = ['External', 'DistributionFunctionFromFile'] +
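The `External` class already carries a doctest in its docstring; for `DistributionFunctionFromFile`, the sketch below illustrates the expected input. The file name, the parameter name `spin1_a`, and the quadratic density are all made up: the input is a plain-text table whose first column is the parameter grid and whose `column_index`-th column is the (unnormalized) density. Note the first evaluation builds the interpolants numerically, so it can take a moment.

    >>> import numpy
    >>> from pycbc.distributions.external import DistributionFunctionFromFile
    >>> x = numpy.linspace(0., 1., 200)
    >>> numpy.savetxt('spin_prior.txt', numpy.column_stack([x, x**2]))
    >>> dist = DistributionFunctionFromFile(params=['spin1_a'],
    ...                                     file_path='spin_prior.txt',
    ...                                     column_index=1)
    >>> dist.logpdf(spin1_a=0.5)     # log of the normalized, interpolated density at 0.5
    >>> dist.rvs(size=1)             # one draw via the numerically built inverse CDF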
\ No newline at end of file
diff --git a/latest/html/_modules/pycbc/distributions/fixedsamples.html b/latest/html/_modules/pycbc/distributions/fixedsamples.html
new file mode 100644
index 00000000000..8465ede4176
--- /dev/null
+++ b/latest/html/_modules/pycbc/distributions/fixedsamples.html
@@ -0,0 +1,314 @@
+ pycbc.distributions.fixedsamples — PyCBC 2.5.dev2 documentation

Source code for pycbc.distributions.fixedsamples

+# Copyright (C) 2020 Alexander Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module provides classes for evaluating distributions based on a fixed
+set of points.
+"""
+import logging
+import numpy
+import numpy.random
+
+from pycbc import VARARGS_DELIM
+
+logger = logging.getLogger('pycbc.distributions.fixedsamples')
+
+
+
+[docs] +class FixedSamples(object): + """ + A distribution consisting of a collection of a large number of fixed points. + Only these values can be drawn from, so the number of points may need to be + large to properly reflect the parameter space. This distribution is intended + to aid in using nested samplers for semi-arbitrary or complicated + distributions where it is possible to provide or draw samples but less + straightforward to provide an analytic invcdf. This class numerically + approximates the invcdf for 1 or 2 dimensional distributions + (but no higher). + + Parameters + ---------- + params : + The list of parameters this distribution should use + samples : dict of arrays or FieldArray + Sampled points of the distribution. May contain transformed parameters + which are different from the original distribution. If so, an inverse + mapping is provided to associate points with other parameters provided. + """ + + name = "fixed_samples" + + def __init__(self, params, samples): + self.params = params + self.samples = samples + + self.p1 = self.samples[params[0]] + self.frac = len(self.p1)**0.5 / len(self.p1) + self.sort = self.p1.argsort() + self.p1sorted = self.p1[self.sort] + + assert len(numpy.unique(self.p1)) == len(self.p1) + + if len(params) > 2: + raise ValueError("Only one or two parameters supported " + "for fixed sample distribution")
+[docs] + def rvs(self, size=1, **kwds): + "Draw random value" + i = numpy.random.randint(0, high=len(self.p1), size=size) + return {p: self.samples[p][i] for p in self.params}
+[docs] + def cdfinv(self, **original): + """Map unit cube to parameters in the space""" + new = {} + + #First dimension + u1 = original[self.params[0]] + i1 = int(round(u1 * len(self.p1))) + if i1 >= len(self.p1): + i1 = len(self.p1) - 1 + if i1 < 0: + i1 = 0 + new[self.params[0]] = p1v = self.p1sorted[i1] + if len(self.params) == 1: + return new + + # possible second dimension, probably shouldn't + # do more dimensions than this + u2 = original[self.params[1]] + l = numpy.searchsorted(self.p1sorted, p1v * (1 - self.frac)) + r = numpy.searchsorted(self.p1sorted, p1v * (1 + self.frac)) + if r < l: + l, r = r, l + + region = numpy.array(self.sort[l:r], ndmin=1) + p2 = self.samples[self.params[1]] + p2part = numpy.array(p2[region], ndmin=1) + l = p2part.argsort() + p2part = numpy.array(p2part[l], ndmin=1) + + i2 = int(round(u2 * len(p2part))) + if i2 >= len(p2part): + i2 = len(p2part) - 1 + if i2 < 0: + i2 = 0 + new[self.params[1]] = p2part[i2] + + p1part = numpy.array(self.p1[region[l]], ndmin=1) + new[self.params[0]] = p1part[i2] + return new
+[docs] + def apply_boundary_conditions(self, **params): + """ Apply boundary conditions (none here) """ + return params
+ + + def __call__(self, **kwds): + """ Dummy function, not the actual pdf """ + return 0 + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + """ Return instance based on config file + + Return a new instance based on the config file. This will draw from + a single distribution section provided in the config file and + apply a single transformation section if desired. If a transformation + is applied, an inverse mapping is also provided for use in the config + file. + """ + from pycbc.distributions import read_distributions_from_config + from pycbc.transforms import (read_transforms_from_config, + apply_transforms, BaseTransform) + from pycbc.transforms import transforms as global_transforms + + params = tag.split(VARARGS_DELIM) + subname = cp.get_opt_tag(section, 'subname', tag) + size = cp.get_opt_tag(section, 'sample-size', tag) + + distsec = '{}_sample'.format(subname) + dist = read_distributions_from_config(cp, section=distsec) + if len(dist) > 1: + raise ValueError("Fixed sample distrubtion only supports a single" + " distribution to sample from.") + + logger.info('Drawing samples for fixed sample distribution:%s', params) + samples = dist[0].rvs(size=int(float(size))) + samples = {p: samples[p] for p in samples.dtype.names} + + transec = '{}_transform'.format(subname) + trans = read_transforms_from_config(cp, section=transec) + if len(trans) > 0: + trans = trans[0] + samples = apply_transforms(samples, [trans]) + p1 = samples[params[0]] + + # We have transformed parameters, so automatically provide the + # inverse transform for use in passing to waveform approximants + class Thook(BaseTransform): + name = subname + _inputs = trans.outputs + _outputs = trans.inputs + p1name = params[0] + sort = p1.argsort() + p1sorted = p1[sort] + def transform(self, maps): + idx = numpy.searchsorted(self.p1sorted, maps[self.p1name]) + out = {p: samples[p][self.sort[idx]] for p in self.outputs} + return self.format_output(maps, out) + global_transforms[Thook.name] = Thook + return cls(params, samples)
+ + +__all__ = ['FixedSamples'] +
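A small sketch of constructing `FixedSamples` by hand (the parameter name `distance` and the sample values are invented); in practice the points would usually come from another distribution or an external draw, as `from_config` above does:

    >>> import numpy
    >>> from pycbc.distributions.fixedsamples import FixedSamples
    >>> points = {'distance': numpy.random.uniform(10., 1000., size=10000)}
    >>> dist = FixedSamples(['distance'], points)
    >>> dist.rvs(size=3)               # dict of values picked from the fixed points
    >>> dist.cdfinv(distance=0.5)      # unit-interval value mapped onto the sorted points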
\ No newline at end of file
diff --git a/latest/html/_modules/pycbc/distributions/gaussian.html b/latest/html/_modules/pycbc/distributions/gaussian.html
new file mode 100644
index 00000000000..18e2c57fcfd
--- /dev/null
+++ b/latest/html/_modules/pycbc/distributions/gaussian.html
@@ -0,0 +1,380 @@
+ pycbc.distributions.gaussian — PyCBC 2.5.dev2 documentation

Source code for pycbc.distributions.gaussian

+# Copyright (C) 2016  Christopher M. Biwer, Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module provides classes for evaluating Gaussian distributions.
+"""
+import logging
+import numpy
+from scipy.special import erf, erfinv
+import scipy.stats
+
+from pycbc.distributions import bounded
+
+logger = logging.getLogger('pycbc.distributions.gaussian')
+
+
+[docs] +class Gaussian(bounded.BoundedDist): + r"""A Gaussian distribution on the given parameters; the parameters are + independent of each other. + + Bounds can be provided on each parameter, in which case the distribution + will be a truncated Gaussian distribution. The PDF of a truncated + Gaussian distribution is given by: + + .. math:: + p(x|a, b, \mu,\sigma) = \frac{1}{\sqrt{2 \pi \sigma^2}}\frac{e^{- \frac{\left( x - \mu \right)^2}{2 \sigma^2}}}{\Phi(b|\mu, \sigma) - \Phi(a|\mu, \sigma)}, + + where :math:`\mu` is the mean, :math:`\sigma^2` is the variance, + :math:`a,b` are the bounds, and :math:`\Phi` is the cumulative distribution + of an unbounded normal distribution, given by: + + .. math:: + \Phi(x|\mu, \sigma) = \frac{1}{2}\left[1 + \mathrm{erf}\left(\frac{x-\mu}{\sigma \sqrt{2}}\right)\right]. + + Note that if :math:`[a,b) = [-\infty, \infty)`, this reduces to a standard + Gaussian distribution. + + + Instances of this class can be called like a function. By default, logpdf + will be called, but this can be changed by setting the class's __call__ + method to its pdf method. + + Parameters + ---------- + \**params : + The keyword arguments should provide the names of parameters and + (optionally) some bounds, as either a tuple or a + `boundaries.Bounds` instance. The mean and variance of each + parameter can be provided by additional keyword arguments that have + `_mean` and `_var` adding to the parameter name. For example, + `foo=(-2,10), foo_mean=3, foo_var=2` would create a truncated Gaussian + with mean 3 and variance 2, bounded between :math:`[-2, 10)`. If no + mean or variance is provided, the distribution will have 0 mean and + unit variance. If None is provided for the bounds, the distribution + will be a normal, unbounded Gaussian (equivalent to setting the bounds + to `[-inf, inf)`). + + Examples + -------- + Create an unbounded Gaussian distribution with zero mean and unit variance: + >>> dist = distributions.Gaussian(mass1=None) + + Create a bounded Gaussian distribution on :math:`[1,10)` with a mean of 3 + and a variance of 2: + >>> dist = distributions.Gaussian(mass1=(1,10), mass1_mean=3, mass1_var=2) + + Create a bounded Gaussian distribution with the same parameters, but with + cyclic boundary conditions: + >>> dist = distributions.Gaussian(mass1=Bounds(1,10, cyclic=True), mass1_mean=3, mass1_var=2) + """ + name = "gaussian" + + def __init__(self, **params): + + # save distribution parameters as dict + # calculate the norm and exponential norm ahead of time + # and save to self._norm, self._lognorm, and self._expnorm + self._bounds = {} + self._mean = {} + self._var = {} + self._norm = {} + self._lognorm = {} + self._expnorm = {} + # pull out specified means, variance + mean_args = [p for p in params if p.endswith('_mean')] + var_args = [p for p in params if p.endswith('_var')] + self._mean = dict([[p[:-5], params.pop(p)] for p in mean_args]) + self._var = dict([[p[:-4], params.pop(p)] for p in var_args]) + # initialize the bounds + super(Gaussian, self).__init__(**params) + + # check that there are no params in mean/var that are not in params + missing = set(self._mean.keys()) - set(params.keys()) + if any(missing): + raise ValueError("means provided for unknow params {}".format( + ', '.join(missing))) + missing = set(self._var.keys()) - set(params.keys()) + if any(missing): + raise ValueError("vars provided for unknow params {}".format( + ', '.join(missing))) + # set default mean/var for params not specified + self._mean.update(dict([[p, 0.] 
+ for p in params if p not in self._mean])) + self._var.update(dict([[p, 1.] + for p in params if p not in self._var])) + + # compute norms + for p,bnds in self._bounds.items(): + sigmasq = self._var[p] + mu = self._mean[p] + a,b = bnds + invnorm = scipy.stats.norm.cdf(b, loc=mu, scale=sigmasq**0.5) \ + - scipy.stats.norm.cdf(a, loc=mu, scale=sigmasq**0.5) + invnorm *= numpy.sqrt(2*numpy.pi*sigmasq) + self._norm[p] = 1./invnorm + self._lognorm[p] = numpy.log(self._norm[p]) + self._expnorm[p] = -1./(2*sigmasq) + + + @property + def mean(self): + return self._mean + + + @property + def var(self): + return self._var + + def _normalcdf(self, param, value): + """The CDF of the normal distribution, without bounds.""" + mu = self._mean[param] + var = self._var[param] + return 0.5*(1. + erf((value - mu)/(2*var)**0.5)) + +
+[docs] + def cdf(self, param, value): + """Returns the CDF of the given parameter value.""" + a, b = self._bounds[param] + if a != -numpy.inf: + phi_a = self._normalcdf(param, a) + else: + phi_a = 0. + if b != numpy.inf: + phi_b = self._normalcdf(param, b) + else: + phi_b = 1. + phi_x = self._normalcdf(param, value) + return (phi_x - phi_a)/(phi_b - phi_a)
+ + + def _normalcdfinv(self, param, p): + """The inverse CDF of the normal distribution, without bounds.""" + mu = self._mean[param] + var = self._var[param] + return mu + (2*var)**0.5 * erfinv(2*p - 1.) + + def _cdfinv_param(self, param, p): + """Return inverse of the CDF. + """ + a, b = self._bounds[param] + if a != -numpy.inf: + phi_a = self._normalcdf(param, a) + else: + phi_a = 0. + if b != numpy.inf: + phi_b = self._normalcdf(param, b) + else: + phi_b = 1. + adjusted_p = phi_a + p * (phi_b - phi_a) + return self._normalcdfinv(param, adjusted_p) + + def _pdf(self, **kwargs): + """Returns the pdf at the given values. The keyword arguments must + contain all of parameters in self's params. Unrecognized arguments are + ignored. + """ + return numpy.exp(self._logpdf(**kwargs)) + + + def _logpdf(self, **kwargs): + """Returns the log of the pdf at the given values. The keyword + arguments must contain all of parameters in self's params. Unrecognized + arguments are ignored. + """ + if kwargs in self: + return sum([self._lognorm[p] + + self._expnorm[p]*(kwargs[p]-self._mean[p])**2. + for p in self._params]) + else: + return -numpy.inf + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + """Returns a Gaussian distribution based on a configuration file. The + parameters for the distribution are retrieved from the section titled + "[`section`-`variable_args`]" in the config file. + + Boundary arguments should be provided in the same way as described in + `get_param_bounds_from_config`. In addition, the mean and variance of + each parameter can be specified by setting `{param}_mean` and + `{param}_var`, respectively. For example, the following would create a + truncated Gaussian distribution between 0 and 6.28 for a parameter + called `phi` with mean 3.14 and variance 0.5 that is cyclic: + + .. code-block:: ini + + [{section}-{tag}] + min-phi = 0 + max-phi = 6.28 + phi_mean = 3.14 + phi_var = 0.5 + cyclic = + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the distribution + options. + section : str + Name of the section in the configuration file. + variable_args : str + The names of the parameters for this distribution, separated by + `prior.VARARGS_DELIM`. These must appear in the "tag" part + of the section header. + + Returns + ------- + Gaussian + A distribution instance from the pycbc.inference.prior module. + """ + return bounded.bounded_from_config(cls, cp, section, variable_args, + bounds_required=False)
+ + + +__all__ = ['Gaussian'] +
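A brief sketch of the truncated case (the bounds, mean, and variance are invented): bounds plus the `{param}_mean` / `{param}_var` keywords give a truncated Gaussian whose `cdf` and draws respect the bounds.

    >>> from pycbc.distributions.gaussian import Gaussian
    >>> dist = Gaussian(mass1=(10., 40.), mass1_mean=25., mass1_var=9.)
    >>> dist.logpdf(mass1=25.)         # includes the truncation normalization
    >>> dist.cdf('mass1', 20.)         # CDF of the truncated distribution at 20
    >>> dist.rvs(size=4)['mass1']      # all draws fall in [10, 40)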
\ No newline at end of file
diff --git a/latest/html/_modules/pycbc/distributions/joint.html b/latest/html/_modules/pycbc/distributions/joint.html
new file mode 100644
index 00000000000..0040bebee48
--- /dev/null
+++ b/latest/html/_modules/pycbc/distributions/joint.html
@@ -0,0 +1,526 @@
+ pycbc.distributions.joint — PyCBC 2.5.dev2 documentation

Source code for pycbc.distributions.joint

+# Copyright (C)  2017 Collin Capano, Christopher M. Biwer, Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+""" This module provides classes to describe joint distributions
+"""
+import logging
+import numpy
+
+from pycbc.io.record import FieldArray
+
+logger = logging.getLogger('pycbc.distributions.joint')
+
+
+
+[docs] +class JointDistribution(object): + """ + Callable class that calculates the joint distribution built from a set of + distributions. + + Parameters + ---------- + variable_args : list + A list of strings that contain the names of the variable parameters and + the order they are expected when the class is called. + \*distributions : + The rest of the arguments must be instances of distributions describing + the individual distributions on the variable parameters. + A single distribution may contain + multiple parameters. The set of all params across the distributions + (retrieved from the distributions' params attribute) must be the same + as the set of variable_args provided. + \*\*kwargs : + Valid keyword arguments include: + `constraints` : a list of functions that accept a dict of parameters + with the parameter name as the key. If the constraint is satisfied the + function should return True, if the constraint is violated, then the + function should return False. + `n_test_samples` : number of random draws used to fix pdf normalization + factor after applying constraints. + + Attributes + ---------- + variable_args : tuple + The parameters expected when the evaluator is called. + distributions : list + The distributions for the parameters. + constraints : list + A list of functions to test if parameter values obey multi-dimensional + constraints. + + Examples + -------- + An example of creating a joint distribution with constraint that total mass must + be below 30. + + >>> from pycbc.distributions import Uniform, JointDistribution + >>> def mtotal_lt_30(params): + ... return params["mass1"] + params["mass2"] < 30 + >>> mass_lim = (2, 50) + >>> uniform_prior = Uniform(mass1=mass_lim, mass2=mass_lim) + >>> prior_eval = JointDistribution(["mass1", "mass2"], uniform_prior, + ... 
constraints=[mtotal_lt_30]) + >>> print(prior_eval(mass1=20, mass2=1)) + + """ + name = 'joint' + + def __init__(self, variable_args, *distributions, **kwargs): + + # store the names of the parameters defined in the distributions + self.variable_args = tuple(variable_args) + + # store the distributions + self.distributions = distributions + + # store the constraints on the parameters defined inside the + # distributions list + self._constraints = kwargs["constraints"] \ + if "constraints" in kwargs.keys() else [] + + # store kwargs + self.kwargs = kwargs + + # check that all of the supplied parameters are described by the given + # distributions + distparams = set() + for dist in distributions: + distparams.update(set(dist.params)) + + varset = set(self.variable_args) + missing_params = distparams - varset + if missing_params: + raise ValueError("provided variable_args do not include " + "parameters %s" %(','.join(missing_params)) + " which are " + "required by the provided distributions") + extra_params = varset - distparams + if extra_params: + raise ValueError("variable_args %s " %(','.join(extra_params)) + + "are not in any of the provided distributions") + + # if there are constraints then find the renormalization factor + # since a constraint will cut out part of the space + # do this by random sampling the full space and find the percent + # of samples rejected + n_test_samples = kwargs["n_test_samples"] \ + if "n_test_samples" in kwargs else int(1e6) + if self._constraints: + logger.info("Renormalizing distribution for constraints") + + # draw samples + samples = {} + for dist in self.distributions: + draw = dist.rvs(n_test_samples) + for param in dist.params: + samples[param] = draw[param] + samples = FieldArray.from_kwargs(**samples) + + # evaluate constraints + result = self.within_constraints(samples) + + # set new scaling factor for prior to be + # the fraction of acceptances in random sampling of entire space + self._pdf_scale = result.sum() / float(n_test_samples) + if self._pdf_scale == 0.0: + raise ValueError("None of the random draws for pdf " + "renormalization satisfied the constraints. " + " You can try increasing the 'n_test_samples' keyword.") + + else: + self._pdf_scale = 1.0 + + # since Distributions will return logpdf we keep the scale factor + # in log scale as well for self.__call__ + self._logpdf_scale = numpy.log(self._pdf_scale) + +
+[docs] + def apply_boundary_conditions(self, **params): + """Applies each distributions' boundary conditions to the given list + of parameters, returning a new list with the conditions applied. + + Parameters + ---------- + **params : + Keyword arguments should give the parameters to apply the + conditions to. + + Returns + ------- + dict + A dictionary of the parameters after each distribution's + `apply_boundary_conditions` function has been applied. + """ + for dist in self.distributions: + params.update(dist.apply_boundary_conditions(**params)) + return params
+
+
+    @staticmethod
+    def _return_atomic(params):
+        """Determines if an array or atomic value should be returned given a
+        set of input params.
+
+        Parameters
+        ----------
+        params : dict, numpy.record, array, or FieldArray
+            The input to evaluate.
+
+        Returns
+        -------
+        bool :
+            Whether functions run on the parameters should return atomic
+            types.
+        """
+        if isinstance(params, dict):
+            return not any(isinstance(val, numpy.ndarray)
+                           for val in params.values())
+        elif isinstance(params, numpy.record):
+            return True
+        elif isinstance(params, numpy.ndarray):
+            return False
+        elif isinstance(params, FieldArray):
+            return False
+        else:
+            raise ValueError("params must be either dict, FieldArray, "
+                             "record, or structured array")
+
+    @staticmethod
+    def _ensure_fieldarray(params):
+        """Ensures the given params are a ``FieldArray``.
+
+        Parameters
+        ----------
+        params : dict, FieldArray, numpy.record, or numpy.ndarray
+            If the given object is a dict, it will be converted to a
+            FieldArray.
+
+        Returns
+        -------
+        FieldArray
+            The given values as a FieldArray.
+        """
+        if isinstance(params, dict):
+            return FieldArray.from_kwargs(**params)
+        elif isinstance(params, numpy.record):
+            return FieldArray.from_records(tuple(params),
+                                           names=params.dtype.names)
+        elif isinstance(params, numpy.ndarray):
+            return params.view(type=FieldArray)
+        elif isinstance(params, FieldArray):
+            return params
+        else:
+            raise ValueError("params must be either dict, FieldArray, "
+                             "record, or structured array")
+
+
+[docs] + def within_constraints(self, params): + """Evaluates whether the given parameters satisfy the constraints. + + Parameters + ---------- + params : dict, FieldArray, numpy.record, or numpy.ndarray + The parameter values to evaluate. + + Returns + ------- + (array of) bool : + If params was an array, or if params a dictionary and one or more + of the parameters are arrays, will return an array of booleans. + Otherwise, a boolean. + """ + params = self._ensure_fieldarray(params) + return_atomic = self._return_atomic(params) + # convert params to a field array if it isn't one + result = numpy.ones(params.shape, dtype=bool) + for constraint in self._constraints: + result &= constraint(params) + if return_atomic: + result = result.item() + return result
+ + +
+[docs]
+    def contains(self, params):
+        """Evaluates whether the given parameters satisfy the boundary
+        conditions, boundaries, and constraints. This method differs from
+        `within_constraints`, which only checks the constraints.
+
+        Parameters
+        ----------
+        params : dict, FieldArray, numpy.record, or numpy.ndarray
+            The parameter values to evaluate.
+
+        Returns
+        -------
+        (array of) bool :
+            If params is an array, or if params is a dictionary and one or
+            more of the parameters are arrays, an array of booleans is
+            returned. Otherwise, a boolean.
+        """
+        params = self.apply_boundary_conditions(**params)
+        result = True
+        for dist in self.distributions:
+            param_names = dist.params
+            vlen = len(params[param_names[0]])
+            contain_array = numpy.ones(vlen, dtype=bool)
+            # note: `__contains__` in `pycbc.distributions.bounded` does not
+            # currently handle array-like input, so loop over the entries here
+            for i in range(vlen):
+                data = {pname: params[pname][i] for pname in param_names}
+                contain_array[i] = data in dist
+            result &= numpy.array(contain_array)
+        result &= self.within_constraints(params)
+        return result
+
+
+    def __call__(self, **params):
+        """Evaluate the joint distribution for the given parameters.
+        """
+        return_atomic = self._return_atomic(params)
+        # check whether the parameters satisfy the constraints
+        if len(self._constraints) != 0:
+            parray = self._ensure_fieldarray(params)
+            isin = self.within_constraints(parray)
+            if not isin.any():
+                if return_atomic:
+                    out = -numpy.inf
+                else:
+                    out = numpy.full(parray.shape, -numpy.inf)
+                return out
+
+        # evaluate
+        # note: this step may fail if arrays of values were provided, as
+        # not all distributions are vectorized currently
+        logps = numpy.array([d(**params) for d in self.distributions])
+        logp = logps.sum(axis=0)
+
+        if len(self._constraints) != 0:
+            logp += numpy.log(isin.astype(float))
+
+        if return_atomic:
+            logp = logp.item()
+
+        return logp - self._logpdf_scale
+
+
+[docs]
+    def rvs(self, size=1):
+        """ Rejection samples the parameter space.
+        """
+        # create output FieldArray
+        dtype = [(arg, float) for arg in self.variable_args]
+        out = FieldArray(size, dtype=dtype)
+        # loop until enough samples accepted
+        remaining = size
+        ndraw = size
+        while remaining:
+            # scratch space for evaluating constraints
+            scratch = FieldArray(ndraw, dtype=dtype)
+            for dist in self.distributions:
+                # drawing samples from the distributions is generally faster
+                # than evaluating constraints, so we'll always draw the full
+                # size, even if that gives us more points than we need
+                draw = dist.rvs(size=ndraw)
+                for param in dist.params:
+                    scratch[param] = draw[param]
+            # apply any constraints
+            keep = self.within_constraints(scratch)
+            nkeep = keep.sum()
+            kmin = size - remaining
+            kmax = min(nkeep, remaining)
+            out[kmin:kmin+kmax] = scratch[keep][:kmax]
+            remaining = max(0, remaining - nkeep)
+            # to try to speed up the next iteration, scale up the draw size
+            # according to the acceptance fraction, capped at 1e6
+            ndraw = int(min(1e6, ndraw * numpy.ceil(ndraw / (nkeep + 1.))))
+        return out
+ + + @property + def well_reflected(self): + """ Get list of which parameters are well reflected + """ + reflect = [] + bounds = self.bounds + for param in bounds: + if bounds[param].reflected == 'well': + reflect.append(param) + return reflect + + @property + def cyclic(self): + """ Get list of which parameters are cyclic + """ + cyclic = [] + bounds = self.bounds + for param in bounds: + if bounds[param].cyclic: + cyclic.append(param) + return cyclic + + @property + def bounds(self): + """ Get the dict of boundaries + """ + bnds = {} + for dist in self.distributions: + if hasattr(dist, 'bounds'): + bnds.update(dist.bounds) + return bnds + +
+[docs] + def cdfinv(self, **original): + """ Apply the inverse cdf to the array of values [0, 1]. Every + variable parameter must be given as a keyword argument. + """ + updated = {} + for dist in self.distributions: + updated.update(dist.cdfinv(**original)) + return updated
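+
+# --- Editor's illustrative sketch (not part of the PyCBC source) ---
+# A minimal example of how the JointDistribution above is typically used:
+# combine one or more distributions with a constraint function, rejection
+# sample the prior, and evaluate the re-normalized log pdf. The parameter
+# names and bounds below are arbitrary assumptions.
+from pycbc.distributions import Uniform, JointDistribution
+
+def mtotal_lt_30(params):
+    # accept only points whose total mass is below 30
+    return params["mass1"] + params["mass2"] < 30
+
+prior = JointDistribution(
+    ["mass1", "mass2"],
+    Uniform(mass1=(2, 50), mass2=(2, 50)),
+    constraints=[mtotal_lt_30],
+    n_test_samples=int(1e5),  # random draws used to fix the pdf normalization
+)
+samples = prior.rvs(size=1000)        # rejection-sampled FieldArray of accepted points
+logp = prior(mass1=10.0, mass2=12.0)  # log pdf; -inf where the constraint fails
+# --- end sketch ---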
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions/mass.html b/latest/html/_modules/pycbc/distributions/mass.html new file mode 100644 index 00000000000..dcb0489ff15 --- /dev/null +++ b/latest/html/_modules/pycbc/distributions/mass.html @@ -0,0 +1,436 @@ + + + + + + pycbc.distributions.mass — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.distributions.mass

+# Copyright (C) 2021 Yifan Wang
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This modules provides classes for evaluating distributions for mchirp and
+q (i.e., mass ratio) from uniform component mass.
+"""
+import logging
+import numpy
+
+from scipy.interpolate import interp1d
+from scipy.special import hyp2f1
+
+from pycbc.distributions import power_law
+from pycbc.distributions import bounded
+
+logger = logging.getLogger('pycbc.distributions.mass')
+
+
+
+[docs]
+class MchirpfromUniformMass1Mass2(power_law.UniformPowerLaw):
+    r"""A distribution for chirp mass from uniform component mass +
+    constraints given by chirp mass. This is a special case of UniformPowerLaw
+    with index 1. For more details see UniformPowerLaw.
+
+    The parameters (i.e. `**params`) are independent of each other. Instances
+    of this class can be called like a function. By default, `logpdf` will be
+    called, but this can be changed by setting the class's `__call__` method
+    to its pdf method.
+
+    Derivation of the probability density function:
+
+    .. math::
+
+        P(m_1,m_2)dm_1dm_2 = P(\mathcal{M}_c,q)d\mathcal{M}_cdq
+
+    where :math:`\mathcal{M}_c` is the chirp mass, :math:`q` is the mass
+    ratio, and :math:`m_1` and :math:`m_2` are the component masses. The
+    Jacobian for transforming from chirp mass and mass ratio to component
+    masses is
+
+    .. math::
+
+        \frac{\partial(m_1,m_2)}{\partial(\mathcal{M}_c,q)} = \
+        \mathcal{M}_c \left(\frac{1+q}{q^3}\right)^{2/5}
+
+    (https://github.com/gwastro/pycbc/blob/master/pycbc/transforms.py#L416.)
+
+    Because :math:`P(m_1,m_2) = const`, it follows that
+
+    .. math::
+
+        P(\mathcal{M}_c,q) = P(\mathcal{M}_c)P(q)\propto
+        \mathcal{M}_c \left(\frac{1+q}{q^3}\right)^{2/5}.
+
+    Therefore,
+
+    .. math::
+        P(\mathcal{M}_c) \propto \mathcal{M}_c
+
+    and
+
+    .. math::
+        P(q) \propto \left(\frac{1+q}{q^3}\right)^{2/5}
+
+    Examples
+    --------
+
+    Generate 10000 random numbers from this distribution in [5,100]:
+
+    >>> from pycbc import distributions as dist
+    >>> minmc, maxmc, size = 5, 100, 10000
+    >>> mc = dist.MchirpfromUniformMass1Mass2(mchirp=(minmc,maxmc)).rvs(size)
+
+    The settings in the configuration file for pycbc_inference should be
+
+    .. code-block:: ini
+
+        [variable_params]
+        mchirp =
+        [prior-mchirp]
+        name = mchirp_from_uniform_mass1_mass2
+        min-mchirp = 10
+        max-mchirp = 80
+
+    Parameters
+    ----------
+    \**params :
+        The keyword arguments should provide the names of parameters and their
+        corresponding bounds, as either tuples or a `boundaries.Bounds`
+        instance.
+    """
+
+    name = "mchirp_from_uniform_mass1_mass2"
+
+    def __init__(self, dim=2, **params):
+        super(MchirpfromUniformMass1Mass2, self).__init__(dim=2, **params)
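+
+# --- Editor's illustrative sketch (not part of the PyCBC source) ---
+# A quick check, assuming the parameter is named "mchirp", that draws from
+# MchirpfromUniformMass1Mass2 follow the expected p(mchirp) ∝ mchirp density:
+# the sample mean on [a, b] should approach the analytic mean
+# 2 (b^3 - a^3) / (3 (b^2 - a^2)).
+import numpy
+from pycbc import distributions as dist
+
+a, b, size = 10.0, 80.0, 100000
+mc_dist = dist.MchirpfromUniformMass1Mass2(mchirp=(a, b))
+mc = mc_dist.rvs(size)["mchirp"]
+analytic_mean = 2.0 * (b**3 - a**3) / (3.0 * (b**2 - a**2))
+print(mc.mean(), analytic_mean)  # should agree to within sampling error
+# --- end sketch ---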
+ + + +
+[docs]
+class QfromUniformMass1Mass2(bounded.BoundedDist):
+    r"""A distribution for mass ratio (i.e., q) from uniform component mass +
+    constraints given by q.
+
+    The parameters (i.e. `**params`) are independent of each other. Instances
+    of this class can be called like a function. By default, `logpdf` will be
+    called, but this can be changed by setting the class's `__call__` method
+    to its pdf method.
+
+    For the mathematical derivation see the documentation of the class
+    `MchirpfromUniformMass1Mass2` above.
+
+    Parameters
+    ----------
+    \**params :
+        The keyword arguments should provide the names of parameters and their
+        corresponding bounds, as either tuples or a `boundaries.Bounds`
+        instance.
+
+    Examples
+    --------
+
+    Generate 10000 random numbers from this distribution in [1,8]:
+
+    >>> from pycbc import distributions as dist
+    >>> minq, maxq, size = 1, 8, 10000
+    >>> q = dist.QfromUniformMass1Mass2(q=(minq,maxq)).rvs(size)
+
+    """
+
+    name = 'q_from_uniform_mass1_mass2'
+
+    def __init__(self, **params):
+        super(QfromUniformMass1Mass2, self).__init__(**params)
+        self._norm = 1.0
+        self._lognorm = 0.0
+        for p in self._params:
+            self._norm /= self._cdf_param(p, self._bounds[p][1]) - \
+                self._cdf_param(p, self._bounds[p][0])
+        self._lognorm = numpy.log(self._norm)
+
+    @property
+    def norm(self):
+        """float: The normalization of the multi-dimensional pdf."""
+        return self._norm
+
+    @property
+    def lognorm(self):
+        """float: The log of the normalization."""
+        return self._lognorm
+
+    def _pdf(self, **kwargs):
+        """Returns the pdf at the given values. The keyword arguments must
+        contain all of parameters in self's params. Unrecognized arguments are
+        ignored.
+        """
+        for p in self._params:
+            if p not in kwargs.keys():
+                raise ValueError(
+                    'Missing parameter {} to construct pdf.'.format(p))
+        if kwargs in self:
+            pdf = self._norm * \
+                numpy.prod([(1.+kwargs[p])**(2./5)/kwargs[p]**(6./5)
+                            for p in self._params])
+            return float(pdf)
+        else:
+            return 0.0
+
+    def _logpdf(self, **kwargs):
+        """Returns the log of the pdf at the given values. The keyword
+        arguments must contain all of parameters in self's params. Unrecognized
+        arguments are ignored.
+        """
+        for p in self._params:
+            if p not in kwargs.keys():
+                raise ValueError(
+                    'Missing parameter {} to construct logpdf.'.format(p))
+        if kwargs in self:
+            return numpy.log(self._pdf(**kwargs))
+        else:
+            return -numpy.inf
+
+    def _cdf_param(self, param, value):
+        r"""The unnormalized cdf of a single parameter, i.e. the
+        antiderivative of :math:`(1+q)^{2/5} q^{-6/5}`:
+
+        .. math::
+
+            \int (1+q)^{2/5} q^{-6/5}\,dq =
+                -5\, q^{-1/5}\,
+                {}_2F_1\!\left(-\tfrac{2}{5}, -\tfrac{1}{5};
+                               \tfrac{4}{5}; -q\right),
+
+        where :math:`{}_2F_1` is the Gauss hypergeometric function
+        (``scipy.special.hyp2f1``); the result can be verified with sympy's
+        ``integrate((1+x)**(2/5)/x**(6/5), x)``.
+        """
+        if param in self._params:
+            return -5. * value**(-1./5) * hyp2f1(-2./5, -1./5, 4./5, -value)
+        else:
+            raise ValueError('{} is not constructed yet.'.format(param))
+
+    def _cdfinv_param(self, param, value):
+        """Return the inverse cdf to map the unit interval to parameter bounds.
+        Note that value should be uniform in [0, 1]."""
+        if (numpy.array(value) < 0).any() or (numpy.array(value) > 1).any():
+            raise ValueError(
+                'q_from_uniform_m1_m2 cdfinv requires input in [0,1].')
+        if param in self._params:
+            lower_bound = self._bounds[param][0]
+            upper_bound = self._bounds[param][1]
+            q_array = numpy.linspace(
+                lower_bound, upper_bound, num=1000, endpoint=True)
+            q_invcdf_interp = interp1d(self._cdf_param(param, q_array),
+                                       q_array, kind='cubic',
+                                       bounds_error=True)
+
+            return q_invcdf_interp(
+                (self._cdf_param(param, upper_bound) -
+                 self._cdf_param(param, lower_bound)) * value +
+                self._cdf_param(param, lower_bound))
+        else:
+            raise ValueError('{} is not constructed yet.'.format(param))
+
+[docs] + def rvs(self, size=1, param=None): + """Gives a set of random values drawn from this distribution. + + Parameters + ---------- + size : {1, int} + The number of values to generate; default is 1. + param : {None, string} + If provided, will just return values for the given parameter. + Otherwise, returns random values for each parameter. + + Returns + ------- + structured array + The random values in a numpy structured array. If a param was + specified, the array will only have an element corresponding to the + given parameter. Otherwise, the array will have an element for each + parameter in self's params. + """ + if param is not None: + dtype = [(param, float)] + else: + dtype = [(p, float) for p in self.params] + arr = numpy.zeros(size, dtype=dtype) + for (p, _) in dtype: + uniformcdfvalue = numpy.random.uniform(0, 1, size=size) + arr[p] = self._cdfinv_param(p, uniformcdfvalue) + return arr
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + """Returns a distribution based on a configuration file. The parameters + for the distribution are retrieved from the section titled + "[`section`-`variable_args`]" in the config file. + + Example: + + .. code-block:: ini + + [variable_params] + q = + [prior-q] + name = q_from_uniform_mass1_mass2 + min-q = 1 + max-q = 8 + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the distribution + options. + section : str + Name of the section in the configuration file. + variable_args : str + The names of the parameters for this distribution, separated by + ``VARARGS_DELIM``. These must appear in the "tag" part + of the section header. + + Returns + ------- + QfromUniformMass1Mass2 + A distribution instance from the pycbc.distributions.bounded + module. + """ + return super(QfromUniformMass1Mass2, cls).from_config( + cp, section, variable_args, bounds_required=True)
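+
+# --- Editor's illustrative sketch (not part of the PyCBC source) ---
+# A small sanity check of the mass-ratio distribution above, assuming the
+# parameter is named "q": random draws on [1, 8] stay inside the bounds, and
+# the density falls off with q as (1+q)^(2/5) / q^(6/5), so small mass ratios
+# are preferred.
+import numpy
+from pycbc import distributions as dist
+
+q_dist = dist.QfromUniformMass1Mass2(q=(1, 8))
+q = q_dist.rvs(10000)["q"]
+print(q.min() >= 1, q.max() <= 8)      # True True
+print((q < 2).sum() > (q > 7).sum())   # more samples at small q: True
+# the pdf can also be evaluated one point at a time:
+print(q_dist.pdf(q=1.5), q_dist.pdf(q=7.5))
+# --- end sketch ---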
+
+ + + +__all__ = ["MchirpfromUniformMass1Mass2", "QfromUniformMass1Mass2"] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions/power_law.html b/latest/html/_modules/pycbc/distributions/power_law.html new file mode 100644 index 00000000000..e50a30a2d25 --- /dev/null +++ b/latest/html/_modules/pycbc/distributions/power_law.html @@ -0,0 +1,357 @@ + + + + + + pycbc.distributions.power_law — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.distributions.power_law

+# Copyright (C) 2016 Christopher M. Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This modules provides classes for evaluating distributions where the
+probability density function is a power law.
+"""
+import logging
+import numpy
+
+from pycbc.distributions import bounded
+
+logger = logging.getLogger('pycbc.distributions.power_law')
+
+
+[docs]
+class UniformPowerLaw(bounded.BoundedDist):
+    r"""
+    A distribution whose probability density follows a power law, as arises
+    for a parameter that is uniform in volume. The parameters are
+    independent of each other. Instances of this class can be called like
+    a function. By default, logpdf will be called, but this can be changed
+    by setting the class's __call__ method to its pdf method.
+
+    The cumulative distribution function (CDF) is the ratio of volumes:
+
+    .. math::
+
+        F(r) = \frac{V(r)}{V(R)}
+
+    where :math:`R` is the radius of the sphere. The probability density
+    function (PDF) is therefore a power law:
+
+    .. math::
+
+        f(r) = c r^n
+
+    For generality we use :math:`n` for the power of the volume element,
+    e.g. :math:`n=2` for a 3-dimensional sphere, and :math:`c` for a general
+    constant.
+
+    Integrating the PDF gives the CDF up to an integration constant
+    :math:`k`:
+
+    .. math::
+
+        F(r) = \int f(r) dr = \int c r^n dr = \frac{1}{n + 1} c r^{n + 1} + k
+
+    Requiring the CDF to equal 0 at the lower bound :math:`r_{l}` and 1 at
+    the upper bound :math:`r_{h}` gives the system of equations
+
+    .. math::
+
+        0 = \frac{1}{n + 1} c (r_{l})^{n + 1} + k, \qquad
+        1 = \frac{1}{n + 1} c (r_{h})^{n + 1} + k,
+
+    so that
+
+    .. math::
+
+        c = \frac{n + 1}{(r_{h})^{n + 1} - (r_{l})^{n + 1}}
+
+    and
+
+    .. math::
+
+        k = - \frac{(r_{l})^{n + 1}}{(r_{h})^{n + 1} - (r_{l})^{n + 1}}
+
+    The CDF is therefore
+
+    .. math::
+
+        F(r) = \frac{r^{n + 1} - (r_{l})^{n + 1}}{(r_{h})^{n + 1} - (r_{l})^{n + 1}}
+
+    and the PDF is its derivative:
+
+    .. math::
+
+        f(r) = \frac{n + 1}{(r_{h})^{n + 1} - (r_{l})^{n + 1}} \, r^{n}
+
+    To sample, we use the probability integral transform: set the CDF equal
+    to a uniform random number :math:`u \in [0, 1]`,
+
+    .. math::
+
+        u = F(r) = \frac{r^{n + 1} - (r_{l})^{n + 1}}{(r_{h})^{n + 1} - (r_{l})^{n + 1}},
+
+    and solve for :math:`r`:
+
+    .. math::
+
+        r = \left( \left((r_{h})^{n + 1} - (r_{l})^{n + 1}\right) u + (r_{l})^{n + 1} \right)^{\frac{1}{n + 1}}
+
+    Therefore the parameter can be sampled by rescaling uniform numbers onto
+    :math:`[(r_{l})^{n+1}, (r_{h})^{n+1}]` and taking the :math:`(n+1)`-th
+    root.
+
+    Parameters
+    ----------
+    \**params :
+        The keyword arguments should provide the names of parameters and their
+        corresponding bounds, as either tuples or a `boundaries.Bounds`
+        instance.
+
+    Attributes
+    ----------
+    dim : int
+        The dimension of volume space. In the notation above `dim`
+        is :math:`n+1`. For a 3-dimensional sphere this is 3.
+    """
+    name = "uniform_power_law"
+    def __init__(self, dim=None, **params):
+        super(UniformPowerLaw, self).__init__(**params)
+        self.dim = dim
+        self._norm = 1.0
+        self._lognorm = 0.0
+        for p in self._params:
+            self._norm *= self.dim / \
+                (self._bounds[p][1]**(self.dim) -
+                 self._bounds[p][0]**(self.dim))
+        self._lognorm = numpy.log(self._norm)
+
+    @property
+    def norm(self):
+        """float: The normalization of the multi-dimensional pdf."""
+        return self._norm
+
+    @property
+    def lognorm(self):
+        """float: The log of the normalization."""
+        return self._lognorm
+
+    def _cdfinv_param(self, param, value):
+        """Return inverse of cdf to map unit interval to parameter bounds.
+ """ + n = self.dim - 1 + r_l = self._bounds[param][0] + r_h = self._bounds[param][1] + new_value = ((r_h**(n+1) - r_l**(n+1))*value + r_l**(n+1))**(1./(n+1)) + return new_value + + def _pdf(self, **kwargs): + """Returns the pdf at the given values. The keyword arguments must + contain all of parameters in self's params. Unrecognized arguments are + ignored. + """ + for p in self._params: + if p not in kwargs.keys(): + raise ValueError( + 'Missing parameter {} to construct pdf.'.format(p)) + if kwargs in self: + pdf = self._norm * \ + numpy.prod([(kwargs[p])**(self.dim - 1) + for p in self._params]) + return float(pdf) + else: + return 0.0 + + def _logpdf(self, **kwargs): + """Returns the log of the pdf at the given values. The keyword + arguments must contain all of parameters in self's params. Unrecognized + arguments are ignored. + """ + for p in self._params: + if p not in kwargs.keys(): + raise ValueError( + 'Missing parameter {} to construct pdf.'.format(p)) + if kwargs in self: + log_pdf = self._lognorm + \ + (self.dim - 1) * \ + numpy.log([kwargs[p] for p in self._params]).sum() + return log_pdf + else: + return -numpy.inf + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + """Returns a distribution based on a configuration file. The parameters + for the distribution are retrieved from the section titled + "[`section`-`variable_args`]" in the config file. + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the distribution + options. + section : str + Name of the section in the configuration file. + variable_args : str + The names of the parameters for this distribution, separated by + `prior.VARARGS_DELIM`. These must appear in the "tag" part + of the section header. + + Returns + ------- + Uniform + A distribution instance from the pycbc.inference.prior module. + """ + return super(UniformPowerLaw, cls).from_config(cp, section, + variable_args, + bounds_required=True)
+
+ + + +
+[docs]
+class UniformRadius(UniformPowerLaw):
+    """ For a uniform-in-volume distribution using spherical coordinates, this
+    is the distribution to use for the radius.
+
+    For more details see UniformPowerLaw.
+    """
+    name = "uniform_radius"
+    def __init__(self, dim=3, **params):
+        super(UniformRadius, self).__init__(dim=3, **params)
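+
+# --- Editor's illustrative sketch (not part of the PyCBC source) ---
+# UniformRadius (dim=3) gives p(r) ∝ r^2, i.e. a uniform-in-volume prior.
+# Assuming the parameter is named "distance", about half of the samples drawn
+# on [0, R] should lie beyond R / 2^(1/3) ≈ 0.794 R, since the CDF is (r/R)^3.
+import numpy
+from pycbc import distributions as dist
+
+R = 1000.0
+r = dist.UniformRadius(distance=(0.0, R)).rvs(size=100000)["distance"]
+print(numpy.mean(r > R * 0.5**(1.0 / 3.0)))  # should be close to 0.5
+# --- end sketch ---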
+ + +__all__ = ["UniformPowerLaw", "UniformRadius"] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions/qnm.html b/latest/html/_modules/pycbc/distributions/qnm.html new file mode 100644 index 00000000000..185b7d71f70 --- /dev/null +++ b/latest/html/_modules/pycbc/distributions/qnm.html @@ -0,0 +1,407 @@ + + + + + + pycbc.distributions.qnm — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.distributions.qnm

+# Copyright (C) 2018 Miriam Cabero, Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+import logging
+import re
+import numpy
+
+import pycbc
+from pycbc import conversions, boundaries
+
+from . import uniform, bounded
+
+logger = logging.getLogger('pycbc.distributions.qnm')
+
+
+
+[docs] +class UniformF0Tau(uniform.Uniform): + """A distribution uniform in QNM frequency and damping time. + + Constraints may be placed to exclude frequencies and damping times + corresponding to specific masses and spins. + + To ensure a properly normalized pdf that accounts for the constraints + on final mass and spin, a renormalization factor is calculated upon + initialization. This is calculated numerically: f0 and tau are drawn + randomly, then the norm is scaled by the fraction of points that yield + final masses and spins within the constraints. The `norm_tolerance` keyword + arguments sets the error on the estimate of the norm from this numerical + method. If this value is too large, such that no points are found in + the allowed region, a ValueError is raised. + + Parameters + ---------- + f0 : tuple or boundaries.Bounds + The range of QNM frequencies (in Hz). + tau : tuple or boundaries.Bounds + The range of QNM damping times (in s). + final_mass : tuple or boundaries.Bounds, optional + The range of final masses to allow. Default is [0,inf). + final_spin : tuple or boundaries.Bounds, optional + The range final spins to allow. Must be in [-0.996, 0.996], which is + the default. + rdfreq : str, optional + Use the given string as the name for the f0 parameter. Default is 'f0'. + damping_time : str, optional + Use the given string as the name for the tau parameter. Default is + 'tau'. + norm_tolerance : float, optional + The tolerance on the estimate of the normalization. Default is 1e-3. + norm_seed : int, optional + Seed to use for the random number generator when estimating the norm. + Default is 0. After the norm is estimated, the random number generator + is set back to the state it was in upon initialization. + + Examples + -------- + + Create a distribution: + + >>> dist = UniformF0Tau(f0=(10., 2048.), tau=(1e-4,1e-2)) + + Check that all random samples drawn from the distribution yield final + masses > 1: + + >>> from pycbc import conversions + >>> samples = dist.rvs(size=1000) + >>> (conversions.final_mass_from_f0_tau(samples['f0'], + samples['tau']) > 1.).all() + True + + Create a distribution with tighter bounds on final mass and spin: + + >>> dist = UniformF0Tau(f0=(10., 2048.), tau=(1e-4,1e-2), + final_mass=(20., 200.), final_spin=(0,0.996)) + + Check that all random samples drawn from the distribution are in the + final mass and spin constraints: + + >>> samples = dist.rvs(size=1000) + >>> (conversions.final_mass_from_f0_tau(samples['f0'], + samples['tau']) >= 20.).all() + True + >>> (conversions.final_mass_from_f0_tau(samples['f0'], + samples['tau']) < 200.).all() + True + >>> (conversions.final_spin_from_f0_tau(samples['f0'], + samples['tau']) >= 0.).all() + True + >>> (conversions.final_spin_from_f0_tau(samples['f0'], + samples['tau']) < 0.996).all() + True + + """ + + name = 'uniform_f0_tau' + + def __init__(self, f0=None, tau=None, final_mass=None, final_spin=None, + rdfreq='f0', damping_time='tau', norm_tolerance=1e-3, + norm_seed=0): + if f0 is None: + raise ValueError("must provide a range for f0") + if tau is None: + raise ValueError("must provide a range for tau") + self.rdfreq = rdfreq + self.damping_time = damping_time + parent_args = {rdfreq: f0, damping_time: tau} + super(UniformF0Tau, self).__init__(**parent_args) + if final_mass is None: + final_mass = (0., numpy.inf) + if final_spin is None: + final_spin = (-0.996, 0.996) + self.final_mass_bounds = boundaries.Bounds( + min_bound=final_mass[0], max_bound=final_mass[1]) + self.final_spin_bounds = 
boundaries.Bounds(
+            min_bound=final_spin[0], max_bound=final_spin[1])
+        # Re-normalize to account for the cuts: do this by sampling a large
+        # number of (f0, tau) points and seeing how many fall in the desired
+        # range.
+        # preserve the current random state
+        s = numpy.random.get_state()
+        numpy.random.seed(norm_seed)
+        nsamples = int(1./norm_tolerance**2)
+        draws = super(UniformF0Tau, self).rvs(size=nsamples)
+        # reset the random state
+        numpy.random.set_state(s)
+        num_in = self._constraints(draws).sum()
+        # if num_in is 0, then the requested tolerance is too large
+        if num_in == 0:
+            raise ValueError("the normalization is less than the "
+                             "norm_tolerance; try again with a smaller "
+                             "norm_tolerance")
+        self._lognorm += numpy.log(num_in) - numpy.log(nsamples)
+        self._norm = numpy.exp(self._lognorm)
+
+    def __contains__(self, params):
+        isin = super(UniformF0Tau, self).__contains__(params)
+        if isin:
+            isin &= self._constraints(params)
+        return isin
+
+    def _constraints(self, params):
+        f0 = params[self.rdfreq]
+        tau = params[self.damping_time]
+        # check if we need to specify a particular mode (l,m) != (2,2)
+        if re.match(r'f_\d{3}', self.rdfreq):
+            mode = self.rdfreq.strip('f_')
+            l, m = int(mode[0]), int(mode[1])
+        else:
+            l, m = 2, 2
+        # temporarily silence invalid warnings... these will just be ruled out
+        # automatically
+        with numpy.errstate(invalid="ignore"):
+            mf = conversions.final_mass_from_f0_tau(f0, tau, l=l, m=m)
+            sf = conversions.final_spin_from_f0_tau(f0, tau, l=l, m=m)
+        isin = (self.final_mass_bounds.__contains__(mf)) & (
+            self.final_spin_bounds.__contains__(sf))
+        return isin
+
+[docs] + def rvs(self, size=1): + """Draw random samples from this distribution. + + Parameters + ---------- + size : int, optional + The number of draws to do. Default is 1. + + Returns + ------- + array + A structured array of the random draws. + """ + size = int(size) + dtype = [(p, float) for p in self.params] + arr = numpy.zeros(size, dtype=dtype) + remaining = size + keepidx = 0 + while remaining: + draws = super(UniformF0Tau, self).rvs(size=remaining) + mask = self._constraints(draws) + addpts = mask.sum() + arr[keepidx:keepidx+addpts] = draws[mask] + keepidx += addpts + remaining = size - keepidx + return arr
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + """Initialize this class from a config file. + + Bounds on ``f0``, ``tau``, ``final_mass`` and ``final_spin`` should + be specified by providing ``min-{param}`` and ``max-{param}``. If + the ``f0`` or ``tau`` param should be renamed, ``rdfreq`` and + ``damping_time`` should be provided; these must match + ``variable_args``. If ``rdfreq`` and ``damping_time`` are not + provided, ``variable_args`` are expected to be ``f0`` and ``tau``. + + Only ``min/max-f0`` and ``min/max-tau`` need to be provided. + + Example: + + .. code-block:: ini + + [{section}-f0+tau] + name = uniform_f0_tau + min-f0 = 10 + max-f0 = 2048 + min-tau = 0.0001 + max-tau = 0.010 + min-final_mass = 10 + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + WorkflowConfigParser instance to read. + section : str + The name of the section to read. + variable_args : str + The name of the variable args. These should be separated by + ``pycbc.VARARGS_DELIM``. + + Returns + ------- + UniformF0Tau : + This class initialized with the parameters provided in the config + file. + """ + tag = variable_args + variable_args = set(variable_args.split(pycbc.VARARGS_DELIM)) + # get f0 and tau + f0 = bounded.get_param_bounds_from_config(cp, section, tag, 'f0') + tau = bounded.get_param_bounds_from_config(cp, section, tag, 'tau') + # see if f0 and tau should be renamed + if cp.has_option_tag(section, 'rdfreq', tag): + rdfreq = cp.get_opt_tag(section, 'rdfreq', tag) + else: + rdfreq = 'f0' + if cp.has_option_tag(section, 'damping_time', tag): + damping_time = cp.get_opt_tag(section, 'damping_time', tag) + else: + damping_time = 'tau' + # check that they match whats in the variable args + if not variable_args == set([rdfreq, damping_time]): + raise ValueError("variable args do not match rdfreq and " + "damping_time names") + # get the final mass and spin values, if provided + final_mass = bounded.get_param_bounds_from_config( + cp, section, tag, 'final_mass') + final_spin = bounded.get_param_bounds_from_config( + cp, section, tag, 'final_spin') + extra_opts = {} + if cp.has_option_tag(section, 'norm_tolerance', tag): + extra_opts['norm_tolerance'] = float( + cp.get_opt_tag(section, 'norm_tolerance', tag)) + if cp.has_option_tag(section, 'norm_seed', tag): + extra_opts['norm_seed'] = int( + cp.get_opt_tag(section, 'norm_seed', tag)) + return cls(f0=f0, tau=tau, + final_mass=final_mass, final_spin=final_spin, + rdfreq=rdfreq, damping_time=damping_time, + **extra_opts)
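+
+# --- Editor's illustrative sketch (not part of the PyCBC source) ---
+# The f0 and tau parameters of UniformF0Tau can be renamed (e.g. to label a
+# particular ringdown mode) via the rdfreq and damping_time keywords shown
+# above. The bounds and names below are arbitrary assumptions; norm_tolerance
+# is loosened so the numerical normalization stays cheap for a quick test.
+from pycbc.distributions.qnm import UniformF0Tau
+
+dist = UniformF0Tau(f0=(10., 2048.), tau=(1e-4, 1e-2),
+                    rdfreq='f_220', damping_time='tau_220',
+                    norm_tolerance=1e-2)
+samples = dist.rvs(size=10)
+print(samples.dtype.names)  # fields are named 'f_220' and 'tau_220'
+# --- end sketch ---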
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions/sky_location.html b/latest/html/_modules/pycbc/distributions/sky_location.html new file mode 100644 index 00000000000..9f515c84ad2 --- /dev/null +++ b/latest/html/_modules/pycbc/distributions/sky_location.html @@ -0,0 +1,318 @@ + + + + + + pycbc.distributions.sky_location — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.distributions.sky_location

+# Copyright (C) 2016  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This modules provides classes for evaluating sky distributions in
+right ascension and declination.
+"""
+
+import logging
+import numpy
+
+from scipy.spatial.transform import Rotation
+
+from pycbc.distributions import angular
+from pycbc import VARARGS_DELIM
+from pycbc.io import FieldArray
+
+logger = logging.getLogger('pycbc.distributions.sky_location')
+
+
+
+[docs] +class UniformSky(angular.UniformSolidAngle): + """A distribution that is uniform on the sky. This is the same as + UniformSolidAngle, except that the polar angle varies from pi/2 (the north + pole) to -pi/2 (the south pole) instead of 0 to pi. Also, the default + names are "dec" (declination) for the polar angle and "ra" (right + ascension) for the azimuthal angle, instead of "theta" and "phi". + """ + name = 'uniform_sky' + _polardistcls = angular.CosAngle + _default_polar_angle = 'dec' + _default_azimuthal_angle = 'ra'
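+
+# --- Editor's illustrative sketch (not part of the PyCBC source) ---
+# UniformSky draws points isotropically over the sky: "ra" is uniform on
+# [0, 2*pi) and sin("dec") is uniform on [-1, 1]. A quick check, assuming the
+# default parameter names and default (full-sky) bounds:
+import numpy
+from pycbc.distributions.sky_location import UniformSky
+
+sky = UniformSky()
+pts = sky.rvs(size=10000)
+print(pts['ra'].min() >= 0.0, pts['ra'].max() < 2 * numpy.pi)
+print(abs(numpy.median(numpy.sin(pts['dec']))) < 0.05)  # close to 0 for isotropy
+# --- end sketch ---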
+ + + +
+[docs] +class FisherSky(): + """A distribution that returns a random angle drawn from an approximate + `Von_Mises-Fisher distribution`_. Assumes that the Fisher concentration + parameter is large, so that we can draw the samples from a simple + rotationally-invariant distribution centered at the North Pole (which + factors as a uniform distribution for the right ascension, and a Rayleigh + distribution for the declination, as described in + `Fabrycky and Winn 2009 ApJ 696 1230`) and then rotate the samples to be + centered around the specified mean position. As in UniformSky, the + declination varies from π/2 to -π/2 and the right ascension varies from + 0 to 2π. + + .. _Von_Mises-Fisher distribution: + http://en.wikipedia.org/wiki/Von_Mises-Fisher_distribution + + .. _Fabrycky and Winn 2009 ApJ 696 1230: + https://doi.org/10.1088/0004-637X/696/2/1230 + + .. _Briggs et al 1999 ApJS 122 503: + https://doi.org/10.1086/313221 + + Parameters + ---------- + mean_ra: float + RA of the center of the distribution. + mean_dec: float + Declination of the center of the distribution. + sigma: float + Spread of the distribution. For the precise interpretation, see Eq 8 + of `Briggs et al 1999 ApJS 122 503`_. This should be smaller than + about 20 deg for the approximation to be valid. + angle_unit: str + Unit for the angle parameters: either "deg" or "rad". + """ + name = 'fisher_sky' + _params = ['ra', 'dec'] + + def __init__(self, **params): + if params['angle_unit'] not in ['deg', 'rad']: + raise ValueError("Only deg or rad is allowed as angle unit") + mean_ra = params['mean_ra'] + mean_dec = params['mean_dec'] + sigma = params['sigma'] + if params['angle_unit'] == 'deg': + mean_ra = numpy.deg2rad(mean_ra) + mean_dec = numpy.deg2rad(mean_dec) + sigma = numpy.deg2rad(sigma) + if mean_ra < 0 or mean_ra > 2 * numpy.pi: + raise ValueError( + f'The mean RA must be between 0 and 2π, {mean_ra} rad given' + ) + if mean_dec < -numpy.pi/2 or mean_dec > numpy.pi/2: + raise ValueError( + 'The mean declination must be between ' + f'-π/2 and π/2, {mean_dec} rad given' + ) + if sigma <= 0 or sigma > 2 * numpy.pi: + raise ValueError( + 'Sigma must be positive and smaller than 2π ' + '(preferably much smaller)' + ) + if sigma > 0.35: + logger.warning( + 'Warning: sigma = %s rad is probably too large for the ' + 'Fisher approximation to be valid', sigma + ) + self.rayleigh_scale = 0.66 * sigma + # Prepare a rotation that puts the North Pole at the mean position + self.rotation = Rotation.from_euler( + 'yz', + [numpy.pi / 2 - mean_dec, mean_ra] + ) + + @property + def params(self): + return self._params + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + tag = variable_args + variable_args = variable_args.split(VARARGS_DELIM) + if set(variable_args) != set(cls._params): + raise ValueError("Not all parameters used by this distribution " + "included in tag portion of section name") + mean_ra = float(cp.get_opt_tag(section, 'mean_ra', tag)) + mean_dec = float(cp.get_opt_tag(section, 'mean_dec', tag)) + sigma = float(cp.get_opt_tag(section, 'sigma', tag)) + angle_unit = cp.get_opt_tag(section, 'angle_unit', tag) + return cls( + mean_ra=mean_ra, + mean_dec=mean_dec, + sigma=sigma, + angle_unit=angle_unit + )
+ + +
+[docs] + def rvs(self, size): + # Draw samples from a distribution centered on the North pole + np_ra = numpy.random.uniform( + low=0, + high=(2*numpy.pi), + size=size + ) + np_dec = numpy.random.rayleigh( + scale=self.rayleigh_scale, + size=size + ) + + # Convert the samples to intermediate cartesian representation + np_cart = numpy.empty(shape=(size, 3)) + np_cart[:, 0] = numpy.cos(np_ra) * numpy.sin(np_dec) + np_cart[:, 1] = numpy.sin(np_ra) * numpy.sin(np_dec) + np_cart[:, 2] = numpy.cos(np_dec) + + # Rotate the samples according to our pre-built rotation + rot_cart = self.rotation.apply(np_cart) + + # Convert the samples back to spherical coordinates. + # Some unpleasant conditional operations are needed + # to get the correct angle convention. + rot_radec = FieldArray( + size, + dtype=[ + ('ra', '<f8'), + ('dec', '<f8') + ] + ) + rot_radec['ra'] = numpy.arctan2(rot_cart[:, 1], rot_cart[:, 0]) + neg_mask = rot_radec['ra'] < 0 + rot_radec['ra'][neg_mask] += 2 * numpy.pi + rot_radec['dec'] = numpy.arcsin(rot_cart[:, 2]) + return rot_radec
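+
+# --- Editor's illustrative sketch (not part of the PyCBC source) ---
+# FisherSky concentrates samples around a chosen (mean_ra, mean_dec); the
+# angular offsets from the mean should be of order sigma. The angles below
+# are arbitrary assumptions, given in degrees.
+import numpy
+from pycbc.distributions.sky_location import FisherSky
+
+fisher = FisherSky(mean_ra=120.0, mean_dec=-30.0, sigma=5.0, angle_unit='deg')
+pts = fisher.rvs(size=10000)
+# angular separation from the mean position, via the spherical law of cosines
+ra0, dec0 = numpy.deg2rad(120.0), numpy.deg2rad(-30.0)
+sep = numpy.arccos(
+    numpy.sin(dec0) * numpy.sin(pts['dec'])
+    + numpy.cos(dec0) * numpy.cos(pts['dec']) * numpy.cos(pts['ra'] - ra0))
+print(numpy.degrees(numpy.median(sep)))  # a few degrees, comparable to sigma
+# --- end sketch ---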
+
+ + + +__all__ = ['UniformSky', 'FisherSky'] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions/spins.html b/latest/html/_modules/pycbc/distributions/spins.html new file mode 100644 index 00000000000..018190e44e8 --- /dev/null +++ b/latest/html/_modules/pycbc/distributions/spins.html @@ -0,0 +1,434 @@ + + + + + + pycbc.distributions.spins — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.distributions.spins

+# Copyright (C) 2017 Collin Capano, Chris Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This modules provides spin distributions of CBCs.
+"""
+import logging
+import numpy
+
+from pycbc import conversions
+from pycbc.distributions.uniform import Uniform
+from pycbc.distributions.angular import UniformAngle
+from pycbc.distributions.power_law import UniformPowerLaw
+from pycbc.distributions.arbitrary import Arbitrary
+from pycbc.distributions.bounded import get_param_bounds_from_config, \
+    VARARGS_DELIM, BoundedDist
+
+logger = logging.getLogger('pycbc.distributions.spins')
+
+
+
+[docs] +class IndependentChiPChiEff(Arbitrary): + r"""A distribution such that :math:`\chi_{\mathrm{eff}}` and + :math:`\chi_p` are uniform and independent of each other. + + To ensure constraints are applied correctly, this distribution produces all + three components of both spins as well as the component masses. + + Parameters + ---------- + mass1 : BoundedDist, Bounds, or tuple + The distribution or bounds to use for mass1. Must be either a + BoundedDist giving the distribution on mass1, or bounds (as + either a Bounds instance or a tuple) giving the minimum and maximum + values to use for mass1. If the latter, a Uniform distribution will + be used. + mass2 : BoundedDist, Bounds, or tuple + The distribution or bounds to use for mass2. Syntax is the same as + mass1. + chi_eff : BoundedDist, Bounds, or tuple; optional + The distribution or bounds to use for :math:`chi_eff`. Syntax is the + same as mass1, except that None may also be passed. In that case, + `(-1, 1)` will be used for the bounds. Default is None. + chi_a : BoundedDist, Bounds, or tuple; optional + The distribution or bounds to use for :math:`chi_a`. Syntax is the + same as mass1, except that None may also be passed. In that case, + `(-1, 1)` will be used for the bounds. Default is None. + xi_bounds : Bounds or tuple, optional + The bounds to use for :math:`\xi_1` and :math:`\xi_2`. Must be + :math:`\in (0, 1)`. If None (the default), will be `(0, 1)`. + nsamples : int, optional + The number of samples to use for the internal kde. The larger the + number of samples, the more accurate the pdf will be, but the longer + it will take to evaluate. Default is 10000. + seed : int, optional + Seed value to use for the number generator for the kde. The current + random state of numpy will be saved prior to setting the seed. After + the samples are generated, the state will be set back to what it was. + If None provided, will use 0. + """ + name = "independent_chip_chieff" + _params = ['mass1', 'mass2', 'xi1', 'xi2', 'chi_eff', 'chi_a', + 'phi_a', 'phi_s'] + + def __init__(self, mass1=None, mass2=None, chi_eff=None, chi_a=None, + xi_bounds=None, nsamples=None, seed=None): + + if isinstance(mass1, BoundedDist): + self.mass1_distr = mass1 + else: + self.mass1_distr = Uniform(mass1=mass1) + if isinstance(mass2, BoundedDist): + self.mass2_distr = mass2 + else: + self.mass2_distr = Uniform(mass2=mass2) + # chi eff + if isinstance(chi_eff, BoundedDist): + self.chieff_distr = chi_eff + else: + if chi_eff is None: + chi_eff = (-1., 1.) + self.chieff_distr = Uniform(chi_eff=chi_eff) + if isinstance(chi_a, BoundedDist): + self.chia_distr = chi_a + else: + if chi_a is None: + chi_a = (-1., 1.) + self.chia_distr = Uniform(chi_a=chi_a) + # xis + if xi_bounds is None: + xi_bounds = (0, 1.) + if (xi_bounds[0] > 1. or xi_bounds[0] < 0.) or ( + xi_bounds[1] > 1. 
or xi_bounds[1] < 0.): + raise ValueError("xi bounds must be in [0, 1)") + self.xi1_distr = UniformPowerLaw(dim=0.5, xi1=xi_bounds) + self.xi2_distr = UniformPowerLaw(dim=0.5, xi2=xi_bounds) + # the angles + self.phia_distr = UniformAngle(phi_a=(0,2)) + self.phis_distr = UniformAngle(phi_s=(0,2)) + self.distributions = {'mass1': self.mass1_distr, + 'mass2': self.mass2_distr, + 'xi1': self.xi1_distr, + 'xi2': self.xi2_distr, + 'chi_eff': self.chieff_distr, + 'chi_a': self.chia_distr, + 'phi_a': self.phia_distr, + 'phi_s': self.phis_distr} + # create random variables for the kde + if nsamples is None: + nsamples = 1e4 + # save the current random state + rstate = numpy.random.get_state() + # set the seed + if seed is None: + seed = 0 + numpy.random.seed(seed) + rvals = self.rvs(size=int(nsamples)) + # reset the random state back to what it was + numpy.random.set_state(rstate) + bounds = dict(b for distr in self.distributions.values() + for b in distr.bounds.items()) + super(IndependentChiPChiEff, self).__init__(mass1=rvals['mass1'], + mass2=rvals['mass2'], xi1=rvals['xi1'], xi2=rvals['xi2'], + chi_eff=rvals['chi_eff'], chi_a=rvals['chi_a'], + phi_a=rvals['phi_a'], phi_s=rvals['phi_s'], + bounds=bounds) + + def _constraints(self, values): + """Applies physical constraints to the given parameter values. + + Parameters + ---------- + values : {arr or dict} + A dictionary or structured array giving the values. + + Returns + ------- + bool + Whether or not the values satisfy physical + """ + mass1, mass2, phi_a, phi_s, chi_eff, chi_a, xi1, xi2, _ = \ + conversions.ensurearray(values['mass1'], values['mass2'], + values['phi_a'], values['phi_s'], + values['chi_eff'], values['chi_a'], + values['xi1'], values['xi2']) + s1x = conversions.spin1x_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s) + s2x = conversions.spin2x_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2, + xi2, phi_a, phi_s) + s1y = conversions.spin1y_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s) + s2y = conversions.spin2y_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2, + xi2, phi_a, phi_s) + s1z = conversions.spin1z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2, + chi_eff, chi_a) + s2z = conversions.spin2z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2, + chi_eff, chi_a) + test = ((s1x**2. + s1y**2. + s1z**2.) < 1.) & \ + ((s2x**2. + s2y**2. + s2z**2.) < 1.) + return test + + def __contains__(self, params): + """Determines whether the given values are in each parameter's bounds + and satisfy the constraints. + """ + isin = all([params in dist for dist in self.distributions.values()]) + if not isin: + return False + # in the individual distributions, apply constrains + return self._constraints(params) + + def _draw(self, size=1, **kwargs): + """Draws random samples without applying physical constrains. 
+ """ + # draw masses + try: + mass1 = kwargs['mass1'] + except KeyError: + mass1 = self.mass1_distr.rvs(size=size)['mass1'] + try: + mass2 = kwargs['mass2'] + except KeyError: + mass2 = self.mass2_distr.rvs(size=size)['mass2'] + # draw angles + try: + phi_a = kwargs['phi_a'] + except KeyError: + phi_a = self.phia_distr.rvs(size=size)['phi_a'] + try: + phi_s = kwargs['phi_s'] + except KeyError: + phi_s = self.phis_distr.rvs(size=size)['phi_s'] + # draw chi_eff, chi_a + try: + chi_eff = kwargs['chi_eff'] + except KeyError: + chi_eff = self.chieff_distr.rvs(size=size)['chi_eff'] + try: + chi_a = kwargs['chi_a'] + except KeyError: + chi_a = self.chia_distr.rvs(size=size)['chi_a'] + # draw xis + try: + xi1 = kwargs['xi1'] + except KeyError: + xi1 = self.xi1_distr.rvs(size=size)['xi1'] + try: + xi2 = kwargs['xi2'] + except KeyError: + xi2 = self.xi2_distr.rvs(size=size)['xi2'] + dtype = [(p, float) for p in self.params] + arr = numpy.zeros(size, dtype=dtype) + arr['mass1'] = mass1 + arr['mass2'] = mass2 + arr['phi_a'] = phi_a + arr['phi_s'] = phi_s + arr['chi_eff'] = chi_eff + arr['chi_a'] = chi_a + arr['xi1'] = xi1 + arr['xi2'] = xi2 + return arr + +
+[docs] + def apply_boundary_conditions(self, **kwargs): + return kwargs
+ + + +
+[docs] + def rvs(self, size=1, **kwargs): + """Returns random values for all of the parameters. + """ + size = int(size) + dtype = [(p, float) for p in self.params] + arr = numpy.zeros(size, dtype=dtype) + remaining = size + keepidx = 0 + while remaining: + draws = self._draw(size=remaining, **kwargs) + mask = self._constraints(draws) + addpts = mask.sum() + arr[keepidx:keepidx+addpts] = draws[mask] + keepidx += addpts + remaining = size - keepidx + return arr
+ + + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + """Returns a distribution based on a configuration file. The parameters + for the distribution are retrieved from the section titled + "[`section`-`variable_args`]" in the config file. + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the distribution + options. + section : str + Name of the section in the configuration file. + variable_args : str + The names of the parameters for this distribution, separated by + `prior.VARARGS_DELIM`. These must appear in the "tag" part + of the section header. + + Returns + ------- + IndependentChiPChiEff + A distribution instance. + """ + tag = variable_args + variable_args = variable_args.split(VARARGS_DELIM) + if not set(variable_args) == set(cls._params): + raise ValueError("Not all parameters used by this distribution " + "included in tag portion of section name") + # get the bounds for the setable parameters + mass1 = get_param_bounds_from_config(cp, section, tag, 'mass1') + mass2 = get_param_bounds_from_config(cp, section, tag, 'mass2') + chi_eff = get_param_bounds_from_config(cp, section, tag, 'chi_eff') + chi_a = get_param_bounds_from_config(cp, section, tag, 'chi_a') + xi_bounds = get_param_bounds_from_config(cp, section, tag, 'xi_bounds') + if cp.has_option('-'.join([section, tag]), 'nsamples'): + nsamples = int(cp.get('-'.join([section, tag]), 'nsamples')) + else: + nsamples = None + return cls(mass1=mass1, mass2=mass2, chi_eff=chi_eff, chi_a=chi_a, + xi_bounds=xi_bounds, nsamples=nsamples)
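+
+# --- Editor's illustrative sketch (not part of the PyCBC source) ---
+# IndependentChiPChiEff returns every parameter needed to rebuild physical
+# spin vectors, and its rejection sampling only keeps draws whose implied
+# spin magnitudes are below 1. The mass bounds are arbitrary assumptions;
+# nsamples is kept small so building the internal kde stays cheap.
+from pycbc.distributions.spins import IndependentChiPChiEff
+
+spin_dist = IndependentChiPChiEff(mass1=(10., 50.), mass2=(10., 50.),
+                                  nsamples=1000)
+draws = spin_dist.rvs(size=100)
+print(draws.dtype.names)                  # mass1, mass2, xi1, xi2, chi_eff, ...
+print(abs(draws['chi_eff']).max() < 1.0)  # True: all draws are physical
+# --- end sketch ---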
+
+ + + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions/uniform.html b/latest/html/_modules/pycbc/distributions/uniform.html new file mode 100644 index 00000000000..7c56ece2ce1 --- /dev/null +++ b/latest/html/_modules/pycbc/distributions/uniform.html @@ -0,0 +1,290 @@ + + + + + + pycbc.distributions.uniform — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.distributions.uniform

+# Copyright (C) 2016  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This modules provides classes for evaluating uniform distributions.
+"""
+import logging
+import numpy
+
+from pycbc.distributions import bounded
+
+logger = logging.getLogger('pycbc.distributions.uniform')
+
+
+
+[docs] +class Uniform(bounded.BoundedDist): + """ + A uniform distribution on the given parameters. The parameters are + independent of each other. Instances of this class can be called like + a function. By default, logpdf will be called, but this can be changed + by setting the class's __call__ method to its pdf method. + + Parameters + ---------- + \**params : + The keyword arguments should provide the names of parameters and their + corresponding bounds, as either tuples or a `boundaries.Bounds` + instance. + + Examples + -------- + Create a 2 dimensional uniform distribution: + + >>> from pycbc import distributions + >>> dist = distributions.Uniform(mass1=(10.,50.), mass2=(10.,50.)) + + Get the log of the pdf at a particular value: + + >>> dist.logpdf(mass1=25., mass2=10.) + -7.3777589082278725 + + Do the same by calling the distribution: + + >>> dist(mass1=25., mass2=10.) + -7.3777589082278725 + + Generate some random values: + + >>> dist.rvs(size=3) + array([(36.90885758394699, 51.294212757995254), + (39.109058546060346, 13.36220145743631), + (34.49594465315212, 47.531953033719454)], + dtype=[('mass1', '<f8'), ('mass2', '<f8')]) + + Initialize a uniform distribution using a boundaries.Bounds instance, + with cyclic bounds: + + >>> dist = distributions.Uniform(phi=Bounds(10, 50, cyclic=True)) + + Apply boundary conditions to a value: + + >>> dist.apply_boundary_conditions(phi=60.) + {'mass1': array(20.0)} + + The boundary conditions are applied to the value before evaluating the pdf; + note that the following returns a non-zero pdf. If the bounds were not + cyclic, the following would return 0: + + >>> dist.pdf(phi=60.) + 0.025 + """ + name = 'uniform' + def __init__(self, **params): + super(Uniform, self).__init__(**params) + # compute the norm and save + # temporarily suppress numpy divide by 0 warning + with numpy.errstate(divide="ignore"): + self._lognorm = -sum([numpy.log(abs(bnd[1]-bnd[0])) + for bnd in self._bounds.values()]) + self._norm = numpy.exp(self._lognorm) + + @property + def norm(self): + """float: The normalization of the multi-dimensional pdf.""" + return self._norm + + @property + def lognorm(self): + """float: The log of the normalization""" + return self._lognorm + + def _cdfinv_param(self, param, value): + """Return the inverse cdf to map the unit interval to parameter bounds. + """ + lower_bound = self._bounds[param][0] + upper_bound = self._bounds[param][1] + return (upper_bound - lower_bound) * value + lower_bound + + def _pdf(self, **kwargs): + """Returns the pdf at the given values. The keyword arguments must + contain all of parameters in self's params. Unrecognized arguments are + ignored. + """ + if kwargs in self: + return self._norm + else: + return 0. + + def _logpdf(self, **kwargs): + """Returns the log of the pdf at the given values. The keyword + arguments must contain all of parameters in self's params. Unrecognized + arguments are ignored. + """ + if kwargs in self: + return self._lognorm + else: + return -numpy.inf + +
+[docs] + @classmethod + def from_config(cls, cp, section, variable_args): + """Returns a distribution based on a configuration file. The parameters + for the distribution are retrieved from the section titled + "[`section`-`variable_args`]" in the config file. + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the distribution + options. + section : str + Name of the section in the configuration file. + variable_args : str + The names of the parameters for this distribution, separated by + ``VARARGS_DELIM``. These must appear in the "tag" part + of the section header. + + Returns + ------- + Uniform + A distribution instance from the pycbc.inference.prior module. + """ + return super(Uniform, cls).from_config(cp, section, variable_args, + bounds_required=True)
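+
+# --- Editor's illustrative sketch (not part of the PyCBC source) ---
+# For the Uniform distribution the pdf is constant and equal to the inverse
+# of the product of the bound widths, so norm * area == 1, and calling the
+# distribution returns lognorm anywhere inside the bounds. A quick check with
+# arbitrary two-dimensional bounds:
+import numpy
+from pycbc import distributions
+
+u = distributions.Uniform(mass1=(10., 50.), mass2=(10., 50.))
+area = (50. - 10.) * (50. - 10.)
+print(numpy.isclose(u.norm * area, 1.0))                   # True
+print(numpy.isclose(u(mass1=25., mass2=10.), u.lognorm))   # True inside the bounds
+# --- end sketch ---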
+
+ + + +__all__ = ['Uniform'] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions/uniform_log.html b/latest/html/_modules/pycbc/distributions/uniform_log.html new file mode 100644 index 00000000000..12e743e2c14 --- /dev/null +++ b/latest/html/_modules/pycbc/distributions/uniform_log.html @@ -0,0 +1,205 @@ + + + + + + pycbc.distributions.uniform_log — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.distributions.uniform_log

+# Copyright (C) 2017  Christopher M. Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+""" This modules provides classes for evaluating distributions whose logarithm
+are uniform.
+"""
+import logging
+import numpy
+
+from pycbc.distributions import uniform
+
+logger = logging.getLogger('pycbc.distributions.uniform_log')
+
+
+
+[docs] +class UniformLog10(uniform.Uniform): + """ A uniform distribution on the log base 10 of the given parameters. + The parameters are independent of each other. Instances of this class can + be called like a function. By default, logpdf will be called. + + Parameters + ---------- + \**params : + The keyword arguments should provide the names of parameters and their + corresponding bounds, as either tuples or a `boundaries.Bounds` + instance. + """ + name = "uniform_log10" + + def __init__(self, **params): + super(UniformLog10, self).__init__(**params) + self._norm = numpy.prod([numpy.log10(bnd[1]) - numpy.log10(bnd[0]) + for bnd in self._bounds.values()]) + self._lognorm = numpy.log(self._norm) + + def _cdfinv_param(self, param, value): + """Return the cdfinv for a single given parameter """ + lower_bound = numpy.log10(self._bounds[param][0]) + upper_bound = numpy.log10(self._bounds[param][1]) + return 10. ** ((upper_bound - lower_bound) * value + lower_bound) + + def _pdf(self, **kwargs): + """Returns the pdf at the given values. The keyword arguments must + contain all of parameters in self's params. Unrecognized arguments are + ignored. + """ + if kwargs in self: + vals = numpy.array([numpy.log(10) * self._norm * kwargs[param] + for param in kwargs.keys()]) + return 1.0 / numpy.prod(vals) + else: + return 0. + + def _logpdf(self, **kwargs): + """Returns the log of the pdf at the given values. The keyword + arguments must contain all of parameters in self's params. Unrecognized + arguments are ignored. + """ + if kwargs in self: + return numpy.log(self._pdf(**kwargs)) + else: + return -numpy.inf
+ + +__all__ = ["UniformLog10"] +
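The UniformLog10 docstring above carries no usage example; the following is a minimal sketch mirroring the Uniform examples earlier in this package. The parameter name and bounds are arbitrary placeholders.

from pycbc import distributions

# Uniform in log10(mass1) between the (arbitrary) bounds 10 and 1000.
dist = distributions.UniformLog10(mass1=(10., 1000.))

# The density falls off as 1/mass1 inside the bounds and is zero outside.
print(dist.pdf(mass1=100.))
print(dist.logpdf(mass1=100.))

# rvs returns a structured array with a 'mass1' field; log10 of the draws
# should be roughly uniformly spread between 1 and 3.
samples = dist.rvs(size=1000)
print(samples['mass1'].min(), samples['mass1'].max())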
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/distributions/utils.html b/latest/html/_modules/pycbc/distributions/utils.html new file mode 100644 index 00000000000..efd5aaf395b --- /dev/null +++ b/latest/html/_modules/pycbc/distributions/utils.html @@ -0,0 +1,271 @@ + + + + + + pycbc.distributions.utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.distributions.utils

+# Copyright (C) 2021  Shichao Wu
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides functions for drawing samples from a standalone .ini file
+in a Python script, rather than in the command line.
+"""
+import logging
+import numpy as np
+
+from pycbc.types.config import InterpolatingConfigParser
+from pycbc import transforms
+from pycbc import distributions
+
+logger = logging.getLogger('pycbc.distributions.utils')
+
+
+
+[docs] +def prior_from_config(cp, prior_section='prior'): + """Loads a prior distribution from the given config file. + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + The config file to read. + prior_section : str, optional + The section prefix to retrieve the prior distributions from. + Defaults to 'prior'. + + Returns + ------- + distributions.JointDistribution + The prior distribution. + """ + + # Read variable and static parameters from the config file + variable_params, static_params = distributions.read_params_from_config( + cp, prior_section=prior_section, vargs_section='variable_params', + sargs_section='static_params') + + # Read waveform_transforms to apply to priors from the config file + if any(cp.get_subsections('waveform_transforms')): + waveform_transforms = transforms.read_transforms_from_config( + cp, 'waveform_transforms') + else: + waveform_transforms = None + + # Read constraints to apply to priors from the config file + constraints = distributions.read_constraints_from_config( + cp, transforms=waveform_transforms, static_args=static_params) + + # Get PyCBC distribution instances for each variable parameter in the + # config file + dists = distributions.read_distributions_from_config(cp, prior_section) + + # construct class that will return draws from the prior + return distributions.JointDistribution(variable_params, *dists, + **{"constraints": constraints})
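As a usage sketch (not part of the module), prior_from_config can be called directly by building a parser the same way draw_samples_from_config below does; the .ini path is a placeholder and the file must define [variable_params], [static_params] and [prior-...] sections.

from pycbc.types.config import InterpolatingConfigParser
from pycbc.distributions.utils import prior_from_config

# Parse a standalone configuration file (hypothetical path).
cp = InterpolatingConfigParser()
with open('bbh_prior.ini', 'r') as config_file:
    cp.read_file(config_file)

# Build the joint prior and draw from it.
prior = prior_from_config(cp, prior_section='prior')
samples = prior.rvs(size=10)
print(samples.fieldnames)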
+ + + +
+[docs] +def draw_samples_from_config(path, num=1, seed=150914): + r""" Generate sampling points from a standalone .ini file. + + Parameters + ---------- + path : str + The path to the .ini file. + num : int + The number of samples. + seed: int + The random seed for sampling. + + Returns + -------- + samples : pycbc.io.record.FieldArray + The parameter values and names of sample(s). + + Examples + -------- + Draw a sample from the distribution defined in the .ini file: + + >>> import numpy as np + >>> from pycbc.distributions.utils import draw_samples_from_config + + >>> # A path to the .ini file. + >>> CONFIG_PATH = "./pycbc_bbh_prior.ini" + >>> random_seed = np.random.randint(low=0, high=2**32-1) + >>> sample = draw_samples_from_config( + >>> path=CONFIG_PATH, num=1, seed=random_seed) + + >>> # Print all parameters. + >>> print(sample.fieldnames) + >>> print(sample) + >>> # Print a certain parameter, for example 'mass1'. + >>> print(sample[0]['mass1']) + """ + + np.random.seed(seed) + + # Initialise InterpolatingConfigParser class. + config_parser = InterpolatingConfigParser() + # Read the file + file = open(path, 'r') + config_parser.read_file(file) + file.close() + + # Construct class that will draw the samples. + prior_dists = prior_from_config(cp=config_parser) + # Draw samples from prior distribution. + samples = prior_dists.rvs(size=int(num)) + + # Apply parameter transformation. + if any(config_parser.get_subsections('waveform_transforms')): + waveform_transforms = transforms.read_transforms_from_config( + config_parser, 'waveform_transforms') + samples = transforms.apply_transforms(samples, waveform_transforms) + + return samples
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/dq.html b/latest/html/_modules/pycbc/dq.html new file mode 100644 index 00000000000..f52d43d2640 --- /dev/null +++ b/latest/html/_modules/pycbc/dq.html @@ -0,0 +1,588 @@ + + + + + + pycbc.dq — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.dq

+# Copyright (C) 2018 Alex Nitz
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" Utilities to query archival instrument status information of
+gravitational-wave detectors from public sources and/or dqsegdb.
+"""
+
+import logging
+import json
+import numpy
+from ligo.segments import segmentlist, segment
+from pycbc.frame.gwosc import get_run
+from pycbc.io import get_file
+
+logger = logging.getLogger('pycbc.dq')
+
+
+[docs] +def parse_veto_definer(veto_def_filename, ifos): + """ Parse a veto definer file from the filename and return a dictionary + indexed by ifo and veto definer category level. + + Parameters + ---------- + veto_def_filename: str + The path to the veto definer file + ifos: str + The list of ifos for which we require information from the veto + definer file + + Returns + -------- + parsed_definition: dict + Returns a dictionary first indexed by ifo, then category level, and + finally a list of veto definitions. + """ + from ligo.lw import table, utils as ligolw_utils + from pycbc.io.ligolw import LIGOLWContentHandler as h + + data = {} + for ifo_name in ifos: + data[ifo_name] = {} + data[ifo_name]['CAT_H'] = [] + for cat_num in range(1, 5): + data[ifo_name]['CAT_{}'.format(cat_num)] = [] + + indoc = ligolw_utils.load_filename(veto_def_filename, False, + contenthandler=h) + veto_table = table.Table.get_table(indoc, 'veto_definer') + + ifo = veto_table.getColumnByName('ifo') + name = veto_table.getColumnByName('name') + version = numpy.array(veto_table.getColumnByName('version')) + category = numpy.array(veto_table.getColumnByName('category')) + start = numpy.array(veto_table.getColumnByName('start_time')) + end = numpy.array(veto_table.getColumnByName('end_time')) + start_pad = numpy.array(veto_table.getColumnByName('start_pad')) + end_pad = numpy.array(veto_table.getColumnByName('end_pad')) + + for i in range(len(veto_table)): + if ifo[i] not in data: + continue + + # The veto-definer categories are weird! Hardware injections are stored + # in "3" and numbers above that are bumped up by one (although not + # often used any more). So we remap 3 to H and anything above 3 to + # N-1. 2 and 1 correspond to 2 and 1 (YAY!) + if category[i] > 3: + curr_cat = "CAT_{}".format(category[i]-1) + elif category[i] == 3: + curr_cat = "CAT_H" + else: + curr_cat = "CAT_{}".format(category[i]) + + veto_info = {'name': name[i], + 'version': version[i], + 'full_name': name[i]+':'+str(version[i]), + 'start': start[i], + 'end': end[i], + 'start_pad': start_pad[i], + 'end_pad': end_pad[i], + } + data[ifo[i]][curr_cat].append(veto_info) + return data
+ + + +GWOSC_URL = 'https://www.gwosc.org/timeline/segments/json/{}/{}_{}/{}/{}/' + + +
+[docs] +def query_dqsegdb2(detector, flag_name, start_time, end_time, server): + """Utility function for better error reporting when calling dqsegdb2. + """ + from dqsegdb2.query import query_segments + + complete_flag = detector + ':' + flag_name + try: + query_res = query_segments(complete_flag, + int(start_time), + int(end_time), + host=server) + return query_res['active'] + except Exception as e: + logger.error('Could not query segment database, check name ' + '(%s), times (%d-%d) and server (%s)', + complete_flag, int(start_time), int(end_time), + server) + raise e
+ + +
+[docs] +def query_flag(ifo, segment_name, start_time, end_time, + source='any', server="https://segments.ligo.org", + veto_definer=None, cache=False): + """Return the times where the flag is active + + Parameters + ---------- + ifo: string + The interferometer to query (H1, L1). + segment_name: string + The status flag to query from GWOSC. + start_time: int + The starting gps time to begin querying from GWOSC + end_time: int + The end gps time of the query + source: str, Optional + Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may + also be given. The default is to try GWOSC first then try dqsegdb. + server: str, Optional + The server path. Only used with dqsegdb atm. + veto_definer: str, Optional + The path to a veto definer to define groups of flags which + themselves define a set of segments. + cache: bool + If true cache the query. Default is not to cache + + Returns + --------- + segments: ligo.segments.segmentlist + List of segments + """ + flag_segments = segmentlist([]) + + if source in ['GWOSC', 'any']: + # Special cases as the GWOSC convention is backwards from normal + # LIGO / Virgo operation!!!! + if (('_HW_INJ' in segment_name and 'NO' not in segment_name) or + 'VETO' in segment_name): + data = query_flag(ifo, 'DATA', start_time, end_time) + + if '_HW_INJ' in segment_name: + name = 'NO_' + segment_name + else: + name = segment_name.replace('_VETO', '') + + negate = query_flag(ifo, name, start_time, end_time, cache=cache) + return (data - negate).coalesce() + + duration = end_time - start_time + try: + url = GWOSC_URL.format(get_run(start_time + duration/2, ifo), + ifo, segment_name, + int(start_time), int(duration)) + + fname = get_file(url, cache=cache, timeout=10) + data = json.load(open(fname, 'r')) + if 'segments' in data: + flag_segments = data['segments'] + + except Exception as e: + if source != 'any': + raise ValueError("Unable to find {} segments in GWOSC, check " + "flag name or times".format(segment_name)) + + return query_flag(ifo, segment_name, start_time, end_time, + source='dqsegdb', server=server, + veto_definer=veto_definer) + + elif source == 'dqsegdb': + # The veto definer will allow the use of MACRO names + # These directly correspond to the name in the veto definer file + if veto_definer is not None: + veto_def = parse_veto_definer(veto_definer, [ifo]) + + # We treat the veto definer name as if it were its own flag and + # process the flags in the veto definer + if veto_definer is not None and segment_name in veto_def[ifo]: + for flag in veto_def[ifo][segment_name]: + partial = segmentlist([]) + segs = query_dqsegdb2(ifo, flag['full_name'], + start_time, end_time, server) + # Apply padding to each segment + for rseg in segs: + seg_start = rseg[0] + flag['start_pad'] + seg_end = rseg[1] + flag['end_pad'] + partial.append(segment(seg_start, seg_end)) + + # Limit to the veto definer stated valid region of this flag + flag_start = flag['start'] + flag_end = flag['end'] + # Corner case: if the flag end time is 0 it means 'no limit' + # so use the query end time + if flag_end == 0: + flag_end = int(end_time) + send = segmentlist([segment(flag_start, flag_end)]) + flag_segments += (partial.coalesce() & send) + + else: # Standard case just query directly + segs = query_dqsegdb2(ifo, segment_name, start_time, end_time, + server) + for rseg in segs: + flag_segments.append(segment(rseg[0], rseg[1])) + + # dqsegdb output is not guaranteed to lie entirely within start + # and end times, hence restrict to this range + flag_segments = 
flag_segments.coalesce() & \ + segmentlist([segment(int(start_time), int(end_time))]) + + else: + raise ValueError("Source must be `dqsegdb`, `GWOSC` or `any`." + " Got {}".format(source)) + + return segmentlist(flag_segments).coalesce()
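A minimal usage sketch, not taken from the source: querying the public GWOSC 'DATA' flag for H1 around GW150914. This needs network access, and the flag name and GPS times are only illustrative.

from pycbc import dq

segs = dq.query_flag('H1', 'DATA', 1126259446, 1126259478)
for seg in segs:
    print(int(seg[0]), int(seg[1]))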
+ + + +
+[docs] +def query_cumulative_flags(ifo, segment_names, start_time, end_time, + source='any', server="https://segments.ligo.org", + veto_definer=None, + bounds=None, + padding=None, + override_ifos=None, + cache=False): + """Return the times where any flag is active + + Parameters + ---------- + ifo: string or dict + The interferometer to query (H1, L1). If a dict, an element for each + flag name must be provided. + segment_name: list of strings + The status flag to query from GWOSC. + start_time: int + The starting gps time to begin querying from GWOSC + end_time: int + The end gps time of the query + source: str, Optional + Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may + also be given. The default is to try GWOSC first then try dqsegdb. + server: str, Optional + The server path. Only used with dqsegdb atm. + veto_definer: str, Optional + The path to a veto definer to define groups of flags which + themselves define a set of segments. + bounds: dict, Optional + Dict containing start-end tuples keyed by the flag name which + indicate places which should have a distinct time period to be active. + padding: dict, Optional + Dict keyed by the flag name. Each element is a tuple + (start_pad, end_pad) which indicates how to change the segment boundaries. + override_ifos: dict, Optional + A dict keyed by flag_name to override the ifo option on a per flag + basis. + + Returns + --------- + segments: ligo.segments.segmentlist + List of segments + """ + total_segs = segmentlist([]) + for flag_name in segment_names: + ifo_name = ifo + if override_ifos is not None and flag_name in override_ifos: + ifo_name = override_ifos[flag_name] + + segs = query_flag(ifo_name, flag_name, start_time, end_time, + source=source, server=server, + veto_definer=veto_definer, + cache=cache) + + if padding and flag_name in padding: + s, e = padding[flag_name] + segs2 = segmentlist([]) + for seg in segs: + segs2.append(segment(seg[0] + s, seg[1] + e)) + segs = segs2 + + if bounds is not None and flag_name in bounds: + s, e = bounds[flag_name] + valid = segmentlist([segment([s, e])]) + segs = (segs & valid).coalesce() + + total_segs = (total_segs + segs).coalesce() + return total_segs
+ + + +
+[docs] +def parse_flag_str(flag_str): + """ Parse a dq flag query string + + Parameters + ---------- + flag_str: str + String to be parsed + + Returns + ------- + flags: list of strings + List of reduced name strings which can be passed to lower level + query commands + signs: dict + Dict of bools indicating if the flag should add positively to the + segmentlist + ifos: dict + Ifo specified for the given flag + bounds: dict + The boundary of a given flag + padding: dict + Any padding that should be applied to the segments for a given flag + """ + flags = flag_str.replace(' ', '').strip().split(',') + + signs = {} + ifos = {} + bounds = {} + padding = {} + bflags = [] + + for flag in flags: + # Check if the flag should add or subtract time + if not (flag[0] == '+' or flag[0] == '-'): + err_msg = "DQ flags must begin with a '+' or a '-' character. " + err_msg += "You provided {}. ".format(flag) + err_msg += "See http://pycbc.org/pycbc/latest/html/workflow/segments.html" + err_msg += " for more information." + raise ValueError(err_msg) + sign = flag[0] == '+' + flag = flag[1:] + + ifo = pad = bound = None + + # Check for non-default IFO + if len(flag.split(':')[0]) == 2: + ifo = flag.split(':')[0] + flag = flag[3:] + + # Check for padding options + if '<' in flag: + popt = flag.split('<')[1].split('>')[0] + spad, epad = popt.split(':') + pad = (float(spad), float(epad)) + flag = flag.replace(popt, '').replace('<>', '') + + # Check if there are bounds on the flag + if '[' in flag: + bopt = flag.split('[')[1].split(']')[0] + start, end = bopt.split(':') + bound = (int(start), int(end)) + flag = flag.replace(bopt, '').replace('[]', '') + + if ifo: + ifos[flag] = ifo + if pad: + padding[flag] = pad + if bound: + bounds[flag] = bound + bflags.append(flag) + signs[flag] = sign + + return bflags, signs, ifos, bounds, padding
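To make the syntax concrete, here is a small sketch of parse_flag_str applied to a hypothetical query string; the expected outputs, traced from the code above, are shown in the comments.

from pycbc.dq import parse_flag_str

flags, signs, ifos, bounds, padding = parse_flag_str('+H1:DATA<-8:8>,-L1:CBC_HW_INJ')
print(flags)    # ['DATA', 'CBC_HW_INJ']
print(signs)    # {'DATA': True, 'CBC_HW_INJ': False}
print(ifos)     # {'DATA': 'H1', 'CBC_HW_INJ': 'L1'}
print(padding)  # {'DATA': (-8.0, 8.0)}
print(bounds)   # {}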
+ + + +
+[docs] +def query_str(ifo, flag_str, start_time, end_time, source='any', + server="https://segments.ligo.org", veto_definer=None): + """ Query for flags based on a special str syntax + + Parameters + ---------- + ifo: str + The ifo to query for (may be overridden in syntax) + flag_str: str + Specification of how to do the query. Ex. +H1:DATA:1<-8:8>[0:100000000] + would return H1 time for the DATA available flag with version 1. It + would then apply an 8 second padding on each side and only return times + within the chosen range 0 to 100000000. + start_time: int + The start gps time. May be overridden for individual flags with the + flag str bounds syntax + end_time: int + The end gps time. May be overridden for individual flags with the + flag str bounds syntax + source: str, Optional + Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may + also be given. The default is to try GWOSC first then try dqsegdb. + server: str, Optional + The server path. Only used with dqsegdb atm. + veto_definer: str, Optional + The path to a veto definer to define groups of flags which + themselves define a set of segments. + + Returns + ------- + segs: segmentlist + A list of segments corresponding to the flag query string + """ + flags, sign, ifos, bounds, padding = parse_flag_str(flag_str) + up = [f for f in flags if sign[f]] + down = [f for f in flags if not sign[f]] + + if len(up) + len(down) != len(flags): + raise ValueError('Not all flags could be parsed, check +/- prefix') + segs = query_cumulative_flags(ifo, up, start_time, end_time, + source=source, + server=server, + veto_definer=veto_definer, + bounds=bounds, + padding=padding, + override_ifos=ifos) + + mseg = query_cumulative_flags(ifo, down, start_time, end_time, + source=source, + server=server, + veto_definer=veto_definer, + bounds=bounds, + padding=padding, + override_ifos=ifos) + + segs = (segs - mseg).coalesce() + return segs
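For completeness, a short end-to-end sketch using query_str; like query_flag this reaches out to GWOSC (or dqsegdb), and the flag string and GPS times are placeholders.

from pycbc import dq

# H1 science data with hardware-injection times removed, over a short stretch of O1.
segs = dq.query_str('H1', '+DATA,-CBC_HW_INJ', 1126259446, 1126259478)
print(abs(segs))  # total live time in seconds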
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/coherent.html b/latest/html/_modules/pycbc/events/coherent.html new file mode 100644 index 00000000000..e0bded9ff4f --- /dev/null +++ b/latest/html/_modules/pycbc/events/coherent.html @@ -0,0 +1,563 @@ + + + + + + pycbc.events.coherent — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.coherent

+# Copyright (C) 2022 Andrew Williamson
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" This module contains functions for calculating and manipulating coherent
+triggers.
+"""
+import logging
+import numpy as np
+
+logger = logging.getLogger('pycbc.events.coherent')
+
+
+
+[docs] +def get_coinc_indexes(idx_dict, time_delay_idx): + """Return the indexes corresponding to coincident triggers, requiring + they are seen in at least two detectors in the network + + Parameters + ---------- + idx_dict: dict + Dictionary of indexes of triggers above threshold in each + detector + time_delay_idx: dict + Dictionary giving time delay index (time_delay*sample_rate) for + each ifo + + Returns + ------- + coinc_idx: list + List of indexes for triggers in geocent time that appear in + multiple detectors + """ + coinc_list = np.array([], dtype=int) + for ifo in idx_dict.keys(): + # Create list of indexes above threshold in single detector in geocent + # time. Can then search for triggers that appear in multiple detectors + # later. + if len(idx_dict[ifo]) != 0: + coinc_list = np.hstack( + [coinc_list, idx_dict[ifo] - time_delay_idx[ifo]] + ) + # Search through coinc_idx for repeated indexes. These must have been loud + # in at least 2 detectors. + counts = np.unique(coinc_list, return_counts=True) + coinc_idx = counts[0][counts[1] > 1] + return coinc_idx
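A small worked sketch with invented numbers: after shifting each detector's trigger indexes to geocentric time, only the index seen in both detectors survives.

import numpy as np
from pycbc.events.coherent import get_coinc_indexes

# Hypothetical above-threshold sample indexes per detector and the
# corresponding time-delay indexes (time_delay * sample_rate) from geocenter.
idx_dict = {'H1': np.array([105, 230]), 'L1': np.array([102, 400])}
time_delay_idx = {'H1': 5, 'L1': 2}

print(get_coinc_indexes(idx_dict, time_delay_idx))  # -> [100]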
+ + + +
+[docs] +def get_coinc_triggers(snrs, idx, t_delay_idx): + """Returns the coincident triggers from the longer SNR timeseries + + Parameters + ---------- + snrs: dict + Dictionary of single detector SNR time series + idx: list + List of geocentric time indexes of coincident triggers + t_delay_idx: dict + Dictionary of indexes corresponding to light travel time from + geocenter for each detector + + Returns + ------- + coincs: dict + Dictionary of coincident trigger SNRs in each detector + """ + # loops through snrs + # %len(snrs[ifo]) was included as part of a wrap-around solution + coincs = { + ifo: snrs[ifo][(idx + t_delay_idx[ifo]) % len(snrs[ifo])] + for ifo in snrs} + return coincs
+ + + +
+[docs] +def coincident_snr(snr_dict, index, threshold, time_delay_idx): + """Calculate the coincident SNR for all coincident triggers above + threshold + + Parameters + ---------- + snr_dict: dict + Dictionary of individual detector SNRs + index: list + List of indexes (geocentric) for which to calculate coincident + SNR + threshold: float + Coincident SNR threshold. Triggers below this are cut + time_delay_idx: dict + Dictionary of time delay from geocenter in indexes for each + detector + + Returns + ------- + rho_coinc: numpy.ndarray + Coincident SNR values for surviving triggers + index: list + The subset of input indexes corresponding to triggers that + survive the cuts + coinc_triggers: dict + Dictionary of individual detector SNRs for triggers that + survive cuts + """ + # Restrict the snr timeseries to just the interesting points + coinc_triggers = get_coinc_triggers(snr_dict, index, time_delay_idx) + # Calculate the coincident snr + snr_array = np.array( + [coinc_triggers[ifo] for ifo in coinc_triggers.keys()] + ) + rho_coinc = abs(np.sqrt(np.sum(snr_array * snr_array.conj(), axis=0))) + # Apply threshold + thresh_indexes = rho_coinc > threshold + index = index[thresh_indexes] + coinc_triggers = get_coinc_triggers(snr_dict, index, time_delay_idx) + rho_coinc = rho_coinc[thresh_indexes] + return rho_coinc, index, coinc_triggers
+ + + +
+[docs] +def get_projection_matrix(f_plus, f_cross, sigma, projection="standard"): + """Calculate the matrix that projects the signal onto the network. + Definitions can be found in Fairhurst (2018) [arXiv:1712.04724]. + For the standard projection see Eq. 8, and for left/right + circular projections see Eq. 21, with further discussion in + Appendix A. See also Williamson et al. (2014) [arXiv:1410.6042] + for discussion in context of the GRB search with restricted + binary inclination angles. + + Parameters + ---------- + f_plus: dict + Dictionary containing the plus antenna response factors for + each IFO + f_cross: dict + Dictionary containing the cross antenna response factors for + each IFO + sigma: dict + Dictionary of the sensitivity weights for each IFO + projection: optional, {string, 'standard'} + The signal polarization to project. Choice of 'standard' + (unrestricted; default), 'right' or 'left' (circular + polarizations) + + Returns + ------- + projection_matrix: np.ndarray + The matrix that projects the signal onto the detector network + """ + # Calculate the weighted antenna responses + keys = sorted(sigma.keys()) + w_p = np.array([sigma[ifo] * f_plus[ifo] for ifo in keys]) + w_c = np.array([sigma[ifo] * f_cross[ifo] for ifo in keys]) + + # Get the projection matrix associated with the requested projection + if projection == "standard": + denom = np.dot(w_p, w_p) * np.dot(w_c, w_c) - np.dot(w_p, w_c) ** 2 + projection_matrix = ( + np.dot(w_c, w_c) * np.outer(w_p, w_p) + + np.dot(w_p, w_p) * np.outer(w_c, w_c) + - np.dot(w_p, w_c) * (np.outer(w_p, w_c) + np.outer(w_c, w_p)) + ) / denom + elif projection == "left": + projection_matrix = ( + np.outer(w_p, w_p) + + np.outer(w_c, w_c) + + (np.outer(w_p, w_c) - np.outer(w_c, w_p)) * 1j + ) / (np.dot(w_p, w_p) + np.dot(w_c, w_c)) + elif projection == "right": + projection_matrix = ( + np.outer(w_p, w_p) + + np.outer(w_c, w_c) + + (np.outer(w_c, w_p) - np.outer(w_p, w_c)) * 1j + ) / (np.dot(w_p, w_p) + np.dot(w_c, w_c)) + else: + raise ValueError( + f'Unknown projection: {projection}. Allowed values are: ' + '"standard", "left", and "right"') + + return projection_matrix
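The sketch below builds the standard two-detector projection matrix from invented antenna responses and sensitivities, just to show the expected input structure; the numbers carry no physical meaning.

from pycbc.events.coherent import get_projection_matrix

f_plus = {'H1': 0.5, 'L1': 0.3}
f_cross = {'H1': 0.2, 'L1': -0.4}
sigma = {'H1': 100., 'L1': 80.}

proj = get_projection_matrix(f_plus, f_cross, sigma, projection='standard')
print(proj.shape)  # (2, 2); use projection='left' or 'right' for circular polarizations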
+ + + +
+[docs] +def coherent_snr( + snr_triggers, index, threshold, projection_matrix, coinc_snr=None +): + """Calculate the coherent SNR for a given set of triggers. See + Eq. 2.26 of Harry & Fairhurst (2011) [arXiv:1012.4939]. + + Parameters + ---------- + snr_triggers: dict + Dictionary of the normalised complex snr time series for each + ifo + index: numpy.ndarray + Array of the indexes corresponding to triggers + threshold: float + Coherent SNR threshold. Triggers below this are cut + projection_matrix: numpy.ndarray + Matrix that projects the signal onto the network + coinc_snr: numpy.ndarray, optional + The coincident SNR for each trigger. + + Returns + ------- + rho_coh: numpy.ndarray + Array of coherent SNR for the detector network + index: numpy.ndarray + Indexes that survive cuts + snrv: dict + Dictionary of individual detector triggers that survive cuts + coinc_snr: numpy.ndarray or list + The coincident SNR values for triggers surviving the coherent + cut; an empty list if no coincident SNRs were given + """ + # Calculate rho_coh + snr_array = np.array( + [snr_triggers[ifo] for ifo in sorted(snr_triggers.keys())] + ) + snr_proj = np.inner(snr_array.conj().transpose(), projection_matrix) + rho_coh2 = sum(snr_proj.transpose() * snr_array) + rho_coh = abs(np.sqrt(rho_coh2)) + # Apply thresholds + above = rho_coh > threshold + index = index[above] + coinc_snr = [] if coinc_snr is None else coinc_snr + if len(coinc_snr) != 0: + coinc_snr = coinc_snr[above] + snrv = { + ifo: snr_triggers[ifo][above] + for ifo in snr_triggers.keys() + } + rho_coh = rho_coh[above] + return rho_coh, index, snrv, coinc_snr
+ + + +
+[docs] +def network_chisq(chisq, chisq_dof, snr_dict): + """Calculate the network chi-squared statistic. This is the sum of + SNR-weighted individual detector chi-squared values. See Eq. 5.4 + of Dorrington (2019) [http://orca.cardiff.ac.uk/id/eprint/128124]. + + Parameters + ---------- + chisq: dict + Dictionary of individual detector chi-squared statistics + chisq_dof: dict + Dictionary of the number of degrees of freedom of the + chi-squared statistic + snr_dict: dict + Dictionary of complex individual detector SNRs + + Returns + ------- + net_chisq: list + Network chi-squared values + """ + ifos = sorted(snr_dict.keys()) + chisq_per_dof = dict.fromkeys(ifos) + for ifo in ifos: + chisq_per_dof[ifo] = chisq[ifo] / chisq_dof[ifo] + snr2 = { + ifo: np.real(np.array(snr_dict[ifo]) * np.array(snr_dict[ifo]).conj()) + for ifo in ifos + } + coinc_snr2 = sum(snr2.values()) + snr2_ratio = {ifo: snr2[ifo] / coinc_snr2 for ifo in ifos} + net_chisq = sum([chisq_per_dof[ifo] * snr2_ratio[ifo] for ifo in ifos]) + return net_chisq
+ + + +
+[docs] +def null_snr( + rho_coh, rho_coinc, apply_cut=True, null_min=5.25, null_grad=0.2, + null_step=20.0, index=None, snrv=None +): + """Calculate the null SNR and optionally apply a threshold cut, removing + triggers whose null SNR is above null_min where coherent SNR <= null_step, + or above null_grad * (rho_coh - null_step) + null_min elsewhere. See + Eq. 3.1 of Harry & Fairhurst (2011) [arXiv:1012.4939] or + Eqs. 11 and 12 of Williamson et al. (2014) [arXiv:1410.6042]. + + Parameters + ---------- + rho_coh: numpy.ndarray + Array of coherent snr triggers + rho_coinc: numpy.ndarray + Array of coincident snr triggers + apply_cut: bool + Apply a cut and downweight on null SNR determined by null_min, + null_grad, null_step (default True) + null_min: scalar + Any trigger with null SNR below this is retained + null_grad: scalar + Gradient of null SNR cut where coherent SNR > null_step + null_step: scalar + The threshold in coherent SNR rho_coh above which the null SNR + threshold increases as null_grad * rho_coh + index: dict or None (optional; default None) + Indexes of triggers. If given, will remove triggers that fail + cuts + snrv: dict or None (optional; default None) + Individual detector SNRs. If given will remove triggers that + fail cuts + + Returns + ------- + null: numpy.ndarray + Null SNR for surviving triggers + rho_coh: numpy.ndarray + Coherent SNR for surviving triggers + rho_coinc: numpy.ndarray + Coincident SNR for surviving triggers + index: dict + Indexes for surviving triggers + snrv: dict + Single detector SNRs for surviving triggers + """ + index = {} if index is None else index + snrv = {} if snrv is None else snrv + # Calculate null SNRs + null2 = rho_coinc ** 2 - rho_coh ** 2 + # Numerical errors may make this negative and break the sqrt, so set + # negative values to 0. + null2[null2 < 0] = 0 + null = null2 ** 0.5 + if apply_cut: + # Make cut on null. + keep = ( + ((null < null_min) & (rho_coh <= null_step)) + | ( + (null < ((rho_coh - null_step) * null_grad + null_min)) + & (rho_coh > null_step) + ) + ) + index = index[keep] + rho_coh = rho_coh[keep] + snrv = {ifo: snrv[ifo][keep] for ifo in snrv} + rho_coinc = rho_coinc[keep] + null = null[keep] + return null, rho_coh, rho_coinc, index, snrv
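A numerical sketch of the null SNR definition with invented inputs; with apply_cut=False the function simply returns sqrt(rho_coinc**2 - rho_coh**2) alongside the unmodified inputs.

import numpy as np
from pycbc.events.coherent import null_snr

rho_coh = np.array([5.0, 8.0])
rho_coinc = np.array([6.0, 12.0])

null, coh, coinc, _, _ = null_snr(rho_coh, rho_coinc, apply_cut=False)
print(null)  # -> [3.3166..., 8.9443...], i.e. sqrt(rho_coinc**2 - rho_coh**2)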
+ + + +
+[docs] +def reweight_snr_by_null( + network_snr, null, coherent, null_min=5.25, null_grad=0.2, + null_step=20.0): + """Re-weight the detection statistic as a function of the null SNR. + See Eq. 16 of Williamson et al. (2014) [arXiv:1410.6042]. + + Parameters + ---------- + network_snr: numpy.ndarray + Array containing SNR statistic to be re-weighted + null: numpy.ndarray + Null SNR array + coherent: + Coherent SNR array + + Returns + ------- + rw_snr: numpy.ndarray + Re-weighted SNR for each trigger + """ + downweight = ( + ((null > null_min - 1) & (coherent <= null_step)) + | ( + (null > (coherent * null_grad + null_min - 1)) + & (coherent > null_step) + ) + ) + rw_fac = np.where( + coherent > null_step, + 1 + null - (null_min - 1) - (coherent - null_step) * null_grad, + 1 + null - (null_min - 1) + ) + rw_snr = np.where(downweight, network_snr / rw_fac, network_snr) + return rw_snr
+ + + +
+[docs] +def reweightedsnr_cut(rw_snr, rw_snr_threshold): + """ + Perform a cut on reweighted SNR based on a given threshold. + + Parameters + ---------- + rw_snr: array of reweighted snr + rw_snr_threshold: any reweighted snr below this threshold is set to 0 + + Returns + ------- + rw_snr: array of reweighted snr with cut values as 0 + + """ + if rw_snr_threshold is not None: + rw_snr = np.where(rw_snr < rw_snr_threshold, 0, rw_snr) + return rw_snr
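A one-line sketch with invented values: entries below the threshold are zeroed and everything else passes through unchanged.

import numpy as np
from pycbc.events.coherent import reweightedsnr_cut

rw_snr = np.array([4.0, 6.5, 9.2])
print(reweightedsnr_cut(rw_snr, 6.0))   # -> [0., 6.5, 9.2]
print(reweightedsnr_cut(rw_snr, None))  # a threshold of None leaves the array untouched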
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/coinc.html b/latest/html/_modules/pycbc/events/coinc.html new file mode 100644 index 00000000000..4783a371aa0 --- /dev/null +++ b/latest/html/_modules/pycbc/events/coinc.html @@ -0,0 +1,1697 @@ + + + + + + pycbc.events.coinc — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.coinc

+# Copyright (C) 2015 Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" This module contains functions for calculating and manipulating
+coincident triggers.
+"""
+
+import numpy
+import logging
+import copy
+import time as timemod
+import threading
+
+import pycbc.pnutils
+from pycbc.detector import Detector, ppdets
+from pycbc import conversions as conv
+
+from . import stat as pycbcstat
+from .eventmgr_cython import coincbuffer_expireelements
+from .eventmgr_cython import coincbuffer_numgreater
+from .eventmgr_cython import timecoincidence_constructidxs
+from .eventmgr_cython import timecoincidence_constructfold
+from .eventmgr_cython import timecoincidence_getslideint
+from .eventmgr_cython import timecoincidence_findidxlen
+from .eventmgr_cython import timecluster_cython
+
+logger = logging.getLogger('pycbc.events.coinc')
+
+
+
+[docs] +def background_bin_from_string(background_bins, data): + """ Return template ids for each bin as defined by the format string + + Parameters + ---------- + bins: list of strings + List of strings which define how a background bin is taken from the + list of templates. + data: dict of numpy.ndarrays + Dict with parameter key values and numpy.ndarray values which define + the parameters of the template bank to bin up. + + Returns + ------- + bins: dict + Dictionary of location indices indexed by a bin name + """ + used = numpy.array([], dtype=numpy.uint32) + bins = {} + # Some duration/peak frequency functions are expensive. + # Do not want to recompute many times, if using lots of bins. + cached_values = {} + for mbin in background_bins: + locs = None + name, bin_type_list, boundary_list = tuple(mbin.split(':')) + + bin_type_list = bin_type_list.split(',') + boundary_list = boundary_list.split(',') + + for bin_type, boundary in zip(bin_type_list, boundary_list): + if boundary[0:2] == 'lt': + member_func = lambda vals, bd=boundary : vals < float(bd[2:]) + elif boundary[0:2] == 'gt': + member_func = lambda vals, bd=boundary : vals > float(bd[2:]) + else: + raise RuntimeError("Can't parse boundary condition! Must begin " + "with 'lt' or 'gt'") + + if bin_type in cached_values: + vals = cached_values[bin_type] + elif bin_type == 'component' and boundary[0:2] == 'lt': + # maximum component mass is less than boundary value + vals = numpy.maximum(data['mass1'], data['mass2']) + elif bin_type == 'component' and boundary[0:2] == 'gt': + # minimum component mass is greater than bdary + vals = numpy.minimum(data['mass1'], data['mass2']) + elif bin_type == 'total': + vals = data['mass1'] + data['mass2'] + elif bin_type == 'chirp': + vals = pycbc.pnutils.mass1_mass2_to_mchirp_eta( + data['mass1'], data['mass2'])[0] + elif bin_type == 'ratio': + vals = conv.q_from_mass1_mass2(data['mass1'], data['mass2']) + elif bin_type == 'eta': + vals = pycbc.pnutils.mass1_mass2_to_mchirp_eta( + data['mass1'], + data['mass2'] + )[1] + elif bin_type == 'chi_eff': + vals = conv.chi_eff( + data['mass1'], + data['mass2'], + data['spin1z'], + data['spin2z'] + ) + elif bin_type.endswith('Peak'): + vals = pycbc.pnutils.get_freq( + 'f' + bin_type, + data['mass1'], + data['mass2'], + data['spin1z'], + data['spin2z'] + ) + cached_values[bin_type] = vals + elif bin_type.endswith('duration'): + vals = pycbc.pnutils.get_imr_duration( + data['mass1'], + data['mass2'], + data['spin1z'], + data['spin2z'], + data['f_lower'], + approximant=bin_type.replace('duration', '') + ) + cached_values[bin_type] = vals + else: + raise ValueError('Invalid bin type %s' % bin_type) + + sub_locs = member_func(vals) + sub_locs = numpy.where(sub_locs)[0] + if locs is not None: + # find intersection of boundary conditions + locs = numpy.intersect1d(locs, sub_locs) + else: + locs = sub_locs + + # make sure we don't reuse anything from an earlier bin + locs = numpy.delete(locs, numpy.where(numpy.in1d(locs, used))[0]) + used = numpy.concatenate([used, locs]) + bins[name] = locs + + return bins
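A toy sketch of the bin-string format with an invented three-template bank: templates with total mass below 4 fall into a 'bns' bin and the remainder into 'rest'.

import numpy as np
from pycbc.events.coinc import background_bin_from_string

bank = {'mass1': np.array([1.4, 10., 30.]),
        'mass2': np.array([1.4, 5., 25.])}
bins = background_bin_from_string(['bns:total:lt4', 'rest:total:gt4'], bank)
print(bins)  # -> {'bns': array([0]), 'rest': array([1, 2])}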
+ + + +
+[docs] +def timeslide_durations(start1, start2, end1, end2, timeslide_offsets): + """ Find the coincident time for each timeslide. + + Find the coincident time for each timeslide, where the first time vector + is slid to the right by the offset in the given timeslide_offsets vector. + + Parameters + ---------- + start1: numpy.ndarray + Array of the start of valid analyzed times for detector 1 + start2: numpy.ndarray + Array of the start of valid analyzed times for detector 2 + end1: numpy.ndarray + Array of the end of valid analyzed times for detector 1 + end2: numpy.ndarray + Array of the end of valid analyzed times for detector 2 + timeslide_offsets: numpy.ndarray + Array of offsets (in seconds) for each timeslide + + Returns + -------- + durations: numpy.ndarray + Array of coincident time for each timeslide in the offset array + """ + from . import veto + durations = [] + seg2 = veto.start_end_to_segments(start2, end2) + for offset in timeslide_offsets: + seg1 = veto.start_end_to_segments(start1 + offset, end1 + offset) + durations.append(abs((seg1 & seg2).coalesce())) + return numpy.array(durations)
+ + + +
+[docs] +def time_coincidence(t1, t2, window, slide_step=0): + """ Find coincidences by time window + + Parameters + ---------- + t1 : numpy.ndarray + Array of trigger times from the first detector + t2 : numpy.ndarray + Array of trigger times from the second detector + window : float + Coincidence window maximum time difference, arbitrary units (usually s) + slide_step : float (default 0) + If calculating background coincidences, the interval between background + slides, arbitrary units (usually s) + + Returns + ------- + idx1 : numpy.ndarray + Array of indices into the t1 array for coincident triggers + idx2 : numpy.ndarray + Array of indices into the t2 array + slide : numpy.ndarray + Array of slide ids + """ + if slide_step: + length1 = len(t1) + length2 = len(t2) + fold1 = numpy.zeros(length1, dtype=numpy.float64) + fold2 = numpy.zeros(length2, dtype=numpy.float64) + timecoincidence_constructfold(fold1, fold2, t1, t2, slide_step, + length1, length2) + else: + fold1 = t1 + fold2 = t2 + + sort1 = fold1.argsort() + sort2 = fold2.argsort() + fold1 = fold1[sort1] + fold2 = fold2[sort2] + + if slide_step: + # FIXME explain this + fold2 = numpy.concatenate([fold2 - slide_step, fold2, + fold2 + slide_step]) + + left = fold2.searchsorted(fold1 - window) + right = fold2.searchsorted(fold1 + window) + + lenidx = timecoincidence_findidxlen(left, right, len(left)) + idx1 = numpy.zeros(lenidx, dtype=numpy.uint32) + idx2 = numpy.zeros(lenidx, dtype=numpy.uint32) + timecoincidence_constructidxs(idx1, idx2, sort1, sort2, left, right, + len(left), len(sort2)) + + slide = numpy.zeros(lenidx, dtype=numpy.int32) + if slide_step: + timecoincidence_getslideint(slide, t1, t2, idx1, idx2, slide_step) + else: + slide = numpy.zeros(len(idx1)) + + return idx1, idx2, slide
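A zero-lag sketch with invented trigger times: only the pair separated by less than the 10 ms window is returned.

import numpy as np
from pycbc.events.coinc import time_coincidence

t1 = np.array([100.000, 200.000, 300.000])
t2 = np.array([100.005, 250.000])

idx1, idx2, slide = time_coincidence(t1, t2, window=0.01)
print(idx1, idx2, slide)  # -> [0] [0] [0.]  (slide ids are all zero when slide_step=0)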
+ + + +
+[docs] +def time_multi_coincidence(times, slide_step=0, slop=.003, + pivot='H1', fixed='L1'): + """ Find multi detector coincidences. + + Parameters + ---------- + times: dict of numpy.ndarrays + Dictionary keyed by ifo of single ifo trigger times + slide_step: float + Interval between time slides + slop: float + The amount of time to add to the TOF between detectors for coincidence + pivot: str + The ifo to which time shifts are applied in first stage coincidence + fixed: str + The other ifo used in first stage coincidence, subsequently used as a + time reference for additional ifos. All other ifos are not time shifted + relative to this ifo + + Returns + ------- + ids: dict of arrays of int + Dictionary keyed by ifo with ids of trigger times forming coincidences. + Coincidence is tested for every pair of ifos that can be formed from + the input dict: only those tuples of times passing all tests are + recorded + slide: array of int + Slide ids of coincident triggers in pivot ifo + """ + def win(ifo1, ifo2): + d1 = Detector(ifo1) + d2 = Detector(ifo2) + return d1.light_travel_time_to_detector(d2) + slop + + # Find coincs between the 'pivot' and 'fixed' detectors as in 2-ifo case + pivot_id, fix_id, slide = time_coincidence(times[pivot], times[fixed], + win(pivot, fixed), + slide_step=slide_step) + + # Additional detectors do not slide independently of the 'fixed' one + # Each trigger in an additional detector must be concident with both + # triggers in an existing coincidence + + # Slide 'pivot' trigger times to be coincident with trigger times in + # 'fixed' detector + fixed_time = times[fixed][fix_id] + pivot_time = times[pivot][pivot_id] - slide_step * slide + ctimes = {fixed: fixed_time, pivot: pivot_time} + ids = {fixed: fix_id, pivot: pivot_id} + + dep_ifos = [ifo for ifo in times.keys() if ifo != fixed and ifo != pivot] + for ifo1 in dep_ifos: + # FIXME - make this loop into a function? + + # otime is extra ifo time in original trigger order + otime = times[ifo1] + # tsort gives ordering from original order to time sorted order + tsort = otime.argsort() + time1 = otime[tsort] + + # Find coincidences between dependent ifo triggers and existing coincs + # - Cycle over fixed and pivot + # - At the 1st iteration, the fixed and pivot triggers are reduced to + # those for which the first out of fixed/pivot forms a coinc with ifo1 + # - At the 2nd iteration, we are left with triggers for which both + # fixed and pivot are coincident with ifo1 + # - If there is more than 1 dependent ifo, ones that were previously + # tested against fixed and pivot are now present for testing with new + # dependent ifos + for ifo2 in ids: + logger.info('added ifo %s, testing against %s', ifo1, ifo2) + w = win(ifo1, ifo2) + left = time1.searchsorted(ctimes[ifo2] - w) + right = time1.searchsorted(ctimes[ifo2] + w) + # Any times within time1 coincident with the time in ifo2 have + # indices between 'left' and 'right' + # 'nz' indexes into times in ifo2 which have coincidences with ifo1 + # times + nz = (right - left).nonzero() + if len(right - left): + rlmax = (right - left).max() + if len(nz[0]) and rlmax > 1: + # We expect at most one coincident time in ifo1, assuming + # trigger spacing in ifo1 > time window. + # However there are rare corner cases at starts/ends of inspiral + # jobs. For these, arbitrarily keep the first trigger and + # discard the second (and any subsequent ones). + logger.warning('Triggers in %s are closer than coincidence ' + 'window, 1 or more coincs will be discarded. 
' + 'This is a warning, not an error.' % ifo1) + # identify indices of times in ifo1 that form coincs with ifo2 + dep_ids = left[nz] + # slide is array of slide ids attached to pivot ifo + slide = slide[nz] + + for ifo in ctimes: + # cycle over fixed and pivot & any previous additional ifos + # reduce times and IDs to just those forming a coinc with ifo1 + ctimes[ifo] = ctimes[ifo][nz] + ids[ifo] = ids[ifo][nz] + + # undo time sorting on indices of ifo1 triggers, add ifo1 ids and times + # to dicts for testing against any additional detectrs + ids[ifo1] = tsort[dep_ids] + ctimes[ifo1] = otime[ids[ifo1]] + + return ids, slide
+ + + +
+[docs] +def cluster_coincs(stat, time1, time2, timeslide_id, slide, window, **kwargs): + """Cluster coincident events for each timeslide separately, across + templates, based on the ranking statistic + + Parameters + ---------- + stat: numpy.ndarray + vector of ranking values to maximize + time1: numpy.ndarray + first time vector + time2: numpy.ndarray + second time vector + timeslide_id: numpy.ndarray + vector that determines the timeslide offset + slide: float + length of the timeslides offset interval + window: float + length to cluster over + + Returns + ------- + cindex: numpy.ndarray + The set of indices corresponding to the surviving coincidences. + """ + if len(time1) == 0 or len(time2) == 0: + logger.info('No coinc triggers in one, or both, ifos.') + return numpy.array([]) + + if numpy.isfinite(slide): + # for a time shifted coinc, time1 is greater than time2 by approximately timeslide_id*slide + # adding this quantity gives a mean coinc time located around time1 + time = (time1 + time2 + timeslide_id * slide) / 2 + else: + time = 0.5 * (time2 + time1) + + tslide = timeslide_id.astype(numpy.longdouble) + time = time.astype(numpy.longdouble) + + span = (time.max() - time.min()) + window * 10 + time = time + span * tslide + logger.info('Clustering events over %s s window', window) + cidx = cluster_over_time(stat, time, window, **kwargs) + logger.info('%d triggers remaining', len(cidx)) + return cidx
+ + + +
+[docs] +def cluster_coincs_multiifo(stat, time_coincs, timeslide_id, slide, window, + **kwargs): + """Cluster coincident events for each timeslide separately, across + templates, based on the ranking statistic + + Parameters + ---------- + stat: numpy.ndarray + vector of ranking values to maximize + time_coincs: tuple of numpy.ndarrays + trigger times for each ifo, or -1 if an ifo does not participate in a coinc + timeslide_id: numpy.ndarray + vector that determines the timeslide offset + slide: float + length of the timeslides offset interval + window: float + duration of clustering window in seconds + + Returns + ------- + cindex: numpy.ndarray + The set of indices corresponding to the surviving coincidences + """ + time_coinc_zip = list(zip(*time_coincs)) + if len(time_coinc_zip) == 0: + logger.info('No coincident triggers.') + return numpy.array([]) + + time_avg_num = [] + #find number of ifos and mean time over participating ifos for each coinc + for tc in time_coinc_zip: + time_avg_num.append(mean_if_greater_than_zero(tc)) + + time_avg, num_ifos = zip(*time_avg_num) + + time_avg = numpy.array(time_avg) + num_ifos = numpy.array(num_ifos) + + # shift all but the pivot ifo by (num_ifos-1) * timeslide_id * slide + # this leads to a mean coinc time located around pivot time + if numpy.isfinite(slide): + nifos_minusone = (num_ifos - numpy.ones_like(num_ifos)) + time_avg = time_avg + (nifos_minusone * timeslide_id * slide)/num_ifos + + tslide = timeslide_id.astype(numpy.longdouble) + time_avg = time_avg.astype(numpy.longdouble) + + span = (time_avg.max() - time_avg.min()) + window * 10 + time_avg = time_avg + span * tslide + logger.info('Clustering events over %s s window', window) + cidx = cluster_over_time(stat, time_avg, window, **kwargs) + logger.info('%d triggers remaining', len(cidx)) + + return cidx
+ + + +
+[docs] +def mean_if_greater_than_zero(vals): + """ Calculate mean over numerical values, ignoring values less than zero. + E.g. used for mean time over coincident triggers when timestamps are set + to -1 for ifos not included in the coincidence. + + Parameters + ---------- + vals: iterator of numerical values + values to be mean averaged + + Returns + ------- + mean: float + The mean of the values in the original vector which are + greater than zero + num_above_zero: int + The number of entries in the vector which are above zero + """ + vals = numpy.array(vals) + above_zero = vals > 0 + return vals[above_zero].mean(), above_zero.sum()
+ + + +
+[docs] +def cluster_over_time(stat, time, window, method='python', + argmax=numpy.argmax): + """Cluster generalized transient events over time via maximum stat over a + symmetric sliding window + + Parameters + ---------- + stat: numpy.ndarray + vector of ranking values to maximize + time: numpy.ndarray + time to use for clustering + window: float + length to cluster over + method: string + Either "cython" to use the cython implementation, or "python" to use + the pure python version. + argmax: function + the function used to calculate the maximum value + + Returns + ------- + cindex: numpy.ndarray + The set of indices corresponding to the surviving coincidences. + """ + + indices = [] + time_sorting = time.argsort() + stat = stat[time_sorting] + time = time[time_sorting] + + left = time.searchsorted(time - window) + right = time.searchsorted(time + window) + indices = numpy.zeros(len(left), dtype=numpy.uint32) + + logger.debug('%d triggers before clustering', len(time)) + + if method == 'cython': + j = timecluster_cython(indices, left, right, stat, len(left)) + elif method == 'python': + # i is the index we are inspecting, j is the next one to save + i = 0 + j = 0 + while i < len(left): + l = left[i] + r = right[i] + + # If there are no other points to compare it is obviously the max + if (r - l) == 1: + indices[j] = i + j += 1 + i += 1 + continue + + # Find the location of the maximum within the time interval + # around i + max_loc = argmax(stat[l:r]) + l + + # If this point is the max, we can skip to the right boundary + if max_loc == i: + indices[j] = i + i = r + j += 1 + + # If the max is later than i, we can skip to it + elif max_loc > i: + i = max_loc + + elif max_loc < i: + i += 1 + else: + raise ValueError(f'Do not recognize method {method}') + + indices = indices[:j] + + logger.debug('%d triggers remaining', len(indices)) + return time_sorting[indices]
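A small sketch of the clustering behaviour with invented values: the three triggers near t = 100 s fall within the loudest trigger's 1 s window, so only that one survives, while the isolated trigger at t = 200 s is kept.

import numpy as np
from pycbc.events.coinc import cluster_over_time

stat = np.array([5.0, 9.0, 6.0, 7.5])
time = np.array([100.0, 100.5, 101.0, 200.0])

keep = cluster_over_time(stat, time, window=1.0, method='python')
print(keep)  # -> [1 3]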
+ + + +
+[docs] +class MultiRingBuffer(object): + """Dynamic size n-dimensional ring buffer that can expire elements.""" + + def __init__(self, num_rings, max_time, dtype, min_buffer_size=16, + buffer_increment=8, resize_invalid_fraction=0.4): + """ + Parameters + ---------- + num_rings: int + The number of ring buffers to create. They all will have the same + intrinsic size and will expire at the same time. + max_time: int + The maximum "time" an element can exist in each ring. + dtype: numpy.dtype + The type of each element in the ring buffer. + min_buffer_size: int (optional: default=16) + All ring buffers will be initialized to this length. If a buffer is + made larger it will be made no smaller than this value. Buffers may + become smaller than this length at any given time as triggers are + expired. + buffer_increment: int (optional: default=8) + When increasing ring buffers, add this many points. Be careful if + changing this and min_buffer_size from default values, it is + possible to get stuck in a mode where the buffers are always being + resized. + resize_invalid_fraction: float (optional: default=0.4) + If this fraction of any buffer contains unused data points then + resize it to contain only valid points. As with the previous two + options, be careful changing default values, it is + possible to get stuck in a mode where the buffers are always being + resized. + """ + self.max_time = max_time + self.buffer = [] + self.buffer_expire = [] + self.valid_ends = [] + self.valid_starts = [] + self.min_buffer_size = min_buffer_size + self.buffer_increment = buffer_increment + self.resize_invalid_fraction = resize_invalid_fraction + for _ in range(num_rings): + self.buffer.append(numpy.zeros(self.min_buffer_size, dtype=dtype)) + self.buffer_expire.append(numpy.zeros(self.min_buffer_size, + dtype=int)) + self.valid_ends.append(0) + self.valid_starts.append(0) + self.time = 0 + + @property + def filled_time(self): + return min(self.time, self.max_time) + +
+[docs] + def num_elements(self): + count = 0 + for idx, a in enumerate(self.buffer): + vals = self.valid_starts[idx] + vale = self.valid_ends[idx] + count += len(a[vals:vale]) + return count
+ + + @property + def nbytes(self): + return sum([a.nbytes for a in self.buffer]) + +
+[docs] + def discard_last(self, indices): + """Discard the triggers added in the latest update""" + for i in indices: + self.valid_ends[i] -= 1
+ + +
+[docs] + def advance_time(self): + """Advance the internal time increment by 1, expiring any triggers + that are now too old. + """ + self.time += 1
+ + +
+[docs] + def add(self, indices, values): + """Add triggers in 'values' to the buffers indicated by the indices + """ + for i, v in zip(indices, values): + # Expand ring buffer size if needed + if self.valid_ends[i] == len(self.buffer[i]): + # First clear out any old triggers before resizing + self.update_valid_start(i) + self.check_expired_triggers(i) + + # Then increase arrays by buffer_increment + self.buffer[i] = numpy.resize( + self.buffer[i], + max( + len(self.buffer[i]) + self.buffer_increment, + self.min_buffer_size + ) + ) + self.buffer_expire[i] = numpy.resize( + self.buffer_expire[i], + max( + len(self.buffer[i]) + self.buffer_increment, + self.min_buffer_size + ) + ) + curr_pos = self.valid_ends[i] + self.buffer[i][curr_pos] = v + self.buffer_expire[i][curr_pos] = self.time + self.valid_ends[i] = self.valid_ends[i] + 1 + self.advance_time()
+ + +
+[docs] + def valid_slice(self, buffer_index): + """Return the valid slice for this buffer index""" + ret_slice = slice( + self.valid_starts[buffer_index], + self.valid_ends[buffer_index] + ) + return ret_slice
+ + +
+[docs] + def expire_vector(self, buffer_index): + """Return the expiration vector of a given ring buffer """ + return self.buffer_expire[buffer_index][self.valid_slice(buffer_index)]
+ + +
+[docs] + def update_valid_start(self, buffer_index): + """Update the valid_start for the given buffer index""" + expired = self.time - self.max_time + exp = self.buffer_expire[buffer_index] + j = self.valid_starts[buffer_index] + while j < self.valid_ends[buffer_index]: + # Everything before this j must be expired + if exp[j] >= expired: + break + j += 1 + self.valid_starts[buffer_index] = j
+ + +
+[docs] + def check_expired_triggers(self, buffer_index): + """Check if we should free memory for this buffer index. + + Check what fraction of triggers are expired in the specified buffer + and if it is more than the allowed fraction (set by + self.resize_invalid_fraction) resize the array to remove them. + """ + val_start = self.valid_starts[buffer_index] + val_end = self.valid_ends[buffer_index] + buf_len = len(self.buffer[buffer_index]) + invalid_limit = self.resize_invalid_fraction * buf_len + if (buf_len - val_end) + val_start > invalid_limit: + # If self.resize_invalid_fraction of stored triggers are expired + # or are not set, free up memory + self.buffer_expire[buffer_index] = self.buffer_expire[buffer_index][val_start:val_end].copy() + self.buffer[buffer_index] = self.buffer[buffer_index][val_start:val_end].copy() + self.valid_ends[buffer_index] -= val_start + self.valid_starts[buffer_index] = 0
+ + +
+[docs] + def data(self, buffer_index): + """Return the data vector for a given ring buffer""" + self.update_valid_start(buffer_index) + self.check_expired_triggers(buffer_index) + + return self.buffer[buffer_index][self.valid_slice(buffer_index)]
+
+ + + +
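+
+# A short, self-contained sketch of how MultiRingBuffer behaves; the dtype,
+# sizes and values here are illustrative assumptions.
+import numpy
+ring = MultiRingBuffer(num_rings=2, max_time=3, dtype=[('snr', numpy.float32)])
+for step in range(5):
+    # add one trigger to ring 0 each time step; add() also advances the clock
+    ring.add([0], numpy.array([(float(step),)], dtype=ring.buffer[0].dtype))
+# only triggers newer than max_time remain visible
+print(ring.data(0)['snr'])  # -> [2. 3. 4.]
+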
+[docs] +class CoincExpireBuffer(object): + """Unordered dynamic sized buffer that handles + multiple expiration vectors. + """ + + def __init__(self, expiration, ifos, + initial_size=2**20, dtype=numpy.float32): + """ + Parameters + ---------- + expiration: int + The 'time' in arbitrary integer units to allow to pass before + removing an element. + ifos: list of strs + List of strings to identify the multiple data expiration times. + initial_size: int, optional + The initial size of the buffer. + dtype: numpy.dtype + The dtype of each element of the buffer. + """ + + self.expiration = expiration + self.buffer = numpy.zeros(initial_size, dtype=dtype) + self.index = 0 + self.ifos = ifos + + self.time = {} + self.timer = {} + for ifo in self.ifos: + self.time[ifo] = 0 + self.timer[ifo] = numpy.zeros(initial_size, dtype=numpy.int32) + + def __len__(self): + return self.index + + @property + def nbytes(self): + """Returns the approximate memory usage of self. + """ + nbs = [self.timer[ifo].nbytes for ifo in self.ifos] + nbs.append(self.buffer.nbytes) + return sum(nbs) + +
+[docs] + def increment(self, ifos): + """Increment without adding triggers""" + self.add([], [], ifos)
+ + +
+[docs] + def remove(self, num): + """Remove the last 'num' elements from the buffer""" + self.index -= num
+ + +
+[docs] + def add(self, values, times, ifos): + """Add values to the internal buffer + + Parameters + ---------- + values: numpy.ndarray + Array of elements to add to the internal buffer. + times: dict of arrays + The current time to use for each element being added. + ifos: list of strs + The set of timers to be incremented. + """ + + for ifo in ifos: + self.time[ifo] += 1 + + # Resize the internal buffer if we need more space + if self.index + len(values) >= len(self.buffer): + newlen = len(self.buffer) * 2 + for ifo in self.ifos: + self.timer[ifo].resize(newlen) + self.buffer.resize(newlen, refcheck=False) + + self.buffer[self.index:self.index+len(values)] = values + if len(values) > 0: + for ifo in self.ifos: + self.timer[ifo][self.index:self.index+len(values)] = times[ifo] + + self.index += len(values) + + # Remove the expired old elements + if len(ifos) == 2: + # Cython version for two ifo case + self.index = coincbuffer_expireelements( + self.buffer, + self.timer[ifos[0]], + self.timer[ifos[1]], + self.time[ifos[0]], + self.time[ifos[1]], + self.expiration, + self.index + ) + else: + # Numpy version for >2 ifo case + keep = None + for ifo in ifos: + kt = self.timer[ifo][:self.index] >= self.time[ifo] - self.expiration + keep = numpy.logical_and(keep, kt) if keep is not None else kt + + self.buffer[:keep.sum()] = self.buffer[:self.index][keep] + for ifo in self.ifos: + self.timer[ifo][:keep.sum()] = self.timer[ifo][:self.index][keep] + self.index = keep.sum()
+ + +
+[docs] + def num_greater(self, value): + """Return the number of elements larger than 'value'""" + return coincbuffer_numgreater(self.buffer, self.index, value)
+ + + @property + def data(self): + """Return the array of elements""" + return self.buffer[:self.index]
+ + + +
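+
+# A minimal sketch of CoincExpireBuffer bookkeeping for a two-detector
+# network; the statistic values and expiration settings are made up.
+import numpy
+cbuf = CoincExpireBuffer(expiration=10, ifos=['H1', 'L1'])
+stats = numpy.array([4.5, 6.0], dtype=numpy.float32)
+expire_times = {'H1': numpy.array([1, 1], dtype=numpy.int32),
+                'L1': numpy.array([1, 1], dtype=numpy.int32)}
+cbuf.add(stats, expire_times, ['H1', 'L1'])
+print(len(cbuf), cbuf.num_greater(5.0))  # -> 2 1
+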
+[docs] +class LiveCoincTimeslideBackgroundEstimator(object): + """Rolling buffer background estimation.""" + + def __init__(self, num_templates, analysis_block, background_statistic, + sngl_ranking, stat_files, ifos, + ifar_limit=100, + timeslide_interval=.035, + coinc_window_pad=.002, + statistic_refresh_rate=None, + return_background=False, + **kwargs): + """ + Parameters + ---------- + num_templates: int + The size of the template bank + analysis_block: int + The number of seconds in each analysis segment + background_statistic: str + The name of the statistic to rank coincident events. + sngl_ranking: str + The single detector ranking to use with the background statistic + stat_files: list of strs + List of filenames that contain information used to construct + various coincident statistics. + ifos: list of strs + List of ifo names that are being analyzed. At the moment this must + be two items such as ['H1', 'L1']. + ifar_limit: float + The largest inverse false alarm rate in years that we would like to + calculate. + timeslide_interval: float + The time in seconds between consecutive timeslide offsets. + coinc_window_pad: float + Amount of time allowed to form a coincidence in addition to the + time of flight in seconds. + statistic_refresh_rate: float + How regularly to run the update_files method on the statistic + class (in seconds); by default this is not done + return_background: boolean + If true, background triggers will also be included in the file + output. + kwargs: dict + Additional options for the statistic to use. See stat.py + for more details on statistic options. + """ + self.num_templates = num_templates + self.analysis_block = analysis_block + + stat_class = pycbcstat.get_statistic(background_statistic) + self.stat_calculator = stat_class( + sngl_ranking, + stat_files, + ifos=ifos, + **kwargs + ) + + self.time_stat_refreshed = timemod.time() + self.stat_calculator_lock = threading.Lock() + self.statistic_refresh_rate = statistic_refresh_rate + + self.timeslide_interval = timeslide_interval + self.return_background = return_background + self.coinc_window_pad = coinc_window_pad + + self.ifos = ifos + if len(self.ifos) != 2: + raise ValueError("Only a two ifo analysis is supported at this time") + + self.lookback_time = (ifar_limit / conv.sec_to_year(1.) * timeslide_interval) ** 0.5 + self.buffer_size = int(numpy.ceil(self.lookback_time / analysis_block)) + + self.dets = {ifo: Detector(ifo) for ifo in ifos} + + self.time_window = self.dets[ifos[0]].light_travel_time_to_detector( + self.dets[ifos[1]]) + coinc_window_pad + self.coincs = CoincExpireBuffer(self.buffer_size, self.ifos) + + self.singles = {} + + # temporary array used in `_find_coincs()` to turn `trig_stat` + # into an array much faster than using `numpy.resize()` + self.trig_stat_memory = None + +
+[docs] + @classmethod + def pick_best_coinc(cls, coinc_results): + """Choose the best two-ifo coinc by ifar first, then statistic if needed. + + This function picks which of the available double-ifo coincs to use. + It chooses the best (highest) ifar. The ranking statistic is used as + a tie-breaker. + A trials factor is applied if multiple types of coincs are possible + at this time given the active ifos. + + Parameters + ---------- + coinc_results: list of coinc result dicts + Dictionary by detector pair of coinc result dicts. + + Returns + ------- + best: coinc results dict + If there is a coinc, this will contain the 'best' one. Otherwise + it will return the provided dict. + """ + mstat = 0 + mifar = 0 + mresult = None + + # record the trials factor from the possible coincs we could + # maximize over + trials = 0 + for result in coinc_results: + # Check that a coinc was possible. See the 'add_singles' method + # to see where this flag was added into the results dict + if 'coinc_possible' in result: + trials += 1 + + # Check that a coinc exists + if 'foreground/ifar' in result: + ifar = result['foreground/ifar'] + stat = result['foreground/stat'] + if ifar > mifar or (ifar == mifar and stat > mstat): + mifar = ifar + mstat = stat + mresult = result + + # apply trials factor for the best coinc + if mresult: + mresult['foreground/ifar'] = mifar / float(trials) + logger.info('Found %s coinc with ifar %s', + mresult['foreground/type'], + mresult['foreground/ifar']) + return mresult + # If no coinc, just return one of the results dictionaries. They will + # all contain the same results (i.e. single triggers) in this case. + else: + return coinc_results[0]
+ + +
+[docs] + @classmethod + def from_cli(cls, args, num_templates, analysis_chunk, ifos): + + # Allow None inputs + stat_files = args.statistic_files or [] + stat_keywords = args.statistic_keywords or [] + + # flatten the list of lists of filenames to a single list (may be empty) + stat_files = sum(stat_files, []) + + kwargs = pycbcstat.parse_statistic_keywords_opt(stat_keywords) + + return cls(num_templates, analysis_chunk, + args.ranking_statistic, + args.sngl_ranking, + stat_files, + return_background=args.store_background, + ifar_limit=args.background_ifar_limit, + timeslide_interval=args.timeslide_interval, + ifos=ifos, + coinc_window_pad=args.coinc_window_pad, + statistic_refresh_rate=args.statistic_refresh_rate, + **kwargs)
+ + +
+[docs] + @staticmethod + def insert_args(parser): + + pycbcstat.insert_statistic_option_group(parser) + + group = parser.add_argument_group('Coincident Background Estimation') + group.add_argument('--store-background', action='store_true', + help="Return background triggers with zerolag coincidences") + group.add_argument('--background-ifar-limit', type=float, + help="The limit on inverse false alarm rate to calculate " + "background in years", default=100.0) + group.add_argument('--timeslide-interval', type=float, + help="The interval between timeslides in seconds", default=0.1) + group.add_argument('--ifar-remove-threshold', type=float, + help="NOT YET IMPLEMENTED", default=100.0)
+ + +
+[docs] + @staticmethod + def verify_args(args, parser): + """Verify that psd-var-related options are consistent""" + if ((hasattr(args, 'psd_variation') and not args.psd_variation) + and 'psdvar' in args.sngl_ranking): + parser.error(f"The single ifo ranking stat {args.sngl_ranking} " + "requires --psd-variation.")
+ + + @property + def background_time(self): + """Return the amount of background time that the buffers contain""" + time = 1.0 / self.timeslide_interval + for ifo in self.singles: + time *= self.singles[ifo].filled_time * self.analysis_block + return time + +
+[docs] + def save_state(self, filename): + """Save the current state of the background buffers""" + import pickle + pickle.dump(self, filename)
+ + +
+[docs] + @staticmethod + def restore_state(filename): + """Restore state of the background buffers from a file""" + import pickle + return pickle.load(filename)
+ + +
+[docs] + def ifar(self, coinc_stat): + """Map a given value of the coincident ranking statistic to an inverse + false-alarm rate (IFAR) using the internally stored background sample. + + Parameters + ---------- + coinc_stat: float + Value of the coincident ranking statistic to be converted. + + Returns + ------- + ifar: float + Inverse false-alarm rate in units of years. + ifar_saturated: bool + True if `coinc_stat` is larger than all the available background, + in which case `ifar` is to be considered an upper limit. + """ + n = self.coincs.num_greater(coinc_stat) + ifar = conv.sec_to_year(self.background_time) / (n + 1) + return ifar, n == 0
+ + +
+[docs] + def set_singles_buffer(self, results): + """Create the singles buffer + + This creates the singles buffer for each ifo. The dtype is determined + by a representative sample of the single triggers in the results. + + Parameters + ---------- + results: dict of dict + Dict indexed by ifo and then trigger column. + """ + # Determine the dtype from a sample of the data. + self.singles_dtype = [] + data = False + for ifo in self.ifos: + if ifo in results and results[ifo] is not False \ + and len(results[ifo]['snr']): + data = results[ifo] + break + + if data is False: + return + + for key in data: + self.singles_dtype.append((key, data[key].dtype)) + + if 'stat' not in data: + self.singles_dtype.append(('stat', self.stat_calculator.single_dtype)) + + # Create a ring buffer for each template ifo combination + for ifo in self.ifos: + self.singles[ifo] = MultiRingBuffer(self.num_templates, + self.buffer_size, + self.singles_dtype)
+ + + def _add_singles_to_buffer(self, results, ifos): + """Add single detector triggers to the internal buffer + + Parameters + ---------- + results: dict + Dictionary of dictionaries indexed by ifo and keys such as 'snr', + 'chisq', etc. The specific format is determined by the + LiveBatchMatchedFilter class. + + Returns + ------- + updated_singles: dict of numpy.ndarrays + Array of indices that have been just updated in the internal + buffers of single detector triggers. + """ + if len(self.singles.keys()) == 0: + self.set_singles_buffer(results) + # If this *still* didn't work, no triggers in first set, try next time + if len(self.singles.keys()) == 0: + return {} + + # convert to single detector trigger values + # FIXME Currently configured to use pycbc live output + # where chisq is the reduced chisq and chisq_dof is the actual DOF + logger.info("adding singles to the background estimate...") + updated_indices = {} + for ifo in ifos: + trigs = results[ifo] + + if len(trigs['snr'] > 0): + trigsc = copy.copy(trigs) + trigsc['ifo'] = ifo + trigsc['chisq'] = trigs['chisq'] * trigs['chisq_dof'] + trigsc['chisq_dof'] = (trigs['chisq_dof'] + 2) / 2 + single_stat = self.stat_calculator.single(trigsc) + del trigsc['ifo'] + else: + single_stat = numpy.array([], ndmin=1, + dtype=self.stat_calculator.single_dtype) + trigs['stat'] = single_stat + + # add each single detector trigger to the and advance the buffer + data = numpy.zeros(len(single_stat), dtype=self.singles_dtype) + for key, value in trigs.items(): + data[key] = value + + self.singles[ifo].add(trigs['template_id'], data) + updated_indices[ifo] = trigs['template_id'] + return updated_indices + + def _find_coincs(self, results, valid_ifos): + """Look for coincs within the set of single triggers + + Parameters + ---------- + results: dict + Dictionary of dictionaries indexed by ifo and keys such as 'snr', + 'chisq', etc. The specific format is determined by the + LiveBatchMatchedFilter class. + valid_ifos: list of strs + List of ifos for which new triggers might exist. This must be a + subset of self.ifos. If an ifo is in self.ifos but not in this list + either the ifo is down, or its data has been flagged as "bad". + + Returns + ------- + num_background: int + Number of time shifted coincidences found. + coinc_results: dict of arrays + A dictionary of arrays containing the coincident results. + """ + # For each new single detector trigger find the allowed coincidences + # Record the template and the index of the single trigger that forms + # each coincidence + + # Initialize + cstat = [[]] + offsets = [] + ctimes = {self.ifos[0]:[], self.ifos[1]:[]} + single_expire = {self.ifos[0]:[], self.ifos[1]:[]} + template_ids = [[]] + trigger_ids = {self.ifos[0]:[[]], self.ifos[1]:[[]]} + + # Calculate all the permutations of coincident triggers for each + # new single detector trigger collected + # Currently only two detectors are supported. + # For each ifo, check its newly added triggers for (zerolag and time + # shift) coincs with all currently stored triggers in the other ifo. + # Do this by keeping the ifo with new triggers fixed and time shifting + # the other ifo. The list 'shift_vec' must be in the same order as + # self.ifos and contain -1 for the shift_ifo / 0 for the fixed_ifo. 
+ for fixed_ifo, shift_ifo, shift_vec in zip( + [self.ifos[0], self.ifos[1]], + [self.ifos[1], self.ifos[0]], + [[0, -1], [-1, 0]] + ): + if fixed_ifo not in valid_ifos: + # This ifo is not online now, so no new triggers or coincs + continue + # Find newly added triggers in fixed_ifo + trigs = results[fixed_ifo] + # Calculate mchirp as a vectorized operation + mchirps = conv.mchirp_from_mass1_mass2( + trigs['mass1'], trigs['mass2'] + ) + # Loop over them one trigger at a time + for i in range(len(trigs['end_time'])): + trig_stat = trigs['stat'][i] + trig_time = trigs['end_time'][i] + template = trigs['template_id'][i] + mchirp = mchirps[i] + + # Get current shift_ifo triggers in the same template + times = self.singles[shift_ifo].data(template)['end_time'] + stats = self.singles[shift_ifo].data(template)['stat'] + + # Perform coincidence. i1 is the list of trigger indices in the + # shift_ifo which make coincs, slide is the corresponding slide + # index. + # (The second output would just be a list of zeroes as we only + # have one trigger in the fixed_ifo.) + i1, _, slide = time_coincidence(times, + numpy.array(trig_time, ndmin=1, + dtype=numpy.float64), + self.time_window, + self.timeslide_interval) + + # Make a copy of the fixed ifo trig_stat for each coinc. + # NB for some statistics the "stat" entry holds more than just + # a ranking number. E.g. for the phase time consistency test, + # it must also contain the phase, time and sensitivity. + if self.trig_stat_memory is None: + self.trig_stat_memory = numpy.zeros( + 1, + dtype=trig_stat.dtype + ) + while len(self.trig_stat_memory) < len(i1): + self.trig_stat_memory = numpy.resize( + self.trig_stat_memory, + len(self.trig_stat_memory)*2 + ) + self.trig_stat_memory[:len(i1)] = trig_stat + + # Force data into form needed by stat.py and then compute the + # ranking statistic values. + sngls_list = [[fixed_ifo, self.trig_stat_memory[:len(i1)]], + [shift_ifo, stats[i1]]] + + c = self.stat_calculator.rank_stat_coinc( + sngls_list, + slide, + self.timeslide_interval, + shift_vec, + time_addition=self.coinc_window_pad, + mchirp=mchirp, + dets=self.dets + ) + + # Store data about new triggers: slide index, stat value and + # times. + offsets.append(slide) + cstat.append(c) + ctimes[shift_ifo].append(times[i1]) + ctimes[fixed_ifo].append(numpy.zeros(len(c), + dtype=numpy.float64)) + ctimes[fixed_ifo][-1].fill(trig_time) + + # As background triggers are removed after a certain time, we + # need to log when this will be for new background triggers. + single_expire[shift_ifo].append( + self.singles[shift_ifo].expire_vector(template)[i1] + ) + single_expire[fixed_ifo].append(numpy.zeros(len(c), + dtype=numpy.int32)) + single_expire[fixed_ifo][-1].fill( + self.singles[fixed_ifo].time - 1 + ) + + # Save the template and trigger ids to keep association + # to singles. 
The trigger was just added so it must be in + # the last position: we mark this with -1 so the + # slicing picks the right point + template_ids.append(numpy.zeros(len(c)) + template) + trigger_ids[shift_ifo].append(i1) + trigger_ids[fixed_ifo].append(numpy.zeros(len(c)) - 1) + + cstat = numpy.concatenate(cstat) + template_ids = numpy.concatenate(template_ids).astype(numpy.int32) + for ifo in valid_ifos: + trigger_ids[ifo] = numpy.concatenate(trigger_ids[ifo]).astype(numpy.int32) + + logger.info( + "%s: %s background and zerolag coincs", + ppdets(self.ifos, "-"), len(cstat) + ) + + # Cluster the triggers we've found + # (both zerolag and shifted are handled together) + num_zerolag = 0 + num_background = 0 + if len(cstat) > 0: + offsets = numpy.concatenate(offsets) + ctime0 = numpy.concatenate(ctimes[self.ifos[0]]).astype(numpy.float64) + ctime1 = numpy.concatenate(ctimes[self.ifos[1]]).astype(numpy.float64) + logger.info("Clustering %s coincs", ppdets(self.ifos, "-")) + cidx = cluster_coincs(cstat, ctime0, ctime1, offsets, + self.timeslide_interval, + self.analysis_block + 2*self.time_window, + method='cython') + offsets = offsets[cidx] + zerolag_idx = (offsets == 0) + bkg_idx = (offsets != 0) + + for ifo in self.ifos: + single_expire[ifo] = numpy.concatenate(single_expire[ifo]) + single_expire[ifo] = single_expire[ifo][cidx][bkg_idx] + + self.coincs.add(cstat[cidx][bkg_idx], single_expire, valid_ifos) + num_zerolag = zerolag_idx.sum() + num_background = bkg_idx.sum() + elif len(valid_ifos) > 0: + self.coincs.increment(valid_ifos) + + # Collect coinc results for saving + coinc_results = {} + # Save information about zerolag triggers + if num_zerolag > 0: + idx = cidx[zerolag_idx][0] + zerolag_cstat = cstat[cidx][zerolag_idx] + ifar, ifar_sat = self.ifar(zerolag_cstat) + zerolag_results = { + 'foreground/ifar': ifar, + 'foreground/ifar_saturated': ifar_sat, + 'foreground/stat': zerolag_cstat, + 'foreground/type': '-'.join(self.ifos) + } + template = template_ids[idx] + for ifo in self.ifos: + trig_id = trigger_ids[ifo][idx] + single_data = self.singles[ifo].data(template)[trig_id] + for key in single_data.dtype.names: + path = f'foreground/{ifo}/{key}' + zerolag_results[path] = single_data[key] + coinc_results.update(zerolag_results) + + # Save some summary statistics about the background + coinc_results['background/time'] = numpy.array([self.background_time]) + coinc_results['background/count'] = len(self.coincs.data) + + # Save all the background triggers + if self.return_background: + coinc_results['background/stat'] = self.coincs.data + + return num_background, coinc_results + +
+[docs] + def backout_last(self, updated_singles, num_coincs): + """Remove the recently added singles and coincs + + Parameters + ---------- + updated_singles: dict of numpy.ndarrays + Array of indices that have been just updated in the internal + buffers of single detector triggers. + num_coincs: int + The number of coincs that were just added to the internal buffer + of coincident triggers + """ + for ifo in updated_singles: + self.singles[ifo].discard_last(updated_singles[ifo]) + self.coincs.remove(num_coincs)
+ + +
+[docs] + def add_singles(self, results): + """Add singles to the background estimate and find candidates + + Parameters + ---------- + results: dict + Dictionary of dictionaries indexed by ifo and keys such as 'snr', + 'chisq', etc. The specific format is determined by the + LiveBatchMatchedFilter class. + + Returns + ------- + coinc_results: dict of arrays + A dictionary of arrays containing the coincident results. + """ + # Let's see how large everything is + logger.info( + "%s: %s coincs, %s bytes", + ppdets(self.ifos, "-"), len(self.coincs), self.coincs.nbytes + ) + + # If there are no results just return + valid_ifos = [k for k in results.keys() if results[k] and k in self.ifos] + if len(valid_ifos) == 0: return {} + + with self.stat_calculator_lock: + # Add single triggers to the internal buffer + self._add_singles_to_buffer(results, ifos=valid_ifos) + + # Calculate zerolag and background coincidences + _, coinc_results = self._find_coincs(results, valid_ifos=valid_ifos) + + # record if a coinc is possible in this chunk + if len(valid_ifos) == 2: + coinc_results['coinc_possible'] = True + + return coinc_results
+ + +
+[docs] + def start_refresh_thread(self): + """ + Start a thread managing whether the stat_calculator will be updated + """ + if self.statistic_refresh_rate is None: + logger.info( + "Statistic refresh disabled for %s", ppdets(self.ifos, "-") + ) + return + thread = threading.Thread( + target=self.refresh_statistic, + daemon=True, + name="Stat refresh " + ppdets(self.ifos, "-") + ) + logger.info( + "Starting %s statistic refresh thread", ppdets(self.ifos, "-") + ) + thread.start()
+ + +
+[docs] + def refresh_statistic(self): + """ + Function to refresh the stat_calculator at regular intervals + """ + while True: + # How long since the statistic was last updated? + since_stat_refresh = timemod.time() - self.time_stat_refreshed + if since_stat_refresh > self.statistic_refresh_rate: + self.time_stat_refreshed = timemod.time() + logger.info( + "Checking %s statistic for updated files", + ppdets(self.ifos, "-"), + ) + with self.stat_calculator_lock: + self.stat_calculator.check_update_files() + # Sleep one second for safety + timemod.sleep(1) + # Now include the time it took the check / update the statistic + since_stat_refresh = timemod.time() - self.time_stat_refreshed + logger.debug( + "%s statistic: Waiting %.3fs for next refresh", + ppdets(self.ifos, "-"), + self.statistic_refresh_rate - since_stat_refresh, + ) + timemod.sleep(self.statistic_refresh_rate - since_stat_refresh + 1)
+
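+
+# The ifar() method above is a simple counting estimate: with an effective
+# background time T and n background coincs louder than the candidate,
+# IFAR = T (in years) / (n + 1).  A worked example with made-up numbers:
+import pycbc.conversions as conv
+example_background_time = 5.0e7   # seconds of effective background
+n_louder = 4                      # background coincs above the candidate stat
+example_ifar = conv.sec_to_year(example_background_time) / (n_louder + 1)
+# ~0.32 years between noise coincs as loud as the candidate
+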
+ + + +__all__ = [ + "background_bin_from_string", + "timeslide_durations", + "time_coincidence", + "time_multi_coincidence", + "cluster_coincs", + "cluster_coincs_multiifo", + "mean_if_greater_than_zero", + "cluster_over_time", + "MultiRingBuffer", + "CoincExpireBuffer", + "LiveCoincTimeslideBackgroundEstimator" +] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/coinc_rate.html b/latest/html/_modules/pycbc/events/coinc_rate.html new file mode 100644 index 00000000000..d7c9022a642 --- /dev/null +++ b/latest/html/_modules/pycbc/events/coinc_rate.html @@ -0,0 +1,362 @@ + + + + + + pycbc.events.coinc_rate — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.coinc_rate

+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" This module contains functions for calculating expected rates of noise
+    and signal coincidences.
+"""
+
+import itertools
+import logging
+import numpy
+import pycbc.detector
+
+logger = logging.getLogger('pycbc.events.coinc_rate')
+
+
+
+[docs] +def multiifo_noise_lograte(log_rates, slop): + """ + Calculate the expected rate of noise coincidences for multiple + combinations of detectors + + Parameters + ---------- + log_rates: dict + Key: ifo string, Value: sequence of log single-detector trigger rates, + units assumed to be Hz + slop: float + time added to maximum time-of-flight between detectors to account + for timing error + + Returns + ------- + expected_log_rates: dict + Key: ifo combination string + Value: expected log coincidence rate in the combination, units log Hz + """ + expected_log_rates = {} + + # Order of ifos must be stable in output dict keys, so sort them + ifos = sorted(list(log_rates.keys())) + ifostring = ' '.join(ifos) + + # Calculate coincidence for all-ifo combination + expected_log_rates[ifostring] = \ + combination_noise_lograte(log_rates, slop) + + # If more than one possible coincidence type exists, + # calculate coincidence for subsets through recursion + if len(ifos) > 2: + # Calculate rate for each 'miss-one-out' detector combination + subsets = itertools.combinations(ifos, len(ifos) - 1) + for subset in subsets: + rates_subset = {} + for ifo in subset: + rates_subset[ifo] = log_rates[ifo] + sub_coinc_rates = multiifo_noise_lograte(rates_subset, slop) + # add these sub-coincidences to the overall dictionary + for sub_coinc in sub_coinc_rates: + expected_log_rates[sub_coinc] = sub_coinc_rates[sub_coinc] + + return expected_log_rates
+ + + +
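+
+# Sketch of calling multiifo_noise_lograte with hypothetical per-detector
+# log trigger rates (log Hz); the rates and slop are illustrative.
+import numpy
+example_log_rates = {'H1': numpy.log(numpy.array([1e-2])),
+                     'L1': numpy.log(numpy.array([2e-2])),
+                     'V1': numpy.log(numpy.array([1e-2]))}
+lograte = multiifo_noise_lograte(example_log_rates, slop=0.005)
+# keys: 'H1 L1 V1' plus each two-detector subset, e.g. 'H1 L1', 'L1 V1'
+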
+[docs] +def combination_noise_rate(rates, slop): + """ + Calculate the expected rate of noise coincidences for a combination of + detectors + WARNING: for high stat values, numerical underflow can occur + + Parameters + ---------- + rates: dict + Key: ifo string, Value: sequence of single-detector trigger rates, + units assumed to be Hz + slop: float + time added to maximum time-of-flight between detectors to account + for timing error + + Returns + ------- + numpy array + Expected coincidence rate in the combination, units Hz + """ + logger.warning('combination_noise_rate() is liable to numerical ' + 'underflows, use combination_noise_lograte ' + 'instead') + log_rates = {k: numpy.log(r) for (k, r) in rates.items()} + # exp may underflow + return numpy.exp(combination_noise_lograte(log_rates, slop))
+ + + +
+[docs] +def combination_noise_lograte(log_rates, slop, dets=None): + """ + Calculate the expected rate of noise coincidences for a combination of + detectors given log of single detector noise rates + + Parameters + ---------- + log_rates: dict + Key: ifo string, Value: sequence of log single-detector trigger rates, + units assumed to be Hz + slop: float + time added to maximum time-of-flight between detectors to account + for timing error + dets: dict + Key: ifo string, Value: pycbc.detector.Detector object + If not provided, will be created from ifo strings + + Returns + ------- + numpy array + Expected log coincidence rate in the combination, units Hz + """ + # multiply product of trigger rates by the overlap time + allowed_area = multiifo_noise_coincident_area(list(log_rates), + slop, + dets=dets) + # list(dict.values()) is python-3-proof + rateprod = numpy.sum(list(log_rates.values()), axis=0) + return numpy.log(allowed_area) + rateprod
+ + + +
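+
+# For two detectors the formula reduces to rate_1 * rate_2 * 2 * (tof + slop).
+# A hypothetical check (the H1-L1 light travel time is about 10 ms):
+import numpy
+two_det_log_rates = {'H1': numpy.log(numpy.array([1e-2])),
+                     'L1': numpy.log(numpy.array([1e-2]))}
+log_coinc_rate = combination_noise_lograte(two_det_log_rates, slop=0.005)
+coinc_rate_hz = numpy.exp(log_coinc_rate)  # ~1e-4 * 2 * 0.015 ≈ 3e-6 Hz
+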
+[docs] +def multiifo_noise_coincident_area(ifos, slop, dets=None): + """ + Calculate the total extent of time offset between 2 detectors, + or area of the 2d space of time offsets for 3 detectors, for + which a coincidence can be generated + Cannot yet handle more than 3 detectors. + + Parameters + ---------- + ifos: list of strings + list of interferometers + slop: float + extra time to add to maximum time-of-flight for timing error + dets: dict + Key: ifo string, Value: pycbc.detector.Detector object + If not provided, will be created from ifo strings + + Returns + ------- + allowed_area: float + area in units of seconds^(n_ifos-1) that coincident values can fall in + """ + # set up detector objects + if dets is None: + dets = {ifo: pycbc.detector.Detector(ifo) for ifo in ifos} + n_ifos = len(ifos) + + if n_ifos == 2: + allowed_area = 2. * \ + (dets[ifos[0]].light_travel_time_to_detector(dets[ifos[1]]) + slop) + elif n_ifos == 3: + tofs = numpy.zeros(n_ifos) + ifo2_num = [] + + # calculate travel time between detectors (plus extra for timing error) + # TO DO: allow for different timing errors between different detectors + for i, ifo in enumerate(ifos): + ifo2_num.append(int(numpy.mod(i + 1, n_ifos))) + det0 = dets[ifo] + det1 = dets[ifos[ifo2_num[i]]] + tofs[i] = det0.light_travel_time_to_detector(det1) + slop + + # combine these to calculate allowed area + allowed_area = 0 + for i, _ in enumerate(ifos): + allowed_area += 2 * tofs[i] * tofs[ifo2_num[i]] - tofs[i]**2 + else: + raise NotImplementedError("Not able to deal with more than 3 ifos") + + return allowed_area
+ + + +
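+
+# Sketch: for two sites the allowed "area" is simply twice the padded light
+# travel time; for three sites it is an area in seconds^2 (slop illustrative).
+area_hl = multiifo_noise_coincident_area(['H1', 'L1'], slop=0.005)
+# roughly 2 * (0.010 + 0.005) = 0.03 s
+area_hlv = multiifo_noise_coincident_area(['H1', 'L1', 'V1'], slop=0.005)
+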
+[docs] +def multiifo_signal_coincident_area(ifos): + """ + Calculate the area in which signal time differences are physically allowed + + Parameters + ---------- + ifos: list of strings + list of interferometers + + Returns + ------- + allowed_area: float + area in units of seconds^(n_ifos-1) that coincident signals will occupy + """ + n_ifos = len(ifos) + + if n_ifos == 2: + det0 = pycbc.detector.Detector(ifos[0]) + det1 = pycbc.detector.Detector(ifos[1]) + allowed_area = 2 * det0.light_travel_time_to_detector(det1) + elif n_ifos == 3: + dets = {} + tofs = numpy.zeros(n_ifos) + ifo2_num = [] + # set up detector objects + for ifo in ifos: + dets[ifo] = pycbc.detector.Detector(ifo) + + # calculate travel time between detectors + for i, ifo in enumerate(ifos): + ifo2_num.append(int(numpy.mod(i + 1, n_ifos))) + det0 = dets[ifo] + det1 = dets[ifos[ifo2_num[i]]] + tofs[i] = det0.light_travel_time_to_detector(det1) + + # calculate allowed area + phi_12 = numpy.arccos((tofs[0]**2 + tofs[1]**2 - tofs[2]**2) + / (2 * tofs[0] * tofs[1])) + allowed_area = numpy.pi * tofs[0] * tofs[1] * numpy.sin(phi_12) + else: + raise NotImplementedError("Not able to deal with more than 3 ifos") + + return allowed_area
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/cuts.html b/latest/html/_modules/pycbc/events/cuts.html new file mode 100644 index 00000000000..c4342ae2fe6 --- /dev/null +++ b/latest/html/_modules/pycbc/events/cuts.html @@ -0,0 +1,606 @@ + + + + + + pycbc.events.cuts — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.cuts

+# Copyright (C) 2022 Gareth Cabourn Davies
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""
+This module contains functions for reading in command line options and
+applying cuts to triggers or templates in the offline search
+"""
+import logging
+import copy
+import numpy as np
+from pycbc.events import ranking
+from pycbc.io import hdf
+from pycbc.tmpltbank import bank_conversions as bank_conv
+from pycbc.io import get_chisq_from_file_choice
+
+# Only used to check isinstance:
+from pycbc.io.hdf import ReadByTemplate
+
+logger = logging.getLogger('pycbc.events.cuts')
+
+# sngl_rank_keys are the allowed names of reweighted SNR functions
+sngl_rank_keys = ranking.sngls_ranking_function_dict.keys()
+
+trigger_param_choices = list(sngl_rank_keys)
+trigger_param_choices += [cc + '_chisq' for cc in hdf.chisq_choices]
+trigger_param_choices += ['end_time', 'psd_var_val', 'sigmasq',
+                          'sigma_multiple']
+
+template_fit_param_choices = ['fit_by_fit_coeff', 'smoothed_fit_coeff',
+                              'fit_by_count_above_thresh',
+                              'smoothed_fit_count_above_thresh',
+                              'fit_by_count_in_template',
+                              'smoothed_fit_count_in_template']
+template_param_choices = bank_conv.conversion_options + \
+                             template_fit_param_choices
+
+# What are the inequalities associated with the cuts?
+# 'upper' means upper limit, and so requires value < threshold
+# to keep a trigger
+ineq_functions = {
+    'upper': np.less,
+    'lower': np.greater,
+    'upper_inc': np.less_equal,
+    'lower_inc': np.greater_equal
+}
+ineq_choices = list(ineq_functions.keys())
+
+
+
+[docs] +def insert_cuts_option_group(parser): + """ + Add options to the parser for cuts to the templates/triggers + """ + parser.add_argument('--trigger-cuts', nargs='+', + help="Cuts to apply to the triggers, supplied as " + "PARAMETER:VALUE:LIMIT, where PARAMETER is the " + "parameter to be cut, VALUE is the value at " + "which it is cut, and LIMIT is one of '" + + "', '".join(ineq_choices) + + "' to indicate the inequality needed. " + "PARAMETER is one of:'" + + "', '".join(trigger_param_choices) + + "'. For example snr:6:LOWER removes triggers " + "with matched filter SNR < 6") + parser.add_argument('--template-cuts', nargs='+', + help="Cuts to apply to the templates, supplied as " + "PARAMETER:VALUE:LIMIT. Format is the same as in " + "--trigger-cuts. PARAMETER can be one of '" + + "', '".join(template_param_choices) + "'.")
+ + + +
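+
+# A minimal sketch of wiring these options into an argparse parser; the cut
+# strings below are example values only.
+import argparse
+example_parser = argparse.ArgumentParser()
+insert_cuts_option_group(example_parser)
+example_args = example_parser.parse_args(
+    ['--trigger-cuts', 'snr:6:lower', '--template-cuts', 'mtotal:100:upper'])
+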
+[docs] +def convert_inputstr(inputstr, choices): + """ + Convert the inputstr into a dictionary keyed on parameter + with a tuple of the function to be used in the cut, and + the float to compare to. + Do input checks + """ + try: + cut_param, cut_value_str, cut_limit = inputstr.split(':') + except ValueError as value_e: + logger.warning("ERROR: Cut string format not correct, please " + "supply as PARAMETER:VALUE:LIMIT") + raise value_e + + if cut_param.lower() not in choices: + raise NotImplementedError("Cut parameter " + cut_param.lower() + " " + "not recognised, choose from " + + ", ".join(choices)) + if cut_limit.lower() not in ineq_choices: + raise NotImplementedError("Cut inequality " + cut_limit.lower() + " " + "not recognised, choose from " + + ", ".join(ineq_choices)) + + try: + cut_value = float(cut_value_str) + except ValueError as value_e: + logger.warning("ERROR: Cut value must be convertible into a float, " + "got '%s'.", cut_value_str) + raise value_e + + return {(cut_param, ineq_functions[cut_limit]): cut_value}
+ + + +
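+
+# Sketch of what a single cut string becomes; 'snr' is assumed to be one of
+# the allowed trigger parameters.
+example_cut = convert_inputstr('snr:6:lower', trigger_param_choices)
+# -> {('snr', np.greater): 6.0}, i.e. keep triggers with snr > 6
+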
+[docs] +def check_update_cuts(cut_dict, new_cut): + """ + Update a cuts dictionary, but check whether the cut exists already, + warn and only apply the strictest cuts + + + Parameters + ---------- + cut_dict: dictionary + Dictionary containing the cuts to be checked, will be updated + + new_cut: single-entry dictionary + dictionary to define the new cut which is being considered to add + """ + new_cut_key = list(new_cut.keys())[0] + if new_cut_key in cut_dict: + # The cut has already been called + logger.warning("WARNING: Cut parameter %s and function %s have " + "already been used. Utilising the strictest cut.", + new_cut_key[0], new_cut_key[1].__name__) + # Extract the function and work out which is strictest + cut_function = new_cut_key[1] + value_new = list(new_cut.values())[0] + value_old = cut_dict[new_cut_key] + if cut_function(value_new, value_old): + # The new threshold would survive the cut of the + # old threshold, therefore the new threshold is stricter + # - update it + logger.warning("WARNING: New threshold of %.3f is " + "stricter than old threshold %.3f, " + "using cut at %.3f.", + value_new, value_old, value_new) + cut_dict.update(new_cut) + else: + # New cut would not make a difference, ignore it + logger.warning("WARNING: New threshold of %.3f is less " + "strict than old threshold %.3f, using " + "cut at %.3f.", + value_new, value_old, value_old) + else: + # This is a new cut - add it + cut_dict.update(new_cut)
+ + + +
+[docs] +def ingest_cuts_option_group(args): + """ + Return dictionaries for trigger and template cuts. + """ + # Deal with the case where no cuts are supplied: + if not args.trigger_cuts and not args.template_cuts: + return {}, {} + + # Deal with the case where one set of cuts is supplied + # but not the other + trigger_cut_strs = args.trigger_cuts or [] + template_cut_strs = args.template_cuts or [] + + # Handle trigger cuts + trigger_cut_dict = {} + for inputstr in trigger_cut_strs: + new_trigger_cut = convert_inputstr(inputstr, trigger_param_choices) + check_update_cuts(trigger_cut_dict, new_trigger_cut) + + # Handle template cuts + template_cut_dict = {} + for inputstr in template_cut_strs: + new_template_cut = convert_inputstr(inputstr, template_param_choices) + check_update_cuts(template_cut_dict, new_template_cut) + + return trigger_cut_dict, template_cut_dict
+ + + +
+[docs] +def sigma_multiple_cut_thresh(template_ids, statistic, + cut_thresh, ifo): + """ + Apply cuts based on a multiple of the median sigma value for the template + + Parameters + ---------- + + template_ids: + template_id values for each of the triggers to be considered, + this will be used to associate a sigma threshold for each trigger + statistic: + A PyCBC ranking statistic instance. Used to get the median_sigma + value for the cuts. If fits_by_tid does not exist for the specified + ifo (where median_sigma lives), an error will be raised. + cut_thresh: int or float + The multiple of median_sigma to compare triggers to + ifo: + The IFO for which we want to read median_sigma + + Returns + ------- + thresholds: numpy array + The sigma threshold for each trigger, i.e. cut_thresh multiplied by + the median sigma of the trigger's template + """ + statistic_classname = statistic.__class__.__name__ + if not hasattr(statistic, 'fits_by_tid'): + raise ValueError("Cut parameter 'sigma_multiple' cannot " + "be used when the ranking statistic " + + statistic_classname + " does not use " + "template fitting.") + tid_med_sigma = statistic.fits_by_tid[ifo]['median_sigma'] + return cut_thresh * tid_med_sigma[template_ids]
+ + + +
+[docs] +def apply_trigger_cuts(triggers, trigger_cut_dict, statistic=None): + """ + Fetch/Calculate the parameter for triggers, and then + apply the cuts defined in template_cut_dict + + Parameters + ---------- + triggers: ReadByTemplate object or dictionary + The triggers in this particular template. This + must have the correct datasets required to calculate + the values we cut on. + + trigger_cut_dict: dictionary + Dictionary with tuples of (parameter, cut_function) + as keys, cut_thresholds as values + made using ingest_cuts_option_group function + + + Returns + ------- + idx_out: numpy array + An array of the indices which meet the criteria + set by the dictionary + """ + idx_out = np.arange(len(triggers['snr'])) + + # Loop through the different cuts, and apply them + for parameter_cut_function, cut_thresh in trigger_cut_dict.items(): + # The function and threshold are stored as a tuple so unpack it + parameter, cut_function = parameter_cut_function + + # What kind of parameter is it? + if parameter.endswith('_chisq'): + # parameter is a chisq-type thing + chisq_choice = parameter.split('_')[0] + # Currently calculated for all triggers - this seems inefficient + value = get_chisq_from_file_choice(triggers, chisq_choice) + # Apply any previous cuts to the value for comparison + value = value[idx_out] + elif parameter == "sigma_multiple": + if isinstance(triggers, ReadByTemplate): + ifo_grp = triggers.file[triggers.ifo] + value = np.sqrt(ifo_grp['sigmasq'][idx_out]) + template_ids = ifo_grp['template_id'][idx_out] + # Get a cut threshold value, this will be different + # depending on the template ID, so we rewrite cut_thresh + # as a value for each trigger, numpy comparison functions + # allow this + cut_thresh = sigma_multiple_cut_thresh(template_ids, + statistic, + cut_thresh, + triggers.ifo) + else: + err_msg = "Cuts on 'sigma_multiple' are only implemented for " + err_msg += "triggers in a ReadByTemplate format. This code " + err_msg += f"uses a {type(triggers).__name__} format." + raise NotImplementedError(err_msg) + elif ((not hasattr(triggers, "file") and parameter in triggers) + or (hasattr(triggers, "file") + and parameter in triggers.file[triggers.ifo])): + # parameter can be read direct from the trigger dictionary / file + if not hasattr(triggers, 'file') and parameter in triggers: + value = triggers[parameter] + else: + value = triggers.file[triggers.ifo][parameter] + # Apply any previous cuts to the value for comparison + value = value[idx_out] + elif parameter in sngl_rank_keys: + # parameter is a newsnr-type thing + # Currently calculated for all triggers - this seems inefficient + value = ranking.get_sngls_ranking_from_trigs(triggers, parameter) + # Apply any previous cuts to the value for comparison + value = value[idx_out] + else: + raise NotImplementedError("Parameter '" + parameter + "' not " + "recognised. Input sanitisation means " + "this shouldn't have happened?!") + + idx_out = idx_out[cut_function(value, cut_thresh)] + + return idx_out
+ + + +
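+
+# Sketch of applying a cut dictionary to an in-memory trigger dictionary;
+# the trigger values are made up.
+import numpy as np
+example_triggers = {'snr': np.array([4.0, 6.5, 8.1]),
+                    'end_time': np.array([100.0, 101.0, 102.0])}
+example_cuts = {('snr', np.greater): 6.0}
+kept_idx = apply_trigger_cuts(example_triggers, example_cuts)
+# kept_idx -> array([1, 2]); only triggers with snr > 6 remain
+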
+[docs] +def apply_template_fit_cut(statistic, ifos, parameter_cut_function, cut_thresh, + template_ids): + """ + Apply cuts to template fit parameters, these have a few more checks + needed, so we separate out from apply_template_cuts defined later + + Parameters + ---------- + statistic: + A PyCBC ranking statistic instance. Used for the template fit + cuts. If fits_by_tid does not exist for each ifo, then + template fit cuts will be skipped. If a fit cut has been specified + and fits_by_tid does not exist for all ifos, an error will be raised. + + ifos: list of strings + List of IFOS used in this findtrigs instance. + Templates must pass cuts in all IFOs. + + parameter_cut_function: tuple + First entry: Which parameter is being used for the cut? + Second entry: Cut function + + cut_thresh: float or int + Cut threshold to the parameter according to the cut function + + template_ids: numpy array + Array of template_ids which have passed previous cuts + + + Returns + ------- + tids_out: numpy array + Array of template_ids which have passed this cut + """ + parameter, cut_function = parameter_cut_function + statistic_classname = statistic.__class__.__name__ + + # We can only apply template fit cuts if template fits have been done + if not hasattr(statistic, 'fits_by_tid'): + raise ValueError("Cut parameter " + parameter + " cannot " + "be used when the ranking statistic " + + statistic_classname + " does not use " + "template fitting.") + + # Is the parameter actually in the fits dictionary? + if parameter not in statistic.fits_by_tid[ifos[0]]: + # Shouldn't get here due to input sanitisation + raise ValueError("Cut parameter " + parameter + " not " + "available in fits file.") + + # Template IDs array to cut down in each IFO + tids_out = copy.copy(template_ids) + # Need to apply this cut to all IFOs + for ifo in ifos: + fits_dict = statistic.fits_by_tid[ifo] + values = fits_dict[parameter][tids_out] + # Only keep templates which pass this cut + tids_out = tids_out[cut_function(values, cut_thresh)] + + return tids_out
+ + + +
+[docs] +def apply_template_cuts(bank, template_cut_dict, template_ids=None, + statistic=None, ifos=None): + """ + Fetch/calculate the parameter for the templates, possibly already + preselected by template_ids, and then apply the cuts defined + in template_cut_dict + As this is used to select templates for use in findtrigs codes, + we remove anything which does not pass + + Parameters + ---------- + bank: h5py File object, or a dictionary + Must contain the usual template bank datasets + + template_cut_dict: dictionary + Dictionary with tuples of (parameter, cut_function) + as keys, cut_thresholds as values + made using ingest_cuts_option_group function + + Optional Parameters + ------------------- + template_ids: list of indices + Indices of templates to consider within the bank, useful if + templates have already been down-selected + + statistic: + A PyCBC ranking statistic instance. Used for the template fit + cuts. If fits_by_tid does not exist for each ifo, then + template fit cuts will be skipped. If a fit cut has been specified + and fits_by_tid does not exist for all ifos, an error will be raised. + If not supplied, no template fit cuts will be attempted. + + ifos: list of strings + List of IFOS used in this findtrigs instance. + Templates must pass cuts in all IFOs. This is important + e.g. for template fit parameter cuts. + + + Returns + ------- + tids_out: numpy array + Array of template_ids which have passed all cuts + """ + # Get the initial list of templates: + tids_out = np.arange(bank['mass1'].size) \ + if template_ids is None else template_ids[:] + + if (statistic is None) ^ (ifos is None): + raise NotImplementedError("Either both or neither of statistic and " + "ifos must be supplied.") + + if not template_cut_dict: + # No cuts are defined in the dictionary: just return the + # list of all tids + return tids_out + + # Loop through the different cuts, and apply them + for parameter_cut_function, cut_thresh in template_cut_dict.items(): + # The function and threshold are stored as a tuple so unpack it + parameter, cut_function = parameter_cut_function + + if parameter in bank_conv.conversion_options: + # Calculate the parameter values using the bank property helper + values = bank_conv.get_bank_property(parameter, bank, tids_out) + # Only keep templates which pass this cut + tids_out = tids_out[cut_function(values, cut_thresh)] + elif parameter in template_fit_param_choices: + if statistic and ifos: + tids_out = apply_template_fit_cut(statistic, + ifos, + parameter_cut_function, + cut_thresh, + tids_out) + else: + raise ValueError("Cut parameter " + parameter + " not recognised." + " This shouldn't happen with input sanitisation") + + return tids_out
+ +
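+
+# Sketch with a plain dictionary standing in for a template bank; masses are
+# made up and only a bank-property cut ('mtotal') is shown.
+import numpy as np
+example_bank = {'mass1': np.array([1.4, 10.0, 30.0]),
+                'mass2': np.array([1.4, 10.0, 30.0])}
+example_template_cuts = {('mtotal', np.less): 30.0}
+kept_tids = apply_template_cuts(example_bank, example_template_cuts)
+# kept_tids -> array([0, 1]); templates with total mass below 30 survive
+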
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/eventmgr.html b/latest/html/_modules/pycbc/events/eventmgr.html new file mode 100644 index 00000000000..72550f23d4c --- /dev/null +++ b/latest/html/_modules/pycbc/events/eventmgr.html @@ -0,0 +1,1321 @@ + + + + + + pycbc.events.eventmgr — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.eventmgr

+# Copyright (C) 2012  Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""This modules defines functions for clustering and thresholding timeseries to
+produces event triggers
+"""
+import os.path
+import copy
+import itertools
+import logging
+import pickle
+import numpy
+import h5py
+
+from pycbc.types import Array
+from pycbc.scheme import schemed
+from pycbc.detector import Detector
+
+from . import coinc, ranking
+
+from .eventmgr_cython import findchirp_cluster_over_window_cython
+
+logger = logging.getLogger('pycbc.events.eventmgr')
+
+
+[docs] +@schemed("pycbc.events.threshold_") +def threshold(series, value): + """Return list of values and indices values over threshold in series. + """ + err_msg = "This function is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] +@schemed("pycbc.events.threshold_") +def threshold_only(series, value): + """Return list of values and indices whose values in series are + larger (in absolute value) than value + """ + err_msg = "This function is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +# FIXME: This should be under schemed, but I don't understand that yet! +
+[docs] +def threshold_real_numpy(series, value): + arr = series.data + locs = numpy.where(arr > value)[0] + vals = arr[locs] + return locs, vals
+ + +
+[docs] +@schemed("pycbc.events.threshold_") +def threshold_and_cluster(series, threshold, window): + """Return list of values and indices values over threshold in series. + """ + err_msg = "This function is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +@schemed("pycbc.events.threshold_") +def _threshold_cluster_factory(series): + err_msg = "This class is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg) + + +
+[docs] +class ThresholdCluster(object): + """Create a threshold and cluster engine + + Parameters + ----------- + series : complex64 + Input pycbc.types.Array (or subclass); it will be searched for + points above threshold that are then clustered + """ + def __new__(cls, *args, **kwargs): + real_cls = _threshold_cluster_factory(*args, **kwargs) + return real_cls(*args, **kwargs) # pylint:disable=not-callable
+ + + +# The class below should serve as the parent for all schemed classes. +# The intention is that this class serves simply as the location for +# all documentation of the class and its methods, though that is not +# yet implemented. Perhaps something along the lines of: +# +# http://stackoverflow.com/questions/2025562/inherit-docstrings-in-python-class-inheritance +# +# will work? Is there a better way? +class _BaseThresholdCluster(object): + def threshold_and_cluster(self, threshold, window): + """ + Threshold and cluster the memory specified at instantiation with the + threshold and window size specified at creation. + + Parameters + ----------- + threshold : float32 + The minimum absolute value of the series given at object initialization + to return when thresholding and clustering. + window : uint32 + The size (in number of samples) of the window over which to cluster + + Returns: + -------- + event_vals : complex64 + Numpy array, complex values of the clustered events + event_locs : uint32 + Numpy array, indices into series of location of events + """ + pass + + +
+[docs] +def findchirp_cluster_over_window(times, values, window_length): + """ Reduce the events by clustering over a window using + the FindChirp clustering algorithm + + Parameters + ----------- + times: Array + The list of sample indices of the SNR values + values: Array + The list of SNR values + window_length: int + The size of the window in integer samples. Must be positive. + + Returns + ------- + indices: Array + The reduced list of indices of the SNR values + """ + assert window_length > 0, 'Clustering window length is not positive' + + indices = numpy.zeros(len(times), dtype=numpy.int32) + tlen = len(times) + absvalues = numpy.asarray(abs(values)) + times = numpy.asarray(times, dtype=numpy.int32) + k = findchirp_cluster_over_window_cython(times, absvalues, window_length, + indices, tlen) + + return indices[0:k+1]
+ + + +
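+
+# A small sketch of FindChirp-style clustering on made-up SNR samples:
+import numpy
+sample_idx = numpy.array([10, 12, 40, 41], dtype=numpy.int32)
+sample_snr = numpy.array([5.0, 6.5, 7.0, 4.0])
+kept = findchirp_cluster_over_window(sample_idx, sample_snr, 16)
+# the loudest sample within each 16-sample cluster is kept,
+# here indices 1 and 2
+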
+[docs] +def cluster_reduce(idx, snr, window_size): + """ Reduce the events by clustering over a window + + Parameters + ----------- + idx: Array + The list of indices of the SNR values + snr: Array + The list of SNR values + window_size: int + The size of the window in integer samples. + + Returns + ------- + indices: Array + The list of indices of the SNR values + snr: Array + The list of SNR values + """ + ind = findchirp_cluster_over_window(idx, snr, window_size) + return idx.take(ind), snr.take(ind)
+ + + +class H5FileSyntSugar(object): + """Convenience class that adds some syntactic sugar to h5py.File. + """ + def __init__(self, name, prefix=''): + self.f = h5py.File(name, 'w') + self.prefix = prefix + + def __setitem__(self, name, data): + self.f.create_dataset( + self.prefix + '/' + name, + data=data, + compression='gzip', + compression_opts=9, + shuffle=True + ) + + +
+[docs] +class EventManager(object): + def __init__(self, opt, column, column_types, **kwds): + self.opt = opt + self.global_params = kwds + + self.event_dtype = [('template_id', int)] + for col, coltype in zip(column, column_types): + self.event_dtype.append((col, coltype)) + + self.events = numpy.array([], dtype=self.event_dtype) + self.accumulate = [self.events] + self.template_params = [] + self.template_index = -1 + self.template_events = numpy.array([], dtype=self.event_dtype) + self.write_performance = False + +
+[docs] + def save_state(self, tnum_finished, filename): + """Save the current state of the background buffers""" + from pycbc.io.hdf import dump_state + + self.tnum_finished = tnum_finished + logger.info('Writing checkpoint file at template %s', tnum_finished) + fp = h5py.File(filename, 'w') + dump_state(self, fp, protocol=pickle.HIGHEST_PROTOCOL) + fp.close()
+ + +
+[docs] + @staticmethod + def restore_state(filename): + """Restore state of the background buffers from a file""" + from pycbc.io.hdf import load_state + + fp = h5py.File(filename, 'r') + try: + mgr = load_state(fp) + except Exception as e: + fp.close() + raise e + fp.close() + next_template = mgr.tnum_finished + 1 + logger.info('Restoring with checkpoint at template %s', next_template) + return mgr.tnum_finished + 1, mgr
+ + +
+[docs] + @classmethod + def from_multi_ifo_interface(cls, opt, ifo, column, column_types, **kwds): + """ + To use this for a single ifo from the multi ifo interface requires + some small fixing of the opt structure. This does that. As we edit the + opt structure the process_params table will not be correct. + """ + opt = copy.deepcopy(opt) + opt_dict = vars(opt) + for arg, value in opt_dict.items(): + if isinstance(value, dict): + setattr(opt, arg, getattr(opt, arg)[ifo]) + return cls(opt, column, column_types, **kwds)
+ + +
+[docs] + def chisq_threshold(self, value, num_bins, delta=0): + remove = [] + for i, event in enumerate(self.events): + xi = event['chisq'] / (event['chisq_dof'] + + delta * event['snr'].conj() * event['snr']) + if xi > value: + remove.append(i) + self.events = numpy.delete(self.events, remove)
+ + +
+[docs] + def newsnr_threshold(self, threshold): + """ Remove events with newsnr smaller than given threshold + """ + if not self.opt.chisq_bins: + raise RuntimeError('Chi-square test must be enabled in order to ' + 'use newsnr threshold') + + nsnrs = ranking.newsnr(abs(self.events['snr']), + self.events['chisq'] / self.events['chisq_dof']) + remove_idxs = numpy.where(nsnrs < threshold)[0] + self.events = numpy.delete(self.events, remove_idxs)
+ + +
+[docs] + def keep_near_injection(self, window, injections): + from pycbc.events.veto import indices_within_times + if len(self.events) == 0: + return + + inj_time = numpy.array(injections.end_times()) + gpstime = self.events['time_index'].astype(numpy.float64) + gpstime = gpstime / self.opt.sample_rate + self.opt.gps_start_time + i = indices_within_times(gpstime, inj_time - window, inj_time + window) + self.events = self.events[i]
+ + +
+[docs] + def keep_loudest_in_interval(self, window, num_keep, statname="newsnr", + log_chirp_width=None): + if len(self.events) == 0: + return + + e_copy = self.events.copy() + + # Here self.events['snr'] is the complex SNR + e_copy['snr'] = abs(e_copy['snr']) + # Messy step because pycbc inspiral's internal 'chisq_dof' is 2p-2 + # but stat.py / ranking.py functions use 'chisq_dof' = p + e_copy['chisq_dof'] = e_copy['chisq_dof'] / 2 + 1 + statv = ranking.get_sngls_ranking_from_trigs(e_copy, statname) + + # Convert trigger time to integer bin number + # NB time_index and window are in units of samples + wtime = (e_copy['time_index'] / window).astype(numpy.int32) + bins = numpy.unique(wtime) + + if log_chirp_width: + from pycbc.conversions import mchirp_from_mass1_mass2 + m1 = numpy.array([p['tmplt'].mass1 for p in self.template_params]) + m2 = numpy.array([p['tmplt'].mass2 for p in self.template_params]) + mc = mchirp_from_mass1_mass2(m1, m2)[e_copy['template_id']] + + # convert chirp mass to integer bin number + imc = (numpy.log(mc) / log_chirp_width).astype(numpy.int32) + cbins = numpy.unique(imc) + + keep = [] + for b in bins: + if log_chirp_width: + for b2 in cbins: + bloc = numpy.where((wtime == b) & (imc == b2))[0] + bloudest = statv[bloc].argsort()[-num_keep:] + keep.append(bloc[bloudest]) + else: + bloc = numpy.where((wtime == b))[0] + bloudest = statv[bloc].argsort()[-num_keep:] + keep.append(bloc[bloudest]) + + keep = numpy.concatenate(keep) + self.events = self.events[keep]
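The core of the method above is a bin-and-keep operation: trigger times are mapped to fixed-width bins and only the num_keep largest-statistic triggers survive per bin. A minimal sketch with made-up names and values (the optional chirp-mass binning is omitted):

import numpy

def keep_loudest_sketch(time_index, stat, window, num_keep):
    # Assign each trigger to an integer time bin of width `window` samples
    bins = (numpy.asarray(time_index) // window).astype(numpy.int32)
    keep = []
    for b in numpy.unique(bins):
        in_bin = numpy.flatnonzero(bins == b)
        # Keep the num_keep loudest triggers in this bin
        keep.append(in_bin[numpy.argsort(stat[in_bin])[-num_keep:]])
    return numpy.sort(numpy.concatenate(keep))

stat = numpy.array([5., 9., 6., 8., 7.])
time_index = numpy.array([10, 50, 90, 4200, 4300])
keep_loudest_sketch(time_index, stat, window=4096, num_keep=2)  # -> [1, 2, 3, 4]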
+ + +
+[docs] + def add_template_events(self, columns, vectors): + """ Add a vector indexed """ + # initialize with zeros - since vectors can be None, look for the + # longest one that isn't + new_events = None + for v in vectors: + if v is not None: + new_events = numpy.zeros(len(v), dtype=self.event_dtype) + break + # they shouldn't all be None + assert new_events is not None + new_events['template_id'] = self.template_index + for c, v in zip(columns, vectors): + if v is not None: + if isinstance(v, Array): + new_events[c] = v.numpy() + else: + new_events[c] = v + self.template_events = numpy.append(self.template_events, new_events)
+ + +
+[docs] + def cluster_template_events(self, tcolumn, column, window_size): + """ Cluster the internal events over the named column + """ + cvec = self.template_events[column] + tvec = self.template_events[tcolumn] + if window_size == 0: + indices = numpy.arange(len(tvec)) + else: + indices = findchirp_cluster_over_window(tvec, cvec, window_size) + self.template_events = numpy.take(self.template_events, indices)
+ + +
+[docs] + def new_template(self, **kwds): + self.template_params.append(kwds) + self.template_index += 1
+ + +
+[docs] + def add_template_params(self, **kwds): + self.template_params[-1].update(kwds)
+ + +
+[docs] + def finalize_template_events(self): + self.accumulate.append(self.template_events) + self.template_events = numpy.array([], dtype=self.event_dtype)
+ + +
+[docs] + def consolidate_events(self, opt, gwstrain=None): + self.events = numpy.concatenate(self.accumulate) + logger.info("We currently have %d triggers", len(self.events)) + if opt.chisq_threshold and opt.chisq_bins: + logger.info("Removing triggers with poor chisq") + self.chisq_threshold(opt.chisq_threshold, opt.chisq_bins, + opt.chisq_delta) + logger.info("%d remaining triggers", len(self.events)) + + if opt.newsnr_threshold and opt.chisq_bins: + logger.info("Removing triggers with NewSNR below threshold") + self.newsnr_threshold(opt.newsnr_threshold) + logger.info("%d remaining triggers", len(self.events)) + + if opt.keep_loudest_interval: + logger.info("Removing triggers not within the top %s " + "loudest of a %s second interval by %s", + opt.keep_loudest_num, opt.keep_loudest_interval, + opt.keep_loudest_stat) + self.keep_loudest_in_interval\ + (opt.keep_loudest_interval * opt.sample_rate, + opt.keep_loudest_num, statname=opt.keep_loudest_stat, + log_chirp_width=opt.keep_loudest_log_chirp_window) + logger.info("%d remaining triggers", len(self.events)) + + if opt.injection_window and hasattr(gwstrain, 'injections'): + logger.info("Keeping triggers within %s seconds of injection", + opt.injection_window) + self.keep_near_injection(opt.injection_window, + gwstrain.injections) + logger.info("%d remaining triggers", len(self.events)) + + self.accumulate = [self.events]
+ + +
+[docs] + def finalize_events(self): + self.events = numpy.concatenate(self.accumulate)
+ + +
+[docs] + def make_output_dir(self, outname): + path = os.path.dirname(outname) + if path != '': + if not os.path.exists(path) and path is not None: + os.makedirs(path)
+ + +
+[docs] + def save_performance(self, ncores, nfilters, ntemplates, run_time, + setup_time): + """ + Calls variables from pycbc_inspiral to be used in a timing calculation + """ + self.run_time = run_time + self.setup_time = setup_time + self.ncores = ncores + self.nfilters = nfilters + self.ntemplates = ntemplates + self.write_performance = True
+ + +
+[docs] + def write_events(self, outname): + """Write the found events to a file. The only currently supported + format is HDF5, indicated by an .hdf or .h5 extension. + """ + self.make_output_dir(outname) + if outname.endswith(('.hdf', '.h5')): + self.write_to_hdf(outname) + return + raise ValueError('Unsupported event output file format')
+ + +
+[docs] + def write_to_hdf(self, outname): + self.events.sort(order='template_id') + th = numpy.array([p['tmplt'].template_hash for p in + self.template_params]) + tid = self.events['template_id'] + f = H5FileSyntSugar(outname, self.opt.channel_name[0:2]) + + if len(self.events): + f['snr'] = abs(self.events['snr']) + try: + # Precessing + f['u_vals'] = self.events['u_vals'] + f['coa_phase'] = self.events['coa_phase'] + f['hplus_cross_corr'] = self.events['hplus_cross_corr'] + except Exception: + # Not precessing + f['coa_phase'] = numpy.angle(self.events['snr']) + f['chisq'] = self.events['chisq'] + f['bank_chisq'] = self.events['bank_chisq'] + f['bank_chisq_dof'] = self.events['bank_chisq_dof'] + f['cont_chisq'] = self.events['cont_chisq'] + f['end_time'] = self.events['time_index'] / \ + float(self.opt.sample_rate) \ + + self.opt.gps_start_time + try: + # Precessing + template_sigmasq_plus = numpy.array( + [t['sigmasq_plus'] for t in self.template_params], + dtype=numpy.float32) + f['sigmasq_plus'] = template_sigmasq_plus[tid] + template_sigmasq_cross = numpy.array( + [t['sigmasq_cross'] for t in self.template_params], + dtype=numpy.float32) + f['sigmasq_cross'] = template_sigmasq_cross[tid] + # FIXME: I want to put something here, but I haven't yet + # figured out what it should be. I think we would also + # need information from the plus and cross correlation + # (both real and imaginary(?)) to get this. + f['sigmasq'] = template_sigmasq_plus[tid] + except Exception: + # Not precessing + f['sigmasq'] = self.events['sigmasq'] + + # Template durations should ideally be stored in the bank file. + # At present, however, a few plotting/visualization codes + # downstream in the offline search workflow rely on durations being + # stored in the trigger files instead. + template_durations = [p['tmplt'].template_duration for p in + self.template_params] + f['template_duration'] = numpy.array(template_durations, + dtype=numpy.float32)[tid] + + # FIXME: Can we get this value from the autochisq instance? 
+ cont_dof = self.opt.autochi_number_points + if self.opt.autochi_onesided is None: + cont_dof = cont_dof * 2 + if self.opt.autochi_two_phase: + cont_dof = cont_dof * 2 + if self.opt.autochi_max_valued_dof: + cont_dof = self.opt.autochi_max_valued_dof + f['cont_chisq_dof'] = numpy.repeat(cont_dof, len(self.events)) + + if 'chisq_dof' in self.events.dtype.names: + f['chisq_dof'] = self.events['chisq_dof'] / 2 + 1 + else: + f['chisq_dof'] = numpy.zeros(len(self.events)) + + f['template_hash'] = th[tid] + + if 'sg_chisq' in self.events.dtype.names: + f['sg_chisq'] = self.events['sg_chisq'] + + if self.opt.psdvar_segment is not None: + f['psd_var_val'] = self.events['psd_var_val'] + + if self.opt.trig_start_time: + f['search/start_time'] = numpy.array([self.opt.trig_start_time]) + search_start_time = float(self.opt.trig_start_time) + else: + f['search/start_time'] = numpy.array([self.opt.gps_start_time + + self.opt.segment_start_pad]) + search_start_time = float(self.opt.gps_start_time + + self.opt.segment_start_pad) + if self.opt.trig_end_time: + f['search/end_time'] = numpy.array([self.opt.trig_end_time]) + search_end_time = float(self.opt.trig_end_time) + else: + f['search/end_time'] = numpy.array([self.opt.gps_end_time - + self.opt.segment_end_pad]) + search_end_time = float(self.opt.gps_end_time - + self.opt.segment_end_pad) + + if self.write_performance: + self.analysis_time = search_end_time - search_start_time + time_ratio = numpy.array( + [float(self.analysis_time) / float(self.run_time)]) + temps_per_core = float(self.ntemplates) / float(self.ncores) + filters_per_core = float(self.nfilters) / float(self.ncores) + f['search/templates_per_core'] = \ + numpy.array([float(temps_per_core) * float(time_ratio)]) + f['search/filter_rate_per_core'] = \ + numpy.array([filters_per_core / float(self.run_time)]) + f['search/setup_time_fraction'] = \ + numpy.array([float(self.setup_time) / float(self.run_time)]) + f['search/run_time'] = numpy.array([float(self.run_time)]) + + if 'q_trans' in self.global_params: + qtrans = self.global_params['q_trans'] + for key in qtrans: + if key == 'qtiles': + for seg in qtrans[key]: + for q in qtrans[key][seg]: + f['qtransform/%s/%s/%s' % (key, seg, q)] = \ + qtrans[key][seg][q] + elif key == 'qplanes': + for seg in qtrans[key]: + f['qtransform/%s/%s' % (key, seg)] = qtrans[key][seg] + + if 'gating_info' in self.global_params: + gating_info = self.global_params['gating_info'] + for gate_type in ['file', 'auto']: + if gate_type in gating_info: + f['gating/' + gate_type + '/time'] = \ + numpy.array([float(g[0]) for g in gating_info[gate_type]]) + f['gating/' + gate_type + '/width'] = \ + numpy.array([g[1] for g in gating_info[gate_type]]) + f['gating/' + gate_type + '/pad'] = \ + numpy.array([g[2] for g in gating_info[gate_type]]) + + f.f.close()
+
+ + + +class EventManagerMultiDetBase(EventManager): + def __init__(self, opt, ifos, column, column_types, psd=None, **kwargs): + self.opt = opt + self.ifos = ifos + self.global_params = kwargs + if psd is not None: + self.global_params['psd'] = psd[ifos[0]] + + # The events array does not like holding the ifo as string, + # so create a mapping dict and hold as an int + self.ifo_dict = {} + self.ifo_reverse = {} + for i, ifo in enumerate(ifos): + self.ifo_dict[ifo] = i + self.ifo_reverse[i] = ifo + + self.event_dtype = [('template_id', int), ('event_id', int)] + for col, coltype in zip(column, column_types): + self.event_dtype.append((col, coltype)) + + self.events = numpy.array([], dtype=self.event_dtype) + self.event_id_map = {} + self.template_params = [] + self.template_index = -1 + self.template_event_dict = {} + self.coinc_list = [] + self.write_performance = False + for ifo in ifos: + self.template_event_dict[ifo] = \ + numpy.array([], dtype=self.event_dtype) + + def add_template_events_to_ifo(self, ifo, columns, vectors): + """ Add a vector indexed """ + # Just call through to the standard function + self.template_events = self.template_event_dict[ifo] + self.add_template_events(columns, vectors) + self.template_event_dict[ifo] = self.template_events + self.template_events = None + + def write_gating_info_to_hdf(self, hf): + """Write per-detector gating information to an h5py file object. + The information is laid out according to the following groups and + datasets: `/<detector>/gating/{file, auto}/{time, width, pad}` where + "file" and "auto" indicate respectively externally-provided gates and + internally-generated gates (autogating), and "time", "width" and "pad" + indicate the gate center times, total durations and padding durations + in seconds respectively. + """ + if 'gating_info' not in self.global_params: + return + gates = self.global_params['gating_info'] + for ifo, gate_type in itertools.product(self.ifos, ['file', 'auto']): + if gate_type not in gates[ifo]: + continue + hf[f'{ifo}/gating/{gate_type}/time'] = numpy.array( + [float(g[0]) for g in gates[ifo][gate_type]] + ) + hf[f'{ifo}/gating/{gate_type}/width'] = numpy.array( + [g[1] for g in gates[ifo][gate_type]] + ) + hf[f'{ifo}/gating/{gate_type}/pad'] = numpy.array( + [g[2] for g in gates[ifo][gate_type]] + ) + + +
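For reference, the gating layout described in write_gating_info_to_hdf above can be read back with plain h5py along these lines (file name and detector are placeholders):

import h5py

with h5py.File('triggers.hdf', 'r') as hf:
    for gate_type in ('file', 'auto'):
        group = 'H1/gating/' + gate_type
        if group in hf:
            times = hf[group + '/time'][:]    # gate centre times (GPS seconds)
            widths = hf[group + '/width'][:]  # total gate durations (seconds)
            pads = hf[group + '/pad'][:]      # taper/padding durations (seconds)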
+[docs] +class EventManagerCoherent(EventManagerMultiDetBase): + def __init__(self, opt, ifos, column, column_types, network_column, + network_column_types, segments, time_slides, + psd=None, **kwargs): + super(EventManagerCoherent, self).__init__( + opt, ifos, column, column_types, psd=None, **kwargs) + self.network_event_dtype = \ + [(ifo + '_event_id', int) for ifo in self.ifos] + self.network_event_dtype.append(('template_id', int)) + self.network_event_dtype.append(('event_id', int)) + for col, coltype in zip(network_column, network_column_types): + self.network_event_dtype.append((col, coltype)) + self.network_events = numpy.array([], dtype=self.network_event_dtype) + self.event_index = {} + for ifo in self.ifos: + self.event_index[ifo] = 0 + self.event_index['network'] = 0 + self.template_event_dict['network'] = numpy.array( + [], dtype=self.network_event_dtype) + self.segments = segments + self.time_slides = time_slides + +
+[docs] + def cluster_template_network_events(self, tcolumn, column, window_size, + slide=0): + """ Cluster the internal events over the named column + + Parameters + ---------- + tcolumn + Indicates which column contains the time. + column + The named column to cluster. + window_size + The size of the clustering window. + slide + The time-slide index whose events are clustered. Default is 0. + """ + slide_indices = ( + self.template_event_dict['network']['slide_id'] == slide) + cvec = self.template_event_dict['network'][column][slide_indices] + tvec = self.template_event_dict['network'][tcolumn][slide_indices] + if not window_size == 0: + # cluster events over the window + indices = findchirp_cluster_over_window(tvec, cvec, window_size) + # keep events belonging to other time slides unchanged and merge + # them with the clustered indices for this slide + if any(~slide_indices): + indices = numpy.concatenate(( + numpy.flatnonzero(~slide_indices), + numpy.flatnonzero(slide_indices)[indices])) + indices.sort() + # apply the selected indices to every per-detector and network + # event list + for key in self.template_event_dict: + self.template_event_dict[key] = \ + self.template_event_dict[key][indices] + else: + indices = numpy.arange(len(tvec))
+ + +
+[docs] + def add_template_network_events(self, columns, vectors): + """ Add a vector indexed """ + # initialize with zeros - since vectors can be None, look for the + # longest one that isn't + new_events = numpy.zeros( + max([len(v) for v in vectors if v is not None]), + dtype=self.network_event_dtype + ) + # they shouldn't all be None + assert new_events is not None + new_events['template_id'] = self.template_index + for c, v in zip(columns, vectors): + if v is not None: + if isinstance(v, Array): + new_events[c] = v.numpy() + else: + new_events[c] = v + self.template_events = numpy.append(self.template_events, new_events)
+ + +
+[docs] + def add_template_events_to_network(self, columns, vectors): + """ Add a vector indexed """ + # Just call through to the standard function + self.template_events = self.template_event_dict['network'] + self.add_template_network_events(columns, vectors) + self.template_event_dict['network'] = self.template_events + self.template_events = None
+ + +
+[docs] + def write_to_hdf(self, outname): + self.events.sort(order='template_id') + th = numpy.array( + [p['tmplt'].template_hash for p in self.template_params]) + f = H5FileSyntSugar(outname) + self.write_gating_info_to_hdf(f) + # Output network stuff + f.prefix = 'network' + network_events = numpy.array( + [e for e in self.network_events], dtype=self.network_event_dtype) + for col in network_events.dtype.names: + if col == 'time_index': + f['end_time_gc'] = ( + network_events[col] + / float(self.opt.sample_rate[self.ifos[0].lower()]) + + self.opt.gps_start_time[self.ifos[0].lower()] + ) + else: + f[col] = network_events[col] + starts = [] + ends = [] + for seg in self.segments[self.ifos[0]]: + starts.append(seg.start_time.gpsSeconds) + ends.append(seg.end_time.gpsSeconds) + f['search/segments/start_times'] = starts + f['search/segments/end_times'] = ends + # Individual ifo stuff + for i, ifo in enumerate(self.ifos): + tid = self.events['template_id'][self.events['ifo'] == i] + f.prefix = ifo + ifo_events = numpy.array([e for e in self.events + if e['ifo'] == self.ifo_dict[ifo]], dtype=self.event_dtype) + if len(ifo_events): + f['snr'] = abs(ifo_events['snr']) + f['event_id'] = ifo_events['event_id'] + try: + # Precessing + f['u_vals'] = ifo_events['u_vals'] + f['coa_phase'] = ifo_events['coa_phase'] + f['hplus_cross_corr'] = ifo_events['hplus_cross_corr'] + except Exception: + f['coa_phase'] = numpy.angle(ifo_events['snr']) + f['chisq'] = ifo_events['chisq'] + f['bank_chisq'] = ifo_events['bank_chisq'] + f['bank_chisq_dof'] = ifo_events['bank_chisq_dof'] + f['auto_chisq'] = ifo_events['auto_chisq'] + f['auto_chisq_dof'] = ifo_events['auto_chisq_dof'] + f['end_time'] = ifo_events['time_index'] / \ + float(self.opt.sample_rate[ifo]) + \ + self.opt.gps_start_time[ifo] + f['time_index'] = ifo_events['time_index'] + f['slide_id'] = ifo_events['slide_id'] + try: + # Precessing + template_sigmasq_plus = numpy.array( + [t['sigmasq_plus'] for t in self.template_params], + dtype=numpy.float32 + ) + f['sigmasq_plus'] = template_sigmasq_plus[tid] + template_sigmasq_cross = numpy.array( + [t['sigmasq_cross'] for t in self.template_params], + dtype=numpy.float32 + ) + f['sigmasq_cross'] = template_sigmasq_cross[tid] + # FIXME: I want to put something here, but I haven't yet + # figured out what it should be. I think we would also + # need information from the plus and cross correlation + # (both real and imaginary(?)) to get this. + f['sigmasq'] = template_sigmasq_plus[tid] + except Exception: + # Not precessing + template_sigmasq = numpy.array( + [t['sigmasq'][ifo] for t in self.template_params], + dtype=numpy.float32) + f['sigmasq'] = template_sigmasq[tid] + + # FIXME: Can we get this value from the autochisq instance? 
+ # cont_dof = self.opt.autochi_number_points + # if self.opt.autochi_onesided is None: + # cont_dof = cont_dof * 2 + # if self.opt.autochi_two_phase: + # cont_dof = cont_dof * 2 + # if self.opt.autochi_max_valued_dof: + # cont_dof = self.opt.autochi_max_valued_dof + # f['cont_chisq_dof'] = numpy.repeat(cont_dof, len(ifo_events)) + + if 'chisq_dof' in ifo_events.dtype.names: + f['chisq_dof'] = ifo_events['chisq_dof'] / 2 + 1 + else: + f['chisq_dof'] = numpy.zeros(len(ifo_events)) + + f['template_hash'] = th[tid] + f['search/time_slides'] = numpy.array(self.time_slides[ifo]) + if self.opt.trig_start_time: + f['search/start_time'] = numpy.array([ + self.opt.trig_start_time[ifo]], dtype=numpy.int32) + search_start_time = float(self.opt.trig_start_time[ifo]) + else: + f['search/start_time'] = numpy.array([ + self.opt.gps_start_time[ifo] + + self.opt.segment_start_pad[ifo]], dtype=numpy.int32) + search_start_time = float(self.opt.gps_start_time[ifo] + + self.opt.segment_start_pad[ifo]) + if self.opt.trig_end_time: + f['search/end_time'] = numpy.array([ + self.opt.trig_end_time[ifo]], dtype=numpy.int32) + search_end_time = float(self.opt.trig_end_time[ifo]) + else: + f['search/end_time'] = numpy.array( + [self.opt.gps_end_time[ifo] - + self.opt.segment_end_pad[ifo]], dtype=numpy.int32) + search_end_time = float(self.opt.gps_end_time[ifo] - + self.opt.segment_end_pad[ifo]) + + if self.write_performance: + self.analysis_time = search_end_time - search_start_time + time_ratio = numpy.array([float(self.analysis_time) / + float(self.run_time)]) + temps_per_core = float(self.ntemplates) / float(self.ncores) + filters_per_core = float(self.nfilters) / float(self.ncores) + f['search/templates_per_core'] = \ + numpy.array([float(temps_per_core) * float(time_ratio)]) + f['search/filter_rate_per_core'] = \ + numpy.array([filters_per_core / float(self.run_time)]) + f['search/setup_time_fraction'] = \ + numpy.array([float(self.setup_time) / float(self.run_time)])
+ + +
+[docs] + def finalize_template_events(self): + # Check that none of the template events have the same time index as an + # existing event in events. I.e. don't list the same ifo event multiple + # times when looping over sky points and time slides. + existing_times = {} + new_times = {} + existing_template_id = {} + new_template_id = {} + existing_events_mask = {} + new_template_event_mask = {} + existing_template_event_mask = {} + for i, ifo in enumerate(self.ifos): + ifo_events = numpy.where(self.events['ifo'] == i) + existing_times[ifo] = self.events['time_index'][ifo_events] + new_times[ifo] = self.template_event_dict[ifo]['time_index'] + existing_template_id[ifo] = self.events['template_id'][ifo_events] + new_template_id[ifo] = self.template_event_dict[ifo]['template_id'] + # This is true for each existing event that has the same time index + # and template id as a template trigger. + existing_events_mask[ifo] = numpy.argwhere( + numpy.logical_and( + numpy.isin(existing_times[ifo], new_times[ifo]), + numpy.isin(existing_template_id[ifo], new_template_id[ifo]) + )).reshape(-1,) + # This is true for each template event that has either a new + # trigger time or a new template id. + new_template_event_mask[ifo] = numpy.argwhere( + numpy.logical_or( + ~numpy.isin(new_times[ifo], existing_times[ifo]), + ~numpy.isin(new_template_id[ifo], existing_template_id[ifo]) + )).reshape(-1,) + # This is true for each template event that has the same time index + # and template id as an exisitng event trigger. + existing_template_event_mask[ifo] = numpy.argwhere( + numpy.logical_and( + numpy.isin(new_times[ifo], existing_times[ifo]), + numpy.isin(new_template_id[ifo], existing_template_id[ifo]) + )).reshape(-1,) + # Set ids (These show how each trigger in the single ifo trigger + # list correspond to the network triggers) + num_events = len(new_template_event_mask[ifo]) + new_event_ids = numpy.arange(self.event_index[ifo], + self.event_index[ifo] + num_events) + # Every template event that corresponds to a new trigger gets a new + # id. Triggers that have been found before are not saved. + self.template_event_dict[ifo]['event_id'][ + new_template_event_mask[ifo]] = new_event_ids + self.template_event_dict['network'][ifo + '_event_id'][ + new_template_event_mask[ifo]] = new_event_ids + # Template events that have been found before get the event id of + # the first time they were found. + self.template_event_dict['network'][ifo + '_event_id'][ + existing_template_event_mask[ifo]] = \ + self.events[self.events['ifo'] == i][ + existing_events_mask[ifo]]['event_id'] + self.event_index[ifo] = self.event_index[ifo] + num_events + + # Add the network event ids for the events with this template. + num_events = len(self.template_event_dict['network']) + new_event_ids = numpy.arange(self.event_index['network'], + self.event_index['network'] + num_events) + self.event_index['network'] = self.event_index['network'] + num_events + self.template_event_dict['network']['event_id'] = new_event_ids + # Move template events for each ifo to the events list + for ifo in self.ifos: + self.events = numpy.append( + self.events, + self.template_event_dict[ifo][new_template_event_mask[ifo]] + ) + self.template_event_dict[ifo] = \ + numpy.array([], dtype=self.event_dtype) + # Move the template events for the network to the network events list + self.network_events = numpy.append(self.network_events, + self.template_event_dict['network']) + self.template_event_dict['network'] = \ + numpy.array([], dtype=self.network_event_dtype)
+
+ + + +
+[docs] +class EventManagerMultiDet(EventManagerMultiDetBase): + def __init__(self, opt, ifos, column, column_types, psd=None, **kwargs): + super(EventManagerMultiDet, self).__init__( + opt, ifos, column, column_types, psd=None, **kwargs) + self.event_index = 0 + +
+[docs] + def cluster_template_events_single_ifo( + self, tcolumn, column, window_size, ifo): + """ Cluster the internal events over the named column + """ + # Just call through to the standard function + self.template_events = self.template_event_dict[ifo] + self.cluster_template_events(tcolumn, column, window_size) + self.template_event_dict[ifo] = self.template_events + self.template_events = None
+ + +
+[docs] + def finalize_template_events(self, perform_coincidence=True, + coinc_window=0.0): + # Set ids + for ifo in self.ifos: + num_events = len(self.template_event_dict[ifo]) + new_event_ids = numpy.arange(self.event_index, + self.event_index+num_events) + self.template_event_dict[ifo]['event_id'] = new_event_ids + self.event_index = self.event_index+num_events + + if perform_coincidence: + if not len(self.ifos) == 2: + err_msg = "Coincidence currently only supported for 2 ifos." + raise ValueError(err_msg) + ifo1 = self.ifos[0] + ifo2 = self.ifos[1] + end_times1 = self.template_event_dict[ifo1]['time_index'] /\ + float(self.opt.sample_rate[ifo1]) + self.opt.gps_start_time[ifo1] + end_times2 = self.template_event_dict[ifo2]['time_index'] /\ + float(self.opt.sample_rate[ifo2]) + self.opt.gps_start_time[ifo2] + light_travel_time = Detector(ifo1).light_travel_time_to_detector( + Detector(ifo2)) + coinc_window = coinc_window + light_travel_time + # FIXME: Remove!!! + coinc_window = 2.0 + if len(end_times1) and len(end_times2): + idx_list1, idx_list2, _ = \ + coinc.time_coincidence(end_times1, end_times2, + coinc_window) + if len(idx_list1): + for idx1, idx2 in zip(idx_list1, idx_list2): + event1 = self.template_event_dict[ifo1][idx1] + event2 = self.template_event_dict[ifo2][idx2] + self.coinc_list.append((event1, event2)) + for ifo in self.ifos: + self.events = numpy.append(self.events, + self.template_event_dict[ifo]) + self.template_event_dict[ifo] = numpy.array([], + dtype=self.event_dtype)
+ + +
+[docs] + def write_to_hdf(self, outname): + self.events.sort(order='template_id') + th = numpy.array([p['tmplt'].template_hash for p in + self.template_params]) + tid = self.events['template_id'] + f = H5FileSyntSugar(outname) + self.write_gating_info_to_hdf(f) + for ifo in self.ifos: + f.prefix = ifo + ifo_events = numpy.array([e for e in self.events if + e['ifo'] == self.ifo_dict[ifo]], + dtype=self.event_dtype) + if len(ifo_events): + f['snr'] = abs(ifo_events['snr']) + try: + # Precessing + f['u_vals'] = ifo_events['u_vals'] + f['coa_phase'] = ifo_events['coa_phase'] + f['hplus_cross_corr'] = ifo_events['hplus_cross_corr'] + except Exception: + f['coa_phase'] = numpy.angle(ifo_events['snr']) + f['chisq'] = ifo_events['chisq'] + f['bank_chisq'] = ifo_events['bank_chisq'] + f['bank_chisq_dof'] = ifo_events['bank_chisq_dof'] + f['cont_chisq'] = ifo_events['cont_chisq'] + f['end_time'] = ifo_events['time_index'] / \ + float(self.opt.sample_rate[ifo]) + \ + self.opt.gps_start_time[ifo] + try: + # Precessing + template_sigmasq_plus = numpy.array([t['sigmasq_plus'] for + t in self.template_params], dtype=numpy.float32) + f['sigmasq_plus'] = template_sigmasq_plus[tid] + template_sigmasq_cross = numpy.array([t['sigmasq_cross'] + for t in self.template_params], dtype=numpy.float32) + f['sigmasq_cross'] = template_sigmasq_cross[tid] + # FIXME: I want to put something here, but I haven't yet + # figured out what it should be. I think we would also + # need information from the plus and cross correlation + # (both real and imaginary(?)) to get this. + f['sigmasq'] = template_sigmasq_plus[tid] + except Exception: + # Not precessing + template_sigmasq = numpy.array([t['sigmasq'][ifo] for t in + self.template_params], + dtype=numpy.float32) + f['sigmasq'] = template_sigmasq[tid] + + # FIXME: Can we get this value from the autochisq instance? 
+ cont_dof = self.opt.autochi_number_points + if self.opt.autochi_onesided is None: + cont_dof = cont_dof * 2 + # if self.opt.autochi_two_phase: + # cont_dof = cont_dof * 2 + # if self.opt.autochi_max_valued_dof: + # cont_dof = self.opt.autochi_max_valued_dof + f['cont_chisq_dof'] = numpy.repeat(cont_dof, len(ifo_events)) + + if 'chisq_dof' in ifo_events.dtype.names: + f['chisq_dof'] = ifo_events['chisq_dof'] / 2 + 1 + else: + f['chisq_dof'] = numpy.zeros(len(ifo_events)) + + f['template_hash'] = th[tid] + + if self.opt.psdvar_segment is not None: + f['psd_var_val'] = ifo_events['psd_var_val'] + + if self.opt.trig_start_time: + f['search/start_time'] = numpy.array( + [self.opt.trig_start_time[ifo]], dtype=numpy.int32) + search_start_time = float(self.opt.trig_start_time[ifo]) + else: + f['search/start_time'] = numpy.array( + [self.opt.gps_start_time[ifo] + + self.opt.segment_start_pad[ifo]], dtype=numpy.int32) + search_start_time = float(self.opt.gps_start_time[ifo] + + self.opt.segment_start_pad[ifo]) + if self.opt.trig_end_time: + f['search/end_time'] = numpy.array( + [self.opt.trig_end_time[ifo]], dtype=numpy.int32) + search_end_time = float(self.opt.trig_end_time[ifo]) + else: + f['search/end_time'] = numpy.array( + [self.opt.gps_end_time[ifo] - + self.opt.segment_end_pad[ifo]], dtype=numpy.int32) + search_end_time = float(self.opt.gps_end_time[ifo] - + self.opt.segment_end_pad[ifo]) + + if self.write_performance: + self.analysis_time = search_end_time - search_start_time + time_ratio = numpy.array( + [float(self.analysis_time) / float(self.run_time)]) + temps_per_core = float(self.ntemplates) / float(self.ncores) + filters_per_core = float(self.nfilters) / float(self.ncores) + f['search/templates_per_core'] = \ + numpy.array([float(temps_per_core) * float(time_ratio)]) + f['search/filter_rate_per_core'] = \ + numpy.array([filters_per_core / float(self.run_time)]) + f['search/setup_time_fraction'] = \ + numpy.array([float(self.setup_time) / float(self.run_time)])
+
+ + + +__all__ = ['threshold_and_cluster', 'findchirp_cluster_over_window', + 'threshold', 'cluster_reduce', 'ThresholdCluster', + 'threshold_real_numpy', 'threshold_only', + 'EventManager', 'EventManagerMultiDet', 'EventManagerCoherent'] +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/ranking.html b/latest/html/_modules/pycbc/events/ranking.html new file mode 100644 index 00000000000..d56f24ef966 --- /dev/null +++ b/latest/html/_modules/pycbc/events/ranking.html @@ -0,0 +1,516 @@ + + + + + + pycbc.events.ranking — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.events.ranking

+""" This module contains functions for calculating single-ifo ranking
+statistic values
+"""
+import logging
+import numpy
+
+logger = logging.getLogger('pycbc.events.ranking')
+
+
+
+[docs] +def effsnr(snr, reduced_x2, fac=250.): + """Calculate the effective SNR statistic. See (S5y1 paper) for definition. + """ + snr = numpy.array(snr, ndmin=1, dtype=numpy.float64) + rchisq = numpy.array(reduced_x2, ndmin=1, dtype=numpy.float64) + esnr = snr / (1 + snr ** 2 / fac) ** 0.25 / rchisq ** 0.25 + + # If snr input is float, return a float. Otherwise return numpy array. + if hasattr(snr, '__len__'): + return esnr + else: + return esnr[0]
+ + + +
+[docs] +def newsnr(snr, reduced_x2, q=6., n=2.): + """Calculate the re-weighted SNR statistic ('newSNR') from given SNR and + reduced chi-squared values. See http://arxiv.org/abs/1208.3491 for + definition. Previous implementation in glue/ligolw/lsctables.py + """ + nsnr = numpy.array(snr, ndmin=1, dtype=numpy.float64) + reduced_x2 = numpy.array(reduced_x2, ndmin=1, dtype=numpy.float64) + + # newsnr is only different from snr if reduced chisq > 1 + ind = numpy.where(reduced_x2 > 1.)[0] + nsnr[ind] *= (0.5 * (1. + reduced_x2[ind] ** (q/n))) ** (-1./q) + + # If snr input is float, return a float. Otherwise return numpy array. + if hasattr(snr, '__len__'): + return nsnr + else: + return nsnr[0]
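A quick numerical check of the reweighting above (not part of the module); scalar inputs return scalars, array inputs return arrays:

import numpy
newsnr(8.0, 0.8)   # -> 8.0, unchanged because the reduced chi-squared is <= 1
newsnr(8.0, 3.0)   # -> 8.0 * (0.5 * (1. + 3.0 ** (6. / 2.))) ** (-1. / 6.)
newsnr(numpy.array([8., 8.]), numpy.array([0.8, 3.0]))   # vectorised call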
+ + + +
+[docs] +def newsnr_sgveto(snr, brchisq, sgchisq): + """ Combined SNR derived from NewSNR and Sine-Gaussian Chisq""" + nsnr = numpy.array(newsnr(snr, brchisq), ndmin=1) + sgchisq = numpy.array(sgchisq, ndmin=1) + t = numpy.array(sgchisq > 4, ndmin=1) + if len(t): + nsnr[t] = nsnr[t] / (sgchisq[t] / 4.0) ** 0.5 + + # If snr input is float, return a float. Otherwise return numpy array. + if hasattr(snr, '__len__'): + return nsnr + else: + return nsnr[0]
+ + + +
+[docs] +def newsnr_sgveto_psdvar(snr, brchisq, sgchisq, psd_var_val, + min_expected_psdvar=0.65): + """ Combined SNR derived from SNR, reduced Allen chisq, sine-Gaussian chisq and + PSD variation statistic""" + # If PSD var is lower than the 'minimum usually expected value' stop this + # being used in the statistic. This low value might arise because a + # significant fraction of the "short" PSD period was gated (for instance). + psd_var_val = numpy.array(psd_var_val, copy=True) + psd_var_val[psd_var_val < min_expected_psdvar] = 1. + scaled_snr = snr * (psd_var_val ** -0.5) + scaled_brchisq = brchisq * (psd_var_val ** -1.) + nsnr = newsnr_sgveto(scaled_snr, scaled_brchisq, sgchisq) + + # If snr input is float, return a float. Otherwise return numpy array. + if hasattr(snr, '__len__'): + return nsnr + else: + return nsnr[0]
+ + + +
+[docs] +def newsnr_sgveto_psdvar_threshold(snr, brchisq, sgchisq, psd_var_val, + min_expected_psdvar=0.65, + brchisq_threshold=10.0, + psd_var_val_threshold=10.0): + """ newsnr_sgveto_psdvar with thresholds applied. + + This is the newsnr_sgveto_psdvar statistic with additional options + to threshold on chi-squared or PSD variation. + """ + nsnr = newsnr_sgveto_psdvar(snr, brchisq, sgchisq, psd_var_val, + min_expected_psdvar=min_expected_psdvar) + nsnr = numpy.array(nsnr, ndmin=1) + nsnr[brchisq > brchisq_threshold] = 1. + nsnr[psd_var_val > psd_var_val_threshold] = 1. + + # If snr input is float, return a float. Otherwise return numpy array. + if hasattr(snr, '__len__'): + return nsnr + else: + return nsnr[0]
+ + + +
+[docs] +def newsnr_sgveto_psdvar_scaled(snr, brchisq, sgchisq, psd_var_val, + scaling=0.33, min_expected_psdvar=0.65): + """ Combined SNR derived from NewSNR, Sine-Gaussian Chisq and scaled PSD + variation statistic. """ + nsnr = numpy.array(newsnr_sgveto(snr, brchisq, sgchisq), ndmin=1) + psd_var_val = numpy.array(psd_var_val, ndmin=1, copy=True) + psd_var_val[psd_var_val < min_expected_psdvar] = 1. + + # Default scale is 0.33 as tuned from analysis of data from O2 chunks + nsnr = nsnr / psd_var_val ** scaling + + # If snr input is float, return a float. Otherwise return numpy array. + if hasattr(snr, '__len__'): + return nsnr + else: + return nsnr[0]
+ + + +
+[docs] +def newsnr_sgveto_psdvar_scaled_threshold(snr, bchisq, sgchisq, psd_var_val, + threshold=2.0): + """ Combined SNR derived from NewSNR and Sine-Gaussian Chisq, and + scaled psd variation. + """ + nsnr = newsnr_sgveto_psdvar_scaled(snr, bchisq, sgchisq, psd_var_val) + nsnr = numpy.array(nsnr, ndmin=1) + nsnr[bchisq > threshold] = 1. + + # If snr input is float, return a float. Otherwise return numpy array. + if hasattr(snr, '__len__'): + return nsnr + else: + return nsnr[0]
+ + + +
+[docs] +def get_snr(trigs): + """ + Return SNR from a trigs/dictionary object + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) + Dictionary-like object holding single detector trigger information. + 'snr' is a required key + + Returns + ------- + numpy.ndarray + Array of snr values + """ + return numpy.array(trigs['snr'][:], ndmin=1, dtype=numpy.float32)
+ + + +
+[docs] +def get_newsnr(trigs): + """ + Calculate newsnr ('reweighted SNR') for a trigs/dictionary object + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) + Dictionary-like object holding single detector trigger information. + 'chisq_dof', 'snr', and 'chisq' are required keys + + Returns + ------- + numpy.ndarray + Array of newsnr values + """ + dof = 2. * trigs['chisq_dof'][:] - 2. + nsnr = newsnr(trigs['snr'][:], trigs['chisq'][:] / dof) + return numpy.array(nsnr, ndmin=1, dtype=numpy.float32)
+ + + +
+[docs] +def get_newsnr_sgveto(trigs): + """ + Calculate newsnr re-weighted by the sine-gaussian veto + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) + Dictionary-like object holding single detector trigger information. + 'chisq_dof', 'snr', 'sg_chisq' and 'chisq' are required keys + + Returns + ------- + numpy.ndarray + Array of newsnr values + """ + dof = 2. * trigs['chisq_dof'][:] - 2. + nsnr_sg = newsnr_sgveto(trigs['snr'][:], + trigs['chisq'][:] / dof, + trigs['sg_chisq'][:]) + return numpy.array(nsnr_sg, ndmin=1, dtype=numpy.float32)
+ + + +
+[docs] +def get_newsnr_sgveto_psdvar(trigs): + """ + Calculate snr re-weighted by Allen chisq, sine-gaussian veto and + psd variation statistic + + Parameters + ---------- + trigs: dict of numpy.ndarrays + Dictionary holding single detector trigger information. + 'chisq_dof', 'snr', 'chisq', 'sg_chisq' and 'psd_var_val' are + required keys + + Returns + ------- + numpy.ndarray + Array of newsnr values + """ + dof = 2. * trigs['chisq_dof'][:] - 2. + nsnr_sg_psd = \ + newsnr_sgveto_psdvar(trigs['snr'][:], trigs['chisq'][:] / dof, + trigs['sg_chisq'][:], + trigs['psd_var_val'][:]) + return numpy.array(nsnr_sg_psd, ndmin=1, dtype=numpy.float32)
+ + + +
+[docs] +def get_newsnr_sgveto_psdvar_threshold(trigs): + """ + Calculate newsnr re-weighted by the sine-gaussian veto and psd variation + statistic, with thresholds applied to the reduced chi-squared and the + PSD variation + + Parameters + ---------- + trigs: dict of numpy.ndarrays + Dictionary holding single detector trigger information. + 'chisq_dof', 'snr', 'chisq', 'sg_chisq' and 'psd_var_val' are + required keys + + Returns + ------- + numpy.ndarray + Array of newsnr values + """ + dof = 2. * trigs['chisq_dof'][:] - 2. + nsnr_sg_psdt = newsnr_sgveto_psdvar_threshold( + trigs['snr'][:], trigs['chisq'][:] / dof, + trigs['sg_chisq'][:], + trigs['psd_var_val'][:] + ) + return numpy.array(nsnr_sg_psdt, ndmin=1, dtype=numpy.float32)
+ + + +
+[docs] +def get_newsnr_sgveto_psdvar_scaled(trigs): + """ + Calculate newsnr re-weighted by the sine-gaussian veto and scaled + psd variation statistic + + Parameters + ---------- + trigs: dict of numpy.ndarrays + Dictionary holding single detector trigger information. + 'chisq_dof', 'snr', 'chisq' and 'psd_var_val' are required keys + + Returns + ------- + numpy.ndarray + Array of newsnr values + """ + dof = 2. * trigs['chisq_dof'][:] - 2. + nsnr_sg_psdscale = \ + newsnr_sgveto_psdvar_scaled( + trigs['snr'][:], trigs['chisq'][:] / dof, + trigs['sg_chisq'][:], + trigs['psd_var_val'][:]) + return numpy.array(nsnr_sg_psdscale, ndmin=1, dtype=numpy.float32)
+ + + +
+[docs] +def get_newsnr_sgveto_psdvar_scaled_threshold(trigs): + """ + Calculate newsnr re-weighted by the sine-gaussian veto and scaled + psd variation statistic. A further threshold is applied to the + reduced chisq. + + Parameters + ---------- + trigs: dict of numpy.ndarrays + Dictionary holding single detector trigger information. + 'chisq_dof', 'snr', 'chisq' and 'psd_var_val' are required keys + + Returns + ------- + numpy.ndarray + Array of newsnr values + """ + dof = 2. * trigs['chisq_dof'][:] - 2. + nsnr_sg_psdt = \ + newsnr_sgveto_psdvar_scaled_threshold( + trigs['snr'][:], trigs['chisq'][:] / dof, + trigs['sg_chisq'][:], + trigs['psd_var_val'][:]) + return numpy.array(nsnr_sg_psdt, ndmin=1, dtype=numpy.float32)
+ + + +sngls_ranking_function_dict = { + 'snr': get_snr, + 'newsnr': get_newsnr, + 'new_snr': get_newsnr, + 'newsnr_sgveto': get_newsnr_sgveto, + 'newsnr_sgveto_psdvar': get_newsnr_sgveto_psdvar, + 'newsnr_sgveto_psdvar_threshold': get_newsnr_sgveto_psdvar_threshold, + 'newsnr_sgveto_psdvar_scaled': get_newsnr_sgveto_psdvar_scaled, + 'newsnr_sgveto_psdvar_scaled_threshold': get_newsnr_sgveto_psdvar_scaled_threshold, +} + +# Lists of datasets required in the trigs object for each function +required_datasets = {} +required_datasets['snr'] = ['snr'] +required_datasets['newsnr'] = required_datasets['snr'] + ['chisq', 'chisq_dof'] +required_datasets['new_snr'] = required_datasets['newsnr'] +required_datasets['newsnr_sgveto'] = required_datasets['newsnr'] + ['sg_chisq'] +required_datasets['newsnr_sgveto_psdvar'] = \ + required_datasets['newsnr_sgveto'] + ['psd_var_val'] +required_datasets['newsnr_sgveto_psdvar_threshold'] = \ + required_datasets['newsnr_sgveto_psdvar'] +required_datasets['newsnr_sgveto_psdvar_scaled'] = \ + required_datasets['newsnr_sgveto_psdvar'] +required_datasets['newsnr_sgveto_psdvar_scaled_threshold'] = \ + required_datasets['newsnr_sgveto_psdvar'] + + +
+[docs] +def get_sngls_ranking_from_trigs(trigs, statname, **kwargs): + """ + Return ranking for all trigs given a statname. + + Compute the single-detector ranking for a list of input triggers for a + specific statname. + + Parameters + ----------- + trigs: dict of numpy.ndarrays, SingleDetTriggers or ReadByTemplate + Dictionary holding single detector trigger information. + statname: + The statistic to use. + """ + # Identify correct function + try: + sngl_func = sngls_ranking_function_dict[statname] + except KeyError as exc: + err_msg = 'Single-detector ranking {} not recognized'.format(statname) + raise ValueError(err_msg) from exc + + # NOTE: In the sngl_funcs all the kwargs are explicitly stated, so any + # kwargs sent here must be known to the function. + return sngl_func(trigs, **kwargs)
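A hypothetical call to get_sngls_ranking_from_trigs, using a plain dict of numpy arrays in place of an h5py group of single-detector triggers (values are made up):

import numpy
trigs = {
    'snr': numpy.array([8.0, 10.0]),
    'chisq': numpy.array([20.0, 60.0]),
    'chisq_dof': numpy.array([11., 11.]),  # get_newsnr uses dof = 2 * 11 - 2 = 20
}
get_sngls_ranking_from_trigs(trigs, 'newsnr')
# -> array of reweighted SNRs: the first trigger (reduced chisq 1.0) keeps
#    its SNR, the second (reduced chisq 3.0) is down-weighted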
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/significance.html b/latest/html/_modules/pycbc/events/significance.html new file mode 100644 index 00000000000..9c51e0461fa --- /dev/null +++ b/latest/html/_modules/pycbc/events/significance.html @@ -0,0 +1,625 @@ + + + + + + pycbc.events.significance — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.events.significance

+# Copyright (C) 2022 Gareth Cabourn Davies
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""
+This module contains functions to calculate the significance
+through different estimation methods of the background, and functions that
+read in the associated options to do so.
+"""
+import logging
+import copy
+import numpy as np
+from pycbc.events import trigger_fits as trstats
+from pycbc import conversions as conv
+
+logger = logging.getLogger('pycbc.events.significance')
+
+
+
+[docs] +def count_n_louder(bstat, fstat, dec, + **kwargs): # pylint:disable=unused-argument + """ Calculate for each foreground event the number of background events + that are louder than it. + + Parameters + ---------- + bstat: numpy.ndarray + Array of the background statistic values + fstat: numpy.ndarray or scalar + Array of the foreground statistic values or single value + dec: numpy.ndarray + Array of the decimation factors for the background statistics + + Returns + ------- + cum_back_num: numpy.ndarray + The cumulative array of background triggers. + fore_n_louder: numpy.ndarray + The number of background triggers above each foreground trigger + """ + sort = bstat.argsort() + bstat = copy.deepcopy(bstat)[sort] + dec = copy.deepcopy(dec)[sort] + + # calculate cumulative number of triggers louder than the trigger in + # a given index. We need to subtract the decimation factor, as the cumsum + # includes itself in the first sum (it is inclusive of the first value) + n_louder = dec[::-1].cumsum()[::-1] - dec + + # Determine how many values are louder than the foreground ones + # We need to subtract one from the index, to be consistent with definition + # of n_louder, as here we do want to include the background value at the + # found index + idx = np.searchsorted(bstat, fstat, side='left') - 1 + + # If the foreground are *quieter* than the background or at the same value + # then the search sorted algorithm will choose position -1, which does not + # exist. We force it back to zero. + if isinstance(idx, np.ndarray): # Case where our input is an array + idx[idx < 0] = 0 + else: # Case where our input is just a scalar value + if idx < 0: + idx = 0 + + fore_n_louder = n_louder[idx] + + unsort = sort.argsort() + back_cum_num = n_louder[unsort] + return back_cum_num, fore_n_louder
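A worked example for count_n_louder with illustrative numbers: three background events with unit decimation factors and one foreground event.

import numpy as np
bstat = np.array([2., 4., 6.])           # background statistic values
dec = np.ones_like(bstat)                # decimation factors
back_cum, fore_n = count_n_louder(bstat, np.array([5.]), dec)
# back_cum -> [2., 1., 0.]  (events louder than each background event)
# fore_n   -> [1.]          (one background event is louder than 5)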
+ + + +
+[docs] +def n_louder_from_fit(back_stat, fore_stat, dec_facs, + fit_function='exponential', fit_threshold=0, + **kwargs): # pylint:disable=unused-argument + """ + Use a fit to events in back_stat in order to estimate the + distribution for use in recovering the estimate count of louder + background events. Below the fit threshold, use the n_louder + method for these triggers + + back_stat: numpy.ndarray + Array of the background statistic values + fore_stat: numpy.ndarray or scalar + Array of the foreground statistic values or single value + dec_facs: numpy.ndarray + Array of the decimation factors for the background statistics + fit_function: str + Name of the function to be used for the fit to background + statistic values + fit_threshold: float + Threshold above which triggers use the fitted value, below this + the counted number of louder events will be used + + Returns + ------- + bg_n_louder: numpy.ndarray + The estimated number of background events louder than each + background event + fg_n_louder: numpy.ndarray + The estimated number of background events louder than each + foreground event + """ + + # Calculate the fitting factor of the ranking statistic distribution + alpha, _ = trstats.fit_above_thresh(fit_function, back_stat, + thresh=fit_threshold, + weights=dec_facs) + + # Count background events above threshold as the cum_fit is + # normalised to 1 + bg_above = back_stat > fit_threshold + n_above = np.sum(dec_facs[bg_above]) + fg_above = fore_stat > fit_threshold + + # These will be overwritten, but just to silence a warning + # in the case where trstats.cum_fit returns zero + bg_n_louder = np.zeros_like(back_stat) + fg_n_louder = np.zeros_like(fore_stat) + + # Ue the fit above the threshold + bg_n_louder[bg_above] = n_above * trstats.cum_fit(fit_function, + back_stat[bg_above], + alpha, + fit_threshold) + fg_n_louder[fg_above] = n_above * trstats.cum_fit(fit_function, + fore_stat[fg_above], + alpha, + fit_threshold) + + # Below the fit threshold, we expect there to be sufficient events + # to use the count_n_louder method, and the distribution may deviate + # from the fit function + fg_below = np.logical_not(fg_above) + bg_below = np.logical_not(bg_above) + + # Count the number of below-threshold background events louder than the + # bg and foreground + bg_n_louder[bg_below], fg_n_louder[fg_below] = count_n_louder( + back_stat[bg_below], + fore_stat[fg_below], + dec_facs[bg_below] + ) + + # As we have only counted the louder below-threshold events, need to + # add the above threshold events, which by definition are louder than + # all the below-threshold events + bg_n_louder[bg_below] += n_above + fg_n_louder[fg_below] += n_above + + return bg_n_louder, fg_n_louder
+ + + +_significance_meth_dict = { + 'trigger_fit': n_louder_from_fit, + 'n_louder': count_n_louder +} + +_default_opt_dict = { + 'method': 'n_louder', + 'fit_threshold': None, + 'fit_function': None, + 'far_limit': 0.} + + +
+[docs] +def get_n_louder(back_stat, fore_stat, dec_facs, + method=_default_opt_dict['method'], + **kwargs): # pylint:disable=unused-argument + """ + Wrapper to find the correct n_louder calculation method using standard + inputs + """ + return _significance_meth_dict[method]( + back_stat, + fore_stat, + dec_facs, + **kwargs)
+ + + +
+[docs] +def get_far(back_stat, fore_stat, dec_facs, + background_time, + method=_default_opt_dict['method'], + **kwargs): # pylint:disable=unused-argument + """ + Return the appropriate FAR given the significance calculation method + + If the n_louder method is used, find the IFAR according to Eq.17-18 + of Usman et al., arXiv:1508.02357. The p-value of a candidate in a + search of duration T, with n_bg louder time shifted events over a + total background time T_bg is + `p = 1 - exp(-T * (n_bg + 1) / T_bg)` + corresponding to an effective false alarm rate of (n_bg + 1) / T_bg. + + If the trigger_fit method is used, we are extrapolating the background + for the specific aim of FAR not being limited to 1 / T_bg, and so we + do not add 1 to n_bg + + Parameters + ---------- + See description in get_n_louder for most parameters + + background_time: float + The amount of time to convert the number of louder events into + a FAR + + """ + bg_n_louder, fg_n_louder = get_n_louder( + back_stat, + fore_stat, + dec_facs, + method=method, + **kwargs + ) + + # If we are counting the number of louder events in the background, + # we add one. This is part of the p-value calculation in Usman 2015. + # If we are doing trigger fit extrapolation, this is not needed + if method == 'n_louder': + bg_n_louder += 1 + fg_n_louder += 1 + + bg_far = bg_n_louder / background_time + fg_far = fg_n_louder / background_time + + return bg_far, fg_far
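Continuing the same illustrative numbers, the n_louder branch of get_far adds one to the louder-event counts before dividing by the background time, matching the p-value construction cited above:

import numpy as np
T_bg = 100. * 365.25 * 24. * 3600.       # ~100 years of background time, in seconds
bstat = np.array([2., 4., 6.])
bg_far, fg_far = get_far(bstat, np.array([5.]), np.ones_like(bstat), T_bg,
                         method='n_louder')
# fg_far -> [2. / T_bg], i.e. (n_bg + 1) / T_bg false alarms per second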
+ + + +
+[docs] +def insert_significance_option_group(parser): + """ + Add some options for use when a significance is being estimated from + events or event distributions. + """ + parser.add_argument('--far-calculation-method', nargs='+', + default=[], + help="Method used for FAR calculation in each " + "detector combination, given as " + "combination:method pairs, i.e. " + "H1:trigger_fit H1L1:n_louder H1L1V1:n_louder " + "etc. Method options are [" + + ",".join(_significance_meth_dict.keys()) + + "]. Default = n_louder for all not given") + parser.add_argument('--fit-threshold', nargs='+', default=[], + help="Trigger statistic fit thresholds for FAN " + "estimation, given as combination-value pairs " + "ex. H1:0 L1:0 V1:-4 for all combinations with " + "--far-calculation-method = trigger_fit") + parser.add_argument("--fit-function", nargs='+', default=[], + help="Functional form for the statistic slope fit if " + "--far-calculation-method is 'trigger_fit'. " + "Given as combination:function pairs, i.e. " + "H1:exponential H1L1:n_louder H1L1V1:n_louder. " + "Options: [" + + ",".join(trstats.fitalpha_dict.keys()) + "]. " + "Default = exponential for all") + parser.add_argument('--limit-ifar', nargs='+', default=[], + help="Impose upper limits on IFAR values (years)" + ". Given as combination:value pairs, eg " + "H1L1:10000 L1:1000. Used to avoid under/" + "overflows for loud signals and injections " + "using the fit extrapolation method. A value" + " 0 or no value means unlimited IFAR")
+ + + +
+[docs] +def positive_float(inp): + """ + Wrapper around float conversion which ensures that the float must be + positive or zero + """ + fl_in = float(inp) + if fl_in < 0: + logger.warning("Value provided to positive_float is less than zero, " + "this is not allowed") + raise ValueError + return fl_in
+ + + +
+[docs] +def check_significance_options(args, parser): + """ + Check the significance group options + """ + # Check that the combo:method/function/threshold are in the + # right format, and are in allowed combinations + lists_to_check = [(args.far_calculation_method, str, + _significance_meth_dict.keys()), + (args.fit_function, str, + trstats.fitalpha_dict.keys()), + (args.fit_threshold, float, None), + (args.limit_ifar, positive_float, None)] + + for list_to_check, type_to_convert, allowed_values in lists_to_check: + combo_list = [] + for combo_value in list_to_check: + try: + combo, value = tuple(combo_value.split(':')) + except ValueError: + parser.error("Need combo:value format, got %s" % combo_value) + + if combo in combo_list: + parser.error("Duplicate combo %s in a significance " + "option" % combo) + combo_list.append(combo) + + try: + type_to_convert(value) + except ValueError: + err_fmat = "Value {} of combo {} can't be converted" + parser.error(err_fmat.format(value, combo)) + + if allowed_values is not None and \ + type_to_convert(value) not in allowed_values: + err_fmat = "Value {} of combo {} is not in allowed values: {}" + parser.error(err_fmat.format(value, combo, allowed_values)) + + # Are the functions/thresholds appropriate for the methods given? + methods = {} + # A method has been specified + for combo_value in args.far_calculation_method: + combo, value = tuple(combo_value.split(':')) + methods[combo] = value + + # A function or threshold has been specified + function_or_thresh_given = [] + for combo_value in args.fit_function + args.fit_threshold: + combo, _ = tuple(combo_value.split(':')) + if combo not in methods: + # Assign the default method for use in further tests + methods[combo] = _default_opt_dict['method'] + function_or_thresh_given.append(combo) + + for combo, value in methods.items(): + if value != 'trigger_fit' and combo in function_or_thresh_given: + # Function/Threshold given for combo not using trigger_fit method + parser.error("--fit-function and/or --fit-threshold given for " + + combo + " which has method " + value) + elif value == 'trigger_fit' and combo not in function_or_thresh_given: + # Threshold not given for trigger_fit combo + parser.error("Threshold required for combo " + combo)
+ + + +
+[docs] +def ifar_opt_to_far_limit(ifar_str): + """ + Convert the string of an IFAR limit in years into a + float FAR limit in Hz. + + Parameters + ---------- + ifar_str: string + Upper limit on IFAR in years. Zero indicates no upper limit + + """ + ifar_float = positive_float(ifar_str) + + far_hz = 0. if (ifar_float == 0.) else conv.sec_to_year(1. / ifar_float) + + return far_hz
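For example (values illustrative), an IFAR limit of 100 years corresponds to a FAR limit of roughly 3.2e-10 Hz, while "0" disables the limit:

ifar_opt_to_far_limit('100')   # ~= 1. / (100 * 365.25 * 24 * 3600) Hz
ifar_opt_to_far_limit('0')     # -> 0.0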
+ + + +
+[docs] +def digest_significance_options(combo_keys, args): + """ + Read in information from the significance option group and ensure + it makes sense before putting into a dictionary + + Parameters + ---------- + + combo_keys: list of strings + list of detector combinations for which options are needed + + args: parsed arguments + from argparse ArgumentParser parse_args() + + Returns + ------- + significance_dict: dictionary + Dictionary containing method, threshold and function for trigger fits + as appropriate, and any limit on FAR (Hz) + """ + + lists_to_unpack = [('method', args.far_calculation_method, str), + ('fit_function', args.fit_function, str), + ('fit_threshold', args.fit_threshold, float), + ('far_limit', args.limit_ifar, ifar_opt_to_far_limit)] + + significance_dict = {} + # Set everything as a default to start with: + for combo in combo_keys: + significance_dict[combo] = copy.deepcopy(_default_opt_dict) + + # Unpack everything from the arguments into the dictionary + for argument_key, arg_to_unpack, conv_func in lists_to_unpack: + for combo_value in arg_to_unpack: + combo, value = tuple(combo_value.split(':')) + if combo not in significance_dict: + # Allow options for detector combos that are not actually + # used/required for a given job. Such options have + # no effect, but emit a warning for (e.g.) diagnostic checks + logger.warning("Key %s not used by this code, uses %s", + combo, combo_keys) + significance_dict[combo] = copy.deepcopy(_default_opt_dict) + significance_dict[combo][argument_key] = conv_func(value) + + return significance_dict
+ + + +
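# A minimal usage sketch (not part of the module): once the options have been
# validated, digest_significance_options() builds one entry per detector
# combination, starting from _default_opt_dict and overriding any field given
# as combo:value on the command line.
#
#   significance_dict = digest_significance_options(['H1L1', 'H1L1V1'], args)
#   significance_dict['H1L1']['method']       # e.g. 'trigger_fit'
#   significance_dict['H1L1']['far_limit']    # FAR limit in Hz, 0 = no limit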
+[docs] +def apply_far_limit(far, significance_dict, combo=None): + """ + Apply a FAR limit to events according to command line options. + + If far_limit in significance_dict is zero, no limit is applied. + + Parameters + ---------- + far: numpy array + False alarm rate values (Hz) to be limited + significance_dict: dictionary + Dictionary containing any limit on FAR (Hz), made by + digest_significance_options + combo: numpy.array or string + Array of IFO combinations given as utf-8 encoded strings, or a string + which defines the IFO combination for all events + + Returns + ------- + far_out: numpy array + FARs with far limit applied as appropriate + + """ + far_out = copy.deepcopy(far) + if isinstance(combo, str): + # Single IFO combo used + if significance_dict[combo]['far_limit'] == 0: + return far_out + far_limit_str = f"{significance_dict[combo]['far_limit']:.3e}" + logger.info("Applying FAR limit of %s to %s events", + far_limit_str, combo) + far_out = np.maximum(far, significance_dict[combo]['far_limit']) + else: + # IFO combo supplied as an array, by e.g. pycbc_add_statmap + # Need to check which events are in which IFO combo in order to + # apply the right limit to each + for ifo_combo in significance_dict: + if significance_dict[ifo_combo]['far_limit'] == 0: + continue + far_limit_str = f"{significance_dict[ifo_combo]['far_limit']:.3e}" + logger.info("Applying FAR limit of %s to %s events", + far_limit_str, ifo_combo) + this_combo_idx = combo == ifo_combo.encode('utf-8') + far_out[this_combo_idx] = np.maximum( + far[this_combo_idx], + significance_dict[ifo_combo]['far_limit'] + ) + return far_out
+ +
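# A minimal numerical sketch of the limiting step above (illustrative values,
# not part of the module): applying the FAR limit is an element-wise maximum
# against the per-combination floor in Hz, so only events rarer than the limit
# are changed.
import numpy as np
far = np.array([1.0e-12, 1.0e-8])
far_floored = np.maximum(far, 3.17e-10)   # -> [3.17e-10, 1.0e-8]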
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/single.html b/latest/html/_modules/pycbc/events/single.html new file mode 100644 index 00000000000..b01f1d08813 --- /dev/null +++ b/latest/html/_modules/pycbc/events/single.html @@ -0,0 +1,547 @@ + + + + + + pycbc.events.single — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.single

+""" utilities for assigning FAR to single detector triggers
+"""
+import logging
+import copy
+import threading
+import time
+import numpy as np
+
+from pycbc.events import trigger_fits as fits, stat
+from pycbc.types import MultiDetOptionAction
+from pycbc import conversions as conv
+from pycbc.io.hdf import HFile
+from pycbc import bin_utils
+
+logger = logging.getLogger('pycbc.events.single')
+
+
+
+[docs] +class LiveSingle(object): + def __init__(self, ifo, + ranking_threshold=10.0, + reduced_chisq_threshold=5, + duration_threshold=0, + fit_file=None, + sngl_ifar_est_dist=None, + fixed_ifar=None, + maximum_ifar=None, + statistic=None, + sngl_ranking=None, + stat_files=None, + statistic_refresh_rate=None, + **kwargs): + """ + Parameters + ---------- + ifo: str + Name of the ifo that is being analyzed + ranking_threshold: float + Minimum value for the reweighted SNR of the event under + consideration. Which reweighted SNR is used is defined by + sngl_ranking + reduced_chisq_threshold: float + Maximum value for the reduced chisquared of the event under + consideration + duration_threshold: float + Minimum value for the duration of the template which found the + event under consideration + fit_file: str or path + (optional) the file containing information about the + single-detector event significance distribution fits + sngl_ifar_est_dist: str + Which trigger distribution to use when calculating IFAR of + single-detector events + fixed_ifar: float + (optional) give a fixed IFAR value to any event which passes the + threshold criteria + statistic: str + The name of the statistic to rank events. + sngl_ranking: str + The single detector ranking to use with the background statistic + stat_files: list of strs + List of filenames that contain information used to construct + various coincident statistics. + maximum_ifar: float + The largest inverse false alarm rate in years that we would like to + calculate. + statistic_refresh_rate: float + How regularly to run the update_files method on the statistic + class (in seconds); by default this is not done + kwargs: dict + Additional options for the statistic to use. See stat.py + for more details on statistic options. + """ + self.ifo = ifo + self.fit_file = fit_file + self.sngl_ifar_est_dist = sngl_ifar_est_dist + self.fixed_ifar = fixed_ifar + self.maximum_ifar = maximum_ifar + + self.time_stat_refreshed = time.time() + self.stat_calculator_lock = threading.Lock() + self.statistic_refresh_rate = statistic_refresh_rate + + stat_class = stat.get_statistic(statistic) + self.stat_calculator = stat_class( + sngl_ranking, + stat_files, + ifos=[ifo], + **kwargs + ) + + self.thresholds = { + "ranking": ranking_threshold, + "reduced_chisq": reduced_chisq_threshold, + "duration": duration_threshold}
+[docs] + @staticmethod + def insert_args(parser): + parser.add_argument('--single-ranking-threshold', nargs='+', + type=float, action=MultiDetOptionAction, + help='Single ranking threshold for ' + 'single-detector events. Can be given ' + 'as a single value or as detector-value ' + 'pairs, e.g. H1:6 L1:7 V1:6.5') + parser.add_argument('--single-reduced-chisq-threshold', nargs='+', + type=float, action=MultiDetOptionAction, + help='Maximum reduced chi-squared threshold for ' + 'single triggers. Calculated after any PSD ' + 'variation reweighting is applied. Can be ' + 'given as a single value or as ' + 'detector-value pairs, e.g. H1:2 L1:2 V1:3') + parser.add_argument('--single-duration-threshold', nargs='+', + type=float, action=MultiDetOptionAction, + help='Minimum duration threshold for single ' + 'triggers. Can be given as a single value ' + 'or as detector-value pairs, e.g. H1:6 L1:6 ' + 'V1:8') + parser.add_argument('--single-fixed-ifar', nargs='+', + type=float, action=MultiDetOptionAction, + help='A fixed value for IFAR, still uses cuts ' + 'defined by command line. Can be given as ' + 'a single value or as detector-value pairs, ' + 'e.g. H1:0.001 L1:0.001 V1:0.0005') + parser.add_argument('--single-maximum-ifar', nargs='+', + type=float, action=MultiDetOptionAction, + help='A maximum possible value for IFAR for ' + 'single-detector events. Can be given as ' + 'a single value or as detector-value pairs, ' + 'e.g. H1:100 L1:1000 V1:50') + parser.add_argument('--single-fit-file', + help='File which contains definitions of fit ' + 'coefficients and counts for specific ' + 'single trigger IFAR fitting.') + parser.add_argument('--sngl-ifar-est-dist', nargs='+', + action=MultiDetOptionAction, + help='Which trigger distribution to use when ' + 'calculating IFAR of single triggers. ' + 'Can be given as a single value or as ' + 'detector-value pairs, e.g. H1:mean ' + 'L1:mean V1:conservative')
+ + +
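# A usage sketch (not part of the module): each option above accepts either a
# single value applied to every detector or explicit detector:value pairs,
# courtesy of MultiDetOptionAction. Illustrative command-line fragment
# (values are made up):
#
#   --single-ranking-threshold H1:6 L1:7 V1:6.5 \
#   --single-reduced-chisq-threshold 2 \
#   --single-duration-threshold H1:6 L1:6 V1:8 \
#   --sngl-ifar-est-dist conservative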
+[docs] + @staticmethod + def verify_args(args, parser, ifos): + sngl_opts = [args.single_reduced_chisq_threshold, + args.single_duration_threshold, + args.single_ranking_threshold, + args.sngl_ifar_est_dist] + + sngl_opts_str = ("--single-reduced-chisq-threshold, " + "--single-duration-threshold, " + "--single-ranking-threshold, " + "--sngl-ifar-est-dist") + + if any(sngl_opts) and not all(sngl_opts): + parser.error(f"Single detector trigger options ({sngl_opts_str}) " + "must either all be given or none.") + + if args.enable_single_detector_upload \ + and not args.enable_gracedb_upload: + parser.error("--enable-single-detector-upload requires " + "--enable-gracedb-upload to be set.") + + sngl_optional_opts = [args.single_fixed_ifar, + args.single_fit_file, + args.single_maximum_ifar] + sngl_optional_opts_str = ("--single-fixed-ifar, " + "--single-fit-file," + "--single-maximum-ifar") + if any(sngl_optional_opts) and not all(sngl_opts): + parser.error("Optional singles options " + f"({sngl_optional_opts_str}) given but not all " + f"required options ({sngl_opts_str}) are.") + + for ifo in ifos: + # Check which option(s) are needed for each IFO and if they exist: + + # Notes for the logic here: + # args.sngl_ifar_est_dist.default_set is True if single value has + # been set to be the same for all values + # bool(args.sngl_ifar_est_dist) is True if option is given + if args.sngl_ifar_est_dist and \ + not args.sngl_ifar_est_dist.default_set \ + and not args.sngl_ifar_est_dist[ifo]: + # Option has been given, different for each IFO, + # and this one is not present + parser.error("All IFOs required in --single-ifar-est-dist " + "if IFO-specific options are given.") + + if args.sngl_ifar_est_dist[ifo] is None: + # Default - no singles being used + continue + + if not args.sngl_ifar_est_dist[ifo] == 'fixed': + if not args.single_fit_file: + # Fixed IFAR option doesnt need the fits file + parser.error(f"Single detector trigger fits file must be " + "given if --single-ifar-est-dist is not " + f"fixed for all ifos (at least {ifo} has " + f"option {args.sngl_ifar_est_dist[ifo]}).") + if ifo in args.single_fixed_ifar: + parser.error(f"Value {args.single_fixed_ifar[ifo]} given " + f"for {ifo} in --single-fixed-ifar, but " + f"--single-ifar-est-dist for {ifo} " + f"is {args.sngl_ifar_est_dist[ifo]}, not " + "fixed.") + else: + # Check that the fixed IFAR value has actually been + # given if using this instead of a distribution + if not args.single_fixed_ifar[ifo]: + parser.error(f"--single-fixed-ifar must be " + "given if --single-ifar-est-dist is fixed. " + f"This is true for at least {ifo}.") + + # Return value is a boolean whether we are analysing singles or not + # The checks already performed mean that all(sngl_opts) is okay + return all(sngl_opts)
+ + +
+[docs] + @classmethod + def from_cli(cls, args, ifo): + # Allow None inputs + stat_files = args.statistic_files or [] + stat_keywords = args.statistic_keywords or [] + + # flatten the list of lists of filenames to a single list + # (may be empty) + stat_files = sum(stat_files, []) + + kwargs = stat.parse_statistic_keywords_opt(stat_keywords) + return cls( + ifo, ranking_threshold=args.single_ranking_threshold[ifo], + reduced_chisq_threshold=args.single_reduced_chisq_threshold[ifo], + duration_threshold=args.single_duration_threshold[ifo], + fixed_ifar=args.single_fixed_ifar, + maximum_ifar=args.single_maximum_ifar[ifo], + fit_file=args.single_fit_file, + sngl_ifar_est_dist=args.sngl_ifar_est_dist[ifo], + statistic=args.ranking_statistic, + sngl_ranking=args.sngl_ranking, + stat_files=stat_files, + statistic_refresh_rate=args.statistic_refresh_rate, + **kwargs + )
+ + +
+[docs] + def check(self, trigs, data_reader): + """ Look for a single detector trigger that passes the thresholds in + the current data. + """ + + # Apply cuts to trigs before clustering + # Cut on snr so that triggers which could not reach the ranking + # threshold do not have ranking calculated + if 'psd_var_val' in trigs: + # We should apply the PSD variation rescaling, as this can + # re-weight the SNR to be above SNR + trig_chisq = trigs['chisq'] / trigs['psd_var_val'] + trig_snr = trigs['snr'] / (trigs['psd_var_val'] ** 0.5) + else: + trig_chisq = trigs['chisq'] + trig_snr = trigs['snr'] + + valid_idx = (trigs['template_duration'] > + self.thresholds['duration']) & \ + (trig_chisq < + self.thresholds['reduced_chisq']) & \ + (trig_snr > + self.thresholds['ranking']) + if not np.any(valid_idx): + return None + + cut_trigs = {k: trigs[k][valid_idx] for k in trigs} + + # Convert back from the pycbc live convention of chisq always + # meaning the reduced chisq. + trigsc = copy.copy(cut_trigs) + trigsc['ifo'] = self.ifo + trigsc['chisq'] = cut_trigs['chisq'] * cut_trigs['chisq_dof'] + trigsc['chisq_dof'] = (cut_trigs['chisq_dof'] + 2) / 2 + + # Calculate the ranking reweighted SNR for cutting + with self.stat_calculator_lock: + single_rank = self.stat_calculator.get_sngl_ranking(trigsc) + + sngl_idx = single_rank > self.thresholds['ranking'] + if not np.any(sngl_idx): + return None + + cutall_trigs = {k: trigsc[k][sngl_idx] + for k in trigs} + + # Calculate the ranking statistic + with self.stat_calculator_lock: + sngl_stat = self.stat_calculator.single(cutall_trigs) + rank = self.stat_calculator.rank_stat_single((self.ifo, sngl_stat)) + + # 'cluster' by taking the maximal statistic value over the trigger set + i = rank.argmax() + + # calculate the (inverse) false-alarm rate + ifar = self.calculate_ifar( + rank[i], + trigsc['template_duration'][i] + ) + if ifar is None: + return None + + # fill in a new candidate event + candidate = { + f'foreground/{self.ifo}/{k}': cutall_trigs[k][i] for k in trigs + } + candidate['foreground/stat'] = rank[i] + candidate['foreground/ifar'] = ifar + candidate['HWINJ'] = data_reader.near_hwinj() + return candidate
+ + +
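# A minimal sketch of the rescaling used in check() (illustrative values, not
# part of the module): when a PSD variation statistic is present, the reduced
# chisq and SNR are rescaled before any of the duration / chisq / ranking
# thresholds are applied.
import numpy as np
trigs = {'snr': np.array([9.0]),
         'chisq': np.array([2.4]),
         'psd_var_val': np.array([1.44])}
trig_chisq = trigs['chisq'] / trigs['psd_var_val']        # ~1.67
trig_snr = trigs['snr'] / (trigs['psd_var_val'] ** 0.5)   # 7.5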
+[docs] + def calculate_ifar(self, sngl_ranking, duration): + logger.info("Calculating IFAR") + if self.fixed_ifar and self.ifo in self.fixed_ifar: + return self.fixed_ifar[self.ifo] + + try: + with HFile(self.fit_file, 'r') as fit_file: + bin_edges = fit_file['bins_edges'][:] + live_time = fit_file[self.ifo].attrs['live_time'] + thresh = fit_file.attrs['fit_threshold'] + dist_grp = fit_file[self.ifo][self.sngl_ifar_est_dist] + rates = dist_grp['counts'][:] / live_time + coeffs = dist_grp['fit_coeff'][:] + except FileNotFoundError: + logger.error( + 'Single fit file %s not found; ' + 'dropping a potential single-detector candidate!', + self.fit_file + ) + return None + + bins = bin_utils.IrregularBins(bin_edges) + dur_bin = bins[duration] + + rate = rates[dur_bin] + coeff = coeffs[dur_bin] + if np.isnan(coeff) or np.isnan(rate): + logger.warning( + "Single trigger fits are not valid - singles " + "cannot be assessed for this detector at this time." + ) + return None + + rate_louder = rate * fits.cum_fit( + 'exponential', + [sngl_ranking], + coeff, + thresh + )[0] + + # apply a trials factor of the number of duration bins + rate_louder *= len(rates) + + return min(conv.sec_to_year(1. / rate_louder), self.maximum_ifar)
+ + +
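# A rough numerical sketch of the IFAR calculation above (illustrative values;
# this assumes the exponential fit tail behaves as
# exp(-coeff * (ranking - thresh)), cf. pycbc.events.trigger_fits.cum_fit):
import numpy as np
rate, coeff, thresh = 1.0e-4, 6.0, 6.0       # per-bin rate (Hz) and fit values
ranking, n_bins = 9.0, 5                     # candidate ranking, duration bins
rate_louder = rate * np.exp(-coeff * (ranking - thresh))
rate_louder *= n_bins                        # trials factor over duration bins
ifar_years = (1.0 / rate_louder) / 3.15576e7   # seconds -> years, ~4e3 yr here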
+[docs] + def start_refresh_thread(self): + """ + Start a thread managing whether the stat_calculator will be updated + """ + if self.statistic_refresh_rate is None: + logger.info("Statistic refresh disabled for %s", self.ifo) + return + thread = threading.Thread( + target=self.refresh_statistic, + daemon=True, + name="Stat refresh " + self.ifo + ) + logger.info("Starting %s statistic refresh thread", self.ifo) + thread.start()
+ + +
+[docs] + def refresh_statistic(self): + """ + Function to refresh the stat_calculator at regular intervals + """ + while True: + # How long since the statistic was last updated? + since_stat_refresh = time.time() - self.time_stat_refreshed + if since_stat_refresh > self.statistic_refresh_rate: + self.time_stat_refreshed = time.time() + logger.info( + "Checking %s statistic for updated files", self.ifo + ) + with self.stat_calculator_lock: + self.stat_calculator.check_update_files() + # Sleep one second for safety + time.sleep(1) + # Now use the time it took the check / update the statistic + since_stat_refresh = time.time() - self.time_stat_refreshed + logger.debug( + "%s statistic: Waiting %.3fs for next refresh", + self.ifo, + self.statistic_refresh_rate - since_stat_refresh + ) + time.sleep(self.statistic_refresh_rate - since_stat_refresh)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/stat.html b/latest/html/_modules/pycbc/events/stat.html new file mode 100644 index 00000000000..8076ad4f4ad --- /dev/null +++ b/latest/html/_modules/pycbc/events/stat.html @@ -0,0 +1,3166 @@ + + + + + + pycbc.events.stat — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.stat

+# Copyright (C) 2016 Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module contains functions for calculating coincident ranking statistic
+values.
+"""
+import logging
+from hashlib import sha1
+from datetime import datetime as dt
+import numpy
+import h5py
+
+from . import ranking
+from . import coinc_rate
+from .eventmgr_cython import logsignalrateinternals_computepsignalbins
+from .eventmgr_cython import logsignalrateinternals_compute2detrate
+
+logger = logging.getLogger('pycbc.events.stat')
+
+
+
+[docs] +class Stat(object): + """Base class which should be extended to provide a coincident statistic""" + + def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): + """ + Create a statistic class instance + + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + files: list of strs, needed for some statistics + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + ifos: list of strs, needed for some statistics + The list of detector names + """ + + self.files = {} + files = files or [] + for filename in files: + with h5py.File(filename, 'r') as f: + stat = f.attrs['stat'] + if hasattr(stat, 'decode'): + stat = stat.decode() + if stat in self.files: + raise RuntimeError("We already have one file with stat attr =" + " %s. Can't provide more than one!" % stat) + logger.info("Found file %s for stat %s", filename, stat) + self.files[stat] = filename + # Keep track of when stat files hashes so it can be + # reloaded if it has changed + self.file_hashes = self.get_file_hashes() + + # Provide the dtype of the single detector method's output + # This is used by background estimation codes that need to maintain + # a buffer of such values. + self.single_dtype = numpy.float32 + # True if a larger single detector statistic will produce a larger + # coincident statistic + self.single_increasing = True + + self.ifos = ifos or [] + + self.sngl_ranking = sngl_ranking + self.sngl_ranking_kwargs = {} + for key, value in kwargs.items(): + if key.startswith('sngl_ranking_'): + self.sngl_ranking_kwargs[key[13:]] = value + +
+[docs] + def get_file_hashes(self): + """ + Get sha1 hashes for all the files + """ + logger.debug( + "Getting file hashes" + ) + start = dt.now() + file_hashes = {} + for stat, filename in self.files.items(): + with open(filename, 'rb') as file_binary: + file_hashes[stat] = sha1(file_binary.read()).hexdigest() + logger.debug( + "Got file hashes for %d files, took %.3es", + len(self.files), + (dt.now() - start).total_seconds() + ) + return file_hashes
+ + +
+[docs] + def files_changed(self): + """ + Compare hashes of files now with the ones we have cached + """ + changed_file_hashes = self.get_file_hashes() + for stat, old_hash in self.file_hashes.items(): + if changed_file_hashes[stat] != old_hash: + logger.info( + "%s statistic file %s has changed", + ''.join(self.ifos), + stat, + ) + else: + # Remove the dataset from the dictionary of hashes + del changed_file_hashes[stat] + + if changed_file_hashes == {}: + logger.debug( + "No %s statistic files have changed", + ''.join(self.ifos) + ) + + return list(changed_file_hashes.keys())
+ + +
+[docs] + def check_update_files(self): + """ + Check whether files associated with the statistic need updated, + then do so for each file which needs changing + """ + files_changed = self.files_changed() + for file_key in files_changed: + self.update_file(file_key) + self.file_hashes = self.get_file_hashes()
+ + +
+[docs] + def update_file(self, key): + """ + Update file used in this statistic referenced by key. + """ + err_msg = "This function is a stub that should be overridden by the " + err_msg += "sub-classes. You shouldn't be seeing this error!" + raise NotImplementedError(err_msg)
+ + +
+[docs] + def get_sngl_ranking(self, trigs): + """ + Returns the ranking for the single detector triggers. + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + + Returns + ------- + numpy.ndarray + The array of single detector values + """ + return ranking.get_sngls_ranking_from_trigs( + trigs, + self.sngl_ranking, + **self.sngl_ranking_kwargs + )
+ + +
+[docs] + def single(self, trigs): # pylint:disable=unused-argument + """ + Calculate the necessary single detector information + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + + Returns + ------- + numpy.ndarray + The array of single detector values + """ + err_msg = "This function is a stub that should be overridden by the " + err_msg += "sub-classes. You shouldn't be seeing this error!" + raise NotImplementedError(err_msg)
+ + +
+[docs] + def rank_stat_single(self, single_info, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the statistic for a single detector candidate + + Parameters + ---------- + single_info: tuple + Tuple containing two values. The first is the ifo (str) and the + second is the single detector triggers. + + Returns + ------- + numpy.ndarray + The array of single detector statistics + """ + err_msg = "This function is a stub that should be overridden by the " + err_msg += "sub-classes. You shouldn't be seeing this error!" + raise NotImplementedError(err_msg)
+ + +
+[docs] + def rank_stat_coinc(self, s, slide, step, to_shift, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the coincident detection statistic. + """ + err_msg = "This function is a stub that should be overridden by the " + err_msg += "sub-classes. You shouldn't be seeing this error!" + raise NotImplementedError(err_msg)
+ + + def _check_coinc_lim_subclass(self, allowed_names): + """ + Check that we are not using coinc_lim_for_thresh when not valid. + + coinc_lim_for_thresh is only defined for the statistic it is present + in. If we subclass, we must check explicitly that it is still valid and + indicate this in the code. If the code does not have this explicit + check you will see the failure message here. + + Parameters + ----------- + allowed_names : list + list of allowed classes for the specific sub-classed method. + """ + if type(self).__name__ not in allowed_names: + err_msg = "This is being called from a subclass which has not " + err_msg += "been checked for validity with this method. If it is " + err_msg += "valid for the subclass to come here, include in the " + err_msg += "list of allowed_names above." + raise NotImplementedError(err_msg) + +
+[docs] + def coinc_lim_for_thresh(self, s, thresh, limifo, + **kwargs): # pylint:disable=unused-argument + """ + Optimization function to identify coincs too quiet to be of interest + + Calculate the required single detector statistic to exceed + the threshold for each of the input triggers. + """ + + err_msg = "This function is a stub that should be overridden by the " + err_msg += "sub-classes. You shouldn't be seeing this error!" + raise NotImplementedError(err_msg)
+
+ + + +
+[docs] +class QuadratureSumStatistic(Stat): + """Calculate the quadrature sum coincident detection statistic""" + +
+[docs] + def single(self, trigs): + """ + Calculate the necessary single detector information + + Here just the ranking is computed and returned. + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + + Returns + ------- + numpy.ndarray + The array of single detector values + """ + return self.get_sngl_ranking(trigs)
+ + +
+[docs] + def rank_stat_single(self, single_info, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the statistic for a single detector candidate + + For this statistic this is just passing through the + single value, which will be the second entry in the tuple. + + Parameters + ---------- + single_info: tuple + Tuple containing two values. The first is the ifo (str) and the + second is the single detector triggers. + + Returns + ------- + numpy.ndarray + The array of single detector statistics + """ + return single_info[1]
+ + +
+[docs] + def rank_stat_coinc(self, sngls_list, slide, step, to_shift, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the coincident detection statistic. + + Parameters + ---------- + sngls_list: list + List of (ifo, single detector statistic) tuples + slide: (unused in this statistic) + step: (unused in this statistic) + to_shift: list + List of integers indicating what multiples of the time shift will + be applied (unused in this statistic) + + Returns + ------- + numpy.ndarray + Array of coincident ranking statistic values + """ + cstat = sum(sngl[1] ** 2. for sngl in sngls_list) ** 0.5 + # For single-detector "cuts" the single ranking is set to -1 + for sngls in sngls_list: + cstat[sngls == -1] = 0 + return cstat
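# A minimal sketch of the quadrature combination above (illustrative values,
# not part of the module): single-detector values of 8 and 6 combine to
# sqrt(8**2 + 6**2) = 10; candidates where any single value is -1 have their
# coincident statistic set to zero.
sngls_list = [('H1', 8.0), ('L1', 6.0)]
cstat = sum(v ** 2. for _, v in sngls_list) ** 0.5   # = 10.0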
+ + +
+[docs] + def coinc_lim_for_thresh(self, s, thresh, limifo, + **kwargs): # pylint:disable=unused-argument + """ + Optimization function to identify coincs too quiet to be of interest + + Calculate the required single detector statistic to exceed + the threshold for each of the input triggers. + + Parameters + ---------- + s: list + List of (ifo, single detector statistic) tuples for all detectors + except limifo. + thresh: float + The threshold on the coincident statistic. + limifo: string + The ifo for which the limit is to be found. + + Returns + ------- + numpy.ndarray + Array of limits on the limifo single statistic to + exceed thresh. + """ + # Safety against subclassing and not rethinking this + allowed_names = ['QuadratureSumStatistic'] + self._check_coinc_lim_subclass(allowed_names) + + s0 = thresh ** 2. - sum(sngl[1] ** 2. for sngl in s) + s0[s0 < 0] = 0 + return s0 ** 0.5
+
+ + + +
+[docs] +class PhaseTDStatistic(QuadratureSumStatistic): + """ + Statistic that re-weights combined newsnr using coinc parameters. + + The weighting is based on the PDF of time delays, phase differences and + amplitude ratios between triggers in different ifos. + """ + + def __init__(self, sngl_ranking, files=None, ifos=None, + pregenerate_hist=True, **kwargs): + """ + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + + files: list of strs, unused here + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + + ifos: list of strs, needed here + The list of detector names + + pregenerate_hist: bool, optional + If False, do not pregenerate histogram on class instantiation. + Default is True. + """ + QuadratureSumStatistic.__init__(self, sngl_ranking, files=files, + ifos=ifos, **kwargs) + + self.single_dtype = [ + ('snglstat', numpy.float32), + ('coa_phase', numpy.float32), + ('end_time', numpy.float64), + ('sigmasq', numpy.float32), + ('snr', numpy.float32) + ] + + # Assign attribute so that it can be replaced with other functions + self.has_hist = False + self.hist_ifos = None + self.ref_snr = 5. + self.relsense = {} + self.swidth = self.pwidth = self.twidth = None + self.srbmin = self.srbmax = None + self.max_penalty = None + self.pdtype = [] + self.weights = {} + self.param_bin = {} + self.two_det_flag = (len(ifos) == 2) + self.two_det_weights = {} + # Some memory + self.pdif = numpy.zeros(128, dtype=numpy.float64) + self.tdif = numpy.zeros(128, dtype=numpy.float64) + self.sdif = numpy.zeros(128, dtype=numpy.float64) + self.tbin = numpy.zeros(128, dtype=numpy.int32) + self.pbin = numpy.zeros(128, dtype=numpy.int32) + self.sbin = numpy.zeros(128, dtype=numpy.int32) + + if pregenerate_hist and not len(ifos) == 1: + self.get_hist() + elif len(ifos) == 1: + # remove all phasetd files from self.files and self.file_hashes, + # as they are not needed + for k in list(self.files.keys()): + if 'phasetd_newsnr' in k: + del self.files[k] + del self.file_hashes[k] + +
+[docs] + def get_hist(self, ifos=None): + """ + Read in a signal density file for the ifo combination + + Parameters + ---------- + ifos: list + The list of ifos. Needed if not given when initializing the class + instance. + """ + ifos = ifos or self.ifos + + selected = None + for name in self.files: + # Pick out the statistic files that provide phase / time/ amp + # relationships and match to the ifos in use + if 'phasetd_newsnr' in name: + ifokey = name.split('_')[2] + num = len(ifokey) / 2 + if num != len(ifos): + continue + + match = [ifo in ifokey for ifo in ifos] + if False in match: + continue + selected = name + break + + # If there are other phasetd_newsnr files, they aren't needed. + # So tidy them out of the self.files dictionary + rejected = [key for key in self.files.keys() + if 'phasetd_newsnr' in key and not key == selected] + for k in rejected: + del self.files[k] + del self.file_hashes[k] + + if selected is None and len(ifos) > 1: + raise RuntimeError("Couldn't figure out which stat file to use") + if len(ifos) == 1: + # We dont need the histogram file, but we are trying to get one + # just skip it in this case + return + + logger.info("Using signal histogram %s for ifos %s", selected, ifos) + weights = {} + param = {} + + with h5py.File(self.files[selected], 'r') as histfile: + self.hist_ifos = histfile.attrs['ifos'] + + # Patch for pre-hdf5=3.0 histogram files + try: + logger.info("Decoding hist ifos ..") + self.hist_ifos = [i.decode('UTF-8') for i in self.hist_ifos] + except (UnicodeDecodeError, AttributeError): + pass + + # Histogram bin attributes + self.twidth = histfile.attrs['twidth'] + self.pwidth = histfile.attrs['pwidth'] + self.swidth = histfile.attrs['swidth'] + self.srbmin = histfile.attrs['srbmin'] + self.srbmax = histfile.attrs['srbmax'] + relfac = histfile.attrs['sensitivity_ratios'] + + for ifo in self.hist_ifos: + weights[ifo] = histfile[ifo]['weights'][:] + param[ifo] = histfile[ifo]['param_bin'][:] + + n_ifos = len(self.hist_ifos) + + bin_volume = (self.twidth * self.pwidth * self.swidth) ** (n_ifos - 1) + self.hist_max = - 1. * numpy.inf + + # Read histogram for each ifo, to use if that ifo has smallest SNR in + # the coinc + for ifo in self.hist_ifos: + + # renormalise to PDF + self.weights[ifo] = \ + (weights[ifo] / (weights[ifo].sum() * bin_volume)) + self.weights[ifo] = self.weights[ifo].astype(numpy.float32) + + if param[ifo].dtype == numpy.int8: + # Older style, incorrectly sorted histogram file + ncol = param[ifo].shape[1] + self.pdtype = [('c%s' % i, param[ifo].dtype) for i in range(ncol)] + self.param_bin[ifo] = numpy.zeros(len(self.weights[ifo]), + dtype=self.pdtype) + for i in range(ncol): + self.param_bin[ifo]['c%s' % i] = param[ifo][:, i] + + lsort = self.param_bin[ifo].argsort() + self.param_bin[ifo] = self.param_bin[ifo][lsort] + self.weights[ifo] = self.weights[ifo][lsort] + else: + # New style, efficient histogram file + # param bin and weights have already been sorted + self.param_bin[ifo] = param[ifo] + self.pdtype = self.param_bin[ifo].dtype + + # Max_penalty is a small number to assigned to any bins without + # histogram entries. All histograms in a given file have the same + # min entry by design, so use the min of the last one read in. + self.max_penalty = self.weights[ifo].min() + self.hist_max = max(self.hist_max, self.weights[ifo].max()) + + if self.two_det_flag: + # The density of signals is computed as a function of 3 binned + # parameters: time difference (t), phase difference (p) and + # SNR ratio (s). 
These are computed for each combination of + # detectors, so for detectors 6 differences are needed. However + # many combinations of these parameters are highly unlikely and + # no instances of these combinations occurred when generating + # the statistic files. Rather than storing a bunch of 0s, these + # values are just not stored at all. This reduces the size of + # the statistic file, but means we have to identify the correct + # value to read for every trigger. For 2 detectors we can + # expand the weights lookup table here, basically adding in all + # the "0" values. This makes looking up a value in the + # "weights" table a O(N) rather than O(NlogN) operation. It + # sacrifices RAM to do this, so is a good tradeoff for 2 + # detectors, but not for 3! + if not hasattr(self, 'c0_size'): + self.c0_size = {} + self.c1_size = {} + self.c2_size = {} + + self.c0_size[ifo] = numpy.int32( + 2 * (abs(self.param_bin[ifo]['c0']).max() + 1) + ) + self.c1_size[ifo] = numpy.int32( + 2 * (abs(self.param_bin[ifo]['c1']).max() + 1) + ) + self.c2_size[ifo] = numpy.int32( + 2 * (abs(self.param_bin[ifo]['c2']).max() + 1) + ) + + array_size = [self.c0_size[ifo], self.c1_size[ifo], + self.c2_size[ifo]] + dtypec = self.weights[ifo].dtype + self.two_det_weights[ifo] = \ + numpy.zeros(array_size, dtype=dtypec) + self.max_penalty + id0 = self.param_bin[ifo]['c0'].astype(numpy.int32) \ + + self.c0_size[ifo] // 2 + id1 = self.param_bin[ifo]['c1'].astype(numpy.int32) \ + + self.c1_size[ifo] // 2 + id2 = self.param_bin[ifo]['c2'].astype(numpy.int32) \ + + self.c2_size[ifo] // 2 + self.two_det_weights[ifo][id0, id1, id2] = self.weights[ifo] + + for ifo, sense in zip(self.hist_ifos, relfac): + self.relsense[ifo] = sense + + self.has_hist = True
+ + +
+[docs] + def update_file(self, key): + """ + Update file used in this statistic. + If others are used (i.e. this statistic is inherited), they will + need updated separately + """ + if 'phasetd_newsnr' in key and not len(self.ifos) == 1: + if ''.join(sorted(self.ifos)) not in key: + logger.debug( + "%s file is not used for %s statistic", + key, + ''.join(self.ifos) + ) + return False + logger.info( + "Updating %s statistic %s file", + ''.join(self.ifos), + key + ) + # This is a PhaseTDStatistic file which needs updating + self.get_hist() + return True + return False
+ + +
+[docs] + def logsignalrate(self, stats, shift, to_shift): + """ + Calculate the normalized log rate density of signals via lookup + + Parameters + ---------- + stats: dict of dicts + Single-detector quantities for each detector + shift: numpy array of float + Time shift vector for each coinc to be ranked + to_shift: list of ints + Multiple of the time shift to apply, ordered as self.ifos + + Returns + ------- + value: log of coinc signal rate density for the given single-ifo + triggers and time shifts + """ + # Convert time shift vector to dict, as hist ifos and self.ifos may + # not be in same order + to_shift = {ifo: s for ifo, s in zip(self.ifos, to_shift)} + + if not self.has_hist: + self.get_hist() + + # Figure out which ifo of the contributing ifos has the smallest SNR, + # to use as reference for choosing the signal histogram. + snrs = numpy.array([numpy.array(stats[ifo]['snr'], ndmin=1) + for ifo in self.ifos]) + smin = snrs.argmin(axis=0) + # Store a list of the triggers using each ifo as reference + rtypes = {ifo: numpy.where(smin == j)[0] + for j, ifo in enumerate(self.ifos)} + + # Get reference ifo information + rate = numpy.zeros(len(shift), dtype=numpy.float32) + ps = {ifo: numpy.array(stats[ifo]['coa_phase'], + dtype=numpy.float32, ndmin=1) + for ifo in self.ifos} + ts = {ifo: numpy.array(stats[ifo]['end_time'], + dtype=numpy.float64, ndmin=1) + for ifo in self.ifos} + ss = {ifo: numpy.array(stats[ifo]['snr'], + dtype=numpy.float32, ndmin=1) + for ifo in self.ifos} + sigs = {ifo: numpy.array(stats[ifo]['sigmasq'], + dtype=numpy.float32, ndmin=1) + for ifo in self.ifos} + for ref_ifo in self.ifos: + rtype = rtypes[ref_ifo] + pref = ps[ref_ifo] + tref = ts[ref_ifo] + sref = ss[ref_ifo] + sigref = sigs[ref_ifo] + senseref = self.relsense[self.hist_ifos[0]] + + binned = [] + other_ifos = [ifo for ifo in self.ifos if ifo != ref_ifo] + for ifo in other_ifos: + # Assign cached memory + length = len(rtype) + while length > len(self.pdif): + newlen = len(self.pdif) * 2 + self.pdif = numpy.zeros(newlen, dtype=numpy.float64) + self.tdif = numpy.zeros(newlen, dtype=numpy.float64) + self.sdif = numpy.zeros(newlen, dtype=numpy.float64) + self.pbin = numpy.zeros(newlen, dtype=numpy.int32) + self.tbin = numpy.zeros(newlen, dtype=numpy.int32) + self.sbin = numpy.zeros(newlen, dtype=numpy.int32) + + # Calculate differences + logsignalrateinternals_computepsignalbins( + self.pdif, + self.tdif, + self.sdif, + self.pbin, + self.tbin, + self.sbin, + ps[ifo], + ts[ifo], + ss[ifo], + sigs[ifo], + pref, + tref, + sref, + sigref, + shift, + rtype, + self.relsense[ifo], + senseref, + self.twidth, + self.pwidth, + self.swidth, + to_shift[ref_ifo], + to_shift[ifo], + length + ) + + binned += [ + self.tbin[:length], + self.pbin[:length], + self.sbin[:length] + ] + + # Read signal weight from precalculated histogram + if self.two_det_flag: + # High-RAM, low-CPU option for two-det + logsignalrateinternals_compute2detrate( + binned[0], + binned[1], + binned[2], + self.c0_size[ref_ifo], + self.c1_size[ref_ifo], + self.c2_size[ref_ifo], + rate, + rtype, + sref, + self.two_det_weights[ref_ifo], + self.max_penalty, + self.ref_snr, + len(rtype) + ) + else: + # Low[er]-RAM, high[er]-CPU option for >two det + + # Convert binned to same dtype as stored in hist + nbinned = numpy.zeros(len(binned[1]), dtype=self.pdtype) + for i, b in enumerate(binned): + nbinned[f'c{i}'] = b + + loc = numpy.searchsorted(self.param_bin[ref_ifo], nbinned) + loc[loc == len(self.weights[ref_ifo])] = 0 + rate[rtype] = 
self.weights[ref_ifo][loc] + + # These weren't in our histogram so give them max penalty + # instead of random value + missed = numpy.where( + self.param_bin[ref_ifo][loc] != nbinned + )[0] + rate[rtype[missed]] = self.max_penalty + # Scale by signal population SNR + rate[rtype] *= (sref[rtype] / self.ref_snr) ** -4. + + return numpy.log(rate)
+ + +
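# A minimal sketch of the histogram lookup in the >2-detector branch above
# (illustrative scalar bin labels rather than the structured arrays used in
# the class; not part of the module): the binned parameters are located in the
# sorted param_bin array with numpy.searchsorted, and any bin that was never
# populated falls back to the maximum penalty weight.
import numpy
param_bin = numpy.array([1, 3, 7])            # sorted, populated bin labels
weights = numpy.array([0.5, 0.3, 0.2])        # signal-rate weight per bin
max_penalty = 1.0e-6                          # weight assumed for empty bins
queried = numpy.array([3, 4])                 # bin 4 was never populated
loc = numpy.searchsorted(param_bin, queried)
loc[loc == len(weights)] = 0                  # clamp out-of-range lookups
rate = weights[loc]
rate[param_bin[loc] != queried] = max_penalty  # penalise unpopulated bins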
+[docs] + def single(self, trigs): + """ + Calculate the necessary single detector information + + Here the ranking as well as phase, endtime and sigma-squared values. + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. 'snr', 'chisq', + 'chisq_dof', 'coa_phase', 'end_time', and 'sigmasq' are required + keys. + + Returns + ------- + numpy.ndarray + Array of single detector parameter values + """ + sngl_stat = self.get_sngl_ranking(trigs) + singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype) + singles['snglstat'] = sngl_stat + singles['coa_phase'] = trigs['coa_phase'][:] + singles['end_time'] = trigs['end_time'][:] + singles['sigmasq'] = trigs['sigmasq'][:] + singles['snr'] = trigs['snr'][:] + return numpy.array(singles, ndmin=1)
+ + +
+[docs] + def rank_stat_single(self, single_info, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the statistic for a single detector candidate + + For this statistic this is just passing through the + single value, which will be the second entry in the tuple. + + Parameters + ---------- + single_info: tuple + Tuple containing two values. The first is the ifo (str) and the + second is the single detector triggers. + + Returns + ------- + numpy.ndarray + The array of single detector statistics + """ + return single_info[1]['snglstat']
+ + +
+[docs] + def rank_stat_coinc(self, sngls_list, slide, step, to_shift, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the coincident detection statistic, defined in Eq 2 of + [Nitz et al, 2017](https://doi.org/10.3847/1538-4357/aa8f50). + """ + rstat = sum(s[1]['snglstat'] ** 2 for s in sngls_list) + cstat = rstat + 2. * self.logsignalrate(dict(sngls_list), + slide * step, + to_shift) + cstat[cstat < 0] = 0 + return cstat ** 0.5
+ + +
+[docs] + def coinc_lim_for_thresh(self, sngls_list, thresh, limifo, + **kwargs): # pylint:disable=unused-argument + """ + Optimization function to identify coincs too quiet to be of interest. + Calculate the required single detector statistic to exceed the + threshold for each of the input triggers. + """ + # Safety against subclassing and not rethinking this + allowed_names = ['PhaseTDStatistic'] + self._check_coinc_lim_subclass(allowed_names) + + if not self.has_hist: + self.get_hist() + + fixed_statsq = sum( + [b['snglstat'] ** 2 for a, b in sngls_list if a != limifo] + ) + s1 = thresh ** 2. - fixed_statsq + # Assume best case scenario and use maximum signal rate + s1 -= 2. * self.hist_max + s1[s1 < 0] = 0 + return s1 ** 0.5
+
+ + + +
+[docs] +class ExpFitStatistic(QuadratureSumStatistic): + """ + Detection statistic using an exponential falloff noise model. + + Statistic approximates the negative log noise coinc rate density per + template over single-ifo newsnr values. + """ + + def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): + """ + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + + files: list of strs, needed here + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + + ifos: list of strs, not used here + The list of detector names + """ + if not files: + raise RuntimeError("Statistic files not specified") + QuadratureSumStatistic.__init__(self, sngl_ranking, files=files, + ifos=ifos, **kwargs) + + # the stat file attributes are hard-coded as '%{ifo}-fit_coeffs' + parsed_attrs = [f.split('-') for f in self.files.keys()] + self.bg_ifos = [at[0] for at in parsed_attrs if + (len(at) == 2 and at[1] == 'fit_coeffs')] + + if not len(self.bg_ifos): + raise RuntimeError("None of the statistic files has the required " + "attribute called {ifo}-fit_coeffs !") + + self.fits_by_tid = {} + self.alphamax = {} + for i in self.bg_ifos: + self.fits_by_tid[i] = self.assign_fits(i) + self.get_ref_vals(i) + + self.single_increasing = False + +
+[docs] + def assign_fits(self, ifo): + """ + Extract fits from fit files + + Parameters + ----------- + ifo: str + The detector to get fits for. + + Returns + ------- + rate_dict: dict + A dictionary containing the fit information in the `alpha`, `rate` + and `thresh` keys. + """ + coeff_file = h5py.File(self.files[f'{ifo}-fit_coeffs'], 'r') + template_id = coeff_file['template_id'][:] + # the template_ids and fit coeffs are stored in an arbitrary order + # create new arrays in template_id order for easier recall + tid_sort = numpy.argsort(template_id) + + fits_by_tid_dict = {} + fits_by_tid_dict['smoothed_fit_coeff'] = \ + coeff_file['fit_coeff'][:][tid_sort] + fits_by_tid_dict['smoothed_rate_above_thresh'] = \ + coeff_file['count_above_thresh'][:][tid_sort].astype(float) + fits_by_tid_dict['smoothed_rate_in_template'] = \ + coeff_file['count_in_template'][:][tid_sort].astype(float) + + # The by-template fits may have been stored in the smoothed fits file + if 'fit_by_template' in coeff_file: + coeff_fbt = coeff_file['fit_by_template'] + fits_by_tid_dict['fit_by_fit_coeff'] = \ + coeff_fbt['fit_coeff'][:][tid_sort] + fits_by_tid_dict['fit_by_rate_above_thresh'] = \ + coeff_fbt['count_above_thresh'][:][tid_sort].astype(float) + fits_by_tid_dict['fit_by_rate_in_template'] = \ + coeff_file['count_in_template'][:][tid_sort].astype(float) + + # Keep the fit threshold in fits_by_tid + fits_by_tid_dict['thresh'] = coeff_file.attrs['stat_threshold'] + + coeff_file.close() + + return fits_by_tid_dict
+ + +
+[docs] + def update_file(self, key): + """ + Update file used in this statistic. + If others are used (i.e. this statistic is inherited), they will + need updated separately + """ + if key.endswith('-fit_coeffs'): + # This is a ExpFitStatistic file which needs updating + # Which ifo is it? + ifo = key[:2] + self.fits_by_tid[ifo] = self.assign_fits(ifo) + self.get_ref_vals(ifo) + logger.info( + "Updating %s statistic %s file", + ''.join(self.ifos), + key + ) + return True + return False
+ + +
+[docs] + def get_ref_vals(self, ifo): + """ + Get the largest `alpha` value over all templates for given ifo. + + This is stored in `self.alphamax[ifo]` in the class instance. + + Parameters + ----------- + ifo: str + The detector to get fits for. + """ + self.alphamax[ifo] = self.fits_by_tid[ifo]['smoothed_fit_coeff'].max()
+ + +
+[docs] + def find_fits(self, trigs): + """ + Get fit coeffs for a specific ifo and template id(s) + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + The coincidence executable will always call this using a bunch of + trigs from a single template, so template_num is stored as an + attribute and we just return the single value for all triggers. + If multiple templates are in play we must return arrays. + + Returns + -------- + alphai: float or numpy array + The alpha fit value(s) + ratei: float or numpy array + The rate fit value(s) + thresh: float or numpy array + The thresh fit value(s) + """ + try: + # Exists where trigs is a class with the template num attribute + tnum = trigs.template_num + except AttributeError: + # Exists where trigs is dict-like + tnum = trigs['template_id'] + + try: + ifo = trigs.ifo + except AttributeError: + ifo = trigs.get('ifo', None) + if ifo is None: + ifo = self.ifos[0] + assert ifo in self.ifos + + # fits_by_tid is a dictionary of dictionaries of arrays + # indexed by ifo / coefficient name / template_id + alphai = self.fits_by_tid[ifo]['smoothed_fit_coeff'][tnum] + ratei = self.fits_by_tid[ifo]['smoothed_rate_above_thresh'][tnum] + thresh = self.fits_by_tid[ifo]['thresh'] + + return alphai, ratei, thresh
+ + +
+[docs] + def lognoiserate(self, trigs): + """ + Calculate the log noise rate density over single-ifo ranking + + Read in single trigger information, compute the ranking + and rescale by the fitted coefficients alpha and rate + + Parameters + ----------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + + Returns + --------- + lognoisel: numpy.array + Array of log noise rate density for each input trigger. + """ + alphai, ratei, thresh = self.find_fits(trigs) + sngl_stat = self.get_sngl_ranking(trigs) + # alphai is constant of proportionality between single-ifo newsnr and + # negative log noise likelihood in given template + # ratei is rate of trigs in given template compared to average + # thresh is stat threshold used in given ifo + lognoisel = - alphai * (sngl_stat - thresh) + numpy.log(alphai) + \ + numpy.log(ratei) + return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32)
+ + +
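# A worked sketch of the rescaling above (illustrative values, not part of the
# module): with a fitted slope alpha = 6, a relative trigger rate of 1 and a
# fit threshold of 6, a trigger with single-detector ranking 8 gets
# -6 * (8 - 6) + log(6) + log(1) ~ -10.2, i.e. louder triggers map to more
# negative (rarer) log noise rate densities.
import numpy
alphai, ratei, thresh, sngl_stat = 6.0, 1.0, 6.0, 8.0
lognoisel = -alphai * (sngl_stat - thresh) + numpy.log(alphai) + numpy.log(ratei)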
+[docs] + def single(self, trigs): + """ + Calculate the necessary single detector information + + In this case the ranking rescaled (see the lognoiserate method here). + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + + Returns + ------- + numpy.ndarray + The array of single detector values + """ + + return self.lognoiserate(trigs)
+ + +
+[docs] + def rank_stat_single(self, single_info, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the statistic for a single detector candidate + + Parameters + ---------- + single_info: tuple + Tuple containing two values. The first is the ifo (str) and the + second is the single detector triggers. + + Returns + ------- + numpy.ndarray + The array of single detector statistics + """ + err_msg = "Sorry! No-one has implemented this method yet! " + raise NotImplementedError(err_msg)
+ + +
+[docs] + def rank_stat_coinc(self, s, slide, step, to_shift, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the coincident detection statistic. + """ + err_msg = "Sorry! No-one has implemented this method yet! " + raise NotImplementedError(err_msg)
+ + +
+[docs] + def coinc_lim_for_thresh(self, s, thresh, limifo, + **kwargs): # pylint:disable=unused-argument + """ + Optimization function to identify coincs too quiet to be of interest + """ + err_msg = "Sorry! No-one has implemented this method yet! " + raise NotImplementedError(err_msg)
+ + + # Keeping this here to help write the new coinc method. +
+[docs] + def coinc_OLD(self, s0, s1, slide, step): # pylint:disable=unused-argument + """Calculate the final coinc ranking statistic""" + + # Approximate log likelihood ratio by summing single-ifo negative + # log noise likelihoods + loglr = - s0 - s1 + # add squares of threshold stat values via idealized Gaussian formula + threshes = [self.fits_by_tid[i]['thresh'] for i in self.bg_ifos] + loglr += sum([t ** 2. / 2. for t in threshes]) + # convert back to a coinc-SNR-like statistic + # via log likelihood ratio \propto rho_c^2 / 2 + return (2. * loglr) ** 0.5
+ + + # Keeping this here to help write the new coinc_lim method +
+[docs] + def coinc_lim_for_thresh_OLD(self, s0, thresh): + """Calculate the required single detector statistic to exceed + the threshold for each of the input triggers. + + Parameters + ---------- + s0: numpy.ndarray + Single detector ranking statistic for the first detector. + thresh: float + The threshold on the coincident statistic. + + Returns + ------- + numpy.ndarray + Array of limits on the second detector single statistic to + exceed thresh. + """ + s1 = - (thresh ** 2.) / 2. - s0 + threshes = [self.fits_by_tid[i]['thresh'] for i in self.bg_ifos] + s1 += sum([t ** 2. / 2. for t in threshes]) + return s1
+
+ + + +
+[docs] +class ExpFitCombinedSNR(ExpFitStatistic): + """ + Reworking of ExpFitStatistic designed to resemble network SNR + + Use a monotonic function of the negative log noise rate density which + approximates combined (new)snr for coincs with similar newsnr in each ifo + """ + + def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): + """ + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + + files: list of strs, needed here + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + + ifos: list of strs, not used here + The list of detector names + """ + ExpFitStatistic.__init__(self, sngl_ranking, files=files, ifos=ifos, + **kwargs) + # for low-mass templates the exponential slope alpha \approx 6 + self.alpharef = 6. + self.single_increasing = True + self.single_dtype = numpy.float32 + +
+[docs] + def use_alphamax(self): + """ + Compute the reference alpha from the fit files. + + Use the harmonic mean of the maximum individual ifo slopes as the + reference value of alpha. + """ + inv_alphas = [1. / self.alphamax[i] for i in self.bg_ifos] + self.alpharef = 1. / (sum(inv_alphas) / len(inv_alphas))
+ + +
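# A minimal sketch of the harmonic mean above (illustrative values, not part
# of the module): per-detector maximum slopes of 4 and 6 give a reference
# slope of 2 / (1/4 + 1/6) = 4.8.
inv_alphas = [1. / 4., 1. / 6.]
alpharef = 1. / (sum(inv_alphas) / len(inv_alphas))   # = 4.8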
+[docs] + def single(self, trigs): + """ + Calculate the necessary single detector information + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + + Returns + ------- + numpy.ndarray + The array of single detector values + """ + logr_n = self.lognoiserate(trigs) + _, _, thresh = self.find_fits(trigs) + # shift by log of reference slope alpha + logr_n += -1. * numpy.log(self.alpharef) + # add threshold and rescale by reference slope + stat = thresh - (logr_n / self.alpharef) + return numpy.array(stat, ndmin=1, dtype=numpy.float32)
+ + +
+[docs] + def rank_stat_single(self, single_info, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the statistic for single detector candidates + + Parameters + ---------- + single_info: tuple + Tuple containing two values. The first is the ifo (str) and the + second is the single detector triggers. + + Returns + ------- + numpy.ndarray + The array of single detector statistics + """ + if self.single_increasing: + sngl_multiifo = single_info[1] + else: + sngl_multiifo = -1. * single_info[1] + return sngl_multiifo
+ + +
+[docs] + def rank_stat_coinc(self, s, slide, step, to_shift, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the coincident detection statistic. + + Parameters + ---------- + sngls_list: list + List of (ifo, single detector statistic) tuples + slide: (unused in this statistic) + step: (unused in this statistic) + to_shift: list + List of integers indicating what multiples of the time shift will + be applied (unused in this statistic) + + Returns + ------- + numpy.ndarray + Array of coincident ranking statistic values + """ + # scale by 1/sqrt(number of ifos) to resemble network SNR + return sum(sngl[1] for sngl in s) / (len(s) ** 0.5)
+ + +
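# A minimal sketch of the combination above (illustrative values, not part of
# the module): single-detector values of 8 and 9 combine to
# (8 + 9) / sqrt(2) ~ 12.0, resembling a network SNR rather than the
# quadrature sum used by QuadratureSumStatistic.
s = [('H1', 8.0), ('L1', 9.0)]
cstat = sum(sngl[1] for sngl in s) / (len(s) ** 0.5)   # ~12.02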
+[docs] + def coinc_lim_for_thresh(self, s, thresh, limifo, + **kwargs): # pylint:disable=unused-argument + """ + Optimization function to identify coincs too quiet to be of interest + + Calculate the required single detector statistic to exceed + the threshold for each of the input triggers. + + Parameters + ---------- + s: list + List of (ifo, single detector statistic) tuples for all detectors + except limifo. + thresh: float + The threshold on the coincident statistic. + limifo: string + The ifo for which the limit is to be found. + + Returns + ------- + numpy.ndarray + Array of limits on the limifo single statistic to + exceed thresh. + """ + # Safety against subclassing and not rethinking this + allowed_names = ['ExpFitCombinedSNR'] + self._check_coinc_lim_subclass(allowed_names) + + return thresh * ((len(s) + 1) ** 0.5) - sum(sngl[1] for sngl in s)
+
+ + + +
+[docs] +class PhaseTDExpFitStatistic(PhaseTDStatistic, ExpFitCombinedSNR): + """ + Statistic combining exponential noise model with signal histogram PDF + """ + + def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): + """ + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + files: list of strs, needed here + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + ifos: list of strs, needed here + The list of detector names + """ + # read in both foreground PDF and background fit info + ExpFitCombinedSNR.__init__(self, sngl_ranking, files=files, ifos=ifos, + **kwargs) + # need the self.single_dtype value from PhaseTDStatistic + PhaseTDStatistic.__init__(self, sngl_ranking, files=files, + ifos=ifos, **kwargs) + +
+[docs] + def update_file(self, key): + """ + Update file used in this statistic. + If others are used (i.e. this statistic is inherited), they will + need updated separately + """ + # Here we inherit the PhaseTD and ExpFit file checks, + # nothing else needs doing + uf_exp_fit = ExpFitCombinedSNR.update_file(self, key) + uf_phasetd = PhaseTDStatistic.update_file(self, key) + return uf_exp_fit or uf_phasetd
+ + +
+[docs] + def single(self, trigs): + """ + Calculate the necessary single detector information + + In this case the ranking rescaled (see the lognoiserate method here) + with the phase, end time, sigma and SNR values added in. + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + + Returns + ------- + numpy.ndarray + The array of single detector values + """ + # same single-ifo stat as ExpFitCombinedSNR + sngl_stat = ExpFitCombinedSNR.single(self, trigs) + singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype) + singles['snglstat'] = sngl_stat + singles['coa_phase'] = trigs['coa_phase'][:] + singles['end_time'] = trigs['end_time'][:] + singles['sigmasq'] = trigs['sigmasq'][:] + singles['snr'] = trigs['snr'][:] + return numpy.array(singles, ndmin=1)
+ + +
+[docs] + def rank_stat_single(self, single_info, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the statistic for a single detector candidate + + Parameters + ---------- + single_info: tuple + Tuple containing two values. The first is the ifo (str) and the + second is the single detector triggers. + + Returns + ------- + numpy.ndarray + The array of single detector statistics + """ + err_msg = "Sorry! No-one has implemented this method yet! " + raise NotImplementedError(err_msg)
+ + +
+[docs] + def rank_stat_coinc(self, s, slide, step, to_shift, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the coincident detection statistic. + """ + err_msg = "Sorry! No-one has implemented this method yet! " + raise NotImplementedError(err_msg)
+ + +
+[docs] + def coinc_lim_for_thresh(self, s, thresh, limifo, + **kwargs): # pylint:disable=unused-argument + """ + Optimization function to identify coincs too quiet to be of interest + Calculate the required single detector statistic to exceed + the threshold for each of the input triggers. + """ + err_msg = "Sorry! No-one has implemented this method yet! " + raise NotImplementedError(err_msg)
+ + + # Keeping the old statistic code here for now to help with reimplementing +
+[docs] + def coinc_OLD(self, s0, s1, slide, step): + # logsignalrate function inherited from PhaseTDStatistic + logr_s = self.logsignalrate(s0, s1, slide * step) + # rescale by ExpFitCombinedSNR reference slope as for sngl stat + cstat = s0['snglstat'] + s1['snglstat'] + logr_s / self.alpharef + # cut off underflowing and very small values + cstat[cstat < 8.] = 8. + # scale to resemble network SNR + return cstat / (2. ** 0.5)
+ + +
+[docs] + def coinc_lim_for_thresh_OLD(self, s0, thresh): + # if the threshold is below this value all triggers will + # pass because of rounding in the coinc method + if thresh <= (8. / (2. ** 0.5)): + return -1. * numpy.ones(len(s0['snglstat'])) * numpy.inf + if not self.has_hist: + self.get_hist() + # Assume best case scenario and use maximum signal rate + logr_s = self.hist_max + s1 = (2 ** 0.5) * thresh - s0['snglstat'] - logr_s / self.alpharef + return s1
+
+ + + +
+[docs] +class ExpFitBgRateStatistic(ExpFitStatistic): + """ + Detection statistic using an exponential falloff noise model. + + Statistic calculates the log noise coinc rate for each + template over single-ifo newsnr values. + """ + + def __init__(self, sngl_ranking, files=None, ifos=None, + benchmark_lograte=-14.6, **kwargs): + """ + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + files: list of strs, needed here + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + ifos: list of strs, not used here + The list of detector names + benchmark_lograte: float, default=-14.6 + benchmark_lograte is log of a representative noise trigger rate. + The default comes from H1L1 (O2) and is 4.5e-7 Hz. + """ + + super(ExpFitBgRateStatistic, self).__init__(sngl_ranking, + files=files, ifos=ifos, + **kwargs) + self.benchmark_lograte = benchmark_lograte + + # Reassign the rate to be number per time rather than an arbitrarily + # normalised number + for ifo in self.bg_ifos: + self.reassign_rate(ifo) + +
+[docs] + def reassign_rate(self, ifo): + """ + Reassign the rate to be a number per unit of time + + Reassign the rate to be a number per unit of time rather than an + arbitrarily normalised number. + + Parameters + ----------- + ifo: str + The ifo to consider. + """ + with h5py.File(self.files[f'{ifo}-fit_coeffs'], 'r') as coeff_file: + analysis_time = float(coeff_file.attrs['analysis_time']) + fbt = 'fit_by_template' in coeff_file + + self.fits_by_tid[ifo]['smoothed_rate_above_thresh'] /= analysis_time + self.fits_by_tid[ifo]['smoothed_rate_in_template'] /= analysis_time + # The by-template fits may have been stored in the smoothed fits file + if fbt: + self.fits_by_tid[ifo]['fit_by_rate_above_thresh'] /= analysis_time + self.fits_by_tid[ifo]['fit_by_rate_in_template'] /= analysis_time
+ + +
+[docs] + def update_file(self, key): + """ + Update file used in this statistic. + If others are used (i.e. this statistic is inherited), they will + need updated separately + """ + # Check if the file to update is an ExpFit file + uf_expfit = ExpFitStatistic.update_file(self, key) + # If this has been updated we must do the reassign_rate step here + # on top of the file update from earlier + if uf_expfit: + # This is a fit coeff file which needs updating + # Which ifo is it? + ifo = key[:2] + self.reassign_rate(ifo) + return True + return False
+ + +
+[docs] + def rank_stat_coinc(self, s, slide, step, to_shift, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the coincident detection statistic. + + Parameters + ---------- + sngls_list: list + List of (ifo, single detector statistic) tuples + slide: (unused in this statistic) + step: (unused in this statistic) + to_shift: list + List of integers indicating what multiples of the time shift will + be applied (unused in this statistic) + + Returns + ------- + numpy.ndarray + Array of coincident ranking statistic values + """ + # ranking statistic is -ln(expected rate density of noise triggers) + # plus normalization constant + sngl_dict = {sngl[0]: sngl[1] for sngl in s} + ln_noise_rate = coinc_rate.combination_noise_lograte( + sngl_dict, kwargs['time_addition']) + loglr = - ln_noise_rate + self.benchmark_lograte + return loglr
+ + +
+[docs] + def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs): + """ + Optimization function to identify coincs too quiet to be of interest + + Calculate the required single detector statistic to exceed + the threshold for each of the input triggers. + + Parameters + ---------- + s: list + List of (ifo, single detector statistic) tuples for all detectors + except limifo. + thresh: float + The threshold on the coincident statistic. + limifo: string + The ifo for which the limit is to be found. + + Returns + ------- + numpy.ndarray + Array of limits on the limifo single statistic to + exceed thresh. + """ + # Safety against subclassing and not rethinking this + allowed_names = ['ExpFitBgRateStatistic'] + self._check_coinc_lim_subclass(allowed_names) + + sngl_dict = {sngl[0]: sngl[1] for sngl in s} + sngl_dict[limifo] = numpy.zeros(len(s[0][1])) + ln_noise_rate = coinc_rate.combination_noise_lograte( + sngl_dict, kwargs['time_addition']) + loglr = - thresh - ln_noise_rate + self.benchmark_lograte + return loglr
+
+ + + +
+[docs] +class ExpFitFgBgNormStatistic(PhaseTDStatistic, + ExpFitBgRateStatistic): + """ + Statistic combining PhaseTD, ExpFitBg and additional foreground info. + """ + + def __init__(self, sngl_ranking, files=None, ifos=None, + reference_ifos='H1,L1', **kwargs): + """ + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + files: list of strs, needed here + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + ifos: list of strs + The list of detector names + reference_ifos: string of comma separated ifo prefixes + Detectors to be used as the reference network for network + sensitivity comparisons. Each must be in fits_by_tid + """ + # read in background fit info and store it + ExpFitBgRateStatistic.__init__(self, sngl_ranking, files=files, + ifos=ifos, **kwargs) + # if ifos not already set, determine via background fit info + self.ifos = self.ifos or self.bg_ifos + # PhaseTD statistic single_dtype plus network sensitivity benchmark + PhaseTDStatistic.__init__(self, sngl_ranking, files=files, + ifos=self.ifos, **kwargs) + self.single_dtype.append(('benchmark_logvol', numpy.float32)) + + for ifo in self.bg_ifos: + self.assign_median_sigma(ifo) + + self.ref_ifos = reference_ifos.split(',') + self.benchmark_logvol = None + self.assign_benchmark_logvol() + self.single_increasing = False + # Initialize variable to hold event template id(s) + self.curr_tnum = None + +
+[docs] + def assign_median_sigma(self, ifo): + """ + Read and sort the median_sigma values from input files. + + Parameters + ---------- + ifo: str + The ifo to consider. + """ + + with h5py.File(self.files[f'{ifo}-fit_coeffs'], 'r') as coeff_file: + template_id = coeff_file['template_id'][:] + tid_sort = numpy.argsort(template_id) + self.fits_by_tid[ifo]['median_sigma'] = \ + coeff_file['median_sigma'][:][tid_sort]
+ + +
+[docs] + def assign_benchmark_logvol(self): + """ + Assign the benchmark log-volume used by the statistic. + This is the sensitive log-volume of each template in the + network of reference IFOs + """ + # benchmark_logvol is a benchmark sensitivity array over template id + bench_net_med_sigma = numpy.amin( + [self.fits_by_tid[ifo]['median_sigma'] for ifo in self.ref_ifos], + axis=0, + ) + self.benchmark_logvol = 3. * numpy.log(bench_net_med_sigma)
+ + +
+[docs] + def update_file(self, key): + """ + Update file used in this statistic. + If others are used (i.e. this statistic is inherited), they will + need updated separately + """ + # Here we inherit the PhaseTD file checks + uf_phasetd = PhaseTDStatistic.update_file(self, key) + uf_exp_fit = ExpFitBgRateStatistic.update_file(self, key) + if uf_phasetd: + # The key to update refers to a PhaseTDStatistic file + return True + if uf_exp_fit: + # The key to update refers to a ExpFitBgRateStatistic file + # In this case we must reload some statistic information + # Which ifo is it? + ifo = key[:2] + self.assign_median_sigma(ifo) + self.assign_benchmark_logvol() + return True + return False
+ + +
+[docs] + def lognoiserate(self, trigs, alphabelow=6): + """ + Calculate the log noise rate density over single-ifo ranking + + Read in single trigger information, make the newsnr statistic + and rescale by the fitted coefficients alpha and rate + + Parameters + ----------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + alphabelow: float, default=6 + Use this slope to fit the noise triggers below the point at which + fits are present in the input files. + + Returns + --------- + lognoisel: numpy.array + Array of log noise rate density for each input trigger. + """ + alphai, ratei, thresh = self.find_fits(trigs) + newsnr = self.get_sngl_ranking(trigs) + # Above the threshold we use the usual fit coefficient (alpha) + # below threshold use specified alphabelow + bt = newsnr < thresh + lognoisel = - alphai * (newsnr - thresh) + numpy.log(alphai) + \ + numpy.log(ratei) + lognoiselbt = - alphabelow * (newsnr - thresh) + \ + numpy.log(alphabelow) + numpy.log(ratei) + lognoisel[bt] = lognoiselbt[bt] + return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32)
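+    # Editor's sketch: the piecewise rescaling above can be reproduced with
+    # plain numpy for illustrative (made-up) fit coefficients. Below the fit
+    # threshold the slope alphabelow is used; above it the per-template
+    # alpha applies. All numbers here are hypothetical.
+    #
+    #   >>> import numpy
+    #   >>> newsnr = numpy.array([5.0, 7.0])   # one below, one above thresh
+    #   >>> alphai, ratei, thresh, alphabelow = 3.0, 0.1, 6.0, 6.0
+    #   >>> above = -alphai * (newsnr - thresh) + numpy.log(alphai) + numpy.log(ratei)
+    #   >>> below = -alphabelow * (newsnr - thresh) + numpy.log(alphabelow) + numpy.log(ratei)
+    #   >>> vals = numpy.where(newsnr < thresh, below, above)
+    #   # vals is approximately [5.489, -4.204]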
+ + +
+[docs] + def single(self, trigs): + """ + Calculate the necessary single detector information + + In this case the ranking rescaled (see the lognoiserate method here) + with the phase, end time, sigma, SNR, template_id and the + benchmark_logvol values added in. + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + Returns + ------- + numpy.ndarray + The array of single detector values + """ + try: + # exists if accessed via coinc_findtrigs + self.curr_tnum = trigs.template_num + except AttributeError: + # exists for SingleDetTriggers & pycbc_live get_coinc + self.curr_tnum = trigs['template_id'] + + # single-ifo stat = log of noise rate + sngl_stat = self.lognoiserate(trigs) + # populate other fields to calculate phase/time/amp consistency + # and sigma comparison + singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype) + singles['snglstat'] = sngl_stat + singles['coa_phase'] = trigs['coa_phase'][:] + singles['end_time'] = trigs['end_time'][:] + singles['sigmasq'] = trigs['sigmasq'][:] + singles['snr'] = trigs['snr'][:] + + # Store benchmark log volume as single-ifo information since the coinc + # method does not have access to template id + singles['benchmark_logvol'] = self.benchmark_logvol[self.curr_tnum] + return numpy.array(singles, ndmin=1)
+ + +
+[docs] + def rank_stat_single(self, single_info, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the statistic for single detector candidates + + Parameters + ---------- + single_info: tuple + Tuple containing two values. The first is the ifo (str) and the + second is the single detector triggers. + + Returns + ------- + numpy.ndarray + The array of single detector statistics + """ + sngls = single_info[1] + + ln_noise_rate = sngls['snglstat'] + ln_noise_rate -= self.benchmark_lograte + network_sigmasq = sngls['sigmasq'] + network_logvol = 1.5 * numpy.log(network_sigmasq) + benchmark_logvol = sngls['benchmark_logvol'] + network_logvol -= benchmark_logvol + ln_s = -4 * numpy.log(sngls['snr'] / self.ref_snr) + loglr = network_logvol - ln_noise_rate + ln_s + # cut off underflowing and very small values + loglr[loglr < -30.] = -30. + return loglr
+ + +
+[docs] + def rank_stat_coinc(self, s, slide, step, to_shift, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the coincident detection statistic. + + Parameters + ---------- + sngls_list: list + List of (ifo, single detector statistic) tuples + slide: (unused in this statistic) + step: (unused in this statistic) + to_shift: list + List of integers indicating what multiples of the time shift will + be applied (unused in this statistic) + + Returns + ------- + numpy.ndarray + Array of coincident ranking statistic values + """ + + sngl_rates = {sngl[0]: sngl[1]['snglstat'] for sngl in s} + # Find total volume of phase-time-amplitude space occupied by + # noise coincs + if 'dets' in kwargs: + ln_noise_rate = coinc_rate.combination_noise_lograte( + sngl_rates, kwargs['time_addition'], + kwargs['dets']) + # Extent of time-difference space occupied + noise_twindow = coinc_rate.multiifo_noise_coincident_area( + self.hist_ifos, kwargs['time_addition'], + kwargs['dets']) + else: + ln_noise_rate = coinc_rate.combination_noise_lograte( + sngl_rates, kwargs['time_addition']) + noise_twindow = coinc_rate.multiifo_noise_coincident_area( + self.hist_ifos, kwargs['time_addition']) + + ln_noise_rate -= self.benchmark_lograte + + # Network sensitivity for a given coinc type is approximately + # determined by the least sensitive ifo + network_sigmasq = numpy.amin([sngl[1]['sigmasq'] for sngl in s], + axis=0) + # Volume \propto sigma^3 or sigmasq^1.5 + network_logvol = 1.5 * numpy.log(network_sigmasq) + # Get benchmark log volume as single-ifo information : + # benchmark_logvol for a given template is not ifo-dependent, so + # choose the first ifo for convenience + benchmark_logvol = s[0][1]['benchmark_logvol'] + network_logvol -= benchmark_logvol + + # Use prior histogram to get Bayes factor for signal vs noise + # given the time, phase and SNR differences between IFOs + + # First get signal PDF logr_s + stat = {ifo: st for ifo, st in s} + logr_s = self.logsignalrate(stat, slide * step, to_shift) + + # Volume is the allowed time difference window, multiplied by 2pi for + # each phase difference dimension and by allowed range of SNR ratio + # for each SNR ratio dimension : there are (n_ifos - 1) dimensions + # for both phase and SNR + n_ifos = len(self.hist_ifos) + hist_vol = noise_twindow * \ + (2. * numpy.pi * (self.srbmax - self.srbmin) * self.swidth) ** \ + (n_ifos - 1) + # Noise PDF is 1/volume, assuming a uniform distribution of noise + # coincs + logr_n = - numpy.log(hist_vol) + + # Combine to get final statistic: log of + # ((rate of signals / rate of noise) * PTA Bayes factor) + loglr = network_logvol - ln_noise_rate + logr_s - logr_n + + # cut off underflowing and very small values + loglr[loglr < -30.] = -30. + return loglr
+ + +
+[docs] + def coinc_lim_for_thresh(self, s, thresh, limifo, + **kwargs): # pylint:disable=unused-argument + """ + Optimization function to identify coincs too quiet to be of interest + + Calculate the required single detector statistic to exceed + the threshold for each of the input triggers. + + Parameters + ---------- + s: list + List of (ifo, single detector statistic) tuples for all detectors + except limifo. + thresh: float + The threshold on the coincident statistic. + limifo: string + The ifo for which the limit is to be found. + + Returns + ------- + numpy.ndarray + Array of limits on the limifo single statistic to + exceed thresh. + """ + + # Safety against subclassing and not rethinking this + allowed_names = ['ExpFitFgBgNormStatistic', + 'ExpFitFgBgNormBBHStatistic', + 'DQExpFitFgBgNormStatistic', + 'DQExpFitFgBgKDEStatistic', + 'ExpFitFgBgKDEStatistic'] + self._check_coinc_lim_subclass(allowed_names) + + if not self.has_hist: + self.get_hist() + # if the threshold is below this value all triggers will + # pass because of rounding in the coinc method + if thresh <= -30.: + return numpy.ones(len(s[0][1]['snglstat'])) * numpy.inf + sngl_rates = {sngl[0]: sngl[1]['snglstat'] for sngl in s} + # Add limifo to singles dict so that overlap time is calculated correctly + sngl_rates[limifo] = numpy.zeros(len(s[0][1])) + ln_noise_rate = coinc_rate.combination_noise_lograte( + sngl_rates, kwargs['time_addition']) + ln_noise_rate -= self.benchmark_lograte + + # Assume best case and use the maximum sigma squared from all triggers + network_sigmasq = numpy.ones(len(s[0][1])) * kwargs['max_sigmasq'] + # Volume \propto sigma^3 or sigmasq^1.5 + network_logvol = 1.5 * numpy.log(network_sigmasq) + # Get benchmark log volume as single-ifo information : + # benchmark_logvol for a given template is not ifo-dependent, so + # choose the first ifo for convenience + benchmark_logvol = s[0][1]['benchmark_logvol'] + network_logvol -= benchmark_logvol + + # Assume best case scenario and use maximum signal rate + logr_s = numpy.log(self.hist_max + * (kwargs['min_snr'] / self.ref_snr) ** -4.) + + # Find total volume of phase-time-amplitude space occupied by noise + # coincs + # Extent of time-difference space occupied + noise_twindow = coinc_rate.multiifo_noise_coincident_area( + self.hist_ifos, kwargs['time_addition']) + # Volume is the allowed time difference window, multiplied by 2pi for + # each phase difference dimension and by allowed range of SNR ratio + # for each SNR ratio dimension : there are (n_ifos - 1) dimensions + # for both phase and SNR + n_ifos = len(self.hist_ifos) + hist_vol = noise_twindow * \ + (2. * numpy.pi * (self.srbmax - self.srbmin) * self.swidth) ** \ + (n_ifos - 1) + # Noise PDF is 1/volume, assuming a uniform distribution of noise + # coincs + logr_n = - numpy.log(hist_vol) + + loglr = - thresh + network_logvol - ln_noise_rate + logr_s - logr_n + return loglr
+
+ + + +
+[docs] +class ExpFitFgBgNormBBHStatistic(ExpFitFgBgNormStatistic): + """ + The ExpFitFgBgNormStatistic with a mass weighting factor. + + This is the same as the ExpFitFgBgNormStatistic except the likelihood + is multiplied by a signal rate prior modelled as uniform over chirp mass. + As templates are distributed roughly according to mchirp^(-11/3) we + weight by the inverse of this. This ensures that quiet signals at high + mass where template density is sparse are not swamped by events at lower + masses where template density is high. + """ + + def __init__(self, sngl_ranking, files=None, ifos=None, + max_chirp_mass=None, **kwargs): + """ + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + files: list of strs, needed here + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + ifos: list of strs, not used here + The list of detector names + max_chirp_mass: float, default=None + If given, if a template's chirp mass is above this value it will + be reweighted as if it had this chirp mass. This is to avoid the + problem where the distribution fails to be accurate at high mass + and we can have a case where a single highest-mass template might + produce *all* the loudest background (and foreground) events. + """ + ExpFitFgBgNormStatistic.__init__(self, sngl_ranking, files=files, + ifos=ifos, **kwargs) + self.mcm = max_chirp_mass + self.curr_mchirp = None + +
+[docs] + def logsignalrate(self, stats, shift, to_shift): + """ + Calculate the normalized log rate density of signals via lookup + + This calls back to the Parent class and then applies the chirp mass + weighting factor. + + Parameters + ---------- + stats: list of dicts giving single-ifo quantities, ordered as + self.ifos + shift: numpy array of float, size of the time shift vector for each + coinc to be ranked + to_shift: list of int, multiple of the time shift to apply ordered + as self.ifos + + Returns + ------- + value: log of coinc signal rate density for the given single-ifo + triggers and time shifts + """ + # Model signal rate as uniform over chirp mass, background rate is + # proportional to mchirp^(-11/3) due to density of templates + logr_s = ExpFitFgBgNormStatistic.logsignalrate( + self, + stats, + shift, + to_shift + ) + logr_s += numpy.log((self.curr_mchirp / 20.) ** (11. / 3.)) + return logr_s
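+    # Editor's note: the reweighting term is log((mchirp / 20) ** (11 / 3)).
+    # For illustration, with made-up chirp masses, a template with chirp mass
+    # 40 gains log(2 ** (11 / 3)) relative to the reference, while a template
+    # with chirp mass 10 loses the same amount:
+    #
+    #   >>> import numpy
+    #   >>> numpy.log((40. / 20.) ** (11. / 3.))   # ~ 2.5415
+    #   >>> numpy.log((10. / 20.) ** (11. / 3.))   # ~ -2.5415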
+ + +
+[docs] + def single(self, trigs): + """ + Calculate the necessary single detector information + + In this case the ranking is rescaled (see the lognoiserate method here) + with the phase, end time, sigma, SNR, template_id and the + benchmark_logvol values added in. This also stores the current chirp + mass for use when computing the coinc statistic values. + + Parameters + ---------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + + Returns + ------- + numpy.ndarray + The array of single detector values + """ + from pycbc.conversions import mchirp_from_mass1_mass2 + try: + mass1 = trigs.param['mass1'] + mass2 = trigs.param['mass2'] + except AttributeError: + mass1 = trigs['mass1'] + mass2 = trigs['mass2'] + self.curr_mchirp = mchirp_from_mass1_mass2(mass1, mass2) + + if self.mcm is not None: + # Careful - input might be a str, so cast to float + self.curr_mchirp = min(self.curr_mchirp, float(self.mcm)) + return ExpFitFgBgNormStatistic.single(self, trigs)
+ + +
+[docs] + def rank_stat_single(self, single_info, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the statistic for a single detector candidate + + This calls back to the Parent class and then applies the chirp mass + weighting factor. + + Parameters + ---------- + single_info: tuple + Tuple containing two values. The first is the ifo (str) and the + second is the single detector triggers. + + Returns + ------- + numpy.ndarray + The array of single detector statistics + """ + rank_sngl = ExpFitFgBgNormStatistic.rank_stat_single( + self, + single_info, + **kwargs) + rank_sngl += numpy.log((self.curr_mchirp / 20.) ** (11. / 3.)) + return rank_sngl
+ + +
+[docs] + def rank_stat_coinc(self, sngls_list, slide, step, to_shift, **kwargs): + """ + Calculate the coincident detection statistic. + + Parameters + ---------- + sngls_list: list + List of (ifo, single detector statistic) tuples + slide: (unused in this statistic) + step: (unused in this statistic) + to_shift: list + List of integers indicating what multiples of the time shift will + be applied (unused in this statistic) + + Returns + ------- + numpy.ndarray + Array of coincident ranking statistic values + """ + + if 'mchirp' in kwargs: + self.curr_mchirp = kwargs['mchirp'] + + return ExpFitFgBgNormStatistic.rank_stat_coinc(self, + sngls_list, + slide, + step, + to_shift, + **kwargs)
+ + +
+[docs] + def coinc_lim_for_thresh(self, s, thresh, limifo, + **kwargs): # pylint:disable=unused-argument + """ + Optimization function to identify coincs too quiet to be of interest + + Calculate the required single detector statistic to exceed + the threshold for each of the input triggers. + + Parameters + ---------- + s: list + List of (ifo, single detector statistic) tuples for all detectors + except limifo. + thresh: float + The threshold on the coincident statistic. + limifo: string + The ifo for which the limit is to be found. + + Returns + ------- + numpy.ndarray + Array of limits on the limifo single statistic to + exceed thresh. + """ + loglr = ExpFitFgBgNormStatistic.coinc_lim_for_thresh( + self, s, thresh, limifo, **kwargs) + loglr += numpy.log((self.curr_mchirp / 20.) ** (11. / 3.)) + return loglr
+
+ + + +
+[docs] +class ExpFitFgBgKDEStatistic(ExpFitFgBgNormStatistic): + """ + The ExpFitFgBgNormStatistic with an additional mass and spin weighting + factor determined by KDE statistic files. + + This is the same as the ExpFitFgBgNormStatistic except the likelihood + ratio is multiplied by the ratio of signal KDE to template KDE over some + parameters covering the bank. + """ + + def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): + """ + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + files: list of strs, needed here + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + ifos: list of strs, not used here + The list of detector names + """ + ExpFitFgBgNormStatistic.__init__(self, sngl_ranking, files=files, + ifos=ifos, **kwargs) + self.kde_names = [] + self.find_kdes() + self.kde_by_tid = {} + for kname in self.kde_names: + self.assign_kdes(kname) + +
+[docs] + def find_kdes(self): + """ + Find which associated files are for the KDE reweighting + """ + # The stat file attributes are hard-coded as 'signal-kde_file' + # and 'template-kde_file' + parsed_attrs = [f.split('-') for f in self.files.keys()] + self.kde_names = [at[0] for at in parsed_attrs if + (len(at) == 2 and at[1] == 'kde_file')] + assert sorted(self.kde_names) == ['signal', 'template'], \ + "Two stat files are required, they should have stat attr " \ + "'signal-kde_file' and 'template-kde_file' respectively"
+ + +
+[docs] + def assign_kdes(self, kname): + """ + Extract values from KDE files + + Parameters + ----------- + kname: str + Used to label the kde files. + """ + with h5py.File(self.files[kname + '-kde_file'], 'r') as kde_file: + self.kde_by_tid[kname + '_kdevals'] = kde_file['data_kde'][:]
+ + +
+[docs] + def update_file(self, key): + """ + Update file used in this statistic. + If others are used (i.e. this statistic is inherited), they will + need updated separately + """ + # Inherit from ExpFitFgBgNormStatistic + uf_expfit = ExpFitFgBgNormStatistic.update_file(self, key) + if uf_expfit: + # The key to update refers to a ExpFitFgBgNormStatistic file + return True + # Is the key a KDE statistic file that we update here? + if key.endswith('kde_file'): + logger.info( + "Updating %s statistic %s file", + ''.join(self.ifos), + key + ) + kde_style = key.split('-')[0] + self.assign_kdes(kde_style) + return True + return False
+ + +
+[docs] + def kde_ratio(self): + """ + Calculate the weighting factor according to the ratio of the + signal and template KDE lookup tables + """ + signal_kde = self.kde_by_tid["signal_kdevals"][self.curr_tnum] + template_kde = self.kde_by_tid["template_kdevals"][self.curr_tnum] + + return numpy.log(signal_kde / template_kde)
+ + +
+[docs] + def logsignalrate(self, stats, shift, to_shift): + """ + Calculate the normalized log rate density of signals via lookup. + + This calls back to the parent class and then applies the ratio_kde + weighting factor. + + Parameters + ---------- + stats: list of dicts giving single-ifo quantities, ordered as + self.ifos + shift: numpy array of float, size of the time shift vector for each + coinc to be ranked + to_shift: list of int, multiple of the time shift to apply ordered + as self.ifos + + Returns + ------- + value: log of coinc signal rate density for the given single-ifo + triggers and time shifts + """ + logr_s = ExpFitFgBgNormStatistic.logsignalrate(self, stats, shift, + to_shift) + logr_s += self.kde_ratio() + + return logr_s
+ + +
+[docs] + def rank_stat_single(self, single_info, + **kwargs): # pylint:disable=unused-argument + """ + Calculate the statistic for a single detector candidate + + Parameters + ---------- + single_info: tuple + Tuple containing two values. The first is the ifo (str) and the + second is the single detector triggers. + + Returns + ------- + numpy.ndarray + The array of single detector statistics + """ + rank_sngl = ExpFitFgBgNormStatistic.rank_stat_single( + self, + single_info, + **kwargs) + rank_sngl += self.kde_ratio() + return rank_sngl
+ + +
+[docs] + def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs): + """ + Optimization function to identify coincs too quiet to be of interest + + Calculate the required single detector statistic to exceed the + threshold for each of the input triggers. + + Parameters + ---------- + s: list + List of (ifo, single detector statistic) tuples for all detectors + except limifo. + thresh: float + The threshold on the coincident statistic. + limifo: string + The ifo for which the limit is to be found. + + Returns + ------- + numpy.ndarray + Array of limits on the limifo single statistic to + exceed thresh. + """ + loglr = ExpFitFgBgNormStatistic.coinc_lim_for_thresh( + self, s, thresh, limifo, **kwargs) + signal_kde = self.kde_by_tid["signal_kdevals"][self.curr_tnum] + template_kde = self.kde_by_tid["template_kdevals"][self.curr_tnum] + loglr += numpy.log(signal_kde / template_kde) + return loglr
+
+ + + +
+[docs] +class DQExpFitFgBgNormStatistic(ExpFitFgBgNormStatistic): + """ + The ExpFitFgBgNormStatistic with DQ-based reranking. + + This is the same as the ExpFitFgBgNormStatistic except the likelihood + ratio is corrected via estimating relative noise trigger rates based on + the DQ time series. + """ + + def __init__(self, sngl_ranking, files=None, ifos=None, + **kwargs): + """ + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + files: list of strs, needed here + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + ifos: list of strs, not used here + The list of detector names + """ + ExpFitFgBgNormStatistic.__init__(self, sngl_ranking, files=files, + ifos=ifos, **kwargs) + self.dq_rates_by_state = {} + self.dq_bin_by_tid = {} + self.dq_state_segments = None + self.low_latency = False + self.single_dtype.append(('dq_state', int)) + + for ifo in self.ifos: + key = f'{ifo}-dq_stat_info' + if key in self.files.keys(): + self.dq_rates_by_state[ifo] = self.assign_dq_rates(key) + self.dq_bin_by_tid[ifo] = self.assign_template_bins(key) + self.check_low_latency(key) + if not self.low_latency: + if self.dq_state_segments is None: + self.dq_state_segments = {} + self.dq_state_segments[ifo] = self.setup_segments(key) + +
+[docs] + def check_low_latency(self, key): + """ + Check if the statistic file indicates low latency mode. + Parameters + ---------- + key: str + Statistic file key string. + Returns + ------- + None + """ + ifo = key.split('-')[0] + with h5py.File(self.files[key], 'r') as dq_file: + ifo_grp = dq_file[ifo] + if 'dq_segments' not in ifo_grp.keys(): + # if segs are not in file, we must be in LL + if self.dq_state_segments is not None: + raise ValueError( + 'Either all dq stat files must have segments or none' + ) + self.low_latency = True + elif self.low_latency: + raise ValueError( + 'Either all dq stat files must have segments or none' + )
+ + +
+[docs] + def assign_template_bins(self, key): + """ + Assign bin ID values + Assign each template id to a bin name based on a + referenced statistic file. + + Parameters + ---------- + key: str + statistic file key string + + Returns + --------- + bin_dict: dict of strs + Dictionary containing the bin name for each template id + """ + ifo = key.split('-')[0] + with h5py.File(self.files[key], 'r') as dq_file: + tids = [] + bin_nums = [] + bin_grp = dq_file[f'{ifo}/bins'] + for bin_name in bin_grp.keys(): + bin_tids = bin_grp[f'{bin_name}/tids'][:] + tids = list(tids) + list(bin_tids.astype(int)) + bin_nums = list(bin_nums) + list([bin_name] * len(bin_tids)) + + bin_dict = dict(zip(tids, bin_nums)) + return bin_dict
+ + +
+[docs] + def assign_dq_rates(self, key): + """ + Assign dq values to each time for every bin based on a + referenced statistic file. + + Parameters + ---------- + key: str + statistic file key string + + Returns + --------- + dq_dict: dict of {time: dq_value} dicts for each bin + Dictionary containing the mapping between the time + and the dq value for each individual bin. + + """ + ifo = key.split('-')[0] + with h5py.File(self.files[key], 'r') as dq_file: + bin_grp = dq_file[f'{ifo}/bins'] + dq_dict = {} + for bin_name in bin_grp.keys(): + dq_dict[bin_name] = bin_grp[f'{bin_name}/dq_rates'][:] + + return dq_dict
+ + +
+[docs] + def setup_segments(self, key): + """ + Store segments from stat file + """ + ifo = key.split('-')[0] + with h5py.File(self.files[key], 'r') as dq_file: + ifo_grp = dq_file[ifo] + dq_state_segs_dict = {} + for k in ifo_grp['dq_segments'].keys(): + seg_dict = {} + seg_dict['start'] = \ + ifo_grp[f'dq_segments/{k}/segment_starts'][:] + seg_dict['end'] = \ + ifo_grp[f'dq_segments/{k}/segment_ends'][:] + dq_state_segs_dict[k] = seg_dict + + return dq_state_segs_dict
+ + +
+[docs] + def update_file(self, key): + """ + Update file used in this statistic. + If others are used (i.e. this statistic is inherited), they will + need updated separately + """ + # Inherit from ExpFitFgBgNormStatistic + uf_expfit = ExpFitFgBgNormStatistic.update_file(self, key) + if uf_expfit: + # We have updated a ExpFitFgBgNormStatistic file already + return True + # We also need to check if the DQ files have updated + if key.endswith('dq_stat_info'): + ifo = key.split('-')[0] + logger.info( + "Updating %s statistic %s file", + ifo, + key + ) + self.dq_rates_by_state[ifo] = self.assign_dq_rates(key) + self.dq_bin_by_tid[ifo] = self.assign_template_bins(key) + return True + return False
+ + +
+[docs] + def find_dq_noise_rate(self, trigs): + """Get dq values for a specific ifo and dq states""" + + try: + ifo = trigs.ifo + except AttributeError: + ifo = trigs.get('ifo', None) + if ifo is None: + ifo = self.ifos[0] + assert ifo in self.ifos + + dq_state = trigs['dq_state'] + dq_val = numpy.ones(len(dq_state)) + + tnum = self.curr_tnum + if ifo in self.dq_rates_by_state: + for (i, st) in enumerate(dq_state): + if isinstance(tnum, numpy.ndarray): + bin_name = self.dq_bin_by_tid[ifo][tnum[i]] + else: + bin_name = self.dq_bin_by_tid[ifo][tnum] + dq_val[i] = self.dq_rates_by_state[ifo][bin_name][st] + return dq_val
+ + +
+[docs] + def find_dq_state_by_time(self, ifo, times): + """Get the dq state for an ifo at times""" + dq_state = numpy.zeros(len(times), dtype=numpy.uint8) + if ifo in self.dq_state_segments: + from pycbc.events.veto import indices_within_times + for k in self.dq_state_segments[ifo]: + starts = self.dq_state_segments[ifo][k]['start'] + ends = self.dq_state_segments[ifo][k]['end'] + inds = indices_within_times(times, starts, ends) + # states are named in file as 'dq_state_N', need to extract N + dq_state[inds] = int(k[9:]) + return dq_state
+ + +
+[docs] + def lognoiserate(self, trigs): + """ + Calculate the log noise rate density over single-ifo ranking + + Read in single trigger information, compute the ranking + and rescale by the fitted coefficients alpha and rate + + Parameters + ----------- + trigs: dict of numpy.ndarrays, h5py group or similar dict-like object + Object holding single detector trigger information. + + Returns + --------- + lognoiserate: numpy.array + Array of log noise rate density for each input trigger. + """ + + dq_rate = self.find_dq_noise_rate(trigs) + dq_rate = numpy.maximum(dq_rate, 1) + + logr_n = ExpFitFgBgNormStatistic.lognoiserate( + self, trigs) + logr_n += numpy.log(dq_rate) + return logr_n
+ + +
+[docs] + def single(self, trigs): + # make sure every trig has a dq state + try: + ifo = trigs.ifo + except AttributeError: + ifo = trigs.get('ifo', None) + if ifo is None: + ifo = self.ifos[0] + assert ifo in self.ifos + + singles = ExpFitFgBgNormStatistic.single(self, trigs) + + if self.low_latency: + # trigs should already have a dq state assigned + singles['dq_state'] = trigs['dq_state'][:] + else: + singles['dq_state'] = self.find_dq_state_by_time( + ifo, trigs['end_time'][:] + ) + return singles
+
+ + + +
+[docs] +class DQExpFitFgBgKDEStatistic(DQExpFitFgBgNormStatistic): + """ + The ExpFitFgBgKDEStatistic with DQ-based reranking. + + This is the same as the DQExpFitFgBgNormStatistic except the signal + rate is adjusted according to the KDE statistic files + """ + + def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): + """ + Parameters + ---------- + sngl_ranking: str + The name of the ranking to use for the single-detector triggers. + files: list of strs, needed here + A list containing the filenames of hdf format files used to help + construct the coincident statistics. The files must have a 'stat' + attribute which is used to associate them with the appropriate + statistic class. + ifos: list of strs, not used here + The list of detector names + """ + DQExpFitFgBgNormStatistic.__init__(self, sngl_ranking, files=files, + ifos=ifos, **kwargs) + self.kde_names = [] + ExpFitFgBgKDEStatistic.find_kdes(self) + self.kde_by_tid = {} + for kname in self.kde_names: + ExpFitFgBgKDEStatistic.assign_kdes(self, kname) + +
+[docs] + def update_file(self, key): + """ + Update file used in this statistic. + If others are used (i.e. this statistic is inherited), they will + need updated separately + """ + # Inherit from DQExpFitFgBgNormStatistic and ExpFitFgBgKDEStatistic + uf_dq = DQExpFitFgBgNormStatistic.update_file(self, key) + uf_kde = ExpFitFgBgKDEStatistic.update_file(self, key) + return uf_dq or uf_kde
+ + +
+[docs] + def kde_ratio(self): + """ + Inherited, see docstring for ExpFitFgBgKDEStatistic.kde_ratio + """ + return ExpFitFgBgKDEStatistic.kde_ratio(self)
+ + +
+[docs] + def logsignalrate(self, stats, shift, to_shift): + """ + Inherited, see docstring for ExpFitFgBgKDEStatistic.logsignalrate + """ + return ExpFitFgBgKDEStatistic.logsignalrate(self, stats, shift, + to_shift)
+ + +
+[docs] + def rank_stat_single(self, single_info, + **kwargs): # pylint:disable=unused-argument + """ + Inherited, see docstring for ExpFitFgBgKDEStatistic.rank_stat_single + """ + return ExpFitFgBgKDEStatistic.rank_stat_single( + self, + single_info, + **kwargs)
+ + +
+[docs] + def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs): + """ + Inherited, see docstring for + ExpFitFgBgKDEStatistic.coinc_lim_for_thresh + """ + return ExpFitFgBgKDEStatistic.coinc_lim_for_thresh( + self, s, thresh, limifo, **kwargs)
+
+ + + +statistic_dict = { + 'quadsum': QuadratureSumStatistic, + 'single_ranking_only': QuadratureSumStatistic, + 'phasetd': PhaseTDStatistic, + 'exp_fit_stat': ExpFitStatistic, + 'exp_fit_csnr': ExpFitCombinedSNR, + 'phasetd_exp_fit_stat': PhaseTDExpFitStatistic, + 'dq_phasetd_exp_fit_fgbg_norm': DQExpFitFgBgNormStatistic, + 'exp_fit_bg_rate': ExpFitBgRateStatistic, + 'phasetd_exp_fit_fgbg_norm': ExpFitFgBgNormStatistic, + 'phasetd_exp_fit_fgbg_bbh_norm': ExpFitFgBgNormBBHStatistic, + 'phasetd_exp_fit_fgbg_kde': ExpFitFgBgKDEStatistic, + 'dq_phasetd_exp_fit_fgbg_kde': DQExpFitFgBgKDEStatistic, +} + + +
+[docs] +def get_statistic(stat): + """ + Error-handling sugar around dict lookup for coincident statistics + + Parameters + ---------- + stat : string + Name of the coincident statistic + + Returns + ------- + class + Subclass of Stat base class + + Raises + ------ + RuntimeError + If the string is not recognized as corresponding to a Stat subclass + """ + try: + return statistic_dict[stat] + except KeyError: + raise RuntimeError('%s is not an available detection statistic' % stat)
+ + + +
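+# Editor's sketch of typical usage of the lookup above. The ranking names
+# shown ('quadsum' and 'newsnr') are illustrative choices; any key of
+# statistic_dict and of ranking.sngls_ranking_function_dict works the same way.
+def _example_get_statistic():
+    # Look up the statistic class by name and instantiate it with a
+    # single-detector ranking, no statistic files and a two-detector network
+    stat_class = get_statistic('quadsum')
+    return stat_class('newsnr', files=[], ifos=['H1', 'L1'])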
+[docs] +def insert_statistic_option_group(parser, default_ranking_statistic=None): + """ + Add ranking statistic options to the optparser object. + + Adds the options used to initialize a PyCBC Stat class. + + Parameters + ----------- + parser : object + OptionParser instance. + default_ranking_statistic : str + Allows setting a default statistic for the '--ranking-statistic' + option. The option is no longer required if a default is provided. + + Returns + -------- + statistic_opt_group : optparser.argument_group + The argument group that is added to the parser. + """ + statistic_opt_group = parser.add_argument_group( + "Options needed to initialize a PyCBC Stat class for computing the " + "ranking of events from a PyCBC search." + ) + + statistic_opt_group.add_argument( + "--ranking-statistic", + default=default_ranking_statistic, + choices=statistic_dict.keys(), + required=True if default_ranking_statistic is None else False, + help="The coinc ranking statistic to calculate" + ) + + statistic_opt_group.add_argument( + "--sngl-ranking", + choices=ranking.sngls_ranking_function_dict.keys(), + required=True, + help="The single-detector trigger ranking to use." + ) + + statistic_opt_group.add_argument( + "--statistic-files", + nargs='*', + action='append', + default=[], + help="Files containing ranking statistic info" + ) + + statistic_opt_group.add_argument( + "--statistic-keywords", + nargs='*', + default=[], + help="Provide additional key-word arguments to be sent to " + "the statistic class when it is initialized. Should " + "be given in format --statistic-keywords " + "KWARG1:VALUE1 KWARG2:VALUE2 KWARG3:VALUE3 ..." + ) + + return statistic_opt_group
+ + + +
+[docs] +def parse_statistic_keywords_opt(stat_kwarg_list): + """ + Parse the list of statistic keywords into an appropriate dictionary. + + Take input from the input argument ["KWARG1:VALUE1", "KWARG2:VALUE2", + "KWARG3:VALUE3"] and convert into a dictionary. + + Parameters + ---------- + stat_kwarg_list : list + Statistic keywords in list format + + Returns + ------- + stat_kwarg_dict : dict + Statistic keywords in dict format + """ + stat_kwarg_dict = {} + for inputstr in stat_kwarg_list: + try: + key, value = inputstr.split(':') + stat_kwarg_dict[key] = value + except ValueError: + err_txt = "--statistic-keywords must take input in the " \ + "form KWARG1:VALUE1 KWARG2:VALUE2 KWARG3:VALUE3 ... " \ + "Received {}".format(' '.join(stat_kwarg_list)) + raise ValueError(err_txt) + + return stat_kwarg_dict
+ + + +
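+# Editor's sketch: the expected round trip for the keyword parser above.
+# The keyword names are illustrative only; whichever kwargs the chosen Stat
+# subclass accepts can be passed this way. Note that values remain strings.
+def _example_parse_statistic_keywords():
+    kwarg_strings = ["benchmark_lograte:-14.6"]
+    # Returns {'benchmark_lograte': '-14.6'}
+    return parse_statistic_keywords_opt(kwarg_strings)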
+[docs] +def get_statistic_from_opts(opts, ifos): + """ + Return a Stat class from an optparser object. + + This will assume that the options in the statistic_opt_group are present + and will use these options to call stat.get_statistic and initialize the + appropriate Stat subclass with appropriate kwargs. + + Parameters + ---------- + opts : optparse.OptParser instance + The command line options + ifos : list + The list of detector names + + Returns + ------- + class + Subclass of Stat base class + """ + # Allow None inputs + if opts.statistic_files is None: + opts.statistic_files = [] + if opts.statistic_keywords is None: + opts.statistic_keywords = [] + + # flatten the list of lists of filenames to a single list (may be empty) + # if needed (e.g. not calling get_statistic_from_opts in a loop) + if len(opts.statistic_files) > 0 and \ + isinstance(opts.statistic_files[0], list): + opts.statistic_files = sum(opts.statistic_files, []) + + extra_kwargs = parse_statistic_keywords_opt(opts.statistic_keywords) + + stat_class = get_statistic(opts.ranking_statistic)( + opts.sngl_ranking, + opts.statistic_files, + ifos=ifos, + **extra_kwargs + ) + + return stat_class
+ +
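+# Editor's sketch of how the helpers above are typically wired together in a
+# command-line tool. The argument values are hypothetical; real searches also
+# pass statistic files and keywords appropriate to the chosen ranking statistic.
+def _example_statistic_from_cli(cli_args=None):
+    import argparse
+    parser = argparse.ArgumentParser()
+    insert_statistic_option_group(parser)
+    # e.g. cli_args = ['--ranking-statistic', 'quadsum',
+    #                  '--sngl-ranking', 'newsnr']
+    opts = parser.parse_args(cli_args)
+    return get_statistic_from_opts(opts, ['H1', 'L1'])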
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/threshold_cpu.html b/latest/html/_modules/pycbc/events/threshold_cpu.html new file mode 100644 index 00000000000..7b211ae5b63 --- /dev/null +++ b/latest/html/_modules/pycbc/events/threshold_cpu.html @@ -0,0 +1,232 @@ + + + + + + pycbc.events.threshold_cpu — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.threshold_cpu

+# Copyright (C) 2012  Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+import logging
+import numpy
+from .simd_threshold_cython import parallel_thresh_cluster, parallel_threshold
+from .eventmgr import _BaseThresholdCluster
+from .. import opt
+
+logger = logging.getLogger('pycbc.events.threshold_cpu')
+
+if opt.HAVE_GETCONF:
+    default_segsize = opt.LEVEL2_CACHE_SIZE / numpy.dtype('complex64').itemsize
+else:
+    # Seems to work for Sandy Bridge/Ivy Bridge/Haswell, for now?
+    default_segsize = 32768
+
+
+[docs] +def threshold_numpy(series, value): + arr = series.data + locs = numpy.where(arr.real**2 + arr.imag**2 > value**2)[0] + vals = arr[locs] + return locs, vals
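+# Editor's sketch: thresholding a short complex series with the numpy
+# implementation above. `FakeSeries` is a stand-in for the pycbc
+# TimeSeries-like object normally passed in; only `.data` is needed.
+def _example_threshold_numpy():
+    class FakeSeries:
+        data = numpy.array([0.5 + 0.5j, 3. + 4.j, 1. + 0.j],
+                           dtype=numpy.complex64)
+    # Only |3 + 4j| = 5 exceeds the threshold of 2, so locs == [1]
+    locs, vals = threshold_numpy(FakeSeries(), 2.)
+    return locs, vals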
+ + + +outl = None +outv = None +count = None +
+[docs] +def threshold_inline(series, value): + arr = numpy.array(series.data, copy=False, dtype=numpy.complex64) + global outl, outv, count + if outl is None or len(outl) < len(series): + outl = numpy.zeros(len(series), dtype=numpy.uint32) + outv = numpy.zeros(len(series), dtype=numpy.complex64) + count = numpy.zeros(1, dtype=numpy.uint32) + + N = len(series) + threshold = value**2.0 + parallel_threshold(N, arr, outv, outl, count, threshold) + num = count[0] + if num > 0: + return outl[0:num], outv[0:num] + else: + # keep dtypes consistent with the non-empty case: + # uint32 locations and complex64 values + return numpy.array([], numpy.uint32), numpy.array([], numpy.complex64)
+ + +# threshold_numpy can also be used here, but for now we use the inline code +# in all instances. Not sure why we're defining threshold *and* threshold_only +# but we are, and I'm not going to change this at this point. +threshold = threshold_inline +threshold_only = threshold_inline + +
+[docs] +class CPUThresholdCluster(_BaseThresholdCluster): + def __init__(self, series): + self.series = numpy.array(series.data, copy=False, + dtype=numpy.complex64) + self.slen = numpy.uint32(len(series)) + self.outv = numpy.zeros(self.slen, numpy.complex64) + self.outl = numpy.zeros(self.slen, numpy.uint32) + self.segsize = numpy.uint32(default_segsize) + +
+[docs] + def threshold_and_cluster(self, threshold, window): + self.count = parallel_thresh_cluster(self.series, self.slen, + self.outv, self.outl, + numpy.float32(threshold), + numpy.uint32(window), + self.segsize) + if self.count > 0: + return self.outv[0:self.count], self.outl[0:self.count] + else: + return numpy.array([], dtype = numpy.complex64), numpy.array([], dtype = numpy.uint32)
+
+ + + +def _threshold_cluster_factory(series): + return CPUThresholdCluster +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/trigger_fits.html b/latest/html/_modules/pycbc/events/trigger_fits.html new file mode 100644 index 00000000000..d46bcc01393 --- /dev/null +++ b/latest/html/_modules/pycbc/events/trigger_fits.html @@ -0,0 +1,461 @@ + + + + + + pycbc.events.trigger_fits — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.trigger_fits

+"""
+Tools for maximum likelihood fits to single trigger statistic values
+
+For some set of values above a threshold, e.g. trigger SNRs, the functions
+in this module perform maximum likelihood fits with 1-sigma uncertainties
+to various simple functional forms of PDF, all normalized to 1.
+You can also obtain the fitted function and its (inverse) CDF and perform
+a Kolmogorov-Smirnov test.
+
+Usage:
+# call the maximum likelihood estimator directly if the threshold is known
+# (here with unit weights for each sample)
+alpha = exponential_fitalpha(snrs, 5.5, numpy.ones_like(snrs))
+
+# apply a threshold explicitly
+alpha, sigma_alpha = fit_above_thresh('exponential', snrs, thresh=6.25)
+
+# let the code work out the threshold from the smallest value via the default thresh=None
+alpha, sigma_alpha = fit_above_thresh('exponential', snrs)
+
+# or only fit the largest N values, i.e. tail fitting
+thresh = tail_threshold(snrs, N=500)
+alpha, sigma_alpha = fit_above_thresh('exponential', snrs, thresh)
+
+# obtain the fitted function directly
+xvals = numpy.linspace(5.5, 10.5, 20)
+exponential_fit = expfit(xvals, alpha, thresh)
+
+# or access function by name
+exponential_fit_1 = fit_fn('exponential', xvals, alpha, thresh)
+
+# Use weighting factors to e.g. take decimation into account
+alpha, sigma_alpha = fit_above_thresh('exponential', snrs, weights=weights)
+
+# get the KS test statistic and p-value - see scipy.stats.kstest
+ks_stat, ks_pval = KS_test('exponential', snrs, alpha, thresh)
+
+"""
+
+# Copyright T. Dent 2015 (thomas.dent@aei.mpg.de)
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+
+import logging
+import numpy
+from scipy.stats import kstest
+
+logger = logging.getLogger('pycbc.events.trigger_fits')
+
+
+[docs] +def exponential_fitalpha(vals, thresh, w): + """ + Maximum likelihood estimator for the fit factor for + an exponential decrease model + """ + return 1. / (numpy.average(vals, weights=w) - thresh)
+ + + +
+[docs] +def rayleigh_fitalpha(vals, thresh, w): + """ + Maximum likelihood estimator for the fit factor for + a Rayleigh distribution of events + """ + return 2. / (numpy.average(vals ** 2., weights=w) - thresh ** 2.)
+ + + +
+[docs] +def power_fitalpha(vals, thresh, w): + """ + Maximum likelihood estimator for the fit factor for + a power law model + """ + return numpy.average(numpy.log(vals/thresh), weights=w) ** -1. + 1.
+ + + +fitalpha_dict = { + 'exponential' : exponential_fitalpha, + 'rayleigh' : rayleigh_fitalpha, + 'power' : power_fitalpha +} + +# measurement standard deviation = (-d^2 log L/d alpha^2)^(-1/2) +fitstd_dict = { + 'exponential' : lambda weights, alpha : alpha / sum(weights) ** 0.5, + 'rayleigh' : lambda weights, alpha : alpha / sum(weights) ** 0.5, + 'power' : lambda weights, alpha : (alpha - 1.) / sum(weights) ** 0.5 +} + +
+[docs] +def fit_above_thresh(distr, vals, thresh=None, weights=None): + """ + Maximum likelihood fit for the coefficient alpha + + Fitting a distribution of discrete values above a given threshold. + Exponential p(x) = alpha exp(-alpha (x-x_t)) + Rayleigh p(x) = alpha x exp(-alpha (x**2-x_t**2)/2) + Power p(x) = ((alpha-1)/x_t) (x/x_t)**-alpha + Values below threshold will be discarded. + If no threshold is specified the minimum sample value will be used. + + Parameters + ---------- + distr : {'exponential', 'rayleigh', 'power'} + Name of distribution + vals : sequence of floats + Values to fit + thresh : float + Threshold to apply before fitting; if None, use min(vals) + weights: sequence of floats + Weighting factors to use for the values when fitting. + Default=None - all the same + + Returns + ------- + alpha : float + Fitted value + sigma_alpha : float + Standard error in fitted value + """ + vals = numpy.array(vals) + if thresh is None: + thresh = min(vals) + above_thresh = numpy.ones_like(vals, dtype=bool) + else: + above_thresh = vals >= thresh + if numpy.count_nonzero(above_thresh) == 0: + # Nothing is above threshold - warn and return -1 + logger.warning("No values are above the threshold, %.2f, " + "maximum is %.2f.", thresh, vals.max()) + return -1., -1. + + vals = vals[above_thresh] + + # Set up the weights + if weights is not None: + weights = numpy.array(weights) + w = weights[above_thresh] + else: + w = numpy.ones_like(vals) + + alpha = fitalpha_dict[distr](vals, thresh, w) + return alpha, fitstd_dict[distr](w, alpha)
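+# Editor's sketch: recovering a known exponential slope from synthetic data.
+# The numbers are illustrative; with enough samples the fitted alpha should
+# land within a few sigma_alpha of the true value used to generate the data.
+def _example_fit_above_thresh(seed=0):
+    rng = numpy.random.RandomState(seed)
+    thresh = 6.
+    true_alpha = 4.
+    # Exponentially distributed excesses above the threshold
+    snrs = thresh + rng.exponential(scale=1. / true_alpha, size=10000)
+    alpha, sigma_alpha = fit_above_thresh('exponential', snrs, thresh)
+    return alpha, sigma_alpha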
+ + + +# Variables: +# x: the trigger stat value(s) at which to evaluate the function +# a: slope parameter of the fit +# t: lower threshold stat value +fitfn_dict = { + 'exponential' : lambda x, a, t : a * numpy.exp(-a * (x - t)), + 'rayleigh' : lambda x, a, t : (a * x * \ + numpy.exp(-a * (x ** 2 - t ** 2) / 2.)), + 'power' : lambda x, a, t : (a - 1.) * x ** (-a) * t ** (a - 1.) +} + +
+[docs] +def fit_fn(distr, xvals, alpha, thresh): + """ + The fitted function normalized to 1 above threshold + + To normalize to a given total count multiply by the count. + + Parameters + ---------- + xvals : sequence of floats + Values where the function is to be evaluated + alpha : float + The fitted parameter + thresh : float + Threshold value applied to fitted values + + Returns + ------- + fit : array of floats + Fitted function at the requested xvals + """ + xvals = numpy.array(xvals) + fit = fitfn_dict[distr](xvals, alpha, thresh) + # set fitted values below threshold to 0 + numpy.putmask(fit, xvals < thresh, 0.) + return fit
+ + + +cum_fndict = { + 'exponential' : lambda x, alpha, t : numpy.exp(-alpha * (x - t)), + 'rayleigh' : lambda x, alpha, t : numpy.exp(-alpha * (x ** 2. - t ** 2.) / 2.), + 'power' : lambda x, alpha, t : x ** (1. - alpha) * t ** (alpha - 1.) +} + +
+[docs] +def cum_fit(distr, xvals, alpha, thresh): + """ + Integral of the fitted function above a given value (reverse CDF) + + The fitted function is normalized to 1 above threshold + + Parameters + ---------- + xvals : sequence of floats + Values where the function is to be evaluated + alpha : float + The fitted parameter + thresh : float + Threshold value applied to fitted values + + Returns + ------- + cum_fit : array of floats + Reverse CDF of fitted function at the requested xvals + """ + xvals = numpy.array(xvals) + cum_fit = cum_fndict[distr](xvals, alpha, thresh) + # set fitted values below threshold to 0 + numpy.putmask(cum_fit, xvals < thresh, 0.) + return cum_fit
+ + +
+[docs] +def tail_threshold(vals, N=1000): + """Determine a threshold above which there are N louder values""" + vals = numpy.array(vals) + if len(vals) < N: + raise RuntimeError('Not enough input values to determine threshold') + vals.sort() + return min(vals[-N:])
+ + + +
+[docs] +def KS_test(distr, vals, alpha, thresh=None): + """ + Perform Kolmogorov-Smirnov test for fitted distribution + + Compare the given set of discrete values above a given threshold to the + fitted distribution function. + If no threshold is specified, the minimum sample value will be used. + Returns the KS test statistic and its p-value: lower p means less + probable under the hypothesis of a perfect fit + + Parameters + ---------- + distr : {'exponential', 'rayleigh', 'power'} + Name of distribution + vals : sequence of floats + Values to compare to fit + alpha : float + Fitted distribution parameter + thresh : float + Threshold to apply before fitting; if None, use min(vals) + + Returns + ------- + D : float + KS test statistic + p-value : float + p-value, assumed to be two-tailed + """ + vals = numpy.array(vals) + if thresh is None: + thresh = min(vals) + else: + vals = vals[vals >= thresh] + def cdf_fn(x): + return 1 - cum_fndict[distr](x, alpha, thresh) + return kstest(vals, cdf_fn)
+ + + +
+[docs] +def which_bin(par, minpar, maxpar, nbins, log=False): + """ + Helper function + + Returns bin index where a parameter value belongs (from 0 through nbins-1) + when dividing the range between minpar and maxpar equally into bins. + + Parameters + ---------- + par : float + Parameter value being binned + minpar : float + Minimum parameter value + maxpar : float + Maximum parameter value + nbins : int + Number of bins to use + log : boolean + If True, use log spaced bins + + Returns + ------- + binind : int + Bin index + """ + assert (par >= minpar and par <= maxpar) + if log: + par, minpar, maxpar = numpy.log(par), numpy.log(minpar), numpy.log(maxpar) + # par lies some fraction of the way between min and max + if minpar != maxpar: + frac = float(par - minpar) / float(maxpar - minpar) + else: + # if they are equal there is only one size 0 bin + # must be in that bin + frac = 0 + # binind then lies between 0 and nbins - 1 + binind = int(frac * nbins) + # corner case + if par == maxpar: + binind = nbins - 1 + return binind
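+# Editor's sketch: binning a parameter value with the helper above. The
+# numbers are arbitrary. Dividing [1, 100] into 4 log-spaced bins puts the
+# bin edges at 1, ~3.16, 10, ~31.6 and 100, so a value of 20 falls in bin 2.
+def _example_which_bin():
+    return which_bin(20., 1., 100., 4, log=True)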
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/triggers.html b/latest/html/_modules/pycbc/events/triggers.html new file mode 100644 index 00000000000..b80badbf064 --- /dev/null +++ b/latest/html/_modules/pycbc/events/triggers.html @@ -0,0 +1,384 @@ + + + + + + pycbc.events.triggers — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.triggers

+# Copyright (C) 2017 Christopher M. Biwer
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+""" This modules contains functions for reading single and coincident triggers
+from the command line.
+"""
+import logging
+import h5py
+import numpy
+
+from pycbc import conversions, pnutils
+from pycbc.events import coinc
+import pycbc.detector
+
+logger = logging.getLogger('pycbc.events.triggers')
+
+
+
+[docs] +def insert_bank_bins_option_group(parser): + """ Add options to the optparser object for selecting templates in bins. + + Parameters + ----------- + parser : object + OptionParser instance. + """ + bins_group = parser.add_argument_group( + "Options for selecting templates in bins.") + bins_group.add_argument("--bank-bins", nargs="+", default=None, + help="Ordered list of mass bin upper boundaries. " + "An ordered list of type-boundary pairs, " + "applied sequentially. Must provide a name " + "(can be any unique string for tagging " + "purposes), the parameter to bin " + "on, and the membership condition via " + "'lt' / 'gt' operators. " + "Ex. name1:component:lt2 name2:total:lt15") + bins_group.add_argument("--bank-file", default=None, + help="HDF format template bank file.") + bins_group.add_argument("--f-lower", default=None, + help="Low frequency cutoff in Hz.") + return bins_group
+ + + +
+[docs] +def bank_bins_from_cli(opts): + """ Parses the CLI options related to binning templates in the bank. + + Parameters + ---------- + opts : object + Result of parsing the CLI with ArgumentParser. + + Returns + ------- + bins_idx : dict + A dict with bin names as key and an array of their indices as value. + bank : dict + A dict of the datasets from the bank file. + """ + bank = {} + fp = h5py.File(opts.bank_file, 'r') + for key in fp.keys(): + bank[key] = fp[key][:] + bank["f_lower"] = float(opts.f_lower) if opts.f_lower else None + if opts.bank_bins: + bins_idx = coinc.background_bin_from_string(opts.bank_bins, bank) + else: + bins_idx = {"all" : numpy.arange(0, len(bank[tuple(fp.keys())[0]]))} + fp.close() + return bins_idx, bank
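A usage sketch (not part of the module source); the bank file name is hypothetical and must exist with the usual template datasets for bank_bins_from_cli to succeed. The bin strings follow the name:parameter:condition format described in the option help above.

import argparse
parser = argparse.ArgumentParser()
insert_bank_bins_option_group(parser)
opts = parser.parse_args(['--bank-file', 'H1L1-BANK.hdf', '--f-lower', '20',
                          '--bank-bins', 'bns:total:lt4', 'rest:total:lt1000'])
bins_idx, bank = bank_bins_from_cli(opts)
for name, idx in bins_idx.items():
    print(name, len(idx))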
+ + + +
+[docs] +def get_mass_spin(bank, tid): + """ + Helper function + + Parameters + ---------- + bank : h5py File object + Bank parameter file + tid : integer or array of int + Indices of the entries to be returned + + Returns + ------- + m1, m2, s1z, s2z : tuple of floats or arrays of floats + Parameter values of the bank entries + """ + m1 = bank['mass1'][:][tid] + m2 = bank['mass2'][:][tid] + s1z = bank['spin1z'][:][tid] + s2z = bank['spin2z'][:][tid] + return m1, m2, s1z, s2z
+ + + +
+[docs] +def get_param(par, args, m1, m2, s1z, s2z): + """ + Helper function + + Parameters + ---------- + par : string + Name of parameter to calculate + args : Namespace object returned from ArgumentParser instance + Calling code command line options, used for f_lower value + m1 : float or array of floats + First binary component mass (etc.) + + Returns + ------- + parvals : float or array of floats + Calculated parameter values + """ + if par == 'mchirp': + parvals = conversions.mchirp_from_mass1_mass2(m1, m2) + elif par == 'mtotal': + parvals = m1 + m2 + elif par == 'eta': + parvals = conversions.eta_from_mass1_mass2(m1, m2) + elif par in ['chi_eff', 'effective_spin']: + parvals = conversions.chi_eff(m1, m2, s1z, s2z) + elif par == 'template_duration': + # default to SEOBNRv4 duration function + if not hasattr(args, 'approximant') or args.approximant is None: + args.approximant = "SEOBNRv4" + parvals = pnutils.get_imr_duration(m1, m2, s1z, s2z, args.f_lower, + args.approximant) + if args.min_duration: + parvals += args.min_duration + elif par == 'tau0': + parvals = conversions.tau0_from_mass1_mass2(m1, m2, args.f_lower) + elif par == 'tau3': + parvals = conversions.tau3_from_mass1_mass2(m1, m2, args.f_lower) + elif par in pnutils.named_frequency_cutoffs.keys(): + parvals = pnutils.frequency_cutoff_from_name(par, m1, m2, s1z, s2z) + else: + # try asking for a LALSimulation frequency function + parvals = pnutils.get_freq(par, m1, m2, s1z, s2z) + return parvals
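A minimal sketch (not part of the module source): only an f_lower attribute is needed on args for the parameters shown here; the masses are arbitrary example values.

import argparse
args = argparse.Namespace(f_lower=20.)
m1 = numpy.array([10., 30.])
m2 = numpy.array([5., 25.])
s1z = numpy.zeros(2)
s2z = numpy.zeros(2)
print(get_param('mtotal', args, m1, m2, s1z, s2z))   # -> [15. 55.]
print(get_param('mchirp', args, m1, m2, s1z, s2z))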
+ + + +
+[docs] +def get_found_param(injfile, bankfile, trigfile, param, ifo, args=None): + """ + Translates some popular trigger parameters into functions that calculate + them from an hdf found injection file + + Parameters + ---------- + injfile: hdf5 File object + Injection file of format known to ANitz (DOCUMENTME) + bankfile: hdf5 File object or None + Template bank file + trigfile: hdf5 File object or None + Single-detector trigger file + param: string + Parameter to be calculated for the recovered triggers + ifo: string or None + Standard ifo name, ex. 'L1' + args : Namespace object returned from ArgumentParser instance + Calling code command line options, used for f_lower value + + Returns + ------- + [return value]: NumPy array of floats, array of boolean + The calculated parameter values and a Boolean mask indicating which + injections were found in the given ifo (if supplied) + """ + foundtmp = injfile["found_after_vetoes/template_id"][:] + # will record whether inj was found in the given ifo + found_in_ifo = numpy.ones_like(foundtmp, dtype=bool) + if trigfile is not None: + try: # old 2-ifo behaviour + # get the name of the ifo in the injection file, eg "detector_1" + # and the integer from that name + ifolabel = [name for name, val in injfile.attrs.items() if \ + "detector" in name and val == ifo][0] + foundtrg = injfile["found_after_vetoes/trigger_id" + ifolabel[-1]] + except IndexError: # multi-ifo + foundtrg = injfile["found_after_vetoes/%s/trigger_id" % ifo] + # multi-ifo pipeline assigns -1 for inj not found in specific ifo + found_in_ifo = foundtrg[:] != -1 + if bankfile is not None and param in bankfile.keys(): + return bankfile[param][:][foundtmp], found_in_ifo + elif trigfile is not None and param in trigfile[ifo].keys(): + return trigfile[ifo][param][:][foundtrg], found_in_ifo + else: + assert bankfile + b = bankfile + return get_param(param, args, b['mass1'][:], b['mass2'][:], + b['spin1z'][:], b['spin2z'][:])[foundtmp],\ + found_in_ifo
+ + + +
+[docs] +def get_inj_param(injfile, param, ifo, args=None): + """ + Translates some popular injection parameters into functions that calculate + them from an hdf found injection file + + Parameters + ---------- + injfile: hdf5 File object + Injection file of format known to ANitz (DOCUMENTME) + param: string + Parameter to be calculated for the injected signals + ifo: string + Standard detector name, ex. 'L1' + args: Namespace object returned from ArgumentParser instance + Calling code command line options, used for f_lower value + + Returns + ------- + [return value]: NumPy array of floats + The calculated parameter values + """ + det = pycbc.detector.Detector(ifo) + + inj = injfile["injections"] + if param in inj.keys(): + # index relative to the 'injections' group and read the dataset + return inj[param][:] + + if param == "end_time_"+ifo[0].lower(): + return inj['end_time'][:] + det.time_delay_from_earth_center( + inj['longitude'][:], + inj['latitude'][:], + inj['end_time'][:]) + else: + return get_param(param, args, inj['mass1'][:], inj['mass2'][:], + inj['spin1z'][:], inj['spin2z'][:])
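A usage sketch (not part of the module source); the found-injection file name is hypothetical and must contain the standard 'injections' group for these calls to work.

import argparse
args = argparse.Namespace(f_lower=20.)
with h5py.File('H1L1-HDFINJFIND.hdf', 'r') as injfile:
    inj_mchirp = get_inj_param(injfile, 'mchirp', 'H1', args=args)
    inj_end_time_h1 = get_inj_param(injfile, 'end_time_h', 'H1', args=args)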
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/events/veto.html b/latest/html/_modules/pycbc/events/veto.html new file mode 100644 index 00000000000..fc338ab40ff --- /dev/null +++ b/latest/html/_modules/pycbc/events/veto.html @@ -0,0 +1,383 @@ + + + + + + pycbc.events.veto — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.events.veto

+""" This module contains utilities to manipulate trigger lists based on
+segments.
+"""
+import logging
+import numpy
+from ligo.lw import table, lsctables, utils as ligolw_utils
+from ligo.segments import segment, segmentlist
+
+logger = logging.getLogger('pycbc.events.veto')
+
+
+[docs] +def start_end_to_segments(start, end): + """ Convert paired arrays of start and end times into a segmentlist """ + return segmentlist([segment(s, e) for s, e in zip(start, end)])
+ + +
+[docs] +def segments_to_start_end(segs): + """ Coalesce a segmentlist and return arrays of its start and end times """ + segs.coalesce() + return (numpy.array([s[0] for s in segs]), + numpy.array([s[1] for s in segs]))
+ + +
+[docs] +def start_end_from_segments(segment_file): + """ + Return the start and end time arrays from a segment file. + + Parameters + ---------- + segment_file: xml segment file + + Returns + ------- + start: numpy.ndarray + end: numpy.ndarray + """ + from pycbc.io.ligolw import LIGOLWContentHandler as h + + indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h) + segment_table = lsctables.SegmentTable.get_table(indoc) + start = numpy.array(segment_table.getColumnByName('start_time')) + start_ns = numpy.array(segment_table.getColumnByName('start_time_ns')) + end = numpy.array(segment_table.getColumnByName('end_time')) + end_ns = numpy.array(segment_table.getColumnByName('end_time_ns')) + return start + start_ns * 1e-9, end + end_ns * 1e-9
+ + + +
+[docs] +def indices_within_times(times, start, end): + """ + Return an index array into times that lie within the durations defined by start end arrays + + Parameters + ---------- + times: numpy.ndarray + Array of times + start: numpy.ndarray + Array of duration start times + end: numpy.ndarray + Array of duration end times + + Returns + ------- + indices: numpy.ndarray + Array of indices into times + """ + # coalesce the start/end segments + start, end = segments_to_start_end(start_end_to_segments(start, end).coalesce()) + + tsort = times.argsort() + times_sorted = times[tsort] + left = numpy.searchsorted(times_sorted, start) + right = numpy.searchsorted(times_sorted, end) + + if len(left) == 0: + return numpy.array([], dtype=numpy.uint32) + + return tsort[numpy.hstack([numpy.r_[s:e] for s, e in zip(left, right)])]
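A small worked example (not part of the module source); the first two input segments overlap and are coalesced internally before the search.

times = numpy.array([1., 5., 12., 20., 31.])
start = numpy.array([0., 4., 25.])
end = numpy.array([6., 15., 30.])
keep = indices_within_times(times, start, end)
print(times[keep])    # the times 1., 5. and 12. lie inside the coalesced segments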
+ + +
+[docs] +def indices_outside_times(times, start, end): + """ + Return an index array into times that lie outside the durations defined by start end arrays + + Parameters + ---------- + times: numpy.ndarray + Array of times + start: numpy.ndarray + Array of duration start times + end: numpy.ndarray + Array of duration end times + + Returns + ------- + indices: numpy.ndarray + Array of indices into times + """ + exclude = indices_within_times(times, start, end) + indices = numpy.arange(0, len(times)) + return numpy.delete(indices, exclude)
+ + +
+[docs] +def select_segments_by_definer(segment_file, segment_name=None, ifo=None): + """ Return the list of segments that match the segment name + + Parameters + ---------- + segment_file: str + path to segment xml file + + segment_name: str + Name of segment + ifo: str, optional + + Returns + ------- + seg: list of segments + """ + from pycbc.io.ligolw import LIGOLWContentHandler as h + + indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h) + segment_table = table.Table.get_table(indoc, 'segment') + + seg_def_table = table.Table.get_table(indoc, 'segment_definer') + def_ifos = seg_def_table.getColumnByName('ifos') + def_names = seg_def_table.getColumnByName('name') + def_ids = seg_def_table.getColumnByName('segment_def_id') + + valid_id = [] + for def_ifo, def_name, def_id in zip(def_ifos, def_names, def_ids): + if ifo and ifo != def_ifo: + continue + if segment_name and segment_name != def_name: + continue + valid_id += [def_id] + + start = numpy.array(segment_table.getColumnByName('start_time')) + start_ns = numpy.array(segment_table.getColumnByName('start_time_ns')) + end = numpy.array(segment_table.getColumnByName('end_time')) + end_ns = numpy.array(segment_table.getColumnByName('end_time_ns')) + start, end = start + 1e-9 * start_ns, end + 1e-9 * end_ns + did = segment_table.getColumnByName('segment_def_id') + + keep = numpy.array([d in valid_id for d in did]) + if sum(keep) > 0: + return start_end_to_segments(start[keep], end[keep]) + else: + return segmentlist([])
+ + +
+[docs] +def indices_within_segments(times, segment_files, ifo=None, segment_name=None): + """ Return the list of indices that should be vetoed by the segments in the + list of veto_files. + + Parameters + ---------- + times: numpy.ndarray of integer type + Array of gps start times + segment_files: string or list of strings + A string or list of strings that contain the path to xml files that + contain a segment table + ifo: string, optional + The ifo to retrieve segments for from the segment files + segment_name: str, optional + name of segment + Returns + ------- + indices: numpy.ndarray + The array of index values within the segments + segmentlist: + The segment list corresponding to the selected time. + """ + veto_segs = segmentlist([]) + indices = numpy.array([], dtype=numpy.uint32) + for veto_file in segment_files: + veto_segs += select_segments_by_definer(veto_file, segment_name, ifo) + veto_segs.coalesce() + + start, end = segments_to_start_end(veto_segs) + if len(start) > 0: + idx = indices_within_times(times, start, end) + indices = numpy.union1d(indices, idx) + + return indices, veto_segs.coalesce()
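A usage sketch (not part of the module source); the veto file name and segment name are hypothetical placeholders for a real segment XML file.

trigger_times = numpy.array([1126259460., 1126259470., 1126259480.])
vetoed_idx, veto_segs = indices_within_segments(
    trigger_times, ['H1-VETOTIME_CAT2.xml'], ifo='H1', segment_name='VETO_CAT2')
clean_idx = numpy.delete(numpy.arange(len(trigger_times)), vetoed_idx)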
+ + +
+[docs] +def indices_outside_segments(times, segment_files, ifo=None, segment_name=None): + """ Return the list of indices that are outside the segments in the + list of segment files. + + Parameters + ---------- + times: numpy.ndarray of integer type + Array of gps start times + segment_files: string or list of strings + A string or list of strings that contain the path to xml files that + contain a segment table + ifo: string, optional + The ifo to retrieve segments for from the segment files + segment_name: str, optional + name of segment + Returns + -------- + indices: numpy.ndarray + The array of index values outside the segments + segmentlist: + The segment list corresponding to the selected time. + """ + exclude, segs = indices_within_segments(times, segment_files, + ifo=ifo, segment_name=segment_name) + indices = numpy.arange(0, len(times)) + return numpy.delete(indices, exclude), segs
+ + +
+[docs] +def get_segment_definer_comments(xml_file, include_version=True): + """Returns a dict with the comment column as the value for each segment""" + + from pycbc.io.ligolw import LIGOLWContentHandler as h + + # read segment definer table + xmldoc = ligolw_utils.load_fileobj(xml_file, + compress='auto', + contenthandler=h) + seg_def_table = lsctables.SegmentDefTable.get_table(xmldoc) + + # put comment column into a dict + comment_dict = {} + for seg_def in seg_def_table: + if include_version: + full_channel_name = ':'.join([str(seg_def.ifos), + str(seg_def.name), + str(seg_def.version)]) + else: + full_channel_name = ':'.join([str(seg_def.ifos), + str(seg_def.name)]) + + comment_dict[full_channel_name] = seg_def.comment + + return comment_dict
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/backend_cpu.html b/latest/html/_modules/pycbc/fft/backend_cpu.html new file mode 100644 index 00000000000..95b42e0ec8f --- /dev/null +++ b/latest/html/_modules/pycbc/fft/backend_cpu.html @@ -0,0 +1,173 @@ + + + + + + pycbc.fft.backend_cpu — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.backend_cpu

+#  Copyright (C) 2014 Josh Willis
+#
+#  This program is free software; you can redistribute it and/or modify
+#  it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; either version 2 of the License, or
+#  (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; see the file COPYING. If not, write to the
+#  Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+#  MA  02111-1307  USA
+
+from .core import _list_available
+
+_backend_dict = {'fftw' : 'fftw',
+                 'mkl' : 'mkl',
+                 'numpy' : 'npfft'}
+_backend_list = ['mkl', 'fftw', 'numpy']
+
+_alist, _adict = _list_available(_backend_list, _backend_dict)
+
+cpu_backend = None
+
+
+[docs] +def set_backend(backend_list): + global cpu_backend + for backend in backend_list: + if backend in _alist: + cpu_backend = backend + break
+ + +
+[docs] +def get_backend(): + return _adict[cpu_backend]
+ + +set_backend(_backend_list) + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/backend_mkl.html b/latest/html/_modules/pycbc/fft/backend_mkl.html new file mode 100644 index 00000000000..6e6e3d89642 --- /dev/null +++ b/latest/html/_modules/pycbc/fft/backend_mkl.html @@ -0,0 +1,170 @@ + + + + + + pycbc.fft.backend_mkl — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.backend_mkl

+#  Copyright (C) 2014 Josh Willis
+#
+#  This program is free software; you can redistribute it and/or modify
+#  it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; either version 2 of the License, or
+#  (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; see the file COPYING. If not, write to the
+#  Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+#  MA  02111-1307  USA
+
+from .core import _list_available
+
+_backend_dict = {'mkl' : 'mkl'}
+_backend_list = ['mkl']
+
+_alist, _adict = _list_available(_backend_list, _backend_dict)
+
+mkl_backend = None
+
+
+[docs] +def set_backend(backend_list): + global mkl_backend + for backend in backend_list: + if backend in _alist: + mkl_backend = backend + break
+ + +
+[docs] +def get_backend(): + return _adict[mkl_backend]
+ + +set_backend(_backend_list) +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/backend_support.html b/latest/html/_modules/pycbc/fft/backend_support.html new file mode 100644 index 00000000000..0f8d6acb23d --- /dev/null +++ b/latest/html/_modules/pycbc/fft/backend_support.html @@ -0,0 +1,222 @@ + + + + + + pycbc.fft.backend_support — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.backend_support

+# Copyright (C) 2012  Josh Willis, Andrew Miller
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This package provides a front-end to various fast Fourier transform
+implementations within PyCBC.
+"""
+
+import pycbc
+import pycbc.scheme
+
+
+# These are global variables, that are modified by the various scheme-
+# dependent submodules, to maintain a list of all possible backends
+# for all possible schemes that are available at runtime.  This list
+# and dict are then used when parsing command-line options.
+
+_all_backends_list = []
+_all_backends_dict = {}
+
+# The following is the function called by each scheme's setup to add whatever new
+# backends may have been found to the global list.  Since some backends may be
+# shared, we must first check to make sure that the item in the list is not already
+# in the global list, and we assume that the keys to the dict are in one-to-one
+# correspondence with the items in the list.
+
+def _update_global_available(new_list, new_dict, global_list, global_dict):
+    for item in new_list:
+        if item not in global_list:
+            global_list.append(item)
+            global_dict.update({item:new_dict[item]})
+
+
+[docs] +def get_backend_modules(): + return _all_backends_dict.values()
+ + +
+[docs] +def get_backend_names(): + return list(_all_backends_dict.keys())
+ + +BACKEND_PREFIX="pycbc.fft.backend_" + +
+[docs] +@pycbc.scheme.schemed(BACKEND_PREFIX) +def set_backend(backend_list): + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] +@pycbc.scheme.schemed(BACKEND_PREFIX) +def get_backend(): + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +# Import all scheme-dependent backends, to get _all_backends accurate: + +for scheme_name in ["cpu", "mkl", "cuda"]: + try: + mod = __import__('pycbc.fft.backend_' + scheme_name, fromlist = ['_alist', '_adict']) + _alist = getattr(mod, "_alist") + _adict = getattr(mod, "_adict") + _update_global_available(_alist, _adict, _all_backends_list, + _all_backends_dict) + except ImportError: + pass +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/class_api.html b/latest/html/_modules/pycbc/fft/class_api.html new file mode 100644 index 00000000000..fd0e53f5658 --- /dev/null +++ b/latest/html/_modules/pycbc/fft/class_api.html @@ -0,0 +1,223 @@ + + + + + + pycbc.fft.class_api — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.class_api

+# Copyright (C) 2012  Josh Willis, Andrew Miller
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This package provides a front-end to various fast Fourier transform
+implementations within PyCBC.
+"""
+
+from .backend_support import get_backend
+
+def _fft_factory(invec, outvec, nbatch=1, size=None):
+    backend = get_backend()
+    cls = getattr(backend, 'FFT')
+    return cls
+
+def _ifft_factory(invec, outvec, nbatch=1, size=None):
+    backend = get_backend()
+    cls = getattr(backend, 'IFFT')
+    return cls
+
+
+[docs] +class FFT(object): + """ Create a forward FFT engine + + Parameters + ---------- + invec : complex64 or float32 + Input pycbc.types.Array (or subclass); its FFT will be computed + outvec : complex64 + Output pycbc.types.Array (or subclass); it will hold the FFT of invec + nbatch : int (default 1) + When not one, specifies that invec and outvec should each be interpreted + as nbatch distinct vectors. The total length of invec and outvec should + then be that appropriate to a single vector, multiplied by nbatch + size : int (default None) + When nbatch is not 1, this parameter gives the logical size of each + transform. If nbatch is 1 (the default) this can be None, and the + logical size is the length of invec. + + The addresses in memory of both vectors should be divisible by + pycbc.PYCBC_ALIGNMENT. + """ + def __new__(cls, *args, **kwargs): + real_cls = _fft_factory(*args, **kwargs) + return real_cls(*args, **kwargs)
+ + +
+[docs] +class IFFT(object): + """ Create a reverse FFT engine + + Parameters + ---------- + invec : complex64 + Input pycbc.types.Array (or subclass); its IFFT will be computed + outvec : complex64 or float32 + Output pycbc.types.Array (or subclass); it will hold the IFFT of invec + nbatch : int (default 1) + When not one, specifies that invec and outvec should each be interpreted + as nbatch distinct vectors. The total length of invec and outvec should + then be that appropriate to a single vector, multiplied by nbatch + size : int (default None) + When nbatch is not 1, this parameter gives the logical size of each + transform. If nbatch is 1 (the default) this can be None, and the + logical size is the length of outvec. + + The addresses in memory of both vectors should be divisible by + pycbc.PYCBC_ALIGNMENT. + """ + def __new__(cls, *args, **kwargs): + real_cls = _ifft_factory(*args, **kwargs) + return real_cls(*args, **kwargs)
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/fft_callback.html b/latest/html/_modules/pycbc/fft/fft_callback.html new file mode 100644 index 00000000000..f69448c1726 --- /dev/null +++ b/latest/html/_modules/pycbc/fft/fft_callback.html @@ -0,0 +1,403 @@ + + + + + + pycbc.fft.fft_callback — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.fft_callback

+#!/usr/bin/python
+import os, subprocess, ctypes
+from mako.template import Template
+
+full_corr = """
+    __device__ cufftComplex in_call(void* input, size_t offset,
+                                void* caller_info, void* shared) {
+        cufftComplex r;
+
+        cufftComplex* hp =  ((cufftComplex*) callback_params.htilde);
+        cufftComplex h = hp[offset];
+        cufftComplex s = ((cufftComplex*) input)[offset];
+
+        r.x = h.x * s.x + h.y * s.y;
+        r.y = h.x * s.y - h.y * s.x;
+
+        return r;
+    }
+"""
+
+zero_corr = """
+    __device__ cufftComplex in_call(void* input, size_t offset,
+                                void* caller_info, void* shared) {
+        if (offset >= callback_params.in_kmax)
+            return (cufftComplex){0, 0};
+        else{
+            cufftComplex r;
+
+            cufftComplex s = ((cufftComplex*) input)[offset];
+            cufftComplex* hp =  ((cufftComplex*) callback_params.htilde);
+            cufftComplex h = hp[offset];
+
+            r.x = h.x * s.x + h.y * s.y;
+            r.y = h.x * s.y - h.y * s.x;
+
+            return r;
+        }
+
+    }
+"""
+
+zero_out = """
+    __device__ void out_call(void *out, size_t offset, cufftComplex element,
+                             void *caller_info, void *shared){
+           if (offset > callback_params.out_kmin && offset < callback_params.out_kmax)
+                ((cufftComplex*) out)[offset] = element;
+    }
+
+"""
+
+copy_callback = """
+    __device__ cufftComplex correlate(void* input, size_t offset,
+                                void* caller_info, void* shared) {
+        return ((cufftComplex*)input)[offset];
+    }
+"""
+
+copy_out = """
+    __device__ void out_call(void *out, size_t offset, cufftComplex element,
+                             void *caller_info, void *shared){
+           ((cufftComplex*) out)[offset] = element;
+    }
+
+"""
+
+real_out = """
+    __device__ void out_call(void *out, size_t offset, cufftComplex element,
+                             void *caller_info, void *shared){
+           ((cufftReal*) out)[offset] = element.x * element.x + element.y * element.y;
+    }
+
+"""
+
+copy_out_fp16 = """
+    __device__ void out_call(void *out, size_t offset, cufftComplex element,
+                             void *caller_info, void *shared){
+           ((short*) out)[offset*2] = __float2half_rn(element.x);
+           ((short*) out)[offset*2+1] = __float2half_rn(element.y);
+    }
+"""
+
+
+no_out = """
+    __device__ void out_call(void *out, size_t offset, cufftComplex element,
+                             void *caller_info, void *shared){
+    }
+
+"""
+
+fftsrc = Template("""
+    #include <stdlib.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <math.h>
+    #include <cuda_runtime.h>
+    #include <cufft.h>
+    #include <cufftXt.h>
+
+    typedef struct {
+        %for t, n in parameters:
+            ${t} ${n};
+        %endfor
+    } param_t;
+
+    __constant__ param_t callback_params;
+
+    #define checkCudaErrors(val)  __checkCudaErrors__ ( (val), #val, __FILE__, __LINE__ )
+
+    template <typename T>
+    inline void __checkCudaErrors__(T code, const char *func, const char *file, int line)
+    {
+        if (code) {
+            fprintf(stderr, "CUDA error at %s:%d code=%d \\"%s\\" \\n",
+                    file, line, (unsigned int)code, func);
+            cudaDeviceReset();
+            exit(EXIT_FAILURE);
+        }
+    }
+
+    % if input_callback:
+        ${input_callback}
+        __device__ cufftCallbackLoadC input_callback = in_call;
+    % endif
+
+    % if output_callback:
+        ${output_callback}
+        __device__ cufftCallbackStoreC output_callback = out_call;
+    % endif
+
+    extern "C"  cufftHandle* create_plan(unsigned int size){
+        cufftHandle* plan = new cufftHandle;
+        size_t work_size;
+        cufftCreate(plan);
+        checkCudaErrors(cufftMakePlan1d(*plan, size, CUFFT_C2C, 1, &work_size));
+
+
+        % if input_callback:
+            cufftCallbackLoadC h_input_callback;
+            checkCudaErrors(cudaMemcpyFromSymbol(&h_input_callback, input_callback,
+                                             sizeof(h_input_callback)));
+            checkCudaErrors(cufftXtSetCallback(*plan, (void **) &h_input_callback,
+                                          CUFFT_CB_LD_COMPLEX, 0));
+        % endif
+
+        % if output_callback:
+            cufftCallbackStoreC h_output_callback;
+            checkCudaErrors(cudaMemcpyFromSymbol(&h_output_callback, output_callback,
+                                                 sizeof(h_output_callback)));
+            checkCudaErrors(cufftXtSetCallback(*plan, (void **) &h_output_callback,
+                                              CUFFT_CB_ST_COMPLEX, 0));
+        % endif
+
+        return plan;
+    }
+
+    extern "C" void execute(cufftHandle* plan, cufftComplex* in, cufftComplex* out, param_t* p){
+         if (p != NULL)
+            checkCudaErrors(cudaMemcpyToSymbolAsync(callback_params, p, sizeof(param_t), 0,  cudaMemcpyHostToDevice, 0));
+         checkCudaErrors(cufftExecC2C(*plan, in, out, CUFFT_INVERSE));
+    }
+""")
+
+
+[docs] +def compile(source, name): + """ Compile the string source code into a shared object linked against + the static version of cufft for callback support. + """ + # If we start using this again, we should find a better place for the cache + cache = os.path.join('/tmp', name) + hash_file = cache + ".hash" + lib_file = cache + ".so" + obj_file = cache + ".o" + + try: + if int(open(hash_file, "r").read()) == hash(source): + return lib_file + raise ValueError + except: + pass + + src_file = cache + ".cu" + fsrc = open(src_file, "w") + fsrc.write(source) + fsrc.close() + + cmd = ["nvcc", "-ccbin", "g++", "-dc", "-m64", + "--compiler-options", "'-fPIC'", + "-o", obj_file, + "-c", src_file] + print(" ".join(cmd)) + subprocess.check_call(cmd) + + cmd = ["nvcc", "-shared", "-ccbin", "g++", "-m64", + "-o", lib_file, obj_file, "-lcufft_static", "-lculibos"] + print(" ".join(cmd)) + + subprocess.check_call(cmd) + + hash_file = cache + ".hash" + fhash = open(hash_file, "w") + fhash.write(str(hash(source))) + return lib_file
+ + +
+[docs] +def get_fn_plan(callback=None, out_callback=None, name='pycbc_cufft', parameters=None): + """ Get the IFFT execute and plan functions + """ + if parameters is None: + parameters = [] + source = fftsrc.render(input_callback=callback, output_callback=out_callback, parameters=parameters) + path = compile(source, name) + lib = ctypes.cdll.LoadLibrary(path) + fn = lib.execute + fn.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + plan = lib.create_plan + plan.restype = ctypes.c_void_p + plan.argtypes = [ctypes.c_uint] + return fn, plan
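A heavily hedged sketch (not part of the module source): building a cuFFT plan whose load callback multiplies by a template before the inverse transform. This requires an NVIDIA GPU plus a CUDA toolkit with nvcc and the static cufft libraries available at run time; the transform size is arbitrary.

execute_fn, create_plan = get_fn_plan(callback=full_corr,
                                      parameters=[("void*", "htilde")])
plan = create_plan(2 ** 20)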
+ + +_plans = {} + +
+[docs] +class param(ctypes.Structure): + _fields_ = [("htilde", ctypes.c_void_p)]
+ +hparam = param() + +
+[docs] +def c2c_correlate_ifft(htilde, stilde, outvec): + key = 'cnf' + if key not in _plans: + fn, pfn = get_fn_plan(callback=full_corr, parameters = [("void*", "htilde")]) + plan = pfn(len(outvec), int(htilde.data.gpudata)) + _plans[key] = (fn, plan, int(htilde.data.gpudata)) + fn, plan, _ = _plans[key] + hparam.htilde = htilde.data.gpudata + fn(plan, int(stilde.data.gpudata), int(outvec.data.gpudata), ctypes.pointer(hparam))
+ + +
+[docs] +class param2(ctypes.Structure): + _fields_ = [("htilde", ctypes.c_void_p), + ("in_kmax", ctypes.c_uint), + ("out_kmin", ctypes.c_uint), + ("out_kmax", ctypes.c_uint)]
+ +hparam_zeros = param2() + +
+[docs] +def c2c_half_correlate_ifft(htilde, stilde, outvec): + key = 'cn' + if key not in _plans: + fn, pfn = get_fn_plan(callback=zero_corr, + parameters = [("void*", "htilde"), + ("unsigned int", "in_kmax"), + ("unsigned int", "out_kmin"), + ("unsigned int", "out_kmax")], + out_callback=zero_out) + plan = pfn(len(outvec), int(htilde.data.gpudata)) + _plans[key] = (fn, plan, int(htilde.data.gpudata)) + fn, plan, _ = _plans[key] + hparam_zeros.htilde = htilde.data.gpudata + hparam_zeros.in_kmax = htilde.end_idx + hparam_zeros.out_kmin = stilde.analyze.start + hparam_zeros.out_kmax = stilde.analyze.stop + fn(plan, int(stilde.data.gpudata), int(outvec.data.gpudata), ctypes.pointer(hparam_zeros))
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/fftw.html b/latest/html/_modules/pycbc/fft/fftw.html new file mode 100644 index 00000000000..8f9052be480 --- /dev/null +++ b/latest/html/_modules/pycbc/fft/fftw.html @@ -0,0 +1,732 @@ + + + + + + pycbc.fft.fftw — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.fftw

+import os
+from pycbc.types import zeros
+import numpy as _np
+import ctypes
+import pycbc.scheme as _scheme
+from pycbc.libutils import get_ctypes_library
+from .core import _BaseFFT, _BaseIFFT
+from ..types import check_aligned
+
+# IMPORTANT NOTE TO PYCBC DEVELOPERS:
+# Because this module is loaded automatically when present, and because
+# no FFTW function should be called until the user has had the chance
+# to set the threading backend, it is ESSENTIAL that simply loading this
+# module should not actually *call* ANY functions.
+
+# NOTE:
+# When loading FFTW we use os.RTLD_DEEPBIND to avoid potential segfaults due
+# to conflicts with MKL if both are present.
+if hasattr(os, 'RTLD_DEEPBIND'):
+    FFTW_RTLD_MODE = os.RTLD_DEEPBIND
+else:
+    FFTW_RTLD_MODE = ctypes.DEFAULT_MODE
+
+#FFTW constants, these are pulled from fftw3.h
+FFTW_FORWARD = -1
+FFTW_BACKWARD = 1
+
+FFTW_MEASURE = 0
+FFTW_DESTROY_INPUT = 1 << 0
+FFTW_UNALIGNED = 1 << 1
+FFTW_CONSERVE_MEMORY = 1 << 2
+FFTW_EXHAUSTIVE = 1 << 3
+FFTW_PRESERVE_INPUT = 1 << 4
+FFTW_PATIENT = 1 << 5
+FFTW_ESTIMATE = 1 << 6
+FFTW_WISDOM_ONLY = 1 << 21
+
+# Load the single and double precision libraries
+# We need to construct them directly with CDLL so
+# we can give the RTLD mode chosen above, which we must do
+# in order to use the threaded libraries as well.
+double_lib = get_ctypes_library('fftw3', ['fftw3'], mode=FFTW_RTLD_MODE)
+float_lib = get_ctypes_library('fftw3f', ['fftw3f'], mode=FFTW_RTLD_MODE)
+if (double_lib is None) or (float_lib is None):
+    raise ImportError("Unable to find FFTW libraries")
+
+# Support for FFTW's two different threading backends
+_fftw_threaded_lib = None
+_fftw_threaded_set = False
+_double_threaded_lib = None
+_float_threaded_lib = None
+
+HAVE_FFTW_THREADED = False
+
+# Although we set the number of threads based on the scheme,
+# we need a private variable that records the last value used so
+# we know whether we need to call plan_with_nthreads() again.
+_fftw_current_nthreads = 0
+
+# This function sets the number of threads used internally by FFTW
+# in planning. It just takes a number of threads, rather than itself
+# looking at scheme.mgr.num_threads, because it should not be called
+# directly, but only by functions that get the value they use from
+# scheme.mgr.num_threads
+
+def _fftw_plan_with_nthreads(nthreads):
+    global _fftw_current_nthreads
+    if not HAVE_FFTW_THREADED:
+        if (nthreads > 1):
+            raise ValueError("Threading is NOT enabled, but {0} > 1 threads specified".format(nthreads))
+        else:
+            _fftw_current_nthreads = nthreads
+    else:
+        dplanwthr = _double_threaded_lib.fftw_plan_with_nthreads
+        fplanwthr = _float_threaded_lib.fftwf_plan_with_nthreads
+        dplanwthr.restype = None
+        fplanwthr.restype = None
+        dplanwthr(nthreads)
+        fplanwthr(nthreads)
+        _fftw_current_nthreads = nthreads
+
+# This is a global dict-of-dicts used when initializing threads and
+# setting the threading library
+
+_fftw_threading_libnames = { 'unthreaded' : {'double' : None, 'float' : None},
+                             'openmp' : {'double' : 'fftw3_omp', 'float' : 'fftw3f_omp'},
+                             'pthreads' : {'double' : 'fftw3_threads', 'float' : 'fftw3f_threads'}}
+
+def _init_threads(backend):
+    # This function actually sets the backend and initializes. It returns zero on
+    # success and 1 if given a valid backend but that cannot be loaded.  It raises
+    # an exception if called after the threading backend has already been set, or
+    # if given an invalid backend.
+    global _fftw_threaded_set
+    global _fftw_threaded_lib
+    global HAVE_FFTW_THREADED
+    global _double_threaded_lib
+    global _float_threaded_lib
+    if _fftw_threaded_set:
+        raise RuntimeError(
+            "Threading backend for FFTW already set to {0}; cannot be changed".format(_fftw_threaded_lib))
+    try:
+        double_threaded_libname = _fftw_threading_libnames[backend]['double']
+        float_threaded_libname =  _fftw_threading_libnames[backend]['float']
+    except KeyError:
+        raise ValueError("Backend {0} for FFTW threading does not exist!".format(backend))
+    if double_threaded_libname is not None:
+        try:
+            # For reasons Ian doesn't understand we should not load libgomp
+            # first using RTLD_DEEPBIND, so force loading it here if needed
+            if backend == 'openmp':
+                get_ctypes_library('gomp', [], mode=ctypes.DEFAULT_MODE)
+            # Note that the threaded libraries don't have their own pkg-config
+            # files we must look for them wherever we look for double or single
+            # FFTW itself.
+            _double_threaded_lib = get_ctypes_library(
+                double_threaded_libname,
+                ['fftw3'],
+                mode=FFTW_RTLD_MODE
+            )
+            _float_threaded_lib =  get_ctypes_library(
+                float_threaded_libname,
+                ['fftw3f'],
+                mode=FFTW_RTLD_MODE
+            )
+            if (_double_threaded_lib is None) or (_float_threaded_lib is None):
+                err_str = 'Unable to load threaded libraries'
+                err_str += f'{double_threaded_libname} or '
+                err_str += f'{float_threaded_libname}'
+                raise RuntimeError(err_str)
+            dret = _double_threaded_lib.fftw_init_threads()
+            fret = _float_threaded_lib.fftwf_init_threads()
+            # FFTW for some reason uses *0* to indicate failure.  In C.
+            if (dret == 0) or (fret == 0):
+                return 1
+            HAVE_FFTW_THREADED = True
+            _fftw_threaded_set = True
+            _fftw_threaded_lib = backend
+            return 0
+        except:
+            return 1
+    else:
+        # We get here when we were given the 'unthreaded' backend
+        HAVE_FFTW_THREADED = False
+        _fftw_threaded_set = True
+        _fftw_threaded_lib = backend
+        return 0
+
+
+[docs] +def set_threads_backend(backend=None): + # This is the user facing function. If given a backend it just + # calls _init_threads and lets it do the work. If not (the default) + # then it cycles in order through threaded backends, + if backend is not None: + retval = _init_threads(backend) + # Since the user specified this backend raise an exception if the above failed + if retval != 0: + raise RuntimeError("Could not initialize FFTW threading backend {0}".format(backend)) + else: + # Note that we pop() from the end, so 'pthreads' + # is the first thing tried + _backend_list = ['unthreaded','openmp', 'pthreads'] + while not _fftw_threaded_set: + _next_backend = _backend_list.pop() + retval = _init_threads(_next_backend)
+ + +# Function to import system-wide wisdom files. + +
+[docs] +def import_sys_wisdom(): + if not _fftw_threaded_set: + set_threads_backend() + double_lib.fftw_import_system_wisdom() + float_lib.fftwf_import_system_wisdom()
+ + +# We provide an interface for changing the "measure level" +# By default this is 0, which does no planning, +# but we provide functions to read and set it + +_default_measurelvl = 0 +
+[docs] +def get_measure_level(): + """ + Get the current 'measure level' used in deciding how much effort to put into + creating FFTW plans. From least effort (and shortest planning time) to most + they are 0 to 3. No arguments. + """ + return _default_measurelvl
+ + +
+[docs] +def set_measure_level(mlvl): + """ + Set the current 'measure level' used in deciding how much effort to expend + creating FFTW plans. Must be an integer from 0 (least effort, shortest time) + to 3 (most effort and time). + """ + global _default_measurelvl + if mlvl not in (0,1,2,3): + raise ValueError("Measure level can only be one of 0, 1, 2, or 3") + _default_measurelvl = mlvl
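A brief sketch (not part of the module source): trading longer planning time for potentially faster transforms.

set_measure_level(2)
assert get_measure_level() == 2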
+ + +_flag_dict = {0: FFTW_ESTIMATE, + 1: FFTW_MEASURE, + 2: FFTW_MEASURE|FFTW_PATIENT, + 3: FFTW_MEASURE|FFTW_PATIENT|FFTW_EXHAUSTIVE} +
+[docs] +def get_flag(mlvl,aligned): + if aligned: + return _flag_dict[mlvl] + else: + return (_flag_dict[mlvl]|FFTW_UNALIGNED)
+ + +# Add the ability to read/store wisdom to filenames + +
+[docs] +def wisdom_io(filename, precision, action): + """Import or export an FFTW plan for single or double precision. + """ + if not _fftw_threaded_set: + set_threads_backend() + fmap = {('float', 'import'): float_lib.fftwf_import_wisdom_from_filename, + ('float', 'export'): float_lib.fftwf_export_wisdom_to_filename, + ('double', 'import'): double_lib.fftw_import_wisdom_from_filename, + ('double', 'export'): double_lib.fftw_export_wisdom_to_filename} + f = fmap[(precision, action)] + f.argtypes = [ctypes.c_char_p] + retval = f(filename.encode()) + if retval == 0: + raise RuntimeError(('Could not {0} wisdom ' + 'from file {1}').format(action, filename))
+ + +
+[docs] +def import_single_wisdom_from_filename(filename): + wisdom_io(filename, 'float', 'import')
+ + +
+[docs] +def import_double_wisdom_from_filename(filename): + wisdom_io(filename, 'double', 'import')
+ + +
+[docs] +def export_single_wisdom_to_filename(filename): + wisdom_io(filename, 'float', 'export')
+ + +
+[docs] +def export_double_wisdom_to_filename(filename): + wisdom_io(filename, 'double', 'export')
+ + +
+[docs] +def set_planning_limit(time): + if not _fftw_threaded_set: + set_threads_backend() + + f = double_lib.fftw_set_timelimit + f.argtypes = [ctypes.c_double] + f(time) + + f = float_lib.fftwf_set_timelimit + f.argtypes = [ctypes.c_double] + f(time)
+ + +# Create function maps for the dtypes +plan_function = {'float32': {'complex64': float_lib.fftwf_plan_dft_r2c_1d}, + 'float64': {'complex128': double_lib.fftw_plan_dft_r2c_1d}, + 'complex64': {'float32': float_lib.fftwf_plan_dft_c2r_1d, + 'complex64': float_lib.fftwf_plan_dft_1d}, + 'complex128': {'float64': double_lib.fftw_plan_dft_c2r_1d, + 'complex128': double_lib.fftw_plan_dft_1d} + } + +execute_function = {'float32': {'complex64': float_lib.fftwf_execute_dft_r2c}, + 'float64': {'complex128': double_lib.fftw_execute_dft_r2c}, + 'complex64': {'float32': float_lib.fftwf_execute_dft_c2r, + 'complex64': float_lib.fftwf_execute_dft}, + 'complex128': {'float64': double_lib.fftw_execute_dft_c2r, + 'complex128': double_lib.fftw_execute_dft} + } + +
+[docs] +def plan(size, idtype, odtype, direction, mlvl, aligned, nthreads, inplace): + if not _fftw_threaded_set: + set_threads_backend() + if nthreads != _fftw_current_nthreads: + _fftw_plan_with_nthreads(nthreads) + # Convert a measure-level to flags + flags = get_flag(mlvl,aligned) + + # We make our arrays of the necessary type and size. Things can be + # tricky, especially for in-place transforms with one of input or + # output real. + if (idtype == odtype): + # We're in the complex-to-complex case, so lengths are the same + ip = zeros(size, dtype=idtype) + if inplace: + op = ip + else: + op = zeros(size, dtype=odtype) + elif (idtype.kind == 'c') and (odtype.kind == 'f'): + # Complex-to-real (reverse), so size is length of real array. + # However the complex array may be larger (in bytes) and + # should therefore be allocated first and reused for an in-place + # transform + ip = zeros(size/2+1, dtype=idtype) + if inplace: + op = ip.view(dtype=odtype)[0:size] + else: + op = zeros(size, dtype=odtype) + else: + # Real-to-complex (forward), and size is still that of real. + # However it is still true that the complex array may be larger + # (in bytes) and should therefore be allocated first and reused + # for an in-place transform + op = zeros(size/2+1, dtype=odtype) + if inplace: + ip = op.view(dtype=idtype)[0:size] + else: + ip = zeros(size, dtype=idtype) + + # Get the plan function + idtype = _np.dtype(idtype) + odtype = _np.dtype(odtype) + f = plan_function[str(idtype)][str(odtype)] + f.restype = ctypes.c_void_p + + # handle the C2C cases (forward and reverse) + if idtype.kind == odtype.kind: + f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, + ctypes.c_int, ctypes.c_int] + theplan = f(size, ip.ptr, op.ptr, direction, flags) + # handle the R2C and C2R case + else: + f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, + ctypes.c_int] + theplan = f(size, ip.ptr, op.ptr, flags) + + # We don't need ip or op anymore + del ip, op + + # Make the destructors + if idtype.char in ['f', 'F']: + destroy = float_lib.fftwf_destroy_plan + else: + destroy = double_lib.fftw_destroy_plan + + destroy.argtypes = [ctypes.c_void_p] + return theplan, destroy
+ + + +# Note that we don't need to check whether we've set the threading backend +# in the following functions, since execute is not called directly and +# the fft and ifft will call plan first. +
+[docs] +def execute(plan, invec, outvec): + f = execute_function[str(invec.dtype)][str(outvec.dtype)] + f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + f(plan, invec.ptr, outvec.ptr)
+ + +
+[docs] +def fft(invec, outvec, prec, itype, otype): + theplan, destroy = plan(len(invec), invec.dtype, outvec.dtype, FFTW_FORWARD, + get_measure_level(),(check_aligned(invec.data) and check_aligned(outvec.data)), + _scheme.mgr.state.num_threads, (invec.ptr == outvec.ptr)) + execute(theplan, invec, outvec) + destroy(theplan)
+ + +
+[docs] +def ifft(invec, outvec, prec, itype, otype): + theplan, destroy = plan(len(outvec), invec.dtype, outvec.dtype, FFTW_BACKWARD, + get_measure_level(),(check_aligned(invec.data) and check_aligned(outvec.data)), + _scheme.mgr.state.num_threads, (invec.ptr == outvec.ptr)) + execute(theplan, invec, outvec) + destroy(theplan)
+ + + +# Class based API + +# First, set up a lot of different ctypes functions: +plan_many_c2c_f = float_lib.fftwf_plan_many_dft +plan_many_c2c_f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_int, ctypes.c_uint] +plan_many_c2c_f.restype = ctypes.c_void_p + +plan_many_c2c_d = double_lib.fftw_plan_many_dft +plan_many_c2c_d.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_int, ctypes.c_uint] +plan_many_c2c_d.restype = ctypes.c_void_p + +plan_many_c2r_f = float_lib.fftwf_plan_many_dft_c2r +plan_many_c2r_f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_uint] +plan_many_c2r_f.restype = ctypes.c_void_p + +plan_many_c2r_d = double_lib.fftw_plan_many_dft_c2r +plan_many_c2r_d.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_uint] +plan_many_c2r_d.restype = ctypes.c_void_p + +plan_many_r2c_f = float_lib.fftwf_plan_many_dft_r2c +plan_many_r2c_f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_uint] +plan_many_r2c_f.restype = ctypes.c_void_p + +plan_many_r2c_d = double_lib.fftw_plan_many_dft_r2c +plan_many_r2c_d.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, + ctypes.c_uint] +plan_many_r2c_d.restype = ctypes.c_void_p + +# Now set up a dictionary indexed by (str(input_dtype), str(output_dtype)) to +# translate input and output dtypes into the correct planning function. + +_plan_funcs_dict = { ('complex64', 'complex64') : plan_many_c2c_f, + ('float32', 'complex64') : plan_many_r2c_f, + ('complex64', 'float32') : plan_many_c2r_f, + ('complex128', 'complex128') : plan_many_c2c_d, + ('float64', 'complex128') : plan_many_r2c_d, + ('complex128', 'float64') : plan_many_c2r_d } + +# To avoid multiple-inheritance, we set up a function that returns much +# of the initialization that will need to be handled in __init__ of both +# classes. 
+ +def _fftw_setup(fftobj): + n = _np.asarray([fftobj.size], dtype=_np.int32) + inembed = _np.asarray([len(fftobj.invec)], dtype=_np.int32) + onembed = _np.asarray([len(fftobj.outvec)], dtype=_np.int32) + nthreads = _scheme.mgr.state.num_threads + if not _fftw_threaded_set: + set_threads_backend() + if nthreads != _fftw_current_nthreads: + _fftw_plan_with_nthreads(nthreads) + mlvl = get_measure_level() + aligned = check_aligned(fftobj.invec.data) and check_aligned(fftobj.outvec.data) + flags = get_flag(mlvl, aligned) + plan_func = _plan_funcs_dict[ (str(fftobj.invec.dtype), str(fftobj.outvec.dtype)) ] + tmpin = zeros(len(fftobj.invec), dtype = fftobj.invec.dtype) + tmpout = zeros(len(fftobj.outvec), dtype = fftobj.outvec.dtype) + # C2C + if fftobj.outvec.kind == 'complex' and fftobj.invec.kind == 'complex': + if fftobj.forward: + ffd = FFTW_FORWARD + else: + ffd = FFTW_BACKWARD + plan = plan_func(1, n.ctypes.data, fftobj.nbatch, + tmpin.ptr, inembed.ctypes.data, 1, fftobj.idist, + tmpout.ptr, onembed.ctypes.data, 1, fftobj.odist, + ffd, flags) + # R2C or C2R (hence no direction argument for plan creation) + else: + plan = plan_func(1, n.ctypes.data, fftobj.nbatch, + tmpin.ptr, inembed.ctypes.data, 1, fftobj.idist, + tmpout.ptr, onembed.ctypes.data, 1, fftobj.odist, + flags) + del tmpin + del tmpout + return plan + +
+[docs] +class FFT(_BaseFFT): + def __init__(self, invec, outvec, nbatch=1, size=None): + super(FFT, self).__init__(invec, outvec, nbatch, size) + self.iptr = self.invec.ptr + self.optr = self.outvec.ptr + self._efunc = execute_function[str(self.invec.dtype)][str(self.outvec.dtype)] + self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + self.plan = _fftw_setup(self) + +
+[docs] + def execute(self): + self._efunc(self.plan, self.iptr, self.optr)
+
+ + +
+[docs] +class IFFT(_BaseIFFT): + def __init__(self, invec, outvec, nbatch=1, size=None): + super(IFFT, self).__init__(invec, outvec, nbatch, size) + self.iptr = self.invec.ptr + self.optr = self.outvec.ptr + self._efunc = execute_function[str(self.invec.dtype)][str(self.outvec.dtype)] + self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + self.plan = _fftw_setup(self) + +
+[docs] + def execute(self): + self._efunc(self.plan, self.iptr, self.optr)
+
+ + +
+[docs] +def insert_fft_options(optgroup): + """ + Inserts the options that affect the behavior of this backend + + Parameters + ---------- + optgroup: fft_option + OptionParser argument group whose options are extended + """ + optgroup.add_argument("--fftw-measure-level", + help="Determines the measure level used in planning " + "FFTW FFTs; allowed values are: " + str([0,1,2,3]), + type=int, default=_default_measurelvl) + optgroup.add_argument("--fftw-threads-backend", + help="Give 'openmp', 'pthreads' or 'unthreaded' to specify which threaded FFTW to use", + default=None) + optgroup.add_argument("--fftw-input-float-wisdom-file", + help="Filename from which to read single-precision wisdom", + default=None) + optgroup.add_argument("--fftw-input-double-wisdom-file", + help="Filename from which to read double-precision wisdom", + default=None) + optgroup.add_argument("--fftw-output-float-wisdom-file", + help="Filename to which to write single-precision wisdom", + default=None) + optgroup.add_argument("--fftw-output-double-wisdom-file", + help="Filename to which to write double-precision wisdom", + default=None) + optgroup.add_argument("--fftw-import-system-wisdom", + help = "If given, call fftw[f]_import_system_wisdom()", + action = "store_true")
+ + +
+[docs] +def verify_fft_options(opt,parser): + """Parses the FFT options and verifies that they are + reasonable. + + Parameters + ---------- + opt : object + Result of parsing the CLI with OptionParser, or any object with the + required attributes. + parser : object + OptionParser instance. + """ + if opt.fftw_measure_level not in [0,1,2,3]: + parser.error("{0} is not a valid FFTW measure level.".format(opt.fftw_measure_level)) + + if opt.fftw_import_system_wisdom and ((opt.fftw_input_float_wisdom_file is not None) + or (opt.fftw_input_double_wisdom_file is not None)): + parser.error("If --fftw-import-system-wisdom is given, then you cannot give" + " either of --fftw-input-float-wisdom-file or --fftw-input-double-wisdom-file") + + if opt.fftw_threads_backend is not None: + if opt.fftw_threads_backend not in ['openmp','pthreads','unthreaded']: + parser.error("Invalid threads backend; must be 'openmp', 'pthreads' or 'unthreaded'")
+ + +
+[docs] +def from_cli(opt): + # Since opt.fftw_threads_backend defaults to None, the following is always + # appropriate: + set_threads_backend(opt.fftw_threads_backend) + + # Set the user-provided measure level + set_measure_level(opt.fftw_measure_level)
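A usage sketch (not part of the module source) showing the usual pattern in PyCBC executables: expose the options, validate them, then apply them; the argument values here are arbitrary examples.

import argparse
parser = argparse.ArgumentParser()
group = parser.add_argument_group('FFTW options')
insert_fft_options(group)
opt = parser.parse_args(['--fftw-measure-level', '1'])
verify_fft_options(opt, parser)
from_cli(opt)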
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/fftw_pruned.html b/latest/html/_modules/pycbc/fft/fftw_pruned.html new file mode 100644 index 00000000000..bb17414583c --- /dev/null +++ b/latest/html/_modules/pycbc/fft/fftw_pruned.html @@ -0,0 +1,385 @@ + + + + + + pycbc.fft.fftw_pruned — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.fftw_pruned

+"""This module provides a functions to perform a pruned FFT based on FFTW
+
+This should be considered a test and example module, as the functionality
+can and should be generalized to other FFT backends and precisions.
+
+These functions largely implement the generic FFT decomposition as
+described rather nicely by Wikipedia:
+
+http://en.wikipedia.org/wiki/Cooley%E2%80%93Tukey_FFT_algorithm
+
+I use a similar naming convention here, with minor simplifications to the
+twiddle factors.
+"""
+import numpy, ctypes, pycbc.types
+from pycbc.libutils import get_ctypes_library
+import logging
+from .fftw_pruned_cython import second_phase_cython
+
+logger = logging.getLogger('pycbc.events.fftw_pruned')
+
+warn_msg = ("The FFTW_pruned module can be used to speed up computing SNR "
+            "timeseries by computing first at a low sample rate and then "
+            "computing at full sample rate only at certain samples. This code "
+            "has not yet been used in production, and has no test case. "
+            "This was also ported to Cython in this state. "
+            "This code would need verification before trusting results. "
+            "Please do contribute test cases.")
+
+logger.warning(warn_msg)
+
+# FFTW constants
+FFTW_FORWARD = -1
+FFTW_BACKWARD = 1
+FFTW_MEASURE = 0
+FFTW_PATIENT = 1 << 5
+FFTW_ESTIMATE = 1 << 6
+float_lib = get_ctypes_library('fftw3f', ['fftw3f'],mode=ctypes.RTLD_GLOBAL)
+fexecute = float_lib.fftwf_execute_dft
+fexecute.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+
+ftexecute = float_lib.fftwf_execute_dft
+ftexecute.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+
+
+[docs] +def plan_transpose(N1, N2): + """ + Create a plan for transposing internally to the pruned_FFT calculation. + (Alex to provide a write up with more details.) + + Parameters + ----------- + N1 : int + Number of rows. + N2 : int + Number of columns. + + Returns + -------- + plan : FFTWF plan + The plan for performing the FFTW transpose. + """ + + rows = N1 + cols = N2 + + iodim = numpy.zeros(6, dtype=numpy.int32) + iodim[0] = rows + iodim[1] = 1 + iodim[2] = cols + iodim[3] = cols + iodim[4] = rows + iodim[5] = 1 + + N = N1*N2 + vin = pycbc.types.zeros(N, dtype=numpy.complex64) + vout = pycbc.types.zeros(N, dtype=numpy.complex64) + + f = float_lib.fftwf_plan_guru_dft + f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p, + ctypes.c_int] + f.restype = ctypes.c_void_p + return f(0, None, 2, iodim.ctypes.data, vin.ptr, vout.ptr, None, FFTW_MEASURE)
+ + +
+[docs] +def plan_first_phase(N1, N2): + """ + Create a plan for the first stage of the pruned FFT operation. + (Alex to provide a write up with more details.) + + Parameters + ----------- + N1 : int + Number of rows. + N2 : int + Number of columns. + + Returns + -------- + plan : FFTWF plan + The plan for performing the first phase FFT. + """ + N = N1*N2 + vin = pycbc.types.zeros(N, dtype=numpy.complex64) + vout = pycbc.types.zeros(N, dtype=numpy.complex64) + f = float_lib.fftwf_plan_many_dft + f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, + ctypes.c_int, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p, + ctypes.c_int, ctypes.c_int, + ctypes.c_int, ctypes.c_int] + f.restype = ctypes.c_void_p + return f(1, ctypes.byref(ctypes.c_int(N2)), N1, + vin.ptr, None, 1, N2, + vout.ptr, None, 1, N2, FFTW_BACKWARD, FFTW_MEASURE)
+ + +_theplan = None +
+[docs] +def first_phase(invec, outvec, N1, N2): + """ + This implements the first phase of the FFT decomposition, using + the standard FFT many plans. + + Parameters + ----------- + invec : array + The input array. + outvec : array + The output array. + N1 : int + Number of rows. + N2 : int + Number of columns. + """ + global _theplan + if _theplan is None: + _theplan = plan_first_phase(N1, N2) + fexecute(_theplan, invec.ptr, outvec.ptr)
+ + +
+[docs] +def second_phase(invec, indices, N1, N2): + """ + This is the second phase of the FFT decomposition that actually performs + the pruning. It is an explicit calculation for the subset of points. Note + that there seem to be some numerical accumulation issues at various values + of N1 and N2. + + Parameters + ---------- + invec : + The result of the first phase FFT + indices : array of ints + The index locations to calculate the FFT + N1 : int + The length of the second phase "FFT" + N2 : int + The length of the first phase FFT + + Returns + ------- + out : array of floats + """ + invec = numpy.array(invec.data, copy=False) + NI = len(indices) # pylint:disable=unused-variable + N1 = int(N1) + N2 = int(N2) + out = numpy.zeros(len(indices), dtype=numpy.complex64) + indices = numpy.array(indices, dtype=numpy.uint32) + + # Note, the next step if this needs to be faster is to invert the loops + second_phase_cython(N1, N2, NI, indices, out, invec) + + return out
+ + +_thetransposeplan = None +
+[docs] +def fft_transpose_fftw(vec): + """ + Perform an FFT transpose from vec into outvec. + (Alex to provide more details in a write-up.) + + Parameters + ----------- + vec : array + Input array. + + Returns + -------- + outvec : array + Transposed output array. + """ + global _thetransposeplan + outvec = pycbc.types.zeros(len(vec), dtype=vec.dtype) + if _theplan is None: + N1, N2 = splay(vec) + _thetransposeplan = plan_transpose(N1, N2) + ftexecute(_thetransposeplan, vec.ptr, outvec.ptr) + return outvec
+ + +fft_transpose = fft_transpose_fftw + +
+[docs] +def splay(vec): + """ Determine two lengths to split stride the input vector by + """ + N2 = 2 ** int(numpy.log2( len(vec) ) / 2) + N1 = len(vec) / N2 + return N1, N2
+ + +
+[docs] +def pruned_c2cifft(invec, outvec, indices, pretransposed=False): + """ + Perform a pruned iFFT, only valid for power of 2 iffts as the + decomposition is easier to choose. This is not a strict requirement of the + functions, but it is unlikely to the optimal to use anything but power + of 2. (Alex to provide more details in write up. + + Parameters + ----------- + invec : array + The input vector. This should be the correlation between the data and + the template at full sample rate. Ideally this is pre-transposed, but + if not this will be transposed in this function. + outvec : array + The output of the first phase of the pruned FFT. + indices : array of ints + The indexes at which to calculate the full sample-rate SNR. + pretransposed : boolean, default=False + Used to indicate whether or not invec is pretransposed. + + Returns + -------- + SNRs : array + The complex SNRs at the indexes given by indices. + """ + N1, N2 = splay(invec) + + if not pretransposed: + invec = fft_transpose(invec) + first_phase(invec, outvec, N1=N1, N2=N2) + out = second_phase(outvec, indices, N1=N1, N2=N2) + return out
+ +
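For readers who want to see the decomposition these functions implement, the following is a self-contained numpy sketch of a two-phase pruned inverse DFT: a first phase of short inverse FFTs followed by an explicit twiddle-and-sum evaluated only at the requested output indices. It illustrates the same Cooley-Tukey splitting, but it does not reproduce this module's single-precision types, transposed memory layout or FFTW plans::

    import numpy

    def pruned_idft(X, indices, N1, N2):
        """Unnormalized inverse DFT of X (length N1*N2) at selected output samples."""
        N = N1 * N2
        cols = X.reshape(N1, N2)                  # cols[k1, k2] = X[N2*k1 + k2]
        # First phase: an unnormalized length-N1 inverse DFT down each column k2
        first = numpy.fft.ifft(cols, axis=0) * N1
        k2 = numpy.arange(N2)
        out = numpy.empty(len(indices), dtype=complex)
        for j, n in enumerate(indices):
            # Second phase: explicit twiddle-and-sum, only at the requested sample n
            twiddle = numpy.exp(2j * numpy.pi * k2 * n / N)
            out[j] = numpy.sum(first[n % N1, :] * twiddle)
        return out

    X = numpy.random.randn(64) + 1j * numpy.random.randn(64)
    reference = numpy.fft.ifft(X) * 64            # full unnormalized inverse DFT
    indices = [3, 17, 40]
    assert numpy.allclose(pruned_idft(X, indices, N1=8, N2=8), reference[indices])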
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/func_api.html b/latest/html/_modules/pycbc/fft/func_api.html new file mode 100644 index 00000000000..4f252af91f8 --- /dev/null +++ b/latest/html/_modules/pycbc/fft/func_api.html @@ -0,0 +1,226 @@ + + + + + + pycbc.fft.func_api — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.func_api

+# Copyright (C) 2012  Josh Willis, Andrew Miller
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This package provides a front-end to various fast Fourier transform
+implementations within PyCBC.
+"""
+
+from pycbc.types import TimeSeries as _TimeSeries
+from pycbc.types import FrequencySeries as _FrequencySeries
+from .core import _check_fft_args, _check_fwd_args, _check_inv_args
+from .backend_support import get_backend
+
+
+[docs] +def fft(invec, outvec): + """ Fourier transform from invec to outvec. + + Perform a fourier transform. The type of transform is determined + by the dtype of invec and outvec. + + Parameters + ---------- + invec : TimeSeries or FrequencySeries + The input vector. + outvec : TimeSeries or FrequencySeries + The output. + """ + prec, itype, otype = _check_fft_args(invec, outvec) + _check_fwd_args(invec, itype, outvec, otype, 1, None) + + # The following line is where all the work is done: + backend = get_backend() + backend.fft(invec, outvec, prec, itype, otype) + # For a forward FFT, the length of the *input* vector is the length + # we should divide by, whether C2C or R2HC transform + if isinstance(invec, _TimeSeries): + outvec._epoch = invec._epoch + outvec._delta_f = 1.0/(invec._delta_t * len(invec)) + outvec *= invec._delta_t + elif isinstance(invec, _FrequencySeries): + outvec._epoch = invec._epoch + outvec._delta_t = 1.0/(invec._delta_f * len(invec)) + outvec *= invec._delta_f
+ + +
+[docs] +def ifft(invec, outvec): + """ Inverse fourier transform from invec to outvec. + + Perform an inverse fourier transform. The type of transform is determined + by the dtype of invec and outvec. + + Parameters + ---------- + invec : TimeSeries or FrequencySeries + The input vector. + outvec : TimeSeries or FrequencySeries + The output. + """ + prec, itype, otype = _check_fft_args(invec, outvec) + _check_inv_args(invec, itype, outvec, otype, 1, None) + + # The following line is where all the work is done: + backend = get_backend() + backend.ifft(invec, outvec, prec, itype, otype) + # For an inverse FFT, the length of the *output* vector is the length + # we should divide by, whether C2C or HC2R transform + if isinstance(invec, _TimeSeries): + outvec._epoch = invec._epoch + outvec._delta_f = 1.0/(invec._delta_t * len(outvec)) + outvec *= invec._delta_t + elif isinstance(invec,_FrequencySeries): + outvec._epoch = invec._epoch + outvec._delta_t = 1.0/(invec._delta_f * len(outvec)) + outvec *= invec._delta_f
+ + +
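As a usage illustration (a hedged sketch: the length, sample rate and dtypes are arbitrary, and the output vector must satisfy the length and precision checks applied by _check_fwd_args and _check_inv_args), a real-to-complex forward transform followed by the inverse recovers the original series, because the delta_t and delta_f scalings applied above are mutually inverse::

    import numpy
    from pycbc.types import TimeSeries, FrequencySeries, zeros
    from pycbc.fft import fft, ifft

    n, delta_t = 4096, 1.0 / 4096
    ts = TimeSeries(numpy.random.normal(size=n).astype(numpy.float32),
                    delta_t=delta_t)
    fs = FrequencySeries(zeros(n // 2 + 1, dtype=numpy.complex64),
                         delta_f=1.0 / (n * delta_t))
    fft(ts, fs)    # fs now holds delta_t times the DFT of ts; its delta_f and epoch are set
    rec = TimeSeries(zeros(n, dtype=numpy.float32), delta_t=delta_t)
    ifft(fs, rec)  # rec is approximately equal to ts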
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/mkl.html b/latest/html/_modules/pycbc/fft/mkl.html new file mode 100644 index 00000000000..c52ad5e1c94 --- /dev/null +++ b/latest/html/_modules/pycbc/fft/mkl.html @@ -0,0 +1,341 @@ + + + + + + pycbc.fft.mkl — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.mkl

+import ctypes, pycbc.libutils
+from pycbc.types import zeros
+from .core import _BaseFFT, _BaseIFFT
+import pycbc.scheme as _scheme
+
+lib = pycbc.libutils.get_ctypes_library('mkl_rt', [])
+if lib is None:
+    raise ImportError
+
+#MKL constants  taken from mkl_df_defines.h
+DFTI_FORWARD_DOMAIN = 0
+DFTI_DIMENSION = 1
+DFTI_LENGTHS = 2
+DFTI_PRECISION = 3
+DFTI_FORWARD_SCALE  = 4
+DFTI_BACKWARD_SCALE = 5
+DFTI_NUMBER_OF_TRANSFORMS = 7
+DFTI_COMPLEX_STORAGE = 8
+DFTI_REAL_STORAGE = 9
+DFTI_CONJUGATE_EVEN_STORAGE = 10
+DFTI_PLACEMENT = 11
+DFTI_INPUT_STRIDES = 12
+DFTI_OUTPUT_STRIDES = 13
+DFTI_INPUT_DISTANCE = 14
+DFTI_OUTPUT_DISTANCE = 15
+DFTI_WORKSPACE = 17
+DFTI_ORDERING = 18
+DFTI_TRANSPOSE = 19
+DFTI_DESCRIPTOR_NAME = 20
+DFTI_PACKED_FORMAT = 21
+DFTI_COMMIT_STATUS = 22
+DFTI_VERSION = 23
+DFTI_NUMBER_OF_USER_THREADS = 26
+DFTI_THREAD_LIMIT = 27
+DFTI_COMMITTED = 30
+DFTI_UNCOMMITTED = 31
+DFTI_COMPLEX = 32
+DFTI_REAL = 33
+DFTI_SINGLE = 35
+DFTI_DOUBLE = 36
+DFTI_COMPLEX_COMPLEX = 39
+DFTI_COMPLEX_REAL = 40
+DFTI_REAL_COMPLEX = 41
+DFTI_REAL_REAL = 42
+DFTI_INPLACE = 43
+DFTI_NOT_INPLACE = 44
+DFTI_ORDERED = 48
+DFTI_BACKWARD_SCRAMBLED = 49
+DFTI_ALLOW = 51
+DFTI_AVOID = 52
+DFTI_NONE = 53
+DFTI_CCS_FORMAT = 54
+DFTI_PACK_FORMAT = 55
+DFTI_PERM_FORMAT = 56
+DFTI_CCE_FORMAT = 57
+
+mkl_domain = {'real': {'complex': DFTI_REAL},
+              'complex': {'real': DFTI_REAL,
+                          'complex':DFTI_COMPLEX,
+                         }
+             }
+
+mkl_descriptor = {'single': lib.DftiCreateDescriptor_s_1d,
+                  'double': lib.DftiCreateDescriptor_d_1d,
+                  }
+
+
+[docs] +def check_status(status): + """ Check the status of a mkl functions and raise a python exeption if + there is an error. + """ + if status: + lib.DftiErrorMessage.restype = ctypes.c_char_p + msg = lib.DftiErrorMessage(status) + raise RuntimeError(msg)
+ + +
+[docs] +def create_descriptor(size, idtype, odtype, inplace): + invec = zeros(1, dtype=idtype) + outvec = zeros(1, dtype=odtype) + desc = ctypes.c_void_p(1) + + domain = mkl_domain[str(invec.kind)][str(outvec.kind)] + f = mkl_descriptor[invec.precision] + f.argtypes = [ctypes.POINTER(ctypes.c_void_p), ctypes.c_int, ctypes.c_long] + + status = f(ctypes.byref(desc), domain, size) + + if inplace: + lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE) + else: + lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE) + + nthreads = _scheme.mgr.state.num_threads + status = lib.DftiSetValue(desc, DFTI_THREAD_LIMIT, nthreads) + check_status(status) + + lib.DftiSetValue(desc, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_CCS_FORMAT) + lib.DftiCommitDescriptor(desc) + check_status(status) + + return desc
+ + +
+[docs] +def fft(invec, outvec, prec, itype, otype): + descr = create_descriptor(max(len(invec), len(outvec)), invec.dtype, + outvec.dtype, (invec.ptr == outvec.ptr)) + f = lib.DftiComputeForward + f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + status = f(descr, invec.ptr, outvec.ptr) + lib.DftiFreeDescriptor(ctypes.byref(descr)) + check_status(status)
+ + +
+[docs] +def ifft(invec, outvec, prec, itype, otype): + descr = create_descriptor(max(len(invec), len(outvec)), invec.dtype, + outvec.dtype, (invec.ptr == outvec.ptr)) + f = lib.DftiComputeBackward + f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + status = f(descr, invec.ptr, outvec.ptr) + lib.DftiFreeDescriptor(ctypes.byref(descr)) + check_status(status)
+ + +# Class based API + +def _get_desc(fftobj): + desc = ctypes.c_void_p(1) + domain = mkl_domain[str(fftobj.invec.kind)][str(fftobj.outvec.kind)] + + f = mkl_descriptor[fftobj.invec.precision] + f.argtypes = [ctypes.POINTER(ctypes.c_void_p), ctypes.c_int, ctypes.c_long] + status = f(ctypes.byref(desc), domain, int(fftobj.size)) + + check_status(status) + # Now we set various things depending on exactly what kind of transform we're + # performing. + + lib.DftiSetValue.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int] + + # The following only matters if the transform is C2R or R2C + status = lib.DftiSetValue(desc, DFTI_CONJUGATE_EVEN_STORAGE, + DFTI_COMPLEX_COMPLEX) + check_status(status) + + # In-place or out-of-place: + if fftobj.inplace: + status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE) + else: + status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE) + check_status(status) + + # If we are performing a batched transform: + if fftobj.nbatch > 1: + status = lib.DftiSetValue(desc, DFTI_NUMBER_OF_TRANSFORMS, fftobj.nbatch) + check_status(status) + status = lib.DftiSetValue(desc, DFTI_INPUT_DISTANCE, fftobj.idist) + check_status(status) + status = lib.DftiSetValue(desc, DFTI_OUTPUT_DISTANCE, fftobj.odist) + check_status(status) + + # Knowing how many threads will be allowed may help select a better transform + nthreads = _scheme.mgr.state.num_threads + status = lib.DftiSetValue(desc, DFTI_THREAD_LIMIT, nthreads) + check_status(status) + + # Now everything's ready, so commit + status = lib.DftiCommitDescriptor(desc) + check_status(status) + + return desc + +
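Normally the module-level fft and ifft above are reached through the functional front-end with the 'mkl' backend selected, but they can also be exercised directly. A hedged sketch (this requires an MKL runtime, otherwise importing this module raises ImportError; the precision and kind arguments shown are those the front-end would pass)::

    import numpy
    from pycbc.types import zeros
    from pycbc.fft import mkl

    invec = zeros(1024, dtype=numpy.complex64)
    outvec = zeros(1024, dtype=numpy.complex64)
    invec[0] = 1.0      # a unit impulse transforms to a constant spectrum of ones
    mkl.fft(invec, outvec, 'single', 'complex', 'complex')
    assert numpy.allclose(outvec.numpy(), 1.0, atol=1e-5)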
+[docs] +class FFT(_BaseFFT): + def __init__(self, invec, outvec, nbatch=1, size=None): + super(FFT, self).__init__(invec, outvec, nbatch, size) + self.iptr = self.invec.ptr + self.optr = self.outvec.ptr + self._efunc = lib.DftiComputeForward + self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + self.desc = _get_desc(self) + +
+[docs] + def execute(self): + self._efunc(self.desc, self.iptr, self.optr)
+
+ + +
+[docs] +class IFFT(_BaseIFFT): + def __init__(self, invec, outvec, nbatch=1, size=None): + super(IFFT, self).__init__(invec, outvec, nbatch, size) + self.iptr = self.invec.ptr + self.optr = self.outvec.ptr + self._efunc = lib.DftiComputeBackward + self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + self.desc = _get_desc(self) + +
+[docs] + def execute(self): + self._efunc(self.desc, self.iptr, self.optr)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/npfft.html b/latest/html/_modules/pycbc/fft/npfft.html new file mode 100644 index 00000000000..76072d9732d --- /dev/null +++ b/latest/html/_modules/pycbc/fft/npfft.html @@ -0,0 +1,243 @@ + + + + + + pycbc.fft.npfft — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.npfft

+# Copyright (C) 2012  Josh Willis
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides the numpy backend of the fast Fourier transform
+for the PyCBC package.
+"""
+
+import logging
+import numpy.fft
+from .core import _check_fft_args
+from .core import _BaseFFT, _BaseIFFT
+
+logger = logging.getLogger('pycbc.events.npfft')
+
+_INV_FFT_MSG = ("I cannot perform an {} between data with an input type of "
+                "{} and an output type of {}")
+
+
+[docs] +def fft(invec, outvec, _, itype, otype): + if invec.ptr == outvec.ptr: + raise NotImplementedError("numpy backend of pycbc.fft does not " + "support in-place transforms") + if itype == 'complex' and otype == 'complex': + outvec.data[:] = numpy.asarray(numpy.fft.fft(invec.data), + dtype=outvec.dtype) + elif itype == 'real' and otype == 'complex': + outvec.data[:] = numpy.asarray(numpy.fft.rfft(invec.data), + dtype=outvec.dtype) + else: + raise ValueError(_INV_FFT_MSG.format("FFT", itype, otype))
+ + + +
+[docs] +def ifft(invec, outvec, _, itype, otype): + if invec.ptr == outvec.ptr: + raise NotImplementedError("numpy backend of pycbc.fft does not " + "support in-place transforms") + if itype == 'complex' and otype == 'complex': + outvec.data[:] = numpy.asarray(numpy.fft.ifft(invec.data), + dtype=outvec.dtype) + outvec *= len(outvec) + elif itype == 'complex' and otype == 'real': + outvec.data[:] = numpy.asarray(numpy.fft.irfft(invec.data,len(outvec)), + dtype=outvec.dtype) + outvec *= len(outvec) + else: + raise ValueError(_INV_FFT_MSG.format("IFFT", itype, otype))
+ + + +WARN_MSG = ("You are using the class-based PyCBC FFT API, with the numpy " + "backed. This is provided for convenience only. If performance is " + "important use the class-based API with one of the other backends " + "(for e.g. MKL or FFTW)") + + +
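The multiplication by len(outvec) above reflects the backend convention used throughout pycbc.fft: backends return the unnormalized inverse DFT, and the functional front-end later rescales by delta_f. A quick, purely numpy illustration of that convention (no PyCBC objects involved)::

    import numpy

    x = (numpy.random.randn(16) + 1j * numpy.random.randn(16)).astype(numpy.complex64)
    X = numpy.fft.fft(x)
    # numpy.fft.ifft divides by N, so multiplying by N gives the unnormalized
    # inverse DFT: applying it to fft(x) returns N times the original series
    assert numpy.allclose(numpy.fft.ifft(X) * len(x), len(x) * x, atol=1e-4)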
+[docs] +class FFT(_BaseFFT): + """ + Class for performing FFTs via the numpy interface. + """ + def __init__(self, invec, outvec, nbatch=1, size=None): + super(FFT, self).__init__(invec, outvec, nbatch, size) + logger.warning(WARN_MSG) + self.prec, self.itype, self.otype = _check_fft_args(invec, outvec) + +
+[docs] + def execute(self): + fft(self.invec, self.outvec, self.prec, self.itype, self.otype)
+
+ + + +
+[docs] +class IFFT(_BaseIFFT): + """ + Class for performing IFFTs via the numpy interface. + """ + def __init__(self, invec, outvec, nbatch=1, size=None): + super(IFFT, self).__init__(invec, outvec, nbatch, size) + logger.warning(WARN_MSG) + self.prec, self.itype, self.otype = _check_fft_args(invec, outvec) + +
+[docs] + def execute(self): + ifft(self.invec, self.outvec, self.prec, self.itype, self.otype)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/fft/parser_support.html b/latest/html/_modules/pycbc/fft/parser_support.html new file mode 100644 index 00000000000..67d22e94730 --- /dev/null +++ b/latest/html/_modules/pycbc/fft/parser_support.html @@ -0,0 +1,264 @@ + + + + + + pycbc.fft.parser_support — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.fft.parser_support

+# Copyright (C) 2012  Josh Willis, Andrew Miller
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This package provides a front-end to various fast Fourier transform
+implementations within PyCBC.
+"""
+
+from .backend_support import get_backend_modules, get_backend_names
+from .backend_support import set_backend, get_backend
+
+# Next we add all of the machinery to set backends and their options
+# from the command line.
+
+
+[docs] +def insert_fft_option_group(parser): + """ + Adds the options used to choose an FFT backend. This should be used + if your program supports the ability to select the FFT backend; otherwise + you may simply call the fft and ifft functions and rely on default + choices. This function will also attempt to add any options exported + by available backends through a function called insert_fft_options. + These submodule functions should take the fft_group object as argument. + + Parameters + ---------- + parser : object + OptionParser instance + """ + fft_group = parser.add_argument_group("Options for selecting the" + " FFT backend and controlling its performance" + " in this program.") + # We have one argument to specify the backends. This becomes the default list used + # if none is specified for a particular call of fft() of ifft(). Note that this + # argument expects a *list* of inputs, as indicated by the nargs='*'. + fft_group.add_argument("--fft-backends", + help="Preference list of the FFT backends. " + "Choices are: \n" + str(get_backend_names()), + nargs='*', default=[]) + + for backend in get_backend_modules(): + try: + backend.insert_fft_options(fft_group) + except AttributeError: + pass
+ + +
+[docs] +def verify_fft_options(opt, parser): + """Parses the FFT options and verifies that they are + reasonable. + + Parameters + ---------- + opt : object + Result of parsing the CLI with OptionParser, or any object with the + required attributes. + parser : object + OptionParser instance. + """ + + if len(opt.fft_backends) > 0: + _all_backends = get_backend_names() + for backend in opt.fft_backends: + if backend not in _all_backends: + parser.error("Backend {0} is not available".format(backend)) + + for backend in get_backend_modules(): + try: + backend.verify_fft_options(opt, parser) + except AttributeError: + pass
+ + +# The following function is the only one that is designed +# only to work with the active scheme. We'd like to fix that, +# eventually, but it's non-trivial because of how poorly MKL +# and FFTW cooperate. + +
+[docs] +def from_cli(opt): + """Parses the command line options and sets the FFT backend + for each (available) scheme. Aside from setting the default + backed for this context, this function will also call (if + it exists) the from_cli function of the specified backends in + the *current* scheme; typically one would only call this function + once inside of a scheme context manager, but if it is desired + to perform FFTs both inside and outside of a context, then + this function would need to be called again. + + Parameters + ---------- + opt: object + Result of parsing the CLI with OptionParser, or any object with + the required attributes. + + Returns + """ + + set_backend(opt.fft_backends) + + # Eventually, we need to be able to parse command lines + # from more than just the current scheme's preference. But + # the big problem is that calling from_cli for more than one + # backend could cause interference; apparently, FFTW and MKL + # don't play nice unless FFTW has been compiled and linked + # with icc (and possibly numpy, scipy, and/or Python as well?) + + backend = get_backend() + try: + backend.from_cli(opt) + except AttributeError: + pass
+ +
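Putting the three functions above together, an executable would typically wire them into argparse roughly as follows. This is a hedged sketch: the option values are illustrative, and the FFTW-specific flag only exists when that backend module is importable::

    import argparse
    from pycbc.fft import parser_support

    parser = argparse.ArgumentParser()
    parser_support.insert_fft_option_group(parser)
    opt = parser.parse_args(['--fft-backends', 'fftw',
                             '--fftw-measure-level', '1'])
    parser_support.verify_fft_options(opt, parser)
    parser_support.from_cli(opt)  # sets the backend (and its options) for the current scheme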
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/filter/autocorrelation.html b/latest/html/_modules/pycbc/filter/autocorrelation.html new file mode 100644 index 00000000000..371ceea289b --- /dev/null +++ b/latest/html/_modules/pycbc/filter/autocorrelation.html @@ -0,0 +1,305 @@ + + + + + + pycbc.filter.autocorrelation — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.filter.autocorrelation

+# Copyright (C) 2016  Christopher M. Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides functions for calculating the autocorrelation function
+and length of a data series.
+"""
+
+import numpy
+from pycbc.filter.matchedfilter import correlate
+from pycbc.types import FrequencySeries, TimeSeries, zeros
+
+
+[docs] +def calculate_acf(data, delta_t=1.0, unbiased=False): + r"""Calculates the one-sided autocorrelation function. + + Calculates the autocorrelation function (ACF) and returns the one-sided + ACF. The ACF is defined as the autocovariance divided by the variance. The + ACF can be estimated using + + .. math:: + + \hat{R}(k) = \frac{1}{n \sigma^{2}} \sum_{t=1}^{n-k} \left( X_{t} - \mu \right) \left( X_{t+k} - \mu \right) + + Where :math:`\hat{R}(k)` is the ACF, :math:`X_{t}` is the data series at + time t, :math:`\mu` is the mean of :math:`X_{t}`, and :math:`\sigma^{2}` is + the variance of :math:`X_{t}`. + + Parameters + ----------- + data : TimeSeries or numpy.array + A TimeSeries or numpy.array of data. + delta_t : float + The time step of the data series if it is not a TimeSeries instance. + unbiased : bool + If True the normalization of the autocovariance function is n-k + instead of n. This is called the unbiased estimation of the + autocovariance. Note that this does not mean the ACF is unbiased. + + Returns + ------- + acf : numpy.array + If data is a TimeSeries then acf will be a TimeSeries of the + one-sided ACF. Else acf is a numpy.array. + """ + + # if given a TimeSeries instance then get numpy.array + if isinstance(data, TimeSeries): + y = data.numpy() + delta_t = data.delta_t + else: + y = data + + # Zero mean + y = y - y.mean() + ny_orig = len(y) + + npad = 1 + while npad < 2*ny_orig: + npad = npad << 1 + ypad = numpy.zeros(npad) + ypad[:ny_orig] = y + + # FFT data minus the mean + fdata = TimeSeries(ypad, delta_t=delta_t).to_frequencyseries() + + # correlate + # do not need to give the congjugate since correlate function does it + cdata = FrequencySeries(zeros(len(fdata), dtype=fdata.dtype), + delta_f=fdata.delta_f, copy=False) + correlate(fdata, fdata, cdata) + + # IFFT correlated data to get unnormalized autocovariance time series + acf = cdata.to_timeseries() + acf = acf[:ny_orig] + + # normalize the autocovariance + # note that dividing by acf[0] is the same as ( y.var() * len(acf) ) + if unbiased: + acf /= ( y.var() * numpy.arange(len(acf), 0, -1) ) + else: + acf /= acf[0] + + # return input datatype + if isinstance(data, TimeSeries): + return TimeSeries(acf, delta_t=delta_t) + else: + return acf
+ + + +
+[docs] +def calculate_acl(data, m=5, dtype=int): + r"""Calculates the autocorrelation length (ACL). + + Given a normalized autocorrelation function :math:`\rho[i]` (by normalized, + we mean that :math:`\rho[0] = 1`), the ACL :math:`\tau` is: + + .. math:: + + \tau = 1 + 2 \sum_{i=1}^{K} \rho[i]. + + The number of samples used :math:`K` is found by using the first point + such that: + + .. math:: + + m \tau[K] \leq K, + + where :math:`m` is a tuneable parameter (default = 5). If no such point + exists, then the given data set it too short to estimate the ACL; in this + case ``inf`` is returned. + + This algorithm for computing the ACL is taken from: + + N. Madras and A.D. Sokal, J. Stat. Phys. 50, 109 (1988). + + Parameters + ----------- + data : TimeSeries or array + A TimeSeries of data. + m : int + The number of autocorrelation lengths to use for determining the window + size :math:`K` (see above). + dtype : int or float + The datatype of the output. If the dtype was set to int, then the + ceiling is returned. + + Returns + ------- + acl : int or float + The autocorrelation length. If the ACL cannot be estimated, returns + ``numpy.inf``. + """ + + # sanity check output data type + if dtype not in [int, float]: + raise ValueError("The dtype must be either int or float.") + + # if we have only a single point, just return 1 + if len(data) < 2: + return 1 + + # calculate ACF that is normalized by the zero-lag value + acf = calculate_acf(data) + + cacf = 2 * acf.numpy().cumsum() - 1 + win = m * cacf <= numpy.arange(len(cacf)) + if win.any(): + acl = cacf[numpy.where(win)[0][0]] + if dtype == int: + acl = int(numpy.ceil(acl)) + else: + acl = numpy.inf + return acl
+ +
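A short worked example using both functions together (the data are synthetic and purely illustrative; either a TimeSeries or a bare numpy array is accepted, but a TimeSeries keeps the delta_t bookkeeping automatic)::

    import numpy
    from pycbc.types import TimeSeries
    from pycbc.filter.autocorrelation import calculate_acf, calculate_acl

    delta_t = 1.0 / 256
    t = numpy.arange(4096) * delta_t
    data = TimeSeries(numpy.sin(2 * numpy.pi * 3.0 * t)
                      + 0.1 * numpy.random.normal(size=t.size),
                      delta_t=delta_t)

    acf = calculate_acf(data)   # one-sided ACF as a TimeSeries, with acf[0] == 1
    acl = calculate_acl(data)   # integer ACL, or numpy.inf if it cannot be estimated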
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/filter/matchedfilter.html b/latest/html/_modules/pycbc/filter/matchedfilter.html new file mode 100644 index 00000000000..bb5da45d4e6 --- /dev/null +++ b/latest/html/_modules/pycbc/filter/matchedfilter.html @@ -0,0 +1,2434 @@ + + + + + + pycbc.filter.matchedfilter — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.filter.matchedfilter

+# Copyright (C) 2012  Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides functions for matched filtering along with associated
+utilities.
+"""
+
+import logging
+from math import sqrt
+import numpy
+
+from pycbc.types import TimeSeries, FrequencySeries, zeros, Array
+from pycbc.types import complex_same_precision_as, real_same_precision_as
+from pycbc.fft import fft, ifft, IFFT
+import pycbc.scheme
+from pycbc import events
+from pycbc.events import ranking
+import pycbc
+
+logger = logging.getLogger('pycbc.filter.matchedfilter')
+
+BACKEND_PREFIX="pycbc.filter.matchedfilter_"
+
+
+[docs] +@pycbc.scheme.schemed(BACKEND_PREFIX) +def correlate(x, y, z): + err_msg = "This function is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + + +class BatchCorrelator(object): + """ Create a batch correlation engine + """ + def __init__(self, xs, zs, size): + """ Correlate x and y, store in z. Arrays need not be equal length, but + must be at least size long and of the same dtype. No error checking + will be performed, so be careful. All dtypes must be complex64. + Note, must be created within the processing context that it will be used in. + """ + self.size = int(size) + self.dtype = xs[0].dtype + self.num_vectors = len(xs) + + # keep reference to arrays + self.xs = xs + self.zs = zs + + # Store each pointer as in integer array + self.x = Array([v.ptr for v in xs], dtype=int) + self.z = Array([v.ptr for v in zs], dtype=int) + + @pycbc.scheme.schemed(BACKEND_PREFIX) + def batch_correlate_execute(self, y): + pass + + execute = batch_correlate_execute + + +@pycbc.scheme.schemed(BACKEND_PREFIX) +def _correlate_factory(x, y, z): + err_msg = "This class is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg) + + +class Correlator(object): + """ Create a correlator engine + + Parameters + --------- + x : complex64 + Input pycbc.types.Array (or subclass); it will be conjugated + y : complex64 + Input pycbc.types.Array (or subclass); it will not be conjugated + z : complex64 + Output pycbc.types.Array (or subclass). + It will contain conj(x) * y, element by element + + The addresses in memory of the data of all three parameter vectors + must be the same modulo pycbc.PYCBC_ALIGNMENT + """ + def __new__(cls, *args, **kwargs): + real_cls = _correlate_factory(*args, **kwargs) + return real_cls(*args, **kwargs) # pylint:disable=not-callable + + +# The class below should serve as the parent for all schemed classes. +# The intention is that this class serves simply as the location for +# all documentation of the class and its methods, though that is not +# yet implemented. Perhaps something along the lines of: +# +# http://stackoverflow.com/questions/2025562/inherit-docstrings-in-python-class-inheritance +# +# will work? Is there a better way? +class _BaseCorrelator(object): + def correlate(self): + """ + Compute the correlation of the vectors specified at object + instantiation, writing into the output vector given when the + object was instantiated. The intention is that this method + should be called many times, with the contents of those vectors + changing between invocations, but not their locations in memory + or length. + """ + pass + + +
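Concretely, correlate(x, y, z) overwrites z with conj(x) * y element by element; the conjugation is applied to the first argument, which is why calculate_acf in pycbc.filter.autocorrelation does not conjugate by hand before calling it. An illustrative check with small complex64 arrays, assumed to run under the default CPU scheme::

    import numpy
    from pycbc.types import Array
    from pycbc.filter.matchedfilter import correlate

    x = Array((numpy.arange(8) + 2.0j).astype(numpy.complex64))
    y = Array((1.0j * numpy.arange(8)).astype(numpy.complex64))
    z = Array(numpy.zeros(8, dtype=numpy.complex64))
    correlate(x, y, z)
    assert numpy.allclose(z.numpy(), numpy.conj(x.numpy()) * y.numpy())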
+[docs] +class MatchedFilterControl(object): + def __init__(self, low_frequency_cutoff, high_frequency_cutoff, snr_threshold, tlen, + delta_f, dtype, segment_list, template_output, use_cluster, + downsample_factor=1, upsample_threshold=1, upsample_method='pruned_fft', + gpu_callback_method='none', cluster_function='symmetric'): + """ Create a matched filter engine. + + Parameters + ---------- + low_frequency_cutoff : {None, float}, optional + The frequency to begin the filter calculation. If None, begin at the + first frequency after DC. + high_frequency_cutoff : {None, float}, optional + The frequency to stop the filter calculation. If None, continue to the + the nyquist frequency. + snr_threshold : float + The minimum snr to return when filtering + segment_list : list + List of FrequencySeries that are the Fourier-transformed data segments + template_output : complex64 + Array of memory given as the 'out' parameter to waveform.FilterBank + use_cluster : boolean + If true, cluster triggers above threshold using a window; otherwise, + only apply a threshold. + downsample_factor : {1, int}, optional + The factor by which to reduce the sample rate when doing a hierarchical + matched filter + upsample_threshold : {1, float}, optional + The fraction of the snr_threshold to trigger on the subsampled filter. + upsample_method : {pruned_fft, str} + The method to upsample or interpolate the reduced rate filter. + cluster_function : {symmetric, str}, optional + Which method is used to cluster triggers over time. If 'findchirp', a + sliding forward window; if 'symmetric', each window's peak is compared + to the windows before and after it, and only kept as a trigger if larger + than both. + """ + # Assuming analysis time is constant across templates and segments, also + # delta_f is constant across segments. + self.tlen = tlen + self.flen = self.tlen / 2 + 1 + self.delta_f = delta_f + self.delta_t = 1.0/(self.delta_f * self.tlen) + self.dtype = dtype + self.snr_threshold = snr_threshold + self.flow = low_frequency_cutoff + self.fhigh = high_frequency_cutoff + self.gpu_callback_method = gpu_callback_method + if cluster_function not in ['symmetric', 'findchirp']: + raise ValueError("MatchedFilter: 'cluster_function' must be either 'symmetric' or 'findchirp'") + self.cluster_function = cluster_function + self.segments = segment_list + self.htilde = template_output + + if downsample_factor == 1: + self.snr_mem = zeros(self.tlen, dtype=self.dtype) + self.corr_mem = zeros(self.tlen, dtype=self.dtype) + + if use_cluster and (cluster_function == 'symmetric'): + self.matched_filter_and_cluster = self.full_matched_filter_and_cluster_symm + # setup the threasholding/clustering operations for each segment + self.threshold_and_clusterers = [] + for seg in self.segments: + thresh = events.ThresholdCluster(self.snr_mem[seg.analyze]) + self.threshold_and_clusterers.append(thresh) + elif use_cluster and (cluster_function == 'findchirp'): + self.matched_filter_and_cluster = self.full_matched_filter_and_cluster_fc + else: + self.matched_filter_and_cluster = self.full_matched_filter_thresh_only + + # Assuming analysis time is constant across templates and segments, also + # delta_f is constant across segments. 
+ self.kmin, self.kmax = get_cutoff_indices(self.flow, self.fhigh, + self.delta_f, self.tlen) + + # Set up the correlation operations for each analysis segment + corr_slice = slice(self.kmin, self.kmax) + self.correlators = [] + for seg in self.segments: + corr = Correlator(self.htilde[corr_slice], + seg[corr_slice], + self.corr_mem[corr_slice]) + self.correlators.append(corr) + + # setup up the ifft we will do + self.ifft = IFFT(self.corr_mem, self.snr_mem) + + elif downsample_factor >= 1: + self.matched_filter_and_cluster = self.hierarchical_matched_filter_and_cluster + self.downsample_factor = downsample_factor + self.upsample_method = upsample_method + self.upsample_threshold = upsample_threshold + + N_full = self.tlen + N_red = N_full / downsample_factor + self.kmin_full, self.kmax_full = get_cutoff_indices(self.flow, + self.fhigh, self.delta_f, N_full) + + self.kmin_red, _ = get_cutoff_indices(self.flow, + self.fhigh, self.delta_f, N_red) + + if self.kmax_full < N_red: + self.kmax_red = self.kmax_full + else: + self.kmax_red = N_red - 1 + + self.snr_mem = zeros(N_red, dtype=self.dtype) + self.corr_mem_full = FrequencySeries(zeros(N_full, dtype=self.dtype), delta_f=self.delta_f) + self.corr_mem = Array(self.corr_mem_full[0:N_red], copy=False) + self.inter_vec = zeros(N_full, dtype=self.dtype) + + else: + raise ValueError("Invalid downsample factor") + +
+[docs] + def full_matched_filter_and_cluster_symm(self, segnum, template_norm, window, epoch=None): + """ Returns the complex snr timeseries, normalization of the complex snr, + the correlation vector frequency series, the list of indices of the + triggers, and the snr values at the trigger locations. Returns empty + lists for these for points that are not above the threshold. + + Calculated the matched filter, threshold, and cluster. + + Parameters + ---------- + segnum : int + Index into the list of segments at MatchedFilterControl construction + against which to filter. + template_norm : float + The htilde, template normalization factor. + window : int + Size of the window over which to cluster triggers, in samples + + Returns + ------- + snr : TimeSeries + A time series containing the complex snr. + norm : float + The normalization of the complex snr. + correlation: FrequencySeries + A frequency series containing the correlation vector. + idx : Array + List of indices of the triggers. + snrv : Array + The snr values at the trigger locations. + """ + norm = (4.0 * self.delta_f) / sqrt(template_norm) + self.correlators[segnum].correlate() + self.ifft.execute() + snrv, idx = self.threshold_and_clusterers[segnum].threshold_and_cluster(self.snr_threshold / norm, window) + + if len(idx) == 0: + return [], [], [], [], [] + + logger.info("%d points above threshold", len(idx)) + + snr = TimeSeries(self.snr_mem, epoch=epoch, delta_t=self.delta_t, copy=False) + corr = FrequencySeries(self.corr_mem, delta_f=self.delta_f, copy=False) + return snr, norm, corr, idx, snrv
+ + +
+[docs] + def full_matched_filter_and_cluster_fc(self, segnum, template_norm, window, epoch=None): + """ Returns the complex snr timeseries, normalization of the complex snr, + the correlation vector frequency series, the list of indices of the + triggers, and the snr values at the trigger locations. Returns empty + lists for these for points that are not above the threshold. + + Calculated the matched filter, threshold, and cluster. + + Parameters + ---------- + segnum : int + Index into the list of segments at MatchedFilterControl construction + against which to filter. + template_norm : float + The htilde, template normalization factor. + window : int + Size of the window over which to cluster triggers, in samples + + Returns + ------- + snr : TimeSeries + A time series containing the complex snr. + norm : float + The normalization of the complex snr. + correlation: FrequencySeries + A frequency series containing the correlation vector. + idx : Array + List of indices of the triggers. + snrv : Array + The snr values at the trigger locations. + """ + norm = (4.0 * self.delta_f) / sqrt(template_norm) + self.correlators[segnum].correlate() + self.ifft.execute() + idx, snrv = events.threshold(self.snr_mem[self.segments[segnum].analyze], + self.snr_threshold / norm) + idx, snrv = events.cluster_reduce(idx, snrv, window) + + if len(idx) == 0: + return [], [], [], [], [] + + logger.info("%d points above threshold", len(idx)) + + snr = TimeSeries(self.snr_mem, epoch=epoch, delta_t=self.delta_t, copy=False) + corr = FrequencySeries(self.corr_mem, delta_f=self.delta_f, copy=False) + return snr, norm, corr, idx, snrv
+ + +
+[docs] + def full_matched_filter_thresh_only(self, segnum, template_norm, window=None, epoch=None): + """ Returns the complex snr timeseries, normalization of the complex snr, + the correlation vector frequency series, the list of indices of the + triggers, and the snr values at the trigger locations. Returns empty + lists for these for points that are not above the threshold. + + Calculated the matched filter, threshold, and cluster. + + Parameters + ---------- + segnum : int + Index into the list of segments at MatchedFilterControl construction + against which to filter. + template_norm : float + The htilde, template normalization factor. + window : int + Size of the window over which to cluster triggers, in samples. + This is IGNORED by this function, and provided only for API compatibility. + + Returns + ------- + snr : TimeSeries + A time series containing the complex snr. + norm : float + The normalization of the complex snr. + correlation: FrequencySeries + A frequency series containing the correlation vector. + idx : Array + List of indices of the triggers. + snrv : Array + The snr values at the trigger locations. + """ + norm = (4.0 * self.delta_f) / sqrt(template_norm) + self.correlators[segnum].correlate() + self.ifft.execute() + idx, snrv = events.threshold_only(self.snr_mem[self.segments[segnum].analyze], + self.snr_threshold / norm) + logger.info("%d points above threshold", len(idx)) + + snr = TimeSeries(self.snr_mem, epoch=epoch, delta_t=self.delta_t, copy=False) + corr = FrequencySeries(self.corr_mem, delta_f=self.delta_f, copy=False) + return snr, norm, corr, idx, snrv
+ + +
+[docs] + def hierarchical_matched_filter_and_cluster(self, segnum, template_norm, window): + """ Returns the complex snr timeseries, normalization of the complex snr, + the correlation vector frequency series, the list of indices of the + triggers, and the snr values at the trigger locations. Returns empty + lists for these for points that are not above the threshold. + + Calculated the matched filter, threshold, and cluster. + + Parameters + ---------- + segnum : int + Index into the list of segments at MatchedFilterControl construction + template_norm : float + The htilde, template normalization factor. + window : int + Size of the window over which to cluster triggers, in samples + + Returns + ------- + snr : TimeSeries + A time series containing the complex snr at the reduced sample rate. + norm : float + The normalization of the complex snr. + correlation: FrequencySeries + A frequency series containing the correlation vector. + idx : Array + List of indices of the triggers. + snrv : Array + The snr values at the trigger locations. + """ + from pycbc.fft.fftw_pruned import pruned_c2cifft, fft_transpose + htilde = self.htilde + stilde = self.segments[segnum] + + norm = (4.0 * stilde.delta_f) / sqrt(template_norm) + + correlate(htilde[self.kmin_red:self.kmax_red], + stilde[self.kmin_red:self.kmax_red], + self.corr_mem[self.kmin_red:self.kmax_red]) + + ifft(self.corr_mem, self.snr_mem) + + if not hasattr(stilde, 'red_analyze'): + stilde.red_analyze = \ + slice(stilde.analyze.start/self.downsample_factor, + stilde.analyze.stop/self.downsample_factor) + + + idx_red, snrv_red = events.threshold(self.snr_mem[stilde.red_analyze], + self.snr_threshold / norm * self.upsample_threshold) + if len(idx_red) == 0: + return [], None, [], [], [] + + idx_red, _ = events.cluster_reduce(idx_red, snrv_red, window / self.downsample_factor) + logger.info("%d points above threshold at reduced resolution", + len(idx_red)) + + # The fancy upsampling is here + if self.upsample_method=='pruned_fft': + idx = (idx_red + stilde.analyze.start/self.downsample_factor)\ + * self.downsample_factor + + idx = smear(idx, self.downsample_factor) + + # cache transposed versions of htilde and stilde + if not hasattr(self.corr_mem_full, 'transposed'): + self.corr_mem_full.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype) + + if not hasattr(htilde, 'transposed'): + htilde.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype) + htilde.transposed[self.kmin_full:self.kmax_full] = htilde[self.kmin_full:self.kmax_full] + htilde.transposed = fft_transpose(htilde.transposed) + + if not hasattr(stilde, 'transposed'): + stilde.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype) + stilde.transposed[self.kmin_full:self.kmax_full] = stilde[self.kmin_full:self.kmax_full] + stilde.transposed = fft_transpose(stilde.transposed) + + correlate(htilde.transposed, stilde.transposed, self.corr_mem_full.transposed) + snrv = pruned_c2cifft(self.corr_mem_full.transposed, self.inter_vec, idx, pretransposed=True) + idx = idx - stilde.analyze.start + idx2, snrv = events.threshold(Array(snrv, copy=False), self.snr_threshold / norm) + + if len(idx2) > 0: + correlate(htilde[self.kmax_red:self.kmax_full], + stilde[self.kmax_red:self.kmax_full], + self.corr_mem_full[self.kmax_red:self.kmax_full]) + idx, snrv = events.cluster_reduce(idx[idx2], snrv, window) + else: + idx, snrv = [], [] + + logger.info("%d points at full rate and clustering", len(idx)) + return self.snr_mem, norm, self.corr_mem_full, idx, snrv + else: + raise 
ValueError("Invalid upsample method")
+
+ + + +
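For orientation, the normalisation used throughout the class above, norm = 4 * delta_f / sqrt(template_norm), corresponds to the usual frequency-domain matched-filter SNR provided that template_norm is the template's inner product with itself and that the stored data segments are already overwhitened (divided by the PSD); the docstrings do not spell this out, so treat that identification as an assumption:

.. math::

    \rho(t_j) = \frac{4\,\Delta f}{\sqrt{\langle h | h \rangle}}
                \left| \sum_{k} \frac{\tilde{s}_k \tilde{h}^{*}_{k}}{S_n(f_k)}\,
                e^{2 \pi i j k / N} \right| ,
    \qquad
    \langle h | h \rangle = 4\,\Delta f \sum_{k} \frac{|\tilde{h}_k|^{2}}{S_n(f_k)} ,

where the sums run between the low- and high-frequency cutoff indices kmin and kmax computed in the constructor, and the factor of 1/S_n in the numerator is the one pre-applied to the overwhitened segments.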
+[docs] +def compute_max_snr_over_sky_loc_stat(hplus, hcross, hphccorr, + hpnorm=None, hcnorm=None, + out=None, thresh=0, + analyse_slice=None): + """ + Matched filter maximised over polarization and orbital phase. + + This implements the statistic derived in 1603.02444. It is encouraged + to read that work to understand the limitations and assumptions implicit + in this statistic before using it. + + Parameters + ----------- + hplus : TimeSeries + This is the IFFTed complex SNR time series of (h+, data). If not + normalized, supply the normalization factor so this can be done! + It is recommended to normalize this before sending through this + function + hcross : TimeSeries + This is the IFFTed complex SNR time series of (hx, data). If not + normalized, supply the normalization factor so this can be done! + hphccorr : float + The real component of the overlap between the two polarizations + Re[(h+, hx)]. Note that the imaginary component does not enter the + detection statistic. This must be normalized and is sign-sensitive. + thresh : float + Used for optimization. If we do not care about the value of SNR + values below thresh we can calculate a quick statistic that will + always overestimate SNR and then only calculate the proper, more + expensive, statistic at points where the quick SNR is above thresh. + hpsigmasq : float + The normalization factor (h+, h+). Default = None (=1, already + normalized) + hcsigmasq : float + The normalization factor (hx, hx). Default = None (=1, already + normalized) + out : TimeSeries (optional, default=None) + If given, use this array to store the output. + + Returns + -------- + det_stat : TimeSeries + The SNR maximized over sky location + """ + # NOTE: Not much optimization has been done here! This may need to be + # Cythonized. + + if out is None: + out = zeros(len(hplus)) + out.non_zero_locs = numpy.array([], dtype=out.dtype) + else: + if not hasattr(out, 'non_zero_locs'): + # Doing this every time is not a zero-cost operation + out.data[:] = 0 + out.non_zero_locs = numpy.array([], dtype=out.dtype) + else: + # Only set non zero locations to zero + out.data[out.non_zero_locs] = 0 + + + # If threshold is given we can limit the points at which to compute the + # full statistic + if thresh: + # This is the statistic that always overestimates the SNR... + # It allows some unphysical freedom that the full statistic does not + idx_p, _ = events.threshold_only(hplus[analyse_slice], + thresh / (2**0.5 * hpnorm)) + idx_c, _ = events.threshold_only(hcross[analyse_slice], + thresh / (2**0.5 * hcnorm)) + idx_p = idx_p + analyse_slice.start + idx_c = idx_c + analyse_slice.start + hp_red = hplus[idx_p] * hpnorm + hc_red = hcross[idx_p] * hcnorm + stat_p = hp_red.real**2 + hp_red.imag**2 + \ + hc_red.real**2 + hc_red.imag**2 + locs_p = idx_p[stat_p > (thresh*thresh)] + hp_red = hplus[idx_c] * hpnorm + hc_red = hcross[idx_c] * hcnorm + stat_c = hp_red.real**2 + hp_red.imag**2 + \ + hc_red.real**2 + hc_red.imag**2 + locs_c = idx_c[stat_c > (thresh*thresh)] + locs = numpy.unique(numpy.concatenate((locs_p, locs_c))) + + hplus = hplus[locs] + hcross = hcross[locs] + + hplus = hplus * hpnorm + hcross = hcross * hcnorm + + + # Calculate and sanity check the denominator + denom = 1 - hphccorr*hphccorr + if denom < 0: + if hphccorr > 1: + err_msg = "Overlap between hp and hc is given as %f. " %(hphccorr) + err_msg += "How can an overlap be bigger than 1?" + raise ValueError(err_msg) + else: + err_msg = "There really is no way to raise this error!?! 
" + err_msg += "If you're seeing this, it is bad." + raise ValueError(err_msg) + if denom == 0: + # This case, of hphccorr==1, makes the statistic degenerate + # This case should not physically be possible luckily. + err_msg = "You have supplied a real overlap between hp and hc of 1. " + err_msg += "Ian is reasonably certain this is physically impossible " + err_msg += "so why are you seeing this?" + raise ValueError(err_msg) + + assert(len(hplus) == len(hcross)) + + # Now the stuff where comp. cost may be a problem + hplus_magsq = numpy.real(hplus) * numpy.real(hplus) + \ + numpy.imag(hplus) * numpy.imag(hplus) + hcross_magsq = numpy.real(hcross) * numpy.real(hcross) + \ + numpy.imag(hcross) * numpy.imag(hcross) + rho_pluscross = numpy.real(hplus) * numpy.real(hcross) + numpy.imag(hplus)*numpy.imag(hcross) + + sqroot = (hplus_magsq - hcross_magsq)**2 + sqroot += 4 * (hphccorr * hplus_magsq - rho_pluscross) * \ + (hphccorr * hcross_magsq - rho_pluscross) + # Sometimes this can be less than 0 due to numeric imprecision, catch this. + if (sqroot < 0).any(): + indices = numpy.arange(len(sqroot))[sqroot < 0] + # This should not be *much* smaller than 0 due to numeric imprecision + if (sqroot[indices] < -0.0001).any(): + err_msg = "Square root has become negative. Something wrong here!" + raise ValueError(err_msg) + sqroot[indices] = 0 + sqroot = numpy.sqrt(sqroot) + det_stat_sq = 0.5 * (hplus_magsq + hcross_magsq - \ + 2 * rho_pluscross*hphccorr + sqroot) / denom + + det_stat = numpy.sqrt(det_stat_sq) + + if thresh: + out.data[locs] = det_stat + out.non_zero_locs = locs + return out + else: + return Array(det_stat, copy=False)
+ + +
+[docs] +def compute_u_val_for_sky_loc_stat(hplus, hcross, hphccorr, + hpnorm=None, hcnorm=None, indices=None): + """The max-over-sky location detection statistic maximizes over a phase, + an amplitude and the ratio of F+ and Fx, encoded in a variable called u. + Here we return the value of u for the given indices. + """ + if indices is not None: + hplus = hplus[indices] + hcross = hcross[indices] + + if hpnorm is not None: + hplus = hplus * hpnorm + if hcnorm is not None: + hcross = hcross * hcnorm + + # Sanity checking in func. above should already have identified any points + # which are bad, and should be used to construct indices for input here + hplus_magsq = numpy.real(hplus) * numpy.real(hplus) + \ + numpy.imag(hplus) * numpy.imag(hplus) + hcross_magsq = numpy.real(hcross) * numpy.real(hcross) + \ + numpy.imag(hcross) * numpy.imag(hcross) + rho_pluscross = numpy.real(hplus) * numpy.real(hcross) + \ + numpy.imag(hplus)*numpy.imag(hcross) + + a = hphccorr * hplus_magsq - rho_pluscross + b = hplus_magsq - hcross_magsq + c = rho_pluscross - hphccorr * hcross_magsq + + sq_root = b*b - 4*a*c + sq_root = sq_root**0.5 + sq_root = -sq_root + # Catch the a->0 case + bad_lgc = (a == 0) + dbl_bad_lgc = numpy.logical_and(c == 0, b == 0) + dbl_bad_lgc = numpy.logical_and(bad_lgc, dbl_bad_lgc) + # Initialize u + u = sq_root * 0. + # In this case u is completely degenerate, so set it to 1 + u[dbl_bad_lgc] = 1. + # If a->0 avoid overflow by just setting to a large value + u[bad_lgc & ~dbl_bad_lgc] = 1E17 + # Otherwise normal statistic + u[~bad_lgc] = (-b[~bad_lgc] + sq_root[~bad_lgc]) / (2*a[~bad_lgc]) + + snr_cplx = hplus * u + hcross + coa_phase = numpy.angle(snr_cplx) + + return u, coa_phase
+ + +
+[docs]
+def compute_max_snr_over_sky_loc_stat_no_phase(hplus, hcross, hphccorr,
+                                               hpnorm=None, hcnorm=None,
+                                               out=None, thresh=0,
+                                               analyse_slice=None):
+    """
+    Matched filter maximised over polarization phase.
+
+    This implements the statistic derived in 1709.09181. Readers are
+    encouraged to consult that work to understand the limitations and
+    assumptions implicit in this statistic before using it.
+
+    In contrast to compute_max_snr_over_sky_loc_stat this function
+    performs no maximization over orbital phase, treating that as an intrinsic
+    parameter. In the case of aligned-spin 2,2-mode only waveforms, this
+    collapses to the normal statistic (at twice the computational cost!)
+
+    Parameters
+    ----------
+    hplus : TimeSeries
+        This is the IFFTed complex SNR time series of (h+, data). If not
+        normalized, supply the normalization factor so this can be done!
+        It is recommended to normalize this before sending it through this
+        function.
+    hcross : TimeSeries
+        This is the IFFTed complex SNR time series of (hx, data). If not
+        normalized, supply the normalization factor so this can be done!
+    hphccorr : float
+        The real component of the overlap between the two polarizations
+        Re[(h+, hx)]. Note that the imaginary component does not enter the
+        detection statistic. This must be normalized and is sign-sensitive.
+    thresh : float
+        Used for optimization. If we do not care about the value of SNR
+        values below thresh we can calculate a quick statistic that will
+        always overestimate SNR and then only calculate the proper, more
+        expensive, statistic at points where the quick SNR is above thresh.
+    hpnorm : float
+        The normalization factor to apply to hplus. Default = None (=1,
+        hplus is already normalized).
+    hcnorm : float
+        The normalization factor to apply to hcross. Default = None (=1,
+        hcross is already normalized).
+    out : TimeSeries (optional, default=None)
+        If given, use this array to store the output.
+    analyse_slice : slice (optional, default=None)
+        If thresh is given, only search this slice of the input time series
+        for points above threshold.
+
+    Returns
+    -------
+    det_stat : TimeSeries
+        The SNR maximized over sky location
+    """
+    # NOTE: Not much optimization has been done here! This may need to be
+    # Cythonized.
+
+    if out is None:
+        out = zeros(len(hplus))
+        out.non_zero_locs = numpy.array([], dtype=out.dtype)
+    else:
+        if not hasattr(out, 'non_zero_locs'):
+            # Doing this every time is not a zero-cost operation
+            out.data[:] = 0
+            out.non_zero_locs = numpy.array([], dtype=out.dtype)
+        else:
+            # Only set non zero locations to zero
+            out.data[out.non_zero_locs] = 0
+
+    # If threshold is given we can limit the points at which to compute the
+    # full statistic
+    if thresh:
+        # This is the statistic that always overestimates the SNR...
+        # It allows some unphysical freedom that the full statistic does not
+        #
+        # For now this is copied from the max-over-phase statistic. One could
+        # probably make this faster by removing the imaginary components of
+        # the matched filter, as these are not used here.
+        idx_p, _ = events.threshold_only(hplus[analyse_slice],
+                                         thresh / (2**0.5 * hpnorm))
+        idx_c, _ = events.threshold_only(hcross[analyse_slice],
+                                         thresh / (2**0.5 * hcnorm))
+        idx_p = idx_p + analyse_slice.start
+        idx_c = idx_c + analyse_slice.start
+        hp_red = hplus[idx_p] * hpnorm
+        hc_red = hcross[idx_p] * hcnorm
+        stat_p = hp_red.real**2 + hp_red.imag**2 + \
+            hc_red.real**2 + hc_red.imag**2
+        locs_p = idx_p[stat_p > (thresh*thresh)]
+        hp_red = hplus[idx_c] * hpnorm
+        hc_red = hcross[idx_c] * hcnorm
+        stat_c = hp_red.real**2 + hp_red.imag**2 + \
+            hc_red.real**2 + hc_red.imag**2
+        locs_c = idx_c[stat_c > (thresh*thresh)]
+        locs = numpy.unique(numpy.concatenate((locs_p, locs_c)))
+
+        hplus = hplus[locs]
+        hcross = hcross[locs]
+
+        hplus = hplus * hpnorm
+        hcross = hcross * hcnorm
+
+
+    # Calculate and sanity check the denominator
+    denom = 1 - hphccorr*hphccorr
+    if denom < 0:
+        if hphccorr > 1:
+            err_msg = "Overlap between hp and hc is given as %f. " %(hphccorr)
+            err_msg += "How can an overlap be bigger than 1?"
+            raise ValueError(err_msg)
+        else:
+            err_msg = "There really is no way to raise this error!?! "
+            err_msg += "If you're seeing this, it is bad."
+            raise ValueError(err_msg)
+    if denom == 0:
+        # This case, of hphccorr==1, makes the statistic degenerate
+        # This case should not physically be possible luckily.
+        err_msg = "You have supplied a real overlap between hp and hc of 1. "
+        err_msg += "Ian is reasonably certain this is physically impossible "
+        err_msg += "so why are you seeing this?"
+        raise ValueError(err_msg)
+
+    assert(len(hplus) == len(hcross))
+
+    # Now the stuff where comp. cost may be a problem
+    hplus_magsq = numpy.real(hplus) * numpy.real(hplus)
+    hcross_magsq = numpy.real(hcross) * numpy.real(hcross)
+    rho_pluscross = numpy.real(hplus) * numpy.real(hcross)
+
+    det_stat_sq = (hplus_magsq + hcross_magsq - 2 * rho_pluscross*hphccorr)
+
+    det_stat = numpy.sqrt(det_stat_sq / denom)
+
+    if thresh:
+        out.data[locs] = det_stat
+        out.non_zero_locs = locs
+        return out
+    else:
+        return Array(det_stat, copy=False)
+ + +
+[docs]
+def compute_u_val_for_sky_loc_stat_no_phase(hplus, hcross, hphccorr,
+                                            hpnorm=None, hcnorm=None,
+                                            indices=None):
+    """The max-over-sky location (no phase) detection statistic maximizes over
+    an amplitude and the ratio of F+ and Fx, encoded in a variable called u.
+    Here we return the value of u for the given indices. As no maximization
+    over orbital phase is performed, the returned coalescence phase is
+    identically zero.
+    """
+    if indices is not None:
+        hplus = hplus[indices]
+        hcross = hcross[indices]
+
+    if hpnorm is not None:
+        hplus = hplus * hpnorm
+    if hcnorm is not None:
+        hcross = hcross * hcnorm
+
+    rhoplusre = numpy.real(hplus)
+    rhocrossre = numpy.real(hcross)
+    overlap = numpy.real(hphccorr)
+
+    denom = (-rhocrossre + overlap*rhoplusre)
+    # Initialize the u_val array
+    u_val = denom * 0.
+    # Catch the denominator -> 0 case
+    numpy.putmask(u_val, denom == 0, 1E17)
+    # Otherwise do normal statistic
+    numpy.putmask(u_val, denom != 0,
+                  (-rhoplusre + overlap*rhocrossre) / (-rhocrossre + overlap*rhoplusre))
+    # No phase maximization is done here, so report a zero coalescence phase
+    # for every point (len(u_val) equals len(indices) when indices is given,
+    # and len(hplus) otherwise).
+    coa_phase = numpy.zeros(len(u_val), dtype=numpy.float32)
+
+    return u_val, coa_phase
+ + + +
+[docs] +class MatchedFilterSkyMaxControl(object): + # FIXME: This seems much more simplistic than the aligned-spin class. + # E.g. no correlators. Is this worth updating? + def __init__(self, low_frequency_cutoff, high_frequency_cutoff, + snr_threshold, tlen, delta_f, dtype): + """ + Create a matched filter engine. + + Parameters + ---------- + low_frequency_cutoff : {None, float}, optional + The frequency to begin the filter calculation. If None, begin + at the first frequency after DC. + high_frequency_cutoff : {None, float}, optional + The frequency to stop the filter calculation. If None, continue + to the nyquist frequency. + snr_threshold : float + The minimum snr to return when filtering + """ + self.tlen = tlen + self.delta_f = delta_f + self.dtype = dtype + self.snr_threshold = snr_threshold + self.flow = low_frequency_cutoff + self.fhigh = high_frequency_cutoff + + self.matched_filter_and_cluster = \ + self.full_matched_filter_and_cluster + self.snr_plus_mem = zeros(self.tlen, dtype=self.dtype) + self.corr_plus_mem = zeros(self.tlen, dtype=self.dtype) + self.snr_cross_mem = zeros(self.tlen, dtype=self.dtype) + self.corr_cross_mem = zeros(self.tlen, dtype=self.dtype) + self.snr_mem = zeros(self.tlen, dtype=self.dtype) + self.cached_hplus_hcross_correlation = None + self.cached_hplus_hcross_hplus = None + self.cached_hplus_hcross_hcross = None + self.cached_hplus_hcross_psd = None + + +
+[docs]
+    def full_matched_filter_and_cluster(self, hplus, hcross, hplus_norm,
+                                        hcross_norm, psd, stilde, window):
+        """
+        Return the maximized SNR, normalizations and trigger information.
+
+        Calculate the matched filter, threshold, and cluster.
+
+        Parameters
+        ----------
+        hplus : FrequencySeries
+            The plus-polarization template filter.
+        hcross : FrequencySeries
+            The cross-polarization template filter.
+        hplus_norm : float
+            The normalization (sigma-squared) of the plus template.
+        hcross_norm : float
+            The normalization (sigma-squared) of the cross template.
+        psd : FrequencySeries
+            The noise weighting used for the overlap between the two
+            polarizations.
+        stilde : FrequencySeries
+            The strain data to be filtered.
+        window : int
+            The size of the cluster window in samples.
+
+        Returns
+        -------
+        snr : TimeSeries
+            A time series containing the SNR maximized over sky location.
+        corr_plus : FrequencySeries
+            The correlation vector of the plus filter.
+        corr_cross : FrequencySeries
+            The correlation vector of the cross filter.
+        idx : Array
+            List of indices of the triggers.
+        snrv : Array
+            The snr values at the trigger locations.
+        u_vals : Array
+            The ratio of F+ and Fx (u) at the trigger locations.
+        coa_phase : Array
+            The coalescence phase at the trigger locations.
+        hplus_cross_corr : float
+            The normalized real overlap between the two polarizations.
+        plus_norm : float
+            The normalization of the plus-polarization complex SNR.
+        cross_norm : float
+            The normalization of the cross-polarization complex SNR.
+        """
+
+        I_plus, Iplus_corr, Iplus_norm = matched_filter_core(hplus, stilde,
+                                             h_norm=hplus_norm,
+                                             low_frequency_cutoff=self.flow,
+                                             high_frequency_cutoff=self.fhigh,
+                                             out=self.snr_plus_mem,
+                                             corr_out=self.corr_plus_mem)
+
+        I_cross, Icross_corr, Icross_norm = matched_filter_core(hcross,
+                                             stilde, h_norm=hcross_norm,
+                                             low_frequency_cutoff=self.flow,
+                                             high_frequency_cutoff=self.fhigh,
+                                             out=self.snr_cross_mem,
+                                             corr_out=self.corr_cross_mem)
+
+        # The information on the complex side of this overlap is important
+        # we may want to use this in the future.
+        if not id(hplus) == self.cached_hplus_hcross_hplus:
+            self.cached_hplus_hcross_correlation = None
+        if not id(hcross) == self.cached_hplus_hcross_hcross:
+            self.cached_hplus_hcross_correlation = None
+        if not id(psd) == self.cached_hplus_hcross_psd:
+            self.cached_hplus_hcross_correlation = None
+        if self.cached_hplus_hcross_correlation is None:
+            hplus_cross_corr = overlap_cplx(hplus, hcross, psd=psd,
+                                            low_frequency_cutoff=self.flow,
+                                            high_frequency_cutoff=self.fhigh,
+                                            normalized=False)
+            hplus_cross_corr = numpy.real(hplus_cross_corr)
+            hplus_cross_corr = hplus_cross_corr / (hcross_norm*hplus_norm)**0.5
+            self.cached_hplus_hcross_correlation = hplus_cross_corr
+            self.cached_hplus_hcross_hplus = id(hplus)
+            self.cached_hplus_hcross_hcross = id(hcross)
+            self.cached_hplus_hcross_psd = id(psd)
+        else:
+            hplus_cross_corr = self.cached_hplus_hcross_correlation
+
+        snr = self._maximized_snr(I_plus, I_cross,
+                                  hplus_cross_corr,
+                                  hpnorm=Iplus_norm,
+                                  hcnorm=Icross_norm,
+                                  out=self.snr_mem,
+                                  thresh=self.snr_threshold,
+                                  analyse_slice=stilde.analyze)
+        # FIXME: This should live further down
+        # Convert output to pycbc TimeSeries
+        delta_t = 1.0 / (self.tlen * stilde.delta_f)
+
+        snr = TimeSeries(snr, epoch=stilde.start_time, delta_t=delta_t,
+                         copy=False)
+
+        idx, snrv = events.threshold_real_numpy(snr[stilde.analyze],
+                                                self.snr_threshold)
+
+        if len(idx) == 0:
+            return [], 0, 0, [], [], [], [], 0, 0, 0
+        logger.info("%d points above threshold", len(idx))
+
+        idx, snrv = events.cluster_reduce(idx, snrv, window)
+        logger.info("%d clustered points", len(idx))
+
+        u_vals, coa_phase = self._maximized_extrinsic_params\
+            (I_plus.data, I_cross.data, hplus_cross_corr,
+             indices=idx+stilde.analyze.start, hpnorm=Iplus_norm,
+             hcnorm=Icross_norm)
+
+        return snr, Iplus_corr, Icross_corr, idx, snrv, u_vals, coa_phase,\
+            hplus_cross_corr, Iplus_norm, Icross_norm
+ + + def _maximized_snr(self, hplus, hcross, hphccorr, **kwargs): + return compute_max_snr_over_sky_loc_stat(hplus, hcross, hphccorr, + **kwargs) + + def _maximized_extrinsic_params(self, hplus, hcross, hphccorr, **kwargs): + return compute_u_val_for_sky_loc_stat(hplus, hcross, hphccorr, + **kwargs)
+ + + +
+[docs] +class MatchedFilterSkyMaxControlNoPhase(MatchedFilterSkyMaxControl): + # Basically the same as normal SkyMaxControl, except we use a slight + # variation in the internal SNR functions. + def _maximized_snr(self, hplus, hcross, hphccorr, **kwargs): + return compute_max_snr_over_sky_loc_stat_no_phase(hplus, hcross, + hphccorr, **kwargs) + + def _maximized_extrinsic_params(self, hplus, hcross, hphccorr, **kwargs): + return compute_u_val_for_sky_loc_stat_no_phase(hplus, hcross, hphccorr, + **kwargs)
+ + +
+[docs]
+def make_frequency_series(vec):
+    """Return a frequency series of the input vector.
+
+    If the input is a frequency series it is returned, else if the input
+    vector is a real time series it is Fourier transformed and returned as a
+    frequency series.
+
+    Parameters
+    ----------
+    vec : TimeSeries or FrequencySeries
+        The input vector to convert.
+
+    Returns
+    -------
+    Frequency Series: FrequencySeries
+        A frequency domain version of the input vector.
+    """
+    if isinstance(vec, FrequencySeries):
+        return vec
+    if isinstance(vec, TimeSeries):
+        N = len(vec)
+        n = N // 2 + 1
+        delta_f = 1.0 / N / vec.delta_t
+        vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)),
+                                   delta_f=delta_f, copy=False)
+        fft(vec, vectilde)
+        return vectilde
+    else:
+        raise TypeError("Can only convert a TimeSeries to a FrequencySeries")
+ + +
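+# Illustrative usage sketch added alongside this listing (not part of the
+# original module): converting a short sine-wave TimeSeries to the frequency
+# domain.  The sample rate and duration are arbitrary.
+def _example_make_frequency_series():
+    import numpy
+    from pycbc.types import TimeSeries
+    t = numpy.arange(4096) / 4096.0
+    ts = TimeSeries(numpy.sin(2 * numpy.pi * 30 * t), delta_t=1.0 / 4096)
+    fs = make_frequency_series(ts)
+    # The result has delta_f = 1 / duration and N // 2 + 1 frequency bins
+    assert len(fs) == len(ts) // 2 + 1
+    return fs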
+[docs] +def sigmasq_series(htilde, psd=None, low_frequency_cutoff=None, + high_frequency_cutoff=None): + """Return a cumulative sigmasq frequency series. + + Return a frequency series containing the accumulated power in the input + up to that frequency. + + Parameters + ---------- + htilde : TimeSeries or FrequencySeries + The input vector + psd : {None, FrequencySeries}, optional + The psd used to weight the accumulated power. + low_frequency_cutoff : {None, float}, optional + The frequency to begin accumulating power. If None, start at the beginning + of the vector. + high_frequency_cutoff : {None, float}, optional + The frequency to stop considering accumulated power. If None, continue + until the end of the input vector. + + Returns + ------- + Frequency Series: FrequencySeries + A frequency series containing the cumulative sigmasq. + """ + htilde = make_frequency_series(htilde) + N = (len(htilde)-1) * 2 + norm = 4.0 * htilde.delta_f + kmin, kmax = get_cutoff_indices(low_frequency_cutoff, + high_frequency_cutoff, htilde.delta_f, N) + + sigma_vec = FrequencySeries(zeros(len(htilde), dtype=real_same_precision_as(htilde)), + delta_f = htilde.delta_f, copy=False) + + mag = htilde.squared_norm() + + if psd is not None: + mag /= psd + + sigma_vec[kmin:kmax] = mag[kmin:kmax].cumsum() + + return sigma_vec*norm
+ + + +
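+# Sketch added for illustration (not part of the original module): using the
+# cumulative sigmasq series to locate where a template accumulates most of
+# its power.  The waveform and PSD choices are arbitrary.
+def _example_sigmasq_series():
+    import numpy
+    from pycbc.waveform import get_fd_waveform
+    from pycbc.psd import aLIGOZeroDetHighPower
+    hp, _ = get_fd_waveform(approximant="IMRPhenomD", mass1=30, mass2=30,
+                            delta_f=0.25, f_lower=20)
+    psd = aLIGOZeroDetHighPower(len(hp), hp.delta_f, 20)
+    cum = sigmasq_series(hp, psd=psd, low_frequency_cutoff=20)
+    c = cum.numpy()
+    # First frequency bin at which 90% of the total power has accumulated
+    k90 = numpy.argmax(c >= 0.9 * c.max())
+    return k90 * cum.delta_f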
+[docs] +def sigmasq(htilde, psd = None, low_frequency_cutoff=None, + high_frequency_cutoff=None): + """Return the loudness of the waveform. This is defined (see Duncan + Brown's thesis) as the unnormalized matched-filter of the input waveform, + htilde, with itself. This quantity is usually referred to as (sigma)^2 + and is then used to normalize matched-filters with the data. + + Parameters + ---------- + htilde : TimeSeries or FrequencySeries + The input vector containing a waveform. + psd : {None, FrequencySeries}, optional + The psd used to weight the accumulated power. + low_frequency_cutoff : {None, float}, optional + The frequency to begin considering waveform power. + high_frequency_cutoff : {None, float}, optional + The frequency to stop considering waveform power. + + Returns + ------- + sigmasq: float + """ + htilde = make_frequency_series(htilde) + N = (len(htilde)-1) * 2 + norm = 4.0 * htilde.delta_f + kmin, kmax = get_cutoff_indices(low_frequency_cutoff, + high_frequency_cutoff, htilde.delta_f, N) + ht = htilde[kmin:kmax] + + if psd: + try: + numpy.testing.assert_almost_equal(ht.delta_f, psd.delta_f) + except AssertionError: + raise ValueError('Waveform does not have same delta_f as psd') + + if psd is None: + sq = ht.inner(ht) + else: + sq = ht.weighted_inner(ht, psd[kmin:kmax]) + + return sq.real * norm
+ + +
+[docs]
+def sigma(htilde, psd=None, low_frequency_cutoff=None,
+          high_frequency_cutoff=None):
+    """ Return the sigma of the waveform. See sigmasq for more details.
+
+    Parameters
+    ----------
+    htilde : TimeSeries or FrequencySeries
+        The input vector containing a waveform.
+    psd : {None, FrequencySeries}, optional
+        The psd used to weight the accumulated power.
+    low_frequency_cutoff : {None, float}, optional
+        The frequency to begin considering waveform power.
+    high_frequency_cutoff : {None, float}, optional
+        The frequency to stop considering waveform power.
+
+    Returns
+    -------
+    sigma : float
+        The square root of sigmasq.
+    """
+    return sqrt(sigmasq(htilde, psd, low_frequency_cutoff, high_frequency_cutoff))
+ + +
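+# Sketch added for illustration (not part of the original module): the
+# matched-filter normalization of a toy binary against a design-curve PSD.
+# Approximant, masses and cutoffs are arbitrary choices.
+def _example_sigma():
+    from pycbc.waveform import get_fd_waveform
+    from pycbc.psd import aLIGOZeroDetHighPower
+    hp, _ = get_fd_waveform(approximant="IMRPhenomD", mass1=30, mass2=30,
+                            delta_f=0.25, f_lower=20)
+    psd = aLIGOZeroDetHighPower(len(hp), hp.delta_f, 20)
+    # sigma is sqrt(sigmasq), i.e. the square root of the unnormalized
+    # matched filter of the template with itself.
+    return sigma(hp, psd=psd, low_frequency_cutoff=20)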
+[docs]
+def get_cutoff_indices(flow, fhigh, df, N):
+    """
+    Get the indices of a frequency series between which an overlap or
+    matched-filter calculation should be performed.
+
+    Parameters
+    ----------
+    flow: float
+        The frequency (in Hz) of the lower index.
+    fhigh: float
+        The frequency (in Hz) of the upper index.
+    df: float
+        The frequency step (in Hz) of the frequency series.
+    N: int
+        The number of points in the **time** series. Can be odd
+        or even.
+
+    Returns
+    -------
+    kmin: int
+    kmax: int
+    """
+    if flow:
+        kmin = int(flow / df)
+        if kmin < 0:
+            err_msg = "Start frequency cannot be negative. "
+            err_msg += "Supplied values of flow and kmin were {} and {}".format(flow, kmin)
+            raise ValueError(err_msg)
+    else:
+        kmin = 1
+    if fhigh:
+        kmax = int(fhigh / df)
+        if kmax > int((N + 1)/2.):
+            kmax = int((N + 1)/2.)
+    else:
+        # int() truncates towards 0, so this is
+        # equivalent to the floor of the float
+        kmax = int((N + 1)/2.)
+
+    if kmax <= kmin:
+        err_msg = "Kmax cannot be less than or equal to kmin. "
+        err_msg += "Provided values of frequencies (min,max) were "
+        err_msg += "{} and {} ".format(flow, fhigh)
+        err_msg += "corresponding to (kmin, kmax) of "
+        err_msg += "{} and {}.".format(kmin, kmax)
+        raise ValueError(err_msg)
+
+    return kmin, kmax
+ + +
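+# Sketch added for illustration (not part of the original module): with toy
+# numbers, a 20-1000 Hz band, delta_f = 0.25 Hz and N = 8192 time-domain
+# samples maps to the frequency-bin indices (kmin, kmax) = (80, 4000).
+def _example_get_cutoff_indices():
+    kmin, kmax = get_cutoff_indices(20.0, 1000.0, 0.25, 8192)
+    assert (kmin, kmax) == (80, 4000)
+    return kmin, kmax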
+[docs] +def matched_filter_core(template, data, psd=None, low_frequency_cutoff=None, + high_frequency_cutoff=None, h_norm=None, out=None, corr_out=None): + """ Return the complex snr and normalization. + + Return the complex snr, along with its associated normalization of the template, + matched filtered against the data. + + Parameters + ---------- + template : TimeSeries or FrequencySeries + The template waveform + data : TimeSeries or FrequencySeries + The strain data to be filtered. + psd : {FrequencySeries}, optional + The noise weighting of the filter. + low_frequency_cutoff : {None, float}, optional + The frequency to begin the filter calculation. If None, begin at the + first frequency after DC. + high_frequency_cutoff : {None, float}, optional + The frequency to stop the filter calculation. If None, continue to the + the nyquist frequency. + h_norm : {None, float}, optional + The template normalization. If none, this value is calculated internally. + out : {None, Array}, optional + An array to use as memory for snr storage. If None, memory is allocated + internally. + corr_out : {None, Array}, optional + An array to use as memory for correlation storage. If None, memory is allocated + internally. If provided, management of the vector is handled externally by the + caller. No zero'ing is done internally. + + Returns + ------- + snr : TimeSeries + A time series containing the complex snr. + correlation: FrequencySeries + A frequency series containing the correlation vector. + norm : float + The normalization of the complex snr. + """ + htilde = make_frequency_series(template) + stilde = make_frequency_series(data) + + if len(htilde) != len(stilde): + raise ValueError("Length of template and data must match") + + N = (len(stilde)-1) * 2 + kmin, kmax = get_cutoff_indices(low_frequency_cutoff, + high_frequency_cutoff, stilde.delta_f, N) + + if corr_out is not None: + qtilde = corr_out + else: + qtilde = zeros(N, dtype=complex_same_precision_as(data)) + + if out is None: + _q = zeros(N, dtype=complex_same_precision_as(data)) + elif (len(out) == N) and type(out) is Array and out.kind =='complex': + _q = out + else: + raise TypeError('Invalid Output Vector: wrong length or dtype') + + correlate(htilde[kmin:kmax], stilde[kmin:kmax], qtilde[kmin:kmax]) + + if psd is not None: + if isinstance(psd, FrequencySeries): + try: + numpy.testing.assert_almost_equal(stilde.delta_f, psd.delta_f) + except AssertionError: + raise ValueError("PSD delta_f does not match data") + qtilde[kmin:kmax] /= psd[kmin:kmax] + else: + raise TypeError("PSD must be a FrequencySeries") + + ifft(qtilde, _q) + + if h_norm is None: + h_norm = sigmasq(htilde, psd, low_frequency_cutoff, high_frequency_cutoff) + + norm = (4.0 * stilde.delta_f) / sqrt( h_norm) + + return (TimeSeries(_q, epoch=stilde._epoch, delta_t=stilde.delta_t, copy=False), + FrequencySeries(qtilde, epoch=stilde._epoch, delta_f=stilde.delta_f, copy=False), + norm)
+
+
+def smear(idx, factor):
+    """
+    This function will take as input an array of indexes and return every
+    unique index within the specified factor of the inputs.
+
+    E.g.: smear([5,7,100],2) = [3,4,5,6,7,8,9,98,99,100,101,102]
+
+    Parameters
+    ----------
+    idx : numpy.array of ints
+        The indexes to be smeared.
+    factor : int
+        The factor by which to smear out the input array.
+
+    Returns
+    -------
+    new_idx : numpy.array of ints
+        The smeared array of indexes.
+    """
+
+
+    s = [idx]
+    for i in range(factor+1):
+        # Use integer division so the returned indices remain integers
+        a = i - factor//2
+        s += [idx + a]
+    return numpy.unique(numpy.concatenate(s))
+
+
+[docs] +def matched_filter(template, data, psd=None, low_frequency_cutoff=None, + high_frequency_cutoff=None, sigmasq=None): + """ Return the complex snr. + + Return the complex snr, along with its associated normalization of the + template, matched filtered against the data. + + Parameters + ---------- + template : TimeSeries or FrequencySeries + The template waveform + data : TimeSeries or FrequencySeries + The strain data to be filtered. + psd : FrequencySeries + The noise weighting of the filter. + low_frequency_cutoff : {None, float}, optional + The frequency to begin the filter calculation. If None, begin at the + first frequency after DC. + high_frequency_cutoff : {None, float}, optional + The frequency to stop the filter calculation. If None, continue to the + the nyquist frequency. + sigmasq : {None, float}, optional + The template normalization. If none, this value is calculated + internally. + + Returns + ------- + snr : TimeSeries + A time series containing the complex snr. + """ + snr, _, norm = matched_filter_core(template, data, psd=psd, + low_frequency_cutoff=low_frequency_cutoff, + high_frequency_cutoff=high_frequency_cutoff, h_norm=sigmasq) + return snr * norm
+ + +_snr = None +
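+# Sketch added for illustration (not part of the original module): filtering
+# a template against a time-shifted copy of itself.  Approximant, masses and
+# durations are arbitrary choices.
+def _example_matched_filter():
+    from pycbc.waveform import get_td_waveform
+    hp, _ = get_td_waveform(approximant="SEOBNRv4_opt", mass1=15, mass2=15,
+                            f_lower=30, delta_t=1.0/4096)
+    hp.resize(4 * 4096)              # template and data must share a length
+    data = hp.cyclic_time_shift(-1)  # fake "data": the template shifted by 1 s
+    snr = matched_filter(hp, data, low_frequency_cutoff=30)
+    # With no noise the peak of |snr| recovers the relative shift between
+    # template and data.
+    peak = abs(snr).numpy().argmax()
+    return snr.sample_times[peak]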
+[docs] +def match( + vec1, + vec2, + psd=None, + low_frequency_cutoff=None, + high_frequency_cutoff=None, + v1_norm=None, + v2_norm=None, + subsample_interpolation=False, + return_phase=False, +): + """Return the match between the two TimeSeries or FrequencySeries. + + Return the match between two waveforms. This is equivalent to the overlap + maximized over time and phase. + + The maximization is only performed with discrete time-shifts, + or a quadratic interpolation of them if the subsample_interpolation + option is turned on; for a more precise computation + of the match between two waveforms, use the optimized_match function. + The accuracy of this function is guaranteed up to the fourth decimal place. + + Parameters + ---------- + vec1 : TimeSeries or FrequencySeries + The input vector containing a waveform. + vec2 : TimeSeries or FrequencySeries + The input vector containing a waveform. + psd : Frequency Series + A power spectral density to weight the overlap. + low_frequency_cutoff : {None, float}, optional + The frequency to begin the match. + high_frequency_cutoff : {None, float}, optional + The frequency to stop the match. + v1_norm : {None, float}, optional + The normalization of the first waveform. This is equivalent to its + sigmasq value. If None, it is internally calculated. + v2_norm : {None, float}, optional + The normalization of the second waveform. This is equivalent to its + sigmasq value. If None, it is internally calculated. + subsample_interpolation : {False, bool}, optional + If True the peak will be interpolated between samples using a simple + quadratic fit. This can be important if measuring matches very close to + 1 and can cause discontinuities if you don't use it as matches move + between discrete samples. If True the index returned will be a float. + return_phase : {False, bool}, optional + If True, also return the phase shift that gives the match. + + Returns + ------- + match: float + index: int + The number of samples to shift to get the match. + phi: float + Phase to rotate complex waveform to get the match, if desired. + """ + + htilde = make_frequency_series(vec1) + stilde = make_frequency_series(vec2) + + N = (len(htilde) - 1) * 2 + + global _snr + if _snr is None or _snr.dtype != htilde.dtype or len(_snr) != N: + _snr = zeros(N, dtype=complex_same_precision_as(vec1)) + snr, _, snr_norm = matched_filter_core( + htilde, + stilde, + psd, + low_frequency_cutoff, + high_frequency_cutoff, + v1_norm, + out=_snr, + ) + maxsnr, max_id = snr.abs_max_loc() + if v2_norm is None: + v2_norm = sigmasq(stilde, psd, low_frequency_cutoff, high_frequency_cutoff) + + if subsample_interpolation: + # This uses the implementation coded up in sbank. Thanks Nick! + # The maths for this is well summarized here: + # https://ccrma.stanford.edu/~jos/sasp/Quadratic_Interpolation_Spectral_Peaks.html + # We use adjacent points to interpolate, but wrap off the end if needed + left = abs(snr[-1]) if max_id == 0 else abs(snr[max_id - 1]) + middle = maxsnr + right = abs(snr[0]) if max_id == (len(snr) - 1) else abs(snr[max_id + 1]) + # Get derivatives + id_shift, maxsnr = quadratic_interpolate_peak(left, middle, right) + max_id = max_id + id_shift + + if return_phase: + rounded_max_id = int(round(max_id)) + phi = numpy.angle(snr[rounded_max_id]) + return maxsnr * snr_norm / sqrt(v2_norm), max_id, phi + else: + return maxsnr * snr_norm / sqrt(v2_norm), max_id
+ + +
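+# Sketch added for illustration (not part of the original module): the match
+# between two nearby-mass waveforms, maximized over relative time and phase.
+# All waveform and PSD choices are arbitrary.
+def _example_match():
+    from pycbc.waveform import get_fd_waveform
+    from pycbc.psd import aLIGOZeroDetHighPower
+    kw = dict(approximant="IMRPhenomD", delta_f=0.25, f_lower=20)
+    h1, _ = get_fd_waveform(mass1=30.0, mass2=30.0, **kw)
+    h2, _ = get_fd_waveform(mass1=30.5, mass2=30.0, **kw)
+    # Both inputs must be defined on the same frequency grid
+    flen = max(len(h1), len(h2))
+    h1.resize(flen)
+    h2.resize(flen)
+    psd = aLIGOZeroDetHighPower(flen, 0.25, 20)
+    m, _ = match(h1, h2, psd=psd, low_frequency_cutoff=20)
+    return m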
+[docs] +def overlap(vec1, vec2, psd=None, low_frequency_cutoff=None, + high_frequency_cutoff=None, normalized=True): + """ Return the overlap between the two TimeSeries or FrequencySeries. + + Parameters + ---------- + vec1 : TimeSeries or FrequencySeries + The input vector containing a waveform. + vec2 : TimeSeries or FrequencySeries + The input vector containing a waveform. + psd : Frequency Series + A power spectral density to weight the overlap. + low_frequency_cutoff : {None, float}, optional + The frequency to begin the overlap. + high_frequency_cutoff : {None, float}, optional + The frequency to stop the overlap. + normalized : {True, boolean}, optional + Set if the overlap is normalized. If true, it will range from 0 to 1. + + Returns + ------- + overlap: float + """ + + return overlap_cplx(vec1, vec2, psd=psd, \ + low_frequency_cutoff=low_frequency_cutoff,\ + high_frequency_cutoff=high_frequency_cutoff,\ + normalized=normalized).real
+ + +
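+# Sketch added for illustration (not part of the original module): the
+# normalized overlap between a waveform and a slightly time-shifted copy.
+# Unlike match(), no maximization over the shift is performed, so the value
+# is below the corresponding match.  Waveform choices are arbitrary.
+def _example_overlap():
+    from pycbc.waveform import get_td_waveform
+    hp, _ = get_td_waveform(approximant="SEOBNRv4_opt", mass1=15, mass2=15,
+                            f_lower=30, delta_t=1.0/4096)
+    hp.resize(4 * 4096)
+    shifted = hp.cyclic_time_shift(0.001)
+    return overlap(hp, shifted, low_frequency_cutoff=30)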
+[docs] +def overlap_cplx(vec1, vec2, psd=None, low_frequency_cutoff=None, + high_frequency_cutoff=None, normalized=True): + """Return the complex overlap between the two TimeSeries or FrequencySeries. + + Parameters + ---------- + vec1 : TimeSeries or FrequencySeries + The input vector containing a waveform. + vec2 : TimeSeries or FrequencySeries + The input vector containing a waveform. + psd : Frequency Series + A power spectral density to weight the overlap. + low_frequency_cutoff : {None, float}, optional + The frequency to begin the overlap. + high_frequency_cutoff : {None, float}, optional + The frequency to stop the overlap. + normalized : {True, boolean}, optional + Set if the overlap is normalized. If true, it will range from 0 to 1. + + Returns + ------- + overlap: complex + """ + htilde = make_frequency_series(vec1) + stilde = make_frequency_series(vec2) + + kmin, kmax = get_cutoff_indices(low_frequency_cutoff, + high_frequency_cutoff, stilde.delta_f, (len(stilde)-1) * 2) + + if psd: + inner = (htilde[kmin:kmax]).weighted_inner(stilde[kmin:kmax], psd[kmin:kmax]) + else: + inner = (htilde[kmin:kmax]).inner(stilde[kmin:kmax]) + + if normalized: + sig1 = sigma(vec1, psd=psd, low_frequency_cutoff=low_frequency_cutoff, + high_frequency_cutoff=high_frequency_cutoff) + sig2 = sigma(vec2, psd=psd, low_frequency_cutoff=low_frequency_cutoff, + high_frequency_cutoff=high_frequency_cutoff) + norm = 1 / sig1 / sig2 + else: + norm = 1 + + return 4 * htilde.delta_f * inner * norm
+ + +def quadratic_interpolate_peak(left, middle, right): + """ Interpolate the peak and offset using a quadratic approximation + + Parameters + ---------- + left : numpy array + Values at a relative bin value of [-1] + middle : numpy array + Values at a relative bin value of [0] + right : numpy array + Values at a relative bin value of [1] + + Returns + ------- + bin_offset : numpy array + Array of bins offsets, each in the range [-1/2, 1/2] + peak_values : numpy array + Array of the estimated peak values at the interpolated offset + """ + bin_offset = 1.0/2.0 * (left - right) / (left - 2 * middle + right) + peak_value = middle - 0.25 * (left - right) * bin_offset + return bin_offset, peak_value + + +
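+# Sketch added for illustration (not part of the original module):
+# interpolating a peak from three samples.  With values 3, 5, 4 around the
+# maximum, the fitted parabola peaks 0.5*(3-4)/(3-10+4) = 1/6 of a bin to the
+# right of the middle sample, with an interpolated peak value of about 5.04.
+# Scalars behave the same way as arrays here.
+def _example_quadratic_interpolate_peak():
+    offset, peak = quadratic_interpolate_peak(3.0, 5.0, 4.0)
+    return offset, peak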
+[docs] +class LiveBatchMatchedFilter(object): + + """Calculate SNR and signal consistency tests in a batched progression""" + + def __init__(self, templates, snr_threshold, chisq_bins, sg_chisq, + maxelements=2**27, + snr_abort_threshold=None, + newsnr_threshold=None, + max_triggers_in_batch=None): + """Create a batched matchedfilter instance + + Parameters + ---------- + templates: list of `FrequencySeries` + List of templates from the FilterBank class. + snr_threshold: float + Minimum value to record peaks in the SNR time series. + chisq_bins: str + Str that determines how the number of chisq bins varies as a + function of the template bank parameters. + sg_chisq: pycbc.vetoes.SingleDetSGChisq + Instance of the sg_chisq class to calculate sg_chisq with. + maxelements: {int, 2**27} + Maximum size of a batched fourier transform. + snr_abort_threshold: {float, None} + If the SNR is above this threshold, do not record any triggers. + newsnr_threshold: {float, None} + Only record triggers that have a re-weighted NewSNR above this + threshold. + max_triggers_in_batch: {int, None} + Record X number of the loudest triggers by SNR in each MPI + process. Signal consistency values will also only be calculated + for these triggers. + """ + self.snr_threshold = snr_threshold + self.snr_abort_threshold = snr_abort_threshold + self.newsnr_threshold = newsnr_threshold + self.max_triggers_in_batch = max_triggers_in_batch + + from pycbc import vetoes + self.power_chisq = vetoes.SingleDetPowerChisq(chisq_bins, None) + self.sg_chisq = sg_chisq + + durations = numpy.array([1.0 / t.delta_f for t in templates]) + + lsort = durations.argsort() + durations = durations[lsort] + templates = [templates[li] for li in lsort] + + # Figure out how to chunk together the templates into groups to process + _, counts = numpy.unique(durations, return_counts=True) + tsamples = [(len(t) - 1) * 2 for t in templates] + grabs = maxelements / numpy.unique(tsamples) + + chunks = numpy.array([]) + num = 0 + for count, grab in zip(counts, grabs): + chunks = numpy.append(chunks, numpy.arange(num, count + num, grab)) + chunks = numpy.append(chunks, [count + num]) + num += count + chunks = numpy.unique(chunks).astype(numpy.uint32) + + # We now have how many templates to grab at a time. 
+ self.chunks = chunks[1:] - chunks[0:-1] + + self.out_mem = {} + self.cout_mem = {} + self.ifts = {} + chunk_durations = [durations[i] for i in chunks[:-1]] + self.chunk_tsamples = [tsamples[int(i)] for i in chunks[:-1]] + samples = self.chunk_tsamples * self.chunks + + # Create workspace memory for correlate and snr + mem_ids = [(a, b) for a, b in zip(chunk_durations, self.chunks)] + mem_types = set(zip(mem_ids, samples)) + + self.tgroups, self.mids = [], [] + for i, size in mem_types: + dur, count = i + self.out_mem[i] = zeros(size, dtype=numpy.complex64) + self.cout_mem[i] = zeros(size, dtype=numpy.complex64) + self.ifts[i] = IFFT(self.cout_mem[i], self.out_mem[i], + nbatch=count, + size=len(self.cout_mem[i]) // count) + + # Split the templates into their processing groups + for dur, count in mem_ids: + tgroup = templates[0:count] + self.tgroups.append(tgroup) + self.mids.append((dur, count)) + templates = templates[count:] + + # Associate the snr and corr memory block to each template + self.corr = [] + for i, tgroup in enumerate(self.tgroups): + psize = self.chunk_tsamples[i] + s = 0 + e = psize + mid = self.mids[i] + for htilde in tgroup: + htilde.out = self.out_mem[mid][s:e] + htilde.cout = self.cout_mem[mid][s:e] + s += psize + e += psize + self.corr.append(BatchCorrelator(tgroup, [t.cout for t in tgroup], len(tgroup[0]))) + +
+[docs] + def set_data(self, data): + """Set the data reader object to use""" + self.data = data + self.block_id = 0
+ + +
+[docs] + def combine_results(self, results): + """Combine results from different batches of filtering""" + result = {} + for key in results[0]: + result[key] = numpy.concatenate([r[key] for r in results]) + return result
+ + +
+[docs] + def process_data(self, data_reader): + """Process the data for all of the templates""" + self.set_data(data_reader) + return self.process_all()
+ + +
+[docs] + def process_all(self): + """Process every batch group and return as single result""" + results = [] + veto_info = [] + while 1: + result, veto = self._process_batch() + if result is False: return False + if result is None: break + results.append(result) + veto_info += veto + + result = self.combine_results(results) + + if self.max_triggers_in_batch: + sort = result['snr'].argsort()[::-1][:self.max_triggers_in_batch] + for key in result: + result[key] = result[key][sort] + + tmp = veto_info + veto_info = [tmp[i] for i in sort] + + result = self._process_vetoes(result, veto_info) + return result
+ + + def _process_vetoes(self, results, veto_info): + """Calculate signal based vetoes""" + chisq = numpy.array(numpy.zeros(len(veto_info)), numpy.float32, ndmin=1) + dof = numpy.array(numpy.zeros(len(veto_info)), numpy.uint32, ndmin=1) + sg_chisq = numpy.array(numpy.zeros(len(veto_info)), numpy.float32, + ndmin=1) + results['chisq'] = chisq + results['chisq_dof'] = dof + results['sg_chisq'] = sg_chisq + + keep = [] + for i, (snrv, norm, l, htilde, stilde) in enumerate(veto_info): + correlate(htilde, stilde, htilde.cout) + c, d = self.power_chisq.values(htilde.cout, snrv, + norm, stilde.psd, [l], htilde) + chisq[i] = c[0] / d[0] + dof[i] = d[0] + + sgv = self.sg_chisq.values(stilde, htilde, stilde.psd, + snrv, norm, c, d, [l]) + if sgv is not None: + sg_chisq[i] = sgv[0] + + if self.newsnr_threshold: + newsnr = ranking.newsnr(results['snr'][i], chisq[i]) + if newsnr >= self.newsnr_threshold: + keep.append(i) + + if self.newsnr_threshold: + keep = numpy.array(keep, dtype=numpy.uint32) + for key in results: + results[key] = results[key][keep] + + return results + + def _process_batch(self): + """Process only a single batch group of data""" + if self.block_id == len(self.tgroups): + return None, None + + tgroup = self.tgroups[self.block_id] + psize = self.chunk_tsamples[self.block_id] + mid = self.mids[self.block_id] + stilde = self.data.overwhitened_data(tgroup[0].delta_f) + psd = stilde.psd + + valid_end = int(psize - self.data.trim_padding) + valid_start = int(valid_end - self.data.blocksize * self.data.sample_rate) + + seg = slice(valid_start, valid_end) + + self.corr[self.block_id].execute(stilde) + self.ifts[mid].execute() + + self.block_id += 1 + + snr = numpy.zeros(len(tgroup), dtype=numpy.complex64) + time = numpy.zeros(len(tgroup), dtype=numpy.float64) + templates = numpy.zeros(len(tgroup), dtype=numpy.uint64) + sigmasq = numpy.zeros(len(tgroup), dtype=numpy.float32) + + time[:] = self.data.start_time + + result = {} + tkeys = tgroup[0].params.dtype.names + for key in tkeys: + result[key] = [] + + veto_info = [] + + # Find the peaks in our SNR times series from the various templates + i = 0 + for htilde in tgroup: + if hasattr(htilde, 'time_offset'): + if 'time_offset' not in result: + result['time_offset'] = [] + + l = htilde.out[seg].abs_arg_max() + + sgm = htilde.sigmasq(psd) + norm = 4.0 * htilde.delta_f / (sgm ** 0.5) + + l += valid_start + snrv = numpy.array([htilde.out[l]]) + + # If nothing is above threshold we can exit this template + s = abs(snrv[0]) * norm + if s < self.snr_threshold: + continue + + time[i] += float(l - valid_start) / self.data.sample_rate + + # We have an SNR so high that we will drop the entire analysis + # of this chunk of time! 
+ if self.snr_abort_threshold is not None and s > self.snr_abort_threshold: + logger.info("We are seeing some *really* high SNRs, let's " + "assume they aren't signals and just give up") + return False, [] + + veto_info.append((snrv, norm, l, htilde, stilde)) + + snr[i] = snrv[0] * norm + sigmasq[i] = sgm + templates[i] = htilde.id + if not hasattr(htilde, 'dict_params'): + htilde.dict_params = {} + for key in tkeys: + htilde.dict_params[key] = htilde.params[key] + + for key in tkeys: + result[key].append(htilde.dict_params[key]) + + if hasattr(htilde, 'time_offset'): + result['time_offset'].append(htilde.time_offset) + + i += 1 + + result['snr'] = abs(snr[0:i]) + result['coa_phase'] = numpy.angle(snr[0:i]) + result['end_time'] = time[0:i] + result['template_id'] = templates[0:i] + result['sigmasq'] = sigmasq[0:i] + + for key in tkeys: + result[key] = numpy.array(result[key]) + + if 'time_offset' in result: + result['time_offset'] = numpy.array(result['time_offset']) + + return result, veto_info
+ + +
+[docs] +def followup_event_significance(ifo, data_reader, bank, + template_id, coinc_times, + coinc_threshold=0.005, + lookback=150, duration=0.095): + """Given a detector, a template waveform and a set of candidate event + times in different detectors, perform an on-source/off-source analysis + to determine if the SNR in the first detector has a significant peak + in the on-source window. The significance is given in terms of a + p-value. See Dal Canton et al. 2021 (https://arxiv.org/abs/2008.07494) + for details. A portion of the SNR time series around the on-source window + is also returned for use in BAYESTAR. + + If the calculation cannot be carried out, for example because `ifo` is + not in observing mode at the requested time, then None is returned. + Otherwise, the dict contains the following keys. `snr_series` is a + TimeSeries object with the SNR time series for BAYESTAR. `peak_time` is the + time of maximum SNR in the on-source window. `pvalue` is the p-value for + the maximum on-source SNR compared to the off-source realizations. + `pvalue_saturated` is a bool indicating whether the p-value is limited by + the number of off-source realizations, i.e. whether the maximum on-source + SNR is larger than all the off-source ones. `sigma2` is the SNR + normalization (squared) for the given template and detector. + + Parameters + ---------- + ifo: str + Which detector is being used for the calculation. + data_reader: StrainBuffer + StrainBuffer object providing the data for the given detector. + bank: LiveFilterBank + Template bank object providing the template related quantities. + template_id: int + Index of the template in the bank. + coinc_times: dict + Dictionary keyed by detector names reporting the coalescence times of + a candidate measured at the different detectors. Used to define the + on-source window of the candidate in `ifo`. + coinc_threshold: float + Nominal statistical uncertainty in `coinc_times`; expands the + on-source window by twice the given amount. + lookback: float + Nominal amount of time to use for the calculation of the onsource and + offsource SNR time series. The actual time may be reduced depending on + the duration of the template and the strain buffer in the data reader + (if so, a warning is logged). + duration: float + Duration of the SNR time series to be reported to BAYESTAR. + + Returns + ------- + followup_info: dict or None + Results of the followup calculation (see above) or None if `ifo` did + not have usable data. + """ + from pycbc.waveform import get_waveform_filter_length_in_time + tmplt = bank.table[template_id] + length_in_time = get_waveform_filter_length_in_time(tmplt['approximant'], + tmplt) + + # calculate onsource time range + from pycbc.detector import Detector + onsource_start = -numpy.inf + onsource_end = numpy.inf + fdet = Detector(ifo) + + for cifo in coinc_times: + time = coinc_times[cifo] + dtravel = Detector(cifo).light_travel_time_to_detector(fdet) + if time - dtravel > onsource_start: + onsource_start = time - dtravel + if time + dtravel < onsource_end: + onsource_end = time + dtravel + + # Source must be within this time window to be considered a possible + # coincidence + onsource_start -= coinc_threshold + onsource_end += coinc_threshold + + # Calculate how much time is needed to calculate the significance. 
+ # At the minimum, we need enough time to include the lookback, plus time + # that we will throw away because of corruption from finite-duration filter + # responses (this is equal to the nominal padding plus the template + # duration). Next, for efficiency, we round the resulting duration up to + # align it with one of the frequency resolutions preferred by the template + # bank. And finally, the resulting duration must fit into the strain buffer + # available in the data reader, so we check that. + trim_pad = data_reader.trim_padding * data_reader.strain.delta_t + buffer_duration = lookback + 2 * trim_pad + length_in_time + buffer_samples = bank.round_up(int(buffer_duration * bank.sample_rate)) + max_safe_buffer_samples = int( + 0.9 * data_reader.strain.duration * bank.sample_rate + ) + if buffer_samples > max_safe_buffer_samples: + buffer_samples = max_safe_buffer_samples + new_lookback = ( + buffer_samples / bank.sample_rate - (2 * trim_pad + length_in_time) + ) + # Require a minimum lookback time of twice the onsource window or SNR + # time series (whichever is longer) so we have enough data for the + # onsource window, the SNR time series, and at least a few background + # samples + min_required_lookback = 2 * max(onsource_end - onsource_start, duration) + if new_lookback > min_required_lookback: + logging.warning( + 'Strain buffer too short for a lookback time of %f s, ' + 'reducing lookback to %f s', + lookback, + new_lookback + ) + else: + logging.error( + 'Strain buffer too short to compute the followup SNR time ' + 'series for template %d, will not use %s for followup. ' + 'Either use shorter templates, or raise --max-length.', + template_id, + ifo + ) + return None + buffer_duration = buffer_samples / bank.sample_rate + + # Require all strain be valid within lookback time + if data_reader.state is not None: + state_start_time = ( + data_reader.strain.end_time + - data_reader.reduced_pad * data_reader.strain.delta_t + - buffer_duration + ) + if not data_reader.state.is_extent_valid( + state_start_time, buffer_duration + ): + logging.info( + '%s strain buffer contains invalid data during lookback, ' + 'will not use for followup', + ifo + ) + return None + + # We won't require that all DQ checks be valid for now, except at + # onsource time. 
+ if data_reader.dq is not None: + dq_start_time = onsource_start - duration / 2.0 + dq_duration = onsource_end - onsource_start + duration + if not data_reader.dq.is_extent_valid(dq_start_time, dq_duration): + logging.info( + '%s DQ buffer indicates invalid data during onsource window, ' + 'will not use for followup', + ifo + ) + return None + + # Calculate SNR time series for the entire lookback duration + htilde = bank.get_template( + template_id, delta_f=bank.sample_rate / float(buffer_samples) + ) + stilde = data_reader.overwhitened_data(htilde.delta_f) + + sigma2 = htilde.sigmasq(stilde.psd) + snr, _, norm = matched_filter_core(htilde, stilde, h_norm=sigma2) + + # Find peak SNR in on-source and determine p-value + onsrc = snr.time_slice(onsource_start, onsource_end) + peak = onsrc.abs_arg_max() + peak_time = peak * snr.delta_t + onsrc.start_time + peak_value = abs(onsrc[peak]) + + bstart = float(snr.start_time) + length_in_time + trim_pad + bkg = abs(snr.time_slice(bstart, onsource_start)).numpy() + + window = int((onsource_end - onsource_start) * snr.sample_rate) + nsamples = int(len(bkg) / window) + + peaks = bkg[:nsamples*window].reshape(nsamples, window).max(axis=1) + num_louder_bg = (peaks >= peak_value).sum() + pvalue = (1 + num_louder_bg) / float(1 + nsamples) + pvalue_saturated = num_louder_bg == 0 + + # Return recentered source SNR for bayestar, along with p-value, and trig + peak_full = int((peak_time - snr.start_time) / snr.delta_t) + half_dur_samples = int(snr.sample_rate * duration / 2) + snr_slice = slice(peak_full - half_dur_samples, + peak_full + half_dur_samples + 1) + baysnr = snr[snr_slice] + + logger.info('Adding %s to candidate, pvalue %s, %s samples', ifo, + pvalue, nsamples) + + return { + 'snr_series': baysnr * norm, + 'peak_time': peak_time, + 'pvalue': pvalue, + 'pvalue_saturated': pvalue_saturated, + 'sigma2': sigma2 + }
+ + +
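+# Sketch added for illustration (not part of the original module): the
+# empirical p-value used above ranks the on-source peak |SNR| against the
+# maxima of equal-duration off-source windows.  The numbers here are made up.
+def _example_offsource_pvalue():
+    import numpy
+    peak_value = 6.0
+    offsource_peaks = numpy.array([4.1, 5.2, 3.9, 6.3, 4.4, 4.8, 5.0, 4.2])
+    num_louder_bg = (offsource_peaks >= peak_value).sum()
+    pvalue = (1 + num_louder_bg) / float(1 + len(offsource_peaks))
+    pvalue_saturated = num_louder_bg == 0
+    return pvalue, pvalue_saturated  # (2/9, False) for these toy numbers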
+[docs] +def compute_followup_snr_series(data_reader, htilde, trig_time, + duration=0.095, check_state=True, + coinc_window=0.05): + """Given a StrainBuffer, a template frequency series and a trigger time, + compute a portion of the SNR time series centered on the trigger for its + rapid sky localization and followup. + + If the trigger time is too close to the boundary of the valid data segment + the SNR series is calculated anyway and might be slightly contaminated by + filter and wrap-around effects. For reasonable durations this will only + affect a small fraction of the triggers and probably in a negligible way. + + Parameters + ---------- + data_reader : StrainBuffer + The StrainBuffer object to read strain data from. + + htilde : FrequencySeries + The frequency series containing the template waveform. + + trig_time : {float, lal.LIGOTimeGPS} + The trigger time. + + duration : float (optional) + Duration of the computed SNR series in seconds. If omitted, it defaults + to twice the Earth light travel time plus 10 ms of timing uncertainty. + + check_state : boolean + If True, and the detector was offline or flagged for bad data quality + at any point during the inspiral, then return (None, None) instead. + + coinc_window : float (optional) + Maximum possible time between coincident triggers at different + detectors. This is needed to properly determine data padding. + + Returns + ------- + snr : TimeSeries + The portion of SNR around the trigger. None if the detector is offline + or has bad data quality, and check_state is True. + """ + if check_state: + # was the detector observing for the full amount of involved data? + state_start_time = trig_time - duration / 2 - htilde.length_in_time + state_end_time = trig_time + duration / 2 + state_duration = state_end_time - state_start_time + if data_reader.state is not None: + if not data_reader.state.is_extent_valid(state_start_time, + state_duration): + return None + + # was the data quality ok for the full amount of involved data? + dq_start_time = state_start_time - data_reader.dq_padding + dq_duration = state_duration + 2 * data_reader.dq_padding + if data_reader.dq is not None: + if not data_reader.dq.is_extent_valid(dq_start_time, dq_duration): + return None + + stilde = data_reader.overwhitened_data(htilde.delta_f) + snr, _, norm = matched_filter_core(htilde, stilde, + h_norm=htilde.sigmasq(stilde.psd)) + + valid_end = int(len(snr) - data_reader.trim_padding) + valid_start = int(valid_end - data_reader.blocksize * snr.sample_rate) + + half_dur_samples = int(snr.sample_rate * duration / 2) + coinc_samples = int(snr.sample_rate * coinc_window) + valid_start -= half_dur_samples + coinc_samples + valid_end += half_dur_samples + if valid_start < 0 or valid_end > len(snr)-1: + raise ValueError(('Requested SNR duration ({0} s)' + ' too long').format(duration)) + + # Onsource slice for Bayestar followup + onsource_idx = float(trig_time - snr.start_time) * snr.sample_rate + onsource_idx = int(round(onsource_idx)) + onsource_slice = slice(onsource_idx - half_dur_samples, + onsource_idx + half_dur_samples + 1) + return snr[onsource_slice] * norm
+ + +
+[docs] +def optimized_match( + vec1, + vec2, + psd=None, + low_frequency_cutoff=None, + high_frequency_cutoff=None, + v1_norm=None, + v2_norm=None, + return_phase=False, +): + """Given two waveforms (as numpy arrays), + compute the optimized match between them, making use + of scipy.minimize_scalar. + + This function computes the same quantities as "match"; + it is more accurate and slower. + + Parameters + ---------- + vec1 : TimeSeries or FrequencySeries + The input vector containing a waveform. + vec2 : TimeSeries or FrequencySeries + The input vector containing a waveform. + psd : FrequencySeries + A power spectral density to weight the overlap. + low_frequency_cutoff : {None, float}, optional + The frequency to begin the match. + high_frequency_cutoff : {None, float}, optional + The frequency to stop the match. + v1_norm : {None, float}, optional + The normalization of the first waveform. This is equivalent to its + sigmasq value. If None, it is internally calculated. + v2_norm : {None, float}, optional + The normalization of the second waveform. This is equivalent to its + sigmasq value. If None, it is internally calculated. + return_phase : {False, bool}, optional + If True, also return the phase shift that gives the match. + + Returns + ------- + match: float + index: int + The number of samples to shift to get the match. + phi: float + Phase to rotate complex waveform to get the match, if desired. + """ + + from scipy.optimize import minimize_scalar + + htilde = make_frequency_series(vec1) + stilde = make_frequency_series(vec2) + + assert numpy.isclose(htilde.delta_f, stilde.delta_f) + delta_f = stilde.delta_f + + assert numpy.isclose(htilde.delta_t, stilde.delta_t) + delta_t = stilde.delta_t + + # a first time shift to get in the nearby region; + # then the optimization is only used to move to the + # correct subsample-timeshift witin (-delta_t, delta_t) + # of this + _, max_id, _ = match( + htilde, + stilde, + psd=psd, + low_frequency_cutoff=low_frequency_cutoff, + high_frequency_cutoff=high_frequency_cutoff, + return_phase=True, + ) + + stilde = stilde.cyclic_time_shift(-max_id * delta_t) + + frequencies = stilde.sample_frequencies.numpy() + waveform_1 = htilde.numpy() + waveform_2 = stilde.numpy() + + N = (len(stilde) - 1) * 2 + kmin, kmax = get_cutoff_indices( + low_frequency_cutoff, high_frequency_cutoff, delta_f, N + ) + mask = slice(kmin, kmax) + + waveform_1 = waveform_1[mask] + waveform_2 = waveform_2[mask] + frequencies = frequencies[mask] + + if psd is not None: + psd_arr = psd.numpy()[mask] + else: + psd_arr = numpy.ones_like(waveform_1) + + def product(a, b): + integral = numpy.sum(numpy.conj(a) * b / psd_arr) * delta_f + return 4 * abs(integral), numpy.angle(integral) + + def product_offset(dt): + offset = numpy.exp(2j * numpy.pi * frequencies * dt) + return product(waveform_1, waveform_2 * offset) + + def to_minimize(dt): + return -product_offset(dt)[0] + + norm_1 = ( + sigmasq(htilde, psd, low_frequency_cutoff, high_frequency_cutoff) + if v1_norm is None + else v1_norm + ) + norm_2 = ( + sigmasq(stilde, psd, low_frequency_cutoff, high_frequency_cutoff) + if v2_norm is None + else v2_norm + ) + + norm = numpy.sqrt(norm_1 * norm_2) + + res = minimize_scalar( + to_minimize, + method="brent", + bracket=(-delta_t, delta_t) + ) + m, angle = product_offset(res.x) + + if return_phase: + return m / norm, res.x / delta_t + max_id, -angle + else: + return m / norm, res.x / delta_t + max_id
+ + + +__all__ = ['match', 'optimized_match', 'matched_filter', 'sigmasq', 'sigma', 'get_cutoff_indices', + 'sigmasq_series', 'make_frequency_series', 'overlap', + 'overlap_cplx', 'matched_filter_core', 'correlate', + 'MatchedFilterControl', 'LiveBatchMatchedFilter', + 'MatchedFilterSkyMaxControl', 'MatchedFilterSkyMaxControlNoPhase', + 'compute_max_snr_over_sky_loc_stat_no_phase', + 'compute_max_snr_over_sky_loc_stat', + 'compute_followup_snr_series', + 'compute_u_val_for_sky_loc_stat_no_phase', + 'compute_u_val_for_sky_loc_stat', + 'followup_event_significance'] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/filter/matchedfilter_numpy.html b/latest/html/_modules/pycbc/filter/matchedfilter_numpy.html new file mode 100644 index 00000000000..a8f36f2314a --- /dev/null +++ b/latest/html/_modules/pycbc/filter/matchedfilter_numpy.html @@ -0,0 +1,150 @@ + + + + + + pycbc.filter.matchedfilter_numpy — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.filter.matchedfilter_numpy

+# Copyright (C) 2017 Ian Harry
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+import numpy
+
+
+[docs] +def correlate(x, y, z): + z.data[:] = numpy.conjugate(x.data)[:] + z *= y
+ +
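+# Sketch added for illustration (not part of the original module):
+# correlate() writes conj(x) * y into the pre-allocated output array z.
+def _example_correlate():
+    import numpy
+    from pycbc.types import Array
+    x = Array(numpy.array([1.0 + 1.0j, 2.0 - 1.0j]))
+    y = Array(numpy.array([0.5 + 0.0j, 1.0 + 2.0j]))
+    z = Array(numpy.zeros(2, dtype=numpy.complex128))
+    correlate(x, y, z)
+    # z is now conj(x) * y == [0.5 - 0.5j, 0.0 + 5.0j]
+    return z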
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/filter/qtransform.html b/latest/html/_modules/pycbc/filter/qtransform.html new file mode 100644 index 00000000000..d527bc275ea --- /dev/null +++ b/latest/html/_modules/pycbc/filter/qtransform.html @@ -0,0 +1,370 @@ + + + + + + pycbc.filter.qtransform — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.filter.qtransform

+# Copyright (C) 2017  Hunter A. Gabbard, Andrew Lundgren,
+#                     Duncan Macleod, Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""
+This module retrieves a time series and then calculates
+the q-transform of that time series
+"""
+
+import numpy
+from numpy import ceil, log, exp
+from pycbc.types.timeseries import FrequencySeries, TimeSeries
+from pycbc.fft import ifft
+from pycbc.types import zeros
+
+
+[docs] +def qplane(qplane_tile_dict, fseries, return_complex=False): + """Performs q-transform on each tile for each q-plane and selects + tile with the maximum energy. Q-transform can then + be interpolated to a desired frequency and time resolution. + + Parameters + ---------- + qplane_tile_dict: + Dictionary containing a list of q-tile tupples for each q-plane + fseries: 'pycbc FrequencySeries' + frequency-series data set + return_complex: {False, bool} + Return the raw complex series instead of the normalized power. + + Returns + ------- + q : float + The q of the maximum q plane + times : numpy.ndarray + The time that the qtransform is sampled. + freqs : numpy.ndarray + The frequencies that the qtransform is samled. + qplane : numpy.ndarray (2d) + The two dimensional interpolated qtransform of this time series. + """ + # store q-transforms for each q in a dict + qplanes = {} + max_energy, max_key = None, None + for i, q in enumerate(qplane_tile_dict): + energies = [] + for f0 in qplane_tile_dict[q]: + energy = qseries(fseries, q, f0, return_complex=return_complex) + menergy = abs(energy).max() + energies.append(energy) + + if i == 0 or menergy > max_energy: + max_energy = menergy + max_key = q + + qplanes[q] = energies + + # record q-transform output for peak q + plane = qplanes[max_key] + frequencies = qplane_tile_dict[max_key] + times = plane[0].sample_times.numpy() + plane = numpy.array([v.numpy() for v in plane]) + return max_key, times, frequencies, numpy.array(plane)
+ + +
+[docs] +def qtiling(fseries, qrange, frange, mismatch=0.2): + """Iterable constructor of QTile tuples + + Parameters + ---------- + fseries: 'pycbc FrequencySeries' + frequency-series data set + qrange: + upper and lower bounds of q range + frange: + upper and lower bounds of frequency range + mismatch: + percentage of desired fractional mismatch + + Returns + ------- + qplane_tile_dict: 'dict' + dictionary containing Q-tile tuples for a set of Q-planes + """ + qplane_tile_dict = {} + qs = list(_iter_qs(qrange, deltam_f(mismatch))) + for q in qs: + qtilefreq = _iter_frequencies(q, frange, mismatch, fseries.duration) + qplane_tile_dict[q] = numpy.array(list(qtilefreq)) + + return qplane_tile_dict
+ + +
+[docs] +def deltam_f(mismatch): + """Fractional mismatch between neighbouring tiles + + Parameters + ---------- + mismatch: 'float' + percentage of desired fractional mismatch + + Returns + ------- + :type: 'float' + """ + return 2 * (mismatch / 3.) ** (1/2.)
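As a quick numerical check of the spacing formula above (purely illustrative):

    >>> from pycbc.filter.qtransform import deltam_f
    >>> deltam_f(0.2)   # 2 * (0.2 / 3) ** 0.5
    0.5163977794943222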
+ + +def _iter_qs(qrange, deltam): + """Iterate over the Q values + + Parameters + ---------- + qrange: + upper and lower bounds of q range + deltam: + Fractional mismatch between neighbouring tiles + + Returns + ------- + Q-value: + Q value for Q-tile + """ + + # work out how many Qs we need + cumum = log(float(qrange[1]) / qrange[0]) / 2**(1/2.) + nplanes = int(max(ceil(cumum / deltam), 1)) + dq = cumum / nplanes + for i in range(nplanes): + yield qrange[0] * exp(2**(1/2.) * dq * (i + .5)) + return + +def _iter_frequencies(q, frange, mismatch, dur): + """Iterate over the frequencies of this 'QPlane' + + Parameters + ---------- + q: + q value + frange: 'list' + upper and lower bounds of frequency range + mismatch: + percentage of desired fractional mismatch + dur: + duration of timeseries in seconds + + Returns + ------- + frequencies: + Q-Tile frequency + """ + # work out how many frequencies we need + minf, maxf = frange + fcum_mismatch = log(float(maxf) / minf) * (2 + q**2)**(1/2.) / 2. + nfreq = int(max(1, ceil(fcum_mismatch / deltam_f(mismatch)))) + fstep = fcum_mismatch / nfreq + fstepmin = 1. / dur + # for each frequency, yield a QTile + for i in range(nfreq): + yield (float(minf) * + exp(2 / (2 + q**2)**(1/2.) * (i + .5) * fstep) // + fstepmin * fstepmin) + return + +
+[docs] +def qseries(fseries, Q, f0, return_complex=False): + """Calculate the energy 'TimeSeries' for the given fseries + + Parameters + ---------- + fseries: 'pycbc FrequencySeries' + frequency-series data set + Q: + q value + f0: + central frequency + return_complex: {False, bool} + Return the raw complex series instead of the normalized power. + + Returns + ------- + energy: '~pycbc.types.TimeSeries' + A 'TimeSeries' of the normalized energy from the Q-transform of + this tile against the data. + """ + # normalize and generate bi-square window + qprime = Q / 11**(1/2.) + norm = numpy.sqrt(315. * qprime / (128. * f0)) + window_size = 2 * int(f0 / qprime * fseries.duration) + 1 + xfrequencies = numpy.linspace(-1., 1., window_size) + + start = int((f0 - (f0 / qprime)) * fseries.duration) + end = int(start + window_size) + center = (start + end) // 2 + + windowed = fseries[start:end] * (1 - xfrequencies ** 2) ** 2 * norm + + tlen = (len(fseries)-1) * 2 + windowed.resize(tlen) + windowed.roll(-center) + + # calculate the time series for this q -value + windowed = FrequencySeries(windowed, delta_f=fseries.delta_f, + epoch=fseries.start_time) + ctseries = TimeSeries(zeros(tlen, dtype=numpy.complex128), + delta_t=fseries.delta_t) + ifft(windowed, ctseries) + + if return_complex: + return ctseries + else: + energy = ctseries.squared_norm() + medianenergy = numpy.median(energy.numpy()) + return energy / float(medianenergy)
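A single tile can also be transformed directly with qseries. In this sketch `fseries` is assumed to be a pycbc FrequencySeries (for example the output of TimeSeries.to_frequencyseries()); the Q value and central frequency are arbitrary illustrative choices.

    from pycbc.filter.qtransform import qseries

    # normalized energy time series for one tile at Q = 20, f0 = 100 Hz
    energy = qseries(fseries, 20, 100.0)
    print(energy.max(), float(energy.duration))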
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/filter/resample.html b/latest/html/_modules/pycbc/filter/resample.html new file mode 100644 index 00000000000..534bced2f09 --- /dev/null +++ b/latest/html/_modules/pycbc/filter/resample.html @@ -0,0 +1,595 @@ + + + + + + pycbc.filter.resample — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.filter.resample

+# Copyright (C) 2012  Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+import functools
+import lal
+import numpy
+import scipy.signal
+from pycbc.types import TimeSeries, Array, zeros, FrequencySeries, real_same_precision_as
+from pycbc.types import complex_same_precision_as
+from pycbc.fft import ifft, fft
+
+_resample_func = {numpy.dtype('float32'): lal.ResampleREAL4TimeSeries,
+                 numpy.dtype('float64'): lal.ResampleREAL8TimeSeries}
+
+@functools.lru_cache(maxsize=20)
+def cached_firwin(*args, **kwargs):
+    """Cache the FIR filter coefficients.
+    This is mostly done for PyCBC Live, which rapidly and repeatedly resamples data.
+    """
+    return scipy.signal.firwin(*args, **kwargs)
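Because cached_firwin is wrapped in functools.lru_cache, repeated calls with the same (hashable) arguments return the previously computed coefficient array instead of recomputing it. A small illustrative check:

    >>> a = cached_firwin(21, 0.25, window=('kaiser', 5))
    >>> b = cached_firwin(21, 0.25, window=('kaiser', 5))
    >>> a is b   # the second call is served from the cache
    True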
+
+
+# Change to True in front-end if you want this function to use caching
+# This is a mostly-hidden optimization option that most users will not want
+# to use. It is used in PyCBC Live
+USE_CACHING_FOR_LFILTER = False
+# If using caching we want output to be unique if called at different places
+# (and if called from different modules/functions), these unique IDs achieve
+# that. The numbers are not significant, only that they are unique.
+LFILTER_UNIQUE_ID_1 = 651273657
+LFILTER_UNIQUE_ID_2 = 154687641
+LFILTER_UNIQUE_ID_3 = 548946442
+
+def lfilter(coefficients, timeseries):
+    """ Apply filter coefficients to a time series
+
+    Parameters
+    ----------
+    coefficients: numpy.ndarray
+        Filter coefficients to apply
+    timeseries: pycbc.types.TimeSeries
+        Time series to be filtered.
+
+    Returns
+    -------
+    tseries: pycbc.types.TimeSeries
+        The filtered time series.
+    """
+    from pycbc.filter import correlate
+    fillen = len(coefficients)
+
+    # If there aren't many points just use the default scipy method
+    if len(timeseries) < 2**7:
+        series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
+        return TimeSeries(series,
+                          epoch=timeseries.start_time,
+                          delta_t=timeseries.delta_t)
+    elif (len(timeseries) < fillen * 10) or (len(timeseries) < 2**18):
+        from pycbc.strain.strain import create_memory_and_engine_for_class_based_fft
+        from pycbc.strain.strain import execute_cached_fft
+
+        cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
+        cseries.resize(len(timeseries))
+        cseries.roll(len(timeseries) - fillen + 1)
+
+        flen = len(cseries) // 2 + 1
+        ftype = complex_same_precision_as(timeseries)
+
+        if not USE_CACHING_FOR_LFILTER:
+            cfreq = zeros(flen, dtype=ftype)
+            tfreq = zeros(flen, dtype=ftype)
+            fft(Array(cseries), cfreq)
+            fft(Array(timeseries), tfreq)
+            cout = zeros(flen, ftype)
+            correlate(cfreq, tfreq, cout)
+            out = zeros(len(timeseries), dtype=timeseries)
+            ifft(cout, out)
+
+        else:
+            npoints = len(cseries)
+            # NOTE: This function is cached!
+            ifftouts = create_memory_and_engine_for_class_based_fft(
+                npoints,
+                timeseries.dtype,
+                ifft=True,
+                uid=LFILTER_UNIQUE_ID_1
+            )
+
+            # FFT contents of cseries into cfreq
+            cfreq = execute_cached_fft(cseries, uid=LFILTER_UNIQUE_ID_2,
+                                       copy_output=False,
+                                       normalize_by_rate=False)
+
+            # FFT contents of timeseries into tfreq
+            tfreq = execute_cached_fft(timeseries, uid=LFILTER_UNIQUE_ID_3,
+                                       copy_output=False,
+                                       normalize_by_rate=False)
+
+            cout, out, fft_class = ifftouts
+
+            # Correlate cfreq and tfreq
+            correlate(cfreq, tfreq, cout)
+            # IFFT correlation output into out
+            fft_class.execute()
+
+        return TimeSeries(out.numpy()  / len(out), epoch=timeseries.start_time,
+                          delta_t=timeseries.delta_t)
+    else:
+        # recursively perform which saves a bit on memory usage
+        # but must keep within recursion limit
+        chunksize = max(fillen * 5, len(timeseries) // 128)
+        part1 = lfilter(coefficients, timeseries[0:chunksize])
+        part2 = lfilter(coefficients, timeseries[chunksize - fillen:])
+        out = timeseries.copy()
+        out[:len(part1)] = part1
+        out[len(part1):] = part2[fillen:]
+        return out
+
+
+[docs] +def fir_zero_filter(coeff, timeseries): + """Filter the timeseries with a set of FIR coefficients + + Parameters + ---------- + coeff: numpy.ndarray + FIR coefficients. Should be an odd length and symmetric. + timeseries: pycbc.types.TimeSeries + Time series to be filtered. + + Returns + ------- + filtered_series: pycbc.types.TimeSeries + Return the filtered timeseries, which has been properly shifted to account + for the FIR filter delay and the corrupted regions zeroed out. + """ + # apply the filter + series = lfilter(coeff, timeseries) + + # reverse the time shift caused by the filter, + # corruption regions contain zeros + # If the number of filter coefficients is odd, the central point *should* + # be included in the output so we only zero out a region of len(coeff) - 1 + series[:(len(coeff) // 2) * 2] = 0 + series.roll(-len(coeff)//2) + return series
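A minimal usage sketch, assuming `ts` is a pycbc TimeSeries; the 9-tap moving-average coefficients below are illustrative only (odd length and symmetric, as the docstring requires).

    import numpy

    coeff = numpy.ones(9) / 9.0          # simple symmetric, odd-length FIR
    smoothed = fir_zero_filter(coeff, ts)
    # the filter delay has been removed and the corrupted edges are zeroed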
+ + +
+[docs] +def resample_to_delta_t(timeseries, delta_t, method='butterworth'): + """Resmple the time_series to delta_t + + Resamples the TimeSeries instance time_series to the given time step, + delta_t. Only powers of two and real valued time series are supported + at this time. Additional restrictions may apply to particular filter + methods. + + Parameters + ---------- + time_series: TimeSeries + The time series to be resampled + delta_t: float + The desired time step + + Returns + ------- + Time Series: TimeSeries + A TimeSeries that has been resampled to delta_t. + + Raises + ------ + TypeError: + time_series is not an instance of TimeSeries. + TypeError: + time_series is not real valued + + Examples + -------- + + >>> h_plus_sampled = resample_to_delta_t(h_plus, 1.0/2048) + """ + if not isinstance(timeseries,TimeSeries): + raise TypeError("Can only resample time series") + + if timeseries.kind != 'real': + raise TypeError("Time series must be real") + + if timeseries.sample_rate_close(1.0 / delta_t): + return timeseries * 1 + + if method == 'butterworth': + lal_data = timeseries.lal() + _resample_func[timeseries.dtype](lal_data, delta_t) + data = lal_data.data.data + + elif method == 'ldas': + factor = int(round(delta_t / timeseries.delta_t)) + numtaps = factor * 20 + 1 + + # The kaiser window has been testing using the LDAS implementation + # and is in the same configuration as used in the original lalinspiral + filter_coefficients = cached_firwin(numtaps, 1.0 / factor, + window=('kaiser', 5)) + + # apply the filter and decimate + data = fir_zero_filter(filter_coefficients, timeseries)[::factor] + + else: + raise ValueError('Invalid resampling method: %s' % method) + + ts = TimeSeries(data, delta_t = delta_t, + dtype=timeseries.dtype, + epoch=timeseries._epoch) + + # From the construction of the LDAS FIR filter there will be 10 corrupted samples + # explanation here http://software.ligo.org/docs/lalsuite/lal/group___resample_time_series__c.html + ts.corrupted_samples = 10 + return ts
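A usage sketch for both resampling methods, assuming `ts` is a real TimeSeries sampled at 4096 Hz (the names and rates are illustrative):

    # Butterworth (IIR) resampling via LAL, the default method
    ts_2048 = resample_to_delta_t(ts, 1.0 / 2048)

    # FIR-based 'ldas' resampling; the decimation factor here is 2
    ts_2048_fir = resample_to_delta_t(ts, 1.0 / 2048, method='ldas')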
+ + + +_highpass_func = {numpy.dtype('float32'): lal.HighPassREAL4TimeSeries, + numpy.dtype('float64'): lal.HighPassREAL8TimeSeries} +_lowpass_func = {numpy.dtype('float32'): lal.LowPassREAL4TimeSeries, + numpy.dtype('float64'): lal.LowPassREAL8TimeSeries} + + +
+[docs] +def notch_fir(timeseries, f1, f2, order, beta=5.0): + """ notch filter the time series using an FIR filtered generated from + the ideal response passed through a time-domain kaiser window (beta = 5.0) + + The suppression of the notch filter is related to the bandwidth and + the number of samples in the filter length. For a few Hz bandwidth, + a length corresponding to a few seconds is typically + required to create significant suppression in the notched band. + To achieve frequency resolution df at sampling frequency fs, + order should be at least fs/df. + + Parameters + ---------- + Time Series: TimeSeries + The time series to be notched. + f1: float + The start of the frequency suppression. + f2: float + The end of the frequency suppression. + order: int + Number of corrupted samples on each side of the time series + (Extent of the filter on either side of zero) + beta: float + Beta parameter of the kaiser window that sets the side lobe attenuation. + """ + k1 = f1 / float((int(1.0 / timeseries.delta_t) / 2)) + k2 = f2 / float((int(1.0 / timeseries.delta_t) / 2)) + coeff = cached_firwin(order * 2 + 1, [k1, k2], window=('kaiser', beta)) + return fir_zero_filter(coeff, timeseries)
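For example, to notch out the 60 Hz power line from a time series `ts` (values are illustrative; per the docstring, an order of roughly fs/df gives a frequency resolution of about df):

    fs = int(ts.sample_rate)
    # order ~ fs gives ~1 Hz resolution; suppress the 59-61 Hz band
    notched = notch_fir(ts, 59.0, 61.0, order=fs, beta=5.0)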
+ + +
+[docs] +def lowpass_fir(timeseries, frequency, order, beta=5.0): + """ Lowpass filter the time series using an FIR filtered generated from + the ideal response passed through a kaiser window (beta = 5.0) + + Parameters + ---------- + Time Series: TimeSeries + The time series to be low-passed. + frequency: float + The frequency below which is suppressed. + order: int + Number of corrupted samples on each side of the time series + beta: float + Beta parameter of the kaiser window that sets the side lobe attenuation. + """ + k = frequency / float((int(1.0 / timeseries.delta_t) / 2)) + coeff = cached_firwin(order * 2 + 1, k, window=('kaiser', beta)) + return fir_zero_filter(coeff, timeseries)
+ + +
+[docs] +def highpass_fir(timeseries, frequency, order, beta=5.0): + """ Highpass filter the time series using an FIR filtered generated from + the ideal response passed through a kaiser window (beta = 5.0) + + Parameters + ---------- + Time Series: TimeSeries + The time series to be high-passed. + frequency: float + The frequency below which is suppressed. + order: int + Number of corrupted samples on each side of the time series + beta: float + Beta parameter of the kaiser window that sets the side lobe attenuation. + """ + k = frequency / float((int(1.0 / timeseries.delta_t) / 2)) + coeff = cached_firwin(order * 2 + 1, k, window=('kaiser', beta), pass_zero=False) + return fir_zero_filter(coeff, timeseries)
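Corresponding FIR low-pass and high-pass sketches, again assuming `ts` is a pycbc TimeSeries; the cutoff frequencies and orders are illustrative:

    low = lowpass_fir(ts, 300.0, order=512)    # suppress content above 300 Hz
    high = highpass_fir(ts, 15.0, order=512)   # suppress content below 15 Hz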
+ + +
+[docs] +def highpass(timeseries, frequency, filter_order=8, attenuation=0.1): + """Return a new timeseries that is highpassed. + + Return a new time series that is highpassed above the `frequency`. + + Parameters + ---------- + Time Series: TimeSeries + The time series to be high-passed. + frequency: float + The frequency below which is suppressed. + filter_order: {8, int}, optional + The order of the filter to use when high-passing the time series. + attenuation: {0.1, float}, optional + The attenuation of the filter. + + Returns + ------- + Time Series: TimeSeries + A new TimeSeries that has been high-passed. + + Raises + ------ + TypeError: + time_series is not an instance of TimeSeries. + TypeError: + time_series is not real valued + + """ + + if not isinstance(timeseries, TimeSeries): + raise TypeError("Can only resample time series") + + if timeseries.kind != 'real': + raise TypeError("Time series must be real") + + lal_data = timeseries.lal() + _highpass_func[timeseries.dtype](lal_data, frequency, + 1-attenuation, filter_order) + + return TimeSeries(lal_data.data.data, delta_t = lal_data.deltaT, + dtype=timeseries.dtype, epoch=timeseries._epoch)
+ + +
+[docs] +def lowpass(timeseries, frequency, filter_order=8, attenuation=0.1): + """Return a new timeseries that is lowpassed. + + Return a new time series that is lowpassed below the `frequency`. + + Parameters + ---------- + Time Series: TimeSeries + The time series to be low-passed. + frequency: float + The frequency above which is suppressed. + filter_order: {8, int}, optional + The order of the filter to use when low-passing the time series. + attenuation: {0.1, float}, optional + The attenuation of the filter. + + Returns + ------- + Time Series: TimeSeries + A new TimeSeries that has been low-passed. + + Raises + ------ + TypeError: + time_series is not an instance of TimeSeries. + TypeError: + time_series is not real valued + """ + + if not isinstance(timeseries, TimeSeries): + raise TypeError("Can only resample time series") + + if timeseries.kind != 'real': + raise TypeError("Time series must be real") + + lal_data = timeseries.lal() + _lowpass_func[timeseries.dtype](lal_data, frequency, + 1-attenuation, filter_order) + + return TimeSeries(lal_data.data.data, delta_t = lal_data.deltaT, + dtype=timeseries.dtype, epoch=timeseries._epoch)
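The IIR (Butterworth) versions are used the same way, for example to band-limit a strain series before further processing. A hedged sketch; `ts`, the corner frequencies and the orders are illustrative:

    conditioned = highpass(ts, 15.0, filter_order=8)
    conditioned = lowpass(conditioned, 1024.0, filter_order=8)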
+ + + +
+[docs] +def interpolate_complex_frequency(series, delta_f, zeros_offset=0, side='right'): + """Interpolate complex frequency series to desired delta_f. + + Return a new complex frequency series that has been interpolated to the + desired delta_f. + + Parameters + ---------- + series : FrequencySeries + Frequency series to be interpolated. + delta_f : float + The desired delta_f of the output + zeros_offset : optional, {0, int} + Number of sample to delay the start of the zero padding + side : optional, {'right', str} + The side of the vector to zero pad + + Returns + ------- + interpolated series : FrequencySeries + A new FrequencySeries that has been interpolated. + """ + new_n = int( (len(series)-1) * series.delta_f / delta_f + 1) + old_N = int( (len(series)-1) * 2 ) + new_N = int( (new_n - 1) * 2 ) + time_series = TimeSeries(zeros(old_N), delta_t =1.0/(series.delta_f*old_N), + dtype=real_same_precision_as(series)) + + ifft(series, time_series) + + time_series.roll(-zeros_offset) + time_series.resize(new_N) + + if side == 'left': + time_series.roll(zeros_offset + new_N - old_N) + elif side == 'right': + time_series.roll(zeros_offset) + + out_series = FrequencySeries(zeros(new_n), epoch=series.epoch, + delta_f=delta_f, dtype=series.dtype) + fft(time_series, out_series) + + return out_series
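A sketch of interpolating a one-sided FrequencySeries of a real signal to a finer frequency spacing; `ts` and the factor of two are illustrative only:

    fs_coarse = ts.to_frequencyseries()            # delta_f = 1 / ts.duration
    fs_fine = interpolate_complex_frequency(fs_coarse, fs_coarse.delta_f / 2)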
+ + +__all__ = ['resample_to_delta_t', 'highpass', 'lowpass', + 'interpolate_complex_frequency', 'highpass_fir', + 'lowpass_fir', 'notch_fir', 'fir_zero_filter'] + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/filter/simd_correlate.html b/latest/html/_modules/pycbc/filter/simd_correlate.html new file mode 100644 index 00000000000..b92d12c779f --- /dev/null +++ b/latest/html/_modules/pycbc/filter/simd_correlate.html @@ -0,0 +1,204 @@ + + + + + + pycbc.filter.simd_correlate — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.filter.simd_correlate

+# Copyright (C) 2014 Josh Willis
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+from pycbc.types import float32, complex64
+import numpy as _np
+from .. import opt
+from .simd_correlate_cython import ccorrf_simd, ccorrf_parallel
+
+"""
+This module interfaces to C functions for multiplying
+the complex conjugate of one vector by a second vector, writing the output
+to a third vector. They do this multi-threaded and with SIMD vectorization.
+
+The functions defined here, and the code that calls them,
+are imported and used in the CPUCorrelator class defined in
+matchedfilter_cpu.py.
+
+Two functions are defined in the 'support' C/Cython module:
+
+ccorrf_simd: Runs on a single core, but vectorized
+ccorrf_parallel: Runs multicore, but not explicitly vectorized.
+                 Parallelized using OpenMP, and calls ccorrf_simd
+"""
+
+
+
+[docs] +def correlate_simd(ht, st, qt): + htilde = _np.array(ht.data, copy=False, dtype=float32) + stilde = _np.array(st.data, copy=False, dtype=float32) + qtilde = _np.array(qt.data, copy=False, dtype=float32) + arrlen = len(htilde) + ccorrf_simd(htilde, stilde, qtilde, arrlen)
+ + + +# We need a segment size (number of complex elements) such that *three* segments +# of that size will fit in the L2 cache. We also want it to be a power of two. +# We are dealing with single-precision complex numbers, which each require 8 bytes. +# +# Our kernel is written to assume a complex correlation of single-precision vectors, +# so that's all we support here. Note that we are assuming that the correct target +# is that the vectors should fit in L2 cache. Figuring out cache topology dynamically +# is a harder problem than we attempt to solve here. + +if opt.HAVE_GETCONF: + # Since we need 3 vectors fitting in L2 cache, divide by 3 + # We find the nearest power-of-two that fits, and the length + # of the single-precision complex array that fits into that size. + pow2 = int(_np.log(opt.LEVEL2_CACHE_SIZE/3.0)/_np.log(2.0)) + default_segsize = pow(2, pow2)/_np.dtype(_np.complex64).itemsize +else: + # Seems to work for Sandy Bridge/Ivy Bridge/Haswell, for now? + default_segsize = 8192 + +
+[docs] +def correlate_parallel(ht, st, qt): + htilde = _np.array(ht.data, copy=False, dtype=complex64) + stilde = _np.array(st.data, copy=False, dtype=complex64) + qtilde = _np.array(qt.data, copy=False, dtype=complex64) + arrlen = len(htilde) + segsize = default_segsize + ccorrf_parallel(htilde, stilde, qtilde, arrlen, segsize)
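A small illustrative call of the parallel correlation kernel. The arrays below are placeholders; in practice these are the frequency-domain template, data and output vectors handled by the CPUCorrelator class.

    import numpy
    from pycbc.types import Array, zeros, complex64

    n = 4096
    ht = Array(numpy.exp(2j * numpy.pi * numpy.arange(n) / 64), dtype=complex64)
    st = Array(numpy.random.randn(n) + 1j * numpy.random.randn(n), dtype=complex64)
    qt = zeros(n, dtype=complex64)

    # qt is filled with conj(ht) * st, computed over OpenMP threads
    correlate_parallel(ht, st, qt)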
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/filter/zpk.html b/latest/html/_modules/pycbc/filter/zpk.html new file mode 100644 index 00000000000..49f86ab4ddb --- /dev/null +++ b/latest/html/_modules/pycbc/filter/zpk.html @@ -0,0 +1,247 @@ + + + + + + pycbc.filter.zpk — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.filter.zpk

+# Copyright (C) 2014  Christopher M. Biwer
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+import numpy as np
+
+from scipy.signal import zpk2sos, sosfilt
+from pycbc.types import TimeSeries
+
+
+[docs] +def filter_zpk(timeseries, z, p, k): + """Return a new timeseries that was filtered with a zero-pole-gain filter. + The transfer function in the s-domain looks like: + .. math:: + \\frac{H(s) = (s - s_1) * (s - s_3) * ... * (s - s_n)}{(s - s_2) * (s - s_4) * ... * (s - s_m)}, m >= n + + The zeroes, and poles entered in Hz are converted to angular frequency, + along the imaginary axis in the s-domain s=i*omega. Then the zeroes, and + poles are bilinearly transformed via: + .. math:: + z(s) = \\frac{(1 + s*T/2)}{(1 - s*T/2)} + + Where z is the z-domain value, s is the s-domain value, and T is the + sampling period. After the poles and zeroes have been bilinearly + transformed, then the second-order sections are found and filter the data + using scipy. + + Parameters + ---------- + timeseries: TimeSeries + The TimeSeries instance to be filtered. + z: array + Array of zeros to include in zero-pole-gain filter design. + In units of Hz. + p: array + Array of poles to include in zero-pole-gain filter design. + In units of Hz. + k: float + Gain to include in zero-pole-gain filter design. This gain is a + constant multiplied to the transfer function. + + Returns + ------- + Time Series: TimeSeries + A new TimeSeries that has been filtered. + + Examples + -------- + To apply a 5 zeroes at 100Hz, 5 poles at 1Hz, and a gain of 1e-10 filter + to a TimeSeries instance, do: + >>> filtered_data = zpk_filter(timeseries, [100]*5, [1]*5, 1e-10) + """ + + # sanity check type + if not isinstance(timeseries, TimeSeries): + raise TypeError("Can only filter TimeSeries instances.") + + # sanity check casual filter + degree = len(p) - len(z) + if degree < 0: + raise TypeError("May not have more zeroes than poles. \ + Filter is not casual.") + + # cast zeroes and poles as arrays and gain as a float + z = np.array(z) + p = np.array(p) + k = float(k) + + # put zeroes and poles in the s-domain + # convert from frequency to angular frequency + z *= -2 * np.pi + p *= -2 * np.pi + + # get denominator of bilinear transform + fs = 2.0 * timeseries.sample_rate + + # zeroes in the z-domain + z_zd = (1 + z/fs) / (1 - z/fs) + + # any zeros that were at infinity are moved to the Nyquist frequency + z_zd = z_zd[np.isfinite(z_zd)] + z_zd = np.append(z_zd, -np.ones(degree)) + + # poles in the z-domain + p_zd = (1 + p/fs) / (1 - p/fs) + + # gain change in z-domain + k_zd = k * np.prod(fs - z) / np.prod(fs - p) + + # get second-order sections + sos = zpk2sos(z_zd, p_zd, k_zd) + + # filter + filtered_data = sosfilt(sos, timeseries.numpy()) + + return TimeSeries(filtered_data, delta_t = timeseries.delta_t, + dtype=timeseries.dtype, + epoch=timeseries._epoch)
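A usage sketch matching the docstring's own example (note the function is named filter_zpk, not zpk_filter as written there): five zeros at 100 Hz, five poles at 1 Hz and a gain of 1e-10, applied to a TimeSeries `ts`; all values are illustrative.

    filtered = filter_zpk(ts, [100.0] * 5, [1.0] * 5, 1e-10)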
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/frame.html b/latest/html/_modules/pycbc/frame.html new file mode 100644 index 00000000000..0a50a7d53d8 --- /dev/null +++ b/latest/html/_modules/pycbc/frame.html @@ -0,0 +1,179 @@ + + + + + + pycbc.frame — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.frame

+from . frame import (locations_to_cache, read_frame,
+                     query_and_read_frame, frame_paths, write_frame,
+                     DataBuffer, StatusBuffer, iDQBuffer)
+
+from . store import (read_store)
+
+
+# Status flags for the calibration state vector
+# See e.g. https://dcc.ligo.org/LIGO-G1700234
+# https://wiki.ligo.org/DetChar/DataQuality/O3Flags
+HOFT_OK = 1
+SCIENCE_INTENT = 2
+SCIENCE_QUALITY = 4
+HOFT_PROD = 8
+FILTERS_OK = 16
+NO_STOCH_HW_INJ = 32
+NO_CBC_HW_INJ = 64
+NO_BURST_HW_INJ = 128
+NO_DETCHAR_HW_INJ = 256
+KAPPA_A_OK = 512
+KAPPA_PU_OK = 1024
+KAPPA_TST_OK = 2048
+KAPPA_C_OK = 4096
+FCC_OK = 8192
+NO_GAP = 16384
+NO_HWINJ = NO_STOCH_HW_INJ | NO_CBC_HW_INJ | \
+           NO_BURST_HW_INJ | NO_DETCHAR_HW_INJ
+
+# relevant bits in the LIGO O2/O3 low-latency DQ vector
+# If the bit is 0 then we should veto
+# https://wiki.ligo.org/DetChar/DmtDqVector
+# https://wiki.ligo.org/DetChar/DataQuality/O3Flags
+OMC_DCPD_ADC_OVERFLOW = 2
+ETMY_ESD_DAC_OVERFLOW = 4
+ETMX_ESD_DAC_OVERFLOW = 16
+
+# CAT1 bit in the Virgo state vector
+# https://wiki.virgo-gw.eu/DetChar/DetCharVirgoStateVector
+VIRGO_GOOD_DQ = 1 << 10
+
+
+
+[docs] +def flag_names_to_bitmask(flags): + """Takes a list of flag names corresponding to bits in a status channel + and returns the corresponding bit mask. + """ + mask = 0 + for flag in flags: + mask |= globals()[flag] + return mask
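For example, combining the calibration flags defined above (illustrative):

    >>> from pycbc.frame import flag_names_to_bitmask
    >>> flag_names_to_bitmask(['HOFT_OK', 'SCIENCE_INTENT'])          # 1 | 2
    3
    >>> flag_names_to_bitmask(['NO_CBC_HW_INJ', 'NO_BURST_HW_INJ'])   # 64 | 128
    192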
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/frame/frame.html b/latest/html/_modules/pycbc/frame/frame.html new file mode 100644 index 00000000000..d9082ca084c --- /dev/null +++ b/latest/html/_modules/pycbc/frame/frame.html @@ -0,0 +1,1176 @@ + + + + + + pycbc.frame.frame — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.frame.frame

+# Copyright (C) 2014 Andrew Miller, Alex Nitz, Tito Dal Canton, Christopher M. Biwer
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module contains functions for reading in data from frame files or caches
+"""
+
+import logging
+import warnings
+import os.path
+import glob
+import time
+import math
+import re
+from urllib.parse import urlparse
+import numpy
+
+import lalframe
+import lal
+from gwdatafind import find_urls as find_frame_urls
+
+import pycbc
+from pycbc.types import TimeSeries, zeros
+
+logger = logging.getLogger('pycbc.frame.frame')
+
+# map LAL series types to corresponding functions and Numpy types
+_fr_type_map = {
+    lal.S_TYPE_CODE: [
+        lalframe.FrStreamReadREAL4TimeSeries, numpy.float32,
+        lal.CreateREAL4TimeSeries,
+        lalframe.FrStreamGetREAL4TimeSeriesMetadata,
+        lal.CreateREAL4Sequence,
+        lalframe.FrameAddREAL4TimeSeriesProcData
+    ],
+    lal.D_TYPE_CODE: [
+        lalframe.FrStreamReadREAL8TimeSeries, numpy.float64,
+        lal.CreateREAL8TimeSeries,
+        lalframe.FrStreamGetREAL8TimeSeriesMetadata,
+        lal.CreateREAL8Sequence,
+        lalframe.FrameAddREAL8TimeSeriesProcData
+    ],
+    lal.C_TYPE_CODE: [
+        lalframe.FrStreamReadCOMPLEX8TimeSeries, numpy.complex64,
+        lal.CreateCOMPLEX8TimeSeries,
+        lalframe.FrStreamGetCOMPLEX8TimeSeriesMetadata,
+        lal.CreateCOMPLEX8Sequence,
+        lalframe.FrameAddCOMPLEX8TimeSeriesProcData
+    ],
+    lal.Z_TYPE_CODE: [
+        lalframe.FrStreamReadCOMPLEX16TimeSeries, numpy.complex128,
+        lal.CreateCOMPLEX16TimeSeries,
+        lalframe.FrStreamGetCOMPLEX16TimeSeriesMetadata,
+        lal.CreateCOMPLEX16Sequence,
+        lalframe.FrameAddCOMPLEX16TimeSeriesProcData
+    ],
+    lal.U4_TYPE_CODE: [
+        lalframe.FrStreamReadUINT4TimeSeries, numpy.uint32,
+        lal.CreateUINT4TimeSeries,
+        lalframe.FrStreamGetUINT4TimeSeriesMetadata,
+        lal.CreateUINT4Sequence,
+        lalframe.FrameAddUINT4TimeSeriesProcData
+    ],
+    lal.I4_TYPE_CODE: [
+        lalframe.FrStreamReadINT4TimeSeries, numpy.int32,
+        lal.CreateINT4TimeSeries,
+        lalframe.FrStreamGetINT4TimeSeriesMetadata,
+        lal.CreateINT4Sequence,
+        lalframe.FrameAddINT4TimeSeriesProcData
+    ],
+}
+
+def _read_channel(channel, stream, start, duration):
+    """ Get channel using lalframe """
+    channel_type = lalframe.FrStreamGetTimeSeriesType(channel, stream)
+    read_func = _fr_type_map[channel_type][0]
+    d_type = _fr_type_map[channel_type][1]
+    data = read_func(stream, channel, start, duration, 0)
+    return TimeSeries(data.data.data, delta_t=data.deltaT, epoch=start,
+                      dtype=d_type)
+
+
+def _is_gwf(file_path):
+    """Test if a file is a frame file by checking if its contents begins with
+    the magic string 'IGWD'."""
+    try:
+        with open(file_path, 'rb') as f:
+            if f.read(4) == b'IGWD':
+                return True
+    except IOError:
+        pass
+    return False
+
+
+
+[docs] +def locations_to_cache(locations, latest=False): + """ Return a cumulative cache file build from the list of locations + + Parameters + ---------- + locations : list + A list of strings containing files, globs, or cache files used to + build a combined lal cache file object. + latest : Optional, {False, Boolean} + Only return a cache with the most recent frame in the locations. + If false, all results are returned. + + Returns + ------- + cache : lal.Cache + A cumulative lal cache object containing the files derived from the + list of locations. + """ + cum_cache = lal.Cache() + for source in locations: + flist = glob.glob(source) + if latest: + def relaxed_getctime(fn): + # when building a cache from a directory of temporary + # low-latency frames, files might disappear between + # the glob() and getctime() calls + try: + return os.path.getctime(fn) + except OSError: + return 0 + if not flist: + raise ValueError('no frame or cache files found in ' + source) + flist = [max(flist, key=relaxed_getctime)] + + for file_path in flist: + dir_name, file_name = os.path.split(file_path) + _, file_extension = os.path.splitext(file_name) + + if file_extension in [".lcf", ".cache"]: + cache = lal.CacheImport(file_path) + elif file_extension == ".gwf" or _is_gwf(file_path): + cache = lalframe.FrOpen(str(dir_name), str(file_name)).cache + else: + raise TypeError("Invalid location name") + + cum_cache = lal.CacheMerge(cum_cache, cache) + return cum_cache
+ + +
+[docs] +def read_frame(location, channels, start_time=None, + end_time=None, duration=None, check_integrity=False, + sieve=None): + """Read time series from frame data. + + Using the `location`, which can either be a frame file ".gwf" or a + frame cache ".gwf", read in the data for the given channel(s) and output + as a TimeSeries or list of TimeSeries. + + Parameters + ---------- + location : string + A source of gravitational wave frames. Either a frame filename + (can include pattern), a list of frame files, or frame cache file. + channels : string or list of strings + Either a string that contains the channel name or a list of channel + name strings. + start_time : {None, LIGOTimeGPS}, optional + The gps start time of the time series. Defaults to reading from the + beginning of the available frame(s). + end_time : {None, LIGOTimeGPS}, optional + The gps end time of the time series. Defaults to the end of the frame. + Note, this argument is incompatible with `duration`. + duration : {None, float}, optional + The amount of data to read in seconds. Note, this argument is + incompatible with `end`. + check_integrity : {True, bool}, optional + Test the frame files for internal integrity. + sieve : string, optional + Selects only frames where the frame URL matches the regular + expression sieve + + Returns + ------- + Frame Data: TimeSeries or list of TimeSeries + A TimeSeries or a list of TimeSeries, corresponding to the data from + the frame file/cache for a given channel or channels. + """ + + if end_time and duration: + raise ValueError("end time and duration are mutually exclusive") + + if type(location) is list: + locations = location + else: + locations = [location] + + cum_cache = locations_to_cache(locations) + if sieve: + logger.info("Using frames that match regexp: %s", sieve) + lal.CacheSieve(cum_cache, 0, 0, None, None, sieve) + if start_time is not None and end_time is not None: + # Before sieving, check if this is sane. Otherwise it will fail later. 
+ if (int(math.ceil(end_time)) - int(start_time)) <= 0: + raise ValueError("Negative or null duration") + lal.CacheSieve(cum_cache, int(start_time), int(math.ceil(end_time)), + None, None, None) + + stream = lalframe.FrStreamCacheOpen(cum_cache) + stream.mode = lalframe.FR_STREAM_VERBOSE_MODE + + if check_integrity: + stream.mode = (stream.mode | lalframe.FR_STREAM_CHECKSUM_MODE) + + lalframe.FrStreamSetMode(stream, stream.mode) + + # determine duration of data + if type(channels) is list: + first_channel = channels[0] + else: + first_channel = channels + + data_length = lalframe.FrStreamGetVectorLength(first_channel, stream) + channel_type = lalframe.FrStreamGetTimeSeriesType(first_channel, stream) + create_series_func = _fr_type_map[channel_type][2] + get_series_metadata_func = _fr_type_map[channel_type][3] + series = create_series_func(first_channel, stream.epoch, 0, 0, + lal.ADCCountUnit, 0) + get_series_metadata_func(series, stream) + data_duration = (data_length + 0.5) * series.deltaT + + if start_time is None: + start_time = stream.epoch*1 + if end_time is None: + end_time = start_time + data_duration + + if type(start_time) is not lal.LIGOTimeGPS: + start_time = lal.LIGOTimeGPS(start_time) + if type(end_time) is not lal.LIGOTimeGPS: + end_time = lal.LIGOTimeGPS(end_time) + + if duration is None: + duration = float(end_time - start_time) + else: + duration = float(duration) + + # lalframe behaves dangerously with invalid duration so catch it here + if duration <= 0: + raise ValueError("Negative or null duration") + #if duration > data_duration: + # raise ValueError("Requested duration longer than available data") + + if type(channels) is list: + all_data = [] + for channel in channels: + channel_data = _read_channel(channel, stream, start_time, duration) + lalframe.FrStreamSeek(stream, start_time) + all_data.append(channel_data) + return all_data + else: + return _read_channel(channels, stream, start_time, duration)
+ + +
+[docs] +def frame_paths( + frame_type, start_time, end_time, server=None, url_type='file', site=None +): + """Return the paths to a span of frame files. + + Parameters + ---------- + frame_type : string + The string representation of the frame type (ex. 'H1_ER_C00_L1'). + start_time : int + The start time that we need the frames to span. + end_time : int + The end time that we need the frames to span. + server : {None, SERVER:PORT string}, optional + Optional string to specify the datafind server to use. By default an + attempt is made to use a local datafind server. + url_type : string + Returns only frame URLs with a particular scheme or head such + as "file" or "https". Default is "file", which queries locally + stored frames. Option can be disabled if set to None. + site : string, optional + One-letter string specifying which site you want data from (H, L, V, + etc). If not given, the site is assumed to be the first letter of + `frame_type`, which is usually (but not always) a safe assumption. + + Returns + ------- + paths : list of paths + The list of paths to the frame files. + + Examples + -------- + >>> paths = frame_paths('H1_LDAS_C02_L2', 968995968, 968995968+2048) + """ + if site is None: + # this case is tolerated for backward compatibility + site = frame_type[0] + warnings.warn( + f'Guessing site {site} from frame type {frame_type}', + DeprecationWarning + ) + cache = find_frame_urls(site, frame_type, start_time, end_time, + urltype=url_type, host=server) + return [urlparse(entry).path for entry in cache]
+ + + +def get_site_from_type_or_channel(frame_type, channels): + """Determine the site for querying gwdatafind (H, L, V, etc) based on + substrings of the frame type and channel(s). + + The type should begin with S: or SN:, in which case S is taken as the + site. Otherwise, the same is done with the channel (with the first + channel if more than one are given). If that also fails, the site is + taken to be the first letter of the frame type, which is usually + (but not always) a correct assumption. + + Parameters + ---------- + frame_type : string + The frame type, ideally prefixed by the site indicator. + channels : string or list of strings + The channel name or names. + + Returns + ------- + site : string + The site letter. + frame_type : string + The frame type with the site prefix (if any) removed. + """ + site_re = '^([^:])[^:]?:' + m = re.match(site_re, frame_type) + if m: + return m.groups(1)[0], frame_type[m.end():] + chan = channels + if isinstance(chan, list): + chan = channels[0] + m = re.match(site_re, chan) + if m: + return m.groups(1)[0], frame_type + warnings.warn( + f'Guessing site {frame_type[0]} from frame type {frame_type}', + DeprecationWarning + ) + return frame_type[0], frame_type + + +
+[docs] +def query_and_read_frame(frame_type, channels, start_time, end_time, + sieve=None, check_integrity=False): + """Read time series from frame data. + + Query for the location of physical frames matching the frame type. Return + a time series containing the channel between the given start and end times. + + Parameters + ---------- + frame_type : string + The type of frame file that we are looking for. The string should begin + with S: or SN:, in which case S is taken as the site to query. If this + is not the case, the site will be guessed from the channel name or from + the type in a different way, which may not work. + channels : string or list of strings + Either a string that contains the channel name or a list of channel + name strings. + start_time : LIGOTimeGPS or int + The gps start time of the time series. Defaults to reading from the + beginning of the available frame(s). + end_time : LIGOTimeGPS or int + The gps end time of the time series. Defaults to the end of the frame. + sieve : string, optional + Selects only frames where the frame URL matches the regular + expression sieve + check_integrity : boolean + Do an expensive checksum of the file before returning. + + Returns + ------- + Frame Data: TimeSeries or list of TimeSeries + A TimeSeries or a list of TimeSeries, corresponding to the data from + the frame file/cache for a given channel or channels. + + Examples + -------- + >>> ts = query_and_read_frame('H1_LDAS_C02_L2', 'H1:LDAS-STRAIN', + >>> 968995968, 968995968+2048) + """ + site, frame_type = get_site_from_type_or_channel(frame_type, channels) + + # Allows compatibility with our standard tools + # We may want to place this into a higher level frame getting tool + if frame_type in ['LOSC_STRAIN', 'GWOSC_STRAIN']: + from pycbc.frame.gwosc import read_strain_gwosc + if not isinstance(channels, list): + channels = [channels] + data = [read_strain_gwosc(c[:2], start_time, end_time) + for c in channels] + return data if len(data) > 1 else data[0] + if frame_type in ['LOSC', 'GWOSC']: + from pycbc.frame.gwosc import read_frame_gwosc + return read_frame_gwosc(channels, start_time, end_time) + + logger.info('Querying datafind server') + paths = frame_paths( + frame_type, + int(start_time), + int(numpy.ceil(end_time)), + site=site + ) + logger.info('Found frame file paths: %s', ' '.join(paths)) + return read_frame( + paths, + channels, + start_time=start_time, + end_time=end_time, + sieve=sieve, + check_integrity=check_integrity + )
+ + + +
+[docs] +def write_frame(location, channels, timeseries): + """Write a list of time series to a single frame file. + + Parameters + ---------- + location : string + A frame filename. + channels : string or list of strings + Either a string that contains the channel name or a list of channel + name strings. + timeseries: TimeSeries + A TimeSeries or list of TimeSeries, corresponding to the data to be + written to the frame file for a given channel. + """ + # check if a single channel or a list of channels + if type(channels) is list and type(timeseries) is list: + channels = channels + timeseries = timeseries + else: + channels = [channels] + timeseries = [timeseries] + + # check that timeseries have the same start and end time + gps_start_times = {series.start_time for series in timeseries} + gps_end_times = {series.end_time for series in timeseries} + if len(gps_start_times) != 1 or len(gps_end_times) != 1: + raise ValueError("Start and end times of TimeSeries must be identical.") + + # check that start, end time, and duration are integers + gps_start_time = gps_start_times.pop() + gps_end_time = gps_end_times.pop() + duration = int(gps_end_time - gps_start_time) + if gps_start_time % 1 or gps_end_time % 1: + raise ValueError("Start and end times of TimeSeries must be integer seconds.") + + # create frame + frame = lalframe.FrameNew(epoch=gps_start_time, duration=duration, + project='', run=1, frnum=1, + detectorFlags=lal.LALDETECTORTYPE_ABSENT) + + for i,tseries in enumerate(timeseries): + # get data type + for seriestype in _fr_type_map.keys(): + if _fr_type_map[seriestype][1] == tseries.dtype: + create_series_func = _fr_type_map[seriestype][2] + create_sequence_func = _fr_type_map[seriestype][4] + add_series_func = _fr_type_map[seriestype][5] + break + + # add time series to frame + series = create_series_func(channels[i], tseries.start_time, + 0, tseries.delta_t, lal.ADCCountUnit, + len(tseries.numpy())) + series.data = create_sequence_func(len(tseries.numpy())) + series.data.data = tseries.numpy() + add_series_func(frame, series) + + # write frame + lalframe.FrameWrite(frame, location)
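A round-trip sketch, assuming `ts` is a TimeSeries with integer GPS start and end times; the file name and channel name are placeholders only.

    channel = 'H1:TEST-CHANNEL'                    # hypothetical channel name
    write_frame('H-TEST-1000000000-16.gwf', channel, ts)

    # read the same span back from the frame file just written
    ts2 = read_frame('H-TEST-1000000000-16.gwf', channel)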
+ + +
+[docs] +class DataBuffer(object): + + """A linear buffer that acts as a FILO for reading in frame data + """ + + def __init__(self, frame_src, + channel_name, + start_time, + max_buffer=2048, + force_update_cache=True, + increment_update_cache=None, + dtype=numpy.float64): + """ Create a rolling buffer of frame data + + Parameters + --------- + frame_src: str of list of strings + Strings that indicate where to read from files from. This can be a + list of frame files, a glob, etc. + channel_name: str + Name of the channel to read from the frame files + start_time: + Time to start reading from. + max_buffer: {int, 2048}, Optional + Length of the buffer in seconds + dtype: {dtype, numpy.float32}, Optional + Data type to use for the interal buffer + """ + self.frame_src = frame_src + self.channel_name = channel_name + self.read_pos = start_time + self.force_update_cache = force_update_cache + self.increment_update_cache = increment_update_cache + self.detector = channel_name.split(':')[0] + + self.update_cache() + self.channel_type, self.raw_sample_rate = self._retrieve_metadata(self.stream, self.channel_name) + + raw_size = self.raw_sample_rate * max_buffer + self.raw_buffer = TimeSeries(zeros(raw_size, dtype=dtype), + copy=False, + epoch=start_time - max_buffer, + delta_t=1.0/self.raw_sample_rate) + +
+[docs] + def update_cache(self): + """Reset the lal cache. This can be used to update the cache if the + result may change due to more files being added to the filesystem, + for example. + """ + cache = locations_to_cache(self.frame_src, latest=True) + stream = lalframe.FrStreamCacheOpen(cache) + self.stream = stream
+ + + @staticmethod + def _retrieve_metadata(stream, channel_name): + """Retrieve basic metadata by reading the first file in the cache + + Parameters + ---------- + stream: lal stream object + Stream containing a channel we want to learn about + channel_name: str + The name of the channel we want to know the dtype and sample rate of + + Returns + ------- + channel_type: lal type enum + Enum value which indicates the dtype of the channel + sample_rate: int + The sample rate of the data within this channel + """ + lalframe.FrStreamGetVectorLength(channel_name, stream) + channel_type = lalframe.FrStreamGetTimeSeriesType(channel_name, stream) + create_series_func = _fr_type_map[channel_type][2] + get_series_metadata_func = _fr_type_map[channel_type][3] + series = create_series_func(channel_name, stream.epoch, 0, 0, + lal.ADCCountUnit, 0) + get_series_metadata_func(series, stream) + return channel_type, int(1.0/series.deltaT) + + def _read_frame(self, blocksize): + """Try to read the block of data blocksize seconds long + + Parameters + ---------- + blocksize: int + The number of seconds to attempt to read from the channel + + Returns + ------- + data: TimeSeries + TimeSeries containg 'blocksize' seconds of frame data + + Raises + ------ + RuntimeError: + If data cannot be read for any reason + """ + try: + read_func = _fr_type_map[self.channel_type][0] + dtype = _fr_type_map[self.channel_type][1] + data = read_func(self.stream, self.channel_name, + self.read_pos, int(blocksize), 0) + return TimeSeries(data.data.data, delta_t=data.deltaT, + epoch=self.read_pos, + dtype=dtype) + except Exception: + raise RuntimeError('Cannot read {0} frame data'.format(self.channel_name)) + +
+[docs] + def null_advance(self, blocksize): + """Advance and insert zeros + + Parameters + ---------- + blocksize: int + The number of seconds to attempt to read from the channel + """ + self.raw_buffer.roll(-int(blocksize * self.raw_sample_rate)) + self.read_pos += blocksize + self.raw_buffer.start_time += blocksize
+ + +
+[docs] + def advance(self, blocksize): + """Add blocksize seconds more to the buffer, push blocksize seconds + from the beginning. + + Parameters + ---------- + blocksize: int + The number of seconds to attempt to read from the channel + """ + ts = self._read_frame(blocksize) + + self.raw_buffer.roll(-len(ts)) + self.raw_buffer[-len(ts):] = ts[:] + self.read_pos += blocksize + self.raw_buffer.start_time += blocksize + return ts
+ + +
+[docs] + def update_cache_by_increment(self, blocksize): + """Update the internal cache by starting from the first frame + and incrementing. + + Guess the next frame file name by incrementing from the first found + one. This allows a pattern to be used for the GPS folder of the file, + which is indicated by `GPSX` where x is the number of digits to use. + + Parameters + ---------- + blocksize: int + Number of seconds to increment the next frame file. + """ + start = float(self.raw_buffer.end_time) + end = float(start + blocksize) + + if not hasattr(self, 'dur'): + fname = glob.glob(self.frame_src[0])[0] + fname = os.path.splitext(os.path.basename(fname))[0].split('-') + + self.beg = '-'.join([fname[0], fname[1]]) + self.ref = int(fname[2]) + self.dur = int(fname[3]) + + fstart = int(self.ref + numpy.floor((start - self.ref) / float(self.dur)) * self.dur) + starts = numpy.arange(fstart, end, self.dur).astype(int) + + keys = [] + for s in starts: + pattern = self.increment_update_cache + if 'GPS' in pattern: + n = int(pattern[int(pattern.index('GPS') + 3)]) + pattern = pattern.replace('GPS%s' % n, str(s)[0:n]) + + name = f'{pattern}/{self.beg}-{s}-{self.dur}.gwf' + # check that file actually exists, else abort now + if not os.path.exists(name): + raise RuntimeError + + keys.append(name) + cache = locations_to_cache(keys) + stream = lalframe.FrStreamCacheOpen(cache) + self.stream = stream + self.channel_type, self.raw_sample_rate = \ + self._retrieve_metadata(self.stream, self.channel_name)
+ + +
+[docs] + def attempt_advance(self, blocksize, timeout=10): + """ Attempt to advance the frame buffer. Retry upon failure, except + if the frame file is beyond the timeout limit. + + Parameters + ---------- + blocksize: int + The number of seconds to attempt to read from the channel + timeout: {int, 10}, Optional + Number of seconds before giving up on reading a frame + + Returns + ------- + data: TimeSeries + TimeSeries containg 'blocksize' seconds of frame data + """ + if self.force_update_cache: + self.update_cache() + while True: + try: + if self.increment_update_cache: + self.update_cache_by_increment(blocksize) + return DataBuffer.advance(self, blocksize) + except RuntimeError: + if pycbc.gps_now() > timeout + self.raw_buffer.end_time: + # The frame is not there and it should be by now, + # so we give up and treat it as zeros + DataBuffer.null_advance(self, blocksize) + return None + # I am too early to give up on this frame, + # so we should try again + time.sleep(0.1)
+
+ + +
+[docs] +class StatusBuffer(DataBuffer): + + """ Read state vector or DQ information from a frame file """ + + def __init__(self, frame_src, + channel_name, + start_time, + max_buffer=2048, + valid_mask=3, + force_update_cache=False, + increment_update_cache=None, + valid_on_zero=False): + """ Create a rolling buffer of status data from a frame + + Parameters + --------- + frame_src: str of list of strings + Strings that indicate where to read from files from. This can be a + list of frame files, a glob, etc. + channel_name: str + Name of the channel to read from the frame files + start_time: + Time to start reading from. + max_buffer: {int, 2048}, Optional + Length of the buffer in seconds + valid_mask: {int, HOFT_OK | SCIENCE_INTENT}, Optional + Set of flags that must be on to indicate valid frame data. + valid_on_zero: bool + If True, `valid_mask` is ignored and the status is considered + "good" simply when the channel is zero. + """ + DataBuffer.__init__(self, frame_src, channel_name, start_time, + max_buffer=max_buffer, + force_update_cache=force_update_cache, + increment_update_cache=increment_update_cache, + dtype=numpy.int32) + + self.valid_mask = valid_mask + self.valid_on_zero = valid_on_zero + +
+[docs] + def check_valid(self, values, flag=None): + """Check if the data contains any non-valid status information + + Parameters + ---------- + values: pycbc.types.Array + Array of status information + flag: str, optional + Override the default valid mask with a user defined mask. + + Returns + ------- + status: boolean + Returns True if all of the status information if valid, + False if any is not. + """ + if self.valid_on_zero: + valid = values.numpy() == 0 + else: + if flag is None: + flag = self.valid_mask + valid = numpy.bitwise_and(values.numpy(), flag) == flag + return bool(numpy.all(valid))
+ + +
+[docs] + def is_extent_valid(self, start_time, duration, flag=None): + """Check if the duration contains any non-valid frames + + Parameters + ---------- + start_time: int + Beginning of the duration to check in gps seconds + duration: int + Number of seconds after the start_time to check + flag: str, optional + Override the default valid mask with a user defined mask. + + Returns + ------- + status: boolean + Returns True if all of the status information if valid, + False if any is not. + """ + sr = self.raw_buffer.sample_rate + s = int((start_time - self.raw_buffer.start_time) * sr) + e = s + int(duration * sr) + 1 + data = self.raw_buffer[s:e] + return self.check_valid(data, flag=flag)
+ + +
+[docs] + def indices_of_flag(self, start_time, duration, times, padding=0): + """ Return the indices of the times lying in the flagged region + + Parameters + ---------- + start_time: int + Beginning time to request for + duration: int + Number of seconds to check. + padding: float + Number of seconds to add around flag inactive times to be considered + inactive as well. + + Returns + ------- + indices: numpy.ndarray + Array of indices marking the location of triggers within valid + time. + """ + from pycbc.events.veto import indices_outside_times + sr = self.raw_buffer.sample_rate + s = int((start_time - self.raw_buffer.start_time - padding) * sr) - 1 + e = s + int((duration + padding) * sr) + 1 + data = self.raw_buffer[s:e] + stamps = data.sample_times.numpy() + + if self.valid_on_zero: + invalid = data.numpy() != 0 + else: + invalid = numpy.bitwise_and(data.numpy(), self.valid_mask) \ + != self.valid_mask + + starts = stamps[invalid] - padding + ends = starts + 1.0 / sr + padding * 2.0 + idx = indices_outside_times(times, starts, ends) + return idx
+ + +
+[docs] + def advance(self, blocksize): + """ Add blocksize seconds more to the buffer, push blocksize seconds + from the beginning. + + Parameters + ---------- + blocksize: int + The number of seconds to attempt to read from the channel + + Returns + ------- + status: boolean + Returns True if all of the status information if valid, + False if any is not. + """ + try: + if self.increment_update_cache: + self.update_cache_by_increment(blocksize) + ts = DataBuffer.advance(self, blocksize) + return self.check_valid(ts) + except RuntimeError: + self.null_advance(blocksize) + return False
+
+ + +
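A hypothetical low-latency sketch of the two buffer classes above. The frame glob, channel names, GPS time and flag choice are placeholders, not recommendations, and real frame files must exist at the given location for the constructors to succeed.

    from pycbc.frame import DataBuffer, StatusBuffer, HOFT_OK, SCIENCE_INTENT

    frame_glob = ['/dev/shm/llhoft/H1/*.gwf']      # hypothetical frame location
    strain = DataBuffer(frame_glob, 'H1:GDS-CALIB_STRAIN',
                        start_time=1187008882, max_buffer=2048)
    state = StatusBuffer(frame_glob, 'H1:GDS-CALIB_STATE_VECTOR',
                         start_time=1187008882,
                         valid_mask=HOFT_OK | SCIENCE_INTENT)

    # read the next 8 seconds of strain and check the corresponding state flags
    new_strain = strain.attempt_advance(8)
    strain_ok = state.advance(8)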
+[docs] +class iDQBuffer(object): + + """ Read iDQ timeseries from a frame file """ + + def __init__(self, frame_src, + idq_channel_name, + idq_status_channel_name, + idq_threshold, + start_time, + max_buffer=512, + force_update_cache=False, + increment_update_cache=None): + """ + Parameters + ---------- + frame_src: str of list of strings + Strings that indicate where to read from files from. This can be a + list of frame files, a glob, etc. + idq_channel_name: str + Name of the channel to read the iDQ statistic from + idq_status_channel_name: str + Name of the channel to read the iDQ status from + idq_threshold: float + Threshold which triggers a veto if iDQ channel falls below this threshold + start_time: + Time to start reading from. + max_buffer: {int, 512}, Optional + Length of the buffer in seconds + force_update_cache: {boolean, True}, Optional + Re-check the filesystem for frame files on every attempt to + read more data. + increment_update_cache: {str, None}, Optional + Pattern to look for frame files in a GPS dependent directory. This + is an alternate to the forced updated of the frame cache, and + apptempts to predict the next frame file name without probing the + filesystem. + """ + self.threshold = idq_threshold + self.idq = DataBuffer(frame_src, idq_channel_name, start_time, + max_buffer=max_buffer, + force_update_cache=force_update_cache, + increment_update_cache=increment_update_cache) + self.idq_state = DataBuffer(frame_src, idq_status_channel_name, start_time, + max_buffer=max_buffer, + force_update_cache=force_update_cache, + increment_update_cache=increment_update_cache) + +
+[docs] + def flag_at_times(self, start_time, duration, times, padding=0): + """ Check whether the idq flag was on at given times + + Parameters + ---------- + start_time: int + Beginning time to request for + duration: int + Number of seconds to check. + times: array of floats + Times to check for an active flag + padding: float + Amount of time in seconds to flag around samples + below the iDQ FAP threshold + + Returns + ------- + flag_state: numpy.ndarray + Boolean array of whether flag was on at given times + """ + from pycbc.events.veto import indices_within_times + + # convert start and end times to buffer indices + sr = self.idq.raw_buffer.sample_rate + s = int((start_time - self.idq.raw_buffer.start_time - padding) * sr) - 1 + e = s + int((duration + padding) * sr) + 1 + + # find samples when iDQ FAP is below threshold and state is valid + idq_fap = self.idq.raw_buffer[s:e] + low_fap = idq_fap.numpy() <= self.threshold + idq_valid = self.idq_state.raw_buffer[s:e] + idq_valid = idq_valid.numpy().astype(bool) + valid_low_fap = numpy.logical_and(idq_valid, low_fap) + + # find times corresponding to the valid low FAP samples + glitch_idx = numpy.flatnonzero(valid_low_fap) + stamps = idq_fap.sample_times.numpy() + glitch_times = stamps[glitch_idx] + + # construct start and end times of flag segments + starts = glitch_times - padding + ends = starts + 1.0 / sr + padding * 2.0 + + # check if times were flagged + idx = indices_within_times(times, starts, ends) + flagged_bool = numpy.zeros(len(times), dtype=bool) + flagged_bool[idx] = True + return flagged_bool
+ + +
+[docs] + def advance(self, blocksize): + """ Add blocksize seconds more to the buffer, push blocksize seconds + from the beginning. + + Parameters + ---------- + blocksize: int + The number of seconds to attempt to read + + Returns + ------- + status: boolean + Returns True if advance is successful, + False if not. + """ + idq_ts = self.idq.attempt_advance(blocksize) + idq_state_ts = self.idq_state.attempt_advance(blocksize) + return (idq_ts is not None) and (idq_state_ts is not None)
+ + +
+[docs] + def null_advance(self, blocksize): + """Advance and insert zeros + + Parameters + ---------- + blocksize: int + The number of seconds to advance the buffers + """ + self.idq.null_advance(blocksize) + self.idq_state.null_advance(blocksize)
+
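A minimal usage sketch for the live-data buffers above; the frame glob, channel names, threshold, and GPS time are illustrative placeholders rather than values taken from this module:

from pycbc.frame import iDQBuffer

# Hypothetical frame source and channel names, for illustration only
idq = iDQBuffer('/dev/shm/llhoft/H1/*.gwf',
                'H1:IDQ-FAP',
                'H1:IDQ-OK',
                idq_threshold=0.01,
                start_time=1264316116,
                max_buffer=512)

# In a live loop: try to read 4 more seconds; if reading fails,
# pad the buffers with zeros so they stay aligned with the analysis.
if not idq.advance(4):
    idq.null_advance(4)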
+ + + +__all__ = [ + 'locations_to_cache', + 'read_frame', + 'query_and_read_frame', + 'frame_paths', + 'write_frame', + 'DataBuffer', + 'StatusBuffer', + 'iDQBuffer' +] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/frame/gwosc.html b/latest/html/_modules/pycbc/frame/gwosc.html new file mode 100644 index 00000000000..77311c89dbe --- /dev/null +++ b/latest/html/_modules/pycbc/frame/gwosc.html @@ -0,0 +1,331 @@ + + + + + + pycbc.frame.gwosc — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.frame.gwosc

+# Copyright (C) 2017 Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module contains functions for getting data from the Gravitational Wave
+Open Science Center (GWOSC).
+"""
+import logging
+import json
+from urllib.request import urlopen
+
+from pycbc.io import get_file
+from pycbc.frame import read_frame
+
+logger = logging.getLogger('pycbc.frame.gwosc')
+
+_GWOSC_URL = "https://www.gwosc.org/archive/links/%s/%s/%s/%s/json/"
+
+
+
+[docs] +def get_run(time, ifo=None): + """Return the run name for a given time. + + Parameters + ---------- + time: int + The GPS time. + ifo: str + The interferometer prefix string. Optional and normally unused, + except for some special times where data releases were made for a + single detector under unusual circumstances. For example, to get + the data around GW170608 in the Hanford detector. + """ + cases = [ + ( + # ifo is only needed in this special case, otherwise, + # the run name is the same for all ifos + 1180911618 <= time <= 1180982427 and ifo == 'H1', + 'BKGW170608_16KHZ_R1' + ), + (1253977219 <= time <= 1320363336, 'O3b_16KHZ_R1'), + (1238166018 <= time <= 1253977218, 'O3a_16KHZ_R1'), + (1164556817 <= time <= 1187733618, 'O2_16KHZ_R1'), + (1126051217 <= time <= 1137254417, 'O1'), + (815011213 <= time <= 875318414, 'S5'), + (930787215 <= time <= 971568015, 'S6') + ] + for condition, name in cases: + if condition: + return name + raise ValueError(f'Time {time} not available in a public dataset')
+ + + +def _get_channel(time): + if time < 1164556817: + return 'LOSC-STRAIN' + return 'GWOSC-16KHZ_R1_STRAIN' + + +
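As a quick check of the run ranges above, the GPS time of GW150914 falls in the O1 range, while the single-detector GW170608 release is only selected when the Hanford prefix is passed explicitly:

from pycbc.frame.gwosc import get_run

print(get_run(1126259462))            # 'O1' (epoch of GW150914)
print(get_run(1180922494, ifo='H1'))  # 'BKGW170608_16KHZ_R1'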
+[docs] +def gwosc_frame_json(ifo, start_time, end_time): + """Get the information about the public data files in a duration of time. + + Parameters + ---------- + ifo: str + The name of the interferometer to find the information about. + start_time: int + The start time in GPS seconds. + end_time: int + The end time in GPS seconds. + + Returns + ------- + info: dict + A dictionary containing information about the files that span the + requested times. + """ + run = get_run(start_time) + run2 = get_run(end_time) + if run != run2: + raise ValueError( + 'Spanning multiple runs is not currently supported. ' + f'You have requested data that uses both {run} and {run2}' + ) + + url = _GWOSC_URL % (run, ifo, int(start_time), int(end_time)) + + try: + return json.loads(urlopen(url).read().decode()) + except Exception as exc: + msg = ('Failed to find gwf files for ' + f'ifo={ifo}, run={run}, between {start_time}-{end_time}') + raise ValueError(msg) from exc
+ + + +
+[docs] +def gwosc_frame_urls(ifo, start_time, end_time): + """Get a list of URLs to GWOSC frame files. + + Parameters + ---------- + ifo: str + The name of the interferometer to find the information about. + start_time: int + The start time in GPS seconds. + end_time: int + The end time in GPS seconds. + + Returns + ------- + frame_files: list + A list of URLs to the GWF files that span the + requested times. + """ + data = gwosc_frame_json(ifo, start_time, end_time)['strain'] + return [d['url'] for d in data if d['format'] == 'gwf']
+ + + +
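A short usage sketch; this queries the GWOSC archive over the network, so it only works with internet access:

from pycbc.frame.gwosc import gwosc_frame_urls

urls = gwosc_frame_urls('H1', 1126259446, 1126259478)
print(len(urls), urls[0])  # number of GWF files spanning the request, and the first URL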
+[docs] +def read_frame_gwosc(channels, start_time, end_time): + """Read channels from GWOSC data. + + Parameters + ---------- + channels: str or list + The channel name to read or list of channel names. + start_time: int + The start time in GPS seconds. + end_time: int + The end time in GPS seconds. + + Returns + ------- + ts: TimeSeries + Returns a timeseries or list of timeseries with the requested data. + """ + if not isinstance(channels, list): + channels = [channels] + ifos = [c[0:2] for c in channels] + urls = {} + for ifo in ifos: + urls[ifo] = gwosc_frame_urls(ifo, start_time, end_time) + if len(urls[ifo]) == 0: + raise ValueError("No data found for %s so we " + "can't produce a time series" % ifo) + + fnames = {ifo: [] for ifo in ifos} + for ifo in ifos: + for url in urls[ifo]: + fname = get_file(url, cache=True) + fnames[ifo].append(fname) + + ts_list = [read_frame(fnames[channel[0:2]], channel, + start_time=start_time, end_time=end_time) + for channel in channels] + if len(ts_list) == 1: + return ts_list[0] + return ts_list
+ + + +
+[docs] +def read_strain_gwosc(ifo, start_time, end_time): + """Get the strain data from the GWOSC data. + + Parameters + ---------- + ifo: str + The name of the interferometer to read data for. Ex. 'H1', 'L1', 'V1'. + start_time: int + The start time in GPS seconds. + end_time: int + The end time in GPS seconds. + + Returns + ------- + ts: TimeSeries + Returns a timeseries with the strain data. + """ + channel = _get_channel(start_time) + return read_frame_gwosc(f'{ifo}:{channel}', start_time, end_time)
+ +
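Putting the pieces together, fetching 32 s of public Hanford strain around GW150914 might look like the following; the call downloads and caches the matching GWOSC frame file, so it needs network access on first use:

from pycbc.frame.gwosc import read_strain_gwosc

ts = read_strain_gwosc('H1', 1126259446, 1126259478)
print(ts.sample_rate, float(ts.start_time), ts.duration)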
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/frame/store.html b/latest/html/_modules/pycbc/frame/store.html new file mode 100644 index 00000000000..29cce41542b --- /dev/null +++ b/latest/html/_modules/pycbc/frame/store.html @@ -0,0 +1,206 @@ + + + + + + pycbc.frame.store — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.frame.store

+# Copyright (C) 2019 Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module contains functions for reading in data from HDF stores
+"""
+import logging
+import numpy
+
+from pycbc.types import TimeSeries
+from pycbc.io.hdf import HFile
+
+logger = logging.getLogger('pycbc.frame.store')
+
+
+
+[docs] +def read_store(fname, channel, start_time, end_time): + """ Read time series data from hdf store + + Parameters + ---------- + fname: str + Name of hdf store file + channel: str + Channel name to read + start_time: int + GPS time to start reading from + end_time: int + GPS time to end time series + + Returns + ------- + ts: pycbc.types.TimeSeries + Time series containing the requested data + + """ + fhandle = HFile(fname, 'r') + if channel not in fhandle: + raise ValueError('Could not find channel name {}'.format(channel)) + + # Determine which segment data lies in (can only read contiguous data now) + starts = fhandle[channel]['segments']['start'][:] + ends = fhandle[channel]['segments']['end'][:] + + diff = start_time - starts + loc = numpy.where(diff >= 0)[0] + sidx = loc[diff[loc].argmin()] + + stime = starts[sidx] + etime = ends[sidx] + + if stime > start_time: + raise ValueError("Cannot read data segment before {}".format(stime)) + + if etime < end_time: + raise ValueError("Cannot read data segment past {}".format(etime)) + + data = fhandle[channel][str(sidx)] + sample_rate = len(data) / (etime - stime) + + start = int((start_time - stime) * sample_rate) + end = int((end_time - stime) * sample_rate) + return TimeSeries(data[start:end], delta_t=1.0/sample_rate, + epoch=start_time)
+ +
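A hedged usage sketch; the store file name and channel below are hypothetical placeholders for an HDF store produced by the matching PyCBC writing tools:

from pycbc.frame.store import read_store

# 'strain_store.hdf' and the channel name are assumed for illustration
ts = read_store('strain_store.hdf', 'H1:GDS-CALIB_STRAIN',
                1187008862, 1187008902)
print(ts.sample_rate, ts.duration)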
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/burn_in.html b/latest/html/_modules/pycbc/inference/burn_in.html new file mode 100644 index 00000000000..43743d988c8 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/burn_in.html @@ -0,0 +1,1020 @@ + + + + + + pycbc.inference.burn_in — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.burn_in

+# Copyright (C) 2017  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides classes and functions for determining when Markov chains
+have burned in.
+"""
+
+
+import logging
+from abc import ABCMeta, abstractmethod
+import numpy
+from scipy.stats import ks_2samp
+
+from pycbc.io.record import get_vars_from_arg
+
+logger = logging.getLogger('pycbc.inference.burn_in')
+
+# The value to use for a burn-in iteration if a chain is not burned in
+NOT_BURNED_IN_ITER = -1
+
+
+#
+# =============================================================================
+#
+#                              Convenience functions
+#
+# =============================================================================
+#
+
+
+
+[docs] +def ks_test(samples1, samples2, threshold=0.9): + """Applies a KS test to determine if two sets of samples are the same. + + The KS test is applied parameter-by-parameter. If the two-tailed p-value + returned by the test is greater than ``threshold``, the samples are + considered to be the same. + + Parameters + ---------- + samples1 : dict + Dictionary mapping parameters to the first set of samples. + samples2 : dict + Dictionary mapping parameters to the second set of samples. + threshold : float + The threshold to use for the p-value. Default is 0.9. + + Returns + ------- + dict : + Dictionary mapping parameter names to booleans indicating whether the + given parameter passes the KS test. + """ + is_the_same = {} + assert set(samples1.keys()) == set(samples2.keys()), ( + "samples1 and 2 must have the same parameters") + # iterate over the parameters + for param in samples1: + s1 = samples1[param] + s2 = samples2[param] + _, p_value = ks_2samp(s1, s2) + is_the_same[param] = p_value > threshold + return is_the_same
+ + + +
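A self-contained illustration with synthetic samples; since the decision is p-value > threshold, a low threshold makes matching distributions usually pass while a clearly shifted distribution fails (the exact booleans vary with the random draw):

import numpy
from pycbc.inference.burn_in import ks_test

rng = numpy.random.default_rng(0)
samples1 = {'x': rng.normal(size=5000), 'y': rng.normal(size=5000)}
samples2 = {'x': rng.normal(size=5000), 'y': rng.normal(loc=2.0, size=5000)}
print(ks_test(samples1, samples2, threshold=0.1))
# typically {'x': True, 'y': False}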
+[docs] +def max_posterior(lnps_per_walker, dim): + """Burn in based on samples being within dim/2 of maximum posterior. + + Parameters + ---------- + lnps_per_walker : 2D array + Array of values that are proportional to the log posterior values. Must + have shape ``nwalkers x niterations``. + dim : int + The dimension of the parameter space. + + Returns + ------- + burn_in_idx : array of int + The burn in indices of each walker. If a walker is not burned in, its + index will be set to :py:data:`NOT_BURNED_IN_ITER`. + is_burned_in : array of bool + Whether or not a walker is burned in. + """ + if len(lnps_per_walker.shape) != 2: + raise ValueError("lnps_per_walker must have shape " + "nwalkers x niterations") + # find the value to compare against + max_p = lnps_per_walker.max() + criteria = max_p - dim/2. + nwalkers, _ = lnps_per_walker.shape + burn_in_idx = numpy.empty(nwalkers, dtype=int) + is_burned_in = numpy.empty(nwalkers, dtype=bool) + # find the first iteration in each chain where the logpost has exceeded + # max_p - dim/2 + for ii in range(nwalkers): + chain = lnps_per_walker[ii, :] + passedidx = numpy.where(chain >= criteria)[0] + is_burned_in[ii] = passedidx.size > 0 + if is_burned_in[ii]: + burn_in_idx[ii] = passedidx[0] + else: + burn_in_idx[ii] = NOT_BURNED_IN_ITER + return burn_in_idx, is_burned_in
+ + + +
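A tiny synthetic example: with two walkers and dim = 2, the first walker comes within dim/2 of the maximum log posterior at index 2, while the second never does:

import numpy
from pycbc.inference.burn_in import max_posterior

lnps = numpy.array([[-50., -50., -1.0, -0.5, -0.2],
                    [-50., -50., -50., -50., -40.]])
idx, burned = max_posterior(lnps, dim=2)
print(idx, burned)   # [ 2 -1] [ True False]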
+[docs] +def posterior_step(logposts, dim): + """Finds the last time a chain made a jump > dim/2. + + Parameters + ---------- + logposts : array + 1D array of values that are proportional to the log posterior values. + dim : int + The dimension of the parameter space. + + Returns + ------- + int + The index of the last time the logpost made a jump > dim/2. If that + never happened, returns 0. + """ + if logposts.ndim > 1: + raise ValueError("logposts must be a 1D array") + criteria = dim/2. + dp = numpy.diff(logposts) + indices = numpy.where(dp >= criteria)[0] + if indices.size > 0: + idx = indices[-1] + 1 + else: + idx = 0 + return idx
+ + + +
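Similarly for posterior_step, the index returned is one past the last jump of at least dim/2 in the log posterior (or 0 if no such jump occurred):

import numpy
from pycbc.inference.burn_in import posterior_step

logposts = numpy.array([-10.0, -9.5, -4.0, -3.8, -3.7])
print(posterior_step(logposts, dim=4))   # 2: the jump from -9.5 to -4.0 exceeds dim/2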
+[docs] +def nacl(nsamples, acls, nacls=5): + """Burn in based on ACL. + + This applies the following test to determine burn in: + + 1. The first half of the chain is ignored. + + 2. An ACL is calculated from the second half. + + 3. If ``nacls`` times the ACL is < the length of the chain / 2, + the chain is considered to be burned in at the half-way point. + + Parameters + ---------- + nsamples : int + The number of samples in the chain(s). + acls : dict + Dictionary of parameter -> ACL(s). The ACLs for each parameter may + be an integer or an array of integers (for multiple chains). + nacls : int, optional + The number of ACLs the chain(s) must have gone past the halfway point + in order to be considered burned in. Default is 5. + + Returns + ------- + dict + Dictionary of parameter -> boolean(s) indicating if the chain(s) pass + the test. If an array of values was provided for the acls, the values + will be arrays of booleans. + """ + kstart = int(nsamples / 2.) + return {param: (nacls * acl) < kstart for (param, acl) in acls.items()}
+ + + +
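For example, with 1000 samples the halfway point is 500, so a parameter whose ACL is 80 passes (5 * 80 = 400 < 500) while one whose ACL is 120 does not:

from pycbc.inference.burn_in import nacl

print(nacl(1000, {'mass1': 80, 'mass2': 120}))   # {'mass1': True, 'mass2': False}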
+[docs] +def evaluate_tests(burn_in_test, test_is_burned_in, test_burn_in_iter): + """Evaluates burn in data from multiple tests. + + The iteration to use for burn-in depends on the logic in the burn-in + test string. For example, if the test was 'max_posterior | nacl' and + max_posterior burned-in at iteration 5000 while nacl burned in at + iteration 6000, we'd want to use 5000 as the burn-in iteration. + However, if the test was 'max_posterior & nacl', we'd want to use + 6000 as the burn-in iteration. This function handles all cases by + doing the following: first, take the collection of burn in iterations + from all the burn in tests that were applied. Next, cycle over the + iterations in increasing order, checking which tests have burned in + by that point. Then evaluate the burn-in string at that point to see + if it passes, and if so, what the iteration is. The first point that + the test passes is used as the burn-in iteration. + + Parameters + ---------- + burn_in_test : str + The test to apply; e.g., ``'max_posterior & nacl'``. + test_is_burned_in : dict + Dictionary of test name -> boolean indicating whether a specific burn + in test has passed. + test_burn_in_iter : dict + Dictionary of test name -> int indicating when a specific test burned + in. + + Returns + ------- + is_burned_in : bool + Whether or not the data passes all burn in tests. + burn_in_iteration : + The iteration at which all the tests pass. If the tests did not all + pass (``is_burned_in`` is false), then returns + :py:data:`NOT_BURNED_IN_ITER`. + """ + burn_in_iters = numpy.unique(list(test_burn_in_iter.values())) + burn_in_iters.sort() + for ii in burn_in_iters: + test_results = {t: (test_is_burned_in[t] and + 0 <= test_burn_in_iter[t] <= ii) + for t in test_is_burned_in} + is_burned_in = eval(burn_in_test, {"__builtins__": None}, + test_results) + if is_burned_in: + break + if not is_burned_in: + ii = NOT_BURNED_IN_ITER + return is_burned_in, ii
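The scenario described in the docstring, written out: with max_posterior burning in at iteration 5000 and nacl at 6000, an OR of the tests is satisfied at 5000 while an AND has to wait until 6000:

from pycbc.inference.burn_in import evaluate_tests

is_in = {'max_posterior': True, 'nacl': True}
iters = {'max_posterior': 5000, 'nacl': 6000}
print(evaluate_tests('max_posterior | nacl', is_in, iters))   # burned in at iteration 5000
print(evaluate_tests('max_posterior & nacl', is_in, iters))   # burned in at iteration 6000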
+ + + +# +# ============================================================================= +# +# Burn in classes +# +# ============================================================================= +# + + +
+[docs] +class BaseBurnInTests(metaclass=ABCMeta): + """Base class for burn in tests.""" + + available_tests = ('halfchain', 'min_iterations', 'max_posterior', + 'posterior_step', 'nacl', + ) + + # pylint: disable=unnecessary-pass + + def __init__(self, sampler, burn_in_test, **kwargs): + self.sampler = sampler + # determine the burn-in tests that are going to be done + self.do_tests = get_vars_from_arg(burn_in_test) + self.burn_in_test = burn_in_test + self.is_burned_in = False + self.burn_in_iteration = NOT_BURNED_IN_ITER + self.test_is_burned_in = {} # burn in status per test + self.test_burn_in_iteration = {} # burn in iter per test + self.test_aux_info = {} # any additional information the test stores + # Arguments specific to each test... + # for nacl: + self._nacls = int(kwargs.pop('nacls', 5)) + # for max_posterior and posterior_step + self._ndim = int(kwargs.pop('ndim', len(sampler.variable_params))) + # for min iterations + self._min_iterations = int(kwargs.pop('min_iterations', 0)) + +
+[docs] + @abstractmethod + def burn_in_index(self, filename): + """The burn in index (retrieved from the iteration). + + This is an abstract method because how this is evaluated depends on + if this is an ensemble MCMC or not. + """ + pass
+ + + def _getniters(self, filename): + """Convenience function to get the number of iterations in the file. + + If `niterations` hasn't been written to the file yet, just returns 0. + """ + with self.sampler.io(filename, 'r') as fp: + try: + niters = fp.niterations + except KeyError: + niters = 0 + return niters + + def _getnsamples(self, filename): + """Convenience function to get the number of samples saved in the file. + + If no samples have been written to the file yet, just returns 0. + """ + with self.sampler.io(filename, 'r') as fp: + try: + group = fp[fp.samples_group] + # we'll just use the first parameter + params = list(group.keys()) + nsamples = group[params[0]].shape[-1] + except (KeyError, IndexError): + nsamples = 0 + return nsamples + + def _index2iter(self, filename, index): + """Converts the index in some samples at which burn in occurs to the + iteration of the sampler that corresponds to. + """ + with self.sampler.io(filename, 'r') as fp: + thin_interval = fp.thinned_by + return index * thin_interval + + def _iter2index(self, filename, iteration): + """Converts an iteration to the index it corresponds to. + """ + with self.sampler.io(filename, 'r') as fp: + thin_interval = fp.thinned_by + return iteration // thin_interval + + def _getlogposts(self, filename): + """Convenience function for retrieving log posteriors. + + Parameters + ---------- + filename : str + The file to read. + + Returns + ------- + array + The log posterior values. They are not flattened, so have dimension + nwalkers x niterations. + """ + with self.sampler.io(filename, 'r') as fp: + samples = fp.read_raw_samples( + ['loglikelihood', 'logprior'], thin_start=0, thin_interval=1, + flatten=False) + logposts = samples['loglikelihood'] + samples['logprior'] + return logposts + + def _getacls(self, filename, start_index): + """Convenience function for calculating acls for the given filename. + """ + return self.sampler.compute_acl(filename, start_index=start_index) + + def _getaux(self, test): + """Convenience function for getting auxilary information. + + Parameters + ---------- + test : str + The name of the test to retrieve auxilary information about. + + Returns + ------- + dict + The ``test_aux_info[test]`` dictionary. If a dictionary does + not exist yet for the given test, an empty dictionary will be + created and saved to ``test_aux_info[test]``. + """ + try: + aux = self.test_aux_info[test] + except KeyError: + aux = self.test_aux_info[test] = {} + return aux + +
+[docs] + def halfchain(self, filename): + """Just uses half the chain as the burn-in iteration. + """ + niters = self._getniters(filename) + # this test cannot determine when something will burn in + # only when it was not burned in in the past + self.test_is_burned_in['halfchain'] = True + self.test_burn_in_iteration['halfchain'] = niters//2
+ + +
+[docs] + def min_iterations(self, filename): + """Just checks that the sampler has been run for the minimum number + of iterations. + """ + niters = self._getniters(filename) + is_burned_in = self._min_iterations < niters + if is_burned_in: + burn_in_iter = self._min_iterations + else: + burn_in_iter = NOT_BURNED_IN_ITER + self.test_is_burned_in['min_iterations'] = is_burned_in + self.test_burn_in_iteration['min_iterations'] = burn_in_iter
+ + +
+[docs] + @abstractmethod + def max_posterior(self, filename): + """Carries out the max posterior test and stores the results.""" + pass
+ + +
+[docs] + @abstractmethod + def posterior_step(self, filename): + """Carries out the posterior step test and stores the results.""" + pass
+ + +
+[docs] + @abstractmethod + def nacl(self, filename): + """Carries out the nacl test and stores the results.""" + pass
+ + +
+[docs] + @abstractmethod + def evaluate(self, filename): + """Performs all tests and evaluates the results to determine if and + when all tests pass. + """ + pass
+ + +
+[docs] + def write(self, fp, path=None): + """Writes burn-in info to an open HDF file. + + Parameters + ---------- + fp : pycbc.inference.io.base.BaseInferenceFile + Open HDF file to write the data to. The HDF file should be an + instance of a pycbc BaseInferenceFile. + path : str, optional + Path in the HDF file to write the data to. Default is (None) is + to write to the path given by the file's ``sampler_group`` + attribute. + """ + if path is None: + path = fp.sampler_group + fp.write_data('burn_in_test', self.burn_in_test, path) + fp.write_data('is_burned_in', self.is_burned_in, path) + fp.write_data('burn_in_iteration', self.burn_in_iteration, path) + testgroup = 'burn_in_tests' + # write individual test data + for tst in self.do_tests: + subpath = '/'.join([path, testgroup, tst]) + fp.write_data('is_burned_in', self.test_is_burned_in[tst], subpath) + fp.write_data('burn_in_iteration', + self.test_burn_in_iteration[tst], + subpath) + # write auxiliary info + if tst in self.test_aux_info: + for name, data in self.test_aux_info[tst].items(): + fp.write_data(name, data, subpath)
+ + + @staticmethod + def _extra_tests_from_config(cp, section, tag): + """For loading class-specific tests.""" + # pylint: disable=unused-argument + return {} + +
+[docs] + @classmethod + def from_config(cls, cp, sampler): + """Loads burn in from section [sampler-burn_in].""" + section = 'sampler' + tag = 'burn_in' + burn_in_test = cp.get_opt_tag(section, 'burn-in-test', tag) + kwargs = {} + if cp.has_option_tag(section, 'nacl', tag): + kwargs['nacl'] = int(cp.get_opt_tag(section, 'nacl', tag)) + if cp.has_option_tag(section, 'ndim', tag): + kwargs['ndim'] = int( + cp.get_opt_tag(section, 'ndim', tag)) + if cp.has_option_tag(section, 'min-iterations', tag): + kwargs['min_iterations'] = int( + cp.get_opt_tag(section, 'min-iterations', tag)) + # load any class specific tests + kwargs.update(cls._extra_tests_from_config(cp, section, tag)) + return cls(sampler, burn_in_test, **kwargs)
+
+ + + +
+[docs] +class MCMCBurnInTests(BaseBurnInTests): + """Burn-in tests for collections of independent MCMC chains. + + This differs from EnsembleMCMCBurnInTests in that chains are treated as + being independent of each other. The ``is_burned_in`` attribute will be + True if `any` chain passes the burn in tests (whereas in + EnsembleMCMCBurnInTests, all chains must pass the burn in tests). In other + words, independent samples can be collected even if all of the chains are + not burned in. + """ + def __init__(self, sampler, burn_in_test, **kwargs): + super(MCMCBurnInTests, self).__init__(sampler, burn_in_test, **kwargs) + try: + nchains = sampler.nchains + except AttributeError: + nchains = sampler.nwalkers + self.nchains = nchains + self.is_burned_in = numpy.zeros(self.nchains, dtype=bool) + self.burn_in_iteration = numpy.repeat(NOT_BURNED_IN_ITER, self.nchains) + +
+[docs] + def burn_in_index(self, filename): + """The burn in index (retrieved from the iteration).""" + burn_in_index = self._iter2index(filename, self.burn_in_iteration) + # don't set if it isn't burned in + burn_in_index[~self.is_burned_in] = NOT_BURNED_IN_ITER + return burn_in_index
+ + +
+[docs] + def max_posterior(self, filename): + """Applies max posterior test.""" + logposts = self._getlogposts(filename) + burn_in_idx, is_burned_in = max_posterior(logposts, self._ndim) + # convert index to iterations + burn_in_iter = self._index2iter(filename, burn_in_idx) + burn_in_iter[~is_burned_in] = NOT_BURNED_IN_ITER + # save + test = 'max_posterior' + self.test_is_burned_in[test] = is_burned_in + self.test_burn_in_iteration[test] = burn_in_iter
+ + +
+[docs] + def posterior_step(self, filename): + """Applies the posterior-step test.""" + logposts = self._getlogposts(filename) + burn_in_idx = numpy.array([posterior_step(logps, self._ndim) + for logps in logposts]) + # this test cannot determine when something will burn in + # only when it was not burned in in the past + test = 'posterior_step' + if test not in self.test_is_burned_in: + self.test_is_burned_in[test] = numpy.ones(self.nchains, dtype=bool) + # convert index to iterations + self.test_burn_in_iteration[test] = self._index2iter(filename, + burn_in_idx)
+ + +
+[docs] + def nacl(self, filename): + """Applies the :py:func:`nacl` test.""" + nsamples = self._getnsamples(filename) + acls = self._getacls(filename, start_index=nsamples//2) + is_burned_in = nacl(nsamples, acls, self._nacls) + # stack the burn in results into an nparams x nchains array + burn_in_per_chain = numpy.stack(list(is_burned_in.values())).all( + axis=0) + # store + test = 'nacl' + self.test_is_burned_in[test] = burn_in_per_chain + try: + burn_in_iter = self.test_burn_in_iteration[test] + except KeyError: + # hasn't been stored yet + burn_in_iter = numpy.repeat(NOT_BURNED_IN_ITER, self.nchains) + self.test_burn_in_iteration[test] = burn_in_iter + burn_in_iter[burn_in_per_chain] = self._index2iter(filename, + nsamples//2) + # add the status for each parameter as additional information + self.test_aux_info[test] = is_burned_in
+ + +
+[docs] + def evaluate(self, filename): + """Runs all of the burn-in tests.""" + # evaluate all the tests + for tst in self.do_tests: + logger.info("Evaluating %s burn-in test", tst) + getattr(self, tst)(filename) + # evaluate each chain at a time + for ci in range(self.nchains): + # some tests (like halfchain) just store a single bool for all + # chains + tibi = {t: r[ci] if isinstance(r, numpy.ndarray) else r + for t, r in self.test_is_burned_in.items()} + tbi = {t: r[ci] if isinstance(r, numpy.ndarray) else r + for t, r in self.test_burn_in_iteration.items()} + is_burned_in, burn_in_iter = evaluate_tests(self.burn_in_test, + tibi, tbi) + self.is_burned_in[ci] = is_burned_in + self.burn_in_iteration[ci] = burn_in_iter + logger.info("Number of chains burned in: %i of %i", + self.is_burned_in.sum(), self.nchains)
+ + +
+[docs] + def write(self, fp, path=None): + """Writes burn-in info to an open HDF file. + + Parameters + ---------- + fp : pycbc.inference.io.base.BaseInferenceFile + Open HDF file to write the data to. The HDF file should be an + instance of a pycbc BaseInferenceFile. + path : str, optional + Path in the HDF file to write the data to. Default is (None) is + to write to the path given by the file's ``sampler_group`` + attribute. + """ + if path is None: + path = fp.sampler_group + super(MCMCBurnInTests, self).write(fp, path) + # add number of chains burned in as additional metadata + fp.write_data('nchains_burned_in', self.is_burned_in.sum(), path)
+
+ + + +
+[docs] +class MultiTemperedMCMCBurnInTests(MCMCBurnInTests): + """Adds support for multiple temperatures to + :py:class:`MCMCBurnInTests`. + """ + + def _getacls(self, filename, start_index): + """Convenience function for calculating acls for the given filename. + + This function is used by the ``n_acl`` burn-in test. That function + expects the returned ``acls`` dict to just report a single ACL for + each parameter. Since multi-tempered samplers return an array of ACLs + for each parameter instead, this takes the max over the array before + returning. + + Since we calculate the acls, this will also store it to the sampler. + + Parameters + ---------- + filename : str + Name of the file to retrieve samples from. + start_index : int + Index to start calculating ACLs. + + Returns + ------- + dict : + Dictionary of parameter names -> array giving ACL for each chain. + """ + acls = super(MultiTemperedMCMCBurnInTests, self)._getacls( + filename, start_index) + # acls will have shape ntemps x nchains, flatten to nchains + return {param: vals.max(axis=0) for (param, vals) in acls.items()} + + def _getlogposts(self, filename): + """Convenience function for retrieving log posteriors. + + This just gets the coldest temperature chain, and returns arrays with + shape nwalkers x niterations, so the parent class can run the same + ``posterior_step`` function. + """ + return _multitemper_getlogposts(self.sampler, filename)
+ + + +
+[docs] +class EnsembleMCMCBurnInTests(BaseBurnInTests): + """Provides methods for estimating burn-in of an ensemble MCMC.""" + + available_tests = ('halfchain', 'min_iterations', 'max_posterior', + 'posterior_step', 'nacl', 'ks_test', + ) + + def __init__(self, sampler, burn_in_test, **kwargs): + super(EnsembleMCMCBurnInTests, self).__init__( + sampler, burn_in_test, **kwargs) + # for kstest + self._ksthreshold = float(kwargs.pop('ks_threshold', 0.9)) + +
+[docs] + def burn_in_index(self, filename): + """The burn in index (retrieved from the iteration).""" + if self.is_burned_in: + index = self._iter2index(filename, self.burn_in_iteration) + else: + index = NOT_BURNED_IN_ITER + return index
+ + +
+[docs] + def max_posterior(self, filename): + """Applies max posterior test to self.""" + logposts = self._getlogposts(filename) + burn_in_idx, is_burned_in = max_posterior(logposts, self._ndim) + all_burned_in = is_burned_in.all() + if all_burned_in: + burn_in_iter = self._index2iter(filename, burn_in_idx.max()) + else: + burn_in_iter = NOT_BURNED_IN_ITER + # store + test = 'max_posterior' + self.test_is_burned_in[test] = all_burned_in + self.test_burn_in_iteration[test] = burn_in_iter + aux = self._getaux(test) + # additional info + aux['iteration_per_walker'] = self._index2iter(filename, burn_in_idx) + aux['status_per_walker'] = is_burned_in
+ + +
+[docs] + def posterior_step(self, filename): + """Applies the posterior-step test.""" + logposts = self._getlogposts(filename) + burn_in_idx = numpy.array([posterior_step(logps, self._ndim) + for logps in logposts]) + burn_in_iters = self._index2iter(filename, burn_in_idx) + # this test cannot determine when something will burn in + # only when it was not burned in in the past + test = 'posterior_step' + self.test_is_burned_in[test] = True + self.test_burn_in_iteration[test] = burn_in_iters.max() + # store the iteration per walker as additional info + aux = self._getaux(test) + aux['iteration_per_walker'] = burn_in_iters
+ + +
+[docs] + def nacl(self, filename): + """Applies the :py:func:`nacl` test.""" + nsamples = self._getnsamples(filename) + acls = self._getacls(filename, start_index=nsamples//2) + is_burned_in = nacl(nsamples, acls, self._nacls) + all_burned_in = all(is_burned_in.values()) + if all_burned_in: + burn_in_iter = self._index2iter(filename, nsamples//2) + else: + burn_in_iter = NOT_BURNED_IN_ITER + # store + test = 'nacl' + self.test_is_burned_in[test] = all_burned_in + self.test_burn_in_iteration[test] = burn_in_iter + # store the status per parameter as additional info + aux = self._getaux(test) + aux['status_per_parameter'] = is_burned_in
+ + +
+[docs] + def ks_test(self, filename): + """Applies ks burn-in test.""" + nsamples = self._getnsamples(filename) + with self.sampler.io(filename, 'r') as fp: + # get the samples from the mid point + samples1 = fp.read_raw_samples( + ['loglikelihood', 'logprior'], iteration=int(nsamples/2.)) + # get the last samples + samples2 = fp.read_raw_samples( + ['loglikelihood', 'logprior'], iteration=-1) + # do the test + # is_the_same is a dictionary of params --> bool indicating whether or + # not the 1D marginal is the same at the half way point + is_the_same = ks_test(samples1, samples2, threshold=self._ksthreshold) + is_burned_in = all(is_the_same.values()) + if is_burned_in: + burn_in_iter = self._index2iter(filename, int(nsamples//2)) + else: + burn_in_iter = NOT_BURNED_IN_ITER + # store + test = 'ks_test' + self.test_is_burned_in[test] = is_burned_in + self.test_burn_in_iteration[test] = burn_in_iter + # store the test per parameter as additional info + aux = self._getaux(test) + aux['status_per_parameter'] = is_the_same
+ + +
+[docs] + def evaluate(self, filename): + """Runs all of the burn-in tests.""" + # evaluate all the tests + for tst in self.do_tests: + logger.info("Evaluating %s burn-in test", tst) + getattr(self, tst)(filename) + is_burned_in, burn_in_iter = evaluate_tests( + self.burn_in_test, self.test_is_burned_in, + self.test_burn_in_iteration) + self.is_burned_in = is_burned_in + self.burn_in_iteration = burn_in_iter + logger.info("Is burned in: %r", self.is_burned_in) + if self.is_burned_in: + logger.info("Burn-in iteration: %i", + int(self.burn_in_iteration))
+ + + @staticmethod + def _extra_tests_from_config(cp, section, tag): + """Loads the ks test settings from the config file.""" + kwargs = {} + if cp.has_option_tag(section, 'ks-threshold', tag): + kwargs['ks_threshold'] = float( + cp.get_opt_tag(section, 'ks-threshold', tag)) + return kwargs
+ + + +
+[docs] +class EnsembleMultiTemperedMCMCBurnInTests(EnsembleMCMCBurnInTests): + """Adds support for multiple temperatures to + :py:class:`EnsembleMCMCBurnInTests`. + """ + + def _getacls(self, filename, start_index): + """Convenience function for calculating acls for the given filename. + + This function is used by the ``n_acl`` burn-in test. That function + expects the returned ``acls`` dict to just report a single ACL for + each parameter. Since multi-tempered samplers return an array of ACLs + for each parameter instead, this takes the max over the array before + returning. + + Since we calculate the acls, this will also store it to the sampler. + """ + acls = super(EnsembleMultiTemperedMCMCBurnInTests, self)._getacls( + filename, start_index) + # return the max for each parameter + return {param: vals.max() for (param, vals) in acls.items()} + + def _getlogposts(self, filename): + """Convenience function for retrieving log posteriors. + + This just gets the coldest temperature chain, and returns arrays with + shape nwalkers x niterations, so the parent class can run the same + ``posterior_step`` function. + """ + return _multitemper_getlogposts(self.sampler, filename)
+ + + +def _multitemper_getlogposts(sampler, filename): + """Retrieve log posteriors for multi tempered samplers.""" + with sampler.io(filename, 'r') as fp: + samples = fp.read_raw_samples( + ['loglikelihood', 'logprior'], thin_start=0, thin_interval=1, + temps=0, flatten=False) + # reshape to drop the first dimension + for (stat, arr) in samples.items(): + _, nwalkers, niterations = arr.shape + samples[stat] = arr.reshape((nwalkers, niterations)) + logposts = samples['loglikelihood'] + samples['logprior'] + return logposts +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/entropy.html b/latest/html/_modules/pycbc/inference/entropy.html new file mode 100644 index 00000000000..1da4fe23c02 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/entropy.html @@ -0,0 +1,384 @@ + + + + + + pycbc.inference.entropy — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.entropy

+""" The module contains functions for calculating the
+Kullback-Leibler divergence.
+"""
+
+import numpy
+from scipy import stats
+
+
+
+[docs] +def check_hist_params(samples, hist_min, hist_max, hist_bins): + """ Checks that the bound values given for the histogram are consistent, + returning the range if they are or raising an error if they are not. + Also checks that if hist_bins is a str, it corresponds to a method + available in numpy.histogram + + Parameters + ---------- + samples : numpy.array + Set of samples to get the min/max if only one of the bounds is given. + hist_min : numpy.float64 + Minimum value for the histogram. + hist_max : numpy.float64 + Maximum value for the histogram. + hist_bins: int or str + If int, number of equal-width bins to use in numpy.histogram. If str, + it should be one of the methods to calculate the optimal bin width + available in numpy.histogram: ['auto', 'fd', 'doane', 'scott', 'stone', + 'rice', 'sturges', 'sqrt']. Default is 'fd' (Freedman Diaconis + Estimator). This option will be ignored if `kde=True`. + + Returns + ------- + hist_range : tuple or None + The bounds (hist_min, hist_max) or None. + hist_bins : int or str + Number of bins or method for optimal width bin calculation. + """ + + hist_methods = ['auto', 'fd', 'doane', 'scott', 'stone', 'rice', + 'sturges', 'sqrt'] + if not hist_bins: + hist_bins = 'fd' + elif isinstance(hist_bins, str) and hist_bins not in hist_methods: + raise ValueError('Method for calculating bins width must be one of' + ' {}'.format(hist_methods)) + + # No bounds given, return None + if not hist_min and not hist_max: + return None, hist_bins + + # One of the bounds is missing + if hist_min and not hist_max: + hist_max = samples.max() + elif hist_max and not hist_min: + hist_min = samples.min() + # Both bounds given + elif hist_min and hist_max and hist_min >= hist_max: + raise ValueError('hist_min must be lower than hist_max.') + + hist_range = (hist_min, hist_max) + + return hist_range, hist_bins
+ + + +
+[docs] +def compute_pdf(samples, method, bins, hist_min, hist_max): + """ Computes the probability density function for a set of samples. + + Parameters + ---------- + samples : numpy.array + Set of samples to calculate the pdf. + method : str + Method to calculate the pdf. Options are 'kde' for the Kernel Density + Estimator, and 'hist' to use numpy.histogram + bins : str or int, optional + This option will be ignored if method is `kde`. + If int, number of equal-width bins to use when calculating probability + density function from a set of samples of the distribution. If str, it + should be one of the methods to calculate the optimal bin width + available in numpy.histogram: ['auto', 'fd', 'doane', 'scott', 'stone', + 'rice', 'sturges', 'sqrt']. Default is 'fd' (Freedman Diaconis + Estimator). + hist_min : numpy.float64, optional + Minimum of the distributions' values to use. This will be ignored if + `kde=True`. + hist_max : numpy.float64, optional + Maximum of the distributions' values to use. This will be ignored if + `kde=True`. + + Returns + ------- + pdf : numpy.array + Discrete probability distribution calculated from samples. + """ + + if method == 'kde': + samples_kde = stats.gaussian_kde(samples) + npts = 10000 if len(samples) <= 10000 else len(samples) + draw = samples_kde.resample(npts) + pdf = samples_kde.evaluate(draw) + elif method == 'hist': + hist_range, hist_bins = check_hist_params(samples, hist_min, + hist_max, bins) + pdf, _ = numpy.histogram(samples, bins=hist_bins, + range=hist_range, density=True) + else: + raise ValueError('Method not recognized.') + + return pdf
+ + + +
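A self-contained sketch comparing the two estimation paths on the same synthetic sample set (the histogram path uses the Freedman-Diaconis rule here; the KDE path resamples 10000 points internally):

import numpy
from pycbc.inference.entropy import compute_pdf

samples = numpy.random.normal(size=2000)
pdf_hist = compute_pdf(samples, 'hist', bins='fd', hist_min=None, hist_max=None)
pdf_kde = compute_pdf(samples, 'kde', bins=None, hist_min=None, hist_max=None)
print(len(pdf_hist), len(pdf_kde))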
+[docs] +def entropy(pdf1, base=numpy.e): + """ Computes the information entropy for a single parameter + from one probability density function. + + Parameters + ---------- + pdf1 : numpy.array + Probability density function. + base : {numpy.e, numpy.float64}, optional + The logarithmic base to use (choose base 2 for information measured + in bits, default is nats). + + Returns + ------- + numpy.float64 + The information entropy value. + """ + + return stats.entropy(pdf1, base=base)
+ + + +
+[docs] +def kl(samples1, samples2, pdf1=False, pdf2=False, kde=False, + bins=None, hist_min=None, hist_max=None, base=numpy.e): + """ Computes the Kullback-Leibler divergence for a single parameter + from two distributions. + + Parameters + ---------- + samples1 : numpy.array + Samples or probability density function (for the latter must also set + `pdf1=True`). + samples2 : numpy.array + Samples or probability density function (for the latter must also set + `pdf2=True`). + pdf1 : bool + Set to `True` if `samples1` is a probability density function already. + pdf2 : bool + Set to `True` if `samples2` is a probability density function already. + kde : bool + Set to `True` if at least one of `pdf1` or `pdf2` is `False` to + estimate the probability density function using kernel density + estimation (KDE). + bins : int or str, optional + If int, number of equal-width bins to use when calculating probability + density function from a set of samples of the distribution. If str, it + should be one of the methods to calculate the optimal bin width + available in numpy.histogram: ['auto', 'fd', 'doane', 'scott', 'stone', + 'rice', 'sturges', 'sqrt']. Default is 'fd' (Freedman Diaconis + Estimator). This option will be ignored if `kde=True`. + hist_min : numpy.float64 + Minimum of the distributions' values to use. This will be ignored if + `kde=True`. + hist_max : numpy.float64 + Maximum of the distributions' values to use. This will be ignored if + `kde=True`. + base : numpy.float64 + The logarithmic base to use (choose base 2 for information measured + in bits, default is nats). + + Returns + ------- + numpy.float64 + The Kullback-Leibler divergence value. + """ + if pdf1 and pdf2 and kde: + raise ValueError('KDE can only be used when at least one of pdf1 or ' + 'pdf2 is False.') + + sample_groups = {'P': (samples1, pdf1), 'Q': (samples2, pdf2)} + pdfs = {} + for n in sample_groups: + samples, pdf = sample_groups[n] + if pdf: + pdfs[n] = samples + else: + method = 'kde' if kde else 'hist' + pdfs[n] = compute_pdf(samples, method, bins, hist_min, hist_max) + + return stats.entropy(pdfs['P'], qk=pdfs['Q'], base=base)
+ + + +
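A short usage sketch with synthetic samples; the returned number depends on the chosen density-estimation settings, so no particular value is implied here:

import numpy
from pycbc.inference.entropy import kl

rng = numpy.random.default_rng(1)
p = rng.normal(loc=0.0, size=5000)
q = rng.normal(loc=1.0, size=5000)
print(kl(p, q, kde=True))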
+[docs] +def js(samples1, samples2, kde=False, bins=None, hist_min=None, hist_max=None, + base=numpy.e): + """ Computes the Jensen-Shannon divergence for a single parameter + from two distributions. + + Parameters + ---------- + samples1 : numpy.array + Samples. + samples2 : numpy.array + Samples. + kde : bool + Set to `True` to estimate the probability density function using + kernel density estimation (KDE). + bins : int or str, optional + If int, number of equal-width bins to use when calculating probability + density function from a set of samples of the distribution. If str, it + should be one of the methods to calculate the optimal bin width + available in numpy.histogram: ['auto', 'fd', 'doane', 'scott', 'stone', + 'rice', 'sturges', 'sqrt']. Default is 'fd' (Freedman Diaconis + Estimator). This option will be ignored if `kde=True`. + hist_min : numpy.float64 + Minimum of the distributions' values to use. This will be ignored if + `kde=True`. + hist_max : numpy.float64 + Maximum of the distributions' values to use. This will be ignored if + `kde=True`. + base : numpy.float64 + The logarithmic base to use (choose base 2 for information measured + in bits, default is nats). + + Returns + ------- + numpy.float64 + The Jensen-Shannon divergence value. + """ + + sample_groups = {'P': samples1, 'Q': samples2} + pdfs = {} + for n in sample_groups: + samples = sample_groups[n] + method = 'kde' if kde else 'hist' + pdfs[n] = compute_pdf(samples, method, bins, hist_min, hist_max) + + pdfs['M'] = (1./2) * (pdfs['P'] + pdfs['Q']) + + js_div = 0 + for pdf in (pdfs['P'], pdfs['Q']): + js_div += (1./2) * kl(pdf, pdfs['M'], pdf1=True, pdf2=True, base=base) + + return js_div
+ +
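The Jensen-Shannon divergence is the symmetrised counterpart and is called the same way:

import numpy
from pycbc.inference.entropy import js

rng = numpy.random.default_rng(2)
a = rng.normal(size=5000)
b = rng.normal(loc=0.5, size=5000)
print(js(a, b, kde=True))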
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/evidence.html b/latest/html/_modules/pycbc/inference/evidence.html new file mode 100644 index 00000000000..5429225a6f3 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/evidence.html @@ -0,0 +1,391 @@ + + + + + + pycbc.inference.evidence — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.evidence

+# Copyright (C) 2019 Steven Reyes
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module provides functions for estimating the marginal
+likelihood or evidence of a model.
+"""
+import numpy
+from scipy import integrate
+
+
+
+[docs] +def arithmetic_mean_estimator(log_likelihood): + """Returns the log evidence via the prior arithmetic mean estimator (AME). + + The logarithm form of AME is used. This is the most basic + evidence estimator, and often requires O(billions) of samples + from the prior. + + Parameters + ---------- + log_likelihood : 1d array of floats + The log likelihood of the data sampled from the prior + distribution. + + Returns + ------- + float : + Estimation of the log of the evidence. + """ + num_samples = len(log_likelihood) + logl_max = numpy.max(log_likelihood) + + log_evidence = 0. + for i, _ in enumerate(log_likelihood): + log_evidence += numpy.exp(log_likelihood[i] - logl_max) + + log_evidence = numpy.log(log_evidence) + log_evidence += logl_max - numpy.log(num_samples) + + return log_evidence
+ + + +
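A minimal sanity check: if every prior sample has the same log likelihood, the arithmetic mean estimator must return exactly that value:

import numpy
from pycbc.inference.evidence import arithmetic_mean_estimator

logls = numpy.full(1000, -5.0)
print(arithmetic_mean_estimator(logls))   # -5.0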
+[docs] +def harmonic_mean_estimator(log_likelihood): + """Returns the log evidence via posterior harmonic mean estimator (HME). + + The logarithm form of HME is used. This method is not + recommended for general use. It is very slow to converge, + formally, has infinite variance, and very error prone. + + Not recommended for general use. + + Parameters + ---------- + log_likelihood : 1d array of floats + The log likelihood of the data sampled from the posterior + distribution. + + Returns + ------- + float : + Estimation of the log of the evidence. + """ + num_samples = len(log_likelihood) + logl_max = numpy.max(-1.0*log_likelihood) + + log_evidence = 0. + for i, _ in enumerate(log_likelihood): + log_evidence += numpy.exp(-1.0*log_likelihood[i] + logl_max) + + log_evidence = -1.0*numpy.log(log_evidence) + log_evidence += logl_max + log_evidence += numpy.log(num_samples) + + return log_evidence
+ + + +
+[docs] +def thermodynamic_integration(log_likelihood, betas, + method="simpsons"): + """Returns the log evidence of the model via thermodynamic integration. + Also returns an estimated standard deviation for the log evidence. + + Current options are integration through the trapezoid rule, a + first-order corrected trapezoid rule, and Simpson's rule. + + Parameters + ---------- + log_likelihood : 3d array of shape (betas, walker, iteration) + The log likelihood for each temperature separated by + temperature, walker, and iteration. + + betas : 1d array + The inverse temperatures used in the MCMC. + + method : {"trapzoid", "trapezoid_corrected", "simpsons"}, + optional. + The numerical integration method to use for the + thermodynamic integration. Choices include: "trapezoid", + "trapezoid_corrected", "simpsons", for the trapezoid rule, + the first-order correction to the trapezoid rule, and + Simpson's rule. [Default = "simpsons"] + + Returns + ------- + log_evidence : float + Estimation of the log of the evidence. + + mcmc_std : float + The standard deviation of the log evidence estimate from + Monte-Carlo spread. + """ + # Check if the method of integration is in the list of choices + method_list = ["trapezoid", "trapezoid_corrected", "simpsons"] + + if method not in method_list: + raise ValueError("Method %s not supported. Expected %s" + % (method, method_list)) + # Read in the data and ensure ordering of data. + # Ascending order sort + order = numpy.argsort(betas) + betas = betas[order] + log_likelihood = log_likelihood[order] + + # Assume log likelihood is given in shape of beta, walker, + # and iteration. + log_likelihood = numpy.reshape(log_likelihood, + (len(betas), + len(log_likelihood[0].flatten()))) + + average_logl = numpy.average(log_likelihood, axis=1) + + if method in ("trapezoid", "trapezoid_corrected"): + log_evidence = numpy.trapz(average_logl, betas) + + if method == "trapezoid_corrected": + # var_correction holds the derivative correction terms + # See Friel et al. 2014 for expression and derivation. + # https://link.springer.com/article/10.1007/s11222-013-9397-1 + var_correction = 0 + for i in range(len(betas) - 1): + delta_beta = betas[i+1] - betas[i] + pre_fac_var = (1. / 12.) * (delta_beta ** 2.0) + var_diff = numpy.var(log_likelihood[i+1]) + var_diff -= numpy.var(log_likelihood[i]) + var_correction -= pre_fac_var * var_diff + + # Add the derivative correction term back to the log_evidence + # from the first if statement. + log_evidence += var_correction + + elif method == "simpsons": + # beta -> 0 tends to contribute the least to the integral + # so we can sacrifice precision there, rather than near + # beta -> 1. Option even="last" puts trapezoid rule at + # first few points. + log_evidence = integrate.simps(average_logl, betas, + even="last") + + # Estimate the Monte Carlo variance of the evidence calculation + # See (Evans, Annis, 2019.) + # https://www.sciencedirect.com/science/article/pii/S0022249617302651 + ti_vec = numpy.zeros(len(log_likelihood[0])) + + # Get log likelihood chains by sample and not by temperature. 
+ logl_per_samp = [] + for i, _ in enumerate(log_likelihood[0]): + logl_per_samp.append([log_likelihood[x][i] for x in range(len(betas))]) + + if method in ("trapezoid", "trapezoid_corrected"): + for i, _ in enumerate(log_likelihood[0]): + ti_vec[i] = numpy.trapz(logl_per_samp[i], betas) + + elif method == "simpsons": + for i, _ in enumerate(log_likelihood[0]): + ti_vec[i] = integrate.simps(logl_per_samp[i], betas, + even="last") + + # Standard error is sample std / sqrt(number of samples) + mcmc_std = numpy.std(ti_vec) / numpy.sqrt(float(len(log_likelihood[0]))) + + return log_evidence, mcmc_std
+ + + +
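A self-contained sketch with a toy log-likelihood array of shape (ntemps, nwalkers, niterations); the numbers are synthetic, so the output only illustrates the calling convention:

import numpy
from pycbc.inference.evidence import thermodynamic_integration

betas = numpy.array([0.01, 0.1, 0.3, 0.6, 1.0])
rng = numpy.random.default_rng(3)
# average log likelihood increases towards beta = 1, as it typically does
logl = numpy.array([rng.normal(loc=-10.0 + 5.0 * b, scale=0.5, size=(8, 100))
                    for b in betas])
logz, logz_std = thermodynamic_integration(logl, betas, method="simpsons")
print(logz, logz_std)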
+[docs] +def stepping_stone_algorithm(log_likelihood, betas): + """Returns the log evidence of the model via stepping stone algorithm. + Also returns an estimated standard deviation for the log evidence. + + Parameters + ---------- + log_likelihood : 3d array of shape (betas, walker, iteration) + The log likelihood for each temperature separated by + temperature, walker, and iteration. + + betas : 1d array + The inverse temperatures used in the MCMC. + + Returns + ------- + log_evidence : float + Estimation of the log of the evidence. + mcmc_std : float + The standard deviation of the log evidence estimate from + Monte-Carlo spread. + """ + # Reverse order sort + order = numpy.argsort(betas)[::-1] + betas = betas[order] + log_likelihood = log_likelihood[order] + + # Assume log likelihood is given in shape of beta, + # walker, iteration. + log_likelihood = numpy.reshape(log_likelihood, + (len(betas), + len(log_likelihood[0].flatten()))) + + log_rk_pb = numpy.zeros(len(betas) - 1) + for i in range(len(betas) - 1): + delta_beta = betas[i] - betas[i+1] + # Max log likelihood for beta [i+1] + max_logl_pb = numpy.max(log_likelihood[i+1]) + val_1 = delta_beta * max_logl_pb + val_2 = delta_beta * (log_likelihood[i+1] - max_logl_pb) + val_2 = numpy.log(numpy.average(numpy.exp(val_2))) + log_rk_pb[i] = val_1 + val_2 + + log_rk = numpy.sum(log_rk_pb) + log_evidence = log_rk + + # Calculate the Monte Carlo variation + mcmc_std = 0 + for i in range(len(betas) - 1): + delta_beta = betas[i] - betas[i+1] + pre_fact = (delta_beta * log_likelihood[i+1]) - log_rk_pb[i] + pre_fact = numpy.exp(pre_fact) - 1.0 + val = numpy.sum(pre_fact ** 2) + + mcmc_std += val + + mcmc_std /= float(len(log_likelihood[0])) ** 2.0 + mcmc_std = numpy.sqrt(mcmc_std) + + return log_evidence, mcmc_std
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/gelman_rubin.html b/latest/html/_modules/pycbc/inference/gelman_rubin.html new file mode 100644 index 00000000000..9e24f038f42 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/gelman_rubin.html @@ -0,0 +1,320 @@ + + + + + + pycbc.inference.gelman_rubin — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.gelman_rubin

+# Copyright (C) 2017  Christopher M. Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+""" This modules provides functions for evaluating the Gelman-Rubin convergence
+diagnostic statistic.
+"""
+
+import numpy
+
+
+
+[docs] +def walk(chains, start, end, step): + """ Calculates Gelman-Rubin convergence statistic along chains of data. + This function will advance along the chains and calculate the + statistic for each step. + + Parameters + ---------- + chains : iterable + An iterable of numpy.array instances that contain the samples + for each chain. Each chain has shape (nparameters, niterations). + start : float + Start index of blocks to calculate all statistics. + end : float + Last index of blocks to calculate statistics. + step : float + Step size to take for next block. + + Returns + ------- + starts : numpy.array + 1-D array of start indexes of calculations. + ends : numpy.array + 1-D array of end indexes of calculations. + stats : numpy.array + Array with convergence statistic. It has + shape (nparameters, ncalculations). + """ + + # get number of chains, parameters, and iterations + chains = numpy.array(chains) + _, nparameters, _ = chains.shape + + # get end index of blocks + ends = numpy.arange(start, end, step) + stats = numpy.zeros((nparameters, len(ends))) + + # get start index of blocks + starts = numpy.array(len(ends) * [start]) + + # loop over end indexes and calculate statistic + for i, e in enumerate(ends): + tmp = chains[:, :, 0:e] + stats[:, i] = gelman_rubin(tmp) + + return starts, ends, stats
+ + + +
+[docs] +def gelman_rubin(chains, auto_burn_in=True): + """ Calculates the univariate Gelman-Rubin convergence statistic + which compares the evolution of multiple chains in a Markov-Chain Monte + Carlo process and computes their difference to determine their convergence. + The between-chain and within-chain variances are computed for each sampling + parameter, and a weighted combination of the two is used to determine the + convergence. As the chains converge, the point scale reduction factor + should go to 1. + + Parameters + ---------- + chains : iterable + An iterable of numpy.array instances that contain the samples + for each chain. Each chain has shape (nparameters, niterations). + auto_burn_in : bool + If True, then only use later half of samples provided. + + Returns + ------- + psrf : numpy.array + A numpy.array of shape (nparameters) that has the point estimates of + the potential scale reduction factor. + """ + + # remove first half of samples + # this will have shape (nchains, nparameters, niterations) + if auto_burn_in: + _, _, niterations = numpy.array(chains).shape + chains = numpy.array([chain[:, niterations // 2 + 1:] + for chain in chains]) + + # get number of chains, parameters, and iterations + chains = numpy.array(chains) + nchains, nparameters, niterations = chains.shape + + # calculate the covariance matrix for each chain + # this will have shape (nchains, nparameters, nparameters) + chains_covs = numpy.array([numpy.cov(chain) for chain in chains]) + if nparameters == 1: + chains_covs = chains_covs.reshape((nchains, 1, 1)) + + # calculate W the within-chain variance + # this will have shape (nparameters, nparameters) + w = numpy.zeros(chains_covs[0].shape) + for i, row in enumerate(chains_covs[0]): + for j, _ in enumerate(row): + w[i, j] = numpy.mean(chains_covs[:, i, j]) + if nparameters == 1: + w = w.reshape((1, 1)) + + # calculate B the between-chain variance + # this will have shape (nparameters, nparameters) + means = numpy.zeros((nparameters, nchains)) + for i, chain in enumerate(chains): + means[:, i] = numpy.mean(chain, axis=1).transpose() + b = niterations * numpy.cov(means) + if nparameters == 1: + b = b.reshape((1, 1)) + + # get diagonal elements of W and B + # these will have shape (nparameters) + w_diag = numpy.diag(w) + b_diag = numpy.diag(b) + + # get variance for each chain + # this will have shape (nparameters, nchains) + var = numpy.zeros((nparameters, nchains)) + for i, chain_cov in enumerate(chains_covs): + var[:, i] = numpy.diag(chain_cov) + + # get mean of means + # this will have shape (nparameters) + mu_hat = numpy.mean(means, axis=1) + + # get variance of variances + # this will have shape (nparameters) + s = numpy.var(var, axis=1) + + # get V the combined variance of all chains + # this will have shape (nparameters) + v = ((niterations - 1.) * w_diag / niterations + + (1. + 1. / nchains) * b_diag / niterations) + + # get factors in variance of V calculation + # this will have shape (nparameters) + k = 2 * b_diag**2 / (nchains - 1) + mid_term = numpy.cov( + var, means**2)[nparameters:2*nparameters, 0:nparameters].T + end_term = numpy.cov( + var, means)[nparameters:2*nparameters, 0:nparameters].T + wb = niterations / nchains * numpy.diag(mid_term - 2 * mu_hat * end_term) + + # get variance of V + # this will have shape (nparameters) + var_v = ( + (niterations - 1.) ** 2 * s + + (1. + 1. / nchains) ** 2 * k + + 2. * (niterations - 1.) * (1. + 1. 
/ nchains) * wb + ) / niterations**2 + + # get degrees of freedom + # this will have shape (nparameters) + dof = (2. * v**2) / var_v + + # more degrees of freedom factors + # this will have shape (nparameters) + df_adj = (dof + 3.) / (dof + 1.) + + # estimate R + # this will have shape (nparameters) + r2_fixed = (niterations - 1.) / niterations + r2_random = (1. + 1. / nchains) * (1. / niterations) * (b_diag / w_diag) + r2_estimate = r2_fixed + r2_random + + # calculate PSRF the potential scale reduction factor + # this will have shape (nparameters) + psrf = numpy.sqrt(r2_estimate * df_adj) + + return psrf
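A short sketch of the two functions defined on this page: for well-mixed, independent chains the PSRF returned by gelman_rubin should be close to 1, and walk repeats the calculation over growing blocks of iterations. The random chains below are illustrative only.

    import numpy
    from pycbc.inference.gelman_rubin import gelman_rubin, walk

    nchains, nparams, niters = 4, 2, 1000
    chains = [numpy.random.normal(size=(nparams, niters))
              for _ in range(nchains)]

    psrf = gelman_rubin(chains)            # one value per parameter, near 1
    starts, ends, stats = walk(chains, start=200, end=niters, step=200)
    print(psrf)
    print(ends, stats.shape)               # stats has shape (nparams, nblocks)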
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/geweke.html b/latest/html/_modules/pycbc/inference/geweke.html new file mode 100644 index 00000000000..03a9c28c4c1 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/geweke.html @@ -0,0 +1,213 @@ + + + + + + pycbc.inference.geweke — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.geweke

+# Copyright (C) 2017 Christopher M. Biwer
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+""" Functions for computing the Geweke convergence statistic.
+"""
+
+import numpy
+
+
+
+[docs] +def geweke(x, seg_length, seg_stride, end_idx, ref_start, + ref_end=None, seg_start=0): + """ Calculates Geweke convergence statistic for a chain of data. + This function will advance along the chain and calculate the + statistic for each step. + + Parameters + ---------- + x : numpy.array + A one-dimensional array of data. + seg_length : int + Number of samples to use for each Geweke calculation. + seg_stride : int + Number of samples to advance before next Geweke calculation. + end_idx : int + Index of last start. + ref_start : int + Index of beginning of end reference segment. + ref_end : int + Index of end of end reference segment. Default is None which + will go to the end of the data array. + seg_start : int + What index to start computing the statistic. Default is 0 which + will go to the beginning of the data array. + + Returns + ------- + starts : numpy.array + The start index of the first segment in the chain. + ends : numpy.array + The end index of the first segment in the chain. + stats : numpy.array + The Geweke convergence diagnostic statistic for the segment. + """ + + # lists to hold statistic and end index + stats = [] + ends = [] + + # get the beginning of all segments + starts = numpy.arange(seg_start, end_idx, seg_stride) + + # get second segment of data at the end to compare + x_end = x[ref_start:ref_end] + + # loop over all segments + for start in starts: + + # find the end of the first segment + x_start_end = int(start + seg_length) + + # get first segment + x_start = x[start:x_start_end] + + # compute statistic + stats.append((x_start.mean() - x_end.mean()) / numpy.sqrt( + x_start.var() + x_end.var())) + + # store end of first segment + ends.append(x_start_end) + + return numpy.array(starts), numpy.array(ends), numpy.array(stats)
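A sketch of the statistic on a single stationary chain; most values should lie within a few units of zero. The segment lengths and indices are illustrative.

    import numpy
    from pycbc.inference.geweke import geweke

    x = numpy.random.normal(size=5000)
    starts, ends, stats = geweke(x, seg_length=250, seg_stride=250,
                                 end_idx=2500, ref_start=4000)
    print(stats)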
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io.html b/latest/html/_modules/pycbc/inference/io.html new file mode 100644 index 00000000000..12c94aa6345 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io.html @@ -0,0 +1,948 @@ + + + + + + pycbc.inference.io — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io

+# Copyright (C) 2018  Collin Capano
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""I/O utilities for pycbc inference
+"""
+
+
+import os
+import argparse
+import shutil
+import textwrap
+import numpy
+import logging
+import h5py as _h5py
+from pycbc.io.record import (FieldArray, _numpy_function_lib)
+from pycbc import waveform as _waveform
+from pycbc.io.hdf import (dump_state, load_state)
+
+from pycbc.inference.option_utils import (ParseLabelArg, ParseParametersArg)
+from .emcee import EmceeFile
+from .emcee_pt import EmceePTFile
+from .ptemcee import PTEmceeFile
+from .cpnest import CPNestFile
+from .multinest import MultinestFile
+from .dynesty import DynestyFile
+from .ultranest import UltranestFile
+from .snowline import SnowlineFile
+from .nessai import NessaiFile
+from .posterior import PosteriorFile
+from .txt import InferenceTXTFile
+
+filetypes = {
+    EmceeFile.name: EmceeFile,
+    EmceePTFile.name: EmceePTFile,
+    PTEmceeFile.name: PTEmceeFile,
+    CPNestFile.name: CPNestFile,
+    MultinestFile.name: MultinestFile,
+    DynestyFile.name: DynestyFile,
+    PosteriorFile.name: PosteriorFile,
+    UltranestFile.name: UltranestFile,
+    NessaiFile.name: NessaiFile,
+    SnowlineFile.name: SnowlineFile,
+}
+
+try:
+    from .epsie import EpsieFile
+    filetypes[EpsieFile.name] = EpsieFile
+except ImportError:
+    pass
+
+
+
+[docs] +def get_file_type(filename): + """ Returns I/O object to use for file. + + Parameters + ---------- + filename : str + Name of file. + + Returns + ------- + file_type : {InferenceFile, InferenceTXTFile} + The type of inference file object to use. + """ + txt_extensions = [".txt", ".dat", ".csv"] + hdf_extensions = [".hdf", ".h5", ".bkup", ".checkpoint"] + for ext in hdf_extensions: + if filename.endswith(ext): + with _h5py.File(filename, 'r') as fp: + filetype = fp.attrs['filetype'] + try: + filetype = str(filetype.decode()) + except AttributeError: + pass + return filetypes[filetype] + for ext in txt_extensions: + if filename.endswith(ext): + return InferenceTXTFile + raise TypeError("Extension is not supported.")
+ + + +
+[docs] +def loadfile(path, mode=None, filetype=None, **kwargs): + """Loads the given file using the appropriate InferenceFile class. + + If ``filetype`` is not provided, this will try to retrieve the ``filetype`` + from the file's ``attrs``. If the file does not exist yet, an IOError will + be raised if ``filetype`` is not provided. + + Parameters + ---------- + path : str + The filename to load. + mode : str, optional + What mode to load the file with, e.g., 'w' for write, 'r' for read, + 'a' for append. Defaults to h5py.File's mode, which is 'a'. + filetype : str, optional + Force the file to be loaded with the given class name. This must be + provided if creating a new file. + + Returns + ------- + filetype instance + An open file handler to the file. The class used for IO with the file + is determined by the ``filetype`` keyword (if provided) or the + ``filetype`` stored in the file (if not provided). + """ + if filetype is None: + # try to read the file to get its filetype + try: + fileclass = get_file_type(path) + except IOError: + # file doesn't exist, filetype must be provided + raise IOError("The file appears not to exist. In this case, " + "filetype must be provided.") + else: + fileclass = filetypes[filetype] + return fileclass(path, mode=mode, **kwargs)
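A sketch of the two loaders above. The file name results.hdf is purely illustrative and is assumed to be an existing pycbc_inference output file.

    from pycbc.inference.io import get_file_type, loadfile

    cls = get_file_type("results.hdf")       # the sampler-specific IO class
    with loadfile("results.hdf", "r") as fp:
        print(fp.name)                       # filetype stored in the attrs
        print(list(fp[fp.samples_group].keys()))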
+ + + +# +# ============================================================================= +# +# HDF Utilities +# +# ============================================================================= +# + + +
+[docs] +def check_integrity(filename): + """Checks the integrity of an InferenceFile. + + Checks done are: + + * can the file open? + * do all of the datasets in the samples group have the same shape? + * can the first and last sample in all of the datasets in the samples + group be read? + + If any of these checks fail, an IOError is raised. + + Parameters + ---------- + filename: str + Name of an InferenceFile to check. + + Raises + ------ + ValueError + If the given file does not exist. + KeyError + If the samples group does not exist. + IOError + If any of the checks fail. + """ + # check that the file exists + if not os.path.exists(filename): + raise ValueError("file {} does not exist".format(filename)) + # if the file is corrupted such that it cannot be opened, the next line + # will raise an IOError + with loadfile(filename, 'r') as fp: + # check that all datasets in samples have the same shape + parameters = list(fp[fp.samples_group].keys()) + # but only do the check if parameters have been written + if len(parameters) > 0: + group = fp.samples_group + '/{}' + # use the first parameter as a reference shape + ref_shape = fp[group.format(parameters[0])].shape + if not all(fp[group.format(param)].shape == ref_shape + for param in parameters): + raise IOError("not all datasets in the samples group have " + "the same shape") + # check that we can read the first/last sample + firstidx = tuple([0]*len(ref_shape)) + lastidx = tuple([-1]*len(ref_shape)) + for param in parameters: + _ = fp[group.format(param)][firstidx] + _ = fp[group.format(param)][lastidx]
+ + + +
+[docs] +def validate_checkpoint_files(checkpoint_file, backup_file, + check_nsamples=True): + """Checks if the given checkpoint and/or backup files are valid. + + The checkpoint file is considered valid if: + + * it passes all tests run by ``check_integrity``; + * it has at least one sample written to it (indicating at least one + checkpoint has happened). + + The same applies to the backup file. The backup file must also have the + same number of samples as the checkpoint file, otherwise, the backup is + considered invalid. + + If the checkpoint (backup) file is found to be valid, but the backup + (checkpoint) file is not valid, then the checkpoint (backup) is copied to + the backup (checkpoint). Thus, this function ensures that checkpoint and + backup files are either both valid or both invalid. + + Parameters + ---------- + checkpoint_file : string + Name of the checkpoint file. + backup_file : string + Name of the backup file. + + Returns + ------- + checkpoint_valid : bool + Whether or not the checkpoint (and backup) file may be used for loading + samples. + """ + # check if checkpoint file exists and is valid + try: + check_integrity(checkpoint_file) + checkpoint_valid = True + except (ValueError, KeyError, IOError): + checkpoint_valid = False + + # backup file + try: + check_integrity(backup_file) + backup_valid = True + except (ValueError, KeyError, IOError): + backup_valid = False + + # since we can open the file, run self diagnostics + if checkpoint_valid: + with loadfile(checkpoint_file, 'r') as fp: + checkpoint_valid = fp.validate() + if backup_valid: + with loadfile(backup_file, 'r') as fp: + backup_valid = fp.validate() + if check_nsamples: + # This check is not required by nested samplers + # check that the checkpoint and backup have the same number of samples; + # if not, assume the checkpoint has the correct number + if checkpoint_valid and backup_valid: + with loadfile(checkpoint_file, 'r') as fp: + group = list(fp[fp.samples_group].keys())[0] + nsamples = fp[fp.samples_group][group].size + with loadfile(backup_file, 'r') as fp: + group = list(fp[fp.samples_group].keys())[0] + backup_nsamples = fp[fp.samples_group][group].size + backup_valid = nsamples == backup_nsamples + # decide what to do based on the files' statuses + if checkpoint_valid and not backup_valid: + # copy the checkpoint to the backup + logging.info("Backup invalid; copying checkpoint file") + shutil.copy(checkpoint_file, backup_file) + backup_valid = True + elif backup_valid and not checkpoint_valid: + logging.info("Checkpoint invalid; copying backup file") + # copy the backup to the checkpoint + shutil.copy(backup_file, checkpoint_file) + checkpoint_valid = True + return checkpoint_valid
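A sketch of how a resuming run might use the check above; the checkpoint and backup file names are illustrative.

    from pycbc.inference.io import validate_checkpoint_files

    ok = validate_checkpoint_files("run.hdf.checkpoint", "run.hdf.bkup")
    if ok:
        print("checkpoint and backup agree; safe to resume from them")
    else:
        print("no usable checkpoint; start the run from scratch")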
+ + + +# +# ============================================================================= +# +# Command-line Utilities +# +# ============================================================================= +# +
+[docs] +def get_common_parameters(input_files, collection=None): + """Gets a list of variable params that are common across all input files. + + If no common parameters are found, a ``ValueError`` is raised. + + Parameters + ---------- + input_files : list of str + List of input files to load. + collection : str, optional + What group of parameters to load. Can be the name of a list of + parameters stored in the files' attrs (e.g., "variable_params"), or + "all". If "all", will load all of the parameters in the files' + samples group. Default is to load all. + + Returns + ------- + list : + List of the parameter names. + """ + if collection is None: + collection = "all" + parameters = [] + for fn in input_files: + fp = loadfile(fn, 'r') + if collection == 'all': + ps = fp[fp.samples_group].keys() + else: + ps = fp.attrs[collection] + parameters.append(set(ps)) + fp.close() + parameters = list(set.intersection(*parameters)) + if parameters == []: + raise ValueError("no common parameters found for collection {} in " + "files {}".format(collection, ', '.join(input_files))) + # if using python 3 to read a file created in python 2, need to convert + # parameters to strs + try: + parameters = [p.decode() for p in parameters] + except AttributeError: + pass + return parameters
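A sketch, assuming run1.hdf and run2.hdf are existing result files produced with the same model (file names illustrative):

    from pycbc.inference.io import get_common_parameters

    params = get_common_parameters(["run1.hdf", "run2.hdf"],
                                   collection="variable_params")
    print(params)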
+ + + +
+[docs] +class NoInputFileError(Exception): + """Raised in custom argparse Actions by arguments needing input-files when + no file(s) were provided.""" + pass
+ + + +
+[docs] +class PrintFileParams(argparse.Action): + """Argparse action that will load input files and print possible parameters + to screen. Once this is done, the program is forced to exit immediately. + + The behavior is similar to --help, except that the input-file is read. + + .. note:: + The ``input_file`` attribute must be set in the parser namespace before + this action is called. Otherwise, a ``NoInputFileError`` is raised. + """ + def __init__(self, skip_args=None, nargs=0, **kwargs): + if nargs != 0: + raise ValueError("nargs for this action must be 0") + super(PrintFileParams, self).__init__(nargs=nargs, **kwargs) + self.skip_args = skip_args + + def __call__(self, parser, namespace, values, option_string=None): + # get the input file(s) + input_files = namespace.input_file + if input_files is None: + # see if we should raise an error + try: + raise_err = not parser.no_input_file_err + except AttributeError: + raise_err = True + if raise_err: + raise NoInputFileError("must provide at least one input file") + else: + # just return to stop further processing + return + filesbytype = {} + fileparsers = {} + for fn in input_files: + fp = loadfile(fn, 'r') + try: + filesbytype[fp.name].append(fn) + except KeyError: + filesbytype[fp.name] = [fn] + # get any extra options + fileparsers[fp.name], _ = fp.extra_args_parser( + skip_args=self.skip_args, add_help=False) + fp.close() + # now print information about the intersection of all parameters + parameters = get_common_parameters(input_files, collection='all') + print("\n"+textwrap.fill("Parameters available with this (these) " + "input file(s):"), end="\n\n") + print(textwrap.fill(' '.join(sorted(parameters))), + end="\n\n") + # information about the pycbc functions + pfuncs = sorted(FieldArray.functionlib.fget(FieldArray).keys()) + print(textwrap.fill("Available pycbc functions (see " + "http://pycbc.org/pycbc/latest/html for " + "more details):"), end="\n\n") + print(textwrap.fill(', '.join(pfuncs)), end="\n\n") + # numpy funcs + npfuncs = sorted([name for (name, obj) in _numpy_function_lib.items() + if isinstance(obj, numpy.ufunc)]) + print(textwrap.fill("Available numpy functions:"), + end="\n\n") + print(textwrap.fill(', '.join(npfuncs)), end="\n\n") + # misc + consts = "e euler_gamma inf nan pi" + print(textwrap.fill("Recognized constants:"), + end="\n\n") + print(consts, end="\n\n") + print(textwrap.fill("Python arithmetic (+ - * / // ** %), " + "binary (&, |, etc.), and comparison (>, <, >=, " + "etc.) operators may also be used."), end="\n\n") + # print out the extra arguments that may be used + outstr = textwrap.fill("The following are additional command-line " + "options that may be provided, along with the " + "input files that understand them:") + print("\n"+outstr, end="\n\n") + for ftype, fparser in fileparsers.items(): + fnames = ', '.join(filesbytype[ftype]) + if fparser is None: + outstr = textwrap.fill( + "File(s) {} use no additional options.".format(fnames)) + print(outstr, end="\n\n") + else: + fparser.usage = fnames + fparser.print_help() + parser.exit(0)
+ + + +
+[docs] +class ResultsArgumentParser(argparse.ArgumentParser): + """Wraps argument parser, and preloads arguments needed for loading samples + from a file. + + This parser class should be used by any program that wishes to use the + standard arguments for loading samples. It provides functionality to parse + file-specific options. These file-specific arguments are not included in + the standard ``--help`` (since they depend on what input files are given), + but can be seen by running ``--file-help/-H``. The ``--file-help`` will + also print off information about what parameters may be used given the + input files. + + As with the standard ``ArgumentParser``, running this class's + ``parse_args`` will result in an error if arguments are provided that are + not recognized by the parser, nor by any of the file-specific arguments. + For example, ``parse_args`` would work on the command + ``--input-file results.hdf --walker 0`` if + ``results.hdf`` was created by a sampler that recognizes a ``--walker`` + argument, but would raise an error if ``results.hdf`` was created by a + sampler that does not recognize a ``--walker`` argument. The extra + arguments that are recognized are determined by the sampler IO class's + ``extra_args_parser``. + + Some arguments may be excluded from the parser using the ``skip_args`` + optional parameter. + + Parameters + ---------- + skip_args : list of str, optional + Do not add the given arguments to the parser. Arguments should be + specified as the option string minus the leading '--'; e.g., + ``skip_args=['thin-start']`` would cause the ``thin-start`` argument + to not be included. May also specify sampler-specific arguments. Note + that ``input-file``, ``file-help``, and ``parameters`` are always + added. + defaultparams : {'variable_params', 'all'}, optional + If no ``--parameters`` provided, which collection of parameters to + load. If 'all' will load all parameters in the file's + ``samples_group``. If 'variable_params' or None (the default) will load + the variable parameters. + autoparamlabels : bool, optional + Passed to ``add_results_option_group``; see that function for details. + \**kwargs : + All other keyword arguments are passed to ``argparse.ArgumentParser``. + """ + def __init__(self, skip_args=None, defaultparams=None, + autoparamlabels=True, **kwargs): + super(ResultsArgumentParser, self).__init__(**kwargs) + # add attribute to communicate to arguments what to do when there are + # no input files + self.no_input_file_err = False + if skip_args is None: + skip_args = [] + self.skip_args = skip_args + if defaultparams is None: + defaultparams = 'variable_params' + self.defaultparams = defaultparams + # add the results option group + self.add_results_option_group(autoparamlabels=autoparamlabels) + + @property + def actions(self): + """Exposes the actions this parser can do as a dictionary. + + The dictionary maps the ``dest`` to actions. + """ + return {act.dest: act for act in self._actions} + + def _unset_required(self): + """Convenience function to turn off required arguments for first parse. + """ + self._required_args = [act for act in self._actions if act.required] + for act in self._required_args: + act.required = False + + def _reset_required(self): + """Convenience function to turn required arguments back on. + """ + for act in self._required_args: + act.required = True + +
+[docs] + def parse_known_args(self, args=None, namespace=None): + """Parse args method to handle input-file dependent arguments.""" + # run parse args once to make sure the name space is populated with + # the input files. We'll turn off raising NoInputFileErrors on this + # pass + self.no_input_file_err = True + self._unset_required() + opts, extra_opts = super(ResultsArgumentParser, self).parse_known_args( + args, namespace) + # now do it again + self.no_input_file_err = False + self._reset_required() + opts, extra_opts = super(ResultsArgumentParser, self).parse_known_args( + args, opts) + # populate the parameters option if it wasn't specified + if opts.parameters is None or opts.parameters == ['*']: + parameters = get_common_parameters(opts.input_file, + collection=self.defaultparams) + # now call parse parameters action to re-populate the namespace + self.actions['parameters'](self, opts, parameters) + # check if we're being greedy or not + elif '*' in opts.parameters: + # remove the * from the parameters and the labels + opts.parameters = [p for p in opts.parameters if p != '*'] + opts.parameters_labels.pop('*', None) + # add the rest of the parameters not used + all_params = get_common_parameters(opts.input_file, + collection=self.defaultparams) + # extract the used parameters from the parameters option + used_params = FieldArray.parse_parameters(opts.parameters, + all_params) + add_params = set(all_params) - set(used_params) + # repopulate the name space with the additional parameters + if add_params: + opts.parameters += list(add_params) + # update the labels + opts.parameters_labels.update({p: p for p in add_params}) + # parse the sampler-specific options and check for any unknowns + unknown = [] + for fn in opts.input_file: + fp = loadfile(fn, 'r') + sampler_parser, _ = fp.extra_args_parser(skip_args=self.skip_args) + if sampler_parser is not None: + opts, still_unknown = sampler_parser.parse_known_args( + extra_opts, namespace=opts) + unknown.append(set(still_unknown)) + # the intersection of the unknowns are options not understood by + # any of the files + if len(unknown) > 0: + unknown = set.intersection(*unknown) + return opts, list(unknown)
+ + +
+[docs] + def add_results_option_group(self, autoparamlabels=True): + """Adds the options used to call pycbc.inference.io.results_from_cli + function to the parser. + + These are options related to loading the results from a run of + pycbc_inference, for purposes of plotting and/or creating tables. + + Any argument strings included in the ``skip_args`` attribute will not + be added. + + Parameters + ---------- + autoparamlabels : bool, optional + If True, the ``--parameters`` option will use labels from + ``waveform.parameters`` if a parameter name is the same as a + parameter there. Otherwise, will just use whatever label is + provided. Default is True. + """ + results_reading_group = self.add_argument_group( + title="Arguments for loading results", + description="Additional, file-specific arguments may also be " + "provided, depending on what input-files are given. See " + "--file-help for details.") + results_reading_group.add_argument( + "--input-file", type=str, required=True, nargs="+", + action=ParseLabelArg, metavar='FILE[:LABEL]', + help="Path to input HDF file(s). A label may be specified for " + "each input file to use for plots when multiple files are " + "specified.") + # advanced help + results_reading_group.add_argument( + "-H", "--file-help", + action=PrintFileParams, skip_args=self.skip_args, + help="Based on the provided input-file(s), print all available " + "parameters that may be retrieved and all possible functions " + "on those parameters. Also print available additional " + "arguments that may be passed. This option is like an " + "advanced --help: if run, the program will just print the " + "information to screen, then exit.") + if autoparamlabels: + paramparser = ParseParametersArg + lblhelp = ( + "If LABEL is the same as a parameter in " + "pycbc.waveform.parameters, the label " + "property of that parameter will be used (e.g., if LABEL " + "were 'mchirp' then {} would be used). " + .format(_waveform.parameters.mchirp.label)) + else: + paramparser = ParseLabelArg + lblhelp = '' + results_reading_group.add_argument( + "--parameters", type=str, nargs="+", metavar="PARAM[:LABEL]", + action=paramparser, + help="Name of parameters to load; default is to load all. The " + "parameters can be any of the model params or posterior " + "stats (loglikelihood, logprior, etc.) in the input file(s), " + "derived parameters from them, or any function of them. If " + "multiple files are provided, any parameter common to all " + "files may be used. Syntax for functions is python; any math " + "functions in the numpy library may be used. Can optionally " + "also specify a LABEL for each parameter. If no LABEL is " + "provided, PARAM will be used as the LABEL. {}" + "To see all possible parameters that may be used with the " + "given input file(s), as well as all available functions, " + "run --file-help, along with one or more input files. " + "If '*' is provided in addition to other parameter names, " + "then parameters will be loaded in a greedy fashion; i.e., " + "all other parameters that exist in the file(s) that are not " + "explicitly mentioned will also be loaded. For example, " + "if the input-file(s) contains 'srcmass1', " + "'srcmass2', and 'distance', and " + "\"'primary_mass(srcmass1, srcmass2):mass1' '*'\", is given " + "then 'mass1' and 'distance' will be loaded. Otherwise, " + "without the '*', only 'mass1' would be loaded. " + "Note that any parameter that is used in a function " + "will not automatically be added. 
Tip: enclose " + "arguments in single quotes, or else special characters will " + "be interpreted as shell commands. For example, the " + "wildcard should be given as either '*' or \\*, otherwise " + "bash will expand the * into the names of all the files in " + "the current directory." + .format(lblhelp)) + results_reading_group.add_argument( + "--constraint", type=str, nargs="+", metavar="CONSTRAINT[:FILE]", + help="Apply a constraint to the samples. If a file is provided " + "after the constraint, it will only be applied to the given " + "file. Otherwise, the constraint will be applied to all " + "files. Only one constraint may be applied to a file. " + "Samples that violate the constraint will be removed. Syntax " + "is python; any parameter or function of parameter can be " + "used, similar to the parameters argument. Multiple " + "constraints may be combined by using '&' and '|'.") + return results_reading_group
+
+ + + +
+[docs] +def results_from_cli(opts, load_samples=True, **kwargs): + """Loads an inference result file along with any labels associated with it + from the command line options. + + Parameters + ---------- + opts : ArgumentParser options + The options from the command line. + load_samples : bool, optional + Load the samples from the file. + + Returns + ------- + fp_all : (list of) BaseInferenceFile type + The result file as an hdf file. If more than one input file, + then it returns a list. + parameters : list of str + List of the parameters to use, parsed from the parameters option. + labels : dict + Dictionary of labels to associate with the parameters. + samples_all : (list of) FieldArray(s) or None + If load_samples, the samples as a FieldArray; otherwise, None. + If more than one input file, then it returns a list. + \**kwargs : + Any other keyword arguments that are passed to read samples using + samples_from_cli + """ + + # lists for files and samples from all input files + fp_all = [] + samples_all = [] + + input_files = opts.input_file + if isinstance(input_files, str): + input_files = [input_files] + + # load constraints + constraints = {} + if opts.constraint is not None: + for constraint in opts.constraint: + if len(constraint.split(':')) == 2: + constraint, fn = constraint.split(':') + constraints[fn] = constraint + # no file provided, make sure there's only one constraint + elif len(opts.constraint) > 1: + raise ValueError("must provide a file to apply constraints " + "to if providing more than one constraint") + else: + # this means no file, only one constraint, apply to all + # files + constraints = {fn: constraint for fn in input_files} + + # loop over all input files + for input_file in input_files: + logging.info("Reading input file %s", input_file) + + # read input file + fp = loadfile(input_file, "r") + + # load the samples + if load_samples: + logging.info("Loading samples") + + # read samples from file + samples = fp.samples_from_cli(opts, parameters=opts.parameters, + **kwargs) + logging.info("Loaded {} samples".format(samples.size)) + + if input_file in constraints: + logging.info("Applying constraints") + mask = samples[constraints[input_file]] + samples = samples[mask] + if samples.size == 0: + raise ValueError("No samples remain after constraint {} " + "applied".format(constraints[input_file])) + logging.info("{} samples remain".format(samples.size)) + + + # else do not read samples + else: + samples = None + + # add results to lists from all input files + if len(input_files) > 1: + fp_all.append(fp) + samples_all.append(samples) + + # else only one input file then do not return lists + else: + fp_all = fp + samples_all = samples + + return fp_all, opts.parameters, opts.parameters_labels, samples_all
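A sketch of how a plotting script might tie the parser and loader above together. The command-line values are illustrative and assume results.hdf exists and contains parameters named srcmass1 and srcmass2.

    from pycbc.inference.io import ResultsArgumentParser, results_from_cli

    parser = ResultsArgumentParser(description="example results loader")
    opts = parser.parse_args(["--input-file", "results.hdf",
                              "--parameters", "srcmass1", "srcmass2"])
    fp, params, labels, samples = results_from_cli(opts)
    print(params, samples.size)
    fp.close()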
+ + + +
+[docs] +def injections_from_cli(opts): + """Gets injection parameters from the inference file(s). + + If the opts have a ``injection_samples_map`` option, the injection + parameters will be remapped accordingly. See + :py:func:`pycbc.inference.option_utils.add_injsamples_map_opt` for + details. + + Parameters + ---------- + opts : argparser + Argparser object that has the command-line objects to parse. + + Returns + ------- + FieldArray + Array of the injection parameters from all of the input files given + by ``opts.input_file``. + """ + # see if a mapping was provided + if hasattr(opts, 'injection_samples_map') and opts.injection_samples_map: + param_map = [opt.split(':') for opt in opts.injection_samples_map] + else: + param_map = [] + input_files = opts.input_file + if isinstance(input_files, str): + input_files = [input_files] + injections = None + # loop over all input files getting the injection files + for input_file in input_files: + fp = loadfile(input_file, 'r') + these_injs = fp.read_injections() + # apply mapping if it was provided + if param_map: + mapvals = {sp: these_injs[ip] for ip, sp in param_map} + # if any of the new parameters are the same as the old, just + # overwrite the values + common_params = set(these_injs.fieldnames) & set(mapvals.keys()) + for param in common_params: + these_injs[param] = mapvals.pop(param) + # add the rest as new fields + ps = list(mapvals.keys()) + these_injs = these_injs.add_fields([mapvals[p] for p in ps], + names=ps) + if injections is None: + injections = these_injs + else: + injections = injections.append(these_injs) + return injections
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/base_hdf.html b/latest/html/_modules/pycbc/inference/io/base_hdf.html new file mode 100644 index 00000000000..3d14b97fe30 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/base_hdf.html @@ -0,0 +1,1249 @@ + + + + + + pycbc.inference.io.base_hdf — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.base_hdf

+# Copyright (C) 2016 Christopher M. Biwer, Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""This modules defines functions for reading and writing samples that the
+inference samplers generate.
+"""
+
+
+import sys
+import logging
+from io import StringIO
+
+from abc import (ABCMeta, abstractmethod)
+
+import numpy
+import h5py
+
+from pycbc.io import FieldArray
+from pycbc.inject import InjectionSet
+from pycbc.io import (dump_state, load_state)
+from pycbc.workflow import WorkflowConfigParser
+from pycbc.types import FrequencySeries
+
+
+
+[docs] +def format_attr(val): + """Formats an attr so that it can be read in either python 2 or 3. + + In python 2, strings that are saved as an attribute in an hdf file default + to unicode. Since unicode was removed in python 3, if you load that file + in a python 3 environment, the strings will be read as bytes instead, which + causes a number of issues. This attempts to fix that. If the value is + a bytes string, then it will be decoded into a string. If the value is + a numpy array of byte strings, it will convert the array to a list of + strings. + + Parameters + ---------- + val : obj + The value to format. This will try to apply decoding to the value + + Returns + ------- + obj + If ``val`` was a byte string, the value as a ``str``. If the value + was a numpy array of ``bytes_``, the value as a list of ``str``. + Otherwise, just returns the value. + """ + try: + val = str(val.decode()) + except AttributeError: + pass + if isinstance(val, numpy.ndarray) and val.dtype.type == numpy.bytes_: + val = val.astype(numpy.unicode_).tolist() + return val
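A sketch of the round trip this helper smooths over: byte strings written under python 2 come back as str, and arrays of byte strings come back as lists of str.

    import numpy
    from pycbc.inference.io.base_hdf import format_attr

    print(format_attr(b"emcee"))                           # 'emcee'
    print(format_attr(numpy.array([b"mass1", b"mass2"])))  # ['mass1', 'mass2']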
+ + + +
+[docs] +class BaseInferenceFile(h5py.File, metaclass=ABCMeta): + """Base class for all inference hdf files. + + This is a subclass of the h5py.File object. It adds functions for + handling reading and writing the samples from the samplers. + + Parameters + ----------- + path : str + The path to the HDF file. + mode : {None, str} + The mode to open the file, eg. "w" for write and "r" for read. + """ + + name = None + samples_group = 'samples' + sampler_group = 'sampler_info' + data_group = 'data' + injections_group = 'injections' + config_group = 'config_file' + + def __init__(self, path, mode=None, **kwargs): + super(BaseInferenceFile, self).__init__(path, mode, **kwargs) + # check that file type matches self + try: + filetype = self.attrs['filetype'] + except KeyError: + if mode == 'w': + # first time creating the file, add this class's name + filetype = self.name + self.attrs['filetype'] = filetype + else: + filetype = None + try: + filetype = str(filetype.decode()) + except AttributeError: + pass + if filetype != self.name: + raise ValueError("This file has filetype {}, whereas this class " + "is named {}. This indicates that the file was " + "not written by this class, and so cannot be " + "read by this class.".format(filetype, self.name)) + + def __getattr__(self, attr): + """Things stored in ``.attrs`` are promoted to instance attributes. + + Note that properties will be called before this, so if there are any + properties that share the same name as something in ``.attrs``, that + property will get returned. + """ + return self.attrs[attr] + +
+[docs] + def getattrs(self, group=None, create_missing=True): + """Convenience function for getting the `attrs` from the file or group. + + Parameters + ---------- + group : str, optional + Get the attrs of the specified group. If None or ``/``, will + retrieve the file's ``attrs``. + create_missing: bool, optional + If ``group`` is provided, but doesn't yet exist in the file, create + the group. Otherwise, a KeyError will be raised. Default is True. + + Returns + ------- + h5py.File.attrs + An attrs instance of the file or requested group. + """ + if group is None or group == "/": + attrs = self.attrs + else: + try: + attrs = self[group].attrs + except KeyError as e: + if create_missing: + self.create_group(group) + attrs = self[group].attrs + else: + raise e + return attrs
+ + +
+[docs] + @abstractmethod + def write_samples(self, samples, **kwargs): + """This should write all of the provided samples. + + This function should be used to write both samples and model stats. + + Parameters + ---------- + samples : dict + Samples should be provided as a dictionary of numpy arrays. + \**kwargs : + Any other keyword args the sampler needs to write data. + """ + pass
+ + +
+[docs] + def parse_parameters(self, parameters, array_class=None): + """Parses a parameters arg to figure out what fields need to be loaded. + + Parameters + ---------- + parameters : (list of) strings + The parameter(s) to retrieve. A parameter can be the name of any + field in ``samples_group``, a virtual field or method of + ``FieldArray`` (as long as the file contains the necessary fields + to derive the virtual field or method), and/or a function of + these. + array_class : array class, optional + The type of array to use to parse the parameters. The class must + have a ``parse_parameters`` method. Default is to use a + ``FieldArray``. + + Returns + ------- + list : + A list of strings giving the fields to load from the file. + """ + # get the type of array class to use + if array_class is None: + array_class = FieldArray + # get the names of fields needed for the given parameters + possible_fields = self[self.samples_group].keys() + return array_class.parse_parameters(parameters, possible_fields)
+ + +
+[docs] + def read_samples(self, parameters, array_class=None, **kwargs): + """Reads samples for the given parameter(s). + + The ``parameters`` can be the name of any dataset in ``samples_group``, + a virtual field or method of ``FieldArray`` (as long as the file + contains the necessary fields to derive the virtual field or method), + and/or any numpy function of these. + + The ``parameters`` are parsed to figure out what datasets are needed. + Only those datasets will be loaded, and will be the base-level fields + of the returned ``FieldArray``. + + The ``static_params`` are also added as attributes of the returned + ``FieldArray``. + + Parameters + ----------- + parameters : (list of) strings + The parameter(s) to retrieve. + array_class : FieldArray-like class, optional + The type of array to return. The class must have ``from_kwargs`` + and ``parse_parameters`` methods. If None, will return a + ``FieldArray``. + \**kwargs : + All other keyword arguments are passed to ``read_raw_samples``. + + Returns + ------- + FieldArray : + The samples as a ``FieldArray``. + """ + # get the type of array class to use + if array_class is None: + array_class = FieldArray + # get the names of fields needed for the given parameters + possible_fields = self[self.samples_group].keys() + loadfields = array_class.parse_parameters(parameters, possible_fields) + samples = self.read_raw_samples(loadfields, **kwargs) + # convert to FieldArray + samples = array_class.from_kwargs(**samples) + # add the static params and attributes + addatrs = (list(self.static_params.items()) + + list(self[self.samples_group].attrs.items())) + for (p, val) in addatrs: + if p in loadfields: + continue + setattr(samples, format_attr(p), format_attr(val)) + return samples
+ + +
+[docs] + @abstractmethod + def read_raw_samples(self, fields, **kwargs): + """Low level function for reading datasets in the samples group. + + This should return a dictionary of numpy arrays. + """ + pass
+ + +
+[docs] + @staticmethod + def extra_args_parser(parser=None, skip_args=None, **kwargs): + """Provides a parser that can be used to parse sampler-specific command + line options for loading samples. + + This is optional. Inheriting classes may override this if they want to + implement their own options. + + Parameters + ---------- + parser : argparse.ArgumentParser, optional + Instead of creating a parser, add arguments to the given one. If + none provided, will create one. + skip_args : list, optional + Don't include the given options. Options should be given as the + option string, minus the '--'. For example, + ``skip_args=['iteration']`` would cause the ``--iteration`` + argument not to be included. + \**kwargs : + All other keyword arguments are passed to the parser that is + created. + + Returns + ------- + parser : argparse.ArgumentParser or None + If this class adds extra arguments, an argument parser with the + extra arguments. Otherwise, will just return whatever was passed + for the ``parser`` argument (default is None). + actions : list of argparse.Action + List of the actions that were added. + """ + return parser, []
+ + + @staticmethod + def _get_optional_args(args, opts, err_on_missing=False, **kwargs): + """Convenience function to retrieve arguments from an argparse + namespace. + + Parameters + ---------- + args : list of str + List of arguments to retrieve. + opts : argparse.Namespace + Namespace to retrieve arguments for. + err_on_missing : bool, optional + If an argument is not found in the namespace, raise an + AttributeError. Otherwise, just pass. Default is False. + \**kwargs : + All other keyword arguments are added to the return dictionary. + Any keyword argument that is the same as an argument in ``args`` + will override what was retrieved from ``opts``. + + Returns + ------- + dict : + Dictionary mapping arguments to values retrieved from ``opts``. If + keyword arguments were provided, these will also be included in the + dictionary. + """ + parsed = {} + for arg in args: + try: + parsed[arg] = getattr(opts, arg) + except AttributeError as e: + if err_on_missing: + raise AttributeError(e) + else: + continue + parsed.update(kwargs) + return parsed + 
+[docs] + def samples_from_cli(self, opts, parameters=None, **kwargs): + """Reads samples from the given command-line options. + + Parameters + ---------- + opts : argparse Namespace + The options with the settings to use for loading samples (the sort + of thing returned by ``ArgumentParser().parse_args``). + parameters : (list of) str, optional + A list of the parameters to load. If none provided, will try to + get the parameters to load from ``opts.parameters``. + \**kwargs : + All other keyword arguments are passed to ``read_samples``. These + will override any options with the same name. + + Returns + ------- + FieldArray : + Array of the loaded samples. + """ + if parameters is None and opts.parameters is None: + parameters = self.variable_params + elif parameters is None: + parameters = opts.parameters + # parse optional arguments + _, extra_actions = self.extra_args_parser() + extra_args = [act.dest for act in extra_actions] + kwargs = self._get_optional_args(extra_args, opts, **kwargs) + return self.read_samples(parameters, **kwargs)
+ + + @property + def static_params(self): + """Returns a dictionary of the static_params. The keys are the argument + names, values are the value they were set to. + """ + return {arg: self.attrs[arg] for arg in self.attrs["static_params"]} + + @property + def effective_nsamples(self): + """Returns the effective number of samples stored in the file. + """ + try: + return self.attrs['effective_nsamples'] + except KeyError: + return 0 + +
+[docs] + def write_effective_nsamples(self, effective_nsamples): + """Writes the effective number of samples stored in the file.""" + self.attrs['effective_nsamples'] = effective_nsamples
+ + + @property + def thin_start(self): + """The default start index to use when reading samples. + + Unless overridden by sub-class attribute, just returns 0. + """ + return 0 + + @property + def thin_interval(self): + """The default interval to use when reading samples. + + Unless overridden by sub-class attribute, just returns 1. + """ + return 1 + + @property + def thin_end(self): + """The default end index to use when reading samples. + + Unless overridden by sub-class attribute, just returns ``None``. + """ + return None + + @property + def cmd(self): + """Returns the (last) saved command line. + + If the file was created from a run that resumed from a checkpoint, only + the last command line used is returned. + + Returns + ------- + cmd : string + The command line that created this InferenceFile. + """ + cmd = self.attrs["cmd"] + if isinstance(cmd, numpy.ndarray): + cmd = cmd[-1] + return cmd +
+[docs] + def write_logevidence(self, lnz, dlnz): + """Writes the given log evidence and its error. + + Results are saved to file's 'log_evidence' and 'dlog_evidence' + attributes. + + Parameters + ---------- + lnz : float + The log of the evidence. + dlnz : float + The error in the estimate of the log evidence. + """ + self.attrs['log_evidence'] = lnz + self.attrs['dlog_evidence'] = dlnz
+ + + @property + def log_evidence(self): + """Returns the log of the evidence and its error, if they exist in the + file. Raises a KeyError otherwise. + """ + return self.attrs["log_evidence"], self.attrs["dlog_evidence"] + +
+[docs] + def write_random_state(self, group=None, state=None): + """Writes the state of the random number generator to the file. + + The random state is written to ``sampler_group``/random_state. + + Parameters + ---------- + group : str + Name of group to write random state to. + state : tuple, optional + Specify the random state to write. If None, will use + ``numpy.random.get_state()``. + """ + # Write out the default numpy random state + group = self.sampler_group if group is None else group + dataset_name = "/".join([group, "random_state"]) + if state is None: + state = numpy.random.get_state() + s, arr, pos, has_gauss, cached_gauss = state + if dataset_name in self: + self[dataset_name][:] = arr + else: + self.create_dataset(dataset_name, arr.shape, fletcher32=True, + dtype=arr.dtype) + self[dataset_name][:] = arr + self[dataset_name].attrs["s"] = s + self[dataset_name].attrs["pos"] = pos + self[dataset_name].attrs["has_gauss"] = has_gauss + self[dataset_name].attrs["cached_gauss"] = cached_gauss
+ + +
+[docs] + def read_random_state(self, group=None): + """Reads the state of the random number generator from the file. + + Parameters + ---------- + group : str + Name of group to read random state from. + + Returns + ------- + tuple + A tuple with 5 elements that can be passed to + numpy.random.set_state. + """ + # Read numpy randomstate + group = self.sampler_group if group is None else group + dataset_name = "/".join([group, "random_state"]) + arr = self[dataset_name][:] + s = self[dataset_name].attrs["s"] + pos = self[dataset_name].attrs["pos"] + has_gauss = self[dataset_name].attrs["has_gauss"] + cached_gauss = self[dataset_name].attrs["cached_gauss"] + state = s, arr, pos, has_gauss, cached_gauss + return state
+ + +
+[docs] + def write_strain(self, strain_dict, group=None): + """Writes strain for each IFO to file. + + Parameters + ----------- + strain : {dict, FrequencySeries} + A dict of FrequencySeries where the key is the IFO. + group : {None, str} + The group to write the strain to. If None, will write to the top + level. + """ + subgroup = self.data_group + "/{ifo}/strain" + if group is None: + group = subgroup + else: + group = '/'.join([group, subgroup]) + for ifo, strain in strain_dict.items(): + self[group.format(ifo=ifo)] = strain + self[group.format(ifo=ifo)].attrs['delta_t'] = strain.delta_t + self[group.format(ifo=ifo)].attrs['start_time'] = \ + float(strain.start_time)
+ + +
+[docs] + def write_stilde(self, stilde_dict, group=None): + """Writes stilde for each IFO to file. + + Parameters + ----------- + stilde : {dict, FrequencySeries} + A dict of FrequencySeries where the key is the IFO. + group : {None, str} + The group to write the strain to. If None, will write to the top + level. + """ + subgroup = self.data_group + "/{ifo}/stilde" + if group is None: + group = subgroup + else: + group = '/'.join([group, subgroup]) + for ifo, stilde in stilde_dict.items(): + self[group.format(ifo=ifo)] = stilde + self[group.format(ifo=ifo)].attrs['delta_f'] = stilde.delta_f + self[group.format(ifo=ifo)].attrs['epoch'] = float(stilde.epoch)
+ + +
+[docs] + def write_psd(self, psds, group=None): + """Writes PSD for each IFO to file. + + PSDs are written to ``[{group}/]data/{detector}/psds/0``, where {group} + is the optional keyword argument. + + Parameters + ----------- + psds : dict + A dict of detector name -> FrequencySeries. + group : str, optional + Specify a top-level group to write the data to. If ``None`` (the + default), data will be written to the file's top level. + """ + subgroup = self.data_group + "/{ifo}/psds/0" + if group is None: + group = subgroup + else: + group = '/'.join([group, subgroup]) + for ifo in psds: + self[group.format(ifo=ifo)] = psds[ifo] + self[group.format(ifo=ifo)].attrs['delta_f'] = psds[ifo].delta_f
+ + +
+[docs] + def write_injections(self, injection_file, group=None): + """Writes injection parameters from the given injection file. + + Everything in the injection file is copied to + ``[{group}/]injections_group``, where ``{group}`` is the optional + keyword argument. + + Parameters + ---------- + injection_file : str + Path to HDF injection file. + group : str, optional + Specify a top-level group to write the injections group to. If + ``None`` (the default), injections group will be written to the + file's top level. + """ + logging.info("Writing injection file to output") + if group is None or group == '/': + group = self.injections_group + else: + group = '/'.join([group, self.injections_group]) + try: + with h5py.File(injection_file, "r") as fp: + super(BaseInferenceFile, self).copy(fp, group) + except IOError: + logging.warning( + "Could not read %s as an HDF file", + injection_file + )
+ + +
+[docs] + def read_injections(self, group=None): + """Gets injection parameters. + + Injections are retrieved from ``[{group}/]injections``. + + Parameters + ---------- + group : str, optional + Group that the injections group is in. Default (None) is to look + in the top-level. + + Returns + ------- + FieldArray + Array of the injection parameters. + """ + if group is None or group == '/': + group = self.injections_group + else: + group = '/'.join([group, self.injections_group]) + injset = InjectionSet(self.filename, hdf_group=group) + injections = injset.table.view(FieldArray) + return injections
+ + +
+[docs] + def write_command_line(self): + """Writes command line to attributes. + + The command line is written to the file's ``attrs['cmd']``. If this + attribute already exists in the file (this can happen when resuming + from a checkpoint), ``attrs['cmd']`` will be a list storing the current + command line and all previous command lines. + """ + cmd = [" ".join(sys.argv)] + try: + previous = self.attrs["cmd"] + if isinstance(previous, str): + # convert to list + previous = [previous] + elif isinstance(previous, numpy.ndarray): + previous = previous.tolist() + except KeyError: + previous = [] + self.attrs["cmd"] = cmd + previous
+ + +
+[docs] + @staticmethod + def get_slice(thin_start=None, thin_interval=None, thin_end=None): + """Formats a slice to retrieve a thinned array from an HDF file. + + Parameters + ---------- + thin_start : float or int, optional + The starting index to use. If provided, it is cast to an ``int``. + thin_interval : float or int, optional + The interval to use. If provided, its ceiling is used. + thin_end : float or int, optional + The end index to use. If provided, it is cast to an ``int``. + + Returns + ------- + slice : + The slice needed to thin the array. + """ + if thin_start is not None: + thin_start = int(thin_start) + if thin_interval is not None: + thin_interval = int(numpy.ceil(thin_interval)) + if thin_end is not None: + thin_end = int(thin_end) + return slice(thin_start, thin_end, thin_interval)
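As a usage illustration (not part of the rendered source), the returned slice can be applied directly to a samples dataset. This is a minimal sketch: the values are made up, and the import path for ``BaseInferenceFile`` is assumed.

import numpy
# assumption: from pycbc.inference.io.base_hdf import BaseInferenceFile
# get_slice is a staticmethod, so no open file is needed for this sketch
sl = BaseInferenceFile.get_slice(thin_start=100, thin_interval=2.5, thin_end=None)
print(sl)                  # slice(100, None, 3): the interval is rounded up
samples = numpy.arange(1000)
thinned = samples[sl]      # every 3rd entry, starting at index 100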
+ + +
+[docs] + def copy_metadata(self, other): + """Copies all metadata from this file to the other file. + + Metadata is defined as everything in the top-level ``.attrs``. + + Parameters + ---------- + other : InferenceFile + An open inference file to write the data to. + """ + logging.info("Copying metadata") + # copy attributes + for key in self.attrs.keys(): + other.attrs[key] = self.attrs[key]
+ + +
+[docs] + def copy_info(self, other, ignore=None): + """Copies "info" from this file to the other. + + "Info" is defined as all groups that are not the samples group. + + Parameters + ---------- + other : output file + The output file. Must be an hdf file. + ignore : (list of) str + Don't copy the given groups. + """ + logging.info("Copying info") + # copy non-samples/stats data + if ignore is None: + ignore = [] + if isinstance(ignore, str): + ignore = [ignore] + ignore = set(ignore + [self.samples_group]) + copy_groups = set(self.keys()) - ignore + for key in copy_groups: + super(BaseInferenceFile, self).copy(key, other)
+ + +
+[docs] + def copy_samples(self, other, parameters=None, parameter_names=None, + read_args=None, write_args=None): + """Should copy samples to the other files. + + Parameters + ---------- + other : InferenceFile + An open inference file to write to. + parameters : list of str, optional + List of parameters to copy. If None, will copy all parameters. + parameter_names : dict, optional + Rename one or more parameters to the given name. The dictionary + should map parameter -> parameter name. If None, will just use the + original parameter names. + read_args : dict, optional + Arguments to pass to ``read_samples``. + write_args : dict, optional + Arguments to pass to ``write_samples``. + """ + # select the samples to copy + logging.info("Reading samples to copy") + if parameters is None: + parameters = self.variable_params + # if list of desired parameters is different, rename + if set(parameters) != set(self.variable_params): + other.attrs['variable_params'] = parameters + if read_args is None: + read_args = {} + samples = self.read_samples(parameters, **read_args) + logging.info("Copying {} samples".format(samples.size)) + # if different parameter names are desired, get them from the samples + if parameter_names: + arrs = {pname: samples[p] for p, pname in parameter_names.items()} + arrs.update({p: samples[p] for p in parameters if + p not in parameter_names}) + samples = FieldArray.from_kwargs(**arrs) + other.attrs['variable_params'] = samples.fieldnames + logging.info("Writing samples") + if write_args is None: + write_args = {} + other.write_samples({p: samples[p] for p in samples.fieldnames}, + **write_args)
+ + +
+[docs] + def copy(self, other, ignore=None, parameters=None, parameter_names=None, + read_args=None, write_args=None): + """Copies metadata, info, and samples in this file to another file. + + Parameters + ---------- + other : str or InferenceFile + The file to write to. May be either a string giving a filename, + or an open hdf file. If the former, the file will be opened with + the write attribute (note that if a file already exists with that + name, it will be deleted). + ignore : (list of) strings + Don't copy the given groups. If the samples group is included, no + samples will be copied. + parameters : list of str, optional + List of parameters in the samples group to copy. If None, will copy + all parameters. + parameter_names : dict, optional + Rename one or more parameters to the given name. The dictionary + should map parameter -> parameter name. If None, will just use the + original parameter names. + read_args : dict, optional + Arguments to pass to ``read_samples``. + write_args : dict, optional + Arguments to pass to ``write_samples``. + + Returns + ------- + InferenceFile + The open file handler to other. + """ + if not isinstance(other, h5py.File): + # check that we're not trying to overwrite this file + if other == self.name: + raise IOError("destination is the same as this file") + other = self.__class__(other, 'w') + # metadata + self.copy_metadata(other) + # info + if ignore is None: + ignore = [] + if isinstance(ignore, str): + ignore = [ignore] + self.copy_info(other, ignore=ignore) + # samples + if self.samples_group not in ignore: + self.copy_samples(other, parameters=parameters, + parameter_names=parameter_names, + read_args=read_args, + write_args=write_args) + # if any down selection was done, re-set the default + # thin-start/interval/end + p = tuple(self[self.samples_group].keys())[0] + my_shape = self[self.samples_group][p].shape + p = tuple(other[other.samples_group].keys())[0] + other_shape = other[other.samples_group][p].shape + if my_shape != other_shape: + other.attrs['thin_start'] = 0 + other.attrs['thin_interval'] = 1 + other.attrs['thin_end'] = None + return other
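A minimal usage sketch of ``copy``: the file, group, and parameter names below are hypothetical, and ``fp`` is assumed to be an already-open instance of a ``BaseInferenceFile`` subclass.

# Copy everything except the injections group, keeping only two parameters
# and renaming one of them in the destination file.
out = fp.copy('downselected.hdf',
              ignore=['injections'],
              parameters=['mass1', 'mass2'],
              parameter_names={'mass1': 'srcmass1'})
out.close()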
+ + +
+[docs] + @classmethod + def write_kwargs_to_attrs(cls, attrs, **kwargs): + """Writes the given keywords to the given ``attrs``. + + If any keyword argument points to a dict, the keyword will point to a + list of the dict's keys. Each key is then written to the attrs with its + corresponding value. + + Parameters + ---------- + attrs : an HDF attrs + The ``attrs`` of an hdf file or a group in an hdf file. + \**kwargs : + The keywords to write. + """ + for arg, val in kwargs.items(): + if val is None: + val = str(None) + if isinstance(val, dict): + attrs[str(arg)] = list(map(str, val.keys())) + # just call self again with the dict as kwargs + cls.write_kwargs_to_attrs(attrs, **val) + else: + attrs[str(arg)] = val
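Since the method only assigns keys into whatever mapping it is given, its behaviour can be sketched with a plain dict standing in for an HDF5 ``attrs`` object. Values are illustrative, and the import path for ``BaseInferenceFile`` is assumed.

# assumption: from pycbc.inference.io.base_hdf import BaseInferenceFile
attrs = {}   # stands in for fp.attrs or fp[group].attrs
BaseInferenceFile.write_kwargs_to_attrs(
    attrs, model='gaussian_noise',
    static_params={'f_lower': 20.0, 'approximant': 'IMRPhenomD'})
# attrs now holds:
#   'model'         -> 'gaussian_noise'
#   'static_params' -> ['f_lower', 'approximant']   (the dict's keys)
#   'f_lower'       -> 20.0
#   'approximant'   -> 'IMRPhenomD'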
+ + +
+[docs] + def write_data(self, name, data, path=None, append=False): + """Convenience function to write data. + + Given ``data`` is written as a dataset with ``name`` in ``path``. + If the dataset or path do not exist yet, the dataset and path will + be created. + + Parameters + ---------- + name : str + The name to associate with the data. This will be the dataset + name (if data is array-like) or the key in the attrs. + data : array, dict, or atomic + The data to write. If a dictionary, a subgroup will be created + for each key, and the values written there. This will be done + recursively until an array or atomic (e.g., float, int, str), is + found. Otherwise, the data is written to the given name. + path : str, optional + Write to the given path. Default (None) will write to the top + level. If the path does not exist in the file, it will be + created. + append : bool, optional + Append the data to what is currently in the file if ``path/name`` + already exists in the file, and if it does not, create the dataset + so that its last dimension can be resized. The data can only + be appended along the last dimension, and if it already exists in + the data, it must be resizable along this dimension. If ``False`` + (the default) what is in the file will be overwritten, and the + given data must have the same shape. + """ + if path is None: + path = '/' + try: + group = self[path] + except KeyError: + # create the group + self.create_group(path) + group = self[path] + if isinstance(data, dict): + # call myself for each key, value pair in the dictionary + for key, val in data.items(): + self.write_data(key, val, path='/'.join([path, name]), + append=append) + # if appending, we need to resize the data on disk, or, if it doesn't + # exist yet, create a dataset that is resizable along the last + # dimension + elif append: + # cast the data to an array if it isn't already one + if isinstance(data, (list, tuple)): + data = numpy.array(data) + if not isinstance(data, numpy.ndarray): + data = numpy.array([data]) + dshape = data.shape + ndata = dshape[-1] + try: + startidx = group[name].shape[-1] + group[name].resize(dshape[-1]+group[name].shape[-1], + axis=len(group[name].shape)-1) + except KeyError: + # dataset doesn't exist yet + group.create_dataset(name, dshape, + maxshape=tuple(list(dshape)[:-1]+[None]), + dtype=data.dtype, fletcher32=True) + startidx = 0 + group[name][..., startidx:startidx+ndata] = data[..., :] + else: + try: + group[name][()] = data + except KeyError: + # dataset doesn't exist yet + group[name] = data
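A short sketch of ``write_data`` with ``append=True``; ``fp`` is an assumed open file, and the path and values are made up.

import numpy
fp.write_data('niterations', 1000, path='sampler_info/history', append=True)
fp.write_data('niterations', 2000, path='sampler_info/history', append=True)
# fp['sampler_info/history/niterations'][:] is now [1000, 2000]

# Dictionaries are written recursively as subgroups:
fp.write_data('acts', {'mass1': numpy.array([12.5]), 'mass2': numpy.array([13.1])},
              path='sampler_info')
# creates sampler_info/acts/mass1 and sampler_info/acts/mass2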
+ + +
+[docs] + def write_config_file(self, cp): + """Writes the given config file parser. + + File is stored as a pickled buffer array to ``config_parser/{index}``, + where ``{index}`` is an integer corresponding to the number of config + files that have been saved. The first time a save is called, it is + stored to ``0``, and incremented from there. + + Parameters + ---------- + cp : ConfigParser + Config parser to save. + """ + # get the index of the last saved file + try: + index = list(map(int, self[self.config_group].keys())) + except KeyError: + index = [] + if index == []: + # hasn't been written yet + index = 0 + else: + index = max(index) + 1 + # we'll store the config file as a text file that is pickled + out = StringIO() + cp.write(out) + # now pickle it + dump_state(out, self, path=self.config_group, dsetname=str(index))
+ + +
+[docs] + def read_config_file(self, return_cp=True, index=-1): + """Reads the config file that was used. + + A ``ValueError`` is raised if no config files have been saved, or if + the requested index is larger than the number of stored config files. + + Parameters + ---------- + return_cp : bool, optional + If True, returns the loaded config file as + :py:class:`pycbc.workflow.configuration.WorkflowConfigParser` + type. Otherwise will return as string buffer. Default is True. + index : int, optional + The config file to load. If ``write_config_file`` has been called + multiple times (as would happen if restarting from a checkpoint), + there will be multiple config files stored. Default (-1) is to load + the last saved file. + + Returns + ------- + WorkflowConfigParser or StringIO : + The parsed config file. + """ + # get the stored indices + try: + indices = sorted(map(int, self[self.config_group].keys())) + index = indices[index] + except KeyError: + raise ValueError("no config files saved in hdf") + except IndexError: + raise ValueError("no config file matches requested index") + cf = load_state(self, path=self.config_group, dsetname=str(index)) + cf.seek(0) + if return_cp: + cp = WorkflowConfigParser() + cp.read_file(cf) + return cp + return cf
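A sketch of the config-file round trip; ``fp`` is an assumed open file and ``inference.ini`` is a hypothetical config file.

from pycbc.workflow.configuration import WorkflowConfigParser
cp = WorkflowConfigParser(['inference.ini'])
fp.write_config_file(cp)                      # stored at config_parser/0
fp.write_config_file(cp)                      # stored at config_parser/1 (e.g. on resume)
cp_last = fp.read_config_file()               # index=-1: loads config_parser/1
buf = fp.read_config_file(return_cp=False)    # the raw StringIO buffer instead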
+ + +
+[docs] + def read_data(self, group=None): + """Loads the data stored in the file as a FrequencySeries. + + Only works for models that store data as a frequency series in + ``data/DET/stilde``. A ``KeyError`` will be raised if the model used + did not store data in that path. + + Parameters + ---------- + group : str, optional + Group that the data group is in. Default (None) is to look in the + top-level. + + Returns + ------- + dict : + Dictionary of detector name -> FrequencySeries. + """ + fmt = '{}/{}/stilde' + if group is None or group == '/': + path = self.data_group + else: + path = '/'.join([group, self.data_group]) + data = {} + for det in self[path].keys(): + group = self[fmt.format(path, det)] + data[det] = FrequencySeries( + group[()], delta_f=group.attrs['delta_f'], + epoch=group.attrs['epoch']) + return data
+ + +
+[docs] + def read_psds(self, group=None): + """Loads the PSDs stored in the file as a FrequencySeries. + + Only works for models that store PSDs in + ``data/DET/psds/0``. A ``KeyError`` will be raised if the model used + did not store PSDs in that path. + + Parameters + ---------- + group : str, optional + Group that the data group is in. Default (None) is to look in the + top-level. + + Returns + ------- + dict : + Dictionary of detector name -> FrequencySeries. + """ + fmt = '{}/{}/psds/0' + if group is None or group == '/': + path = self.data_group + else: + path = '/'.join([group, self.data_group]) + psds = {} + for det in self[path].keys(): + group = self[fmt.format(path, det)] + psds[det] = FrequencySeries( + group[()], delta_f=group.attrs['delta_f']) + return psds
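A corresponding read-side sketch: ``fp`` is an assumed open results file produced by a data-based model, and the detector names depend on the analysis.

stilde = fp.read_data()     # dict: detector name -> FrequencySeries (the data)
psds = fp.read_psds()       # dict: detector name -> FrequencySeries (the PSDs)
for det in stilde:
    print(det, stilde[det].delta_f, psds[det].delta_f)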
\ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/base_mcmc.html b/latest/html/_modules/pycbc/inference/io/base_mcmc.html new file mode 100644 index 00000000000..9e7d6bfcb8f --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/base_mcmc.html @@ -0,0 +1,1109 @@
pycbc.inference.io.base_mcmc — PyCBC 2.5.dev2 documentation
Source code for pycbc.inference.io.base_mcmc

+# Copyright (C) 2016 Christopher M. Biwer, Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Provides I/O that is specific to MCMC samplers.
+"""
+
+
+import numpy
+import argparse
+
+
+
+[docs] +class CommonMCMCMetadataIO(object): + """Provides functions for reading/writing MCMC metadata to file. + + The functions here are common to both standard MCMC (in which chains + are independent) and ensemble MCMC (in which chains/walkers share + information). + """ +
+[docs] + def write_resume_point(self): + """Keeps a list of the number of iterations that were in a file when a + run was resumed from a checkpoint.""" + try: + resume_pts = self.attrs["resume_points"].tolist() + except KeyError: + resume_pts = [] + try: + niterations = self.niterations + except KeyError: + niterations = 0 + resume_pts.append(niterations) + self.attrs["resume_points"] = resume_pts
+ + +
+[docs] + def write_niterations(self, niterations): + """Writes the given number of iterations to the sampler group.""" + self[self.sampler_group].attrs['niterations'] = niterations
+ + + @property + def niterations(self): + """Returns the number of iterations the sampler was run for.""" + return self[self.sampler_group].attrs['niterations'] + + @property + def nwalkers(self): + """Returns the number of walkers used by the sampler. + + Alias of ``nchains``. + """ + try: + return self[self.sampler_group].attrs['nwalkers'] + except KeyError: + return self[self.sampler_group].attrs['nchains'] + + @property + def nchains(self): + """Returns the number of chains used by the sampler. + + Alias of ``nwalkers``. + """ + try: + return self[self.sampler_group].attrs['nchains'] + except KeyError: + return self[self.sampler_group].attrs['nwalkers'] + + def _thin_data(self, group, params, thin_interval): + """Thins data on disk by the given interval. + + This makes no effort to record the thinning interval that is applied. + + Parameters + ---------- + group : str + The group where the datasets to thin live. + params : list + The list of dataset names to thin. + thin_interval : int + The interval to thin the samples on disk by. + """ + samples = self.read_raw_samples(params, thin_start=0, + thin_interval=thin_interval, + thin_end=None, flatten=False, + group=group) + # now resize and write the data back to disk + fpgroup = self[group] + for param in params: + data = samples[param] + # resize the arrays on disk + fpgroup[param].resize(data.shape) + # and write + fpgroup[param][:] = data + +
+[docs] + def thin(self, thin_interval): + """Thins the samples on disk to the given thinning interval. + + The interval must be a multiple of the file's current ``thinned_by``. + + Parameters + ---------- + thin_interval : int + The interval the samples on disk should be thinned by. + """ + # get the new interval to thin by + new_interval = thin_interval / self.thinned_by + if new_interval % 1: + raise ValueError("thin interval ({}) must be a multiple of the " + "current thinned_by ({})" + .format(thin_interval, self.thinned_by)) + new_interval = int(new_interval) + # now thin the data on disk + params = list(self[self.samples_group].keys()) + self._thin_data(self.samples_group, params, new_interval) + # store the interval that samples were thinned by + self.thinned_by = thin_interval
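For example (a sketch assuming ``fp`` is an open MCMC results file whose current ``thinned_by`` is 2):

fp.thin(4)               # allowed: 4 is a multiple of 2; on-disk samples shrink accordingly
print(fp.thinned_by)     # -> 4
# fp.thin(6) would now raise ValueError, since 6 is not a multiple of 4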
+ + + @property + def thinned_by(self): + """Returns the interval samples have been thinned by on disk. + + This looks for ``thinned_by`` in the file's top-level ``attrs``. If + none is found, will just return 1. + """ + try: + thinned_by = self.attrs['thinned_by'] + except KeyError: + thinned_by = 1 + return thinned_by + + @thinned_by.setter + def thinned_by(self, thinned_by): + """Sets the thinned_by attribute. + + This is the interval that samples have been thinned by on disk. The + given value is written to the file's top-level + ``attrs['thinned_by']``. + """ + self.attrs['thinned_by'] = int(thinned_by) + 
+[docs] + def last_iteration(self, parameter=None, group=None): + """Returns the iteration of the last sample of the given parameter. + + Parameters + ---------- + parameter : str, optional + The name of the parameter to get the last iteration for. If + None provided, will just use the first parameter in ``group``. + group : str, optional + The name of the group to get the last iteration from. Default is + the ``samples_group``. + """ + if group is None: + group = self.samples_group + if parameter is None: + try: + parameter = list(self[group].keys())[0] + except (IndexError, KeyError): + # nothing has been written yet, just return 0 + return 0 + try: + lastiter = self[group][parameter].shape[-1] + except KeyError: + # no samples have been written, just return 0 + lastiter = 0 + # account for thinning + return lastiter * self.thinned_by
+ + +
+[docs] + def iterations(self, parameter): + """Returns the iteration each sample occurred at.""" + return numpy.arange(0, self.last_iteration(parameter), self.thinned_by)
+ + +
+[docs] + def write_sampler_metadata(self, sampler): + """Writes the sampler's metadata.""" + self.attrs['sampler'] = sampler.name + try: + self[self.sampler_group].attrs['nchains'] = sampler.nchains + except AttributeError: + self[self.sampler_group].attrs['nwalkers'] = sampler.nwalkers + # write the model's metadata + sampler.model.write_metadata(self)
+ + + @property + def is_burned_in(self): + """Returns whether or not chains are burned in. + + Raises a ``ValueError`` if no burn in tests were done. + """ + try: + return self[self.sampler_group]['is_burned_in'][()] + except KeyError: + raise ValueError("No burn in tests were performed") + + @property + def burn_in_iteration(self): + """Returns the burn in iteration of all the chains. + + Raises a ``ValueError`` if no burn in tests were done. + """ + try: + return self[self.sampler_group]['burn_in_iteration'][()] + except KeyError: + raise ValueError("No burn in tests were performed") + + @property + def burn_in_index(self): + """Returns the burn in index. + + This is the burn in iteration divided by the file's ``thinned_by``. + Requires that the class this is used with has a ``burn_in_iteration`` + attribute. + """ + return self.burn_in_iteration // self.thinned_by + + @property + def act(self): + """The autocorrelation time (ACT). + + This is the ACL times the file's ``thinned_by``. Raises a + ``ValueError`` if the ACT has not been calculated. + """ + try: + return self[self.sampler_group]['act'][()] + except KeyError: + raise ValueError("ACT has not been calculated") + + @act.setter + def act(self, act): + """Writes the autocorrelation time(s). + + ACT(s) are written to the ``sampler_group`` as a dataset with name + ``act``. + + Parameters + ---------- + act : array or int + ACT(s) to write. + """ + # pylint: disable=no-member + self.write_data('act', act, path=self.sampler_group) + + @property + def raw_acts(self): + """Dictionary of parameter names -> raw autocorrelation time(s). + + Depending on the sampler, the autocorrelation times may be floats, + or [ntemps x] [nchains x] arrays. + + Raises a ``ValueError`` if no raw ACTs have been set. + """ + try: + group = self[self.sampler_group]['raw_acts'] + except KeyError: + raise ValueError("ACTs have not been calculated") + acts = {} + for param in group: + acts[param] = group[param][()] + return acts + + @raw_acts.setter + def raw_acts(self, acts): + """Writes the raw autocorrelation times. + + The ACT of each parameter is saved to + ``[sampler_group]/raw_acts/{param}``. Works for all types of MCMC + samplers (independent chains, ensemble, parallel tempering). + + Parameters + ---------- + acts : dict + A dictionary of ACTs keyed by the parameter. + """ + path = self.sampler_group + '/raw_acts' + for param in acts: + self.write_data(param, acts[param], path=path) + + @property + def acl(self): + """The autocorrelation length (ACL) of the samples. + + This is the autocorrelation time (ACT) divided by the file's + ``thinned_by`` attribute. Raises a ``ValueError`` if the ACT has not + been calculated. + """ + return self.act / self.thinned_by + + @acl.setter + def acl(self, acl): + """Sets the autocorrelation length (ACL) of the samples. + + This will convert the given value(s) to autocorrelation time(s) and + save to the ``act`` attribute; see that attribute for details. + """ + self.act = acl * self.thinned_by + + @property + def raw_acls(self): + """Dictionary of parameter names -> raw autocorrelation length(s). + + Depending on the sampler, the autocorrelation lengths may be floats, + or [ntemps x] [nchains x] arrays. + + The ACLs are the autocorrelation times (ACT) divided by the file's + ``thinned_by`` attribute. Raises a ``ValueError`` if no raw ACTs have + been set. 
+ """ + return {p: self.raw_acts[p] / self.thinned_by for p in self.raw_acts} + + @raw_acls.setter + def raw_acls(self, acls): + """Sets the raw autocorrelation lengths. + + The given ACLs are converted to autocorrelation times (ACTs) and saved + to the ``raw_acts`` attribute; see that attribute for details. + + Parameters + ---------- + acls : dict + A dictionary of ACLs keyed by the parameter. + """ + self.raw_acts = {p: acls[p] * self.thinned_by for p in acls} + + def _update_sampler_history(self): + """Writes the number of iterations, effective number of samples, + autocorrelation times, and burn-in iteration to the history. + """ + path = '/'.join([self.sampler_group, 'checkpoint_history']) + # write the current number of iterations + self.write_data('niterations', self.niterations, path=path, + append=True) + self.write_data('effective_nsamples', self.effective_nsamples, + path=path, append=True) + # write the act: we'll make sure that this is 2D, so that the acts + # can be appended along the last dimension + try: + act = self.act + except ValueError: + # no acts were calculated + act = None + if act is not None: + act = act.reshape(tuple(list(act.shape)+[1])) + self.write_data('act', act, path=path, append=True) + # write the burn in iteration in the same way + try: + burn_in = self.burn_in_iteration + except ValueError: + # no burn in tests were done + burn_in = None + if burn_in is not None: + burn_in = burn_in.reshape(tuple(list(burn_in.shape)+[1])) + self.write_data('burn_in_iteration', burn_in, path=path, + append=True) + 
+[docs] + @staticmethod + def extra_args_parser(parser=None, skip_args=None, **kwargs): + """Create a parser to parse sampler-specific arguments for loading + samples. + + Parameters + ---------- + parser : argparse.ArgumentParser, optional + Instead of creating a parser, add arguments to the given one. If + none provided, will create one. + skip_args : list, optional + Don't parse the given options. Options should be given as the + option string, minus the '--'. For example, + ``skip_args=['iteration']`` would cause the ``--iteration`` + argument not to be included. + \**kwargs : + All other keyword arguments are passed to the parser that is + created. + + Returns + ------- + parser : argparse.ArgumentParser + An argument parser with the extra arguments added. + actions : list of argparse.Action + A list of the actions that were added. + """ + if parser is None: + parser = argparse.ArgumentParser(**kwargs) + elif kwargs: + raise ValueError("No other keyword arguments should be provided if " + "a parser is provided.") + if skip_args is None: + skip_args = [] + actions = [] + if 'thin-start' not in skip_args: + act = parser.add_argument( + "--thin-start", type=int, default=None, + help="Sample number to start collecting samples. If " + "none provided, will use the input file's `thin_start` " + "attribute.") + actions.append(act) + if 'thin-interval' not in skip_args: + act = parser.add_argument( + "--thin-interval", type=int, default=None, + help="Interval to use for thinning samples. If none provided, " + "will use the input file's `thin_interval` attribute.") + actions.append(act) + if 'thin-end' not in skip_args: + act = parser.add_argument( + "--thin-end", type=int, default=None, + help="Sample number to stop collecting samples. If " + "none provided, will use the input file's `thin_end` " + "attribute.") + actions.append(act) + if 'iteration' not in skip_args: + act = parser.add_argument( + "--iteration", type=int, default=None, + help="Only retrieve the given iteration. To load " + "the last n-th sample use -n, e.g., -1 will " + "load the last iteration. This overrides " + "the thin-start/interval/end options.") + actions.append(act) + if 'walkers' not in skip_args and 'chains' not in skip_args: + act = parser.add_argument( + "--walkers", "--chains", type=int, nargs="+", default=None, + help="Only retrieve samples from the listed " + "walkers/chains. Default is to retrieve from all " + "walkers/chains.") + actions.append(act) + return parser, actions
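A self-contained sketch of the parser in use; the argument values are illustrative and the module path matches this page.

from pycbc.inference.io.base_mcmc import CommonMCMCMetadataIO
parser, actions = CommonMCMCMetadataIO.extra_args_parser(skip_args=['walkers'])
opts = parser.parse_args(['--thin-start', '500', '--thin-interval', '10'])
print(opts.thin_start, opts.thin_interval, opts.iteration)   # 500 10 None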
+
+ + + +
+[docs] +class MCMCMetadataIO(object): + """Provides functions for reading/writing metadata to file for MCMCs in + which all chains are independent of each other. + + Overrides the ``BaseInference`` file's ``thin_start`` and ``thin_interval`` + attributes. Instead of integers, these return arrays. + """ + @property + def thin_start(self): + """Returns the default thin start to use for reading samples. + + If burn-in tests were done, this will return the burn-in index of every + chain that has burned in. The start index for chains that have not + burned in will be greater than the number of samples, so that those + chains return no samples. If no burn-in tests were done, returns 0 + for all chains. + """ + # pylint: disable=no-member + try: + thin_start = self.burn_in_index + # replace any that have not been burned in with the number + # of iterations; this will cause those chains to not return + # any samples + thin_start[~self.is_burned_in] = \ + int(numpy.ceil(self.niterations/self.thinned_by)) + return thin_start + except ValueError: + # no burn in, just return array of zeros + return numpy.zeros(self.nchains, dtype=int) + + @property + def thin_interval(self): + """Returns the default thin interval to use for reading samples. + + If a finite ACL exists in the file, will return that. Otherwise, + returns 1. + """ + try: + acl = self.acl + except ValueError: + return numpy.ones(self.nchains, dtype=int) + # replace any infs with the number of samples + acl[numpy.isinf(acl)] = self.niterations / self.thinned_by + return numpy.ceil(acl).astype(int)
+ + + +
+[docs] +class EnsembleMCMCMetadataIO(object): + """Provides functions for reading/writing metadata to file for ensemble + MCMCs. + """ + @property + def thin_start(self): + """Returns the default thin start to use for reading samples. + + If burn-in tests were done, returns the burn in index. Otherwise, + returns 0. + """ + try: + return self.burn_in_index + except ValueError: + # no burn in, just return 0 + return 0 + + @property + def thin_interval(self): + """Returns the default thin interval to use for reading samples. + + If a finite ACL exists in the file, will return that. Otherwise, + returns 1. + """ + try: + acl = self.acl + except ValueError: + acl = 1 + if numpy.isfinite(acl): + acl = int(numpy.ceil(acl)) + else: + acl = 1 + return acl
+ + + +
+[docs] +def write_samples(fp, samples, parameters=None, last_iteration=None, + samples_group=None, thin_by=None): + """Writes samples to the given file. + + This works for both standard MCMC and ensemble MCMC samplers without + parallel tempering. + + Results are written to ``samples_group/{vararg}``, where ``{vararg}`` + is the name of a model params. The samples are written as an + ``nwalkers x niterations`` array. If samples already exist, the new + samples are appended to the current. + + If the current samples on disk have been thinned (determined by the + ``thinned_by`` attribute in the samples group), then the samples will + be thinned by the same amount before being written. The thinning is + started at the sample in ``samples`` that occured at the iteration + equal to the last iteration on disk plus the ``thinned_by`` interval. + If this iteration is larger than the iteration of the last given + sample, then none of the samples will be written. + + Parameters + ----------- + fp : BaseInferenceFile + Open file handler to write files to. Must be an instance of + BaseInferenceFile with CommonMCMCMetadataIO methods added. + samples : dict + The samples to write. Each array in the dictionary should have + shape nwalkers x niterations. + parameters : list, optional + Only write the specified parameters to the file. If None, will + write all of the keys in the ``samples`` dict. + last_iteration : int, optional + The iteration of the last sample. If the file's ``thinned_by`` + attribute is > 1, this is needed to determine where to start + thinning the samples such that the interval between the last sample + currently on disk and the first new sample is the same as all of + the other samples. + samples_group : str, optional + Which group to write the samples to. Default (None) will result + in writing to "samples". + thin_by : int, optional + Override the ``thinned_by`` attribute in the file with the given + value. **Only set this if you are using this function to write + something other than inference samples!** + """ + nwalkers, nsamples = list(samples.values())[0].shape + assert all(p.shape == (nwalkers, nsamples) + for p in samples.values()), ( + "all samples must have the same shape") + if samples_group is None: + samples_group = fp.samples_group + if parameters is None: + parameters = samples.keys() + # thin the samples + samples = thin_samples_for_writing(fp, samples, parameters, + last_iteration, samples_group, + thin_by=thin_by) + # loop over number of dimensions + group = samples_group + '/{name}' + for param in parameters: + dataset_name = group.format(name=param) + data = samples[param] + # check that there's something to write after thinning + if data.shape[1] == 0: + # nothing to write, move along + continue + try: + fp_nsamples = fp[dataset_name].shape[-1] + istart = fp_nsamples + istop = istart + data.shape[1] + if istop > fp_nsamples: + # resize the dataset + fp[dataset_name].resize(istop, axis=1) + except KeyError: + # dataset doesn't exist yet + istart = 0 + istop = istart + data.shape[1] + fp.create_dataset(dataset_name, (nwalkers, istop), + maxshape=(nwalkers, None), + dtype=data.dtype, + fletcher32=True) + fp[dataset_name][:, istart:istop] = data
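A writing sketch: ``fp`` is an assumed open ensemble MCMC file, and the shapes and parameter names are made up. ``last_iteration`` is the iteration of the final sample and only matters once the file has been thinned (``thinned_by`` > 1).

import numpy
from pycbc.inference.io.base_mcmc import write_samples
nwalkers, niterations = 64, 500
samples = {'x': numpy.random.normal(size=(nwalkers, niterations)),
           'y': numpy.random.normal(size=(nwalkers, niterations))}
write_samples(fp, samples, last_iteration=niterations)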
+ + + +
+[docs] +def ensemble_read_raw_samples(fp, fields, thin_start=None, + thin_interval=None, thin_end=None, + iteration=None, walkers=None, flatten=True, + group=None): + """Base function for reading samples from ensemble MCMC files without + parallel tempering. + + Parameters + ----------- + fp : BaseInferenceFile + Open file handler to write files to. Must be an instance of + BaseInferenceFile with EnsembleMCMCMetadataIO methods added. + fields : list + The list of field names to retrieve. + thin_start : int, optional + Start reading from the given iteration. Default is to start from + the first iteration. + thin_interval : int, optional + Only read every ``thin_interval`` -th sample. Default is 1. + thin_end : int, optional + Stop reading at the given iteration. Default is to end at the last + iteration. + iteration : int, optional + Only read the given iteration. If this provided, it overrides + the ``thin_(start|interval|end)`` options. + walkers : (list of) int, optional + Only read from the given walkers. Default (``None``) is to read all. + flatten : bool, optional + Flatten the samples to 1D arrays before returning. Otherwise, the + returned arrays will have shape (requested walkers x + requested iteration(s)). Default is True. + group : str, optional + The name of the group to read sample datasets from. Default is + the file's ``samples_group``. + + Returns + ------- + dict + A dictionary of field name -> numpy array pairs. + """ + if isinstance(fields, str): + fields = [fields] + # walkers to load + widx, nwalkers = _ensemble_get_walker_index(fp, walkers) + # get the slice to use + get_index = _ensemble_get_index(fp, thin_start, thin_interval, thin_end, + iteration) + # load + if group is None: + group = fp.samples_group + group = group + '/{name}' + arrays = {} + for name in fields: + arr = fp[group.format(name=name)][widx, get_index] + niterations = arr.shape[-1] if iteration is None else 1 + if flatten: + arr = arr.flatten() + else: + # ensure that the returned array is 2D + arr = arr.reshape((nwalkers, niterations)) + arrays[name] = arr + return arrays
+ + + +def _ensemble_get_walker_index(fp, walkers=None): + """Convenience function to determine which walkers to load. + + Parameters + ---------- + fp : BaseInferenceFile + Open file handler to write files to. Must be an instance of + BaseInferenceFile with EnsembleMCMCMetadataIO methods added. + walkers : (list of) int, optional + Only read from the given walkers. Default (``None``) is to read all. + + Returns + ------- + widx : array or slice + The walker indices to load. + nwalkers : int + The number of walkers that will be loaded. + """ + if walkers is not None: + widx = numpy.zeros(fp.nwalkers, dtype=bool) + widx[walkers] = True + nwalkers = widx.sum() + else: + widx = slice(None, None) + nwalkers = fp.nwalkers + return widx, nwalkers + + +def _ensemble_get_index(fp, thin_start=None, thin_interval=None, thin_end=None, + iteration=None): + """Determines the sample indices to retrieve for an ensemble MCMC. + + Parameters + ----------- + fp : BaseInferenceFile + Open file handler to write files to. Must be an instance of + BaseInferenceFile with EnsembleMCMCMetadataIO methods added. + thin_start : int, optional + Start reading from the given iteration. Default is to start from + the first iteration. + thin_interval : int, optional + Only read every ``thin_interval`` -th sample. Default is 1. + thin_end : int, optional + Stop reading at the given iteration. Default is to end at the last + iteration. + iteration : int, optional + Only read the given iteration. If this provided, it overrides + the ``thin_(start|interval|end)`` options. + + Returns + ------- + slice or int + The indices to retrieve. + """ + if iteration is not None: + get_index = int(iteration) + else: + if thin_start is None: + thin_start = fp.thin_start + if thin_interval is None: + thin_interval = fp.thin_interval + if thin_end is None: + thin_end = fp.thin_end + get_index = fp.get_slice(thin_start=thin_start, + thin_interval=thin_interval, + thin_end=thin_end) + return get_index + + +def _get_index(fp, chains, thin_start=None, thin_interval=None, thin_end=None, + iteration=None): + """Determines the sample indices to retrieve for an MCMC with independent + chains. + + Parameters + ----------- + fp : BaseInferenceFile + Open file handler to read samples from. Must be an instance of + BaseInferenceFile with EnsembleMCMCMetadataIO methods added. + chains : array of int + The chains to load. + thin_start : array or int, optional + Start reading from the given sample. May either provide an array + indicating the start index for each chain, or an integer. If the + former, the array must have the same length as the number of chains + that will be retrieved. If the latter, the given value will be used + for all chains. Default (None) is to use the file's ``thin_start`` + attribute. + thin_interval : array or int, optional + Only read every ``thin_interval``-th sample. May either provide an + array indicating the interval to use for each chain, or an integer. If + the former, the array must have the same length as the number of chains + that will be retrieved. If the latter, the given value will be used for + all chains. Default (None) is to use the file's ``thin_interval`` + attribute. + thin_end : array or int, optional + Stop reading at the given sample index. May either provide an + array indicating the end index to use for each chain, or an integer. If + the former, the array must have the same length as the number of chains + that will be retrieved. If the latter, the given value will be used for + all chains. 
Default (None) is to use the the file's ``thin_end`` + attribute. + iteration : int, optional + Only read the given iteration from all chains. If provided, it + overrides the ``thin_(start|interval|end)`` options. + + Returns + ------- + get_index : list of slice or int + The indices to retrieve. + """ + nchains = len(chains) + # convenience function to get the right thin start/interval/end + if iteration is not None: + get_index = [int(iteration)]*nchains + else: + # get the slice arguments + thin_start = _format_slice_arg(thin_start, fp.thin_start, chains) + thin_interval = _format_slice_arg(thin_interval, fp.thin_interval, + chains) + thin_end = _format_slice_arg(thin_end, fp.thin_end, chains) + # the slices to use for each chain + get_index = [fp.get_slice(thin_start=thin_start[ci], + thin_interval=thin_interval[ci], + thin_end=thin_end[ci]) + for ci in range(nchains)] + return get_index + + +def _format_slice_arg(value, default, chains): + """Formats a start/interval/end argument for picking out chains. + + Parameters + ---------- + value : None, int, array or list of int + The thin-start/interval/end value to format. ``None`` indicates the + user did not specify anything, in which case ``default`` will be used. + If an integer, then it will be repeated to match the length of + ``chains```. If an array or list, it must have the same length as + ``chains``. + default : array + What to use instead if ``value`` is ``None``. + chains : array of int + The index values of chains that will be loaded. + + Returns + ------- + array + Array giving the value to use for each chain in ``chains``. The array + will have the same length as ``chains``. + """ + if value is None and default is None: + # no value provided, and default is None, just return Nones with the + # same length as chains + value = [None]*len(chains) + elif value is None: + # use the default, with the desired values extracted + value = default[chains] + elif isinstance(value, (int, numpy.int_)): + # a single integer was provided, repeat into an array + value = numpy.repeat(value, len(chains)) + elif len(value) != len(chains): + # a list of values was provided, but the length does not match the + # chains, raise an error + raise ValueError("Number of requested thin-start/interval/end values " + "({}) does not match number of requested chains ({})" + .format(len(value), len(chains))) + return value + + +
+[docs] +def thin_samples_for_writing(fp, samples, parameters, last_iteration, + group, thin_by=None): + """Thins samples for writing to disk. + + The thinning interval to use is determined by the given file handler's + ``thinned_by`` attribute. If that attribute is 1, just returns the samples. + + Parameters + ---------- + fp : CommonMCMCMetadataIO instance + The file the sampels will be written to. Needed to determine the + thin interval used on disk. + samples : dict + Dictionary mapping parameter names to arrays of (unthinned) samples. + The arrays are thinned along their last dimension. + parameters : list of str + The parameters to thin in ``samples`` before writing. All listed + parameters must be in ``samples``. + last_iteration : int + The iteration that the last sample in ``samples`` occurred at. This is + needed to figure out where to start the thinning in ``samples``, such + that the interval between the last sample on disk and the first new + sample is the same as all of the other samples. + group : str + The name of the group that the samples will be written to. This is + needed to determine what the last iteration saved on disk was. + thin_by : int, optional + Override the ``thinned_by`` attribute in the file for with the given + value. **Only do this if you are thinning something other than + inference samples!** + + Returns + ------- + dict : + Dictionary of the thinned samples to write. + """ + if thin_by is None: + thin_by = fp.thinned_by + if thin_by > 1: + if last_iteration is None: + raise ValueError("File's thinned_by attribute is > 1 ({}), " + "but last_iteration not provided." + .format(thin_by)) + thinned_samples = {} + for param in parameters: + data = samples[param] + nsamples = data.shape[-1] + # To figure out where to start: + # the last iteration in the file + the file's thinning interval + # gives the iteration of the next sample that should be written; + # last_iteration - nsamples gives the iteration of the first + # sample in samples. Subtracting the latter from the former - 1 + # (-1 to convert from iteration to index) therefore gives the index + # in the samples data to start using samples. + thin_start = fp.last_iteration(param, group) + thin_by \ + - (last_iteration - nsamples) - 1 + thinned_samples[param] = data[..., thin_start::thin_by] + else: + thinned_samples = samples + return thinned_samples
+ + + +
+[docs] +def nsamples_in_chain(start_iter, interval, niterations): + """Calculates the number of samples in an MCMC chain given a thinning + start, an interval, and the number of iterations. + + This function will work with either python scalars, or numpy arrays. + + Parameters + ---------- + start_iter : (array of) int + Start iteration. If negative, will count as being how many iterations + to start before the end; otherwise, counts how many iterations after + the beginning to start. If this is larger than niterations, will + just return 0. + interval : (array of) int + Thinning interval. + niterations : (array of) int + The number of iterations. + + Returns + ------- + num_samples : (array of) numpy.int + The number of samples in a chain, >= 0. + """ + # this is written in a slightly wonky way so that it will work with either + # python scalars or numpy arrays; it is equivalent to: + # if start_iter < 0: + # count = min(abs(start_iter), niterations) + # else: + # count = max(niterations - start_iter, 0) + slt0 = start_iter < 0 + sgt0 = start_iter >= 0 + count = slt0*abs(start_iter) + sgt0*(niterations - start_iter) + # ensure count is in [0, niterations] + cgtn = count > niterations + cok = (count >= 0) & (count <= niterations) + count = cgtn*niterations + cok*count + return numpy.ceil(count / interval).astype(int)
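The counting can be checked directly with illustrative numbers; the module path matches this page.

import numpy
from pycbc.inference.io.base_mcmc import nsamples_in_chain
print(nsamples_in_chain(100, 2, 1000))    # 450: the 900 iterations after 100, every 2nd
print(nsamples_in_chain(-100, 2, 1000))   # 50: the last 100 iterations, every 2nd
print(nsamples_in_chain(numpy.array([0, 2000]), 1, 1000))   # [1000    0]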
\ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/base_multitemper.html b/latest/html/_modules/pycbc/inference/io/base_multitemper.html new file mode 100644 index 00000000000..723a7c826b4 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/base_multitemper.html @@ -0,0 +1,555 @@
pycbc.inference.io.base_multitemper — PyCBC 2.5.dev2 documentation
Source code for pycbc.inference.io.base_multitemper

+# Copyright (C) 2018 Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Provides I/O support for multi-tempered samplers.
+"""
+
+import argparse
+import numpy
+from .base_mcmc import (CommonMCMCMetadataIO, thin_samples_for_writing,
+                        _ensemble_get_index, _ensemble_get_walker_index,
+                        _get_index)
+
+
+[docs] +class ParseTempsArg(argparse.Action): + """Argparse action that will parse temps argument. + + If the provided argument is 'all', sets 'all' in the namespace dest. If a + sequence of numbers is provided, converts those numbers to ints before + saving to the namespace. + """ + def __init__(self, type=str, **kwargs): # pylint: disable=redefined-builtin + # check that type is string + if type != str: + raise ValueError("the type for this action must be a string") + super(ParseTempsArg, self).__init__(type=type, **kwargs) + + def __call__(self, parser, namespace, values, option_string=None): + singlearg = isinstance(values, str) + if singlearg: + values = [values] + if values[0] == 'all': + # check that only a single value was provided + if len(values) > 1: + raise ValueError("if providing 'all', should not specify any " + "other temps") + temps = 'all' + else: + temps = [] + for val in values: + try: + val = int(val) + except ValueError: + pass + temps.append(val) + if singlearg: + temps = temps[0] + setattr(namespace, self.dest, temps)
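A small sketch of the action attached to a stand-alone parser, mirroring how ``extra_args_parser`` below uses it; the argument values are illustrative.

import argparse
from pycbc.inference.io.base_multitemper import ParseTempsArg
parser = argparse.ArgumentParser()
parser.add_argument('--temps', nargs='+', default=0, action=ParseTempsArg)
print(parser.parse_args(['--temps', 'all']).temps)           # 'all'
print(parser.parse_args(['--temps', '0', '1', '2']).temps)   # [0, 1, 2]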
+ + + +
+[docs] +class CommonMultiTemperedMetadataIO(CommonMCMCMetadataIO): + """Adds support for reading/writing multi-tempered metadata to + :py:class:`~pycbc.inference.io.base_mcmc.CommonMCMCMetadataIO`. + """ + @property + def ntemps(self): + """Returns the number of temperatures used by the sampler.""" + return self[self.sampler_group].attrs['ntemps'] + 
+[docs] + def write_sampler_metadata(self, sampler): + """Adds writing ntemps to file. + """ + super(CommonMultiTemperedMetadataIO, self).write_sampler_metadata( + sampler) + self[self.sampler_group].attrs["ntemps"] = sampler.ntemps
+ + +
+[docs] + @staticmethod + def extra_args_parser(parser=None, skip_args=None, **kwargs): + """Adds --temps to MCMCIO parser. + """ + if skip_args is None: + skip_args = [] + parser, actions = CommonMCMCMetadataIO.extra_args_parser( + parser=parser, skip_args=skip_args, **kwargs) + if 'temps' not in skip_args: + act = parser.add_argument( + "--temps", nargs="+", default=0, action=ParseTempsArg, + help="Get the given temperatures. May provide either a " + "sequence of integers specifying the temperatures to " + "plot, or 'all' for all temperatures. Default is to only " + "plot the coldest (= 0) temperature chain.") + actions.append(act) + return parser, actions
+
+ + + +
+[docs] +def write_samples(fp, samples, parameters=None, last_iteration=None, + samples_group=None, thin_by=None): + """Writes samples to the given file. + + This works both for standard MCMC and ensemble MCMC samplers with + parallel tempering. + + Results are written to ``samples_group/{vararg}``, where ``{vararg}`` + is the name of a model params. The samples are written as an + ``ntemps x nwalkers x niterations`` array. + + Parameters + ----------- + fp : BaseInferenceFile + Open file handler to write files to. Must be an instance of + BaseInferenceFile with CommonMultiTemperedMetadataIO methods added. + samples : dict + The samples to write. Each array in the dictionary should have + shape ntemps x nwalkers x niterations. + parameters : list, optional + Only write the specified parameters to the file. If None, will + write all of the keys in the ``samples`` dict. + last_iteration : int, optional + The iteration of the last sample. If the file's ``thinned_by`` + attribute is > 1, this is needed to determine where to start + thinning the samples to match what has already been stored on disk. + samples_group : str, optional + Which group to write the samples to. Default (None) will result + in writing to "samples". + thin_by : int, optional + Override the ``thinned_by`` attribute in the file with the given + value. **Only set this if you are using this function to write + something other than inference samples!** + """ + ntemps, nwalkers, niterations = tuple(samples.values())[0].shape + assert all(p.shape == (ntemps, nwalkers, niterations) + for p in samples.values()), ( + "all samples must have the same shape") + if samples_group is None: + samples_group = fp.samples_group + if parameters is None: + parameters = list(samples.keys()) + # thin the samples + samples = thin_samples_for_writing(fp, samples, parameters, + last_iteration, samples_group, + thin_by=thin_by) + # loop over number of dimensions + group = samples_group + '/{name}' + for param in parameters: + dataset_name = group.format(name=param) + data = samples[param] + # check that there's something to write after thinning + if data.shape[2] == 0: + # nothing to write, move along + continue + try: + fp_niterations = fp[dataset_name].shape[-1] + istart = fp_niterations + istop = istart + data.shape[2] + if istop > fp_niterations: + # resize the dataset + fp[dataset_name].resize(istop, axis=2) + except KeyError: + # dataset doesn't exist yet + istart = 0 + istop = istart + data.shape[2] + fp.create_dataset(dataset_name, (ntemps, nwalkers, istop), + maxshape=(ntemps, nwalkers, None), + dtype=data.dtype, + fletcher32=True) + fp[dataset_name][:, :, istart:istop] = data
+ + + +
+[docs] +def read_raw_samples(fp, fields, + thin_start=None, thin_interval=None, thin_end=None, + iteration=None, temps='all', chains=None, + flatten=True, group=None): + """Base function for reading samples from a collection of independent + MCMC chains file with parallel tempering. + + This may collect differing numbering of samples from each chains, + depending on the thinning settings for each chain. If not flattened the + returned array will have dimensions requested temps x requested chains x + max samples, where max samples is the largest number of samples retrieved + from a single chain. Chains that retrieve fewer samples will be padded with + ``numpy.nan``. If flattened, the NaNs are removed prior to returning. + + Parameters + ----------- + fp : BaseInferenceFile + Open file handler to read samples from. Must be an instance of + BaseInferenceFile with CommonMultiTemperedMetadataIO methods added. + fields : list + The list of field names to retrieve. + thin_start : array or int, optional + Start reading from the given sample. May either provide an array + indicating the start index for each chain, or an integer. If the + former, the array must have the same length as the number of chains + that will be retrieved. If the latter, the given value will be used + for all chains. Default (None) is to use the file's ``thin_start`` + attribute. + thin_interval : array or int, optional + Only read every ``thin_interval``-th sample. May either provide an + array indicating the interval to use for each chain, or an integer. If + the former, the array must have the same length as the number of chains + that will be retrieved. If the latter, the given value will be used for + all chains. Default (None) is to use the file's ``thin_interval`` + attribute. + thin_end : array or int, optional + Stop reading at the given sample index. May either provide an + array indicating the end index to use for each chain, or an integer. If + the former, the array must have the same length as the number of chains + that will be retrieved. If the latter, the given value will be used for + all chains. Default (None) is to use the the file's ``thin_end`` + attribute. + iteration : int, optional + Only read the given iteration from all chains. If provided, it + overrides the ``thin_(start|interval|end)`` options. + temps : 'all' or (list of) int, optional + The temperature index (or list of indices) to retrieve. To retrieve + all temperates pass 'all', or a list of all of the temperatures. + Default is 'all'. + chains : (list of) int, optional + Only read from the given chains. Default is to read all. + flatten : bool, optional + Remove NaNs and flatten the samples to 1D arrays before returning. + Otherwise, the returned arrays will have shape (requested temps x + requested chains x max requested iteration(s)), with chains that return + fewer samples padded with NaNs. Default is True. + group : str, optional + The name of the group to read sample datasets from. Default is + the file's ``samples_group``. + + Returns + ------- + dict + A dictionary of field name -> numpy array pairs. 
+ """ + if isinstance(fields, str): + fields = [fields] + if group is None: + group = fp.samples_group + group = group + '/{name}' + # chains to load + if chains is None: + chains = numpy.arange(fp.nchains) + elif not isinstance(chains, (list, numpy.ndarray)): + chains = numpy.array([chains]).astype(int) + get_index = _get_index(fp, chains, thin_start, thin_interval, thin_end, + iteration) + # load the samples + arrays = {} + for name in fields: + dset = group.format(name=name) + # get the temperatures to load + tidx, selecttemps, ntemps = _get_temps_index(temps, fp, dset) + alist = [] + maxiters = 0 + for ii, cidx in enumerate(chains): + idx = get_index[ii] + # load the data + thisarr = fp[dset][tidx, cidx, idx] + if thisarr.size == 0: + # no samples were loaded; skip this chain + alist.append(None) + continue + if isinstance(idx, (int, numpy.int_)): + # make sure the last dimension corresponds to iteration + thisarr = thisarr.reshape(list(thisarr.shape)+[1]) + # pull out the temperatures we need + if selecttemps: + thisarr = thisarr[temps, ...] + # make sure its 2D + thisarr = thisarr.reshape(ntemps, thisarr.shape[-1]) + alist.append(thisarr) + maxiters = max(maxiters, thisarr.shape[-1]) + # stack into a single array + arr = numpy.full((ntemps, len(chains), maxiters), numpy.nan, + dtype=fp[dset].dtype) + for ii, thisarr in enumerate(alist): + if thisarr is not None: + arr[:, ii, :thisarr.shape[-1]] = thisarr + if flatten: + # flatten and remove nans + arr = arr.flatten() + arr = arr[~numpy.isnan(arr)] + arrays[name] = arr + return arrays
+ + + +
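A reading sketch: ``fp`` is an assumed open multi-tempered, independent-chains results file, and the parameter names are hypothetical.

from pycbc.inference.io.base_multitemper import read_raw_samples
samps = read_raw_samples(fp, ['mass1', 'mass2'], temps=0, chains=[0, 3])
print(samps['mass1'].shape)   # 1D, since flatten=True removes the NaN padding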
+[docs] +def ensemble_read_raw_samples(fp, fields, thin_start=None, + thin_interval=None, thin_end=None, + iteration=None, temps='all', walkers=None, + flatten=True, group=None): + """Base function for reading samples from ensemble MCMC file with + parallel tempering. + + Parameters + ----------- + fp : BaseInferenceFile + Open file handler to write files to. Must be an instance of + BaseInferenceFile with CommonMultiTemperedMetadataIO methods added. + fields : list + The list of field names to retrieve. + thin_start : int, optional + Start reading from the given iteration. Default is to start from + the first iteration. + thin_interval : int, optional + Only read every ``thin_interval`` -th sample. Default is 1. + thin_end : int, optional + Stop reading at the given iteration. Default is to end at the last + iteration. + iteration : int, optional + Only read the given iteration. If this provided, it overrides + the ``thin_(start|interval|end)`` options. + temps : 'all' or (list of) int, optional + The temperature index (or list of indices) to retrieve. To retrieve + all temperates pass 'all', or a list of all of the temperatures. + Default is 'all'. + walkers : (list of) int, optional + Only read from the given walkers. Default (``None``) is to read all. + flatten : bool, optional + Flatten the samples to 1D arrays before returning. Otherwise, the + returned arrays will have shape (requested temps x + requested walkers x requested iteration(s)). Default is True. + group : str, optional + The name of the group to read sample datasets from. Default is + the file's ``samples_group``. + + Returns + ------- + dict + A dictionary of field name -> numpy array pairs. + """ + if isinstance(fields, str): + fields = [fields] + # walkers to load + widx, nwalkers = _ensemble_get_walker_index(fp, walkers) + # get the slice to use + get_index = _ensemble_get_index(fp, thin_start, thin_interval, thin_end, + iteration) + # load + if group is None: + group = fp.samples_group + group = group + '/{name}' + arrays = {} + for name in fields: + dset = group.format(name=name) + tidx, selecttemps, ntemps = _get_temps_index(temps, fp, dset) + arr = fp[dset][tidx, widx, get_index] + niterations = arr.shape[-1] if iteration is None else 1 + if selecttemps: + # pull out the temperatures we need + arr = arr[temps, ...] + if flatten: + arr = arr.flatten() + else: + # ensure that the returned array is 3D + arr = arr.reshape((ntemps, nwalkers, niterations)) + arrays[name] = arr + return arrays
+ + + +def _get_temps_index(temps, fp, dataset): + """Convenience function to determine which temperatures to load. + + Parameters + ----------- + temps : 'all' or (list of) int + The temperature index (or list of indices) to retrieve. To retrieve + all temperatures pass 'all', or a list of all of the temperatures. + fp : BaseInferenceFile + Open file handler to read samples from. Must be an instance of + BaseInferenceFile with CommonMultiTemperedMetadataIO methods added. + dataset : str + The name of the dataset that samples will be loaded from. + + Returns + ------- + tidx : slice or list of int + The temperature indices to load from the file. + selecttemps : bool + Whether specific temperatures need to be pulled out of the samples + array after it is loaded from the file. + ntemps : int + The number of temperatures that will be loaded. + """ + if temps == 'all': + # all temperatures were requested; just need to know how many + ntemps = fp[dataset].shape[0] + tidx = slice(None, None) + selecttemps = False + elif isinstance(temps, (int, numpy.int_)): + # only a single temperature is requested + ntemps = 1 + tidx = temps + selecttemps = False + else: + # a select set of temperatures is requested + tidx = slice(None, None) + ntemps = len(temps) + selecttemps = True + return tidx, selecttemps, ntemps +
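A minimal usage sketch of the reader functions above, assuming an existing multi-tempered inference HDF file; the path 'samples.hdf' and the parameter names are illustrative, and ``loadfile`` is assumed to be the package's generic file opener:

from pycbc.inference.io import loadfile

with loadfile('samples.hdf', 'r') as fp:
    # every 10th iteration of the coldest temperature, kept per chain
    samps = fp.read_raw_samples(['mass1', 'mass2'],
                                thin_interval=10, temps=0, flatten=False)
    # with flatten=False the arrays are (ntemps, nchains, niterations)
    print(samps['mass1'].shape)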
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/base_nested_sampler.html b/latest/html/_modules/pycbc/inference/io/base_nested_sampler.html new file mode 100644 index 00000000000..8acc6e64bb9 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/base_nested_sampler.html @@ -0,0 +1,223 @@ + + + + + + pycbc.inference.io.base_nested_sampler — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.base_nested_sampler

+# Copyright (C) 2019 Sumit Kumar, Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Provides IO for the dynesty sampler.
+"""
+from .base_sampler import BaseSamplerFile
+from .posterior import read_raw_samples_from_file, write_samples_to_file
+
+
+
+[docs] +class BaseNestedSamplerFile(BaseSamplerFile): + """Class to handle file IO for the nested samplers cpnest and dynesty.""" + + name = 'base_nest_file' + +
+[docs] + def read_raw_samples(self, fields, **kwargs): + return read_raw_samples_from_file(self, fields, **kwargs)
+ + +
+[docs] + def write_resume_point(self): + pass
+ + +
+[docs] + def write_niterations(self, niterations): + """ + Writes the given number of iterations to the sampler group. + """ + self[self.sampler_group].attrs['niterations'] = niterations
+ + +
+[docs] + def write_sampler_metadata(self, sampler): + """ + Writes the sampler's metadata, including the number of live points + and the model's metadata. + """ + self.attrs['sampler'] = sampler.name + if self.sampler_group not in self.keys(): + # create the sampler group + self.create_group(self.sampler_group) + self[self.sampler_group].attrs['nlivepoints'] = sampler.nlive + # write the model's metadata + sampler.model.write_metadata(self)
+ + +
+[docs] + def write_samples(self, samples, parameters=None): + """Writes samples to the given file. + + Results are written to ``samples_group/{vararg}``, where ``{vararg}`` + is the name of a model params. The samples are written as an + array of length ``niterations``. + + Parameters + ----------- + samples : dict + The samples to write. Each array in the dictionary should have + length niterations. + parameters : list, optional + Only write the specified parameters to the file. If None, will + write all of the keys in the ``samples`` dict. + """ + # since we're just writing a posterior use + # PosteriorFile's write_samples + write_samples_to_file(self, samples, parameters=parameters)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/base_sampler.html b/latest/html/_modules/pycbc/inference/io/base_sampler.html new file mode 100644 index 00000000000..ed9da50bdbf --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/base_sampler.html @@ -0,0 +1,296 @@ + + + + + + pycbc.inference.io.base_sampler — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.base_sampler

+# Copyright (C) 2019 Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""Provides abstract base class for all samplers."""
+
+
+import time
+from abc import (ABCMeta, abstractmethod)
+
+from .base_hdf import BaseInferenceFile
+
+
+
+[docs] +class BaseSamplerFile(BaseInferenceFile, metaclass=ABCMeta): + """Base HDF class for all samplers. + + This adds abstract methods ``write_resume_point`` and + ``write_sampler_metadata`` to :py:class:`BaseInferenceFile`. + """ + +
+[docs] + def write_run_start_time(self): + """Writes the current (UNIX) time to the file. + + Times are stored as a list in the file's ``attrs``, with name + ``run_start_time``. If the attribute already exists, the current time + is appended. Otherwise, the attribute will be created and the time added. + """ + attrname = "run_start_time" + try: + times = self.attrs[attrname].tolist() + except KeyError: + times = [] + times.append(time.time()) + self.attrs[attrname] = times
+ + + @property + def run_start_time(self): + """The (UNIX) time pycbc inference began running. + + If the run resumed from a checkpoint, the time the last checkpoint + started is reported. + """ + return self.attrs['run_start_time'][-1] + +
+[docs] + def write_run_end_time(self): + """Writes the current (UNIX) time as the ``run_end_time`` attribute. + """ + self.attrs["run_end_time"] = time.time()
+ + + @property + def run_end_time(self): + """The (UNIX) time pycbc inference finished. + """ + return self.attrs["run_end_time"] + +
+[docs] + @abstractmethod + def write_resume_point(self): + """Should write the point that a sampler starts up. + + How the resume point is indexed is up to the sampler. For example, + MCMC samplers use the number of iterations that are stored in the + checkpoint file. + """ + pass
+ + +
+[docs] + @abstractmethod + def write_sampler_metadata(self, sampler): + """This should write the given sampler's metadata to the file. + + This should also include the model's metadata. + """ + pass
+ + +
+[docs] + def update_checkpoint_history(self): + """Writes a copy of relevant metadata to the file's checkpoint history. + + All data are written to ``sampler_info/checkpoint_history``. If the + group does not exist yet, it will be created. + + This function writes the current time and the time since the last + checkpoint to the file. It will also call + :py:func:`_update_sampler_history` to write sampler-specific history. + """ + path = '/'.join([self.sampler_group, 'checkpoint_history']) + try: + history = self[path] + except KeyError: + # assume history doesn't exist yet + self.create_group(path) + history = self[path] + # write the checkpoint time + current_time = time.time() + self.write_data('checkpoint_time', current_time, path=path, + append=True) + # get the amount of time since the last checkpoint + checkpoint_times = history['checkpoint_time'][()] + if len(checkpoint_times) == 1: + # this is the first checkpoint, get the run time for comparison + lasttime = self.run_start_time + else: + lasttime = checkpoint_times[-2] + # if a resume happened since the last checkpoint, use the resume + # time instead + if lasttime < self.run_start_time: + lasttime = self.run_start_time + self.write_data('checkpoint_dt', current_time-lasttime, path=path, + append=True) + # write any sampler-specific history + self._update_sampler_history()
+ + + def _update_sampler_history(self): + """Writes sampler-specific history to the file. + + This function does nothing. Classes that inherit from it may override + it to add any extra information they would like written. This is + called by :py:func:`update_checkpoint_history`. + """ + pass + +
+[docs] + def validate(self): + """Runs a validation test. + + This checks that a samples group exists, and that at least one + sample is stored to it. + + Returns + ------- + bool : + Whether or not the file is valid as a checkpoint file. + """ + try: + group = '{}/{}'.format(self.samples_group, self.variable_params[0]) + checkpoint_valid = self[group].size != 0 + except KeyError: + checkpoint_valid = False + return checkpoint_valid
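For concreteness, a hedged sketch of reading back the history written by ``update_checkpoint_history``; the file name is illustrative and ``loadfile`` is assumed to be the package's generic opener:

from pycbc.inference.io import loadfile

with loadfile('samples.hdf', 'r') as fp:
    hist = fp['/'.join([fp.sampler_group, 'checkpoint_history'])]
    times = hist['checkpoint_time'][()]
    dts = hist['checkpoint_dt'][()]
    for t, dt in zip(times, dts):
        print('checkpoint at {:.0f}s, {:.1f}s since the previous one'.format(t, dt))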
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/dynesty.html b/latest/html/_modules/pycbc/inference/io/dynesty.html new file mode 100644 index 00000000000..36a423dfb46 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/dynesty.html @@ -0,0 +1,340 @@ + + + + + + pycbc.inference.io.dynesty — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.dynesty

+# Copyright (C) 2019 Collin Capano, Sumit Kumar
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Provides IO for the dynesty sampler.
+"""
+
+import argparse
+import numpy
+from pycbc.io.hdf import (dump_state, load_state)
+
+from .base_nested_sampler import BaseNestedSamplerFile
+from .posterior import write_samples_to_file, read_raw_samples_from_file
+
+
+[docs] +class CommonNestedMetadataIO(object): + """Provides functions for reading/writing dynesty metadata to file. + """ + +
+[docs] + @staticmethod + def extra_args_parser(parser=None, skip_args=None, **kwargs): + """Create a parser to parse sampler-specific arguments for loading + samples. + + Parameters + ---------- + parser : argparse.ArgumentParser, optional + Instead of creating a parser, add arguments to the given one. If + none provided, will create one. + skip_args : list, optional + Don't parse the given options. Options should be given as the + option string, minus the '--'. For example, + ``skip_args=['iteration']`` would cause the ``--iteration`` + argument not to be included. + \**kwargs : + All other keyword arguments are passed to the parser that is + created. + + Returns + ------- + parser : argparse.ArgumentParser + An argument parser with th extra arguments added. + actions : list of argparse.Action + A list of the actions that were added. + """ + if parser is None: + parser = argparse.ArgumentParser(**kwargs) + elif kwargs: + raise ValueError("No other keyword arguments should be provded if " + "a parser is provided.") + if skip_args is None: + skip_args = [] + actions = [] + + if 'raw_samples' not in skip_args: + act = parser.add_argument( + "--raw-samples", action='store_true', default=False, + help="Extract raw samples rather than a posterior. " + "Raw samples are the unweighted samples obtained from " + "the nested sampler. Default value is False, which means " + "raw samples are weighted by the log-weight array " + "obtained from the sampler, giving an estimate of the " + "posterior.") + actions.append(act) + if 'seed' not in skip_args: + act = parser.add_argument( + "--seed", type=int, default=0, + help="Set the random-number seed used for extracting the " + "posterior samples. This is needed because the " + "unweighted samples are randomly shuffled to produce " + "a posterior. Default is 0. Ignored if raw-samples are " + "extracted instead.") + return parser, actions
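A short, hedged example of how the parser built above could be used on the command-line side; the argument values are illustrative:

import argparse

parser = argparse.ArgumentParser()
parser, actions = CommonNestedMetadataIO.extra_args_parser(parser=parser)
opts = parser.parse_args(['--raw-samples', '--seed', '42'])
print(opts.raw_samples, opts.seed)  # True 42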
+ + +
+[docs] + def write_pickled_data_into_checkpoint_file(self, state): + """Dump the sampler state into checkpoint file + """ + if 'sampler_info/saved_state' not in self: + self.create_group('sampler_info/saved_state') + dump_state(state, self, path='sampler_info/saved_state')
+ + +
+[docs] + def read_pickled_data_from_checkpoint_file(self): + """Load the sampler state (pickled) from checkpoint file + """ + return load_state(self, path='sampler_info/saved_state')
+ + +
+[docs] + def write_raw_samples(self, data, parameters=None): + """Write the nested samples to the file + """ + if 'samples' not in self: + self.create_group('samples') + write_samples_to_file(self, data, parameters=parameters, + group='samples')
+ +
+[docs] + def validate(self): + """Runs a validation test. + + This checks that a samples group exists, and that the pickled data can + be loaded. + + Returns + ------- + bool : + Whether or not the file is valid as a checkpoint file. + """ + try: + if 'sampler_info/saved_state' in self: + load_state(self, path='sampler_info/saved_state') + checkpoint_valid = True + except KeyError: + checkpoint_valid = False + return checkpoint_valid
+
+ + + +
+[docs] +class DynestyFile(CommonNestedMetadataIO, BaseNestedSamplerFile): + """Class to handle file IO for the ``dynesty`` sampler.""" + + name = 'dynesty_file' + +
+[docs] + def read_raw_samples(self, fields, raw_samples=False, seed=0): + """Reads samples from a dynesty file and constructs a posterior. + + Parameters + ---------- + fields : list of str + The names of the parameters to load. Names must correspond to + dataset names in the file's ``samples`` group. + raw_samples : bool, optional + Return the raw (unweighted) samples instead of the estimated + posterior samples. Default is False. + seed : int, optional + When extracting the posterior, samples are randomly shuffled. To + make this reproduceable, numpy's random generator seed is set with + the given value prior to the extraction. Default is 0. + + Returns + ------- + dict : + Dictionary of parameter names -> samples. + """ + samples = read_raw_samples_from_file(self, fields) + logwt = read_raw_samples_from_file(self, ['logwt'])['logwt'] + loglikelihood = read_raw_samples_from_file( + self, ['loglikelihood'])['loglikelihood'] + logz = self.attrs.get('log_evidence') + if not raw_samples: + weights = numpy.exp(logwt - logz) + N = len(weights) + positions = (numpy.random.random() + numpy.arange(N)) / N + idx = numpy.zeros(N, dtype=int) + cumulative_sum = numpy.cumsum(weights) + cumulative_sum /= cumulative_sum[-1] + i, j = 0, 0 + while i < N: + if positions[i] < cumulative_sum[j]: + idx[i] = j + i += 1 + else: + j += 1 + try: + rng = numpy.random.default_rng(seed) + except AttributeError: + # numpy pre-1.17 uses RandomState + # Py27: delete this after we drop python 2.7 support + rng = numpy.random.RandomState(seed) + rng.shuffle(idx) + post = {'loglikelihood': loglikelihood[idx]} + for i, param in enumerate(fields): + post[param] = samples[param][idx] + return post + else: + return samples
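A stand-alone sketch of the weighted-resampling step above, using toy log-weights rather than the sampler's actual output; ``numpy.searchsorted`` with ``side='right'`` plays the same role as the explicit while-loop:

import numpy

logwt = numpy.array([-3.0, -1.0, -0.5, -2.0])      # toy log-weights
logz = numpy.logaddexp.reduce(logwt)               # toy log-evidence
weights = numpy.exp(logwt - logz)
N = len(weights)
positions = (numpy.random.random() + numpy.arange(N)) / N
cdf = numpy.cumsum(weights)
cdf /= cdf[-1]
idx = numpy.searchsorted(cdf, positions, side='right')
print(idx)   # indices of the samples kept for the posterior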
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/emcee.html b/latest/html/_modules/pycbc/inference/io/emcee.html new file mode 100644 index 00000000000..1a936ee215d --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/emcee.html @@ -0,0 +1,255 @@ + + + + + + pycbc.inference.io.emcee — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.emcee

+# Copyright (C) 2018 Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Provides IO for the emcee sampler.
+"""
+import numpy
+
+from .base_sampler import BaseSamplerFile
+from .base_mcmc import (EnsembleMCMCMetadataIO, CommonMCMCMetadataIO,
+                        write_samples, ensemble_read_raw_samples)
+
+
+
+[docs] +class EmceeFile(EnsembleMCMCMetadataIO, CommonMCMCMetadataIO, BaseSamplerFile): + """Class to handle file IO for the ``emcee`` sampler.""" + + name = 'emcee_file' + +
+[docs] + def write_samples(self, samples, **kwargs): + r"""Writes samples to the given file. + + Calls :py:func:`base_mcmc.write_samples`. See that function for + details. + + Parameters + ---------- + samples : dict + The samples to write. Each array in the dictionary should have + shape nwalkers x niterations. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_mcmc.write_samples`. + """ + write_samples(self, samples, **kwargs)
+ + +
+[docs] + def read_raw_samples(self, fields, **kwargs): + r"""Base function for reading samples. + + Calls :py:func:`base_mcmc.ensemble_read_raw_samples`. See that function + for details. + + Parameters + ----------- + fields : list + The list of field names to retrieve. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_mcmc.ensemble_read_raw_samples`. + + Returns + ------- + dict + A dictionary of field name -> numpy array pairs. + """ + return ensemble_read_raw_samples(self, fields, **kwargs)
+ + +
+[docs] + def read_acceptance_fraction(self, walkers=None): + """Reads the acceptance fraction. + + Parameters + ----------- + walkers : (list of) int, optional + The walker index (or a list of indices) to retrieve. If None, + samples from all walkers will be obtained. + + Returns + ------- + array + Array of acceptance fractions with shape (requested walkers,). + """ + group = self.sampler_group + '/acceptance_fraction' + if walkers is None: + wmask = numpy.ones(self.nwalkers, dtype=bool) + else: + wmask = numpy.zeros(self.nwalkers, dtype=bool) + wmask[walkers] = True + return self[group][wmask]
+ + +
+[docs] + def write_acceptance_fraction(self, acceptance_fraction): + """Write acceptance_fraction data to file. Results are written to + the ``[sampler_group]/acceptance_fraction``. + + Parameters + ----------- + acceptance_fraction : numpy.ndarray + Array of acceptance fractions to write. + """ + group = self.sampler_group + '/acceptance_fraction' + try: + self[group][:] = acceptance_fraction + except KeyError: + # dataset doesn't exist yet, create it + self[group] = acceptance_fraction
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/emcee_pt.html b/latest/html/_modules/pycbc/inference/io/emcee_pt.html new file mode 100644 index 00000000000..fc289e9beec --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/emcee_pt.html @@ -0,0 +1,282 @@ + + + + + + pycbc.inference.io.emcee_pt — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.emcee_pt

+# Copyright (C) 2018 Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+"""Provides I/O support for emcee_pt.
+"""
+
+
+import numpy
+
+from .base_sampler import BaseSamplerFile
+from .base_mcmc import EnsembleMCMCMetadataIO
+from .base_multitemper import (CommonMultiTemperedMetadataIO,
+                               write_samples,
+                               ensemble_read_raw_samples)
+
+
+
+[docs] +class EmceePTFile(EnsembleMCMCMetadataIO, CommonMultiTemperedMetadataIO, + BaseSamplerFile): + """Class to handle file IO for the ``emcee_pt`` sampler.""" + + name = 'emcee_pt_file' + + @property + def betas(self): + """The betas that were used.""" + return self[self.sampler_group].attrs["betas"] +
+[docs] + def write_samples(self, samples, **kwargs): + r"""Writes samples to the given file. + + Calls :py:func:`base_multitemper.write_samples`. See that function for + details. + + Parameters + ---------- + samples : dict + The samples to write. Each array in the dictionary should have + shape ntemps x nwalkers x niterations. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.write_samples`. + """ + write_samples(self, samples, **kwargs)
+ + +
+[docs] + def read_raw_samples(self, fields, **kwargs): + r"""Base function for reading samples. + + Calls :py:func:`base_multitemper.ensemble_read_raw_samples`. See that + function for details. + + Parameters + ----------- + fields : list + The list of field names to retrieve. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.ensemble_read_raw_samples`. + + Returns + ------- + dict + A dictionary of field name -> numpy array pairs. + """ + return ensemble_read_raw_samples(self, fields, **kwargs)
+ + +
+[docs] + def write_sampler_metadata(self, sampler): + """Adds writing betas to MultiTemperedMCMCIO. + """ + super(EmceePTFile, self).write_sampler_metadata(sampler) + self[self.sampler_group].attrs["betas"] = sampler.betas
+ + +
+[docs] + def read_acceptance_fraction(self, temps=None, walkers=None): + """Reads the acceptance fraction. + + Parameters + ----------- + temps : (list of) int, optional + The temperature index (or a list of indices) to retrieve. If None, + acfs from all temperatures and all walkers will be retrieved. + walkers : (list of) int, optional + The walker index (or a list of indices) to retrieve. If None, + samples from all walkers will be obtained. + + Returns + ------- + array + Array of acceptance fractions with shape (requested temps, + requested walkers). + """ + group = self.sampler_group + '/acceptance_fraction' + if walkers is None: + wmask = numpy.ones(self.nwalkers, dtype=bool) + else: + wmask = numpy.zeros(self.nwalkers, dtype=bool) + wmask[walkers] = True + if temps is None: + tmask = numpy.ones(self.ntemps, dtype=bool) + else: + tmask = numpy.zeros(self.ntemps, dtype=bool) + tmask[temps] = True + return self[group][:][numpy.ix_(tmask, wmask)]
+ + +
+[docs] + def write_acceptance_fraction(self, acceptance_fraction): + """Write acceptance_fraction data to file. + + Results are written to ``[sampler_group]/acceptance_fraction``; the + resulting dataset has shape (ntemps, nwalkers). + + Parameters + ----------- + acceptance_fraction : numpy.ndarray + Array of acceptance fractions to write. Must have shape + ntemps x nwalkers. + """ + # check + assert acceptance_fraction.shape == (self.ntemps, self.nwalkers), ( + "acceptance fraction must have shape ntemps x nwalker") + group = self.sampler_group + '/acceptance_fraction' + try: + self[group][:] = acceptance_fraction + except KeyError: + # dataset doesn't exist yet, create it + self[group] = acceptance_fraction
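A small sketch of the boolean-mask plus ``numpy.ix_`` selection used in ``read_acceptance_fraction`` above; the array and shapes are toy values:

import numpy

acf = numpy.arange(12.).reshape(3, 4)   # pretend ntemps=3, nwalkers=4
tmask = numpy.zeros(3, dtype=bool)
tmask[[0, 2]] = True                    # temps 0 and 2
wmask = numpy.zeros(4, dtype=bool)
wmask[[1, 3]] = True                    # walkers 1 and 3
print(acf[numpy.ix_(tmask, wmask)])     # the selected 2x2 block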
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/epsie.html b/latest/html/_modules/pycbc/inference/io/epsie.html new file mode 100644 index 00000000000..affb224afa2 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/epsie.html @@ -0,0 +1,446 @@ + + + + + + pycbc.inference.io.epsie — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.epsie

+# Copyright (C) 2019  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This module provides IO classes for epsie samplers.
+"""
+
+
+import numpy
+from pickle import UnpicklingError
+from epsie import load_state
+
+from .base_sampler import BaseSamplerFile
+from .base_mcmc import MCMCMetadataIO
+from .base_multitemper import (CommonMultiTemperedMetadataIO,
+                               write_samples,
+                               read_raw_samples)
+
+
+
+[docs] +class EpsieFile(MCMCMetadataIO, CommonMultiTemperedMetadataIO, + BaseSamplerFile): + """Class to handle IO for Epsie's parallel-tempered sampler.""" + + name = 'epsie_file' + + @property + def nchains(self): + """Alias for nwalkers.""" + return self.nwalkers + + @property + def betas(self): + """The betas that were used.""" + return self[self.sampler_group]['betas'][()] + + @property + def swap_interval(self): + """The interval that temperature swaps occurred at.""" + return self[self.sampler_group].attrs['swap_interval'] + + @swap_interval.setter + def swap_interval(self, swap_interval): + """Stores the swap interval to the sampler group's attrs.""" + self[self.sampler_group].attrs['swap_interval'] = swap_interval + + @property + def seed(self): + """The sampler's seed.""" + # convert seed from str back to int (see setter below for reason) + return int(self[self.sampler_group].attrs['seed']) + + @seed.setter + def seed(self, seed): + """Store the sampler's seed.""" + # epsie uses the numpy's new random generators, which use long integers + # for seeds. hdf5 doesn't know how to handle long integers, so we'll + # store it as a string + self[self.sampler_group].attrs['seed'] = str(seed) + +
+[docs] + def write_sampler_metadata(self, sampler): + """Adds writing seed and betas to MultiTemperedMCMCIO. + """ + super(EpsieFile, self).write_sampler_metadata(sampler) + self.seed = sampler.seed + self.write_data("betas", sampler.betas, path=self.sampler_group)
+ + +
+[docs] + def thin(self, thin_interval): + """Thins the samples on disk to the given thinning interval. + + Also thins the acceptance ratio and the temperature data, both of + which are stored in the ``sampler_info`` group. + """ + # We'll need to know what the new interval to thin by will be + # so we can properly thin the acceptance ratio and temperatures swaps. + # We need to do this before calling the base thin, as we need to know + # what the current thinned by is. + new_interval = thin_interval // self.thinned_by + # now thin the samples + super(EpsieFile, self).thin(thin_interval) + # thin the acceptance ratio + self._thin_data(self.sampler_group, ['acceptance_ratio'], + new_interval) + # thin the temperature swaps; since these may not happen every + # iteration, the thin interval we use for these is different + ts_group = '/'.join([self.sampler_group, 'temperature_swaps']) + ts_thin_interval = new_interval // self.swap_interval + if ts_thin_interval > 1: + self._thin_data(ts_group, ['swap_index'], + ts_thin_interval) + self._thin_data(ts_group, ['acceptance_ratio'], + ts_thin_interval)
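A worked example of the interval arithmetic above; all numbers are illustrative:

thinned_by = 2       # current on-disk thinning
thin_interval = 8    # requested total thinning
swap_interval = 2    # iterations between temperature swaps

new_interval = thin_interval // thinned_by        # 4: extra thinning to apply
ts_thin_interval = new_interval // swap_interval  # 2: thinning for the swap datasets
print(new_interval, ts_thin_interval)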
+ + +
+[docs] + def write_samples(self, samples, **kwargs): + r"""Writes samples to the given file. + + Calls :py:func:`base_multitemper.write_samples`. See that function for + details. + + Parameters + ---------- + samples : dict + The samples to write. Each array in the dictionary should have + shape ntemps x nwalkers x niterations. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.write_samples`. + """ + write_samples(self, samples, **kwargs)
+ + +
+[docs] + def read_raw_samples(self, fields, **kwargs): + r"""Base function for reading samples. + + Calls :py:func:`base_multitemper.read_raw_samples`. See that + function for details. + + Parameters + ----------- + fields : list + The list of field names to retrieve. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.read_raw_samples`. + + Returns + ------- + dict + A dictionary of field name -> numpy array pairs. + """ + return read_raw_samples(self, fields, **kwargs)
+ + +
+[docs] + def write_acceptance_ratio(self, acceptance_ratio, last_iteration=None): + """Writes the acceptance ratios to the sampler info group. + + Parameters + ---------- + acceptance_ratio : array + The acceptance ratios to write. Should have shape + ``ntemps x nchains x niterations``. + """ + # we'll use the write_samples machinery to write the acceptance ratios + self.write_samples({'acceptance_ratio': acceptance_ratio}, + last_iteration=last_iteration, + samples_group=self.sampler_group)
+ + +
+[docs] + def read_acceptance_ratio(self, temps=None, chains=None): + """Reads the acceptance ratios. + + Ratios larger than 1 are set back to 1 before returning. + + Parameters + ----------- + temps : (list of) int, optional + The temperature index (or a list of indices) to retrieve. If None, + acceptance ratios from all temperatures and all chains will be + retrieved. + chains : (list of) int, optional + The chain index (or a list of indices) to retrieve. If None, + ratios from all chains will be obtained. + + Returns + ------- + array + Array of acceptance ratios with shape (requested temps, + requested chains, niterations). + """ + group = self.sampler_group + '/acceptance_ratio' + if chains is None: + wmask = numpy.ones(self.nchains, dtype=bool) + else: + wmask = numpy.zeros(self.nchains, dtype=bool) + wmask[chains] = True + if temps is None: + tmask = numpy.ones(self.ntemps, dtype=bool) + else: + tmask = numpy.zeros(self.ntemps, dtype=bool) + tmask[temps] = True + all_ratios = self[group][:] + # make sure values > 1 are set back to 1 + all_ratios[all_ratios > 1] = 1. + return all_ratios[numpy.ix_(tmask, wmask)]
+ + +
+[docs] + def read_acceptance_rate(self, temps=None, chains=None): + """Reads the acceptance rate. + + This calls :py:func:`read_acceptance_ratio`, then averages the ratios + over all iterations to get the average rate. + + Parameters + ----------- + temps : (list of) int, optional + The temperature index (or a list of indices) to retrieve. If None, + acceptance rates from all temperatures and all chains will be + retrieved. + chains : (list of) int, optional + The chain index (or a list of indices) to retrieve. If None, + rates from all chains will be obtained. + + Returns + ------- + array + Array of acceptance ratios with shape (requested temps, + requested chains). + """ + all_ratios = self.read_acceptance_ratio(temps, chains) + # average over the number of iterations + all_ratios = all_ratios.mean(axis=-1) + return all_ratios
+ + +
+[docs] + def read_acceptance_fraction(self, temps=None, walkers=None): + """Alias for :py:func:`read_acceptance_rate`. + """ + return self.read_acceptance_rate(temps=temps, chains=walkers)
+ + +
+[docs] + def write_temperature_data(self, swap_index, acceptance_ratio, + swap_interval, last_iteration): + """Writes temperature swaps and acceptance ratios. + + Parameters + ---------- + swap_index : array + The indices indicating which temperatures were swapped. Should have + shape ``ntemps x nchains x (niterations/swap_interval)``. + acceptance_ratio : array + The array of acceptance ratios between temperatures. Should + have shape ``(ntemps-1) x nchains x (niterations/swap_interval)``. + arrays. + swap_interval : int + The number of iterations between temperature swaps. + last_iteration : int + The iteration of the last sample. + """ + self.swap_interval = swap_interval + group = '/'.join([self.sampler_group, 'temperature_swaps']) + # we'll use the write_samples machinery to write the acceptance ratios; + # if temperature swaps didn't happen every iteration, then a smaller + # thinning interval than what is used for the samples should be used + thin_by = self.thinned_by // swap_interval + # we'll also tell the write samples that the last "iteration" is the + # last iteration / the swap interval, to get the spacing correct + last_iteration = last_iteration // swap_interval + # we need to write the two arrays separately, since they have different + # dimensions in temperature + self.write_samples({'swap_index': swap_index}, + last_iteration=last_iteration, + samples_group=group, thin_by=thin_by) + self.write_samples({'acceptance_ratio': acceptance_ratio}, + last_iteration=last_iteration, + samples_group=group, thin_by=thin_by)
+ + +
+[docs] + def validate(self): + """Adds an attempt to load the checkpoint to the validation test.""" + valid = super(EpsieFile, self).validate() + # try to load the checkpoint + if valid: + try: + load_state(self, self.sampler_group) + except (KeyError, UnpicklingError): + # will get this if the state wasn't written, or it was + # corrupted for some reason + valid = False + return valid
+ + + @staticmethod + def _get_optional_args(args, opts, err_on_missing=False, **kwargs): + # need this to make sure options called "walkers" are renamed to + # "chains" + parsed = BaseSamplerFile._get_optional_args( + args, opts, err_on_missing=err_on_missing, **kwargs) + try: + chains = parsed.pop('walkers') + parsed['chains'] = chains + except KeyError: + pass + return parsed
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/multinest.html b/latest/html/_modules/pycbc/inference/io/multinest.html new file mode 100644 index 00000000000..96714e17661 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/multinest.html @@ -0,0 +1,285 @@ + + + + + + pycbc.inference.io.multinest — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.multinest

+# Copyright (C) 2018 Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+"""Provides I/O support for multinest.
+"""
+
+
+from .base_sampler import BaseSamplerFile
+
+
+
+[docs] +class MultinestFile(BaseSamplerFile): + """Class to handle file IO for the ``multinest`` sampler.""" + + name = 'multinest_file' + +
+[docs] + def write_samples(self, samples, parameters=None): + """Writes samples to the given file. + + Results are written to ``samples_group/{vararg}``, where ``{vararg}`` + is the name of a model params. The samples are written as an + array of length ``niterations``. + + Parameters + ---------- + samples : dict + The samples to write. Each array in the dictionary should have + length niterations. + parameters : list, optional + Only write the specified parameters to the file. If None, will + write all of the keys in the ``samples`` dict. + """ + niterations = len(tuple(samples.values())[0]) + assert all(len(p) == niterations for p in samples.values()), ( + "all samples must have the same shape") + group = self.samples_group + '/{name}' + if parameters is None: + parameters = samples.keys() + # loop over number of dimensions + for param in parameters: + dataset_name = group.format(name=param) + try: + fp_niterations = len(self[dataset_name]) + if niterations != fp_niterations: + # resize the dataset + self[dataset_name].resize(niterations, axis=0) + except KeyError: + # dataset doesn't exist yet + self.create_dataset(dataset_name, (niterations,), + maxshape=(None,), + dtype=samples[param].dtype, + fletcher32=True) + self[dataset_name][:] = samples[param]
+ + +
+[docs] + def write_logevidence(self, lnz, dlnz, importance_lnz, importance_dlnz): + """Writes the given log evidence and its error. + + Results are saved to file's 'log_evidence' and 'dlog_evidence' + attributes, as well as the importance-weighted versions of these + stats if they exist. + + Parameters + ---------- + lnz : float + The log of the evidence. + dlnz : float + The error in the estimate of the log evidence. + importance_lnz : float, optional + The importance-weighted log of the evidence. + importance_dlnz : float, optional + The error in the importance-weighted estimate of the log evidence. + """ + self.attrs['log_evidence'] = lnz + self.attrs['dlog_evidence'] = dlnz + if all([e is not None for e in [importance_lnz, importance_dlnz]]): + self.attrs['importance_log_evidence'] = importance_lnz + self.attrs['importance_dlog_evidence'] = importance_dlnz
+ + +
+[docs] + def read_raw_samples(self, fields, iteration=None): + if isinstance(fields, str): + fields = [fields] + # load + group = self.samples_group + '/{name}' + arrays = {} + for name in fields: + if iteration is not None: + arr = self[group.format(name=name)][int(iteration)] + else: + arr = self[group.format(name=name)][:] + arrays[name] = arr + return arrays
+ + +
+[docs] + def write_resume_point(self): + """Keeps a list of the number of iterations that were in a file when a + run was resumed from a checkpoint.""" + try: + resume_pts = self.attrs["resume_points"].tolist() + except KeyError: + resume_pts = [] + try: + niterations = self.niterations + except KeyError: + niterations = 0 + resume_pts.append(niterations) + self.attrs["resume_points"] = resume_pts
+ + + @property + def niterations(self): + """Returns the number of iterations the sampler was run for.""" + return self[self.sampler_group].attrs['niterations'] + +
+[docs] + def write_niterations(self, niterations): + """Writes the given number of iterations to the sampler group.""" + self[self.sampler_group].attrs['niterations'] = niterations
+ + +
+[docs] + def write_sampler_metadata(self, sampler): + """Writes the sampler's metadata.""" + self.attrs['sampler'] = sampler.name + if self.sampler_group not in self.keys(): + # create the sampler group + self.create_group(self.sampler_group) + self[self.sampler_group].attrs['nlivepoints'] = sampler.nlivepoints + # write the model's metadata + sampler.model.write_metadata(self)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/nessai.html b/latest/html/_modules/pycbc/inference/io/nessai.html new file mode 100644 index 00000000000..1a0ac4df00b --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/nessai.html @@ -0,0 +1,183 @@ + + + + + + pycbc.inference.io.nessai — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.nessai

+"""Provides IO for the nessai sampler"""
+import numpy
+
+from .base_nested_sampler import BaseNestedSamplerFile
+
+from .posterior import read_raw_samples_from_file
+from .dynesty import CommonNestedMetadataIO
+
+
+
+[docs] +class NessaiFile(CommonNestedMetadataIO, BaseNestedSamplerFile): + """Class to handle file IO for the ``nessai`` sampler.""" + + name = "nessai_file" + +
+[docs] + def read_raw_samples(self, fields, raw_samples=False, seed=0): + """Reads samples from a nessai file and constructs a posterior. + + Uses rejection sampling to resample the nested samples into a + posterior. + + Parameters + ---------- + fields : list of str + The names of the parameters to load. Names must correspond to + dataset names in the file's ``samples`` group. + raw_samples : bool, optional + Return the raw (unweighted) samples instead of the estimated + posterior samples. Default is False. + seed : int, optional + Seed for the random number generator used in the rejection + sampling. Default is 0. Ignored if ``raw_samples`` is True. + + Returns + ------- + dict : + Dictionary of parameter fields -> samples. + """ + samples = read_raw_samples_from_file(self, fields) + logwt = read_raw_samples_from_file(self, ['logwt'])['logwt'] + loglikelihood = read_raw_samples_from_file( + self, ['loglikelihood'])['loglikelihood'] + if not raw_samples: + n_samples = len(logwt) + # Rejection sample + rng = numpy.random.default_rng(seed) + logwt -= logwt.max() + logu = numpy.log(rng.random(n_samples)) + keep = logwt > logu + post = {'loglikelihood': loglikelihood[keep]} + for param in fields: + post[param] = samples[param][keep] + return post + return samples
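A stand-alone sketch of the log-weight rejection step used above, with toy weights; in the class they come from the file's ``logwt`` dataset:

import numpy

rng = numpy.random.default_rng(0)
logwt = rng.normal(size=1000)            # toy log-weights
logwt = logwt - logwt.max()              # acceptance probability exp(logwt) <= 1
keep = logwt > numpy.log(rng.random(logwt.size))
print(keep.sum(), 'of', logwt.size, 'samples kept')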
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/posterior.html b/latest/html/_modules/pycbc/inference/io/posterior.html new file mode 100644 index 00000000000..a4595373081 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/posterior.html @@ -0,0 +1,250 @@ + + + + + + pycbc.inference.io.posterior — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.posterior

+# Copyright (C) 2018 Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Provides simplified standard format just for posterior data
+"""
+
+from .base_hdf import BaseInferenceFile
+
+
+
+[docs] +class PosteriorFile(BaseInferenceFile): + """Class to handle file IO for the simplified Posterior file.""" + + name = 'posterior_file' + +
+[docs] + def read_raw_samples(self, fields, **kwargs): + return read_raw_samples_from_file(self, fields, **kwargs)
+ + +
+[docs] + def write_samples(self, samples, parameters=None): + return write_samples_to_file(self, samples, parameters=parameters)
+ + +
+[docs] + def write_sampler_metadata(self, sampler): + sampler.model.write_metadata(self)
+ + +
+[docs] + def write_resume_point(self): + pass
+ + + write_run_start_time = write_run_end_time = write_resume_point
+ + + +
+[docs] +def read_raw_samples_from_file(fp, fields, **kwargs): + samples = fp[fp.samples_group] + return {field: samples[field][:] for field in fields}
+ + + +
+[docs] +def write_samples_to_file(fp, samples, parameters=None, group=None): + """Writes samples to the given file. + + Results are written to ``samples_group/{vararg}``, where ``{vararg}`` + is the name of a model params. The samples are written as an + array of length ``niterations``. + + Parameters + ----------- + fp : self + Pass the 'self' from BaseInferenceFile class. + samples : dict + The samples to write. Each array in the dictionary should have + length niterations. + parameters : list, optional + Only write the specified parameters to the file. If None, will + write all of the keys in the ``samples`` dict. + """ + # check data dimensions; we'll just use the first array in samples + arr = list(samples.values())[0] + if not arr.ndim == 1: + raise ValueError("samples must be 1D arrays") + niterations = arr.size + assert all(len(p) == niterations + for p in samples.values()), ( + "all samples must have the same shape") + if group is not None: + group = group + '/{name}' + else: + group = fp.samples_group + '/{name}' + if parameters is None: + parameters = samples.keys() + # loop over number of dimensions + for param in parameters: + dataset_name = group.format(name=param) + try: + fp_niterations = len(fp[dataset_name]) + if niterations != fp_niterations: + # resize the dataset + fp[dataset_name].resize(niterations, axis=0) + except KeyError: + # dataset doesn't exist yet + fp.create_dataset(dataset_name, (niterations,), + maxshape=(None,), + dtype=samples[param].dtype, + fletcher32=True) + fp[dataset_name][:] = samples[param]
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/ptemcee.html b/latest/html/_modules/pycbc/inference/io/ptemcee.html new file mode 100644 index 00000000000..efd70dbee6b --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/ptemcee.html @@ -0,0 +1,312 @@ + + + + + + pycbc.inference.io.ptemcee — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.ptemcee

+# Copyright (C) 2020 Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+"""Provides I/O support for ptemcee.
+"""
+
+
+from .base_sampler import BaseSamplerFile
+from . import base_mcmc
+from .base_mcmc import EnsembleMCMCMetadataIO
+from .base_multitemper import (CommonMultiTemperedMetadataIO,
+                               write_samples,
+                               ensemble_read_raw_samples)
+
+
+
+[docs] +class PTEmceeFile(EnsembleMCMCMetadataIO, CommonMultiTemperedMetadataIO, + BaseSamplerFile): + """Class to handle file IO for the ``ptemcee`` sampler.""" + + name = 'ptemcee_file' + + # attributes for setting up an ensemble from file + _ensemble_attrs = ['jumps_proposed', 'jumps_accepted', 'swaps_proposed', + 'swaps_accepted', 'logP', 'logl'] + +
+[docs] + def write_sampler_metadata(self, sampler): + """Adds writing ptemcee-specific metadata to MultiTemperedMCMCIO. + """ + super(PTEmceeFile, self).write_sampler_metadata(sampler) + group = self[self.sampler_group] + group.attrs["starting_betas"] = sampler.starting_betas + group.attrs["adaptive"] = sampler.adaptive + group.attrs["adaptation_lag"] = sampler.adaptation_lag + group.attrs["adaptation_time"] = sampler.adaptation_time + group.attrs["scale_factor"] = sampler.scale_factor
+ + + @property + def starting_betas(self): + """The starting betas that were used.""" + return self[self.sampler_group].attrs["starting_betas"] + +
+[docs] + def write_betas(self, betas, last_iteration=None): + """Writes the betas to sampler group. + + As the betas may change with iterations, this writes the betas as + a ntemps x niterations array to the file. + """ + # we'll use the single temperature write_samples to write the betas, + # so that we get the thinning settings + base_mcmc.write_samples(self, {'betas': betas}, + last_iteration=last_iteration, + samples_group=self.sampler_group)
+ + +
+[docs] + def read_betas(self, thin_start=None, thin_interval=None, thin_end=None, + iteration=None): + """Reads betas from the file. + + Parameters + ----------- + thin_start : int, optional + Start reading from the given iteration. Default is to start from + the first iteration. + thin_interval : int, optional + Only read every ``thin_interval`` -th sample. Default is 1. + thin_end : int, optional + Stop reading at the given iteration. Default is to end at the last + iteration. + iteration : int, optional + Only read the given iteration. If this provided, it overrides + the ``thin_(start|interval|end)`` options. + + Returns + ------- + array + A ntemps x niterations array of the betas. + """ + slc = base_mcmc._ensemble_get_index(self, thin_start=thin_start, + thin_interval=thin_interval, + thin_end=thin_end, + iteration=iteration) + betas = self[self.sampler_group]['betas'][:] + return betas[:, slc]
+ + +
+[docs] + def write_ensemble_attrs(self, ensemble): + """Writes ensemble attributes necessary to restart from checkpoint. + + Parameters + ---------- + ensemble : ptemcee.Ensemble + The ensemble to write attributes for. + """ + group = self[self.sampler_group] + for attr in self._ensemble_attrs: + vals = getattr(ensemble, attr) + try: + group[attr][:] = vals + except KeyError: + group[attr] = vals
+ + +
+[docs] + def read_ensemble_attrs(self): + """Reads ensemble attributes from the file. + + Returns + ------- + dict : + Dictionary of the ensemble attributes. + """ + group = self[self.sampler_group] + return {attr: group[attr][:] for attr in self._ensemble_attrs}
+ + +
+[docs] + def write_samples(self, samples, **kwargs): + r"""Writes samples to the given file. + + Calls :py:func:`base_multitemper.write_samples`. See that function for + details. + + Parameters + ---------- + samples : dict + The samples to write. Each array in the dictionary should have + shape ntemps x nwalkers x niterations. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.write_samples`. + """ + write_samples(self, samples, **kwargs)
+ + +
+[docs] + def read_raw_samples(self, fields, **kwargs): + r"""Base function for reading samples. + + Calls :py:func:`base_multitemper.ensemble_read_raw_samples`. See that + function for details. + + Parameters + ---------- + fields : list + The list of field names to retrieve. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.ensemble_read_raw_samples`. + + Returns + ------- + dict + A dictionary of field name -> numpy array pairs. + """ + return ensemble_read_raw_samples(self, fields, **kwargs)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/snowline.html b/latest/html/_modules/pycbc/inference/io/snowline.html new file mode 100644 index 00000000000..55ccb05d0d8 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/snowline.html @@ -0,0 +1,163 @@ + + + + + + pycbc.inference.io.snowline — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.snowline

+# Copyright (C) 2024 Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Provides IO for the snowline sampler.
+"""
+from .posterior import PosteriorFile
+
+
+
+[docs] +class SnowlineFile(PosteriorFile): + """Class to handle file IO for the ``snowline`` sampler.""" + + name = 'snowline_file'
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/txt.html b/latest/html/_modules/pycbc/inference/io/txt.html new file mode 100644 index 00000000000..4e4abb7640e --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/txt.html @@ -0,0 +1,200 @@ + + + + + + pycbc.inference.io.txt — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.txt

+# Copyright (C) 2017 Christopher M. Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+""" This modules defines functions for reading and samples that the
+inference samplers generate and are stored in an ASCII TXT file.
+"""
+
+import numpy
+
+
+
+[docs] +class InferenceTXTFile(object): + """ A class that has extra functions for handling reading the samples + from posterior-only TXT files. + + Parameters + ----------- + path : str + The path to the TXT file. + mode : {None, str} + The mode to open the file. Only accepts "r" or "rb" for reading. + delimiter : str + Delimiter to use for TXT file. Default is space-delimited. + """ + name = "txt" + comments = "" + delimiter = " " + + def __init__(self, path, mode=None, delimiter=None): + self.path = path + self.delimiter = delimiter if delimiter is not None else self.delimiter + if mode in ["r", "rb"]: + self.mode = mode + else: + raise ValueError("Mode for InferenceTXTFile must be 'r' or 'rb'.") + +
+[docs] + @classmethod + def write(cls, output_file, samples, labels, delimiter=None): + """ Writes a text file with samples. + + Parameters + ----------- + output_file : str + The path of the file to write. + samples : FieldArray + Samples to write to file. + labels : list + A list of strings to include as header in TXT file. + delimiter : str + Delimiter to use in TXT file. + """ + delimiter = delimiter if delimiter is not None else cls.delimiter + header = delimiter.join(labels) + numpy.savetxt(output_file, samples, + comments=cls.comments, header=header, + delimiter=delimiter)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/io/ultranest.html b/latest/html/_modules/pycbc/inference/io/ultranest.html new file mode 100644 index 00000000000..d17a5637261 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/io/ultranest.html @@ -0,0 +1,163 @@ + + + + + + pycbc.inference.io.ultranest — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.io.ultranest

+# Copyright (C) 2019 Collin Capano, Sumit Kumar, Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Provides IO for the ultranest sampler.
+"""
+from .base_nested_sampler import BaseNestedSamplerFile
+
+
+
+[docs]
+class UltranestFile(BaseNestedSamplerFile):
+    """Class to handle file IO for the ``ultranest`` sampler."""
+
+    name = 'ultranest_file'
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/jump.html b/latest/html/_modules/pycbc/inference/jump.html new file mode 100644 index 00000000000..e96f978d312 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/jump.html @@ -0,0 +1,214 @@ + + + + + + pycbc.inference.jump — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.jump

+# Copyright (C) 2019  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""Provides custom jump proposals for samplers."""
+
+from .normal import (EpsieNormal, EpsieAdaptiveNormal, EpsieATAdaptiveNormal)
+from .bounded_normal import (EpsieBoundedNormal, EpsieAdaptiveBoundedNormal,
+                             EpsieATAdaptiveBoundedNormal)
+from .angular import (EpsieAngular, EpsieAdaptiveAngular,
+                      EpsieATAdaptiveAngular)
+from .discrete import (EpsieNormalDiscrete, EpsieBoundedDiscrete,
+                       EpsieAdaptiveNormalDiscrete,
+                       EpsieAdaptiveBoundedDiscrete)
+
+
+epsie_proposals = {
+    EpsieNormal.name: EpsieNormal,
+    EpsieAdaptiveNormal.name: EpsieAdaptiveNormal,
+    EpsieATAdaptiveNormal.name: EpsieATAdaptiveNormal,
+    EpsieBoundedNormal.name: EpsieBoundedNormal,
+    EpsieAdaptiveBoundedNormal.name: EpsieAdaptiveBoundedNormal,
+    EpsieATAdaptiveBoundedNormal.name: EpsieATAdaptiveBoundedNormal,
+    EpsieAngular.name: EpsieAngular,
+    EpsieAdaptiveAngular.name: EpsieAdaptiveAngular,
+    EpsieATAdaptiveAngular.name: EpsieATAdaptiveAngular,
+    EpsieNormalDiscrete.name: EpsieNormalDiscrete,
+    EpsieAdaptiveNormalDiscrete.name: EpsieAdaptiveNormalDiscrete,
+    EpsieBoundedDiscrete.name: EpsieBoundedDiscrete,
+    EpsieAdaptiveBoundedDiscrete.name: EpsieAdaptiveBoundedDiscrete,
+}
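The ``epsie_proposals`` dictionary maps each proposal's ``name`` attribute to
its class, so a proposal class can be looked up by the same name used in a
config file. For example::

    from pycbc.inference.jump import epsie_proposals

    # 'adaptive_normal' is the name used in [jump_proposal-...] sections
    prop_cls = epsie_proposals['adaptive_normal']
    print(prop_cls.__name__)   # EpsieAdaptiveNormal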
+
+
+
+[docs] +def epsie_proposals_from_config(cp, section='jump_proposal'): + """Loads epsie jump proposals from the given config file. + + This loads jump proposals from sub-sections starting with ``section`` + (default is 'jump_proposal'). The tag part of the sub-sections' headers + should list the parameters the proposal is to be used for. + + Example:: + + [jump_proposal-mtotal+q] + name = adaptive_normal + adaptation-duration = 1000 + min-q = 1 + max-q = 8 + min-mtotal = 20 + max-mtotal = 160 + + [jump_proposal-spin1_a] + name = normal + + Parameters + ---------- + cp : WorkflowConfigParser instance + The config file to read. + section : str, optional + The section name to read jump proposals from. Default is + ``'jump_proposal'``. + + Returns + ------- + list : + List of the proposal instances. + """ + tags = cp.get_subsections(section) + proposals = [] + for tag in tags: + # get the name of the proposal + name = cp.get_opt_tag(section, "name", tag) + prop = epsie_proposals[name].from_config(cp, section, tag) + proposals.append(prop) + return proposals
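A sketch of driving ``epsie_proposals_from_config`` end-to-end; the config file
name and the ``WorkflowConfigParser`` import location are assumptions for
illustration::

    from pycbc.workflow import WorkflowConfigParser
    from pycbc.inference.jump import epsie_proposals_from_config

    # 'jump.ini' is assumed to contain sections such as:
    #   [jump_proposal-ra]
    #   name = angular
    #   var-ra = 0.01
    cp = WorkflowConfigParser(['jump.ini'])
    proposals = epsie_proposals_from_config(cp)   # reads all [jump_proposal-*] sections
    for prop in proposals:
        print(type(prop).__name__)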
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/jump/angular.html b/latest/html/_modules/pycbc/inference/jump/angular.html new file mode 100644 index 00000000000..7ae26dc64b3 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/jump/angular.html @@ -0,0 +1,278 @@ + + + + + + pycbc.inference.jump.angular — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.jump.angular

+# Copyright (C) 2020  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""Jump proposals that use cyclic boundaries on [0, 2pi)."""
+
+from epsie import proposals as epsie_proposals
+
+from .normal import (epsie_from_config, epsie_adaptive_from_config,
+                     epsie_at_adaptive_from_config)
+
+
+
+[docs] +class EpsieAngular(epsie_proposals.Angular): + """Adds ``from_config`` method to epsie's angular proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + """Loads a proposal from a config file. + + This calls :py:func:`epsie_from_config` with ``cls`` set to + :py:class:`epsie.proposals.Angular` and ``with_boundaries`` set + to False. See that function for details on options that can be read. + + Example:: + + [jump_proposal-ra] + name = angular + var-ra = 0.01 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.Angular`: + An angular proposal for use with ``epsie`` samplers. + """ + return epsie_from_config(cls, cp, section, tag, with_boundaries=False)
+
+ + + +
+[docs] +class EpsieAdaptiveAngular(epsie_proposals.AdaptiveAngular): + """Adds ``from_config`` method to epsie's adaptive angular proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + r"""Loads a proposal from a config file. + + This calls :py:func:`epsie_adaptive_from_config` with ``cls`` set to + :py:class:`epsie.proposals.AdaptiveBoundedNormal` and + ``with_boundaries`` set to False (since the boundaries for the angular + proposals are always :math:`[0, 2\pi)`). See that function + for details on options that can be read. + + Example:: + + [jump_proposal-ra] + name = adaptive_angular + adaptation-duration = 1000 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.AdaptiveAngular`: + An adaptive angular proposal for use with ``epsie`` samplers. + """ + return epsie_adaptive_from_config(cls, cp, section, tag, + with_boundaries=False)
+
+ + + +
+[docs] +class EpsieATAdaptiveAngular(epsie_proposals.ATAdaptiveAngular): + """Adds ``from_config`` method to epsie's adaptive angular proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + r"""Loads a proposal from a config file. + + This calls :py:func:`epsie_adaptive_from_config` with ``cls`` set to + :py:class:`epsie.proposals.AdaptiveBoundedNormal` and + ``with_boundaries`` set to False (since the boundaries for the angular + proposals are always :math:`[0, 2\pi)`). See that function + for details on options that can be read. + + Example:: + + [jump_proposal-ra] + name = adaptive_angular_proposal + adaptation-duration = 1000 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.AdaptiveAngularProposal`: + An adaptive angular proposal for use with ``epsie`` samplers. + """ + return epsie_at_adaptive_from_config(cls, cp, section, tag, + with_boundaries=False)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/jump/bounded_normal.html b/latest/html/_modules/pycbc/inference/jump/bounded_normal.html new file mode 100644 index 00000000000..0c1619c841e --- /dev/null +++ b/latest/html/_modules/pycbc/inference/jump/bounded_normal.html @@ -0,0 +1,279 @@ + + + + + + pycbc.inference.jump.bounded_normal — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.jump.bounded_normal

+# Copyright (C) 2020  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""Jump proposals that use a bounded normal distribution."""
+
+from epsie import proposals as epsie_proposals
+
+from .normal import (epsie_from_config, epsie_adaptive_from_config,
+                     epsie_at_adaptive_from_config)
+
+
+
+[docs] +class EpsieBoundedNormal(epsie_proposals.BoundedNormal): + """Adds ``from_config`` method to epsie's bounded normal proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + r"""Loads a proposal from a config file. + + This calls :py:func:`epsie_from_config` with ``cls`` set to + :py:class:`epsie.proposals.BoundedNormal` and ``with_boundaries`` set + to True. See that function for details on options that can be read. + + Example:: + + [jump_proposal-mchrip+q] + name = bounded_normal + min-q = 1 + max-q = 8 + min-mchirp = 20 + max-mchirp = 80 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.BoundedNormal`: + A bounded normal proposal for use with ``epsie`` samplers. + """ + return epsie_from_config(cls, cp, section, tag, with_boundaries=True)
+
+ + + +
+[docs] +class EpsieAdaptiveBoundedNormal(epsie_proposals.AdaptiveBoundedNormal): + """Adds ``from_config`` method to epsie's adaptive normal proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + """Loads a proposal from a config file. + + This calls :py:func:`epsie_adaptive_from_config` with ``cls`` set to + :py:class:`epsie.proposals.AdaptiveBoundedNormal`. See that function + for details on options that can be read. + + Example:: + + [jump_proposal-q] + name = adaptive_bounded_normal + adaptation-duration = 1000 + min-q = 1 + max-q = 8 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.AdaptiveBoundedNormal`: + An adaptive normal proposal for use with ``epsie`` samplers. + """ + return epsie_adaptive_from_config(cls, cp, section, tag)
+
+ + + +
+[docs] +class EpsieATAdaptiveBoundedNormal(epsie_proposals.ATAdaptiveBoundedNormal): + """Adds ``from_config`` method to epsie's adaptive bounded proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + """Loads a proposal from a config file. + + This calls :py:func:`epsie_adaptive_from_config` with ``cls`` set to + :py:class:`epsie.proposals.AdaptiveBoundedProposal`. See that function + for details on options that can be read. + + Example:: + + [jump_proposal-q] + name = adaptive_bounded_proposal + min-q = 1 + max-q = 8 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.AdaptiveBoundedProposal`: + An adaptive bounded proposal for use with ``epsie`` samplers. + """ + return epsie_at_adaptive_from_config(cls, cp, section, tag, + with_boundaries=True)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/jump/discrete.html b/latest/html/_modules/pycbc/inference/jump/discrete.html new file mode 100644 index 00000000000..fb68f7a67b4 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/jump/discrete.html @@ -0,0 +1,319 @@ + + + + + + pycbc.inference.jump.discrete — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.jump.discrete

+# Copyright (C) 2020  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""Jump proposals that use a bounded normal distribution."""
+
+from epsie import proposals as epsie_proposals
+
+from .normal import (epsie_from_config, epsie_adaptive_from_config)
+
+
+
+[docs] +class EpsieNormalDiscrete(epsie_proposals.NormalDiscrete): + """Adds ``from_config`` method to epsie's normal discrete proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + r"""Loads a proposal from a config file. + + This calls :py:func:`epsie_from_config` with ``cls`` set to + :py:class:`epsie.proposals.NormalDiscrete` and ``with_boundaries`` set + to False. See that function for details on options that can be read. + + Example:: + + [jump_proposal-index] + name = discrete + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.BoundedDiscrete`: + A bounded discrete proposal for use with ``epsie`` samplers. + """ + return epsie_from_config(cls, cp, section, tag, with_boundaries=False)
+
+ + + +
+[docs] +class EpsieBoundedDiscrete(epsie_proposals.BoundedDiscrete): + """Adds ``from_config`` method to epsie's bounded discrete proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + r"""Loads a proposal from a config file. + + This calls :py:func:`epsie_from_config` with ``cls`` set to + :py:class:`epsie.proposals.BoundedDiscrete` and ``with_boundaries`` set + to True. See that function for details on options that can be read. + + Example:: + + [jump_proposal-index] + name = bounded_discrete + min-index = 0 + max-index = 19 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.BoundedDiscrete`: + A bounded discrete proposal for use with ``epsie`` samplers. + """ + return epsie_from_config(cls, cp, section, tag, with_boundaries=True)
+
+ + + +
+[docs] +class EpsieAdaptiveNormalDiscrete(epsie_proposals.AdaptiveNormalDiscrete): + """Adds ``from_config`` method to epsie's adaptive bounded discrete + proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + """Loads a proposal from a config file. + + This calls :py:func:`epsie_adaptive_from_config` with ``cls`` set to + :py:class:`epsie.proposals.AdaptiveNormalDiscrete`. See that function + for details on options that can be read. + + Example:: + + [jump_proposal-index] + name = adaptive_normal_discrete + adaptation-duration = 1000 + min-index = 0 + max-index = 42 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.AdaptiveBoundedDiscrete`: + An adaptive normal proposal for use with ``epsie`` samplers. + """ + return epsie_adaptive_from_config(cls, cp, section, tag, + boundary_arg_name='prior_widths')
+
+ + + +
+[docs] +class EpsieAdaptiveBoundedDiscrete(epsie_proposals.AdaptiveBoundedDiscrete): + """Adds ``from_config`` method to epsie's adaptive bounded discrete + proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + """Loads a proposal from a config file. + + This calls :py:func:`epsie_adaptive_from_config` with ``cls`` set to + :py:class:`epsie.proposals.AdaptiveBoundedDiscrete`. See that function + for details on options that can be read. + + Example:: + + [jump_proposal-index] + name = adaptive_bounded_discrete + adaptation-duration = 1000 + min-index = 0 + max-index = 42 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.AdaptiveBoundedDiscrete`: + An adaptive normal proposal for use with ``epsie`` samplers. + """ + return epsie_adaptive_from_config(cls, cp, section, tag)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/jump/normal.html b/latest/html/_modules/pycbc/inference/jump/normal.html new file mode 100644 index 00000000000..71b078c7a28 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/jump/normal.html @@ -0,0 +1,724 @@ + + + + + + pycbc.inference.jump.normal — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.jump.normal

+# Copyright (C) 2019  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""Jump proposals that use a normal distribution."""
+
+
+import numpy
+
+from epsie import proposals as epsie_proposals
+from epsie.proposals import Boundaries
+
+from pycbc import VARARGS_DELIM
+
+
+
+[docs] +class EpsieNormal(epsie_proposals.Normal): + """Adds ``from_config`` method to epsie's normal proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + """Loads a proposal from a config file. + + This calls :py:func:`epsie_from_config` with ``cls`` set to + :py:class:`epsie.proposals.Normal` and ``with_boundaries`` set to + False. See that function for details on options that can be read. + + Example:: + + [jump_proposal-mchrip+q] + name = normal + var-q = 0.1 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.Normal`: + A normal proposal for use with ``epsie`` samplers. + """ + return epsie_from_config(cls, cp, section, tag, with_boundaries=False)
+
+ + + +
+[docs] +class EpsieAdaptiveNormal(epsie_proposals.AdaptiveNormal): + """Adds ``from_config`` method to epsie's adaptive normal proposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + """Loads a proposal from a config file. + + This calls :py:func:`epsie_adaptive_from_config` with ``cls`` set to + :py:class:`epsie.proposals.AdaptiveNormal`. See that function for + details on options that can be read. + + Example:: + + [jump_proposal-mchirp+q] + name = adaptive_normal + adaptation-duration = 1000 + min-q = 1 + max-q = 8 + min-mchirp = 20 + max-mchirp = 80 + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.AdaptiveNormal`: + An adaptive normal proposal for use with ``epsie`` samplers. + """ + return epsie_adaptive_from_config(cls, cp, section, tag, + boundary_arg_name='prior_widths')
+
+ + + +
+[docs] +class EpsieATAdaptiveNormal(epsie_proposals.ATAdaptiveNormal): + """Adds ``from_config`` method to epsie's ATAdaptiveProposal.""" + +
+[docs] + @classmethod + def from_config(cls, cp, section, tag): + """Loads a proposal from a config file. + + This calls :py:func:`epsie_from_config` with ``cls`` set to + :py:class:`epsie.proposals.AdaptiveProposal` and ``with_boundaries`` + set to False. See that function for details on options that can be + read. + + Example:: + + [jump_proposal-mchrip+q] + name = adaptive_proposal + diagonal = + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + + Returns + ------- + :py:class:`epsie.proposals.AdaptiveProposal`: + An adaptive proposal for use with ``epsie`` samplers. + """ + return epsie_at_adaptive_from_config(cls, cp, section, tag, + with_boundaries=False)
+
+ + + +
+[docs] +def epsie_from_config(cls, cp, section, tag, with_boundaries=False): + r"""Generic function for loading epsie proposals from a config file. + + This should be used for proposals that are not adaptive. + + The section that is read should have the format ``[{section}-{tag}]``, + where ``{tag}`` is a :py:const:`pycbc.VARARGS_DELIM` separated list + of the parameters to create the jump proposal for. + + Options that are read: + + * name : str + Required. Must match the name of the proposal. + * var-{param} : float + Optional. Variance to use for parameter {param}. If ``with_boundaries`` + is True, then any parameter not specified will use a default variance + of :math:`(\Delta p/10)^2`, where :math:`\Delta p` is the boundary + width for that parameter. If ``with_boundaries`` is False, will use + a default value of 1. + * min-{param} : float + * max-{param} : float + The bounds on each parameter. Required if ``with_boundaries`` is set to + True, in which case bounds must be provided for every parameter. + + Parameters + ---------- + cls : epsie.proposals.BaseProposal + The epsie proposal class to initialize. + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + with_boundaries : bool, optional + Try to load boundaries from the section and pass a ``boundaries`` + argument to the class's initialization. This should be set to true + for bounded proposals. Default is False. + + Returns + ------- + cls : + The class initialized with the options read from the config file. + """ + # check that the name matches + assert cp.get_opt_tag(section, "name", tag) == cls.name, ( + "name in specified section must match mine") + params, opts = load_opts(cp, section, tag, skip=['name']) + args = {'parameters': params} + if with_boundaries: + boundaries = get_param_boundaries(params, opts) + args['boundaries'] = boundaries + if 'discrete' in cls.name.split('_'): + args.update({'successive': + get_epsie_discrete_successive_settings(params, opts)}) + # if there are any options left, assume they are for setting the variance + if opts: + cov = get_variance(params, opts) + elif with_boundaries: + cov = numpy.array([abs(boundaries[p])/10. for p in params])**2. + else: + cov = None + args['cov'] = cov + # no other options should remain + if opts: + raise ValueError("unrecognized options {}" + .format(', '.join(opts.keys()))) + return cls(**args)
+ + + +
+[docs] +def epsie_adaptive_from_config(cls, cp, section, tag, with_boundaries=True, + boundary_arg_name='boundaries'): + """Generic function for loading adaptive epsie proposals from a config + file. + + The section that is read should have the format ``[{section}-{tag}]``, + where ``{tag}`` is a :py:const:`pycbc.VARARGS_DELIM` separated list + of the parameters to create the jump proposal for. + + Options that are read: + + * name : str + Required. Must match the name of the proposal. + * adaptation-duration : int + Required. Sets the ``adaptation_duration``. + * min-{param} : float + * max-{param} : float + The bounds on each parameter. Required if ``with_boundaries`` is set to + True, in which case bounds must be provided for every parameter. + * var-{param} : float + Optional. Initial variance to use. If not provided, will use a + default based on the bounds (see + :py:class:`epsie.proposals.AdaptiveSupport` for details). + * adaptation-decay : int + Optional. Sets the ``adaptation_decay``. If not provided, will use + the class's default. + * start-iteration : int + Optional. Sets the ``start_iteration``.If not provided, will use + the class's default. + * target-rate : float + Optional. Sets the ``target_rate``. If not provided, will use + the class's default. + + Parameters + ---------- + cls : epsie.proposals.BaseProposal + The epsie proposal class to initialize. The class should have + :py:class:`epsie.proposals.normal.AdaptiveSupport`. + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + with_boundaries : bool, optional + Try to load boundaries from the section and pass a ``boundaries`` + argument to the class's initialization. Default is True. + boundary_arg_name : str, optional + The name of the argument for the boundaries (only used if + ``with_boundaries`` is True). Provided because some adaptive proposals + that only need the boundary widths call this ``prior_widths``. Default + is ``'boundaries'``. + + Returns + ------- + cls : + The class initialized with the options read from the config file. + """ + # check that the name matches + assert cp.get_opt_tag(section, "name", tag) == cls.name, ( + "name in specified section must match mine") + params, opts = load_opts(cp, section, tag, skip=['name']) + args = {'parameters': params} + # get the bounds + if with_boundaries: + args[boundary_arg_name] = get_param_boundaries(params, opts) + if 'discrete' in cls.name.split('_'): + args.update({'successive': + get_epsie_discrete_successive_settings(params, opts)}) + # get the adaptation parameters + args.update(get_epsie_adaptation_settings(opts)) + # if there are any other options, assume they are for setting the + # initial standard deviation + if opts: + var = get_variance(params, opts) + args['initial_std'] = var**0.5 + # at this point, there should be no options left + if opts: + raise ValueError('unrecognized options {} in section {}' + .format(', '.join(opts.keys()), + '-'.join([section, tag]))) + return cls(**args)
+ + + +
+[docs] +def epsie_at_adaptive_from_config(cls, cp, section, tag, + with_boundaries=False): + """Generic function for loading AT Adaptive Normal proposals from a config + file. + + The section that is read should have the format ``[{section}-{tag}]``, + where ``{tag}`` is a :py:const:`pycbc.VARARGS_DELIM` separated list + of the parameters to create the jump proposal for. + + Options that are read: + + * name : str + Required. Must match the name of the proposal. + * adaptation-duration : int + Sets the ``adaptation_duration``. If not provided will use the class's + default. + * diagonal : bool, optional + Determines whether only to adapt the variance. If True will only train + the diagonal elements. + * componentwise : bool, optional + Whether to include a componentwise scaling of the parameters. + By default set to False. Componentwise scaling `ndim` times more + expensive than global scaling. + * min-{param} : float + * max-{param} : float + The bounds on each parameter. Required if ``with_boundaries`` is set to + True, in which case bounds must be provided for every parameter. + * start-iteration : int + Optional. Sets the ``start_iteration``. If not provided, will use + the class's default. + * target-rate : float + Optional. Sets the ``target_rate``. If not provided, will use + the class's default. + + Parameters + ---------- + cls : epsie.proposals.BaseProposal + The epsie proposal class to initialize. The class should have + :py:class:`epsie.proposals.normal.AdaptiveSupport`. + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + with_boundaries : bool, optional + Try to load boundaries from the section and pass a ``boundaries`` + argument to the class's initialization. Default is True. + + Returns + ------- + cls : + The class initialized with the options read from the config file. + """ + # check that the name matches + assert cp.get_opt_tag(section, "name", tag) == cls.name, ( + "name in specified section must match mine") + params, opts = load_opts(cp, section, tag, skip=['name']) + args = {'parameters': params} + # get the bounds + if with_boundaries: + args['boundaries'] = get_param_boundaries(params, opts) + if 'discrete' in cls.name.split('_'): + args.update({'successive': + get_epsie_discrete_successive_settings(params, opts)}) + # get the adaptation parameters + args.update(get_epsie_adaptation_settings(opts, cls.name)) + # bounded and angular adaptive proposals support diagonal-only + diagonal = opts.pop('diagonal', None) + if not any(p in cls.name.split('_') for p in ['bounded', 'angular']): + args.update({'diagonal': diagonal is not None}) + componentwise = opts.pop('componentwise', None) + if componentwise is not None: + args.update({'componentwise': True}) + if opts: + raise ValueError("unrecognized options {}" + .format(', '.join(opts.keys()))) + return cls(**args)
+ + + +
+[docs] +def load_opts(cp, section, tag, skip=None): + """Loads config options for jump proposals. + + All `-` in option names are converted to `_` before returning. + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file to read from. + section : str + The name of the section to look in. + tag : str + :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names + to create proposals for. + skip : list, optional + List of option names to skip loading. + + Returns + ------- + params : list + List of parameter names the jump proposal is for. + opts : dict + Dictionary of option names -> values, where all values are strings. + """ + if skip is None: + skip = [] + params = tag.split(VARARGS_DELIM) + # get options + readsection = '-'.join([section, tag]) + opts = {opt.replace('-', '_'): cp.get(readsection, opt) + for opt in cp.options(readsection) if opt not in skip} + return params, opts
+ + + +
+[docs] +def get_variance(params, opts, default=1.): + """Gets variance for jump proposals from the dictionary of options. + + This looks for ``var_{param}`` for every parameter listed in ``params``. + If found, the argument is popped from the given ``opts`` dictionary. If not + found, ``default`` will be used. + + Parameters + ---------- + params : list of str + List of parameter names to look for. + opts : dict + Dictionary of option -> value that was loaded from a config file + section. + default : float, optional + Default value to use for parameters that do not have variances + provided. Default is 1. + + Returns + ------- + numpy.array + Array of variances to use. Order is the same as the parameter names + given in ``params``. + """ + varfmt = 'var_{}' + cov = numpy.array([float(opts.pop(varfmt.format(param), default)) + for param in params]) + return cov
+ + + +
+[docs] +def get_param_boundaries(params, opts): + """Gets parameter boundaries for jump proposals. + + The syntax for the options should be ``(min|max)_{param} = value``. Both + a minimum and maximum should be provided for every parameter in ``params``. + If the opts are created using ``load_opts``, then the options can be + formatted as ``(min|max)-{param}``, since that function will turn all ``-`` + to ``_`` in option names. + + Arguments will be popped from the given ``opts`` dictionary. + + Parameters + ---------- + params : list of str + List of parameter names to get boundaries for. + opts : dict + Dictionary of option -> value that was loaded from a config file + section. + + Returns + ------- + dict : + Dictionary of parameter names -> :py:class:`epsie.proposals.Boundaries` + """ + boundaries = {} + for param in params: + minbound = opts.pop('min_{}'.format(param), None) + if minbound is None: + raise ValueError("Must provide a minimum bound for {p}." + "Syntax is min_{p} = val".format(p=param)) + maxbound = opts.pop('max_{}'.format(param), None) + if maxbound is None: + raise ValueError("Must provide a maximum bound for {p}." + "Syntax is max_{p} = val".format(p=param)) + boundaries[param] = Boundaries((float(minbound), float(maxbound))) + return boundaries
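The option-parsing helpers above operate on plain dictionaries of strings, so
they can be exercised directly; the option values below are made up for
illustration::

    from pycbc.inference.jump.normal import get_variance, get_param_boundaries

    # options as they might have been read from a [jump_proposal-q+mchirp] section
    opts = {'var_q': '0.1',
            'min_q': '1', 'max_q': '8',
            'min_mchirp': '20', 'max_mchirp': '80'}

    # q gets the requested variance; mchirp falls back to the default of 1
    cov = get_variance(['q', 'mchirp'], opts)           # array([0.1, 1.])

    # bounds are returned as epsie Boundaries objects and popped from opts
    bounds = get_param_boundaries(['q', 'mchirp'], opts)
    print(bounds['q'])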
+ + + +
+[docs] +def get_epsie_adaptation_settings(opts, name=None): + """Get settings for Epsie adaptive proposals from a config file. + + This requires that ``adaptation_duration`` is in the given dictionary. + It will also look for ``adaptation_decay``, ``start_iteration``, and + ``target_rate``, but these are optional. Arguments will be popped from the + given dictionary. + + Parameters + ---------- + opts : dict + Dictionary of option -> value that was loaded from a config file + section. + name : str (optional) + Proposal name + + Returns + ------- + dict : + Dictionary of argument name -> values. + """ + args = {} + adaptation_duration = opts.pop('adaptation_duration', None) + if adaptation_duration is None: + if name is not None: + if all(p in name.split('_') for p in ['at', 'adaptive']): + args.update({'adaptation_duration': None}) + else: + raise ValueError("No adaptation_duration specified") + else: + args.update({'adaptation_duration': int(adaptation_duration)}) + # optional args + adaptation_decay = opts.pop('adaptation_decay', None) + if adaptation_decay is not None: + args.update({'adaptation_decay': int(adaptation_decay)}) + start_iteration = opts.pop('start_iteration', None) + if start_iteration is not None: + args.update({'start_iteration': int(start_iteration)}) + target_rate = opts.pop('target_rate', None) + if target_rate is not None: + args.update({'target_rate': float(target_rate)}) + return args
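The adaptation settings are read from the same kind of dictionary; recognized
options are converted to the proper types and popped, and any optional setting
that is absent is left to the proposal class's defaults. For example::

    from pycbc.inference.jump.normal import get_epsie_adaptation_settings

    opts = {'adaptation_duration': '1000', 'target_rate': '0.25'}
    args = get_epsie_adaptation_settings(opts)
    # args == {'adaptation_duration': 1000, 'target_rate': 0.25}; opts is now empty
    print(args, opts)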
+ + + +
+[docs] +def get_epsie_discrete_successive_settings(params, opts): + """Get settings for Epsie successive discrete proposal successive jumps + from a config file. + + If ``successive`` is not defined for a parameter then assumes successive + jumps are not allowed (i.e. jumps from an integer to the same integer). + Arguments will be popped from the given dictionary. + + Example:: + [jump_proposal-k+n] + name = discrete + successive-k = + + This example sets successive jumps for ``k`` but does not do so for ``n``. + + Parameters + ---------- + params : list of str + List of parameter names to get the successive option for. + opts : dict + Dictionary of option -> value that was loaded from a config file + section. + + Returns + ------- + dict : + Dictionary of parameter names -> bools + """ + successive = {} + for param in params: + successive.update( + {param: opts.pop('successive_{}'.format(param), None) is not None}) + return successive
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models.html b/latest/html/_modules/pycbc/inference/models.html new file mode 100644 index 00000000000..1812bc5c771 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models.html @@ -0,0 +1,500 @@ + + + + + + pycbc.inference.models — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models

+# Copyright (C) 2018  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+"""
+This package provides classes and functions for evaluating Bayesian statistics
+assuming various noise models.
+"""
+
+
+import logging
+from pkg_resources import iter_entry_points as _iter_entry_points
+from .base import BaseModel
+from .base_data import BaseDataModel
+from .analytic import (TestEggbox, TestNormal, TestRosenbrock, TestVolcano,
+                       TestPrior, TestPosterior)
+from .gaussian_noise import GaussianNoise
+from .marginalized_gaussian_noise import MarginalizedPhaseGaussianNoise
+from .marginalized_gaussian_noise import MarginalizedPolarization
+from .marginalized_gaussian_noise import MarginalizedHMPolPhase
+from .marginalized_gaussian_noise import MarginalizedTime
+from .brute_marg import BruteParallelGaussianMarginalize
+from .brute_marg import BruteLISASkyModesMarginalize
+from .gated_gaussian_noise import (GatedGaussianNoise, GatedGaussianMargPol)
+from .single_template import SingleTemplate
+from .relbin import Relative, RelativeTime, RelativeTimeDom
+from .hierarchical import (HierarchicalModel, MultiSignalModel,
+                           JointPrimaryMarginalizedModel)
+
+
+# Used to manage a model instance across multiple cores or MPI
+_global_instance = None
+
+
+def _call_global_model(*args, **kwds):
+    """Private function for global model (needed for parallelization)."""
+    return _global_instance(*args, **kwds)  # pylint:disable=not-callable
+
+
+def _call_global_model_logprior(*args, **kwds):
+    """Private function for a calling global's logprior.
+
+    This is needed for samplers that use a separate function for the logprior,
+    like ``emcee_pt``.
+    """
+    # pylint:disable=not-callable
+    return _global_instance(*args, callstat='logprior', **kwds)
+
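A hypothetical sketch of the parallelization pattern these helpers support:
the wrapped model is stored in the module-level ``_global_instance`` before
the pool is created, so that (fork-based) worker processes can evaluate it
through ``_call_global_model`` without pickling the model itself::

    import multiprocessing
    from pycbc.inference import models

    model = models.TestNormal(['x', 'y'])
    # must be set before the Pool is created (see CallModel below)
    models._global_instance = models.CallModel(model, 'logposterior')

    with multiprocessing.Pool(2) as pool:
        results = pool.map(models._call_global_model,
                           [[0.1, -0.2], [0.0, 0.0]])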
+
+
+[docs] +class CallModel(object): + """Wrapper class for calling models from a sampler. + + This class can be called like a function, with the parameter values to + evaluate provided as a list in the same order as the model's + ``variable_params``. In that case, the model is updated with the provided + parameters and then the ``callstat`` retrieved. If ``return_all_stats`` is + set to ``True``, then all of the stats specified by the model's + ``default_stats`` will be returned as a tuple, in addition to the stat + value. + + The model's attributes are promoted to this class's namespace, so that any + attribute and method of ``model`` may be called directly from this class. + + This class must be initalized prior to the creation of a ``Pool`` object. + + Parameters + ---------- + model : Model instance + The model to call. + callstat : str + The statistic to call. + return_all_stats : bool, optional + Whether or not to return all of the other statistics along with the + ``callstat`` value. + + Examples + -------- + Create a wrapper around an instance of the ``TestNormal`` model, with the + ``callstat`` set to ``logposterior``: + + >>> from pycbc.inference.models import TestNormal, CallModel + >>> model = TestNormal(['x', 'y']) + >>> call_model = CallModel(model, 'logposterior') + + Now call on a set of parameter values: + + >>> call_model([0.1, -0.2]) + (-1.8628770664093453, (0.0, 0.0, -1.8628770664093453)) + + Note that a tuple of all of the model's ``default_stats`` were returned in + addition to the ``logposterior`` value. We can shut this off by toggling + ``return_all_stats``: + + >>> call_model.return_all_stats = False + >>> call_model([0.1, -0.2]) + -1.8628770664093453 + + Attributes of the model can be called from the call model. For example: + + >>> call_model.variable_params + ('x', 'y') + + """ + + def __init__(self, model, callstat, return_all_stats=True): + self.model = model + self.callstat = callstat + self.return_all_stats = return_all_stats + + def __getattr__(self, attr): + """Adds the models attributes to self.""" + return getattr(self.model, attr) + + def __call__(self, param_values, callstat=None, return_all_stats=None): + """Updates the model with the given parameter values, then calls the + call function. + + Parameters + ---------- + param_values : list of float + The parameter values to test. Assumed to be in the same order as + ``model.sampling_params``. + callstat : str, optional + Specify which statistic to call. Default is to call whatever self's + ``callstat`` is set to. + return_all_stats : bool, optional + Whether or not to return all stats in addition to the ``callstat`` + value. Default is to use self's ``return_all_stats``. + + Returns + ------- + stat : float + The statistic returned by the ``callfunction``. + all_stats : tuple, optional + The values of all of the model's ``default_stats`` at the given + param values. Any stat that has not be calculated is set to + ``numpy.nan``. This is only returned if ``return_all_stats`` is + set to ``True``. + """ + if callstat is None: + callstat = self.callstat + if return_all_stats is None: + return_all_stats = self.return_all_stats + params = dict(zip(self.model.sampling_params, param_values)) + self.model.update(**params) + val = getattr(self.model, callstat) + if return_all_stats: + return val, self.model.get_current_stats() + else: + return val
+ + + +
+[docs] +def read_from_config(cp, **kwargs): + """Initializes a model from the given config file. + + The section must have a ``name`` argument. The name argument corresponds to + the name of the class to initialize. + + Parameters + ---------- + cp : WorkflowConfigParser + Config file parser to read. + \**kwargs : + All other keyword arguments are passed to the ``from_config`` method + of the class specified by the name argument. + + Returns + ------- + cls + The initialized model. + """ + # use the name to get the distribution + name = cp.get("model", "name") + return get_model(name).from_config(cp, **kwargs)
+ + + +_models = {_cls.name: _cls for _cls in ( + TestEggbox, + TestNormal, + TestRosenbrock, + TestVolcano, + TestPosterior, + TestPrior, + GaussianNoise, + MarginalizedPhaseGaussianNoise, + MarginalizedPolarization, + MarginalizedHMPolPhase, + MarginalizedTime, + BruteParallelGaussianMarginalize, + BruteLISASkyModesMarginalize, + GatedGaussianNoise, + GatedGaussianMargPol, + SingleTemplate, + Relative, + RelativeTime, + HierarchicalModel, + MultiSignalModel, + RelativeTimeDom, + JointPrimaryMarginalizedModel, +)} + + +class _ModelManager(dict): + """Sub-classes dictionary to manage the collection of available models. + + The first time this is called, any plugin models that are available will be + added to the dictionary before returning. + """ + def __init__(self, *args, **kwargs): + self.retrieve_plugins = True + super().__init__(*args, **kwargs) + + def add_model(self, model): + """Adds a model to the dictionary. + + If the given model has the same name as a model already in the + dictionary, the original model will be overridden. A warning will be + printed in that case. + """ + if super().__contains__(model.name): + logging.warning("Custom model %s will override a model of the " + "same name. If you don't want this, change the " + "model's name attribute and restart.", model.name) + self[model.name] = model + + def add_plugins(self): + """Adds any plugin models that are available. + + This will only add the plugins if ``self.retrieve_plugins = True``. + After this runs, ``self.retrieve_plugins`` is set to ``False``, so that + subsequent calls to this will no re-add models. + """ + if self.retrieve_plugins: + for plugin in _iter_entry_points('pycbc.inference.models'): + self.add_model(plugin.resolve()) + self.retrieve_plugins = False + + def __len__(self): + self.add_plugins() + super().__len__() + + def __contains__(self, key): + self.add_plugins() + return super().__contains__(key) + + def get(self, *args): + self.add_plugins() + return super().get(*args) + + def popitem(self): + self.add_plugins() + return super().popitem() + + def pop(self, *args): + try: + return super().pop(*args) + except KeyError: + self.add_plugins() + return super().pop(*args) + + def keys(self): + self.add_plugins() + return super().keys() + + def values(self): + self.add_plugins() + return super().values() + + def items(self): + self.add_plugins() + return super().items() + + def __iter__(self): + self.add_plugins() + return super().__iter__() + + def __repr__(self): + self.add_plugins() + return super().__repr__() + + def __getitem__(self, item): + try: + return super().__getitem__(item) + except KeyError: + self.add_plugins() + return super().__getitem__(item) + + def __delitem__(self, *args, **kwargs): + try: + super().__delitem__(*args, **kwargs) + except KeyError: + self.add_plugins() + super().__delitem__(*args, **kwargs) + + +models = _ModelManager(_models) + + +
+[docs] +def get_models(): + """Returns the dictionary of current models. + + Ensures that plugins are added to the dictionary first. + """ + models.add_plugins() + return models
+ + + +
+[docs] +def get_model(model_name): + """Retrieve the given model. + + Parameters + ---------- + model_name : str + The name of the model to get. + + Returns + ------- + model : + The requested model. + """ + return get_models()[model_name]
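For instance, retrieving and instantiating a model by name (the parameter
names here are arbitrary)::

    from pycbc.inference.models import get_model

    model_cls = get_model('test_normal')   # the TestNormal class
    model = model_cls(['x', 'y'])
    print(model.name)                      # 'test_normal'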
+ + + +
+[docs] +def available_models(): + """List the currently available models.""" + return list(get_models().keys())
+ + + +
+[docs] +def register_model(model): + """Makes a custom model available to PyCBC. + + The provided model will be added to the dictionary of models that PyCBC + knows about, using the model's ``name`` attribute. If the ``name`` is the + same as a model that already exists in PyCBC, a warning will be printed. + + Parameters + ---------- + model : pycbc.inference.models.base.BaseModel + The model to use. The model should be a sub-class of + :py:class:`BaseModel <pycbc.inference.models.base.BaseModel>` to ensure + it has the correct API for use within ``pycbc_inference``. + """ + get_models().add_model(model)
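A sketch of registering a custom model; ``MyModel`` is a hypothetical stand-in
for a user-defined class::

    from pycbc.inference.models import BaseModel, register_model, available_models

    class MyModel(BaseModel):
        """A trivial custom model with a constant likelihood."""
        name = 'my_model'

        def _loglikelihood(self):
            return 0.

    register_model(MyModel)
    print('my_model' in available_models())   # True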
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/analytic.html b/latest/html/_modules/pycbc/inference/models/analytic.html new file mode 100644 index 00000000000..b0ee4c722b6 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/analytic.html @@ -0,0 +1,407 @@ + + + + + + pycbc.inference.models.analytic — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.analytic

+# Copyright (C) 2018  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""
+This module provides models that have analytic solutions for the
+log likelihood.
+"""
+
+import logging
+import numpy
+import numpy.random
+from scipy import stats
+
+from .base import BaseModel
+
+
+
+[docs] +class TestNormal(BaseModel): + r"""The test distribution is an multi-variate normal distribution. + + The number of dimensions is set by the number of ``variable_params`` that + are passed. For details on the distribution used, see + ``scipy.stats.multivariate_normal``. + + Parameters + ---------- + variable_params : (tuple of) string(s) + A tuple of parameter names that will be varied. + mean : array-like, optional + The mean values of the parameters. If None provide, will use 0 for all + parameters. + cov : array-like, optional + The covariance matrix of the parameters. If None provided, will use + unit variance for all parameters, with cross-terms set to 0. + **kwargs : + All other keyword arguments are passed to ``BaseModel``. + + Examples + -------- + Create a 2D model with zero mean and unit variance: + + >>> m = TestNormal(['x', 'y']) + + Set the current parameters and evaluate the log posterior: + + >>> m.update(x=-0.2, y=0.1) + >>> m.logposterior + -1.8628770664093453 + + See the current stats that were evaluated: + + >>> m.current_stats + {'logjacobian': 0.0, 'loglikelihood': -1.8628770664093453, 'logprior': 0.0} + + """ + name = "test_normal" + + def __init__(self, variable_params, mean=None, cov=None, **kwargs): + # set up base likelihood parameters + super(TestNormal, self).__init__(variable_params, **kwargs) + # store the pdf + if mean is None: + mean = [0.]*len(variable_params) + if cov is None: + cov = [1.]*len(variable_params) + self._dist = stats.multivariate_normal(mean=mean, cov=cov) + # check that the dimension is correct + if self._dist.dim != len(variable_params): + raise ValueError("dimension mis-match between variable_params and " + "mean and/or cov") + + def _loglikelihood(self): + """Returns the log pdf of the multivariate normal. + """ + return self._dist.logpdf([self.current_params[p] + for p in self.variable_params])
+ + + +
+[docs] +class TestEggbox(BaseModel): + r"""The test distribution is an 'eggbox' function: + + .. math:: + + \log \mathcal{L}(\Theta) = \left[ + 2+\prod_{i=1}^{n}\cos\left(\frac{\theta_{i}}{2}\right)\right]^{5} + + The number of dimensions is set by the number of ``variable_params`` that + are passed. + + Parameters + ---------- + variable_params : (tuple of) string(s) + A tuple of parameter names that will be varied. + **kwargs : + All other keyword arguments are passed to ``BaseModel``. + + """ + name = "test_eggbox" + + def __init__(self, variable_params, **kwargs): + # set up base likelihood parameters + super(TestEggbox, self).__init__(variable_params, **kwargs) + + def _loglikelihood(self): + """Returns the log pdf of the eggbox function. + """ + return (2 + numpy.prod(numpy.cos([ + self.current_params[p]/2. for p in self.variable_params]))) ** 5
+ + + +
+[docs] +class TestRosenbrock(BaseModel): + r"""The test distribution is the Rosenbrock function: + + .. math:: + + \log \mathcal{L}(\Theta) = -\sum_{i=1}^{n-1}[ + (1-\theta_{i})^{2}+100(\theta_{i+1} - \theta_{i}^{2})^{2}] + + The number of dimensions is set by the number of ``variable_params`` that + are passed. + + Parameters + ---------- + variable_params : (tuple of) string(s) + A tuple of parameter names that will be varied. + **kwargs : + All other keyword arguments are passed to ``BaseModel``. + + """ + name = "test_rosenbrock" + + def __init__(self, variable_params, **kwargs): + # set up base likelihood parameters + super(TestRosenbrock, self).__init__(variable_params, **kwargs) + + def _loglikelihood(self): + """Returns the log pdf of the Rosenbrock function. + """ + logl = 0 + p = [self.current_params[p] for p in self.variable_params] + for i in range(len(p) - 1): + logl -= ((1 - p[i])**2 + 100 * (p[i+1] - p[i]**2)**2) + return logl
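As a quick check of the formula above (a sketch; no prior is attached, so the
log posterior equals the log likelihood)::

    from pycbc.inference.models.analytic import TestRosenbrock

    m = TestRosenbrock(['x', 'y'])
    m.update(x=1., y=1.)     # the maximum of the Rosenbrock log likelihood
    print(m.logposterior)    # 0.0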
+ + + +
+[docs] +class TestVolcano(BaseModel): + r"""The test distribution is a two-dimensional 'volcano' function: + + .. math:: + \Theta = + \sqrt{\theta_{1}^{2} + \theta_{2}^{2}} \log \mathcal{L}(\Theta) = + 25\left(e^{\frac{-\Theta}{35}} + + \frac{1}{2\sqrt{2\pi}} e^{-\frac{(\Theta-5)^{2}}{8}}\right) + + Parameters + ---------- + variable_params : (tuple of) string(s) + A tuple of parameter names that will be varied. Must have length 2. + **kwargs : + All other keyword arguments are passed to ``BaseModel``. + + """ + name = "test_volcano" + + def __init__(self, variable_params, **kwargs): + # set up base likelihood parameters + super(TestVolcano, self).__init__(variable_params, **kwargs) + + # make sure there are exactly two variable args + if len(self.variable_params) != 2: + raise ValueError("TestVolcano distribution requires exactly " + "two variable args") + + def _loglikelihood(self): + """Returns the log pdf of the 2D volcano function. + """ + p = [self.current_params[p] for p in self.variable_params] + r = numpy.sqrt(p[0]**2 + p[1]**2) + mu, sigma = 5.0, 2.0 + return 25 * ( + numpy.exp(-r/35) + 1 / (sigma * numpy.sqrt(2 * numpy.pi)) * + numpy.exp(-0.5 * ((r - mu) / sigma) ** 2))
+ + + +
+[docs]
+class TestPrior(BaseModel):
+    r"""Uses the prior as the test distribution.
+
+    Parameters
+    ----------
+    variable_params : (tuple of) string(s)
+        A tuple of parameter names that will be varied.
+    **kwargs :
+        All other keyword arguments are passed to ``BaseModel``.
+
+    """
+    name = "test_prior"
+
+    def __init__(self, variable_params, **kwargs):
+        # set up base likelihood parameters
+        super(TestPrior, self).__init__(variable_params, **kwargs)
+
+    def _loglikelihood(self):
+        """Returns zero.
+        """
+        return 0.
+ + + +
+[docs] +class TestPosterior(BaseModel): + r"""Build a test posterior from a set of samples using a kde + + Parameters + ---------- + variable_params : (tuple of) string(s) + A tuple of parameter names that will be varied. + posterior_file : hdf file + A compatible pycbc inference output file which posterior samples can + be read from. + nsamples : int + Number of samples to draw from posterior file to build KDE. + **kwargs : + All other keyword arguments are passed to ``BaseModel``. + + """ + name = "test_posterior" + + def __init__(self, variable_params, posterior_file, nsamples, **kwargs): + super(TestPosterior, self).__init__(variable_params, **kwargs) + + from pycbc.inference.io import loadfile # avoid cyclic import + logging.info('loading test posterior model') + inf_file = loadfile(posterior_file) + logging.info('reading samples') + samples = inf_file.read_samples(variable_params) + samples = numpy.array([samples[v] for v in variable_params]) + + # choose only the requested amount of samples + idx = numpy.arange(0, samples.shape[-1]) + idx = numpy.random.choice(idx, size=int(nsamples), replace=False) + samples = samples[:, idx] + + logging.info('making kde with %s samples', samples.shape[-1]) + self.kde = stats.gaussian_kde(samples) + logging.info('done initializing test posterior model') + + def _loglikelihood(self): + """Returns the log pdf of the test posterior kde + """ + p = numpy.array([self.current_params[p] for p in self.variable_params]) + logpost = self.kde.logpdf(p) + return float(logpost[0])
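+
+# A minimal sketch of the KDE step used by TestPosterior, with synthetic
+# samples standing in for a pycbc inference file (file reading omitted);
+# gaussian_kde expects an array of shape (nparams, nsamples). The names
+# and values are made up for illustration.
+_kde_samples = numpy.random.normal(size=(2, 1000))
+_kde = stats.gaussian_kde(_kde_samples)
+_kde_logpdf = float(_kde.logpdf([0.1, -0.3])[0])  # log density at one point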
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/base.html b/latest/html/_modules/pycbc/inference/models/base.html new file mode 100644 index 00000000000..a548308b27c --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/base.html @@ -0,0 +1,1066 @@ + + + + + + pycbc.inference.models.base — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.base

+# Copyright (C) 2016  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""Base class for models.
+"""
+
+import numpy
+import logging
+from abc import (ABCMeta, abstractmethod)
+from configparser import NoSectionError
+from pycbc import (transforms, distributions)
+from pycbc.io import FieldArray
+
+
+#
+# =============================================================================
+#
+#                               Support classes
+#
+# =============================================================================
+#
+
+
+class _NoPrior(object):
+    """Dummy class to just return 0 if no prior is given to a model.
+    """
+    @staticmethod
+    def apply_boundary_conditions(**params):
+        return params
+
+    def __call__(self, **params):
+        return 0.
+
+
+
+[docs] +class ModelStats(object): + """Class to hold model's current stat values.""" + + @property + def statnames(self): + """Returns the names of the stats that have been stored.""" + return list(self.__dict__.keys()) + +
+[docs] + def getstats(self, names, default=numpy.nan): + """Get the requested stats as a tuple. + + If a requested stat is not an attribute (implying it hasn't been + stored), then the default value is returned for that stat. + + Parameters + ---------- + names : list of str + The names of the stats to get. + default : float, optional + What to return if a requested stat is not an attribute of self. + Default is ``numpy.nan``. + + Returns + ------- + tuple + A tuple of the requested stats. + """ + return tuple(getattr(self, n, default) for n in names)
+ + +
+[docs] + def getstatsdict(self, names, default=numpy.nan): + """Get the requested stats as a dictionary. + + If a requested stat is not an attribute (implying it hasn't been + stored), then the default value is returned for that stat. + + Parameters + ---------- + names : list of str + The names of the stats to get. + default : float, optional + What to return if a requested stat is not an attribute of self. + Default is ``numpy.nan``. + + Returns + ------- + dict + A dictionary of the requested stats. + """ + return dict(zip(names, self.getstats(names, default=default)))
+
+ + + +
+[docs] +class SamplingTransforms(object): + """Provides methods for transforming between sampling parameter space and + model parameter space. + """ + + def __init__(self, variable_params, sampling_params, + replace_parameters, sampling_transforms): + assert len(replace_parameters) == len(sampling_params), ( + "number of sampling parameters must be the " + "same as the number of replace parameters") + # pull out the replaced parameters + self.sampling_params = [arg for arg in variable_params + if arg not in replace_parameters] + # add the sampling parameters + self.sampling_params += sampling_params + # sort to make sure we have a consistent order + self.sampling_params.sort() + self.sampling_transforms = sampling_transforms + +
+[docs] + def logjacobian(self, **params): + r"""Returns the log of the jacobian needed to transform pdfs in the + ``variable_params`` parameter space to the ``sampling_params`` + parameter space. + + Let :math:`\mathbf{x}` be the set of variable parameters, + :math:`\mathbf{y} = f(\mathbf{x})` the set of sampling parameters, and + :math:`p_x(\mathbf{x})` a probability density function defined over + :math:`\mathbf{x}`. + The corresponding pdf in :math:`\mathbf{y}` is then: + + .. math:: + + p_y(\mathbf{y}) = + p_x(\mathbf{x})\left|\mathrm{det}\,\mathbf{J}_{ij}\right|, + + where :math:`\mathbf{J}_{ij}` is the Jacobian of the inverse transform + :math:`\mathbf{x} = g(\mathbf{y})`. This has elements: + + .. math:: + + \mathbf{J}_{ij} = \frac{\partial g_i}{\partial{y_j}} + + This function returns + :math:`\log \left|\mathrm{det}\,\mathbf{J}_{ij}\right|`. + + + Parameters + ---------- + \**params : + The keyword arguments should specify values for all of the variable + args and all of the sampling args. + + Returns + ------- + float : + The value of the jacobian. + """ + return numpy.log(abs(transforms.compute_jacobian( + params, self.sampling_transforms, inverse=True)))
+ + +
+[docs] + def apply(self, samples, inverse=False): + """Applies the sampling transforms to the given samples. + + Parameters + ---------- + samples : dict or FieldArray + The samples to apply the transforms to. + inverse : bool, optional + Whether to apply the inverse transforms (i.e., go from the sampling + args to the ``variable_params``). Default is False. + + Returns + ------- + dict or FieldArray + The transformed samples, along with the original samples. + """ + return transforms.apply_transforms(samples, self.sampling_transforms, + inverse=inverse)
+ + +
+[docs] + @classmethod + def from_config(cls, cp, variable_params): + """Gets sampling transforms specified in a config file. + + Sampling parameters and the parameters they replace are read from the + ``sampling_params`` section, if it exists. Sampling transforms are + read from the ``sampling_transforms`` section(s), using + ``transforms.read_transforms_from_config``. + + An ``AssertionError`` is raised if no ``sampling_params`` section + exists in the config file. + + Parameters + ---------- + cp : WorkflowConfigParser + Config file parser to read. + variable_params : list + List of parameter names of the original variable params. + + Returns + ------- + SamplingTransforms + A sampling transforms class. + """ + # Check if a sampling_params section is provided + try: + sampling_params, replace_parameters = \ + read_sampling_params_from_config(cp) + except NoSectionError as e: + logging.warning("No sampling_params section read from config file") + raise e + # get sampling transformations + sampling_transforms = transforms.read_transforms_from_config( + cp, 'sampling_transforms') + logging.info("Sampling in {} in place of {}".format( + ', '.join(sampling_params), ', '.join(replace_parameters))) + return cls(variable_params, sampling_params, + replace_parameters, sampling_transforms)
+
+ + + +
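+# An illustrative 1-D check of the change of variables performed by
+# SamplingTransforms.logjacobian above, using a hypothetical log transform
+# rather than a pycbc transform object: sampling in y = log(x) gives
+# |det J| = |dx/dy| = exp(y) = x, so log|det J| = log(x).
+_example_x = 2.5
+_example_y = numpy.log(_example_x)
+_example_logjacobian = numpy.log(abs(numpy.exp(_example_y)))  # equals log(x)
+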
+[docs]
+def read_sampling_params_from_config(cp, section_group=None,
+                                     section='sampling_params'):
+    """Reads sampling parameters from the given config file.
+
+    Parameters are read from the `[({section_group}_){section}]` section.
+    The options should list the variable args to transform; the parameters they
+    point to should list the parameters they are to be transformed to for
+    sampling. If multiple parameters are transformed together, they should
+    be comma separated. Example:
+
+    .. code-block:: ini
+
+        [sampling_params]
+        mass1, mass2 = mchirp, logitq
+        spin1_a = logitspin1_a
+
+    Note that only the final sampling parameters should be listed, even if
+    multiple intermediate transforms are needed. (In the above example, a
+    transform is needed to go from mass1, mass2 to mchirp, q, then another one
+    is needed to go from q to logitq.) These transforms should be specified
+    in separate sections; see ``transforms.read_transforms_from_config`` for
+    details.
+
+    Parameters
+    ----------
+    cp : WorkflowConfigParser
+        An open config parser to read from.
+    section_group : str, optional
+        Append `{section_group}_` to the section name. Default is None.
+    section : str, optional
+        The name of the section. Default is 'sampling_params'.
+
+    Returns
+    -------
+    sampling_params : list
+        The list of sampling parameters to use instead.
+    replaced_params : list
+        The list of variable args to replace in the sampler.
+    """
+    if section_group is not None:
+        section_prefix = '{}_'.format(section_group)
+    else:
+        section_prefix = ''
+    section = section_prefix + section
+    replaced_params = set()
+    sampling_params = set()
+    for args in cp.options(section):
+        map_args = cp.get(section, args)
+        sampling_params.update(set(map(str.strip, map_args.split(','))))
+        replaced_params.update(set(map(str.strip, args.split(','))))
+    return sorted(sampling_params), sorted(replaced_params)
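+
+# A rough standalone illustration of the [sampling_params] mapping described
+# above, using the standard-library ConfigParser in place of a
+# WorkflowConfigParser; the option names and values are hypothetical.
+from configparser import ConfigParser as _ExampleParser
+_example_cp = _ExampleParser()
+_example_cp.read_string(
+    "[sampling_params]\nmass1, mass2 = mchirp, logitq\nspin1_a = logitspin1_a\n")
+_example_sampling = set()
+_example_replaced = set()
+for _args in _example_cp.options('sampling_params'):
+    _example_replaced.update(p.strip() for p in _args.split(','))
+    _example_sampling.update(
+        p.strip() for p in _example_cp.get('sampling_params', _args).split(','))
+# sorted(_example_sampling) -> ['logitq', 'logitspin1_a', 'mchirp']
+# sorted(_example_replaced) -> ['mass1', 'mass2', 'spin1_a']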
+ + + +# +# ============================================================================= +# +# Base model definition +# +# ============================================================================= +# + + +
+[docs]
+class BaseModel(metaclass=ABCMeta):
+    r"""Base class for all models.
+
+    Given some model :math:`h` with parameters :math:`\Theta`, Bayes' theorem
+    states that the probability of observing parameter values :math:`\vartheta`
+    is:
+
+    .. math::
+
+        p(\vartheta|h) = \frac{p(h|\vartheta) p(\vartheta)}{p(h)}.
+
+    Here:
+
+    * :math:`p(\vartheta|h)` is the **posterior** probability;
+
+    * :math:`p(h|\vartheta)` is the **likelihood**;
+
+    * :math:`p(\vartheta)` is the **prior**;
+
+    * :math:`p(h)` is the **evidence**.
+
+    This class defines properties and methods for evaluating the log
+    likelihood, log prior, and log posterior. A set of parameter values is set
+    using the ``update`` method. Calling the class's
+    ``log(likelihood|prior|posterior)`` properties will then evaluate the model
+    at those parameter values.
+
+    Classes that inherit from this class must implement a ``_loglikelihood``
+    function that can be called by ``loglikelihood``.
+
+    Parameters
+    ----------
+    variable_params : (tuple of) string(s)
+        A tuple of parameter names that will be varied.
+    static_params : dict, optional
+        A dictionary of parameter names -> values to keep fixed.
+    prior : callable, optional
+        A callable class or function that computes the log of the prior. If
+        None provided, will use ``_NoPrior``, which returns 0 for all parameter
+        values.
+    sampling_params : list, optional
+        Replace one or more of the ``variable_params`` with the given
+        parameters for sampling.
+    replace_parameters : list, optional
+        The ``variable_params`` to replace with sampling parameters. Must be
+        the same length as ``sampling_params``.
+    sampling_transforms : list, optional
+        List of transforms to use to go between the ``variable_params`` and the
+        sampling parameters. Required if ``sampling_params`` is not None.
+    waveform_transforms : list, optional
+        A list of transforms to convert the ``variable_params`` into something
+        understood by the likelihood model. This is useful if the prior is
+        more easily parameterized in parameters that are different from those
+        in which the likelihood is most easily defined. Since these are used
+        solely for converting parameters, and not for rescaling the parameter
+        space, a Jacobian is not required for these transforms.
+ """ + name = None + + def __init__(self, variable_params, static_params=None, prior=None, + sampling_transforms=None, waveform_transforms=None, **kwargs): + # store variable and static args + self.variable_params = variable_params + self.static_params = static_params + # store prior + if prior is None: + self.prior_distribution = _NoPrior() + elif set(prior.variable_args) != set(variable_params): + raise ValueError("variable params of prior and model must be the " + "same") + else: + self.prior_distribution = prior + # store transforms + self.sampling_transforms = sampling_transforms + self.waveform_transforms = waveform_transforms + # initialize current params to None + self._current_params = None + # initialize a model stats + self._current_stats = ModelStats() + + @property + def variable_params(self): + """Returns the model parameters.""" + return self._variable_params + + @variable_params.setter + def variable_params(self, variable_params): + if isinstance(variable_params, str): + variable_params = (variable_params,) + if not isinstance(variable_params, tuple): + variable_params = tuple(variable_params) + self._variable_params = variable_params + + @property + def static_params(self): + """Returns the model's static arguments.""" + return self._static_params + + @static_params.setter + def static_params(self, static_params): + if static_params is None: + static_params = {} + self._static_params = static_params + + @property + def sampling_params(self): + """Returns the sampling parameters. + + If ``sampling_transforms`` is None, this is the same as the + ``variable_params``. + """ + if self.sampling_transforms is None: + sampling_params = self.variable_params + else: + sampling_params = self.sampling_transforms.sampling_params + return sampling_params + +
+[docs] + def update(self, **params): + """Updates the current parameter positions and resets stats. + + If any sampling transforms are specified, they are applied to the + params before being stored. + """ + # add the static params + values = self.static_params.copy() + values.update(params) + self._current_params = self._transform_params(**values) + self._current_stats = ModelStats()
+ + + @property + def current_params(self): + if self._current_params is None: + raise ValueError("no parameters values currently stored; " + "run update to add some") + return self._current_params + + @property + def default_stats(self): + """The stats that ``get_current_stats`` returns by default.""" + return ['logjacobian', 'logprior', 'loglikelihood'] + self._extra_stats + + @property + def _extra_stats(self): + """Allows child classes to add more stats to the default stats. + + This returns an empty list; classes that inherit should override this + property if they want to add extra stats. + """ + return [] + +
+[docs] + def get_current_stats(self, names=None): + """Return one or more of the current stats as a tuple. + + This function does no computation. It only returns what has already + been calculated. If a stat hasn't been calculated, it will be returned + as ``numpy.nan``. + + Parameters + ---------- + names : list of str, optional + Specify the names of the stats to retrieve. If ``None`` (the + default), will return ``default_stats``. + + Returns + ------- + tuple : + The current values of the requested stats, as a tuple. The order + of the stats is the same as the names. + """ + if names is None: + names = self.default_stats + return self._current_stats.getstats(names)
+ + + @property + def current_stats(self): + """Return the ``default_stats`` as a dict. + + This does no computation. It only returns what has already been + calculated. If a stat hasn't been calculated, it will be returned + as ``numpy.nan``. + + Returns + ------- + dict : + Dictionary of stat names -> current stat values. + """ + return self._current_stats.getstatsdict(self.default_stats) + + def _trytoget(self, statname, fallback, apply_transforms=False, **kwargs): + r"""Helper function to get a stat from ``_current_stats``. + + If the statistic hasn't been calculated, ``_current_stats`` will raise + an ``AttributeError``. In that case, the ``fallback`` function will + be called. If that call is successful, the ``statname`` will be added + to ``_current_stats`` with the returned value. + + Parameters + ---------- + statname : str + The stat to get from ``current_stats``. + fallback : method of self + The function to call if the property call fails. + apply_transforms : bool, optional + Apply waveform transforms to the current parameters before calling + the fallback function. Default is False. + \**kwargs : + Any other keyword arguments are passed through to the function. + + Returns + ------- + float : + The value of the property. + """ + try: + return getattr(self._current_stats, statname) + except AttributeError: + # apply waveform transforms if requested + if apply_transforms and self.waveform_transforms is not None: + self._current_params = transforms.apply_transforms( + self._current_params, self.waveform_transforms, + inverse=False) + val = fallback(**kwargs) + setattr(self._current_stats, statname, val) + return val + + @property + def loglikelihood(self): + """The log likelihood at the current parameters. + + This will initially try to return the ``current_stats.loglikelihood``. + If that raises an ``AttributeError``, will call `_loglikelihood`` to + calculate it and store it to ``current_stats``. + """ + return self._trytoget('loglikelihood', self._loglikelihood, + apply_transforms=True) + + @abstractmethod + def _loglikelihood(self): + """Low-level function that calculates the log likelihood of the current + params.""" + pass + + @property + def logjacobian(self): + """The log jacobian of the sampling transforms at the current postion. + + If no sampling transforms were provided, will just return 0. + + Parameters + ---------- + \**params : + The keyword arguments should specify values for all of the variable + args and all of the sampling args. + + Returns + ------- + float : + The value of the jacobian. + """ + return self._trytoget('logjacobian', self._logjacobian) + + def _logjacobian(self): + """Calculates the logjacobian of the current parameters.""" + if self.sampling_transforms is None: + logj = 0. + else: + logj = self.sampling_transforms.logjacobian( + **self.current_params) + return logj + + @property + def logprior(self): + """Returns the log prior at the current parameters.""" + return self._trytoget('logprior', self._logprior) + + def _logprior(self): + """Calculates the log prior at the current parameters.""" + logj = self.logjacobian + logp = self.prior_distribution(**self.current_params) + logj + if numpy.isnan(logp): + logp = -numpy.inf + return logp + + @property + def logposterior(self): + """Returns the log of the posterior of the current parameter values. + + The logprior is calculated first. If the logprior returns ``-inf`` + (possibly indicating a non-physical point), then the ``loglikelihood`` + is not called. 
+ """ + logp = self.logprior + if logp == -numpy.inf: + return logp + else: + return logp + self.loglikelihood + +
+[docs] + def prior_rvs(self, size=1, prior=None): + """Returns random variates drawn from the prior. + + If the ``sampling_params`` are different from the ``variable_params``, + the variates are transformed to the `sampling_params` parameter space + before being returned. + + Parameters + ---------- + size : int, optional + Number of random values to return for each parameter. Default is 1. + prior : JointDistribution, optional + Use the given prior to draw values rather than the saved prior. + + Returns + ------- + FieldArray + A field array of the random values. + """ + # draw values from the prior + if prior is None: + prior = self.prior_distribution + p0 = prior.rvs(size=size) + # transform if necessary + if self.sampling_transforms is not None: + ptrans = self.sampling_transforms.apply(p0) + # pull out the sampling args + p0 = FieldArray.from_arrays([ptrans[arg] + for arg in self.sampling_params], + names=self.sampling_params) + return p0
+ + + def _transform_params(self, **params): + """Applies sampling transforms and boundary conditions to parameters. + + Parameters + ---------- + \**params : + Key, value pairs of parameters to apply the transforms to. + + Returns + ------- + dict + A dictionary of the transformed parameters. + """ + # apply inverse transforms to go from sampling parameters to + # variable args + if self.sampling_transforms is not None: + params = self.sampling_transforms.apply(params, inverse=True) + # apply boundary conditions + params = self.prior_distribution.apply_boundary_conditions(**params) + return params + + # + # Methods for initiating from a config file. + # +
+[docs] + @staticmethod + def extra_args_from_config(cp, section, skip_args=None, dtypes=None): + """Gets any additional keyword in the given config file. + + Parameters + ---------- + cp : WorkflowConfigParser + Config file parser to read. + section : str + The name of the section to read. + skip_args : list of str, optional + Names of arguments to skip. + dtypes : dict, optional + A dictionary of arguments -> data types. If an argument is found + in the dict, it will be cast to the given datatype. Otherwise, the + argument's value will just be read from the config file (and thus + be a string). + + Returns + ------- + dict + Dictionary of keyword arguments read from the config file. + """ + kwargs = {} + if dtypes is None: + dtypes = {} + if skip_args is None: + skip_args = [] + read_args = [opt for opt in cp.options(section) + if opt not in skip_args] + for opt in read_args: + val = cp.get(section, opt) + # try to cast the value if a datatype was specified for this opt + try: + val = dtypes[opt](val) + except KeyError: + pass + kwargs[opt] = val + return kwargs
+ + +
+[docs] + @staticmethod + def prior_from_config(cp, variable_params, static_params, prior_section, + constraint_section): + """Gets arguments and keyword arguments from a config file. + + Parameters + ---------- + cp : WorkflowConfigParser + Config file parser to read. + variable_params : list + List of variable model parameter names. + static_params : dict + Dictionary of static model parameters and their values. + prior_section : str + Section to read prior(s) from. + constraint_section : str + Section to read constraint(s) from. + + Returns + ------- + pycbc.distributions.JointDistribution + The prior. + """ + # get prior distribution for each variable parameter + logging.info("Setting up priors for each parameter") + dists = distributions.read_distributions_from_config(cp, prior_section) + constraints = distributions.read_constraints_from_config( + cp, constraint_section, static_args=static_params) + return distributions.JointDistribution(variable_params, *dists, + constraints=constraints)
+ + + @classmethod + def _init_args_from_config(cls, cp): + """Helper function for loading parameters. + + This retrieves the prior, variable parameters, static parameterss, + constraints, sampling transforms, and waveform transforms + (if provided). + + Parameters + ---------- + cp : ConfigParser + Config parser to read. + + Returns + ------- + dict : + Dictionary of the arguments. Has keys ``variable_params``, + ``static_params``, ``prior``, and ``sampling_transforms``. If + waveform transforms are in the config file, will also have + ``waveform_transforms``. + """ + section = "model" + prior_section = "prior" + vparams_section = 'variable_params' + sparams_section = 'static_params' + constraint_section = 'constraint' + # check that the name exists and matches + name = cp.get(section, 'name') + if name != cls.name: + raise ValueError("section's {} name does not match mine {}".format( + name, cls.name)) + # get model parameters + variable_params, static_params = distributions.read_params_from_config( + cp, prior_section=prior_section, vargs_section=vparams_section, + sargs_section=sparams_section) + # get prior + prior = cls.prior_from_config( + cp, variable_params, static_params, prior_section, + constraint_section) + args = {'variable_params': variable_params, + 'static_params': static_params, + 'prior': prior} + # try to load sampling transforms + try: + sampling_transforms = SamplingTransforms.from_config( + cp, variable_params) + except NoSectionError: + sampling_transforms = None + args['sampling_transforms'] = sampling_transforms + # get any waveform transforms + if any(cp.get_subsections('waveform_transforms')): + logging.info("Loading waveform transforms") + waveform_transforms = transforms.read_transforms_from_config( + cp, 'waveform_transforms') + args['waveform_transforms'] = waveform_transforms + else: + waveform_transforms = [] + # safety check for spins + # we won't do this if the following exists in the config file + ignore = "no_err_on_missing_cartesian_spins" + check_for_cartesian_spins(1, variable_params, static_params, + waveform_transforms, cp, ignore) + check_for_cartesian_spins(2, variable_params, static_params, + waveform_transforms, cp, ignore) + return args + +
+[docs] + @classmethod + def from_config(cls, cp, **kwargs): + """Initializes an instance of this class from the given config file. + + Parameters + ---------- + cp : WorkflowConfigParser + Config file parser to read. + \**kwargs : + All additional keyword arguments are passed to the class. Any + provided keyword will over ride what is in the config file. + """ + args = cls._init_args_from_config(cp) + # get any other keyword arguments provided in the model section + args.update(cls.extra_args_from_config(cp, "model", + skip_args=['name'])) + args.update(kwargs) + return cls(**args)
+ + +
+[docs] + def write_metadata(self, fp, group=None): + """Writes metadata to the given file handler. + + Parameters + ---------- + fp : pycbc.inference.io.BaseInferenceFile instance + The inference file to write to. + group : str, optional + If provided, the metadata will be written to the attrs specified + by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is + written to the top-level attrs (``fp.attrs``). + """ + attrs = fp.getattrs(group=group) + attrs['model'] = self.name + attrs['variable_params'] = list(map(str, self.variable_params)) + attrs['sampling_params'] = list(map(str, self.sampling_params)) + fp.write_kwargs_to_attrs(attrs, static_params=self.static_params)
+
+ + + +
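+# A minimal sketch of how a concrete model plugs into BaseModel: only
+# _loglikelihood needs to be implemented, after which update() and the
+# log(likelihood|prior|posterior) properties behave as described above.
+# The class name and its fixed return value are made up for illustration.
+class _ExampleConstantModel(BaseModel):
+    name = "example_constant"
+
+    def _loglikelihood(self):
+        return -0.5  # an arbitrary, fixed log likelihood
+
+# _m = _ExampleConstantModel(['x'])
+# _m.update(x=1.0)
+# _m.logposterior  # -> -0.5 (logprior is 0 when no prior is given)
+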
+[docs] +def check_for_cartesian_spins(which, variable_params, static_params, + waveform_transforms, cp, ignore): + """Checks that if any spin parameters exist, cartesian spins also exist. + + This looks for parameters starting with ``spinN`` in the variable and + static params, where ``N`` is either 1 or 2 (specified by the ``which`` + argument). If any parameters are found with those names, the params and + the output of the waveform transforms are checked to see that there is + at least one of ``spinN(x|y|z)``. If not, a ``ValueError`` is raised. + + This check will not be done if the config file has an section given by + the ignore argument. + + Parameters + ---------- + which : {1, 2} + Which component to check for. Must be either 1 or 2. + variable_params : list + List of the variable parameters. + static_params : dict + The dictionary of static params. + waveform_transforms : list + List of the transforms that will be applied to the variable and + static params before being passed to the waveform generator. + cp : ConfigParser + The config file. + ignore : str + The section to check for in the config file. If the section is + present in the config file, the check will not be done. + """ + # don't do this check if the config file has the ignore section + if cp.has_section(ignore): + logging.info("[{}] found in config file; not performing check for " + "cartesian spin{} parameters".format(ignore, which)) + return + errmsg = ( + "Spin parameters {sp} found in variable/static " + "params for component {n}, but no Cartesian spin parameters ({cp}) " + "found in either the variable/static params or " + "the waveform transform outputs. Most waveform " + "generators only recognize Cartesian spin " + "parameters; without them, all spins are set to " + "zero. If you are using spherical spin coordinates, add " + "the following waveform_transform to your config file:\n\n" + "[waveform_transforms-spin{n}x+spin{n}y+spin{n}z]\n" + "name = spherical_to_cartesian\n" + "x = spin{n}x\n" + "y = spin{n}y\n" + "z = spin{n}z\n" + "radial = spin{n}_a\n" + "azimuthal = spin{n}_azimuthal\n" + "polar = spin{n}_polar\n\n" + "Here, spin{n}_a, spin{n}_azimuthal, and spin{n}_polar are the names " + "of your radial, azimuthal, and polar coordinates, respectively. " + "If you intentionally did not include Cartesian spin parameters, " + "(e.g., you are using a custom waveform or model) add\n\n" + "[{ignore}]\n\n" + "to your config file as an empty section and rerun. This check will " + "not be performed in that case.") + allparams = set(variable_params) | set(static_params.keys()) + spinparams = set(p for p in allparams + if p.startswith('spin{}'.format(which))) + if any(spinparams): + cartspins = set('spin{}{}'.format(which, coord) + for coord in ['x', 'y', 'z']) + # add any parameters to all params that will be output by waveform + # transforms + allparams = allparams.union(*[t.outputs for t in waveform_transforms]) + if not any(allparams & cartspins): + raise ValueError(errmsg.format(sp=', '.join(spinparams), + cp=', '.join(cartspins), + n=which, ignore=ignore))
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/base_data.html b/latest/html/_modules/pycbc/inference/models/base_data.html new file mode 100644 index 00000000000..23eaf022f72 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/base_data.html @@ -0,0 +1,296 @@ + + + + + + pycbc.inference.models.base_data — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.base_data

+# Copyright (C) 2018  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""Base classes for mofdels with data.
+"""
+
+import numpy
+from abc import (ABCMeta, abstractmethod)
+from .base import BaseModel
+
+
+
+[docs]
+class BaseDataModel(BaseModel, metaclass=ABCMeta):
+    r"""Base class for models that require data and a waveform generator.
+
+    This adds properties for the log of the likelihood that the data contain
+    noise, ``lognl``, and the log likelihood ratio ``loglr``.
+
+    Classes that inherit from this class must define ``_loglr`` and ``_lognl``
+    functions, in addition to the ``_loglikelihood`` requirement inherited from
+    ``BaseModel``.
+
+    Parameters
+    ----------
+    variable_params : (tuple of) string(s)
+        A tuple of parameter names that will be varied.
+    data : dict
+        A dictionary of data, in which the keys are the detector names and the
+        values are the data.
+    recalibration : dict of pycbc.calibration.Recalibrate, optional
+        Dictionary of detectors -> recalibration class instances for
+        recalibrating data.
+    gates : dict of tuples, optional
+        Dictionary of detectors -> tuples specifying gate times. The
+        sort of thing returned by `pycbc.gate.gates_from_cli`.
+    injection_file : str, optional
+        If an injection was added to the data, the name of the injection file
+        used. If provided, the injection parameters will be written to
+        file when ``write_metadata`` is called.
+
+    \**kwargs :
+        All other keyword arguments are passed to ``BaseModel``.
+
+
+    See ``BaseModel`` for additional attributes and properties.
+    """
+
+    def __init__(self, variable_params, data, recalibration=None, gates=None,
+                 injection_file=None, no_save_data=False, **kwargs):
+        self._data = None
+        self.data = data
+        self.recalibration = recalibration
+        self.no_save_data = no_save_data
+        self.gates = gates
+        self.injection_file = injection_file
+        super(BaseDataModel, self).__init__(variable_params, **kwargs)
+
+    @property
+    def data(self):
+        """dict: Dictionary mapping detector names to data."""
+        return self._data
+
+    @data.setter
+    def data(self, data):
+        """Store a copy of the data."""
+        self._data = {det: d.copy() for (det, d) in data.items()}
+
+    @property
+    def _extra_stats(self):
+        """Adds ``loglr`` and ``lognl`` to the ``default_stats``."""
+        return ['loglr', 'lognl']
+
+    @property
+    def lognl(self):
+        """The log likelihood of the model assuming the data is noise.
+
+        This will initially try to return the ``current_stats.lognl``.
+        If that raises an ``AttributeError``, will call ``_lognl`` to
+        calculate it and store it to ``current_stats``.
+        """
+        return self._trytoget('lognl', self._lognl)
+
+    @abstractmethod
+    def _lognl(self):
+        """Low-level function that calculates the lognl."""
+        pass
+
+    @property
+    def loglr(self):
+        """The log likelihood ratio at the current parameters,
+        or the inner products <s|h> and <h|h> if the flag
+        ``self.return_sh_hh`` is set to True.
+
+        This will initially try to return the ``current_stats.loglr``.
+        If that raises an ``AttributeError``, will call ``_loglr`` to
+        calculate it and store it to ``current_stats``.
+        """
+        return self._trytoget('loglr', self._loglr, apply_transforms=True)
+
+    @abstractmethod
+    def _loglr(self):
+        """Low-level function that calculates the loglr."""
+        pass
+
+    @property
+    def logplr(self):
+        """Returns the log of the prior-weighted likelihood ratio at the
+        current parameter values.
+
+        The logprior is calculated first. If the logprior returns ``-inf``
+        (possibly indicating a non-physical point), then ``loglr`` is not
+        called.
+        """
+        logp = self.logprior
+        if logp == -numpy.inf:
+            return logp
+        else:
+            return logp + self.loglr
+
+    @property
+    def detectors(self):
+        """list: Returns the detectors used."""
+        return list(self._data.keys())
+
+[docs] + def write_metadata(self, fp, group=None): + """Adds data to the metadata that's written. + + Parameters + ---------- + fp : pycbc.inference.io.BaseInferenceFile instance + The inference file to write to. + group : str, optional + If provided, the metadata will be written to the attrs specified + by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is + written to the top-level attrs (``fp.attrs``). + """ + super().write_metadata(fp, group=group) + if not self.no_save_data: + fp.write_stilde(self.data, group=group) + # save injection parameters + if self.injection_file is not None: + fp.write_injections(self.injection_file, group=group)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/brute_marg.html b/latest/html/_modules/pycbc/inference/models/brute_marg.html new file mode 100644 index 00000000000..6de07421625 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/brute_marg.html @@ -0,0 +1,372 @@ + + + + + + pycbc.inference.models.brute_marg — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.brute_marg

+# Copyright (C) 2020 Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This module provides model classes that do brute force marginalization
+at the likelihood level.
+"""
+import math
+import logging
+import numpy
+
+from pycbc.pool import BroadcastPool as Pool
+from scipy.special import logsumexp
+
+from .gaussian_noise import BaseGaussianNoise
+from .tools import draw_sample
+
+_model = None
+
+[docs] +class likelihood_wrapper(object): + def __init__(self, model): + global _model + _model = model + + def __call__(self, params): + global _model + _model.update(**params) + loglr = _model.loglr + return loglr, _model.current_stats
+ + +
+[docs] +class BruteParallelGaussianMarginalize(BaseGaussianNoise): + name = "brute_parallel_gaussian_marginalize" + + def __init__(self, variable_params, + cores=10, + base_model=None, + marginalize_phase=None, + **kwds): + super().__init__(variable_params, **kwds) + + from pycbc.inference.models import models + self.model = models[base_model](variable_params, **kwds) + + self.call = likelihood_wrapper(self.model) + + # size of pool for each likelihood call + self.pool = Pool(int(cores)) + + # Only one for now, but can be easily extended + self.phase = None + if marginalize_phase: + samples = int(marginalize_phase) + self.phase = numpy.linspace(0, 2.0 * numpy.pi, samples) + + @property + def _extra_stats(self): + stats = self.model._extra_stats + stats.append('maxl_phase') + if 'maxl_loglr' not in stats: + stats.append('maxl_loglr') + return stats + + def _loglr(self): + if self.phase is not None: + params = [] + for p in self.phase: + pref = self.current_params.copy() + pref['coa_phase'] = p + params.append(pref) + vals = list(self.pool.map(self.call, params)) + loglr = numpy.array([v[0] for v in vals]) + # get the maxl values + if 'maxl_loglr' not in self.model._extra_stats: + maxl_loglrs = loglr + else: + maxl_loglrs = numpy.array([v[1]['maxl_loglr'] for v in vals]) + maxidx = maxl_loglrs.argmax() + maxstats = vals[maxidx][1] + maxphase = self.phase[maxidx] + # set the stats + for stat in maxstats: + setattr(self._current_stats, stat, maxstats[stat]) + self._current_stats.maxl_phase = maxphase + self._current_stats.maxl_loglr = maxl_loglrs[maxidx] + # calculate the marginal loglr and return + return logsumexp(loglr) - numpy.log(len(self.phase))
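+
+# A small standalone sketch of the marginalization step at the end of
+# _loglr above: the likelihood (not the log likelihood) is averaged over
+# the uniform phase grid, done stably in log space with logsumexp. The
+# loglr values below are made up for illustration.
+_example_loglr = numpy.array([10.0, 12.0, 11.5, 9.0])
+_example_marg = logsumexp(_example_loglr) - numpy.log(len(_example_loglr))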
+ + + +
+[docs] +class BruteLISASkyModesMarginalize(BaseGaussianNoise): + name = "brute_lisa_sky_modes_marginalize" + + def __init__(self, variable_params, + cores=1, + loop_polarization=False, + base_model=None, + **kwds): + super().__init__(variable_params, **kwds) + + from pycbc.inference.models import models + kwds.update(models[base_model].extra_args_from_config( + kwds['config_object'], + "model", + skip_args=[]) + ) + + self.model = models[base_model](variable_params, **kwds) + + self.call = likelihood_wrapper(self.model) + + # size of pool for each likelihood call + if cores > 1: + self.pool = Pool(int(cores)) + self.mapfunc = self.pool.map + else: + self.pool = None + self.mapfunc = map + + # Do I explicitly check the polarization + pi/2 points + # We could also add other arguments here, ie only check longitude + # or latitude symmetry points. + if loop_polarization: + self.num_sky_modes = 16 + else: + self.num_sky_modes = 8 + + self.reconstruct_sky_points = False + + @property + def _extra_stats(self): + stats = self.model._extra_stats + return stats + + def _loglr(self): + params = [] + for sym_num in range(self.num_sky_modes): + pref = self.current_params.copy() + self._apply_sky_point_rotation(pref, sym_num) + params.append(pref) + + vals = list(self.mapfunc(self.call, params)) + loglr = numpy.array([v[0] for v in vals]) + + if self.reconstruct_sky_points: + return loglr + + max_llr_idx = loglr.argmax() + max_llr = loglr[max_llr_idx] + marg_lrfac = sum([math.exp(llr - max_llr) for llr in loglr]) + marg_llr = max_llr + math.log(marg_lrfac/self.num_sky_modes) + + # set the stats + for sym_num in range(self.num_sky_modes): + setattr(self._current_stats, f'llr_mode_{sym_num}', loglr[sym_num]) + + return marg_llr + + def _apply_sky_point_rotation(self, pref, sky_num): + """ Apply the sky point rotation for mode sky_num to parameters pref + """ + lambdal = pref['eclipticlongitude'] + beta = pref['eclipticlatitude'] + psi = pref['polarization'] + inc = pref['inclination'] + + pol_num = sky_num // 8 + sky_num = sky_num % 8 + long_num = sky_num % 4 + lat_num = sky_num // 4 + + # Apply latitude symmetry mode + if lat_num: + beta = - beta + inc = numpy.pi - inc + psi = numpy.pi - psi + + # Apply longitudonal symmetry mode + lambdal = (lambdal + long_num * 0.5 * numpy.pi) % (2*numpy.pi) + psi = (psi + long_num * 0.5 * numpy.pi) % (2*numpy.pi) + + # Apply additional polarization mode (shouldn't be needed) + if pol_num: + psi = psi + (math.pi / 2.) + + pref['eclipticlongitude'] = lambdal + pref['eclipticlatitude'] = beta + pref['polarization'] = psi + pref['inclination'] = inc + +
+[docs] + @classmethod + def from_config(cls, cp, **kwargs): + kwargs['config_object'] = cp + return super(BruteLISASkyModesMarginalize, cls).from_config( + cp, + **kwargs + )
+ + +
+[docs] + def reconstruct(self, seed=None): + """ Reconstruct a point from unwrapping the 8-fold sky symmetry + """ + if seed: + numpy.random.seed(seed) + rec = {} + + logging.info('Reconstruct LISA sky mode symmetry') + self.reconstruct_sky_points = True + loglr = self.loglr + xl = draw_sample(loglr) + logging.info('Found point %d', xl) + # Undo rotations + pref = self.current_params.copy() + self._apply_sky_point_rotation(pref, xl) + + for val in ['polarization', 'eclipticlongitude', 'eclipticlatitude', + 'inclination']: + rec[val] = pref[val] + rec['loglr'] = loglr[xl] + rec['loglikelihood'] = self.lognl + rec['loglr'] + self.reconstruct_sky_points = False + return self.model.reconstruct(seed=seed, rec=rec)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/data_utils.html b/latest/html/_modules/pycbc/inference/models/data_utils.html new file mode 100644 index 00000000000..2a9cefffccc --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/data_utils.html @@ -0,0 +1,712 @@ + + + + + + pycbc.inference.models.data_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.data_utils

+# Copyright (C) 2018  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""Utilities for loading data for models.
+"""
+
+import logging
+from argparse import ArgumentParser
+from time import sleep
+import numpy
+try:
+    from mpi4py import MPI
+except ImportError:
+    MPI = None
+
+from pycbc.types import MultiDetOptionAction
+from pycbc.psd import (insert_psd_option_group_multi_ifo,
+                       from_cli_multi_ifos as psd_from_cli_multi_ifos,
+                       verify_psd_options_multi_ifo)
+from pycbc import strain
+from pycbc.strain import (gates_from_cli, psd_gates_from_cli,
+                          apply_gates_to_td, apply_gates_to_fd,
+                          verify_strain_options_multi_ifo)
+from pycbc import dq
+
+
+
+[docs]
+def strain_from_cli_multi_ifos(*args, **kwargs):
+    """Wrapper around strain.from_cli_multi_ifos that tries a few times before
+    quitting.
+
+    When running in a parallel environment, multiple concurrent queries to the
+    segment database can cause timeout errors. If that happens, this will
+    sleep for a few seconds, then try again a few times before giving up.
+    """
+    count = 0
+    while count < 3:
+        try:
+            return strain.from_cli_multi_ifos(*args, **kwargs)
+        except RuntimeError as e:
+            exception = e
+            count += 1
+            sleep(10)
+    # if we get to here, we've tried 3 times and still got an error, so exit
+    raise exception
+ + + +# +# ============================================================================= +# +# Utilities for gravitational-wave data +# +# ============================================================================= +# +
+[docs]
+class NoValidDataError(Exception):
+    """This should be raised if a continuous segment of valid data could not
+    be found.
+    """
+    pass
+ + + +
+[docs] +def create_data_parser(): + """Creates an argument parser for loading GW data.""" + parser = ArgumentParser() + # add data options + parser.add_argument("--instruments", type=str, nargs="+", required=True, + help="Instruments to analyze, eg. H1 L1.") + parser.add_argument("--trigger-time", type=float, default=0., + help="Reference GPS time (at geocenter) from which " + "the (anlaysis|psd)-(start|end)-time options are " + "measured. The integer seconds will be used. " + "Default is 0; i.e., if not provided, " + "the analysis and psd times should be in GPS " + "seconds.") + parser.add_argument("--analysis-start-time", type=int, required=True, + nargs='+', action=MultiDetOptionAction, + metavar='IFO:TIME', + help="The start time to use for the analysis, " + "measured with respect to the trigger-time. " + "If psd-inverse-length is provided, the given " + "start time will be padded by half that length " + "to account for wrap-around effects.") + parser.add_argument("--analysis-end-time", type=int, required=True, + nargs='+', action=MultiDetOptionAction, + metavar='IFO:TIME', + help="The end time to use for the analysis, " + "measured with respect to the trigger-time. " + "If psd-inverse-length is provided, the given " + "end time will be padded by half that length " + "to account for wrap-around effects.") + parser.add_argument("--psd-start-time", type=int, default=None, + nargs='+', action=MultiDetOptionAction, + metavar='IFO:TIME', + help="Start time to use for PSD estimation, measured " + "with respect to the trigger-time.") + parser.add_argument("--psd-end-time", type=int, default=None, + nargs='+', action=MultiDetOptionAction, + metavar='IFO:TIME', + help="End time to use for PSD estimation, measured " + "with respect to the trigger-time.") + parser.add_argument("--data-conditioning-low-freq", type=float, + nargs="+", action=MultiDetOptionAction, + metavar='IFO:FLOW', dest="low_frequency_cutoff", + help="Low frequency cutoff of the data. Needed for " + "PSD estimation and when creating fake strain. " + "If not provided, will use the model's " + "low-frequency-cutoff.") + insert_psd_option_group_multi_ifo(parser) + strain.insert_strain_option_group_multi_ifo(parser, gps_times=False) + strain.add_gate_option_group(parser) + # add arguments for dq + dqgroup = parser.add_argument_group("Options for quering data quality " + "(DQ)") + dqgroup.add_argument('--dq-segment-name', default='DATA', + help='The status flag to query for data quality. ' + 'Default is "DATA".') + dqgroup.add_argument('--dq-source', choices=['any', 'GWOSC', 'dqsegdb'], + default='any', + help='Where to look for DQ information. If "any" ' + '(the default) will first try GWOSC, then ' + 'dqsegdb.') + dqgroup.add_argument('--dq-server', default='https://segments.ligo.org', + help='The server to use for dqsegdb.') + dqgroup.add_argument('--veto-definer', default=None, + help='Path to a veto definer file that defines ' + 'groups of flags, which themselves define a set ' + 'of DQ segments.') + return parser
+ + + +
+[docs]
+def check_validtimes(detector, gps_start, gps_end, shift_to_valid=False,
+                     max_shift=None, segment_name='DATA',
+                     **kwargs):
+    r"""Checks DQ server to see if the given times are in a valid segment.
+
+    If the ``shift_to_valid`` flag is provided, the times will be shifted left
+    or right to try to find a continuous valid block nearby. The shifting
+    starts by shifting the times left by 1 second. If that does not work, it
+    shifts the times right by one second. This continues, increasing the shift
+    time by 1 second, until a valid block is found, or until the shift size
+    exceeds ``max_shift``.
+
+    If the given times are not in a continuous valid segment, or a valid block
+    cannot be found nearby, a ``NoValidDataError`` is raised.
+
+    Parameters
+    ----------
+    detector : str
+        The name of the detector to query; e.g., 'H1'.
+    gps_start : int
+        The GPS start time of the segment to query.
+    gps_end : int
+        The GPS end time of the segment to query.
+    shift_to_valid : bool, optional
+        If True, will try to shift the gps start and end times to the nearest
+        continuous valid segment of data. Default is False.
+    max_shift : int, optional
+        The maximum number of seconds to try to shift left or right to find
+        a valid segment. Default is ``gps_end - gps_start``.
+    segment_name : str, optional
+        The status flag to query; passed to :py:func:`pycbc.dq.query_flag`.
+        Default is "DATA".
+    \**kwargs :
+        All other keyword arguments are passed to
+        :py:func:`pycbc.dq.query_flag`.
+
+    Returns
+    -------
+    use_start : int
+        The start time to use. If ``shift_to_valid`` is True, this may differ
+        from the given GPS start time.
+    use_end : int
+        The end time to use. If ``shift_to_valid`` is True, this may differ
+        from the given GPS end time.
+    """
+    # expand the times checked in case we need to shift
+    if max_shift is None:
+        max_shift = int(gps_end - gps_start)
+    check_start = gps_start - max_shift
+    check_end = gps_end + max_shift
+    # if we're running in an MPI environment and we're not the parent process,
+    # we'll wait before querying the segment database. This will result in
+    # getting the segments from the cache, so as not to overload the database
+    if MPI is not None and (MPI.COMM_WORLD.Get_size() > 1 and
+                            MPI.COMM_WORLD.Get_rank() != 0):
+        # we'll wait for 2 minutes
+        sleep(120)
+    validsegs = dq.query_flag(detector, segment_name, check_start,
+                              check_end, cache=True,
+                              **kwargs)
+    use_start = gps_start
+    use_end = gps_end
+    # shift if necessary
+    if shift_to_valid:
+        shiftsize = 1
+        while (use_start, use_end) not in validsegs and shiftsize < max_shift:
+            # try shifting left
+            use_start = gps_start - shiftsize
+            use_end = gps_end - shiftsize
+            if (use_start, use_end) not in validsegs:
+                # try shifting right
+                use_start = gps_start + shiftsize
+                use_end = gps_end + shiftsize
+            shiftsize += 1
+    # check that we have a valid range
+    if (use_start, use_end) not in validsegs:
+        raise NoValidDataError("Could not find a continuous valid segment "
+                               "in detector {}".format(detector))
+    return use_start, use_end
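+
+# A simplified sketch of the shifting search described above, with a
+# hypothetical is_valid(start, end) predicate standing in for the
+# dq.query_flag lookup; the real function checks the unshifted times first
+# and tests membership against the queried segment list instead.
+def _example_shift_to_valid(gps_start, gps_end, is_valid, max_shift):
+    shiftsize = 1
+    while shiftsize < max_shift:
+        # try shifting left, then right, by the current amount
+        for sign in (-1, 1):
+            start = gps_start + sign * shiftsize
+            end = gps_end + sign * shiftsize
+            if is_valid(start, end):
+                return start, end
+        shiftsize += 1
+    raise NoValidDataError("no nearby continuous valid segment found")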
+ + + +
+[docs] +def detectors_with_valid_data(detectors, gps_start_times, gps_end_times, + pad_data=None, err_on_missing_detectors=False, + **kwargs): + r"""Determines which detectors have valid data. + + Parameters + ---------- + detectors : list of str + Names of the detector names to check. + gps_start_times : dict + Dictionary of detector name -> start time listing the GPS start times + of the segment to check for each detector. + gps_end_times : dict + Dictionary of detector name -> end time listing the GPS end times of + the segment to check for each detector. + pad_data : dict, optional + Dictionary of detector name -> pad time to add to the beginning/end of + the GPS start/end times before checking. A pad time for every detector + in ``detectors`` must be given. Default (None) is to apply no pad to + the times. + err_on_missing_detectors : bool, optional + If True, a ``NoValidDataError`` will be raised if any detector does not + have continous valid data in its requested segment. Otherwise, the + detector will not be included in the returned list of detectors with + valid data. Default is False. + \**kwargs : + All other keyword arguments are passed to ``check_validtimes``. + + Returns + ------- + dict : + A dictionary of detector name -> valid times giving the detectors with + valid data and their segments. If ``shift_to_valid`` was passed to + ``check_validtimes`` this may not be the same as the input segments. If + no valid times could be found for a detector (and + ``err_on_missing_detectors`` is False), it will not be included in the + returned dictionary. + """ + if pad_data is None: + pad_data = {det: 0 for det in detectors} + dets_with_data = {} + for det in detectors: + logging.info("Checking that %s has valid data in the requested " + "segment", det) + try: + pad = pad_data[det] + start, end = check_validtimes(det, gps_start_times[det]-pad, + gps_end_times[det]+pad, + **kwargs) + dets_with_data[det] = (start+pad, end-pad) + except NoValidDataError as e: + if err_on_missing_detectors: + raise e + logging.warning("WARNING: Detector %s will not be used in " + "the analysis, as it does not have " + "continuous valid data that spans the " + "segment [%d, %d).", det, gps_start_times[det]-pad, + gps_end_times[det]+pad) + return dets_with_data
+ + + +
+[docs] +def check_for_nans(strain_dict): + """Checks if any data in a dictionary of strains has NaNs. + + If any NaNs are found, a ``ValueError`` is raised. + + Parameters + ---------- + strain_dict : dict + Dictionary of detectors -> + :py:class:`pycbc.types.timeseries.TimeSeries`. + """ + for det, ts in strain_dict.items(): + if numpy.isnan(ts.numpy()).any(): + raise ValueError("NaN found in strain from {}".format(det))
+ + + +
+[docs] +def data_opts_from_config(cp, section, filter_flow): + """Loads data options from a section in a config file. + + Parameters + ---------- + cp : WorkflowConfigParser + Config file to read. + section : str + The section to read. All options in the section will be loaded as-if + they wre command-line arguments. + filter_flow : dict + Dictionary of detectors -> inner product low frequency cutoffs. + If a `data-conditioning-low-freq` cutoff wasn't provided for any + of the detectors, these values will be used. Otherwise, the + data-conditioning-low-freq must be less than the inner product cutoffs. + If any are not, a ``ValueError`` is raised. + + Returns + ------- + opts : parsed argparse.ArgumentParser + An argument parser namespace that was constructed as if the options + were specified on the command line. + """ + # convert the section options into a command-line options + optstr = cp.section_to_cli(section) + # create a fake parser to parse them + parser = create_data_parser() + # parse the options + opts = parser.parse_args(optstr.split()) + # figure out the times to use + logging.info("Determining analysis times to use") + opts.trigger_time = int(opts.trigger_time) + gps_start = opts.analysis_start_time.copy() + gps_end = opts.analysis_end_time.copy() + for det in opts.instruments: + gps_start[det] += opts.trigger_time + gps_end[det] += opts.trigger_time + if opts.psd_inverse_length[det] is not None: + pad = int(numpy.ceil(opts.psd_inverse_length[det] / 2)) + logging.info("Padding %s analysis start and end times by %d " + "(= psd-inverse-length/2) seconds to " + "account for PSD wrap around effects.", + det, pad) + else: + pad = 0 + gps_start[det] -= pad + gps_end[det] += pad + if opts.psd_start_time[det] is not None: + opts.psd_start_time[det] += opts.trigger_time + if opts.psd_end_time[det] is not None: + opts.psd_end_time[det] += opts.trigger_time + opts.gps_start_time = gps_start + opts.gps_end_time = gps_end + # check for the frequencies + low_freq_cutoff = filter_flow.copy() + if opts.low_frequency_cutoff: + # add in any missing detectors + low_freq_cutoff.update({det: opts.low_frequency_cutoff[det] + for det in opts.instruments + if opts.low_frequency_cutoff[det] is not None}) + # make sure the data conditioning low frequency cutoff is < than + # the matched filter cutoff + if any(low_freq_cutoff[det] > filter_flow[det] for det in filter_flow): + raise ValueError("data conditioning low frequency cutoff must " + "be less than the filter low frequency " + "cutoff") + opts.low_frequency_cutoff = low_freq_cutoff + + # verify options are sane + verify_psd_options_multi_ifo(opts, parser, opts.instruments) + verify_strain_options_multi_ifo(opts, parser, opts.instruments) + return opts
+ + + +
+[docs] +def data_from_cli(opts, check_for_valid_times=False, + shift_psd_times_to_valid=False, + err_on_missing_detectors=False): + """Loads the data needed for a model from the given command-line options. + + Gates specified on the command line are also applied. + + Parameters + ---------- + opts : ArgumentParser parsed args + Argument options parsed from a command line string (the sort of thing + returned by `parser.parse_args`). + check_for_valid_times : bool, optional + Check that valid data exists in the requested gps times. Default is + False. + shift_psd_times_to_valid : bool, optional + If estimating the PSD from data, shift the PSD times to a valid + segment if needed. Default is False. + err_on_missing_detectors : bool, optional + Raise a NoValidDataError if any detector does not have valid data. + Otherwise, a warning is printed, and that detector is skipped. + + Returns + ------- + strain_dict : dict + Dictionary of detectors -> time series strain. + psd_strain_dict : dict or None + If ``opts.psd_(start|end)_time`` were set, a dictionary of + detectors -> time series data to use for PSD estimation. Otherwise, + ``None``. + """ + # get gates to apply + gates = gates_from_cli(opts) + psd_gates = psd_gates_from_cli(opts) + + # get strain time series + instruments = opts.instruments + + # validate times + if check_for_valid_times: + dets_with_data = detectors_with_valid_data( + instruments, opts.gps_start_time, opts.gps_end_time, + pad_data=opts.pad_data, + err_on_missing_detectors=err_on_missing_detectors, + shift_to_valid=False, + segment_name=opts.dq_segment_name, source=opts.dq_source, + server=opts.dq_server, veto_definer=opts.veto_definer) + # reset instruments to only be those with valid data + instruments = list(dets_with_data.keys()) + + strain_dict = strain_from_cli_multi_ifos(opts, instruments, + precision="double") + # apply gates if not waiting to overwhiten + if not opts.gate_overwhitened: + strain_dict = apply_gates_to_td(strain_dict, gates) + + # check that there aren't nans in the data + check_for_nans(strain_dict) + + # get strain time series to use for PSD estimation + # if the user has not given the PSD time options, the analysis data is + # also used for PSD estimation + if opts.psd_start_time and opts.psd_end_time: + logging.info("Will generate a different time series for PSD " + "estimation") + if check_for_valid_times: + psd_times = detectors_with_valid_data( + instruments, opts.psd_start_time, opts.psd_end_time, + pad_data=opts.pad_data, + err_on_missing_detectors=err_on_missing_detectors, + shift_to_valid=shift_psd_times_to_valid, + segment_name=opts.dq_segment_name, source=opts.dq_source, + server=opts.dq_server, veto_definer=opts.veto_definer) + # remove detectors from the strain dict that did not have valid + # times for PSD estimation + for det in set(strain_dict.keys())-set(psd_times.keys()): + _ = strain_dict.pop(det) + # reset instruments to only be those with valid data + instruments = list(psd_times.keys()) + else: + psd_times = {det: (opts.psd_start_time[det], + opts.psd_end_time[det]) + for det in instruments} + psd_strain_dict = {} + for det, (psd_start, psd_end) in psd_times.items(): + opts.gps_start_time = psd_start + opts.gps_end_time = psd_end + psd_strain_dict.update( + strain_from_cli_multi_ifos(opts, [det], precision="double")) + # apply any gates + logging.info("Applying gates to PSD data") + psd_strain_dict = apply_gates_to_td(psd_strain_dict, psd_gates) + # check that there aren't nans in the psd data + check_for_nans(psd_strain_dict) + elif opts.psd_start_time
or opts.psd_end_time: + raise ValueError("Must give psd-start-time and psd-end-time") + else: + psd_strain_dict = None + + # check that we have data left to analyze + if instruments == []: + raise NoValidDataError("No valid data could be found in any of the " + "requested instruments.") + + return strain_dict, psd_strain_dict
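Continuing the hypothetical ``opts`` from the sketch above, the strain (and, if PSD times were given, separate PSD-estimation data) can then be loaded:

strain_dict, psd_strain_dict = data_from_cli(
    opts, check_for_valid_times=True, err_on_missing_detectors=False)
# strain_dict: {detector: TimeSeries}; psd_strain_dict is None unless
# psd-start-time/psd-end-time were set in the [data] section.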
+ + + +
+[docs] +def fd_data_from_strain_dict(opts, strain_dict, psd_strain_dict=None): + """Converts a dictionary of time series to the frequency domain, and gets + the PSDs. + + Parameters + ---------- + opts : ArgumentParser parsed args + Argument options parsed from a command line string (the sort of thing + returned by `parser.parse_args`). + strain_dict : dict + Dictionary of detectors -> time series data. + psd_strain_dict : dict, optional + Dictionary of detectors -> time series data to use for PSD estimation. + If not provided, will use the ``strain_dict``. This is + ignored if ``opts.psd_estimation`` is not set. See + :py:func:`pycbc.psd.psd_from_cli_multi_ifos` for details. + + Returns + ------- + stilde_dict : dict + Dictionary of detectors -> frequency series data. + psd_dict : dict + Dictionary of detectors -> frequency-domain PSDs. + """ + # FFT the strain and save the length and delta_f of each detector's + # frequency series to dicts + stilde_dict = {} + length_dict = {} + delta_f_dict = {} + for det, tsdata in strain_dict.items(): + stilde_dict[det] = tsdata.to_frequencyseries() + length_dict[det] = len(stilde_dict[det]) + delta_f_dict[det] = stilde_dict[det].delta_f + + if psd_strain_dict is None: + psd_strain_dict = strain_dict + + # get PSD as frequency series + psd_dict = psd_from_cli_multi_ifos( + opts, length_dict, delta_f_dict, opts.low_frequency_cutoff, + list(psd_strain_dict.keys()), strain_dict=psd_strain_dict, + precision="double") + + return stilde_dict, psd_dict
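Again continuing the hypothetical objects from the sketches above, this yields the frequency-domain data and PSDs used by the likelihood:

stilde_dict, psd_dict = fd_data_from_strain_dict(opts, strain_dict,
                                                 psd_strain_dict)
# stilde_dict: {detector: FrequencySeries}; psd_dict: {detector: PSD}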
+ + + +
+[docs] +def gate_overwhitened_data(stilde_dict, psd_dict, gates): + """Applies gates to overwhitened data. + + Parameters + ---------- + stilde_dict : dict + Dictionary of detectors -> frequency series data to apply the gates to. + psd_dict : dict + Dictionary of detectors -> PSD to use for overwhitening. + gates : dict + Dictionary of detectors -> gates. + + Returns + ------- + dict : + Dictionary of detectors -> frequency series data with the gates + applied after overwhitening. The returned data is not overwhitened. + """ + logging.info("Applying gates to overwhitened data") + # overwhiten the data + out = {} + for det in gates: + out[det] = stilde_dict[det] / psd_dict[det] + # now apply the gate + out = apply_gates_to_fd(out, gates) + # now unwhiten + for det in gates: + out[det] *= psd_dict[det] + return out
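The sequence is: overwhiten (divide by the PSD), apply the gates, then multiply the PSD back in, so the returned series are not left overwhitened. A hypothetical call reusing the objects from the sketches above, assuming ``opts.gate`` holds the detector -> gate specifications:

if opts.gate_overwhitened and opts.gate is not None:
    stilde_dict = gate_overwhitened_data(stilde_dict, psd_dict, opts.gate)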
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/gated_gaussian_noise.html b/latest/html/_modules/pycbc/inference/models/gated_gaussian_noise.html new file mode 100644 index 00000000000..6aae0ae1dc2 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/gated_gaussian_noise.html @@ -0,0 +1,953 @@ + + + + + + pycbc.inference.models.gated_gaussian_noise — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.gated_gaussian_noise

+# Copyright (C) 2020  Collin Capano and Shilpa Kastha
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This module provides model classes that assume the noise is Gaussian and
+introduces a gate to remove given times from the data, using the inpainting
+method to fill the removed part such that it does not enter the likelihood.
+"""
+
+from abc import abstractmethod
+import logging
+import numpy
+from scipy import special
+
+from pycbc.waveform import (NoWaveformError, FailedWaveformError)
+from pycbc.types import FrequencySeries
+from pycbc.detector import Detector
+from pycbc.pnutils import hybrid_meco_frequency
+from pycbc.waveform.utils import time_from_frequencyseries
+from pycbc.waveform import generator
+from pycbc.filter import highpass
+from .gaussian_noise import (BaseGaussianNoise, create_waveform_generator)
+from .base_data import BaseDataModel
+from .data_utils import fd_data_from_strain_dict
+
+
+
+[docs] +class BaseGatedGaussian(BaseGaussianNoise): + r"""Base model for gated gaussian. + + Provides additional routines for applying a time-domain gate to data. + See :py:class:`GatedGaussianNoise` for more details. + """ + def __init__(self, variable_params, data, low_frequency_cutoff, psds=None, + high_frequency_cutoff=None, normalize=False, + static_params=None, highpass_waveforms=False, **kwargs): + # we'll want the time-domain data, so store that + self._td_data = {} + # cache the overwhitened data + self._overwhitened_data = {} + # cache the current gated data + self._gated_data = {} + # highpass waveforms with the given frequency + self.highpass_waveforms = highpass_waveforms + if self.highpass_waveforms: + logging.info("Will highpass waveforms at %f Hz", + highpass_waveforms) + # set up the boiler-plate attributes + super().__init__( + variable_params, data, low_frequency_cutoff, psds=psds, + high_frequency_cutoff=high_frequency_cutoff, normalize=normalize, + static_params=static_params, **kwargs) + +
+[docs] + @classmethod + def from_config(cls, cp, data_section='data', data=None, psds=None, + **kwargs): + """Adds highpass filtering to keyword arguments based on config file. + """ + if cp.has_option(data_section, 'strain-high-pass') and \ + 'highpass_waveforms' not in kwargs: + kwargs['highpass_waveforms'] = float(cp.get(data_section, + 'strain-high-pass')) + return super().from_config(cp, data_section=data_section, + data=data, psds=psds, + **kwargs)
+ + + @BaseDataModel.data.setter + def data(self, data): + """Store a copy of the FD and TD data.""" + BaseDataModel.data.fset(self, data) + # store the td version + self._td_data = {det: d.to_timeseries() for det, d in data.items()} + + @property + def td_data(self): + """The data in the time domain.""" + return self._td_data + + @BaseGaussianNoise.psds.setter + def psds(self, psds): + """Sets the psds, and calculates the weight and norm from them. + The data and the low and high frequency cutoffs must be set first. + """ + # check that the data has been set + if self._data is None: + raise ValueError("No data set") + if self._f_lower is None: + raise ValueError("low frequency cutoff not set") + if self._f_upper is None: + raise ValueError("high frequency cutoff not set") + # make sure the relevant caches are cleared + self._psds.clear() + self._invpsds.clear() + self._gated_data.clear() + # store the psds + for det, d in self._data.items(): + if psds is None: + # No psd means assume white PSD; note self._N is a + # per-detector dict set by BaseGaussianNoise + p = FrequencySeries(numpy.ones(int(self._N[det]/2+1)), + delta_f=d.delta_f) + else: + # copy for storage + p = psds[det].copy() + self._psds[det] = p + # we'll store the weight to apply to the inner product + invp = 1./p + self._invpsds[det] = invp + self._overwhitened_data = self.whiten(self.data, 2, inplace=False) +
+[docs] + def det_lognorm(self, det): + # FIXME: just returning 0 for now, but should be the determinant + # of the truncated covariance matrix + return 0.
+ + + @property + def normalize(self): + """Determines if the loglikelihood includes the normalization term. + """ + return self._normalize + + @normalize.setter + def normalize(self, normalize): + """Clears the current stats if the normalization state is changed. + """ + self._normalize = normalize + + @staticmethod + def _nowaveform_logl(): + """Convenience function to set logl values if no waveform generated. + """ + return -numpy.inf + + def _loglr(self): + r"""Computes the log likelihood ratio. + Returns + ------- + float + The value of the log likelihood ratio evaluated at the given point. + """ + return self.loglikelihood - self.lognl + +
+[docs] + def whiten(self, data, whiten, inplace=False): + """Whitens the given data. + + Parameters + ---------- + data : dict + Dictionary of detector names -> FrequencySeries. + whiten : {0, 1, 2} + Integer indicating what level of whitening to apply. Levels are: + 0: no whitening; 1: whiten; 2: overwhiten. + inplace : bool, optional + If True, modify the data in place. Otherwise, a copy will be + created for whitening. + + + Returns + ------- + dict : + Dictionary of FrequencySeries after the requested whitening has + been applied. + """ + if not inplace: + data = {det: d.copy() for det, d in data.items()} + if whiten: + for det, dtilde in data.items(): + invpsd = self._invpsds[det] + if whiten == 1: + dtilde *= invpsd**0.5 + elif whiten == 2: + dtilde *= invpsd + else: + raise ValueError("whiten must be either 0, 1, or 2") + return data
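A short sketch of the three whitening levels, assuming ``model`` is an instance of one of the gated models defined below with data and PSDs already set:

raw = model.whiten(model.data, 0)           # copy, no whitening applied
whitened = model.whiten(model.data, 1)      # multiply by 1/sqrt(PSD)
overwhitened = model.whiten(model.data, 2)  # multiply by 1/PSD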
+ + +
+[docs] + def get_waveforms(self): + """The waveforms generated using the current parameters. + + If the waveforms haven't been generated yet, they will be generated, + resized to the same length as the data, and cached. If the + ``highpass_waveforms`` attribute is set, a highpass filter will + also be applied to the waveforms. + + Returns + ------- + dict : + Dictionary of detector names -> FrequencySeries. + """ + if self._current_wfs is None: + params = self.current_params + wfs = self.waveform_generator.generate(**params) + for det, h in wfs.items(): + # make the same length as the data + h.resize(len(self.data[det])) + # apply high pass + if self.highpass_waveforms: + h = highpass( + h.to_timeseries(), + frequency=self.highpass_waveforms).to_frequencyseries() + wfs[det] = h + self._current_wfs = wfs + return self._current_wfs
+ + +
+[docs] + @abstractmethod + def get_gated_waveforms(self): + """Generates and gates waveforms using the current parameters. + + Returns + ------- + dict : + Dictionary of detector names -> FrequencySeries. + """ + pass
+ + +
+[docs] + def get_residuals(self): + """Generates the residuals ``d-h`` using the current parameters. + + Returns + ------- + dict : + Dictionary of detector names -> FrequencySeries. + """ + wfs = self.get_waveforms() + out = {} + for det, h in wfs.items(): + d = self.data[det] + out[det] = d - h + return out
+ + +
+[docs] + def get_data(self): + """Return a copy of the data. + + Returns + ------- + dict : + Dictionary of detector names -> FrequencySeries. + """ + return {det: d.copy() for det, d in self.data.items()}
+ + +
+[docs] + def get_gated_data(self): + """Return a copy of the gated data. + + The gated data will be cached for faster retrieval. + + Returns + ------- + dict : + Dictionary of detector names -> FrequencySeries. + """ + gate_times = self.get_gate_times() + out = {} + for det, d in self.td_data.items(): + # make sure the cache at least has the detectors in it + try: + cache = self._gated_data[det] + except KeyError: + cache = self._gated_data[det] = {} + invpsd = self._invpsds[det] + gatestartdelay, dgatedelay = gate_times[det] + try: + dtilde = cache[gatestartdelay, dgatedelay] + except KeyError: + # doesn't exist yet, or the gate times changed + cache.clear() + d = d.gate(gatestartdelay + dgatedelay/2, + window=dgatedelay/2, copy=True, + invpsd=invpsd, method='paint') + dtilde = d.to_frequencyseries() + # save for next time + cache[gatestartdelay, dgatedelay] = dtilde + out[det] = dtilde + return out
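The gating itself is the in-painting ``gate`` call on a time series shown above. As a standalone sketch, assuming ``ts`` is a :py:class:`pycbc.types.timeseries.TimeSeries` with a matching inverse PSD ``invpsd``, and with a hypothetical gate position and width:

gatestart, dgate = 0.5, 0.2   # hypothetical gate start time and width (s)
gated = ts.gate(gatestart + dgate/2, window=dgate/2, copy=True,
                invpsd=invpsd, method='paint')
gated_tilde = gated.to_frequencyseries()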
+ + +
+[docs] + def get_gate_times(self): + """Gets the time to apply a gate based on the current sky position. + + If the parameter ``gatefunc`` is set to ``'hmeco'``, the gate times + will be calculated based on the hybrid MECO of the given set of + parameters; see ``get_gate_times_hmeco`` for details. Otherwise, the + gate times will just be retrieved from the ``t_gate_start`` and + ``t_gate_end`` parameters. + + .. warning:: + Since the normalization of the likelihood is currently not + being calculated, it is recommended that you do not use + ``gatefunc``, instead using fixed gate times. + + Returns + ------- + dict : + Dictionary of detector names -> (gate start, gate width) + """ + params = self.current_params + try: + gatefunc = self.current_params['gatefunc'] + except KeyError: + gatefunc = None + if gatefunc == 'hmeco': + return self.get_gate_times_hmeco() + # gate input for ringdown analysis which considers a start time + # and an end time + gatestart = params['t_gate_start'] + gateend = params['t_gate_end'] + # we'll need the sky location for determining time shifts + ra = self.current_params['ra'] + dec = self.current_params['dec'] + gatetimes = {} + for det in self._invpsds: + thisdet = Detector(det) + # account for the time delay between the waveforms of the + # different detectors + gatestartdelay = gatestart + thisdet.time_delay_from_earth_center( + ra, dec, gatestart) + gateenddelay = gateend + thisdet.time_delay_from_earth_center( + ra, dec, gateend) + dgatedelay = gateenddelay - gatestartdelay + gatetimes[det] = (gatestartdelay, dgatedelay) + return gatetimes
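The per-detector shift applied above is just the time delay from the Earth's center at the gate boundaries. A sketch using :py:class:`pycbc.detector.Detector` directly, with a hypothetical sky location and geocentric gate:

from pycbc.detector import Detector

ra, dec = 1.37, -1.26                        # hypothetical sky location (rad)
t_start, t_end = 1126259462.0, 1126259462.4  # hypothetical geocentric gate (s)
det = Detector('H1')
start_h1 = t_start + det.time_delay_from_earth_center(ra, dec, t_start)
end_h1 = t_end + det.time_delay_from_earth_center(ra, dec, t_end)
gate_h1 = (start_h1, end_h1 - start_h1)      # (gate start, gate width) in H1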
+ + +
+[docs] + def get_gate_times_hmeco(self): + """Gets the gate times based on the hybrid MECO frequency of the + current parameters. + + Returns + ------- + dict : + Dictionary of detector names -> (gate start, gate width) + """ + # generate the template waveform + try: + wfs = self.get_waveforms() + except NoWaveformError: + return self._nowaveform_logl() + except FailedWaveformError as e: + if self.ignore_failed_waveforms: + return self._nowaveform_logl() + raise e + # get waveform parameters + params = self.current_params + spin1 = params['spin1z'] + spin2 = params['spin2z'] + # gate input for ringdown analysis which considers a start time + # and an end time + dgate = params['gate_window'] + meco_f = hybrid_meco_frequency(params['mass1'], params['mass2'], + spin1, spin2) + # figure out the gate times + gatetimes = {} + for det, h in wfs.items(): + invpsd = self._invpsds[det] + h.resize(len(invpsd)) + ht = h.to_timeseries() + f_low = int((self._f_lower[det]+1)/h.delta_f) + sample_freqs = h.sample_frequencies[f_low:].numpy() + f_idx = numpy.where(sample_freqs <= meco_f)[0][-1] + # find time corresponding to meco frequency + t_from_freq = time_from_frequencyseries( + h[f_low:], sample_frequencies=sample_freqs) + if t_from_freq[f_idx] > 0: + gatestartdelay = t_from_freq[f_idx] + float(t_from_freq.epoch) + else: + gatestartdelay = t_from_freq[f_idx] + ht.sample_times[-1] + gatestartdelay = min(gatestartdelay, params['t_gate_start']) + gatetimes[det] = (gatestartdelay, dgate) + return gatetimes
+ + + def _lognl(self): + """Calculates the log of the noise likelihood. + """ + # clear variables + lognl = 0. + self._det_lognls.clear() + # get the times of the gates + gate_times = self.get_gate_times() + for det, invpsd in self._invpsds.items(): + norm = self.det_lognorm(det) + gatestartdelay, dgatedelay = gate_times[det] + # we always filter the entire segment starting from kmin, since the + # gated series may have high frequency components + slc = slice(self._kmin[det], self._kmax[det]) + # gate the data + data = self.td_data[det] + gated_dt = data.gate(gatestartdelay + dgatedelay/2, + window=dgatedelay/2, copy=True, + invpsd=invpsd, method='paint') + # convert to the frequency series + gated_d = gated_dt.to_frequencyseries() + # overwhiten + gated_d *= invpsd + d = self.data[det] + # inner product + ip = 4 * invpsd.delta_f * d[slc].inner(gated_d[slc]).real # <d, d> + dd = norm - 0.5*ip + # store + self._det_lognls[det] = dd + lognl += dd + return float(lognl) + +
+[docs] + def det_lognl(self, det): + # make sure lognl has been called + _ = self._trytoget('lognl', self._lognl) + # the det_lognls dict should have been updated, so can call it now + return self._det_lognls[det]
+ + + @staticmethod + def _fd_data_from_strain_dict(opts, strain_dict, psd_strain_dict): + """Wrapper around :py:func:`data_utils.fd_data_from_strain_dict`. + + Ensures that if the PSD is estimated from data, the inverse spectrum + truncation uses a Hann window, and that the low frequency cutoff is + zero. + """ + if opts.psd_inverse_length and opts.invpsd_trunc_method is None: + # make sure invpsd truncation is set to hanning + logging.info("Using Hann window to truncate inverse PSD") + opts.invpsd_trunc_method = 'hann' + lfs = None + if opts.psd_estimation: + # make sure low frequency cutoff is zero + logging.info("Setting low frequency cutoff of PSD to 0") + lfs = opts.low_frequency_cutoff.copy() + opts.low_frequency_cutoff = {d: 0. for d in lfs} + out = fd_data_from_strain_dict(opts, strain_dict, psd_strain_dict) + # set back + if lfs is not None: + opts.low_frequency_cutoff = lfs + return out + +
+[docs] + def write_metadata(self, fp, group=None): + """Adds writing the psds, and analyzed detectors. + + The analyzed detectors, their analysis segments, and the segments + used for psd estimation are written as + ``analyzed_detectors``, ``{{detector}}_analysis_segment``, and + ``{{detector}}_psd_segment``, respectively. These are either written + to the specified ``group``'s attrs, or to the top level attrs if + ``group`` is None. + + Parameters + ---------- + fp : pycbc.inference.io.BaseInferenceFile instance + The inference file to write to. + group : str, optional + If provided, the metadata will be written to the attrs specified + by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is + written to the top-level attrs (``fp.attrs``). + """ + BaseDataModel.write_metadata(self, fp) + attrs = fp.getattrs(group=group) + # write the analyzed detectors and times + attrs['analyzed_detectors'] = self.detectors + for det, data in self.data.items(): + key = '{}_analysis_segment'.format(det) + attrs[key] = [float(data.start_time), float(data.end_time)] + if self._psds is not None and not self.no_save_data: + fp.write_psd(self._psds, group=group) + # write the times used for psd estimation (if they were provided) + for det in self.psd_segments: + key = '{}_psd_segment'.format(det) + attrs[key] = list(map(float, self.psd_segments[det])) + # save the frequency cutoffs + for det in self.detectors: + attrs['{}_likelihood_low_freq'.format(det)] = self._f_lower[det] + if self._f_upper[det] is not None: + attrs['{}_likelihood_high_freq'.format(det)] = \ + self._f_upper[det]
+
+ + + +
+[docs] +class GatedGaussianNoise(BaseGatedGaussian): + r"""Model that applies a time domain gate, assuming stationary Gaussian + noise. + + The gate start and end times are set by providing ``t_gate_start`` and + ``t_gate_end`` parameters, respectively. This will cause the gated times + to be excised from the analysis. For more details on the likelihood + function and its derivation, see + `arXiv:2105.05238 <https://arxiv.org/abs/2105.05238>`_. + + .. warning:: + The normalization of the likelihood depends on the gate times. However, + at the moment, the normalization is not calculated, as it depends on + the determinant of the truncated covariance matrix (see Eq. 4 of + arXiv:2105.05238). For this reason it is recommended that you only + use this model for fixed gate times. + + """ + name = 'gated_gaussian_noise' + + def __init__(self, variable_params, data, low_frequency_cutoff, psds=None, + high_frequency_cutoff=None, normalize=False, + static_params=None, **kwargs): + # set up the boiler-plate attributes + super().__init__( + variable_params, data, low_frequency_cutoff, psds=psds, + high_frequency_cutoff=high_frequency_cutoff, normalize=normalize, + static_params=static_params, **kwargs) + # create the waveform generator + self.waveform_generator = create_waveform_generator( + self.variable_params, self.data, + waveform_transforms=self.waveform_transforms, + recalibration=self.recalibration, + gates=self.gates, **self.static_params) + + @property + def _extra_stats(self): + """No extra stats are stored.""" + return [] + + def _loglikelihood(self): + r"""Computes the log likelihood after removing the power within the + given time window, + + .. math:: + \log p(d|\Theta) = -\frac{1}{2} \sum_i + \left< d_i - h_i(\Theta) | d_i - h_i(\Theta) \right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood. + """ + # generate the template waveform + try: + wfs = self.get_waveforms() + except NoWaveformError: + return self._nowaveform_logl() + except FailedWaveformError as e: + if self.ignore_failed_waveforms: + return self._nowaveform_logl() + raise e + # get the times of the gates + gate_times = self.get_gate_times() + logl = 0. + for det, h in wfs.items(): + invpsd = self._invpsds[det] + norm = self.det_lognorm(det) + gatestartdelay, dgatedelay = gate_times[det] + # we always filter the entire segment starting from kmin, since the + # gated series may have high frequency components + slc = slice(self._kmin[det], self._kmax[det]) + # calculate the residual + data = self.td_data[det] + ht = h.to_timeseries() + res = data - ht + rtilde = res.to_frequencyseries() + gated_res = res.gate(gatestartdelay + dgatedelay/2, + window=dgatedelay/2, copy=True, + invpsd=invpsd, method='paint') + gated_rtilde = gated_res.to_frequencyseries() + # overwhiten + gated_rtilde *= invpsd + rr = 4 * invpsd.delta_f * rtilde[slc].inner(gated_rtilde[slc]).real + logl += norm - 0.5*rr + return float(logl) + +
+[docs] + def get_gated_waveforms(self): + wfs = self.get_waveforms() + gate_times = self.get_gate_times() + out = {} + for det, h in wfs.items(): + invpsd = self._invpsds[det] + gatestartdelay, dgatedelay = gate_times[det] + ht = h.to_timeseries() + ht = ht.gate(gatestartdelay + dgatedelay/2, + window=dgatedelay/2, copy=False, + invpsd=invpsd, method='paint') + h = ht.to_frequencyseries() + out[det] = h + return out
+ + +
+[docs] + def get_gated_residuals(self): + """Generates the gated residuals ``d-h`` using the current parameters. + + Returns + ------- + dict : + Dictionary of detector names -> FrequencySeries. + """ + params = self.current_params + wfs = self.waveform_generator.generate(**params) + gate_times = self.get_gate_times() + out = {} + for det, h in wfs.items(): + invpsd = self._invpsds[det] + gatestartdelay, dgatedelay = gate_times[det] + data = self.td_data[det] + ht = h.to_timeseries() + res = data - ht + res = res.gate(gatestartdelay + dgatedelay/2, + window=dgatedelay/2, copy=True, + invpsd=invpsd, method='paint') + res = res.to_frequencyseries() + out[det] = res + return out
+
+ + + +
+[docs] +class GatedGaussianMargPol(BaseGatedGaussian): + r"""Gated Gaussian model with numerical marginalization over polarization. + + This implements the GatedGaussian likelihood with an explicit numerical + marginalization over polarization angle. This is accomplished using + a fixed set of integration points distributed uniformly between + 0 and 2pi. By default, 1000 integration points are used. + The 'polarization_samples' argument can be passed to set an alternate + number of integration points. + """ + name = 'gated_gaussian_margpol' + + def __init__(self, variable_params, data, low_frequency_cutoff, psds=None, + high_frequency_cutoff=None, normalize=False, + static_params=None, + polarization_samples=1000, **kwargs): + # set up the boiler-plate attributes + super().__init__( + variable_params, data, low_frequency_cutoff, psds=psds, + high_frequency_cutoff=high_frequency_cutoff, normalize=normalize, + static_params=static_params, **kwargs) + # the polarization parameters + self.polarization_samples = polarization_samples + self.pol = numpy.linspace(0, 2*numpy.pi, self.polarization_samples) + self.dets = {} + # create the waveform generator + self.waveform_generator = create_waveform_generator( + self.variable_params, self.data, + waveform_transforms=self.waveform_transforms, + recalibration=self.recalibration, + generator_class=generator.FDomainDetFrameTwoPolGenerator, + **self.static_params) +
+[docs] + def get_waveforms(self): + if self._current_wfs is None: + params = self.current_params + wfs = self.waveform_generator.generate(**params) + for det, (hp, hc) in wfs.items(): + # make the same length as the data + hp.resize(len(self.data[det])) + hc.resize(len(self.data[det])) + # apply high pass + if self.highpass_waveforms: + hp = highpass( + hp.to_timeseries(), + frequency=self.highpass_waveforms).to_frequencyseries() + hc = highpass( + hc.to_timeseries(), + frequency=self.highpass_waveforms).to_frequencyseries() + wfs[det] = (hp, hc) + self._current_wfs = wfs + return self._current_wfs
+ + +
+[docs] + def get_gated_waveforms(self): + wfs = self.get_waveforms() + gate_times = self.get_gate_times() + out = {} + for det in wfs: + invpsd = self._invpsds[det] + gatestartdelay, dgatedelay = gate_times[det] + # the waveforms are a dictionary of (hp, hc) + pols = [] + for h in wfs[det]: + ht = h.to_timeseries() + ht = ht.gate(gatestartdelay + dgatedelay/2, + window=dgatedelay/2, copy=False, + invpsd=invpsd, method='paint') + h = ht.to_frequencyseries() + pols.append(h) + out[det] = tuple(pols) + return out
+ + + @property + def _extra_stats(self): + """Adds the maxL polarization and corresponding likelihood.""" + return ['maxl_polarization', 'maxl_logl'] + + def _loglikelihood(self): + r"""Computes the log likelihood after removing the power within the + given time window, + + .. math:: + \log p(d|\Theta) = -\frac{1}{2} \sum_i + \left< d_i - h_i(\Theta) | d_i - h_i(\Theta) \right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood. + """ + # generate the template waveform + try: + wfs = self.get_waveforms() + except NoWaveformError: + return self._nowaveform_logl() + except FailedWaveformError as e: + if self.ignore_failed_waveforms: + return self._nowaveform_logl() + raise e + # get the gated waveforms and data + gated_wfs = self.get_gated_waveforms() + gated_data = self.get_gated_data() + # cycle over + loglr = 0. + lognl = 0. + for det, (hp, hc) in wfs.items(): + # get the antenna patterns + if det not in self.dets: + self.dets[det] = Detector(det) + fp, fc = self.dets[det].antenna_pattern(self.current_params['ra'], + self.current_params['dec'], + self.pol, + self.current_params['tc']) + norm = self.det_lognorm(det) + # we always filter the entire segment starting from kmin, since the + # gated series may have high frequency components + slc = slice(self._kmin[det], self._kmax[det]) + # get the gated values + gated_hp, gated_hc = gated_wfs[det] + gated_d = gated_data[det] + # we'll overwhiten the ungated data and waveforms for computing + # inner products + d = self._overwhitened_data[det] + # overwhiten the hp and hc + # we'll do this in place for computational efficiency, but as a + # result we'll clear the current waveforms cache so a repeated call + # to get_waveforms does not return the overwhitened versions + self._current_wfs = None + invpsd = self._invpsds[det] + hp *= invpsd + hc *= invpsd + # get the various gated inner products + hpd = hp[slc].inner(gated_d[slc]).real # <hp, d> + hcd = hc[slc].inner(gated_d[slc]).real # <hc, d> + dhp = d[slc].inner(gated_hp[slc]).real # <d, hp> + dhc = d[slc].inner(gated_hc[slc]).real # <d, hc> + hphp = hp[slc].inner(gated_hp[slc]).real # <hp, hp> + hchc = hc[slc].inner(gated_hc[slc]).real # <hc, hc> + hphc = hp[slc].inner(gated_hc[slc]).real # <hp, hc> + hchp = hc[slc].inner(gated_hp[slc]).real # <hc, hp> + dd = d[slc].inner(gated_d[slc]).real # <d, d> + # since the antenna patterns are real, + # <h, d>/2 + <d, h>/2 = fp*(<hp, d>/2 + <d, hp>/2) + # + fc*(<hc, d>/2 + <d, hc>/2) + hd = fp*(hpd + dhp) + fc*(hcd + dhc) + # <h, h>/2 = <fp*hp + fc*hc, fp*hp + fc*hc>/2 + # = fp*fp*<hp, hp>/2 + fc*fc*<hc, hc>/2 + # + fp*fc*<hp, hc>/2 + fc*fp*<hc, hp>/2 + hh = fp*fp*hphp + fc*fc*hchc + fp*fc*(hphc + hchp) + # sum up; note that the factor is 2df instead of 4df to account + # for the factor of 1/2 + loglr += norm + 2*invpsd.delta_f*(hd - hh) + lognl += -2 * invpsd.delta_f * dd + # store the maxl polarization + idx = loglr.argmax() + setattr(self._current_stats, 'maxl_polarization', self.pol[idx]) + setattr(self._current_stats, 'maxl_logl', loglr[idx] + lognl) + # compute the marginalized log likelihood + marglogl = special.logsumexp(loglr) + lognl - numpy.log(len(self.pol)) + return float(marglogl)
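The final marginalization step above reduces to a ``logsumexp`` over the polarization grid. A self-contained numerical sketch, with placeholder values standing in for the per-polarization log likelihood ratios and the noise log likelihood:

import numpy
from scipy import special

pol = numpy.linspace(0, 2 * numpy.pi, 1000)            # integration points
loglr = numpy.random.uniform(-5., 5., size=len(pol))   # placeholder loglr values
lognl = -100.                                           # placeholder noise logl
marglogl = special.logsumexp(loglr) + lognl - numpy.log(len(pol))
maxl_pol = pol[loglr.argmax()]   # recorded as the maxl_polarization stat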
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/gaussian_noise.html b/latest/html/_modules/pycbc/inference/models/gaussian_noise.html new file mode 100644 index 00000000000..03760e04377 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/gaussian_noise.html @@ -0,0 +1,1394 @@ + + + + + + pycbc.inference.models.gaussian_noise — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.gaussian_noise

+# Copyright (C) 2018  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This module provides model classes that assume the noise is Gaussian.
+"""
+
+import logging
+import shlex
+from abc import ABCMeta
+import numpy
+
+from pycbc import filter as pyfilter
+from pycbc.waveform import (NoWaveformError, FailedWaveformError)
+from pycbc.waveform import generator
+from pycbc.types import FrequencySeries
+from pycbc.strain import gates_from_cli
+from pycbc.strain.calibration import Recalibrate
+from pycbc.inject import InjectionSet
+from pycbc.io import FieldArray
+from pycbc.types.optparse import MultiDetOptionAction
+
+from .base import ModelStats
+from .base_data import BaseDataModel
+from .data_utils import (data_opts_from_config, data_from_cli,
+                         fd_data_from_strain_dict, gate_overwhitened_data)
+
+
+
+[docs] +class BaseGaussianNoise(BaseDataModel, metaclass=ABCMeta): + r"""Model for analyzing GW data assuming a wide-sense stationary + Gaussian noise model. + + This model will load gravitational wave data and calculate the log noise + likelihood ``_lognl`` and normalization. It also implements the + ``_loglikelihood`` function as the sum of the log likelihood ratio and the + ``lognl``. It does not implement a log likelihood ratio function + ``_loglr``, however, since that can differ depending on the signal model. + Models that analyze GW data assuming it is stationary Gaussian should + therefore inherit from this class and implement their own ``_loglr`` + function. + + For more details on the inner product used, the log likelihood of the + noise, and the normalization factor, see :py:class:`GaussianNoise`. + + Parameters + ---------- + variable_params : (tuple of) string(s) + A tuple of parameter names that will be varied. + data : dict + A dictionary of data, in which the keys are the detector names and the + values are the data (assumed to be unwhitened). All data must have the + same frequency resolution. + low_frequency_cutoff : dict + A dictionary of starting frequencies, in which the keys are the + detector names and the values are the starting frequencies for the + respective detectors to be used for computing inner products. + psds : dict, optional + A dictionary of FrequencySeries keyed by the detector names. The + dictionary must have a psd for each detector specified in the data + dictionary. If provided, the inner products in each detector will be + weighted by 1/psd of that detector. + high_frequency_cutoff : dict, optional + A dictionary of ending frequencies, in which the keys are the + detector names and the values are the ending frequencies for the + respective detectors to be used for computing inner products. If not + provided, the minimum of the largest frequency stored in the data + and a given waveform will be used. + normalize : bool, optional + If True, the normalization factor :math:`\alpha` will be included in the + log likelihood. See :py:class:`GaussianNoise` for details. Default is + to not include it. + static_params : dict, optional + A dictionary of parameter names -> values to keep fixed. + ignore_failed_waveforms : bool, optional + If the waveform generator raises an error when it tries to generate, + treat the point as having zero likelihood. This allows the parameter + estimation to continue. Otherwise, an error will be raised, stopping + the run. Default is False. + \**kwargs : + All other keyword arguments are passed to ``BaseDataModel``. + + Attributes + ---------- + ignore_failed_waveforms : bool + If True, points in parameter space that cause waveform generation to + fail (i.e., they raise a ``FailedWaveformError``) will be treated as + points with zero likelihood. Otherwise, such points will cause the + model to raise a ``FailedWaveformError``.
+ """ + + def __init__(self, variable_params, data, low_frequency_cutoff, psds=None, + high_frequency_cutoff=None, normalize=False, + static_params=None, ignore_failed_waveforms=False, + no_save_data=False, + **kwargs): + # set up the boiler-plate attributes + super(BaseGaussianNoise, self).__init__(variable_params, data, + static_params=static_params, + no_save_data=no_save_data, + **kwargs) + self.ignore_failed_waveforms = ignore_failed_waveforms + self.no_save_data = no_save_data + # check if low frequency cutoff has been provided for every IFO with + # data + for ifo in self.data: + if low_frequency_cutoff[ifo] is None: + raise ValueError( + "A low-frequency-cutoff must be provided for every " + "detector for which data has been provided. If " + "loading the model settings from " + "a config file, please provide " + "`{DETECTOR}:low-frequency-cutoff` options for " + "every detector in the `[model]` section, where " + "`{DETECTOR} is the name of the detector," + "or provide a single low-frequency-cutoff option" + "which will be used for all detectors") + + # check that the data sets all have the same delta fs and delta ts + dts = numpy.array([d.delta_t for d in self.data.values()]) + dfs = numpy.array([d.delta_f for d in self.data.values()]) + if all(dts == dts[0]) and all(dfs == dfs[0]): + self.all_ifodata_same_rate_length = True + else: + self.all_ifodata_same_rate_length = False + logging.info( + "You are using different data segment lengths or " + "sampling rates for different IFOs") + + # store the number of samples in the time domain + self._N = {} + for (det, d) in self._data.items(): + self._N[det] = int(1./(d.delta_f*d.delta_t)) + + # set lower/upper frequency cutoff + if high_frequency_cutoff is None: + high_frequency_cutoff = {ifo: None for ifo in self.data} + self._f_upper = high_frequency_cutoff + self._f_lower = low_frequency_cutoff + + # Set the cutoff indices + self._kmin = {} + self._kmax = {} + + for (det, d) in self._data.items(): + kmin, kmax = pyfilter.get_cutoff_indices(self._f_lower[det], + self._f_upper[det], + d.delta_f, self._N[det]) + self._kmin[det] = kmin + self._kmax[det] = kmax + + # store the psd segments + self._psd_segments = {} + if psds is not None: + self.set_psd_segments(psds) + + # store the psds and calculate the inner product weight + self._psds = {} + self._invpsds = {} + self._weight = {} + self._lognorm = {} + self._det_lognls = {} + self._whitened_data = {} + + # set the normalization state + self._normalize = False + self.normalize = normalize + # store the psds and whiten the data + self.psds = psds + + # attribute for storing the current waveforms + self._current_wfs = None + + @property + def high_frequency_cutoff(self): + """The high frequency cutoff of the inner product.""" + return self._f_upper + + @property + def low_frequency_cutoff(self): + """The low frequency cutoff of the inner product.""" + return self._f_lower + + @property + def kmin(self): + """Dictionary of starting indices for the inner product. + + This is determined from the lower frequency cutoff and the ``delta_f`` + of the data using + :py:func:`pycbc.filter.matchedfilter.get_cutoff_indices`. + """ + return self._kmin + + @property + def kmax(self): + """Dictionary of ending indices for the inner product. + + This is determined from the high frequency cutoff and the ``delta_f`` + of the data using + :py:func:`pycbc.filter.matchedfilter.get_cutoff_indices`. If no high + frequency cutoff was provided, this will be the indice corresponding to + the Nyquist frequency. 
+ """ + return self._kmax + + @property + def psds(self): + """Dictionary of detectors -> PSD frequency series. + + If no PSD was provided for a detector, this will just be a frequency + series of ones. + """ + return self._psds + + @psds.setter + def psds(self, psds): + """Sets the psds, and calculates the weight and norm from them. + + The data and the low and high frequency cutoffs must be set first. + """ + # check that the data has been set + if self._data is None: + raise ValueError("No data set") + if self._f_lower is None: + raise ValueError("low frequency cutoff not set") + if self._f_upper is None: + raise ValueError("high frequency cutoff not set") + # make sure the relevant caches are cleared + self._psds.clear() + self._invpsds.clear() + self._weight.clear() + self._lognorm.clear() + self._det_lognls.clear() + self._whitened_data.clear() + for det, d in self._data.items(): + if psds is None: + # No psd means assume white PSD + p = FrequencySeries(numpy.ones(int(self._N[det]/2+1)), + delta_f=d.delta_f) + else: + # copy for storage + p = psds[det].copy() + self._psds[det] = p + # we'll store the weight to apply to the inner product + # only set weight in band we will analyze + kmin = self._kmin[det] + kmax = self._kmax[det] + invp = FrequencySeries(numpy.zeros(len(p)), delta_f=p.delta_f) + invp[kmin:kmax] = 1./p[kmin:kmax] + self._invpsds[det] = invp + self._weight[det] = numpy.sqrt(4 * invp.delta_f * invp) + self._whitened_data[det] = d.copy() + self._whitened_data[det] *= self._weight[det] + # set the lognl and lognorm; we'll get this by just calling lognl + _ = self.lognl + + @property + def psd_segments(self): + """Dictionary giving times used for PSD estimation for each detector. + + If a detector's PSD was not estimated from data, or the segment wasn't + provided, that detector will not be in the dictionary. + """ + return self._psd_segments + +
+[docs] + def set_psd_segments(self, psds): + """Sets the PSD segments from a dictionary of PSDs. + + This attempts to get the PSD segment from a ``psd_segment`` attribute + of each detector's PSD frequency series. If that attribute isn't set, + then that detector is not added to the dictionary of PSD segments. + + Parameters + ---------- + psds : dict + Dictionary of detector name -> PSD frequency series. The segment + used for each PSD will try to be retrieved from the PSD's + ``.psd_segment`` attribute. + """ + for det, p in psds.items(): + try: + self._psd_segments[det] = p.psd_segment + except AttributeError: + continue
+ + + @property + def weight(self): + r"""Dictionary of detectors -> frequency series of inner-product + weights. + + The weights are :math:`\sqrt{4 \Delta f / S_n(f)}`. This is set when + the PSDs are set. + """ + return self._weight + + @property + def whitened_data(self): + r"""Dictionary of detectors -> whitened data frequency series. + + The whitened data is the data multiplied by the inner-product weight. + Note that this includes the :math:`\sqrt{4 \Delta f}` factor. This + is set when the PSDs are set. + """ + return self._whitened_data + +
+[docs] + def det_lognorm(self, det): + """The log of the likelihood normalization in the given detector. + + If ``self.normalize`` is False, will just return 0. + """ + if not self.normalize: + return 0. + try: + return self._lognorm[det] + except KeyError: + # hasn't been calculated yet + p = self._psds[det] + dt = self._whitened_data[det].delta_t + kmin = self._kmin[det] + kmax = self._kmax[det] + lognorm = -float(self._N[det]*numpy.log(numpy.pi*self._N[det]*dt)/2. + + numpy.log(p[kmin:kmax]).sum()) + self._lognorm[det] = lognorm + return self._lognorm[det]
+ + + @property + def normalize(self): + """Determines if the loglikelihood includes the normalization term. + """ + return self._normalize + + @normalize.setter + def normalize(self, normalize): + """Clears the current stats if the normalization state is changed. + """ + if normalize != self._normalize: + self._current_stats = ModelStats() + self._lognorm.clear() + self._det_lognls.clear() + self._normalize = normalize + + @property + def lognorm(self): + """The log of the normalization of the log likelihood.""" + return sum(self.det_lognorm(det) for det in self._data) + +
+[docs] + def det_lognl(self, det): + r"""Returns the log likelihood of the noise in the given detector: + + .. math:: + + \log p(d_i|n_i) = \log \alpha_i - + \frac{1}{2} \left<d_i | d_i\right>. + + + Parameters + ---------- + det : str + The name of the detector. + + Returns + ------- + float : + The log likelihood of the noise in the requested detector. + """ + try: + return self._det_lognls[det] + except KeyError: + # hasn't been calculated yet; calculate & store + kmin = self._kmin[det] + kmax = self._kmax[det] + d = self._whitened_data[det] + lognorm = self.det_lognorm(det) + lognl = lognorm - 0.5 * d[kmin:kmax].inner(d[kmin:kmax]).real + self._det_lognls[det] = lognl + return self._det_lognls[det]
+ + + def _lognl(self): + """Computes the log likelihood assuming the data is noise. + + Since this is a constant for Gaussian noise, this is only computed once + then stored. + """ + return sum(self.det_lognl(det) for det in self._data) + +
+[docs] + def update(self, **params): + # update + super().update(**params) + # reset current waveforms + self._current_wfs = None
+ + + def _loglikelihood(self): + r"""Computes the log likelihood of the parameters, + + .. math:: + + \log p(d|\Theta, h) = \log \alpha -\frac{1}{2}\sum_i + \left<d_i - h_i(\Theta) | d_i - h_i(\Theta)\right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood evaluated at the given point. + """ + # since the loglr has fewer terms, we'll call that, then just add + # back the noise term that canceled in the log likelihood ratio + return self.loglr + self.lognl +
+[docs] + def write_metadata(self, fp, group=None): + """Adds writing the psds, analyzed detectors, and lognl. + + The analyzed detectors, their analysis segments, and the segments + used for psd estimation are written as + ``analyzed_detectors``, ``{{detector}}_analysis_segment``, and + ``{{detector}}_psd_segment``, respectively. These are either written + to the specified ``group``'s attrs, or to the top level attrs if + ``group`` is None. + + The total and each detector's lognl is written to the sample group's + ``attrs``. If a group is specified, the group name will be prepended + to the lognl labels with ``{group}__``, with any ``/`` in the group + path replaced with ``__``. For example, if group is ``/a/b``, the + ``lognl`` will be written as ``a__b__lognl`` in the sample's group + attrs. + + Parameters + ---------- + fp : pycbc.inference.io.BaseInferenceFile instance + The inference file to write to. + group : str, optional + If provided, the metadata will be written to the attrs specified + by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is + written to the top-level attrs (``fp.attrs``). + """ + super().write_metadata(fp, group=group) + attrs = fp.getattrs(group=group) + # write the analyzed detectors and times + attrs['analyzed_detectors'] = self.detectors + for det, data in self.data.items(): + key = '{}_analysis_segment'.format(det) + attrs[key] = [float(data.start_time), float(data.end_time)] + if self._psds is not None and not self.no_save_data: + fp.write_psd(self._psds, group=group) + # write the times used for psd estimation (if they were provided) + for det in self.psd_segments: + key = '{}_psd_segment'.format(det) + attrs[key] = list(map(float, self.psd_segments[det])) + # save the frequency cutoffs + for det in self.detectors: + attrs['{}_likelihood_low_freq'.format(det)] = self._f_lower[det] + if self._f_upper[det] is not None: + attrs['{}_likelihood_high_freq'.format(det)] = \ + self._f_upper[det] + # write the lognl to the samples group attrs + sampattrs = fp.getattrs(group=fp.samples_group) + # if a group is specified, prepend the lognl names with it + if group is None or group == '/': + prefix = '' + else: + prefix = group.replace('/', '__') + if not prefix.endswith('__'): + prefix += '__' + sampattrs['{}lognl'.format(prefix)] = self.lognl + # also save the lognl in each detector + for det in self.detectors: + sampattrs['{}{}_lognl'.format(prefix, det)] = self.det_lognl(det)
+ + + @staticmethod + def _fd_data_from_strain_dict(opts, strain_dict, psd_strain_dict): + """Wrapper around :py:func:`data_utils.fd_data_from_strain_dict`.""" + return fd_data_from_strain_dict(opts, strain_dict, psd_strain_dict) + +
+[docs] + @classmethod + def from_config(cls, cp, data_section='data', data=None, psds=None, + **kwargs): + r"""Initializes an instance of this class from the given config file. + + In addition to ``[model]``, a ``data_section`` (default ``[data]``) + must be in the configuration file. The data section specifies settings + for loading data and estimating PSDs. See the `online documentation + <http://pycbc.org/pycbc/latest/html/inference.html#setting-data>`_ for + more details. + + The following options are read from the ``[model]`` section, in + addition to ``name`` (which must be set): + + * ``{{DET}}-low-frequency-cutoff = FLOAT`` : + The low frequency cutoff to use for each detector {{DET}}. A cutoff + must be provided for every detector that may be analyzed (any + additional detectors are ignored). + * ``{{DET}}-high-frequency-cutoff = FLOAT`` : + (Optional) A high frequency cutoff for each detector. If not + provided, the Nyquist frequency is used. + * ``check-for-valid-times =`` : + (Optional) If provided, will check that there are no data quality + flags on during the analysis segment and the segment used for PSD + estimation in each detector. To check for flags, + :py:func:`pycbc.dq.query_flag` is used, with settings pulled from the + ``dq-*`` options in the ``[data]`` section. If a detector has bad + data quality during either the analysis segment or PSD segment, it + will be removed from the analysis. + * ``shift-psd-times-to-valid =`` : + (Optional) If provided, the segment used for PSD estimation will + automatically be shifted left or right until a continuous block of + data with no data quality issues can be found. If no block can be + found with a maximum shift of +/- the requested psd segment length, + the detector will not be analyzed. + * ``err-on-missing-detectors =`` : + Raises an error if any detector is removed from the analysis because + a valid time could not be found. Otherwise, a warning is printed + to screen and the detector is removed from the analysis. + * ``normalize =`` : + (Optional) Turn on the normalization factor. + * ``ignore-failed-waveforms =`` : + Sets the ``ignore_failed_waveforms`` attribute. + + Parameters + ---------- + cp : WorkflowConfigParser + Config file parser to read. + data_section : str, optional + The name of the section to load data options from. + \**kwargs : + All additional keyword arguments are passed to the class. Any + provided keyword will override what is in the config file.
+ """ + # get the injection file, to replace any FROM_INJECTION settings + if 'injection-file' in cp.options('data'): + injection_file = cp.get('data', 'injection-file') + else: + injection_file = None + # update any values that are to be retrieved from the injection + # Note: this does nothing if there are FROM_INJECTION values + get_values_from_injection(cp, injection_file, update_cp=True) + args = cls._init_args_from_config(cp) + # add the injection file + args['injection_file'] = injection_file + # check if normalize is set + if cp.has_option('model', 'normalize'): + args['normalize'] = True + if cp.has_option('model', 'ignore-failed-waveforms'): + args['ignore_failed_waveforms'] = True + if cp.has_option('model', 'det-frame-waveform'): + args['det_frame_waveform'] = True + if cp.has_option('model', 'no-save-data'): + args['no_save_data'] = True + # get any other keyword arguments provided in the model section + ignore_args = [ + 'name', + 'normalize', + 'ignore-failed-waveforms', + 'no-save-data', + 'det-frame-waveform' + ] + for option in cp.options("model"): + if option in ("low-frequency-cutoff", "high-frequency-cutoff"): + ignore_args.append(option) + name = option.replace('-', '_') + args[name] = cp.get_cli_option('model', name, + nargs='+', type=float, + action=MultiDetOptionAction) + + if 'low_frequency_cutoff' not in args: + raise ValueError("low-frequency-cutoff must be provided in the" + " model section, but is not found!") + + # data args + bool_args = ['check-for-valid-times', 'shift-psd-times-to-valid', + 'err-on-missing-detectors'] + data_args = {arg.replace('-', '_'): True for arg in bool_args + if cp.has_option('model', arg)} + ignore_args += bool_args + # load the data + opts = data_opts_from_config(cp, data_section, + args['low_frequency_cutoff']) + if data is None or psds is None: + strain_dict, psd_strain_dict = data_from_cli(opts, **data_args) + # convert to frequency domain and get psds + stilde_dict, psds = cls._fd_data_from_strain_dict( + opts, strain_dict, psd_strain_dict) + # save the psd data segments if the psd was estimated from data + if opts.psd_estimation: + _tdict = psd_strain_dict or strain_dict + for det in psds: + psds[det].psd_segment = (_tdict[det].start_time, + _tdict[det].end_time) + # gate overwhitened if desired + if opts.gate_overwhitened and opts.gate is not None: + stilde_dict = gate_overwhitened_data( + stilde_dict, psds, opts.gate) + data = stilde_dict + args.update({'data': data, 'psds': psds}) + # any extra args + args.update(cls.extra_args_from_config(cp, "model", + skip_args=ignore_args)) + # get ifo-specific instances of calibration model + if cp.has_section('calibration'): + logging.info("Initializing calibration model") + recalib = { + ifo: Recalibrate.from_config(cp, ifo, section='calibration') + for ifo in opts.instruments} + args['recalibration'] = recalib + # get gates for templates + gates = gates_from_cli(opts) + if gates: + args['gates'] = gates + args.update(kwargs) + return cls(**args)
+
+ + + +
+[docs] +class GaussianNoise(BaseGaussianNoise): + r"""Model that assumes data is stationary Gaussian noise. + + With Gaussian noise the log likelihood functions for signal + :math:`\log p(d|\Theta, h)` and for noise :math:`\log p(d|n)` are given by: + + .. math:: + + \log p(d|\Theta, h) &= \log\alpha -\frac{1}{2} \sum_i + \left< d_i - h_i(\Theta) | d_i - h_i(\Theta) \right> \\ + \log p(d|n) &= \log\alpha -\frac{1}{2} \sum_i \left<d_i | d_i\right> + + where the sum is over the number of detectors, :math:`d_i` is the data in + each detector, and :math:`h_i(\Theta)` is the model signal in each + detector. The (discrete) inner product is given by: + + .. math:: + + \left<a_i | b_i\right> = 4\Re \Delta f + \sum_{k=k_{\mathrm{min}}}^{k_{\mathrm{max}}} + \frac{\tilde{a}_i^{*}[k] \tilde{b}_i[k]}{S^{(i)}_n[k]}, + + where :math:`\Delta f` is the frequency resolution (given by 1 / the + observation time :math:`T`), :math:`k` is an index over the discretely + sampled frequencies :math:`f = k \Delta_f`, and :math:`S^{(i)}_n[k]` is the + PSD in the given detector. The upper cutoff on the inner product + :math:`k_{\max}` is by default the Nyquist frequency + :math:`k_{\max} = N/2+1`, where :math:`N = \lfloor T/\Delta t \rfloor` + is the number of samples in the time domain, but this can be set manually + to a smaller value. + + The normalization factor :math:`\alpha` is: + + .. math:: + + \alpha = \prod_{i} \frac{1}{\left(\pi T\right)^{N/2} + \prod_{k=k_\mathrm{min}}^{k_{\mathrm{max}}} S^{(i)}_n[k]}, + + where the product is over the number of detectors. By default, the + normalization constant is not included in the log likelihood, but it can + be turned on using the ``normalize`` keyword argument. + + Note that the log likelihood ratio has fewer terms than the log likelihood, + since the normalization and :math:`\left<d_i|d_i\right>` terms cancel: + + .. math:: + + \log \mathcal{L}(\Theta) = \sum_i \left[ + \left<h_i(\Theta)|d_i\right> - + \frac{1}{2} \left<h_i(\Theta)|h_i(\Theta)\right> \right] + + Upon initialization, the data is whitened using the given PSDs. If no PSDs + are given the data and waveforms returned by the waveform generator are + assumed to be whitened. + + For more details on initialization parameters and definition of terms, see + :py:class:`models.BaseDataModel`. + + Parameters + ---------- + variable_params : (tuple of) string(s) + A tuple of parameter names that will be varied. + data : dict + A dictionary of data, in which the keys are the detector names and the + values are the data (assumed to be unwhitened). The list of keys must + match the waveform generator's detectors keys, and the epoch of every + data set must be the same as the waveform generator's epoch. + low_frequency_cutoff : dict + A dictionary of starting frequencies, in which the keys are the + detector names and the values are the starting frequencies for the + respective detectors to be used for computing inner products. + psds : dict, optional + A dictionary of FrequencySeries keyed by the detector names. The + dictionary must have a psd for each detector specified in the data + dictionary. If provided, the inner products in each detector will be + weighted by 1/psd of that detector. + high_frequency_cutoff : dict, optional + A dictionary of ending frequencies, in which the keys are the + detector names and the values are the ending frequencies for the + respective detectors to be used for computing inner products. 
If not + provided, the minimum of the largest frequency stored in the data + and a given waveform will be used. + normalize : bool, optional + If True, the normalization factor :math:`alpha` will be included in the + log likelihood. Default is to not include it. + static_params : dict, optional + A dictionary of parameter names -> values to keep fixed. + det_frame_waveform : bool + If True, the waveform will be generated directly in the detector frame + using the + :py:class:`~pycbc.waveform.generator.FDomainDirectDetFrameGenerator`. + This requires the approximant be implemented in + :py:func:`~pycbc.waveform.get_fd_det_waveform`. + If False, the + :py:class:`~pycbc.waveform.generator.FDomainDetFrameGenerator` will be + used instead. Defaults to :code:`False`. + \**kwargs : + All other keyword arguments are passed to ``BaseDataModel``. + + Examples + -------- + Create a signal, and set up the model using that signal: + + >>> from pycbc import psd as pypsd + >>> from pycbc.inference.models import GaussianNoise + >>> from pycbc.waveform.generator import (FDomainDetFrameGenerator, + ... FDomainCBCGenerator) + >>> seglen = 4 + >>> sample_rate = 2048 + >>> N = seglen*sample_rate/2+1 + >>> fmin = 30. + >>> static_params = {'approximant': 'IMRPhenomD', 'f_lower': fmin, + ... 'mass1': 38.6, 'mass2': 29.3, + ... 'spin1z': 0., 'spin2z': 0., 'ra': 1.37, 'dec': -1.26, + ... 'polarization': 2.76, 'distance': 3*500.} + >>> variable_params = ['tc'] + >>> tsig = 3.1 + >>> generator = FDomainDetFrameGenerator( + ... FDomainCBCGenerator, 0., detectors=['H1', 'L1'], + ... variable_args=variable_params, + ... delta_f=1./seglen, **static_params) + >>> signal = generator.generate(tc=tsig) + >>> psd = pypsd.aLIGOZeroDetHighPower(N, 1./seglen, 20.) + >>> psds = {'H1': psd, 'L1': psd} + >>> low_frequency_cutoff = {'H1': fmin, 'L1': fmin} + >>> model = GaussianNoise(variable_params, signal, low_frequency_cutoff, + psds=psds, static_params=static_params) + + Set the current position to the coalescence time of the signal: + + >>> model.update(tc=tsig) + + Now compute the log likelihood ratio and prior-weighted likelihood ratio; + since we have not provided a prior, these should be equal to each other: + + >>> print('{:.2f}'.format(model.loglr)) + 282.43 + >>> print('{:.2f}'.format(model.logplr)) + 282.43 + + Print all of the default_stats: + + >>> print(',\n'.join(['{}: {:.2f}'.format(s, v) + ... for (s, v) in sorted(model.current_stats.items())])) + H1_cplx_loglr: 177.76+0.00j, + H1_optimal_snrsq: 355.52, + L1_cplx_loglr: 104.67+0.00j, + L1_optimal_snrsq: 209.35, + logjacobian: 0.00, + loglikelihood: 0.00, + loglr: 282.43, + logprior: 0.00 + + Compute the SNR; for this system and PSD, this should be approximately 24: + + >>> from pycbc.conversions import snr_from_loglr + >>> x = snr_from_loglr(model.loglr) + >>> print('{:.2f}'.format(x)) + 23.77 + + Since there is no noise, the SNR should be the same as the quadrature sum + of the optimal SNRs in each detector: + + >>> x = (model.det_optimal_snrsq('H1') + + ... model.det_optimal_snrsq('L1'))**0.5 + >>> print('{:.2f}'.format(x)) + 23.77 + + Toggle on the normalization constant: + + >>> model.normalize = True + >>> model.loglikelihood + 835397.8757405131 + + Using the same model, evaluate the log likelihood ratio at several points + in time and check that the max is at tsig: + + >>> import numpy + >>> times = numpy.linspace(tsig-1, tsig+1, num=101) + >>> loglrs = numpy.zeros(len(times)) + >>> for (ii, t) in enumerate(times): + ... model.update(tc=t) + ... 
loglrs[ii] = model.loglr + >>> print('tsig: {:.2f}, time of max loglr: {:.2f}'.format( + ... tsig, times[loglrs.argmax()])) + tsig: 3.10, time of max loglr: 3.10 + + Create a prior and use it (see distributions module for more details): + + >>> from pycbc import distributions + >>> uniform_prior = distributions.Uniform(tc=(tsig-0.2,tsig+0.2)) + >>> prior = distributions.JointDistribution(variable_params, uniform_prior) + >>> model = GaussianNoise(variable_params, + ... signal, low_frequency_cutoff, psds=psds, prior=prior, + ... static_params=static_params) + >>> model.update(tc=tsig) + >>> print('{:.2f}'.format(model.logplr)) + 283.35 + >>> print(',\n'.join(['{}: {:.2f}'.format(s, v) + ... for (s, v) in sorted(model.current_stats.items())])) + H1_cplx_loglr: 177.76+0.00j, + H1_optimal_snrsq: 355.52, + L1_cplx_loglr: 104.67+0.00j, + L1_optimal_snrsq: 209.35, + logjacobian: 0.00, + loglikelihood: 0.00, + loglr: 282.43, + logprior: 0.92 + + """ + name = 'gaussian_noise' + + def __init__(self, variable_params, data, low_frequency_cutoff, psds=None, + high_frequency_cutoff=None, normalize=False, + static_params=None, det_frame_waveform=False, **kwargs): + # set up the boiler-plate attributes + super(GaussianNoise, self).__init__( + variable_params, data, low_frequency_cutoff, psds=psds, + high_frequency_cutoff=high_frequency_cutoff, normalize=normalize, + static_params=static_params, **kwargs) + # Determine if all data have the same sampling rate and segment length + if det_frame_waveform: + generator_class = generator.FDomainDirectDetFrameGenerator + else: + generator_class = generator.FDomainDetFrameGenerator + if self.all_ifodata_same_rate_length: + # create a waveform generator for all ifos + self.waveform_generator = create_waveform_generator( + self.variable_params, self.data, + generator_class=generator_class, + waveform_transforms=self.waveform_transforms, + recalibration=self.recalibration, + gates=self.gates, **self.static_params) + else: + # create a waveform generator for each ifo respestively + self.waveform_generator = {} + for det in self.data: + self.waveform_generator[det] = create_waveform_generator( + self.variable_params, {det: self.data[det]}, + generator_class=generator_class, + waveform_transforms=self.waveform_transforms, + recalibration=self.recalibration, + gates=self.gates, **self.static_params) + + @property + def _extra_stats(self): + """Adds ``loglr``, plus ``cplx_loglr`` and ``optimal_snrsq`` in each + detector.""" + return ['loglr'] + \ + ['{}_cplx_loglr'.format(det) for det in self._data] + \ + ['{}_optimal_snrsq'.format(det) for det in self._data] + + def _nowaveform_loglr(self): + """Convenience function to set loglr values if no waveform generated. + """ + for det in self._data: + setattr(self._current_stats, 'loglikelihood', -numpy.inf) + setattr(self._current_stats, '{}_cplx_loglr'.format(det), + -numpy.inf) + # snr can't be < 0 by definition, so return 0 + setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.) + return -numpy.inf + + @property + def multi_signal_support(self): + """ The list of classes that this model supports in a multi-signal + likelihood + """ + return [type(self)] + +
+[docs] + def multi_loglikelihood(self, models): + """ Calculate a multi-model (signal) likelihood + """ + # Generate the waveforms for each submodel + wfs = [] + for m in models + [self]: + wfs.append(m.get_waveforms()) + + # combine into a single waveform + combine = {} + for det in self.data: + mlen = max([len(x[det]) for x in wfs]) + [x[det].resize(mlen) for x in wfs] + combine[det] = sum([x[det] for x in wfs]) + + self._current_wfs = combine + loglr = self._loglr() + self._current_wfs = None + return loglr + self.lognl
+ + +
+[docs] + def get_waveforms(self): + """The waveforms generated using the current parameters. + + If the waveforms haven't been generated yet, they will be generated. + + Returns + ------- + dict : + Dictionary of detector names -> FrequencySeries. + """ + if self._current_wfs is None: + params = self.current_params + if self.all_ifodata_same_rate_length: + wfs = self.waveform_generator.generate(**params) + else: + wfs = {} + for det in self.data: + wfs.update(self.waveform_generator[det].generate(**params)) + self._current_wfs = wfs + return self._current_wfs
+ + + def _loglr(self): + r"""Computes the log likelihood ratio, + + .. math:: + + \log \mathcal{L}(\Theta) = \sum_i + \left<h_i(\Theta)|d_i\right> - + \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood ratio. + """ + try: + wfs = self.get_waveforms() + except NoWaveformError: + return self._nowaveform_loglr() + except FailedWaveformError as e: + if self.ignore_failed_waveforms: + return self._nowaveform_loglr() + else: + raise e + + lr = 0. + for det, h in wfs.items(): + # the kmax of the waveforms may be different than internal kmax + kmax = min(len(h), self._kmax[det]) + if self._kmin[det] >= kmax: + # if the waveform terminates before the filtering low frequency + # cutoff, then the loglr is just 0 for this detector + cplx_hd = 0j + hh = 0. + else: + slc = slice(self._kmin[det], kmax) + # whiten the waveform + h[self._kmin[det]:kmax] *= self._weight[det][slc] + + # the inner products + cplx_hd = h[slc].inner(self._whitened_data[det][slc]) # <h, d> + hh = h[slc].inner(h[slc]).real # < h, h> + cplx_loglr = cplx_hd - 0.5 * hh + # store + setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh) + setattr(self._current_stats, '{}_cplx_loglr'.format(det), + cplx_loglr) + lr += cplx_loglr.real + # also store the loglikelihood, to ensure it is populated in the + # current stats even if loglikelihood is never called + self._current_stats.loglikelihood = lr + self.lognl + return float(lr) + +
+[docs] + def det_cplx_loglr(self, det): + """Returns the complex log likelihood ratio in the given detector. + + Parameters + ---------- + det : str + The name of the detector. + + Returns + ------- + complex float : + The complex log likelihood ratio. + """ + # try to get it from current stats + try: + return getattr(self._current_stats, '{}_cplx_loglr'.format(det)) + except AttributeError: + # hasn't been calculated yet; call loglr to do so + self._loglr() + # now try returning again + return getattr(self._current_stats, '{}_cplx_loglr'.format(det))
+ + +
+[docs] + def det_optimal_snrsq(self, det): + """Returns the optimal SNR squared in the given detector. + + Parameters + ---------- + det : str + The name of the detector. + + Returns + ------- + float : + The optimal SNR squared. + """ + # try to get it from current stats + try: + return getattr(self._current_stats, '{}_optimal_snrsq'.format(det)) + except AttributeError: + # hasn't been calculated yet; call loglr to do so + self._loglr() + # now try returning again + return getattr(self._current_stats, '{}_optimal_snrsq'.format(det))
+
+ + + +# +# ============================================================================= +# +# Support functions +# +# ============================================================================= +# + + +
+[docs] +def get_values_from_injection(cp, injection_file, update_cp=True): + """Replaces all FROM_INJECTION values in a config file with the + corresponding value from the injection. + + This looks for any options that start with ``FROM_INJECTION[:ARG]`` in + a config file. It then replaces that value with the corresponding value + from the injection file. An argument may be optionally provided, in which + case the argument will be retrieved from the injection file. Functions of + parameters in the injection file may be used; the syntax and functions + available is the same as the ``--parameters`` argument in executables + such as ``pycbc_inference_extract_samples``. If no ``ARG`` is provided, + then the option name will try to be retrieved from the injection. + + For example, + + .. code-block:: ini + + mass1 = FROM_INJECTION + + will cause ``mass1`` to be retrieved from the injection file, while: + + .. code-block:: ini + + mass1 = FROM_INJECTION:'primary_mass(mass1, mass2)' + + will cause the larger of mass1 and mass2 to be retrieved from the injection + file. Note that if spaces are in the argument, it must be encased in + single quotes. + + The injection file may contain only one injection. Otherwise, a ValueError + will be raised. + + Parameters + ---------- + cp : ConfigParser + The config file within which to replace values. + injection_file : str or None + The injection file to get values from. A ValueError will be raised + if there are any ``FROM_INJECTION`` values in the config file, and + injection file is None, or if there is more than one injection. + update_cp : bool, optional + Update the config parser with the replaced parameters. If False, + will just retrieve the parameter values to update, without updating + the config file. Default is True. + + Returns + ------- + list + The parameters that were replaced, as a tuple of section name, option, + value. + """ + lookfor = 'FROM_INJECTION' + # figure out what parameters need to be set + replace_params = [] + for sec in cp.sections(): + for opt in cp.options(sec): + val = cp.get(sec, opt) + splitvals = shlex.split(val) + replace_this = [] + for ii, subval in enumerate(splitvals): + if subval.startswith(lookfor): + # determine what we should retrieve from the injection + subval = subval.split(':', 1) + if len(subval) == 1: + subval = opt + else: + subval = subval[1] + replace_this.append((ii, subval)) + if replace_this: + replace_params.append((sec, opt, splitvals, replace_this)) + if replace_params: + # check that we have an injection file + if injection_file is None: + raise ValueError("One or values are set to {}, but no injection " + "file provided".format(lookfor)) + # load the injection file + inj = InjectionSet(injection_file).table.view(type=FieldArray) + # make sure there's only one injection provided + if inj.size > 1: + raise ValueError("One or more values are set to {}, but more than " + "one injection exists in the injection file." + .format(lookfor)) + # get the injection values to replace + for ii, (sec, opt, splitvals, replace_this) in enumerate(replace_params): + # replace the value in the shlex-splitted string with the value + # from the injection + for jj, arg in replace_this: + splitvals[jj] = str(inj[arg][0]) + # now rejoin the string... + # shlex will strip quotes around arguments; this can be problematic + # when rejoining if the the argument had a space in it. In python 3.8 + # there is a shlex.join function which properly rejoins things taking + # that into account. 
Since we need to continue to support earlier + # versions of python, the following kludge tries to account for that. + # If/when we drop support for all earlier versions of python, then the + # following can just be replaced by: + # replace_val = shlex.join(splitvals) + for jj, arg in enumerate(splitvals): + if ' ' in arg: + arg = "'" + arg + "'" + splitvals[jj] = arg + replace_val = ' '.join(splitvals) + replace_params[ii] = (sec, opt, replace_val) + # replace in the config file + if update_cp: + for (sec, opt, replace_val) in replace_params: + cp.set(sec, opt, replace_val) + return replace_params
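+
+# A small illustration (hypothetical value) of the shlex behaviour the
+# function above relies on: quoting keeps an argument containing spaces
+# together, and shlex.split strips the quotes, which is why the rejoin
+# kludge above re-adds them.
+#
+# >>> import shlex
+# >>> shlex.split("FROM_INJECTION:'primary_mass(mass1, mass2)'")
+# ['FROM_INJECTION:primary_mass(mass1, mass2)']
+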
+ + + +
+[docs] +def create_waveform_generator( + variable_params, data, waveform_transforms=None, + recalibration=None, gates=None, + generator_class=generator.FDomainDetFrameGenerator, + **static_params): + r"""Creates a waveform generator for use with a model. + + Parameters + ---------- + variable_params : list of str + The names of the parameters varied. + data : dict + Dictionary mapping detector names to either a + :py:class:`<pycbc.types.TimeSeries TimeSeries>` or + :py:class:`<pycbc.types.FrequencySeries FrequencySeries>`. + waveform_transforms : list, optional + The list of transforms applied to convert variable parameters into + parameters that will be understood by the waveform generator. + recalibration : dict, optional + Dictionary mapping detector names to + :py:class:`<pycbc.calibration.Recalibrate>` instances for + recalibrating data. + gates : dict of tuples, optional + Dictionary of detectors -> tuples of specifying gate times. The + sort of thing returned by :py:func:`pycbc.gate.gates_from_cli`. + generator_class : detector-frame fdomain generator, optional + Class to use for generating waveforms. Default is + :py:class:`waveform.generator.FDomainDetFrameGenerator`. + \**static_params : + All other keyword arguments are passed as static parameters to the + waveform generator. + + Returns + ------- + pycbc.waveform.FDomainDetFrameGenerator + A waveform generator for frequency domain generation. + """ + # the waveform generator will get the variable_params + the output + # of the waveform transforms, so we'll add them to the list of + # parameters + if waveform_transforms is not None: + wfoutputs = set.union(*[t.outputs + for t in waveform_transforms]) + else: + wfoutputs = set() + variable_params = list(variable_params) + list(wfoutputs) + # figure out what generator to use based on the approximant + try: + approximant = static_params['approximant'] + except KeyError: + raise ValueError("no approximant provided in the static args") + + generator_function = generator_class.select_rframe_generator(approximant) + # get data parameters; we'll just use one of the data to get the + # values, then check that all the others are the same + delta_f = None + for d in data.values(): + if delta_f is None: + delta_f = d.delta_f + delta_t = d.delta_t + start_time = d.start_time + else: + if not all([d.delta_f == delta_f, d.delta_t == delta_t, + d.start_time == start_time]): + raise ValueError("data must all have the same delta_t, " + "delta_f, and start_time") + waveform_generator = generator_class( + generator_function, epoch=start_time, + variable_args=variable_params, detectors=list(data.keys()), + delta_f=delta_f, delta_t=delta_t, + recalib=recalibration, gates=gates, + **static_params) + return waveform_generator
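+
+# A hedged usage sketch of the helper above, mirroring how GaussianNoise
+# calls it.  ``stilde`` is a hypothetical dict of detector ->
+# FrequencySeries sharing the same delta_f, delta_t, and start_time:
+#
+# >>> gen = create_waveform_generator(
+# ...     ['tc'], stilde,
+# ...     approximant='IMRPhenomD', f_lower=20.,
+# ...     mass1=38.6, mass2=29.3, spin1z=0., spin2z=0.,
+# ...     ra=1.37, dec=-1.26, polarization=2.76, distance=1500.)
+# >>> wfs = gen.generate(tc=3.1)  # dict of detector -> FrequencySeries
+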
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/hierarchical.html b/latest/html/_modules/pycbc/inference/models/hierarchical.html new file mode 100644 index 00000000000..2e60f32932a --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/hierarchical.html @@ -0,0 +1,1172 @@ + + + + + + pycbc.inference.models.hierarchical — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.hierarchical

+# Copyright (C) 2022  Collin Capano
+#               2023  Alex Nitz & Shichao Wu
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""Hierarchical model definitions."""
+
+import shlex
+import logging
+import numpy
+from pycbc import transforms
+from pycbc.workflow import WorkflowConfigParser
+from .base import BaseModel
+
+#
+# =============================================================================
+#
+#                       Hierarchical model definition
+#
+# =============================================================================
+#
+
+
+
+[docs] +class HierarchicalModel(BaseModel): + r"""Model that is a combination of other models. + + Sub-models are treated as being independent of each other, although + they can share parameters. In other words, the hierarchical likelihood is: + + .. math:: + + p(\mathbf{D}|\mathbf{\vartheta}, \mathbf{H}) = + \prod_{I}^{K} p(\mathbf{d}_I|\mathbf{\vartheta}, H_{I}) + + Submodels are provided as a dictionary upon initialization with a unique + label assigned to each model, e.g., ``{'event1' -> model1, 'event2' -> + model2}``. Variable and static parameters that are specific to each + submodel should be prepended with ``{label}__``, where ``{label}__`` is the + label associated with the given submodel. Shared parameters across multiple + models have no labels prepended. To specify shared models over a subset of + models, separate models with an underscore. For example, + ``event1_event2__foo`` will result in ``foo`` being common between models + ``event1`` and ``event2``. For more details on parameter naming see + :py:class:`HierarchicalParam + <pycbc.inference.models.hierarchical.HierarchicalParam>`. + + All waveform and sampling transforms, as well as prior evaluation, are + handled by this model, not the sub-models. Parameters created by waveform + transforms should therefore also have sub-model names prepended to them, + to indicate which models they should be provided to for likelihood + evaluation. + + Parameters + ---------- + variable_params: (tuple of) string(s) + A tuple of parameter names that will be varied. + submodels: dict + Dictionary of model labels -> model instances of all the submodels. + \**kwargs : + All other keyword arguments are passed to + :py:class:`BaseModel <pycbc.inference.models.base.BaseModel>`. + """ + name = 'hierarchical' + + def __init__(self, variable_params, submodels, **kwargs): + # sub models is assumed to be a dict of model labels -> model instances + self.submodels = submodels + # initialize standard attributes + super().__init__(variable_params, **kwargs) + # store a map of model labels -> parameters for quick look up later + self.param_map = map_params(self.hvariable_params) + # add any parameters created by waveform transforms + if self.waveform_transforms is not None: + derived_params = set() + derived_params.update(*[t.outputs + for t in self.waveform_transforms]) + # convert to hierarchical params + derived_params = map_params(hpiter(derived_params, + list(self.submodels.keys()))) + for lbl, pset in derived_params.items(): + self.param_map[lbl].update(pset) + # make sure the static parameters of all submodels are set correctly + self.static_param_map = map_params(self.hstatic_params.keys()) + # also create a map of model label -> extra stats created by each model + # stats are prepended with the model label. We'll include the + # loglikelihood returned by each submodel in the extra stats. 
+ self.extra_stats_map = {} + self.__extra_stats = [] + for lbl, model in self.submodels.items(): + model.static_params = {p.subname: self.static_params[p.fullname] + for p in self.static_param_map[lbl]} + self.extra_stats_map.update(map_params([ + HierarchicalParam.from_subname(lbl, p) + for p in model._extra_stats+['loglikelihood']])) + self.__extra_stats += self.extra_stats_map[lbl] + # also make sure the model's sampling transforms and waveform + # transforms are not set, as these are handled by the hierarchical + # model, except for `joint_primary_marginalized` model, because + # this specific model needs to allow its submodels to handle + # transform with prefix on the submodel's level + if self.name != "joint_primary_marginalized": + if model.sampling_transforms is not None: + raise ValueError("Model {} has sampling transforms " + "set; in a hierarchical analysis, " + "these are handled by the " + "hierarchical model".format(lbl)) + if model.waveform_transforms is not None: + raise ValueError("Model {} has waveform transforms " + "set; in a hierarchical analysis, " + "these are handled by the " + "hierarchical model".format(lbl)) + + @property + def hvariable_params(self): + """The variable params as a tuple of :py:class:`HierarchicalParam` + instances. + """ + return self._variable_params + + @property + def variable_params(self): + # converts variable params back to a set of strings before returning + return tuple(p.fullname for p in self._variable_params) + + @variable_params.setter + def variable_params(self, variable_params): + # overrides BaseModel's variable params to store the variable params + # as HierarchicalParam instances + if isinstance(variable_params, str): + variable_params = [variable_params] + self._variable_params = tuple(HierarchicalParam(p, self.submodels) + for p in variable_params) + + @property + def hstatic_params(self): + """The static params with :py:class:`HierarchicalParam` instances used + as dictionary keys. + """ + return self._static_params + + @property + def static_params(self): + # converts the static param keys back to strings + return {p.fullname: val for p, val in self._static_params.items()} + + @static_params.setter + def static_params(self, static_params): + if static_params is None: + static_params = {} + self._static_params = {HierarchicalParam(p, self.submodels): val + for p, val in static_params.items()} + + @property + def _extra_stats(self): + return [p.fullname for p in self.__extra_stats] + + @property + def _hextra_stats(self): + """The extra stats as :py:class:`HierarchicalParam` instances.""" + return self.__extra_stats + + def _loglikelihood(self): + # takes the sum of the constitutent models' loglikelihoods + logl = 0. + for lbl, model in self.submodels.items(): + # update the model with the current params. This is done here + # instead of in `update` because waveform transforms are not + # applied until the loglikelihood function is called + model.update(**{p.subname: self.current_params[p.fullname] + for p in self.param_map[lbl]}) + # now get the loglikelihood from the model + sublogl = model.loglikelihood + # store the extra stats + mstats = model.current_stats + for stat in self.extra_stats_map[lbl]: + setattr(self._current_stats, stat, mstats[stat.subname]) + # add to the total loglikelihood + logl += sublogl + return logl + +
+[docs] + def write_metadata(self, fp, group=None): + """Adds data to the metadata that's written. + + Parameters + ---------- + fp : pycbc.inference.io.BaseInferenceFile instance + The inference file to write to. + group : str, optional + If provided, the metadata will be written to the attrs specified + by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is + written to the top-level attrs (``fp.attrs``). + + """ + # write information about self + super().write_metadata(fp, group=group) + # write information about each submodel into a different group for + # each one + if group is None or group == '/': + prefix = '' + else: + prefix = group+'/' + for lbl, model in self.submodels.items(): + model.write_metadata(fp, group=prefix+lbl) + + # if all submodels support it, write a combined lognl parameter + try: + sampattrs = fp.getattrs(group=fp.samples_group) + lognl = [self.submodels[k].lognl for k in self.submodels] + sampattrs['{}lognl'.format(prefix)] = sum(lognl) + except AttributeError: + pass
+ + +
+[docs] + @classmethod + def from_config(cls, cp, **kwargs): + r"""Initializes an instance of this class from the given config file. + + Sub-models are initialized before initializing this class. The model + section must have a ``submodels`` argument that lists the names of all + the submodels to generate as a space-separated list. Each sub-model + should have its own ``[{label}__model]`` section that sets up the + model for that sub-model. For example: + + .. code-block:: ini + + [model] + name = hierarchical + submodels = event1 event2 + + [event1__model] + <event1 model options> + + [event2__model] + <event2 model options> + + Similarly, all other sections that are specific to a model should start + with the model's label. All sections starting with a model's label will + be passed to that model's ``from_config`` method with the label removed + from the section name. For example, if a sub-model requires a data + section to be specified, it should be titled ``[{label}__data]``. Upon + initialization, the ``{label}__`` will be stripped from the section + header and passed to the model. + + No model labels should preceed the ``variable_params``, + ``static_params``, ``waveform_transforms``, or ``sampling_transforms`` + sections. Instead, the parameters specified in these sections should + follow the naming conventions described in :py:class:`HierachicalParam` + to determine which sub-model(s) they belong to. (Sampling parameters + can follow any naming convention, as they are only handled by the + hierarchical model.) This is because the hierarchical model handles + all transforms, communication with the sampler, file IO, and prior + calculation. Only sub-model's loglikelihood functions are called. + + Metadata for each sub-model is written to the output hdf file under + groups given by the sub-model label. For example, if we have two + submodels labelled ``event1`` and ``event2``, there will be groups + with the same names in the top level of the output that contain that + model's subdata. For instance, if event1 used the ``gaussian_noise`` + model, the GW data and PSDs will be found in ``event1/data`` and the + low frequency cutoff used for that model will be in the ``attrs`` of + the ``event1`` group. + + Parameters + ---------- + cp : WorkflowConfigParser + Config file parser to read. + \**kwargs : + All additional keyword arguments are passed to the class. Any + provided keyword will override what is in the config file. 
+ """ + # we need the read from config function from the init; to prevent + # circular imports, we import it here + from pycbc.inference.models import read_from_config + # get the submodels + submodel_lbls = shlex.split(cp.get('model', 'submodels')) + # sort parameters by model + vparam_map = map_params(hpiter(cp.options('variable_params'), + submodel_lbls)) + sparam_map = map_params(hpiter(cp.options('static_params'), + submodel_lbls)) + + # we'll need any waveform transforms for the initializing sub-models, + # as the underlying models will receive the output of those transforms + if any(cp.get_subsections('waveform_transforms')): + waveform_transforms = transforms.read_transforms_from_config( + cp, 'waveform_transforms') + wfoutputs = set.union(*[t.outputs + for t in waveform_transforms]) + wfparam_map = map_params(hpiter(wfoutputs, submodel_lbls)) + else: + wfparam_map = {lbl: [] for lbl in submodel_lbls} + # initialize the models + submodels = {} + logging.info("Loading submodels") + for lbl in submodel_lbls: + logging.info("============= %s =============", lbl) + # create a config parser to pass to the model + subcp = WorkflowConfigParser() + # copy sections over that start with the model label (this should + # include the [model] section for that model) + copy_sections = [ + HierarchicalParam(sec, submodel_lbls) + for sec in cp.sections() if lbl in + sec.split('-')[0].split(HierarchicalParam.delim, 1)[0]] + for sec in copy_sections: + # check that the user isn't trying to set variable or static + # params for the model (we won't worry about waveform or + # sampling transforms here, since that is checked for in the + # __init__) + if sec.subname in ['variable_params', 'static_params']: + raise ValueError("Section {} found in the config file; " + "[variable_params] and [static_params] " + "sections should not include model " + "labels. To specify parameters unique to " + "one or more sub-models, prepend the " + "individual parameter names with the " + "model label. See HierarchicalParam for " + "details.".format(sec)) + subcp.add_section(sec.subname) + for opt, val in cp.items(sec): + subcp.set(sec.subname, opt, val) + # set the static params + subcp.add_section('static_params') + for param in sparam_map[lbl]: + subcp.set('static_params', param.subname, + cp.get('static_params', param.fullname)) + # set the variable params: for now we'll just set all the + # variable params as static params + # so that the model doesn't raise an error looking for + # prior sections. We'll then manually set the variable + # params after the model is initialized + + subcp.add_section('variable_params') + for param in vparam_map[lbl]: + subcp.set('static_params', param.subname, 'REPLACE') + # add the outputs from the waveform transforms + for param in wfparam_map[lbl]: + subcp.set('static_params', param.subname, 'REPLACE') + + # initialize + submodel = read_from_config(subcp) + # move the static params back to variable + for p in vparam_map[lbl]: + submodel.static_params.pop(p.subname) + submodel.variable_params = tuple(p.subname + for p in vparam_map[lbl]) + # remove the waveform transform parameters + for p in wfparam_map[lbl]: + submodel.static_params.pop(p.subname) + # store + submodels[lbl] = submodel + logging.info("") + # now load the model + logging.info("Loading hierarchical model") + return super().from_config(cp, submodels=submodels)
+
+ + + +
+[docs] +class HierarchicalParam(str): + """Sub-class of str for hierarchical parameter names. + + This adds attributes that keep track of the model label(s) the parameter + is associated with, along with the name that is passed to the models. + + The following conventions are used for parsing parameter names: + + * Model labels and parameter names are separated by the ``delim`` class + attribute, which by default is ``__``, e.g., ``event1__mass``. + * Multiple model labels can be provided by separating the model labels + with the ``model_delim`` class attribute, which by default is ``_``, + e.g., ``event1_event2__mass``. Note that this means that individual + model labels cannot contain ``_``, else they'll be parsed as separate + models. + * Parameters that have no model labels prepended to them (i.e., there + is no ``__`` in the name) are common to all models. + + These parsing rules are applied by the :py:meth:`HierarchicalParam.parse` + method. + + Parameters + ---------- + fullname : str + Name of the hierarchical parameter. Should have format + ``{model1}[_{model2}[_{...}]]__{param}``. + possible_models : set of str + The possible sub-models a parameter can belong to. Should a set of + model labels. + + Attributes + ---------- + fullname : str + The full name of the parameter, including model labels. For example, + ``e1_e2__foo``. + models : set + The model labels the parameter is associated with. For example, + ``e1_e2__foo`` yields models ``e1, e2``. + subname : str + The name of the parameter without the model labels prepended to it. + For example, ``e1_e2__foo`` yields ``foo``. + """ + delim = '__' + model_delim = '_' + + def __new__(cls, fullname, possible_models): + fullname = str(fullname) + obj = str.__new__(cls, fullname) + obj.fullname = fullname + models, subp = HierarchicalParam.parse(fullname, possible_models) + obj.models = models + obj.subname = subp + return obj + +
+[docs] + @classmethod + def from_subname(cls, model_label, subname): + """Creates a HierarchicalParam from the given subname and model label. + """ + return cls(cls.delim.join([model_label, subname]), set([model_label]))
+ + +
+[docs] + @classmethod + def parse(cls, fullname, possible_models): + """Parses the full parameter name into the models the parameter is + associated with and the parameter name that is passed to the models. + + Parameters + ---------- + fullname : str + The full name of the parameter, which includes both the model + label(s) and the parameter name. + possible_models : set + Set of model labels the parameter can be associated with. + + Returns + ------- + models : list + List of the model labels the parameter is associated with. + subp : str + Parameter name that is passed to the models. This is the parameter + name with the model label(s) stripped from it. + """ + # make sure possible models is a set + possible_models = set(possible_models) + p = fullname.split(cls.delim, 1) + if len(p) == 1: + # is a global fullname, associate with all + subp = fullname + models = possible_models.copy() + else: + models, subp = p + # convert into set of model label(s) + models = set(models.split(cls.model_delim)) + # make sure the given labels are in the list of possible models + unknown = models - possible_models + if any(unknown): + raise ValueError('unrecognized model label(s) {} present in ' + 'parameter {}'.format(', '.join(unknown), + fullname)) + return models, subp
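+
+# A short example of the parsing rules described above (the labels are
+# hypothetical):
+#
+# >>> p = HierarchicalParam('event1_event2__srcmass1',
+# ...                       {'event1', 'event2', 'event3'})
+# >>> sorted(p.models), p.subname
+# (['event1', 'event2'], 'srcmass1')
+# >>> HierarchicalParam('distance', {'event1', 'event2'}).subname
+# 'distance'
+#
+# A parameter with no label prefix (such as ``distance`` above) is shared by
+# all sub-models.
+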
+
+ + + +
+[docs] +def hpiter(params, possible_models): + """Turns a list of parameter strings into a list of HierarchicalParams. + + Parameters + ---------- + params : list of str + List of parameter names. + possible_models : set + Set of model labels the parameters can be associated with. + + Returns + ------- + iterator : + Iterator of :py:class:`HierarchicalParam` instances. + """ + return map(lambda x: HierarchicalParam(x, possible_models), params)
+ + + +
+[docs] +def map_params(params): + """Creates a map of models -> parameters. + + Parameters + ---------- + params : list of HierarchicalParam instances + The list of hierarchical parameter names to parse. + + Returns + ------- + dict : + Dictionary of model labels -> associated parameters. + """ + param_map = {} + for p in params: + for lbl in p.models: + try: + param_map[lbl].update([p]) + except KeyError: + param_map[lbl] = set([p]) + return param_map
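+
+# A short, hedged example combining ``hpiter`` and ``map_params`` (the labels
+# and parameter names are hypothetical).  Label-free parameters map to every
+# sub-model, while labelled ones only map to their own:
+#
+# >>> pmap = map_params(hpiter(['event1__tc', 'distance'],
+# ...                          {'event1', 'event2'}))
+# >>> sorted(pmap['event1']), sorted(pmap['event2'])
+# (['distance', 'event1__tc'], ['distance'])
+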
+ + + +
+[docs] +class MultiSignalModel(HierarchicalModel): + """ Model for multiple signals which share data. + + Sub-models are treated as if the signals overlap in the data. This requires + the constituent models to implement a specific method to handle this case. + All models must be of the same type, or the specific model is responsible + for implementing cross-compatibility with another model. Each model h_i is + responsible for calculating its own log likelihood ratio, and + must also implement a method to calculate cross terms of the form + <h_i | h_j>, which arise from the full calculation of <d - h|d - h>. + This model inherits from the HierarchicalModel, so the syntax for + configuration files is the same. The primary model is used to determine + the noise terms <d | d>; by default this is the first model given. + """ + name = 'multi_signal' + + def __init__(self, variable_params, submodels, **kwargs): + super().__init__(variable_params, submodels, **kwargs) + + # Check what models each model supports + support = {} + ctypes = set() # The set of models we need to completely support + for lbl in self.submodels: + model = self.submodels[lbl] + + ctypes.add(type(model)) + if hasattr(model, 'multi_signal_support'): + support[lbl] = set(model.multi_signal_support) + + # pick the primary model if it supports the set of constituent models + for lbl in support: + if ctypes <= support[lbl]: + self.primary_model = lbl + logging.info('MultiSignalModel: PrimaryModel == %s', lbl) + break + else: + # Oh, no, we don't support this combo! + raise RuntimeError("It looks like the combination of models, {}, " + "for the MultiSignal model isn't supported by " + "any of the constituent models.".format(ctypes)) + + self.other_models = self.submodels.copy() + self.other_models.pop(self.primary_model) + self.other_models = list(self.other_models.values()) + +
+[docs] + def write_metadata(self, fp, group=None): + """Adds metadata to the output files + + Parameters + ---------- + fp : pycbc.inference.io.BaseInferenceFile instance + The inference file to write to. + group : str, optional + If provided, the metadata will be written to the attrs specified + by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is + written to the top-level attrs (``fp.attrs``). + """ + super().write_metadata(fp, group=group) + sampattrs = fp.getattrs(group=fp.samples_group) + # if a group is specified, prepend the lognl names with it + if group is None or group == '/': + prefix = '' + else: + prefix = group.replace('/', '__') + if not prefix.endswith('__'): + prefix += '__' + try: + model = self.submodels[self.primary_model] + sampattrs['{}lognl'.format(prefix)] = model.lognl + except AttributeError: + pass
+ + + def _loglikelihood(self): + for lbl, model in self.submodels.items(): + # Update the parameters of each + model.update(**{p.subname: self.current_params[p.fullname] + for p in self.param_map[lbl]}) + + # Calculate the combined loglikelihood + p = self.primary_model + logl = self.submodels[p].multi_loglikelihood(self.other_models) + + # store any extra stats from the submodels + for lbl, model in self.submodels.items(): + mstats = model.current_stats + for stat in self.extra_stats_map[lbl]: + setattr(self._current_stats, stat, mstats[stat.subname]) + return logl
+ + + +
+[docs] +class JointPrimaryMarginalizedModel(HierarchicalModel): + """Likelihood model for cases in which one of the submodels + can be marginalized to accelerate the total likelihood. This model + also allows the other submodels to be further accelerated during + marginalization, if some extrinsic parameters can be tightly constrained + by the primary model. For example, in joint EM + GW parameter + estimation the sky localization can be well measured, and in LISA + 3G + multiband observations the (tc, ra, dec) of SOBHB signals can be tightly + constrained by the 3G network, so this model is useful in both cases. + """ + name = 'joint_primary_marginalized' + + def __init__(self, variable_params, submodels, **kwargs): + super().__init__(variable_params, submodels, **kwargs) + + # use the ground-based submodel as the primary model + self.primary_model = self.submodels[kwargs['primary_lbl'][0]] + self.primary_lbl = kwargs['primary_lbl'][0] + self.other_models = self.submodels.copy() + self.other_models.pop(kwargs['primary_lbl'][0]) + self.other_models = list(self.other_models.values()) + + # determine whether to accelerate total_loglr + from .tools import str_to_bool + self.static_margin_params_in_other_models = \ + str_to_bool(kwargs['static_margin_params_in_other_models'][0]) + +
+[docs] + def write_metadata(self, fp, group=None): + """Adds metadata to the output files + + Parameters + ---------- + fp : pycbc.inference.io.BaseInferenceFile instance + The inference file to write to. + group : str, optional + If provided, the metadata will be written to the attrs specified + by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is + written to the top-level attrs (``fp.attrs``). + """ + super().write_metadata(fp, group=group) + sampattrs = fp.getattrs(group=fp.samples_group) + # if a group is specified, prepend the lognl names with it + if group is None or group == '/': + prefix = '' + else: + prefix = group.replace('/', '__') + if not prefix.endswith('__'): + prefix += '__' + try: + for lbl, model in self.submodels.items(): + sampattrs['{}lognl'.format(prefix + '%s__' % lbl) + ] = model.lognl + except AttributeError: + pass
+ + +
+[docs] + def total_loglr(self): + r"""Computes the total log likelihood ratio, + + .. math:: + + \log \mathcal{L}(\Theta) = \sum_i + \left<h_i(\Theta)|d_i\right> - + \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood ratio. + """ + # calculate <d-h|d-h> = <h|h> - 2<h|d> + <d|d> up to a constant + + self.primary_model.return_sh_hh = True + sh_primary, hh_primary = self.primary_model.loglr + self.primary_model.return_sh_hh = False + # set loglr, otherwise it will store (sh, hh) + setattr(self.primary_model._current_stats, 'loglr', + self.primary_model.marginalize_loglr(sh_primary, hh_primary)) + if isinstance(sh_primary, numpy.ndarray): + nums = len(sh_primary) + else: + nums = 1 + + margin_params = {} + if self.static_margin_params_in_other_models: + # Due to the high precision of the extrinsic parameters constrained + # by the primary model, the mismatch of the other models' waveforms + # when varying those parameters is very small, so we can keep them + # static to accelerate total_loglr. Here, we use the matched-filtering + # SNR instead of the likelihood, because luminosity distance and + # inclination have a very strong degeneracy: a change in inclination + # changes the best-match distance, and hence the amplitude of the + # waveform. Using the SNR cancels out the effect of the amplitude. + i_max_extrinsic = numpy.argmax( + numpy.abs(sh_primary) / hh_primary**0.5) + for p in self.primary_model.marginalized_params_name: + if isinstance(self.primary_model.current_params[p], + numpy.ndarray): + margin_params[p] = \ + self.primary_model.current_params[p][i_max_extrinsic] + else: + margin_params[p] = self.primary_model.current_params[p] + else: + for key, value in self.primary_model.current_params.items(): + # add marginalize_vector_params + if key in self.primary_model.marginalized_params_name: + margin_params[key] = value + + # add the likelihood contribution from other_models; we + # calculate sh/hh for each marginalized parameter point + sh_others = numpy.full(nums, 0 + 0.0j) + hh_others = numpy.zeros(nums) + + # update parameters in other_models + for _, other_model in enumerate(self.other_models): + # not using self.primary_model.current_params, because the other + # model may have its own static parameters + current_params_other = other_model.current_params.copy() + if not self.static_margin_params_in_other_models: + for i in range(nums): + current_params_other.update( + {key: value[i] if isinstance(value, numpy.ndarray) + else value for key, value in margin_params.items()}) + other_model.update(**current_params_other) + other_model.return_sh_hh = True + sh_other, hh_other = other_model.loglr + sh_others[i] += sh_other + hh_others[i] += hh_other + other_model.return_sh_hh = False + # set loglr, otherwise it will store (sh, hh) + setattr(other_model._current_stats, 'loglr', + other_model.marginalize_loglr(sh_other, hh_other)) + else: + # use one margin point set to approximate all the others + current_params_other.update( + {key: value[0] if isinstance(value, numpy.ndarray) + else value for key, value in margin_params.items()}) + other_model.update(**current_params_other) + other_model.return_sh_hh = True + sh_other, hh_other = other_model.loglr + other_model.return_sh_hh = False + # set loglr, otherwise it will store (sh, hh) + setattr(other_model._current_stats, 'loglr', + other_model.marginalize_loglr(sh_other, hh_other)) + sh_others += sh_other + hh_others += hh_other + + if nums == 1: + # the original sh_others/hh_others are numpy arrays, which may + # not match the type of sh/hh_primary during reconstruction; + # when reconstructing distance, sh_others/hh_others need to be + # scalars + sh_others = sh_others[0] + hh_others = hh_others[0] + sh_total = sh_primary + sh_others + hh_total = hh_primary + hh_others + + loglr = self.primary_model.marginalize_loglr(sh_total, hh_total) + + return loglr
+ + +
+[docs] + def others_lognl(self): + """Calculate the combined lognl from all the other sub-models.""" + total_others_lognl = 0 + for model in self.other_models: + total_others_lognl += model.lognl + return total_others_lognl
+ + +
+[docs] + def update_all_models(self, **params): + """Updates all submodels with the given parameters. + + This is also useful for loglr checking: the original update method + in the base module can't correctly update the parameters of the + submodels when checking the loglr.""" + for lbl, model in self.submodels.items(): + if self.param_map != {}: + p = {params.subname: self.current_params[params.fullname] + for params in self.param_map[lbl]} + else: + # dummy sampler doesn't have real variables, + # which means self.param_map is {} + p = {} + p.update(params) + model.update(**p)
+ + + def _loglikelihood(self): + self.update_all_models() + + # calculate the combined loglikelihood + logl = self.total_loglr() + self.primary_model.lognl + \ + self.others_lognl() + + # store any extra stats from the submodels + for lbl, model in self.submodels.items(): + mstats = model.current_stats + for stat in self.extra_stats_map[lbl]: + setattr(self._current_stats, stat, mstats[stat.subname]) + return logl + +
+[docs] + @classmethod + def from_config(cls, cp, **kwargs): + r"""Initializes an instance of this class from the given config file. + For more details, see `from_config` in `HierarchicalModel`. + + Parameters + ---------- + cp : WorkflowConfigParser + Config file parser to read. + \**kwargs : + All additional keyword arguments are passed to the class. Any + provided keyword will override what is in the config file. + """ + # we need the read from config function from the init; to prevent + # circular imports, we import it here + from pycbc.inference.models import read_from_config + # get the submodels + kwargs['primary_lbl'] = shlex.split(cp.get('model', 'primary_model')) + kwargs['others_lbls'] = shlex.split(cp.get('model', 'other_models')) + submodel_lbls = kwargs['primary_lbl'] + kwargs['others_lbls'] + # sort parameters by model + vparam_map = map_params(hpiter(cp.options('variable_params'), + submodel_lbls)) + sparam_map = map_params(hpiter(cp.options('static_params'), + submodel_lbls)) + + # get the acceleration label + kwargs['static_margin_params_in_other_models'] = shlex.split( + cp.get('model', 'static_margin_params_in_other_models')) + + # we'll need any waveform transforms for the initializing sub-models, + # as the underlying models will receive the output of those transforms + + # if `waveform_transforms` section doesn't have the prefix of + # sub-model's name, then add this `waveform_transforms` section + # into top level, if not, add it into sub-models' config + if any(cp.get_subsections('waveform_transforms')): + waveform_transforms = transforms.read_transforms_from_config( + cp, 'waveform_transforms') + wfoutputs = set.union(*[t.outputs + for t in waveform_transforms]) + wfparam_map = map_params(hpiter(wfoutputs, submodel_lbls)) + else: + wfparam_map = {lbl: [] for lbl in submodel_lbls} + # initialize the models + submodels = {} + logging.info("Loading submodels") + for lbl in submodel_lbls: + logging.info("============= %s =============", lbl) + # create a config parser to pass to the model + subcp = WorkflowConfigParser() + # copy sections over that start with the model label (this should + # include the [model] section for that model) + copy_sections = [ + HierarchicalParam(sec, submodel_lbls) + for sec in cp.sections() if lbl in + sec.split('-')[0].split(HierarchicalParam.delim, 1)[0]] + for sec in copy_sections: + # check that the user isn't trying to set variable or static + # params for the model (we won't worry about waveform or + # sampling transforms here, since that is checked for in the + # __init__) + if sec.subname in ['variable_params', 'static_params']: + raise ValueError("Section {} found in the config file; " + "[variable_params] and [static_params] " + "sections should not include model " + "labels. To specify parameters unique to " + "one or more sub-models, prepend the " + "individual parameter names with the " + "model label. See HierarchicalParam for " + "details.".format(sec)) + subcp.add_section(sec.subname) + for opt, val in cp.items(sec): + subcp.set(sec.subname, opt, val) + # set the static params + subcp.add_section('static_params') + for param in sparam_map[lbl]: + subcp.set('static_params', param.subname, + cp.get('static_params', param.fullname)) + + # set the variable params: different from the standard + # hierarchical model, in this JointPrimaryMarginalizedModel model, + # all sub-models has the same variable parameters, so we don't + # need to worry about the unique variable issue. 
Besides, + # the primary model needs to do marginalization, so we must set + # variable_params and prior section before initializing it. + + subcp.add_section('variable_params') + for param in vparam_map[lbl]: + if lbl in kwargs['primary_lbl']: + # set variable_params for the primary model + subcp.set('variable_params', param.subname, + cp.get('variable_params', param.fullname)) + else: + # all variable_params in other models will come + # from the primary model during sampling + subcp.set('static_params', param.subname, 'REPLACE') + + for section in cp.sections(): + # the primary model needs prior of marginlized parameters + if 'prior-' in section and lbl in kwargs['primary_lbl']: + prior_section = '%s' % section + subcp[prior_section] = cp[prior_section] + + # similar to the standard hierarchical model, + # add the outputs from the waveform transforms if sub-model + # doesn't need marginalization + if lbl not in kwargs['primary_lbl']: + for param in wfparam_map[lbl]: + subcp.set('static_params', param.subname, 'REPLACE') + + # save the vitual config file to disk for later check + with open('%s.ini' % lbl, 'w', encoding='utf-8') as file: + subcp.write(file) + + # initialize + submodel = read_from_config(subcp) + + if lbl not in kwargs['primary_lbl']: + # similar to the standard hierarchical model, + # move the static params back to variable if sub-model + # doesn't need marginalization + for p in vparam_map[lbl]: + submodel.static_params.pop(p.subname) + submodel.variable_params = tuple(p.subname + for p in vparam_map[lbl]) + # similar to the standard hierarchical model, + # remove the waveform transform parameters if sub-model + # doesn't need marginalization + for p in wfparam_map[lbl]: + submodel.static_params.pop(p.subname) + submodels[lbl] = submodel + logging.info("") + + # remove all marginalized parameters from the top-level model's + # `variable_params` and `prior` sections + # here we ignore `coa_phase`, because if it's been marginalized, + # it will not be listed in `variable_params` and `prior` sections + primary_model = submodels[kwargs['primary_lbl'][0]] + marginalized_params = primary_model.marginalized_params_name.copy() + + for p in primary_model.static_params.keys(): + p_full = '%s__%s' % (kwargs['primary_lbl'][0], p) + if p_full not in cp['static_params']: + cp['static_params'][p_full] = "%s" % \ + primary_model.static_params[p] + + for section in cp.sections(): + if 'prior-' in section: + p = section.split('-')[-1] + if p in marginalized_params: + cp['variable_params'].pop(p) + cp.pop(section) + + # save the vitual config file to disk for later check + with open('internal_top.ini', 'w', encoding='utf-8') as file: + cp.write(file) + + # now load the model + logging.info("Loading joint_primary_marginalized model") + return super(HierarchicalModel, cls).from_config( + cp, submodels=submodels, **kwargs)
+ + +
+[docs] + def reconstruct(self, rec=None, seed=None): + """ Reconstruct marginalized parameters by using the primary + model's reconstruct method, total_loglr, and others_lognl. + """ + if seed: + numpy.random.seed(seed) + + if rec is None: + rec = {} + + def get_loglr(): + # make sure waveform transforms have been applied in + # the top-level model + if self.waveform_transforms is not None: + self._current_params = transforms.apply_transforms( + self._current_params, self.waveform_transforms, + inverse=False) + self.update_all_models(**rec) + return self.total_loglr() + + rec = self.primary_model.reconstruct( + rec=rec, seed=seed, set_loglr=get_loglr) + # the primary model's reconstruct doesn't know lognl in other models + rec['loglikelihood'] += self.others_lognl() + return rec
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/marginalized_gaussian_noise.html b/latest/html/_modules/pycbc/inference/models/marginalized_gaussian_noise.html new file mode 100644 index 00000000000..7c3f89f752b --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/marginalized_gaussian_noise.html @@ -0,0 +1,974 @@ + + + + + + pycbc.inference.models.marginalized_gaussian_noise — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.marginalized_gaussian_noise

+# Copyright (C) 2018  Charlie Hoy, Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This module provides model classes that assume the noise is Gaussian and
+allows for the likelihood to be marginalized over phase and/or time and/or
+distance.
+"""
+
+import itertools
+import logging
+import numpy
+from scipy import special
+
+from pycbc.waveform import generator
+from pycbc.waveform import (NoWaveformError, FailedWaveformError)
+from pycbc.detector import Detector
+from .gaussian_noise import (BaseGaussianNoise,
+                             create_waveform_generator,
+                             GaussianNoise)
+from .tools import marginalize_likelihood, DistMarg
+
+
+
+[docs] +class MarginalizedPhaseGaussianNoise(GaussianNoise): + r"""The likelihood is analytically marginalized over phase. + + This class can be used with signal models that can be written as: + + .. math:: + + \tilde{h}(f; \Theta, \phi) = A(f; \Theta)e^{i\Psi(f; \Theta) + i \phi}, + + where :math:`\phi` is an arbitrary phase constant. This phase constant + can be analytically marginalized over with a uniform prior as follows: + assuming the noise is stationary and Gaussian (see `GaussianNoise` + for details), the posterior is: + + .. math:: + + p(\Theta,\phi|d) + &\propto p(\Theta)p(\phi)p(d|\Theta,\phi) \\ + &\propto p(\Theta)\frac{1}{2\pi}\exp\left[ + -\frac{1}{2}\sum_{i}^{N_D} \left< + h_i(\Theta,\phi) - d_i, h_i(\Theta,\phi) - d_i + \right>\right]. + + Here, the sum is over the number of detectors :math:`N_D`, :math:`d_i` + and :math:`h_i` are the data and signal in detector :math:`i`, + respectively, and we have assumed a uniform prior on :math:`\phi \in [0, + 2\pi)`. With the form of the signal model given above, the inner product + in the exponent can be written as: + + .. math:: + + -\frac{1}{2}\left<h_i - d_i, h_i- d_i\right> + &= \left<h_i, d_i\right> - + \frac{1}{2}\left<h_i, h_i\right> - + \frac{1}{2}\left<d_i, d_i\right> \\ + &= \Re\left\{O(h^0_i, d_i)e^{-i\phi}\right\} - + \frac{1}{2}\left<h^0_i, h^0_i\right> - + \frac{1}{2}\left<d_i, d_i\right>, + + where: + + .. math:: + + h_i^0 &\equiv \tilde{h}_i(f; \Theta, \phi=0); \\ + O(h^0_i, d_i) &\equiv 4 \int_0^\infty + \frac{\tilde{h}_i^*(f; \Theta,0)\tilde{d}_i(f)}{S_n(f)}\mathrm{d}f. + + Gathering all of the terms that are not dependent on :math:`\phi` together: + + .. math:: + + \alpha(\Theta, d) \equiv \exp\left[-\frac{1}{2}\sum_i + \left<h^0_i, h^0_i\right> + \left<d_i, d_i\right>\right], + + we can marginalize the posterior over :math:`\phi`: + + .. math:: + + p(\Theta|d) + &\propto p(\Theta)\alpha(\Theta,d)\frac{1}{2\pi} + \int_{0}^{2\pi}\exp\left[\Re \left\{ + e^{-i\phi} \sum_i O(h^0_i, d_i) + \right\}\right]\mathrm{d}\phi \\ + &\propto p(\Theta)\alpha(\Theta, d)\frac{1}{2\pi} + \int_{0}^{2\pi}\exp\left[ + x(\Theta,d)\cos(\phi) + y(\Theta, d)\sin(\phi) + \right]\mathrm{d}\phi. + + The integral in the last line is equal to :math:`2\pi I_0(\sqrt{x^2+y^2})`, + where :math:`I_0` is the modified Bessel function of the first kind. Thus + the marginalized posterior is: + + .. math:: + + p(\Theta|d) \propto + I_0\left(\left|\sum_i O(h^0_i, d_i)\right|\right) + p(\Theta)\exp\left[\frac{1}{2}\sum_i\left( \left<h^0_i, h^0_i\right> - + \left<d_i, d_i\right> \right)\right] + """ + name = 'marginalized_phase' + + def __init__(self, variable_params, data, low_frequency_cutoff, psds=None, + high_frequency_cutoff=None, normalize=False, + static_params=None, **kwargs): + # set up the boiler-plate attributes + super(MarginalizedPhaseGaussianNoise, self).__init__( + variable_params, data, low_frequency_cutoff, psds=psds, + high_frequency_cutoff=high_frequency_cutoff, normalize=normalize, + static_params=static_params, **kwargs) + + @property + def _extra_stats(self): + """Adds ``loglr``, plus ``cplx_loglr`` and ``optimal_snrsq`` in each + detector.""" + return ['loglr', 'maxl_phase'] + \ + ['{}_optimal_snrsq'.format(det) for det in self._data] + + def _nowaveform_loglr(self): + """Convenience function to set loglr values if no waveform generated. 
+ """ + setattr(self._current_stats, 'loglikelihood', -numpy.inf) + # maxl phase doesn't exist, so set it to nan + setattr(self._current_stats, 'maxl_phase', numpy.nan) + for det in self._data: + # snr can't be < 0 by definition, so return 0 + setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.) + return -numpy.inf + + def _loglr(self): + r"""Computes the log likelihood ratio, + .. math:: + \log \mathcal{L}(\Theta) = + I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) - + \frac{1}{2}\left<h^0_i, h^0_i\right>, + at the current point in parameter space :math:`\Theta`. + Returns + ------- + float + The value of the log likelihood ratio evaluated at the given point. + """ + params = self.current_params + try: + if self.all_ifodata_same_rate_length: + wfs = self.waveform_generator.generate(**params) + else: + wfs = {} + for det in self.data: + wfs.update(self.waveform_generator[det].generate(**params)) + + except NoWaveformError: + return self._nowaveform_loglr() + except FailedWaveformError as e: + if self.ignore_failed_waveforms: + return self._nowaveform_loglr() + else: + raise e + hh = 0. + hd = 0j + for det, h in wfs.items(): + # the kmax of the waveforms may be different than internal kmax + kmax = min(len(h), self._kmax[det]) + if self._kmin[det] >= kmax: + # if the waveform terminates before the filtering low frequency + # cutoff, then the loglr is just 0 for this detector + hh_i = 0. + hd_i = 0j + else: + # whiten the waveform + h[self._kmin[det]:kmax] *= \ + self._weight[det][self._kmin[det]:kmax] + # calculate inner products + hh_i = h[self._kmin[det]:kmax].inner( + h[self._kmin[det]:kmax]).real + hd_i = h[self._kmin[det]:kmax].inner( + self._whitened_data[det][self._kmin[det]:kmax]) + # store + setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh_i) + hh += hh_i + hd += hd_i + self._current_stats.maxl_phase = numpy.angle(hd) + return marginalize_likelihood(hd, hh, phase=True)
+ + + +
+[docs] +class MarginalizedTime(DistMarg, BaseGaussianNoise): + r""" This likelihood numerically marginalizes over time + + This likelihood is optimized for marginalizing over time, but can also + handle marginalization over polarization, phase (where appropriate), + and sky location. The time series is interpolated using a + quadratic apparoximation for sub-sample times. + """ + name = 'marginalized_time' + + def __init__(self, variable_params, + data, low_frequency_cutoff, psds=None, + high_frequency_cutoff=None, normalize=False, + sample_rate=None, + **kwargs): + + # the flag used in `_loglr` + self.return_sh_hh = False + self.sample_rate = float(sample_rate) + self.kwargs = kwargs + variable_params, kwargs = self.setup_marginalization( + variable_params, + **kwargs) + + # set up the boiler-plate attributes + super(MarginalizedTime, self).__init__( + variable_params, data, low_frequency_cutoff, psds=psds, + high_frequency_cutoff=high_frequency_cutoff, normalize=normalize, + **kwargs) + # Determine if all data have the same sampling rate and segment length + if self.all_ifodata_same_rate_length: + # create a waveform generator for all ifos + self.waveform_generator = create_waveform_generator( + self.variable_params, self.data, + waveform_transforms=self.waveform_transforms, + recalibration=self.recalibration, + generator_class=generator.FDomainDetFrameTwoPolNoRespGenerator, + gates=self.gates, **kwargs['static_params']) + else: + # create a waveform generator for each ifo respectively + self.waveform_generator = {} + for det in self.data: + self.waveform_generator[det] = create_waveform_generator( + self.variable_params, {det: self.data[det]}, + waveform_transforms=self.waveform_transforms, + recalibration=self.recalibration, + generator_class=generator.FDomainDetFrameTwoPolNoRespGenerator, + gates=self.gates, **kwargs['static_params']) + + self.dets = {} + + if sample_rate is not None: + for ifo in self.data: + if self.sample_rate < self.data[ifo].sample_rate: + raise ValueError("Model sample rate was set less than the" + " data. ") + logging.info("Using %s sample rate for marginalization", + sample_rate) + + def _nowaveform_loglr(self): + """Convenience function to set loglr values if no waveform generated. + """ + return -numpy.inf + + def _loglr(self): + r"""Computes the log likelihood ratio, + or inner product <s|h> and <h|h> if `self.return_sh_hh` is True. + + .. math:: + + \log \mathcal{L}(\Theta) = \sum_i + \left<h_i(\Theta)|d_i\right> - + \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood ratio. + """ + from pycbc.filter import matched_filter_core + + params = self.current_params + try: + if self.all_ifodata_same_rate_length: + wfs = self.waveform_generator.generate(**params) + else: + wfs = {} + for det in self.data: + wfs.update(self.waveform_generator[det].generate(**params)) + except NoWaveformError: + return self._nowaveform_loglr() + except FailedWaveformError as e: + if self.ignore_failed_waveforms: + return self._nowaveform_loglr() + else: + raise e + + sh_total = hh_total = 0. 
+ snr_estimate = {} + cplx_hpd = {} + cplx_hcd = {} + hphp = {} + hchc = {} + hphc = {} + for det, (hp, hc) in wfs.items(): + # the kmax of the waveforms may be different than internal kmax + kmax = min(max(len(hp), len(hc)), self._kmax[det]) + slc = slice(self._kmin[det], kmax) + + # whiten both polarizations + hp[self._kmin[det]:kmax] *= self._weight[det][slc] + hc[self._kmin[det]:kmax] *= self._weight[det][slc] + + # Use a higher sample rate if requested + if self.sample_rate is not None: + tlen = int(round(self.sample_rate * + self.whitened_data[det].duration)) + flen = tlen // 2 + 1 + hp.resize(flen) + hc.resize(flen) + self._whitened_data[det].resize(flen) + + cplx_hpd[det], _, _ = matched_filter_core( + hp, + self._whitened_data[det], + low_frequency_cutoff=self._f_lower[det], + high_frequency_cutoff=self._f_upper[det], + h_norm=1) + cplx_hcd[det], _, _ = matched_filter_core( + hc, + self._whitened_data[det], + low_frequency_cutoff=self._f_lower[det], + high_frequency_cutoff=self._f_upper[det], + h_norm=1) + + hphp[det] = hp[slc].inner(hp[slc]).real + hchc[det] = hc[slc].inner(hc[slc]).real + hphc[det] = hp[slc].inner(hc[slc]).real + + snr_proxy = ((cplx_hpd[det] / hphp[det] ** 0.5).squared_norm() + + (cplx_hcd[det] / hchc[det] ** 0.5).squared_norm()) + snr_estimate[det] = (0.5 * snr_proxy) ** 0.5 + + self.draw_ifos(snr_estimate, log=False, **self.kwargs) + self.snr_draw(snrs=snr_estimate) + + for det in wfs: + if det not in self.dets: + self.dets[det] = Detector(det) + + if self.precalc_antenna_factors: + fp, fc, dt = self.get_precalc_antenna_factors(det) + else: + fp, fc = self.dets[det].antenna_pattern( + params['ra'], + params['dec'], + params['polarization'], + params['tc']) + dt = self.dets[det].time_delay_from_earth_center(params['ra'], + params['dec'], + params['tc']) + dtc = params['tc'] + dt + + cplx_hd = fp * cplx_hpd[det].at_time(dtc, + interpolate='quadratic') + cplx_hd += fc * cplx_hcd[det].at_time(dtc, + interpolate='quadratic') + hh = (fp * fp * hphp[det] + + fc * fc * hchc[det] + + 2.0 * fp * fc * hphc[det]) + + sh_total += cplx_hd + hh_total += hh + + loglr = self.marginalize_loglr(sh_total, hh_total) + if self.return_sh_hh: + results = (sh_total, hh_total) + else: + results = loglr + return results
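+# Hedged restatement (not part of the module) of the per-detector SNR proxy
+# assembled above from the two whitened polarizations; it is used when
+# choosing which detectors and time samples to draw in the marginalization.
+import numpy
+
+def sketch_snr_proxy(cplx_hpd, cplx_hcd, hphp, hchc):
+    """cplx_hpd, cplx_hcd: complex <h+,d>, <hx,d>; hphp, hchc: <h+,h+>, <hx,hx>."""
+    rho2 = abs(cplx_hpd) ** 2 / hphp + abs(cplx_hcd) ** 2 / hchc
+    return (0.5 * rho2) ** 0.5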
+ + + +
+[docs] +class MarginalizedPolarization(DistMarg, BaseGaussianNoise): + r""" This likelihood numerically marginalizes over polarization angle + + This class implements the Gaussian likelihood with an explicit numerical + marginalization over polarization angle. This is accomplished using + a fixed set of integration points distributed uniformly between + 0 and 2pi. By default, 1000 integration points are used. + The 'polarization_samples' argument can be passed to set an alternate + number of integration points. + """ + name = 'marginalized_polarization' + + def __init__(self, variable_params, data, low_frequency_cutoff, psds=None, + high_frequency_cutoff=None, normalize=False, + polarization_samples=1000, + **kwargs): + + variable_params, kwargs = self.setup_marginalization( + variable_params, + polarization_samples=polarization_samples, + **kwargs) + + # set up the boiler-plate attributes + super(MarginalizedPolarization, self).__init__( + variable_params, data, low_frequency_cutoff, psds=psds, + high_frequency_cutoff=high_frequency_cutoff, normalize=normalize, + **kwargs) + # Determine if all data have the same sampling rate and segment length + if self.all_ifodata_same_rate_length: + # create a waveform generator for all ifos + self.waveform_generator = create_waveform_generator( + self.variable_params, self.data, + waveform_transforms=self.waveform_transforms, + recalibration=self.recalibration, + generator_class=generator.FDomainDetFrameTwoPolGenerator, + gates=self.gates, **kwargs['static_params']) + else: + # create a waveform generator for each ifo respectively + self.waveform_generator = {} + for det in self.data: + self.waveform_generator[det] = create_waveform_generator( + self.variable_params, {det: self.data[det]}, + waveform_transforms=self.waveform_transforms, + recalibration=self.recalibration, + generator_class=generator.FDomainDetFrameTwoPolGenerator, + gates=self.gates, **kwargs['static_params']) + + self.dets = {} + + @property + def _extra_stats(self): + """Adds ``loglr``, ``maxl_polarization``, and the ``optimal_snrsq`` in + each detector. + """ + return ['loglr', 'maxl_polarization', 'maxl_loglr'] + \ + ['{}_optimal_snrsq'.format(det) for det in self._data] + + def _nowaveform_loglr(self): + """Convenience function to set loglr values if no waveform generated. + """ + setattr(self._current_stats, 'loglr', -numpy.inf) + # maxl phase doesn't exist, so set it to nan + setattr(self._current_stats, 'maxl_polarization', numpy.nan) + for det in self._data: + # snr can't be < 0 by definition, so return 0 + setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.) + return -numpy.inf + + def _loglr(self): + r"""Computes the log likelihood ratio, + + .. math:: + + \log \mathcal{L}(\Theta) = \sum_i + \left<h_i(\Theta)|d_i\right> - + \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood ratio. + """ + params = self.current_params + try: + if self.all_ifodata_same_rate_length: + wfs = self.waveform_generator.generate(**params) + else: + wfs = {} + for det in self.data: + wfs.update(self.waveform_generator[det].generate(**params)) + except NoWaveformError: + return self._nowaveform_loglr() + except FailedWaveformError as e: + if self.ignore_failed_waveforms: + return self._nowaveform_loglr() + else: + raise e + + lr = sh_total = hh_total = 0.
+ for det, (hp, hc) in wfs.items(): + if det not in self.dets: + self.dets[det] = Detector(det) + fp, fc = self.dets[det].antenna_pattern( + params['ra'], + params['dec'], + params['polarization'], + params['tc']) + + # the kmax of the waveforms may be different than internal kmax + kmax = min(max(len(hp), len(hc)), self._kmax[det]) + slc = slice(self._kmin[det], kmax) + + # whiten both polarizations + hp[self._kmin[det]:kmax] *= self._weight[det][slc] + hc[self._kmin[det]:kmax] *= self._weight[det][slc] + + # h = fp * hp + hc * hc + # <h, d> = fp * <hp,d> + fc * <hc,d> + # the inner products + cplx_hpd = hp[slc].inner(self._whitened_data[det][slc]) # <hp, d> + cplx_hcd = hc[slc].inner(self._whitened_data[det][slc]) # <hc, d> + + cplx_hd = fp * cplx_hpd + fc * cplx_hcd + + # <h, h> = <fp * hp + fc * hc, fp * hp + fc * hc> + # = Real(fpfp * <hp,hp> + fcfc * <hc,hc> + \ + # fphc * (<hp, hc> + <hc, hp>)) + hphp = hp[slc].inner(hp[slc]).real # < hp, hp> + hchc = hc[slc].inner(hc[slc]).real # <hc, hc> + + # Below could be combined, but too tired to figure out + # if there should be a sign applied if so + hphc = hp[slc].inner(hc[slc]).real # <hp, hc> + hchp = hc[slc].inner(hp[slc]).real # <hc, hp> + + hh = fp * fp * hphp + fc * fc * hchc + fp * fc * (hphc + hchp) + # store + setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh) + sh_total += cplx_hd + hh_total += hh + + lr, idx, maxl = self.marginalize_loglr(sh_total, hh_total, + return_peak=True) + + # store the maxl polarization + setattr(self._current_stats, + 'maxl_polarization', + params['polarization']) + setattr(self._current_stats, 'maxl_loglr', maxl) + + # just store the maxl optimal snrsq + for det in wfs: + p = '{}_optimal_snrsq'.format(det) + setattr(self._current_stats, p, + getattr(self._current_stats, p)[idx]) + + return lr
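+# Hedged conceptual sketch (not the actual marginalize_loglr implementation):
+# averaging the likelihood over a uniform grid of polarization samples, which
+# is what the numerical marginalization above amounts to.
+import numpy
+from scipy.special import logsumexp
+
+def sketch_polarization_marginalized_loglr(sh, hh):
+    """sh, hh: arrays of <h,d> and <h,h> evaluated at each polarization sample."""
+    loglr = sh.real - 0.5 * hh
+    return logsumexp(loglr) - numpy.log(len(loglr))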
+ + + +
+[docs] +class MarginalizedHMPolPhase(BaseGaussianNoise): + r"""Numerically marginalizes waveforms with higher modes over polarization + `and` phase. + + This class implements the Gaussian likelihood with an explicit numerical + marginalization over polarization angle and orbital phase. This is + accomplished using a fixed set of integration points distributed uniformly + between 0 and 2:math:`\pi` for both the polarization and phase. By default, + 100 integration points are used for each parameter, giving :math:`10^4` + evaluation points in total. This can be modified using the + ``polarization_samples`` and ``coa_phase_samples`` arguments. + + This only works with waveforms that return separate spherical harmonic + modes for each waveform. For a list of currently supported approximants, + see :py:func:`pycbc.waveform.waveform_modes.fd_waveform_mode_approximants` + and :py:func:`pycbc.waveform.waveform_modes.td_waveform_mode_approximants`. + + Parameters + ---------- + variable_params : (tuple of) string(s) + A tuple of parameter names that will be varied. + data : dict + A dictionary of data, in which the keys are the detector names and the + values are the data (assumed to be unwhitened). All data must have the + same frequency resolution. + low_frequency_cutoff : dict + A dictionary of starting frequencies, in which the keys are the + detector names and the values are the starting frequencies for the + respective detectors to be used for computing inner products. + psds : dict, optional + A dictionary of FrequencySeries keyed by the detector names. The + dictionary must have a psd for each detector specified in the data + dictionary. If provided, the inner products in each detector will be + weighted by 1/psd of that detector. + high_frequency_cutoff : dict, optional + A dictionary of ending frequencies, in which the keys are the + detector names and the values are the ending frequencies for the + respective detectors to be used for computing inner products. If not + provided, the minimum of the largest frequency stored in the data + and a given waveform will be used. + normalize : bool, optional + If True, the normalization factor :math:`alpha` will be included in the + log likelihood. See :py:class:`GaussianNoise` for details. Default is + to not include it. + polarization_samples : int, optional + How many points to use in polarization. Default is 100. + coa_phase_samples : int, optional + How many points to use in phase. Defaults is 100. + \**kwargs : + All other keyword arguments are passed to + :py:class:`BaseGaussianNoise + <pycbc.inference.models.gaussian_noise.BaseGaussianNoise>`. 
+ + """ + name = 'marginalized_hmpolphase' + + def __init__(self, variable_params, data, low_frequency_cutoff, psds=None, + high_frequency_cutoff=None, normalize=False, + polarization_samples=100, + coa_phase_samples=100, + static_params=None, **kwargs): + # set up the boiler-plate attributes + super(MarginalizedHMPolPhase, self).__init__( + variable_params, data, low_frequency_cutoff, psds=psds, + high_frequency_cutoff=high_frequency_cutoff, normalize=normalize, + static_params=static_params, **kwargs) + # create the waveform generator + self.waveform_generator = create_waveform_generator( + self.variable_params, self.data, + waveform_transforms=self.waveform_transforms, + recalibration=self.recalibration, + generator_class=generator.FDomainDetFrameModesGenerator, + gates=self.gates, **self.static_params) + pol = numpy.linspace(0, 2*numpy.pi, polarization_samples) + phase = numpy.linspace(0, 2*numpy.pi, coa_phase_samples) + # remap to every combination of the parameters + # this gets every combination by mappin them to an NxM grid + # one needs to be transposed so that they run allong opposite + # dimensions + n = coa_phase_samples * polarization_samples + self.nsamples = n + self.pol = numpy.resize(pol, n) + phase = numpy.resize(phase, n) + phase = phase.reshape(coa_phase_samples, polarization_samples) + self.phase = phase.T.flatten() + self._phase_fac = {} + self.dets = {} + +
+[docs] + def phase_fac(self, m): + r"""The phase :math:`\exp[i m \phi]`.""" + try: + return self._phase_fac[m] + except KeyError: + # hasn't been computed yet, calculate it + self._phase_fac[m] = numpy.exp(1.0j * m * self.phase) + return self._phase_fac[m]
+ + + @property + def _extra_stats(self): + """Adds ``maxl_polarization`` and the ``maxl_phase`` + """ + return ['maxl_polarization', 'maxl_phase', ] + + def _nowaveform_loglr(self): + """Convenience function to set loglr values if no waveform generated. + """ + # maxl phase doesn't exist, so set it to nan + setattr(self._current_stats, 'maxl_polarization', numpy.nan) + setattr(self._current_stats, 'maxl_phase', numpy.nan) + return -numpy.inf + + def _loglr(self, return_unmarginalized=False): + r"""Computes the log likelihood ratio, + + .. math:: + + \log \mathcal{L}(\Theta) = \sum_i + \left<h_i(\Theta)|d_i\right> - + \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood ratio. + """ + params = self.current_params + try: + wfs = self.waveform_generator.generate(**params) + except NoWaveformError: + return self._nowaveform_loglr() + except FailedWaveformError as e: + if self.ignore_failed_waveforms: + return self._nowaveform_loglr() + else: + raise e + + # --------------------------------------------------------------------- + # Some optimizations not yet taken: + # * higher m calculations could have a lot of redundancy + # * fp/fc need not be calculated except where polarization is different + # * may be possible to simplify this by making smarter use of real/imag + # --------------------------------------------------------------------- + lr = 0. + hds = {} + hhs = {} + for det, modes in wfs.items(): + if det not in self.dets: + self.dets[det] = Detector(det) + + fp, fc = self.dets[det].antenna_pattern(params['ra'], + params['dec'], + self.pol, + params['tc']) + + # loop over modes and prepare the waveform modes + # we will sum up zetalm = glm <ulm, d> + i glm <vlm, d> + # over all common m so that we can apply the phase once + zetas = {} + rlms = {} + slms = {} + for mode in modes: + l, m = mode + ulm, vlm = modes[mode] + + # whiten the waveforms + # the kmax of the waveforms may be different than internal kmax + kmax = min(max(len(ulm), len(vlm)), self._kmax[det]) + slc = slice(self._kmin[det], kmax) + ulm[self._kmin[det]:kmax] *= self._weight[det][slc] + vlm[self._kmin[det]:kmax] *= self._weight[det][slc] + + # the inner products + # <ulm, d> + ulmd = ulm[slc].inner(self._whitened_data[det][slc]).real + # <vlm, d> + vlmd = vlm[slc].inner(self._whitened_data[det][slc]).real + + # add inclination, and pack into a complex number + import lal + glm = lal.SpinWeightedSphericalHarmonic( + params['inclination'], 0, -2, l, m).real + + if m not in zetas: + zetas[m] = 0j + zetas[m] += glm * (ulmd + 1j*vlmd) + + # Get condense set of the parts of the waveform that only diff + # by m, this is used next to help calculate <h, h> + r = glm * ulm + s = glm * vlm + + if m not in rlms: + rlms[m] = r + slms[m] = s + else: + rlms[m] += r + slms[m] += s + + # now compute all possible <hlm, hlm> + rr_m = {} + ss_m = {} + rs_m = {} + sr_m = {} + combos = itertools.combinations_with_replacement(rlms.keys(), 2) + for m, mprime in combos: + r = rlms[m] + s = slms[m] + rprime = rlms[mprime] + sprime = slms[mprime] + rr_m[mprime, m] = r[slc].inner(rprime[slc]).real + ss_m[mprime, m] = s[slc].inner(sprime[slc]).real + rs_m[mprime, m] = s[slc].inner(rprime[slc]).real + sr_m[mprime, m] = r[slc].inner(sprime[slc]).real + # store the conjugate for easy retrieval later + rr_m[m, mprime] = rr_m[mprime, m] + ss_m[m, mprime] = ss_m[mprime, m] + rs_m[m, mprime] = sr_m[mprime, m] + sr_m[m, mprime] = rs_m[mprime, m] 
+ # now apply the phase to all the common ms + hpd = 0. + hcd = 0. + hphp = 0. + hchc = 0. + hphc = 0. + for m, zeta in zetas.items(): + phase_coeff = self.phase_fac(m) + + # <h+, d> = (exp[i m phi] * zeta).real() + # <hx, d> = -(exp[i m phi] * zeta).imag() + z = phase_coeff * zeta + hpd += z.real + hcd -= z.imag + + # now calculate the contribution to <h, h> + cosm = phase_coeff.real + sinm = phase_coeff.imag + + for mprime in zetas: + pcprime = self.phase_fac(mprime) + + cosmprime = pcprime.real + sinmprime = pcprime.imag + # needed components + rr = rr_m[m, mprime] + ss = ss_m[m, mprime] + rs = rs_m[m, mprime] + sr = sr_m[m, mprime] + # <hp, hp> + hphp += rr * cosm * cosmprime \ + + ss * sinm * sinmprime \ + - rs * cosm * sinmprime \ + - sr * sinm * cosmprime + # <hc, hc> + hchc += rr * sinm * sinmprime \ + + ss * cosm * cosmprime \ + + rs * sinm * cosmprime \ + + sr * cosm * sinmprime + # <hp, hc> + hphc += -rr * cosm * sinmprime \ + + ss * sinm * cosmprime \ + + sr * sinm * sinmprime \ + - rs * cosm * cosmprime + + # Now apply the polarizations and calculate the loglr + # We have h = Fp * hp + Fc * hc + # loglr = <h, d> - <h, h>/2 + # = Fp*<hp, d> + Fc*<hc, d> + # - (1/2)*(Fp*Fp*<hp, hp> + Fc*Fc*<hc, hc> + # + 2*Fp*Fc<hp, hc>) + # (in the last line we have made use of the time series being + # real, so that <a, b> = <b, a>). + hd = fp * hpd + fc * hcd + hh = fp * fp * hphp + fc * fc * hchc + 2 * fp * fc * hphc + hds[det] = hd + hhs[det] = hh + lr += hd - 0.5 * hh + + if return_unmarginalized: + return self.pol, self.phase, lr, hds, hhs + + lr_total = special.logsumexp(lr) - numpy.log(self.nsamples) + + # store the maxl values + idx = lr.argmax() + setattr(self._current_stats, 'maxl_polarization', self.pol[idx]) + setattr(self._current_stats, 'maxl_phase', self.phase[idx]) + return float(lr_total)
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/relbin.html b/latest/html/_modules/pycbc/inference/models/relbin.html new file mode 100644 index 00000000000..caa6087f3fc --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/relbin.html @@ -0,0 +1,1059 @@ + + + + + + pycbc.inference.models.relbin — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.relbin

+# Copyright (C) 2020  Daniel Finstad
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""This module provides model classes and functions for implementing
+a relative binning likelihood for parameter estimation.
+"""
+
+
+import logging
+import numpy
+import itertools
+from scipy.interpolate import interp1d
+
+from pycbc.waveform import (get_fd_waveform_sequence,
+                            get_fd_det_waveform_sequence, fd_det_sequence)
+from pycbc.detector import Detector
+from pycbc.types import Array, TimeSeries
+
+from .gaussian_noise import BaseGaussianNoise
+from .relbin_cpu import (likelihood_parts, likelihood_parts_v,
+                         likelihood_parts_multi, likelihood_parts_multi_v,
+                         likelihood_parts_det, likelihood_parts_det_multi,
+                         likelihood_parts_vector,
+                         likelihood_parts_v_pol,
+                         likelihood_parts_v_time,
+                         likelihood_parts_v_pol_time,
+                         likelihood_parts_vectorp, snr_predictor,
+                         likelihood_parts_vectort,
+                         snr_predictor_dom)
+from .tools import DistMarg
+
+
+
+[docs] +def setup_bins(f_full, f_lo, f_hi, chi=1.0, + eps=0.1, gammas=None, + ): + """Construct frequency bins for use in a relative likelihood + model. For details, see [Barak, Dai & Venumadhav 2018]. + + Parameters + ---------- + f_full : array + The full resolution array of frequencies being used in the analysis. + f_lo : float + The starting frequency used in matched filtering. This will be the + left edge of the first frequency bin. + f_hi : float + The ending frequency used in matched filtering. This will be the right + edge of the last frequency bin. + chi : float, optional + Tunable parameter, see [Barak, Dai & Venumadhav 2018] + eps : float, optional + Tunable parameter, see [Barak, Dai & Venumadhav 2018]. Lower values + result in larger number of bins. + gammas : array, optional + Frequency powerlaw indices to be used in computing bins. + + Returns + ------- + nbin : int + Number of bins. + fbin : numpy.array of floats + Bin edge frequencies. + fbin_ind : numpy.array of ints + Indices of bin edges in full frequency array. + """ + f = numpy.linspace(f_lo, f_hi, 10000) + # f^ga power law index + ga = ( + gammas + if gammas is not None + else numpy.array([-5.0 / 3, -2.0 / 3, 1.0, 5.0 / 3, 7.0 / 3]) + ) + logging.info("Using powerlaw indices: %s", ga) + dalp = chi * 2.0 * numpy.pi / numpy.absolute((f_lo ** ga) - (f_hi ** ga)) + dphi = numpy.sum( + numpy.array([numpy.sign(g) * d * (f ** g) for g, d in zip(ga, dalp)]), + axis=0, + ) + dphi_diff = dphi - dphi[0] + # now construct frequency bins + nbin = int(dphi_diff[-1] / eps) + dphi2f = interp1d( + dphi_diff, f, kind="slinear", bounds_error=False, fill_value=0.0 + ) + dphi_grid = numpy.linspace(dphi_diff[0], dphi_diff[-1], nbin + 1) + # frequency grid points + fbin = dphi2f(dphi_grid) + # indices of frequency grid points in the FFT array + fbin_ind = numpy.searchsorted(f_full, fbin) + for idx_fbin, idx_f_full in enumerate(fbin_ind): + if idx_f_full == 0: + curr_idx = 0 + elif idx_f_full == len(f_full): + curr_idx = len(f_full) - 1 + else: + abs1 = abs(f_full[idx_f_full] - fbin[idx_fbin]) + abs2 = abs(f_full[idx_f_full-1] - fbin[idx_fbin]) + if abs1 > abs2: + curr_idx = idx_f_full - 1 + else: + curr_idx = idx_f_full + fbin_ind[idx_fbin] = curr_idx + fbin_ind = numpy.unique(fbin_ind) + return fbin_ind
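+# Hedged usage sketch for setup_bins (all values below are illustrative): the
+# returned array holds the indices of the bin edges within f_full.
+import numpy
+f_full = numpy.arange(0, 1024, 1.0 / 512)                # data frequency grid
+edge_idx = setup_bins(f_full, f_lo=20.0, f_hi=1000.0, eps=0.1)
+print(len(edge_idx) - 1, "relative-binning bins")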
+ + + +
+[docs] +class Relative(DistMarg, BaseGaussianNoise): + r"""Model that assumes the likelihood in a region around the peak + is slowly varying such that a linear approximation can be made, and + likelihoods can be calculated at a coarser frequency resolution. For + more details on the implementation, see https://arxiv.org/abs/1806.08792. + + This model requires the use of a fiducial waveform whose parameters are + near the peak of the likelihood. The fiducial waveform and all template + waveforms used in likelihood calculation are currently generated using + the SPAtmplt approximant. + + For more details on initialization parameters and definition of terms, see + :py:class:`BaseGaussianNoise`. + + Parameters + ---------- + variable_params : (tuple of) string(s) + A tuple of parameter names that will be varied. + data : dict + A dictionary of data, in which the keys are the detector names and the + values are the data (assumed to be unwhitened). All data must have the + same frequency resolution. + low_frequency_cutoff : dict + A dictionary of starting frequencies, in which the keys are the + detector names and the values are the starting frequencies for the + respective detectors to be used for computing inner products. + fiducial_params : dict + A dictionary of waveform parameters to be used for generating the + fiducial waveform. Keys must be parameter names in the form + 'PARAM_ref' where PARAM is a recognized extrinsic parameter or + an intrinsic parameter compatible with the chosen approximant. + gammas : array of floats, optional + Frequency powerlaw indices to be used in computing frequency bins. + epsilon : float, optional + Tuning parameter used in calculating the frequency bins. Lower values + will result in higher resolution and more bins. + earth_rotation: boolean, optional + Default is False. If True, then vary the fp/fc polarization values + as a function of frequency bin, using a predetermined PN approximation + for the time offsets. + \**kwargs : + All other keyword arguments are passed to + :py:class:`BaseGaussianNoise`. + """ + name = "relative" + + def __init__( + self, + variable_params, + data, + low_frequency_cutoff, + fiducial_params=None, + gammas=None, + epsilon=0.5, + earth_rotation=False, + earth_rotation_mode=2, + marginalize_phase=True, + **kwargs + ): + + variable_params, kwargs = self.setup_marginalization( + variable_params, + marginalize_phase=marginalize_phase, + **kwargs) + + super(Relative, self).__init__( + variable_params, data, low_frequency_cutoff, **kwargs + ) + + # If the waveform needs us to apply the detector response, + # set flag to true (most cases for ground-based observatories).
+ self.still_needs_det_response = False + if self.static_params['approximant'] in fd_det_sequence: + self.still_needs_det_response = True + + # reference waveform and bin edges + self.f, self.df, self.end_time, self.det = {}, {}, {}, {} + self.h00, self.h00_sparse = {}, {} + self.fedges, self.edges = {}, {} + self.ta, self.antenna_time = {}, {} + + # filtered summary data for linear approximation + self.sdat = {} + + # store fiducial waveform params + self.fid_params = self.static_params.copy() + self.fid_params.update(fiducial_params) + + # the flag used in `_loglr` + self.return_sh_hh = False + + for k in self.static_params: + if self.fid_params[k] == 'REPLACE': + self.fid_params.pop(k) + + for ifo in data: + # store data and frequencies + d0 = self.data[ifo] + self.f[ifo] = numpy.array(d0.sample_frequencies) + self.df[ifo] = d0.delta_f + self.end_time[ifo] = float(d0.end_time) + + # generate fiducial waveform + f_lo = self.kmin[ifo] * self.df[ifo] + f_hi = self.kmax[ifo] * self.df[ifo] + logging.info( + "%s: Generating fiducial waveform from %s to %s Hz", + ifo, f_lo, f_hi, + ) + + # prune low frequency samples to avoid waveform errors + fpoints = Array(self.f[ifo].astype(numpy.float64)) + fpoints = fpoints[self.kmin[ifo]:self.kmax[ifo]+1] + + if self.still_needs_det_response: + wave = get_fd_det_waveform_sequence(ifos=ifo, + sample_points=fpoints, + **self.fid_params) + curr_wav = wave[ifo] + self.ta[ifo] = 0. + else: + fid_hp, fid_hc = get_fd_waveform_sequence(sample_points=fpoints, + **self.fid_params) + # Apply detector response if not handled by + # the waveform generator + self.det[ifo] = Detector(ifo) + dt = self.det[ifo].time_delay_from_earth_center( + self.fid_params["ra"], + self.fid_params["dec"], + self.fid_params["tc"], + ) + self.ta[ifo] = self.fid_params["tc"] + dt + fp, fc = self.det[ifo].antenna_pattern( + self.fid_params["ra"], self.fid_params["dec"], + self.fid_params["polarization"], self.fid_params["tc"]) + curr_wav = (fid_hp * fp + fid_hc * fc) + + # check for zeros at low and high frequencies + # make sure only nonzero samples are included in bins + numzeros_lo = list(curr_wav != 0j).index(True) + if numzeros_lo > 0: + new_kmin = self.kmin[ifo] + numzeros_lo + f_lo = new_kmin * self.df[ifo] + logging.info( + "WARNING! Fiducial waveform starts above " + "low-frequency-cutoff, initial bin frequency " + "will be %s Hz", f_lo) + numzeros_hi = list(curr_wav[::-1] != 0j).index(True) + if numzeros_hi > 0: + new_kmax = self.kmax[ifo] - numzeros_hi + f_hi = new_kmax * self.df[ifo] + logging.info( + "WARNING! 
Fiducial waveform terminates below " + "high-frequency-cutoff, final bin frequency " + "will be %s Hz", f_hi) + + self.ta[ifo] -= self.end_time[ifo] + curr_wav.resize(len(self.f[ifo])) + curr_wav = numpy.roll(curr_wav, self.kmin[ifo]) + + # We'll apply this to the data, in lieu of the ref waveform + # This makes it easier to compare target signal to reference later + tshift = numpy.exp(-2.0j * numpy.pi * self.f[ifo] * self.ta[ifo]) + self.h00[ifo] = numpy.array(curr_wav) # * tshift + data_shifted = self.data[ifo] * numpy.conjugate(tshift) + + logging.info("Computing frequency bins") + fbin_ind = setup_bins( + f_full=self.f[ifo], f_lo=f_lo, f_hi=f_hi, + gammas=gammas, eps=float(epsilon), + ) + logging.info("Using %s bins for this model", len(fbin_ind)) + + self.fedges[ifo] = self.f[ifo][fbin_ind] + self.edges[ifo] = fbin_ind + self.init_from_frequencies(data_shifted, self.h00, fbin_ind, ifo) + self.antenna_time[ifo] = self.setup_antenna( + earth_rotation, + int(earth_rotation_mode), + self.fedges[ifo]) + self.combine_layout() + +
+[docs] + def init_from_frequencies(self, data, h00, fbin_ind, ifo): + bins = numpy.array( + [ + (fbin_ind[i], fbin_ind[i + 1]) + for i in range(len(fbin_ind) - 1) + ] + ) + + # store low res copy of fiducial waveform + self.h00_sparse[ifo] = h00[ifo].copy().take(fbin_ind) + + # compute summary data + logging.info( + "Calculating summary data at frequency resolution %s Hz", + self.df[ifo], + ) + + a0, a1 = self.summary_product(data, h00[ifo], bins, ifo) + b0, b1 = self.summary_product(h00[ifo], h00[ifo], bins, ifo) + self.sdat[ifo] = {"a0": a0, "a1": a1, "b0": abs(b0), "b1": abs(b1)}
+ + +
+[docs] + def combine_layout(self): + # determine the unique ifo layouts + self.edge_unique = [] + self.ifo_map = {} + for ifo in self.fedges: + if len(self.edge_unique) == 0: + self.ifo_map[ifo] = 0 + self.edge_unique.append(Array(self.fedges[ifo])) + else: + for i, edge in enumerate(self.edge_unique): + if numpy.array_equal(edge, self.fedges[ifo]): + self.ifo_map[ifo] = i + break + else: + self.ifo_map[ifo] = len(self.edge_unique) + self.edge_unique.append(Array(self.fedges[ifo])) + logging.info("%s unique ifo layouts", len(self.edge_unique))
+ + +
+[docs] + def setup_antenna(self, earth_rotation, mode, fedges): + # Calculate the times to evaluate fp/fc + self.earth_rotation = earth_rotation + if earth_rotation is not False: + logging.info("Enabling frequency-dependent earth rotation") + from pycbc.waveform.spa_tmplt import spa_length_in_time + + times = spa_length_in_time( + phase_order=-1, + mass1=self.fid_params["mass1"], + mass2=self.fid_params["mass2"], + f_lower=numpy.array(fedges) / mode * 2.0, + ) + atimes = self.fid_params["tc"] - times + self.lik = likelihood_parts_v + self.mlik = likelihood_parts_multi_v + else: + atimes = self.fid_params["tc"] + if self.still_needs_det_response: + self.lik = likelihood_parts_det + self.mlik = likelihood_parts_det_multi + else: + self.lik = likelihood_parts + self.mlik = likelihood_parts_multi + return atimes
+ + + @property + def likelihood_function(self): + self.lformat = None + if self.marginalize_vector_params: + p = self.current_params + + vmarg = set(k for k in self.marginalize_vector_params + if not numpy.isscalar(p[k])) + + if self.earth_rotation: + if set(['tc', 'polarization']).issubset(vmarg): + self.lformat = 'earth_time_pol' + return likelihood_parts_v_pol_time + elif set(['polarization']).issubset(vmarg): + self.lformat = 'earth_pol' + return likelihood_parts_v_pol + elif set(['tc']).issubset(vmarg): + self.lformat = 'earth_time' + return likelihood_parts_v_time + else: + if set(['ra', 'dec', 'tc']).issubset(vmarg): + return likelihood_parts_vector + elif set(['tc', 'polarization']).issubset(vmarg): + return likelihood_parts_vector + elif set(['tc']).issubset(vmarg): + return likelihood_parts_vectort + elif set(['polarization']).issubset(vmarg): + return likelihood_parts_vectorp + + return self.lik + +
+[docs] + def summary_product(self, h1, h2, bins, ifo): + """ Calculate the summary values for the inner product <h1|h2> + """ + # calculate coefficients + h12 = numpy.conjugate(h1) * h2 / self.psds[ifo] + + # constant terms + a0 = numpy.array([ + 4.0 * self.df[ifo] * h12[l:h].sum() + for l, h in bins + ]) + + # linear terms + a1 = numpy.array([ + 4.0 / (h - l) * + (h12[l:h] * (self.f[ifo][l:h] - self.f[ifo][l])).sum() + for l, h in bins]) + + return a0, a1
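+# Hedged sketch of how summary data in this form can be used (my reading of
+# the a0/a1 normalization defined above; the compiled likelihood_parts
+# helpers may use an equivalent but differently arranged expression): with
+# r = h / h00 evaluated at the bin-edge frequencies and assumed linear inside
+# each bin, the full-resolution inner product collapses to a sum over bins.
+import numpy
+
+def sketch_hd_from_summary(a0, a1, r_edges):
+    """a0, a1: per-bin summary data; r_edges: waveform ratio at the bin edges."""
+    r_left, r_right = r_edges[:-1], r_edges[1:]
+    return numpy.sum(a0 * r_left + a1 * (r_right - r_left))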
+ + +
+[docs] + def get_waveforms(self, params): + """ Get the waveform polarizations for each ifo + """ + if self.still_needs_det_response: + wfs = {} + for ifo in self.data: + wfs.update(get_fd_det_waveform_sequence( + ifos=ifo, sample_points=self.fedges[ifo], **params)) + return wfs + + wfs = [] + for edge in self.edge_unique: + hp, hc = get_fd_waveform_sequence(sample_points=edge, **params) + hp = hp.numpy() + hc = hc.numpy() + wfs.append((hp, hc)) + wf_ret = {ifo: wfs[self.ifo_map[ifo]] for ifo in self.data} + + self.wf_ret = wf_ret + return wf_ret
+ + + @property + def multi_signal_support(self): + """ The list of classes that this model supports in a multi-signal + likelihood + """ + # Check if this model *can* be included in a multi-signal model. + # All marginalizations must currently be disabled to work! + if (self.marginalize_vector_params or + self.marginalize_distance or + self.marginalize_phase): + logging.info("Cannot use single template model inside of" + "multi_signal if marginalizations are enabled") + return [type(self)] + +
+[docs] + def calculate_hihjs(self, models): + """ Pre-calculate the hihj inner products on a grid + """ + self.hihj = {} + for m1, m2 in itertools.combinations(models, 2): + self.hihj[(m1, m2)] = {} + for ifo in self.data: + h1 = m1.h00[ifo] + h2 = m2.h00[ifo] + + # Combine the grids + edge = numpy.unique([m1.edges[ifo], m2.edges[ifo]]) + + # Remove any points where either reference is zero + keep = numpy.where((h1[edge] != 0) | (h2[edge] != 0))[0] + edge = edge[keep] + fedge = m1.f[ifo][edge] + + bins = numpy.array([ + (edge[i], edge[i + 1]) + for i in range(len(edge) - 1) + ]) + a0, a1 = self.summary_product(h1, h2, bins, ifo) + self.hihj[(m1, m2)][ifo] = a0, a1, fedge
+ + +
+[docs] + def multi_loglikelihood(self, models): + """ Calculate a multi-model (signal) likelihood + """ + models = [self] + models + loglr = 0 + # handle sum[<d|h_i> - 0.5 <h_i|h_i>] + for m in models: + loglr += m.loglr + + if not hasattr(self, 'hihj'): + self.calculate_hihjs(models) + + if self.still_needs_det_response: + for m1, m2 in itertools.combinations(models, 2): + for det in self.data: + a0, a1, fedge = self.hihj[(m1, m2)][det] + + dtc, channel, h00 = m1._current_wf_parts[det] + dtc2, channel2, h002 = m2._current_wf_parts[det] + + c1c2 = self.mlik(fedge, + dtc, channel, h00, + dtc2, channel2, h002, + a0, a1) + loglr += - c1c2.real # This is -0.5 * re(<h1|h2> + <h2|h1>) + else: + # finally add in the lognl term from this model + for m1, m2 in itertools.combinations(models, 2): + for det in self.data: + a0, a1, fedge = self.hihj[(m1, m2)][det] + + fp, fc, dtc, hp, hc, h00 = m1._current_wf_parts[det] + fp2, fc2, dtc2, hp2, hc2, h002 = m2._current_wf_parts[det] + + h1h2 = self.mlik(fedge, + fp, fc, dtc, hp, hc, h00, + fp2, fc2, dtc2, hp2, hc2, h002, + a0, a1) + loglr += - h1h2.real # This is -0.5 * re(<h1|h2> + <h2|h1>) + return loglr + self.lognl
+ + + def _loglr(self): + r"""Computes the log likelihood ratio, + or inner product <s|h> and <h|h> if `self.return_sh_hh` is True. + + .. math:: + + \log \mathcal{L}(\Theta) = \sum_i + \left<h_i(\Theta)|d_i\right> - + \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood ratio. + or + tuple + The inner product (<s|h>, <h|h>). + """ + # get model params + p = self.current_params + wfs = self.get_waveforms(p) + lik = self.likelihood_function + norm = 0.0 + filt = 0j + self._current_wf_parts = {} + pol_phase = numpy.exp(-2.0j * p['polarization']) + + for ifo in self.data: + freqs = self.fedges[ifo] + sdat = self.sdat[ifo] + h00 = self.h00_sparse[ifo] + end_time = self.end_time[ifo] + times = self.antenna_time[ifo] + + # project waveform to detector frame if waveform does not deal + # with detector response. Otherwise, skip detector response. + + if self.still_needs_det_response: + dtc = 0. + + channel = wfs[ifo].numpy() + filter_i, norm_i = lik(freqs, dtc, channel, h00, + sdat['a0'], sdat['a1'], + sdat['b0'], sdat['b1']) + self._current_wf_parts[ifo] = (dtc, channel, h00) + else: + hp, hc = wfs[ifo] + det = self.det[ifo] + fp, fc = det.antenna_pattern(p["ra"], p["dec"], + 0.0, times) + dt = det.time_delay_from_earth_center(p["ra"], p["dec"], times) + dtc = p["tc"] + dt - end_time - self.ta[ifo] + + if self.lformat == 'earth_pol': + filter_i, norm_i = lik(freqs, fp, fc, dtc, pol_phase, + hp, hc, h00, + sdat['a0'], sdat['a1'], + sdat['b0'], sdat['b1']) + else: + f = (fp + 1.0j * fc) * pol_phase + fp = f.real.copy() + fc = f.imag.copy() + filter_i, norm_i = lik(freqs, fp, fc, dtc, + hp, hc, h00, + sdat['a0'], sdat['a1'], + sdat['b0'], sdat['b1']) + self._current_wf_parts[ifo] = (fp, fc, dtc, hp, hc, h00) + + filt += filter_i + norm += norm_i + + loglr = self.marginalize_loglr(filt, norm) + if self.return_sh_hh: + results = (filt, norm) + else: + results = loglr + return results + +
+[docs] + def write_metadata(self, fp, group=None): + """Adds writing the fiducial parameters and epsilon to file's attrs. + + Parameters + ---------- + fp : pycbc.inference.io.BaseInferenceFile instance + The inference file to write to. + group : str, optional + If provided, the metadata will be written to the attrs specified + by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is + written to the top-level attrs (``fp.attrs``). + """ + super().write_metadata(fp, group=group) + if group is None: + attrs = fp.attrs + else: + attrs = fp[group].attrs + for p, v in self.fid_params.items(): + attrs["{}_ref".format(p)] = v
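+# Hedged sketch (file name is illustrative): the "<param>_ref" attributes
+# written above can later be read back from the output file, e.g. with h5py.
+import h5py
+
+with h5py.File('inference.hdf', 'r') as f:
+    fiducial = {k[:-4]: v for k, v in f.attrs.items() if k.endswith('_ref')}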
+ + +
+[docs] + def max_curvature_from_reference(self): + """ Return the maximum change in slope between frequency bins + relative to the reference waveform. + """ + dmax = 0 + for ifo in self.data: + r = self.wf_ret[ifo][0] / self.h00_sparse[ifo] + d = abs(numpy.diff(r / abs(r).min(), n=2)).max() + dmax = d if dmax < d else dmax + return dmax
+ + +
+[docs] + @staticmethod + def extra_args_from_config(cp, section, skip_args=None, dtypes=None): + """Adds reading fiducial waveform parameters from config file.""" + # add fiducial params to skip list + skip_args += [ + option for option in cp.options(section) if option.endswith("_ref") + ] + + # get frequency power-law indices if specified + # NOTE these should be supplied in units of 1/3 + gammas = None + if cp.has_option(section, "gammas"): + skip_args.append("gammas") + gammas = numpy.array( + [float(g) / 3.0 for g in cp.get(section, "gammas").split()] + ) + args = super(Relative, Relative).extra_args_from_config( + cp, section, skip_args=skip_args, dtypes=dtypes + ) + + # get fiducial params from config + fid_params = { + p.replace("_ref", ""): float(cp.get("model", p)) + for p in cp.options("model") + if p.endswith("_ref") + } + + # add optional params with default values if not specified + opt_params = { + "ra": numpy.pi, + "dec": 0.0, + "inclination": 0.0, + "polarization": numpy.pi, + } + fid_params.update( + {p: opt_params[p] for p in opt_params if p not in fid_params} + ) + args.update({"fiducial_params": fid_params, "gammas": gammas}) + return args
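+# Hedged illustration of the configuration convention parsed above (all values
+# are made up): fiducial-waveform options live in the [model] section with a
+# "_ref" suffix, and optional gammas are given in units of 1/3.
+#
+#   [model]
+#   name = relative
+#   epsilon = 0.03
+#   mass1_ref = 36.0
+#   mass2_ref = 29.0
+#   tc_ref = 1126259462.42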
+
+ + + +
+[docs] +class RelativeTime(Relative): + """ Heterodyne likelihood optimized for time marginalization. In addition + it supports phase (dominant-mode), sky location, and polarization + marginalization. + """ + name = "relative_time" + + def __init__(self, *args, + sample_rate=4096, + **kwargs): + super(RelativeTime, self).__init__(*args, **kwargs) + self.sample_rate = float(sample_rate) + self.setup_peak_lock(sample_rate=self.sample_rate, **kwargs) + self.draw_ifos(self.ref_snr, **kwargs) + + @property + def ref_snr(self): + if not hasattr(self, '_ref_snr'): + wfs = {ifo: (self.h00_sparse[ifo], + self.h00_sparse[ifo]) for ifo in self.h00_sparse} + self._ref_snr = self.get_snr(wfs) + return self._ref_snr + +
+[docs] + def get_snr(self, wfs): + """ Return hp/hc maximized SNR time series + """ + delta_t = 1.0 / self.sample_rate + snrs = {} + for ifo in wfs: + sdat = self.sdat[ifo] + dtc = self.tstart[ifo] - self.end_time[ifo] - self.ta[ifo] + + snr = snr_predictor(self.fedges[ifo], + dtc - delta_t * 2.0, delta_t, + self.num_samples[ifo] + 4, + wfs[ifo][0], wfs[ifo][1], + self.h00_sparse[ifo], + sdat['a0'], sdat['a1'], + sdat['b0'], sdat['b1']) + snrs[ifo] = TimeSeries(snr, delta_t=delta_t, + epoch=self.tstart[ifo] - delta_t * 2.0) + return snrs
+ + + def _loglr(self): + r"""Computes the log likelihood ratio, + + .. math:: + + \log \mathcal{L}(\Theta) = \sum_i + \left<h_i(\Theta)|d_i\right> - + \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood ratio. + """ + # get model params + p = self.current_params + wfs = self.get_waveforms(p) + lik = self.likelihood_function + norm = 0.0 + filt = 0j + pol_phase = numpy.exp(-2.0j * p['polarization']) + + self.snr_draw(wfs) + p = self.current_params + + for ifo in self.data: + freqs = self.fedges[ifo] + sdat = self.sdat[ifo] + h00 = self.h00_sparse[ifo] + end_time = self.end_time[ifo] + times = self.antenna_time[ifo] + + hp, hc = wfs[ifo] + det = self.det[ifo] + fp, fc = det.antenna_pattern(p["ra"], p["dec"], + 0, times) + times = det.time_delay_from_earth_center(p["ra"], p["dec"], times) + dtc = p["tc"] - end_time - self.ta[ifo] + + if self.lformat == 'earth_time_pol': + filter_i, norm_i = lik( + freqs, fp, fc, times, dtc, pol_phase, + hp, hc, h00, + sdat['a0'], sdat['a1'], + sdat['b0'], sdat['b1']) + else: + f = (fp + 1.0j * fc) * pol_phase + fp = f.real.copy() + fc = f.imag.copy() + if self.lformat == 'earth_time': + filter_i, norm_i = lik( + freqs, fp, fc, times, dtc, + hp, hc, h00, + sdat['a0'], sdat['a1'], + sdat['b0'], sdat['b1']) + else: + filter_i, norm_i = lik(freqs, fp, fc, times + dtc, + hp, hc, h00, + sdat['a0'], sdat['a1'], + sdat['b0'], sdat['b1']) + filt += filter_i + norm += norm_i + loglr = self.marginalize_loglr(filt, norm) + return loglr
+ + + +
+[docs] +class RelativeTimeDom(RelativeTime): + """ Heterodyne likelihood optimized for time marginalization and only + dominant-mode waveforms. This enables inclination marginalization in + addition to the other forms supported by RelativeTime. + """ + name = "relative_time_dom" + +
+[docs] + def get_snr(self, wfs): + """ Return hp/hc maximized SNR time series + """ + delta_t = 1.0 / self.sample_rate + snrs = {} + self.sh = {} + self.hh = {} + for ifo in wfs: + sdat = self.sdat[ifo] + dtc = self.tstart[ifo] - self.end_time[ifo] - self.ta[ifo] + + sh, hh = snr_predictor_dom(self.fedges[ifo], + dtc - delta_t * 2.0, delta_t, + self.num_samples[ifo] + 4, + wfs[ifo][0], + self.h00_sparse[ifo], + sdat['a0'], sdat['a1'], + sdat['b0'], sdat['b1']) + snr = TimeSeries(abs(sh[2:-2]) / hh ** 0.5, delta_t=delta_t, + epoch=self.tstart[ifo]) + self.sh[ifo] = TimeSeries(sh, delta_t=delta_t, + epoch=self.tstart[ifo] - delta_t * 2.0) + self.hh[ifo] = hh + snrs[ifo] = snr + + return snrs
+ + + def _loglr(self): + r"""Computes the log likelihood ratio, + or inner product <s|h> and <h|h> if `self.return_sh_hh` is True. + + .. math:: + + \log \mathcal{L}(\Theta) = \sum_i + \left<h_i(\Theta)|d_i\right> - + \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>, + + at the current parameter values :math:`\Theta`. + + Returns + ------- + float + The value of the log likelihood ratio. + or + tuple + The inner product (<s|h>, <h|h>). + """ + # calculate <d-h|d-h> = <h|h> - 2<h|d> + <d|d> up to a constant + p = self.current_params + + p2 = p.copy() + p2.pop('inclination') + wfs = self.get_waveforms(p2) + + sh_total = hh_total = 0 + ic = numpy.cos(p['inclination']) + ip = 0.5 * (1.0 + ic * ic) + pol_phase = numpy.exp(-2.0j * p['polarization']) + + snrs = self.get_snr(wfs) + self.snr_draw(snrs=snrs) + + for ifo in self.sh: + if self.precalc_antenna_factors: + fp, fc, dt = self.get_precalc_antenna_factors(ifo) + else: + dt = self.det[ifo].time_delay_from_earth_center(p['ra'], + p['dec'], + p['tc']) + fp, fc = self.det[ifo].antenna_pattern(p['ra'], p['dec'], + 0, p['tc']) + dts = p['tc'] + dt + f = (fp + 1.0j * fc) * pol_phase + # Note, this includes complex conjugation already + # as our stored inner products were hp* x data + htf = (f.real * ip + 1.0j * f.imag * ic) + sh = self.sh[ifo].at_time(dts, + interpolate='quadratic', + extrapolate=0.0j) + sh_total += sh * htf + hh_total += self.hh[ifo] * abs(htf) ** 2.0 + + loglr = self.marginalize_loglr(sh_total, hh_total) + if self.return_sh_hh: + results = (sh_total, hh_total) + else: + results = loglr + return results
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/single_template.html b/latest/html/_modules/pycbc/inference/models/single_template.html new file mode 100644 index 00000000000..0e8c91a1423 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/single_template.html @@ -0,0 +1,364 @@ + + + + + + pycbc.inference.models.single_template — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.models.single_template

+# Copyright (C) 2018 Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This module provides model classes that assume the noise is Gaussian.
+"""
+
+import logging
+import numpy
+import itertools
+
+from pycbc import filter as pyfilter
+from pycbc.waveform import get_fd_waveform
+from pycbc.detector import Detector
+
+from .gaussian_noise import BaseGaussianNoise
+from .tools import DistMarg
+
+
+
+[docs] +class SingleTemplate(DistMarg, BaseGaussianNoise): + r"""Model that assumes we know all the intrinsic parameters. + + This model assumes we know all the intrinsic parameters, and are only + maximizing over the extrinsic ones. We also assume a dominant mode waveform + approximant only and non-precessing. + + + Parameters + ---------- + variable_params : (tuple of) string(s) + A tuple of parameter names that will be varied. + data : dict + A dictionary of data, in which the keys are the detector names and the + values are the data (assumed to be unwhitened). All data must have the + same frequency resolution. + low_frequency_cutoff : dict + A dictionary of starting frequencies, in which the keys are the + detector names and the values are the starting frequencies for the + respective detectors to be used for computing inner products. + sample_rate : int, optional + The sample rate to use. Default is 32768. + polarization_samples: int, optional + Parameter to specify how finely to marginalize over polarization angle. + If None, then polarization must be a parameter. + \**kwargs : + All other keyword arguments are passed to + :py:class:`BaseGaussianNoise`; see that class for details. + """ + name = 'single_template' + + def __init__(self, variable_params, data, low_frequency_cutoff, + sample_rate=32768, + marginalize_phase=True, + **kwargs): + variable_params, kwargs = self.setup_marginalization( + variable_params, + marginalize_phase=marginalize_phase, + **kwargs) + super(SingleTemplate, self).__init__( + variable_params, data, low_frequency_cutoff, **kwargs) + + sample_rate = float(sample_rate) + + # Generate template waveforms + df = data[self.detectors[0]].delta_f + self.df = df + p = self.static_params.copy() + for k in self.static_params: + if p[k] == 'REPLACE': + p.pop(k) + if 'distance' in p: + _ = p.pop('distance') + if 'inclination' in p: + _ = p.pop('inclination') + + hp, _ = get_fd_waveform(delta_f=df, distance=1, inclination=0, **p) + + # Extend template to high sample rate + flen = int(round(sample_rate / df) / 2 + 1) + hp.resize(flen) + + # Calculate high sample rate SNR time series + self.sh = {} + self.hh = {} + self.snr = {} + self.det = {} + for ifo in self.data: + flow = self.kmin[ifo] * df + fhigh = self.kmax[ifo] * df + # Extend data to high sample rate + self.data[ifo].resize(flen) + self.det[ifo] = Detector(ifo) + snr, _, norm = pyfilter.matched_filter_core( + hp, self.data[ifo], + psd=self.psds[ifo], + low_frequency_cutoff=flow, + high_frequency_cutoff=fhigh) + + self.sh[ifo] = 4 * df * snr + self.snr[ifo] = snr * norm + + self.hh[ifo] = pyfilter.sigmasq( + hp, psd=self.psds[ifo], + low_frequency_cutoff=flow, + high_frequency_cutoff=fhigh) + + self.waveform = hp + self.htfs = {} # Waveform phase / distance transformation factors + self.dts = {} + + # Retrict to analyzing around peaks if chosen and choose what + # ifos to draw from + self.setup_peak_lock(snrs=self.snr, + sample_rate=sample_rate, + **kwargs) + self.draw_ifos(self.snr) + + @property + def multi_signal_support(self): + """ The list of classes that this model supports in a multi-signal + likelihood + """ + # Check if this model *can* be included in a multi-signal model. + # All marginalizations must currently be disabled to work! + if (self.marginalize_vector_params or + self.marginalize_distance or + self.marginalize_phase): + logging.info("Cannot use single template model inside of" + "multi_signal if marginalizations are enabled") + return [type(self)] + +
+[docs] + def calculate_hihjs(self, models): + """ Pre-calculate the hihj inner products on a grid + """ + self.hihj = {} + for m1, m2 in itertools.combinations(models, 2): + self.hihj[(m1, m2)] = {} + h1 = m1.waveform + h2 = m2.waveform + for ifo in self.data: + flow = self.kmin[ifo] * self.df + fhigh = self.kmax[ifo] * self.df + h1h2, _, _ = pyfilter.matched_filter_core( + h1, h2, + psd=self.psds[ifo], + low_frequency_cutoff=flow, + high_frequency_cutoff=fhigh) + self.hihj[(m1, m2)][ifo] = 4 * self.df * h1h2
+ + +
+[docs] + def multi_loglikelihood(self, models): + """ Calculate a multi-model (signal) likelihood + """ + models = [self] + models + loglr = 0 + # handle sum[<d|h_i> - 0.5 <h_i|h_i>] + for m in models: + loglr += m.loglr + + if not hasattr(self, 'hihj'): + self.calculate_hihjs(models) + + # finally add in the lognl term from this model + for m1, m2 in itertools.combinations(models, 2): + for det in self.data: + hihj_vec = self.hihj[(m1, m2)][det] + dt = m1.dts[det] - m2.dts[det] + hihj_vec.start_time + if dt < hihj_vec.start_time: + dt += hihj_vec.duration + + h1h2 = hihj_vec.at_time(dt, nearest_sample=True) + h1h2 *= m1.htfs[det] * m2.htfs[det].conj() + loglr += - h1h2.real # This is -0.5 * re(<h1|h2> + <h2|h1>) + return loglr + self.lognl
+ + + def _loglr(self): + r"""Computes the log likelihood ratio + + Returns + ------- + float + The value of the log likelihood ratio. + """ + # calculate <d-h|d-h> = <h|h> - 2<h|d> + <d|d> up to a constant + p = self.current_params + + phase = 1 + if 'coa_phase' in p: + phase = numpy.exp(-1.0j * 2 * p['coa_phase']) + + sh_total = hh_total = 0 + + ic = numpy.cos(p['inclination']) + ip = 0.5 * (1.0 + ic * ic) + pol_phase = numpy.exp(-2.0j * p['polarization']) + + self.snr_draw(snrs=self.snr) + + for ifo in self.sh: + dt = self.det[ifo].time_delay_from_earth_center(p['ra'], p['dec'], + p['tc']) + self.dts[ifo] = p['tc'] + dt + + fp, fc = self.det[ifo].antenna_pattern(p['ra'], p['dec'], + 0, p['tc']) + f = (fp + 1.0j * fc) * pol_phase + + # Note, this includes complex conjugation already + # as our stored inner products were hp* x data + htf = (f.real * ip + 1.0j * f.imag * ic) / p['distance'] * phase + self.htfs[ifo] = htf + sh = self.sh[ifo].at_time(self.dts[ifo], interpolate='quadratic') + sh_total += sh * htf + hh_total += self.hh[ifo] * abs(htf) ** 2.0 + + loglr = self.marginalize_loglr(sh_total, hh_total) + return loglr
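To make the structure of ``_loglr`` above concrete, here is a self-contained sketch (purely illustrative numbers, not drawn from real data) of how a single detector's contribution is assembled: the stored inner products are rescaled by the complex factor ``htf`` built from the antenna pattern, inclination, distance, and coalescence phase, and the unmarginalized contribution is Re(<d|h>) - 0.5 <h|h>.

    import numpy

    # hypothetical precomputed quantities for one detector
    sh = 8.0 + 3.0j      # <d|hp> interpolated at the proposed time
    hh = 250.0           # <hp|hp> for the reference template (distance = 1)
    fp, fc = 0.4, 0.3    # antenna pattern at the proposed sky location
    inclination, polarization, coa_phase, distance = 0.5, 1.0, 0.2, 400.0

    ic = numpy.cos(inclination)
    ip = 0.5 * (1.0 + ic * ic)
    f = (fp + 1.0j * fc) * numpy.exp(-2.0j * polarization)
    htf = (f.real * ip + 1.0j * f.imag * ic) / distance * numpy.exp(-2.0j * coa_phase)

    loglr = (sh * htf).real - 0.5 * hh * abs(htf) ** 2.0
    print(loglr)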
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/models/tools.html b/latest/html/_modules/pycbc/inference/models/tools.html new file mode 100644 index 00000000000..bce71831479 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/models/tools.html @@ -0,0 +1,1145 @@ + + + + + + pycbc.inference.models.tools — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.inference.models.tools

+""" Common utility functions for calculation of likelihoods
+"""
+
+import logging
+import warnings
+from distutils.util import strtobool
+
+import numpy
+import numpy.random
+import tqdm
+
+from scipy.special import logsumexp, i0e
+from scipy.interpolate import RectBivariateSpline, interp1d
+from pycbc.distributions import JointDistribution
+
+from pycbc.detector import Detector
+
+
+# Earth radius in seconds
+EARTH_RADIUS = 0.031
+
+
+
+[docs] +def str_to_tuple(sval, ftype): + """ Convenience parsing to convert str to tuple""" + if sval is None: + return () + return tuple(ftype(x.strip(' ')) for x in sval.split(','))
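A quick illustration of the helper above (values arbitrary); comma-separated option strings parse to tuples, and ``None`` parses to an empty tuple:

    >>> str_to_tuple("5, 50", float)
    (5.0, 50.0)
    >>> str_to_tuple(None, int)
    ()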
+ + + +
+[docs] +def str_to_bool(sval): + """ Ensure value is a bool if it can be converted """ + if isinstance(sval, str): + return strtobool(sval) + return sval
+ + + +
+[docs] +def draw_sample(loglr, size=None): + """ Draw a random index from a 1-d vector with loglr weights + """ + if size: + x = numpy.random.uniform(size=size) + else: + x = numpy.random.uniform() + loglr = loglr - loglr.max() + cdf = numpy.exp(loglr).cumsum() + cdf /= cdf[-1] + xl = numpy.searchsorted(cdf, x) + return xl
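A minimal sketch of how these weighted draws behave (illustrative numbers only): indices with larger log-likelihood weights are returned proportionally more often.

    import numpy

    # three samples with relative weights 0.1, 0.2, 0.7
    loglr = numpy.log(numpy.array([0.1, 0.2, 0.7]))
    idx = draw_sample(loglr, size=100000)
    # roughly 70% of the draws land on index 2
    print((idx == 2).mean())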
+ + + +
+[docs] +class DistMarg(): + """Help class to add bookkeeping for likelihood marginalization""" + +
+[docs] + def setup_marginalization(self, + variable_params, + marginalize_phase=False, + marginalize_distance=False, + marginalize_distance_param='distance', + marginalize_distance_samples=int(1e4), + marginalize_distance_interpolator=False, + marginalize_distance_snr_range=None, + marginalize_distance_density=None, + marginalize_vector_params=None, + marginalize_vector_samples=1e3, + marginalize_sky_initial_samples=1e6, + **kwargs): + """ Set up the model for use with distance marginalization + + This function sets up precalculations for distance / phase + marginalization. For distance marginalization it modifies the + model to internally remove distance as a parameter. + + Parameters + ---------- + variable_params: list of strings + The set of variable parameters + marginalize_phase: bool, False + Do analytic marginalization (appropriate only for 22 mode waveforms) + marginalize_distance: bool, False + Marginalize over distance + marginalize_distance_param: str + Name of the parameter that is used to determine the distance. + This might be 'distance' or a parameter which can be converted + to distance by a provided univariate transformation. + marginalize_distance_interpolator: bool + Use a pre-calculated interpolating function for the distance + marginalized likelihood. + marginalize_distance_snr_range: tuple of floats, (1, 50) + The SNR range for the interpolating function to be defined in. + If a sampler goes outside this range, the logl will be returned + as -numpy.inf. + marginalize_distance_density: tuple of ints, (1000, 1000) + The dimensions of the interpolation grid over (sh, hh). + + Returns + ------- + variable_params: list of strings + Set of variable params (missing distance-related parameter). + kwargs: dict + The keyword arguments to the model initialization, may be modified + from the original set by this function. 
+ """ + def pop_prior(param): + variable_params.remove(param) + old_prior = kwargs['prior'] + + dists = [d for d in old_prior.distributions + if param not in d.params] + dprior = [d for d in old_prior.distributions + if param in d.params][0] + prior = JointDistribution(variable_params, + *dists, **old_prior.kwargs) + kwargs['prior'] = prior + return dprior + + self.reconstruct_phase = False + self.reconstruct_distance = False + self.reconstruct_vector = False + self.precalc_antenna_factors = False + + # Handle any requested parameter vector / brute force marginalizations + self.marginalize_vector_params = {} + self.marginalized_vector_priors = {} + self.vsamples = int(marginalize_vector_samples) + + self.marginalize_sky_initial_samples = \ + int(float(marginalize_sky_initial_samples)) + + for param in str_to_tuple(marginalize_vector_params, str): + logging.info('Marginalizing over %s, %s points from prior', + param, self.vsamples) + self.marginalized_vector_priors[param] = pop_prior(param) + + # Remove in the future, backwards compatibility + if 'polarization_samples' in kwargs: + warnings.warn("use marginalize_vector_samples rather " + "than 'polarization_samples'", DeprecationWarning) + pol_uniform = numpy.linspace(0, numpy.pi * 2.0, self.vsamples) + self.marginalize_vector_params['polarization'] = pol_uniform + self.vsamples = int(kwargs['polarization_samples']) + kwargs.pop('polarization_samples') + + self.reset_vector_params() + + self.marginalize_phase = str_to_bool(marginalize_phase) + + self.distance_marginalization = False + self.distance_interpolator = None + + marginalize_distance = str_to_bool(marginalize_distance) + self.marginalize_distance = marginalize_distance + if not marginalize_distance: + return variable_params, kwargs + + if isinstance(marginalize_distance_snr_range, str): + marginalize_distance_snr_range = \ + str_to_tuple(marginalize_distance_snr_range, float) + + if isinstance(marginalize_distance_density, str): + marginalize_distance_density = \ + str_to_tuple(marginalize_distance_density, int) + + logging.info('Marginalizing over distance') + + # Take distance out of the variable params since we'll handle it + # manually now + dprior = pop_prior(marginalize_distance_param) + + if len(dprior.params) != 1 or not hasattr(dprior, 'bounds'): + raise ValueError('Distance Marginalization requires a ' + 'univariate and bounded prior') + + # Set up distance prior vector and samples + + # (1) prior is using distance + if dprior.params[0] == 'distance': + logging.info("Prior is directly on distance, setting up " + "%s grid weights", marginalize_distance_samples) + dmin, dmax = dprior.bounds['distance'] + dist_locs = numpy.linspace(dmin, dmax, + int(marginalize_distance_samples)) + dist_weights = [dprior.pdf(distance=l) for l in dist_locs] + dist_weights = numpy.array(dist_weights) + + # (2) prior is univariate and can be converted to distance + elif marginalize_distance_param != 'distance': + waveform_transforms = kwargs['waveform_transforms'] + pname = dprior.params[0] + logging.info("Settings up transform, prior is in terms of" + " %s", pname) + wtrans = [d for d in waveform_transforms + if 'distance' not in d.outputs] + if len(wtrans) == 0: + wtrans = None + kwargs['waveform_transforms'] = wtrans + dtrans = [d for d in waveform_transforms + if 'distance' in d.outputs][0] + v = dprior.rvs(int(1e8)) + d = dtrans.transform({pname: v[pname]})['distance'] + d.sort() + cdf = numpy.arange(1, len(d)+1) / len(d) + i = interp1d(d, cdf) + dmin, dmax = d.min(), d.max() + 
logging.info('Distance range %s-%s', dmin, dmax) + x = numpy.linspace(dmin, dmax, + int(marginalize_distance_samples) + 1) + xl, xr = x[:-1], x[1:] + dist_locs = 0.5 * (xr + xl) + dist_weights = i(xr) - i(xl) + else: + raise ValueError("No prior seems to determine the distance") + + dist_weights /= dist_weights.sum() + dist_ref = 0.5 * (dmax + dmin) + self.dist_locs = dist_locs + self.distance_marginalization = dist_ref / dist_locs, dist_weights + self.distance_interpolator = None + + if str_to_bool(marginalize_distance_interpolator): + setup_args = {} + if marginalize_distance_snr_range: + setup_args['snr_range'] = marginalize_distance_snr_range + if marginalize_distance_density: + setup_args['density'] = marginalize_distance_density + i = setup_distance_marg_interpolant(self.distance_marginalization, + phase=self.marginalize_phase, + **setup_args) + self.distance_interpolator = i + + kwargs['static_params']['distance'] = dist_ref + + # Save marginalized parameters' name into one place, + # coa_phase will be a static param if been marginalized + if marginalize_distance: + self.marginalized_params_name =\ + list(self.marginalize_vector_params.keys()) +\ + [marginalize_distance_param] + + return variable_params, kwargs
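As a rough sketch of how these options are combined in practice (keyword names as above; ``prior``, ``static_params``, and all numerical values are placeholders supplied by the caller, typically from the ``[model]`` section of an inference configuration file), a model constructor that marginalizes the phase analytically, the polarization numerically, and the distance via the interpolator would hand arguments along these lines to ``setup_marginalization``:

    # illustrative only; normally called inside a model's __init__, as
    # SingleTemplate does above, with 'prior' and 'static_params' in kwargs
    variable_params, kwargs = self.setup_marginalization(
        variable_params,
        marginalize_phase=True,
        marginalize_vector_params='polarization',
        marginalize_vector_samples=1000,
        marginalize_distance=True,
        marginalize_distance_param='distance',
        marginalize_distance_interpolator=True,
        marginalize_distance_snr_range=(5, 50),
        marginalize_distance_density=(1000, 1000),
        prior=prior, static_params=static_params)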
+ + +
+[docs] + def reset_vector_params(self): + """ Redraw vector params from their priors + """ + for param in self.marginalized_vector_priors: + vprior = self.marginalized_vector_priors[param] + values = vprior.rvs(self.vsamples)[param] + self.marginalize_vector_params[param] = values
+ + +
+[docs] + def marginalize_loglr(self, sh_total, hh_total, + skip_vector=False, return_peak=False): + """ Return the marginal likelihood + + Parameters + ----------- + sh_total: float or ndarray + The total <s|h> inner product summed over detectors + hh_total: float or ndarray + The total <h|h> inner product summed over detectors + skip_vector: bool, False + If true, and input is a vector, do not marginalize over that + vector, instead return the likelihood values as a vector. + """ + interpolator = self.distance_interpolator + return_complex = False + distance = self.distance_marginalization + + if self.reconstruct_vector: + skip_vector = True + + if self.reconstruct_distance: + interpolator = None + skip_vector = True + + if self.reconstruct_phase: + interpolator = None + distance = False + skip_vector = True + return_complex = True + + return marginalize_likelihood(sh_total, hh_total, + logw=self.marginalize_vector_weights, + phase=self.marginalize_phase, + interpolator=interpolator, + distance=distance, + skip_vector=skip_vector, + return_complex=return_complex, + return_peak=return_peak)
+ + +
+[docs] + def premarg_draw(self): + """ Choose random samples from prechosen set""" + + # Update the current proposed times and the marginalization values + logw = self.premarg['logw_partial'] + if self.vsamples == len(logw): + choice = slice(None, None) + else: + choice = numpy.random.choice(len(logw), size=self.vsamples, + replace=False) + + for k in self.snr_params: + self.marginalize_vector_params[k] = self.premarg[k][choice] + + self._current_params.update(self.marginalize_vector_params) + self.sample_idx = self.premarg['sample_idx'][choice] + + # Update the importance weights for each vector sample + logw = self.marginalize_vector_weights + logw[choice] + self.marginalize_vector_weights = logw - logsumexp(logw) + return self.marginalize_vector_params
+ + +
+[docs] + def snr_draw(self, wfs=None, snrs=None, size=None): + """ Improve the monte-carlo vector marginalization using the SNR time + series of each detector + """ + try: + p = self.current_params + set_scalar = numpy.isscalar(p['tc']) + except: + set_scalar = False + + if not set_scalar: + if hasattr(self, 'premarg'): + return self.premarg_draw() + + if snrs is None: + snrs = self.get_snr(wfs) + if ('tc' in self.marginalized_vector_priors and + not ('ra' in self.marginalized_vector_priors + or 'dec' in self.marginalized_vector_priors)): + return self.draw_times(snrs, size=size) + elif ('tc' in self.marginalized_vector_priors and + 'ra' in self.marginalized_vector_priors and + 'dec' in self.marginalized_vector_priors): + return self.draw_sky_times(snrs, size=size) + else: + # OK, we couldn't do anything with the requested monte-carlo + # marginalizations. + self.precalc_antenna_factors = None + return None
+ + +
+[docs] + def draw_times(self, snrs, size=None): + """ Draw times consistent with the incoherent network SNR + + Parameters + ---------- + snrs: dist of TimeSeries + """ + if not hasattr(self, 'tinfo'): + # determine the rough time offsets for this sky location + tcprior = self.marginalized_vector_priors['tc'] + tcmin, tcmax = tcprior.bounds['tc'] + tcave = (tcmax + tcmin) / 2.0 + ifos = list(snrs.keys()) + if hasattr(self, 'keep_ifos'): + ifos = self.keep_ifos + d = {ifo: Detector(ifo, reference_time=tcave) for ifo in ifos} + self.tinfo = tcmin, tcmax, tcave, ifos, d + self.snr_params = ['tc'] + + tcmin, tcmax, tcave, ifos, d = self.tinfo + vsamples = size if size is not None else self.vsamples + + # Determine the weights for the valid time range + ra = self._current_params['ra'] + dec = self._current_params['dec'] + + # Determine the common valid time range + iref = ifos[0] + dref = d[iref] + dt = dref.time_delay_from_earth_center(ra, dec, tcave) + + starts = [] + ends = [] + + delt = snrs[iref].delta_t + tmin = tcmin + dt - delt + tmax = tcmax + dt + delt + if hasattr(self, 'tstart'): + tmin = self.tstart[iref] + tmax = self.tend[iref] + + # Make sure we draw from times within prior and that have enough + # SNR calculated to do later interpolation + starts.append(max(tmin, snrs[iref].start_time + delt)) + ends.append(min(tmax, snrs[iref].end_time - delt * 2)) + + idels = {} + for ifo in ifos[1:]: + dti = d[ifo].time_delay_from_detector(dref, ra, dec, tcave) + idel = round(dti / snrs[iref].delta_t) * snrs[iref].delta_t + idels[ifo] = idel + + starts.append(snrs[ifo].start_time - idel) + ends.append(snrs[ifo].end_time - idel) + start = max(starts) + end = min(ends) + if end <= start: + return + + # get the weights + snr = snrs[iref].time_slice(start, end, mode='nearest') + logweight = snr.squared_norm().numpy() + for ifo in ifos[1:]: + idel = idels[ifo] + snrv = snrs[ifo].time_slice(snr.start_time + idel, + snr.end_time + idel, + mode='nearest') + logweight += snrv.squared_norm().numpy() + logweight /= 2.0 + logweight -= logsumexp(logweight) # Normalize to PDF + + # Draw proportional to the incoherent likelihood + # Draw first which time sample + tci = draw_sample(logweight, size=vsamples) + # Second draw a subsample size offset so that all times are covered + tct = numpy.random.uniform(-snr.delta_t / 2.0, + snr.delta_t / 2.0, + size=vsamples) + tc = tct + tci * snr.delta_t + float(snr.start_time) - dt + + # Update the current proposed times and the marginalization values + # assumes uniform prior! + logw = - logweight[tci] + numpy.log(1.0 / len(logweight)) + self.marginalize_vector_params['tc'] = tc + self.marginalize_vector_params['logw_partial'] = logw + + if self._current_params is not None: + # Update the importance weights for each vector sample + self._current_params.update(self.marginalize_vector_params) + self.marginalize_vector_weights += logw + + return self.marginalize_vector_params
+ + +
+[docs] + def draw_sky_times(self, snrs, size=None): + """ Draw ra, dec, and tc together using SNR timeseries to determine + monte-carlo weights. + """ + # First setup + # precalculate dense sky grid and make dict and or array of the results + ifos = list(snrs.keys()) + if hasattr(self, 'keep_ifos'): + ifos = self.keep_ifos + ikey = ''.join(ifos) + + # No good SNR peaks, go with prior draw + if len(ifos) == 0: + return + + def make_init(): + self.snr_params = ['tc', 'ra', 'dec'] + size = self.marginalize_sky_initial_samples + logging.info('drawing samples: %s', size) + ra = self.marginalized_vector_priors['ra'].rvs(size=size)['ra'] + dec = self.marginalized_vector_priors['dec'].rvs(size=size)['dec'] + tcmin, tcmax = self.marginalized_vector_priors['tc'].bounds['tc'] + tcave = (tcmax + tcmin) / 2.0 + d = {ifo: Detector(ifo, reference_time=tcave) for ifo in self.data} + + # What data structure to hold times? Dict of offset -> list? + logging.info('sorting into time delay dict') + dts = [] + for i in range(len(ifos) - 1): + dt = d[ifos[0]].time_delay_from_detector(d[ifos[i+1]], + ra, dec, tcave) + dt = numpy.rint(dt / snrs[ifos[0]].delta_t) + dts.append(dt) + + fp, fc, dtc = {}, {}, {} + for ifo in self.data: + fp[ifo], fc[ifo] = d[ifo].antenna_pattern(ra, dec, 0.0, tcave) + dtc[ifo] = d[ifo].time_delay_from_earth_center(ra, dec, tcave) + + dmap = {} + for i, t in enumerate(tqdm.tqdm(zip(*dts))): + if t not in dmap: + dmap[t] = [] + dmap[t].append(i) + + if len(ifos) == 1: + dmap[()] = numpy.arange(0, size, 1).astype(int) + + # Sky prior by bin + bin_prior = {t: len(dmap[t]) / size for t in dmap} + + return dmap, tcmin, tcmax, fp, fc, ra, dec, dtc, bin_prior + + if not hasattr(self, 'tinfo'): + self.tinfo = {} + + if ikey not in self.tinfo: + logging.info('pregenerating sky pointings') + self.tinfo[ikey] = make_init() + + dmap, tcmin, tcmax, fp, fc, ra, dec, dtc, bin_prior = self.tinfo[ikey] + + vsamples = size if size is not None else self.vsamples + + # draw times from each snr time series + # Is it worth doing this if some detector has low SNR? 
+ sref = None + iref = None + idx = [] + dx = [] + mcweight = None + for ifo in ifos: + snr = snrs[ifo] + tmin, tmax = tcmin - EARTH_RADIUS, tcmax + EARTH_RADIUS + if hasattr(self, 'tstart'): + tmin = self.tstart[ifo] + tmax = self.tend[ifo] + + start = max(tmin, snr.start_time + snr.delta_t) + end = min(tmax, snr.end_time - snr.delta_t * 2) + snr = snr.time_slice(start, end, mode='nearest') + + w = snr.squared_norm().numpy() / 2.0 + i = draw_sample(w, size=vsamples) + + if sref is not None: + mcweight += w[i] + delt = float(snr.start_time - sref.start_time) + i += round(delt / sref.delta_t) + dx.append(iref - i) + else: + sref = snr + iref = i + mcweight = w[i] + + idx.append(i) + mcweight -= logsumexp(mcweight) + + # check if delay is in dict, if not, throw out + ti = [] + ix = [] + wi = [] + rand = numpy.random.uniform(0, 1, size=vsamples) + for i in range(vsamples): + t = tuple(x[i] for x in dx) + if t in dmap: + randi = int(rand[i] * (len(dmap[t]))) + ix.append(dmap[t][randi]) + wi.append(bin_prior[t]) + ti.append(i) + + # If we had really poor efficiency at finding a point, we should + # give up and just use the original random draws + if len(ix) < 0.05 * vsamples: + return + + # fill back to fixed size with repeat samples + # sample order is random, so this should be OK statistically + ix = numpy.resize(numpy.array(ix, dtype=int), vsamples) + self.sample_idx = ix + self.precalc_antenna_factors = fp, fc, dtc + resize_factor = len(ti) / vsamples + + ra = ra[ix] + dec = dec[ix] + dtc = {ifo: dtc[ifo][ix] for ifo in dtc} + + ti = numpy.resize(numpy.array(ti, dtype=int), vsamples) + wi = numpy.resize(numpy.array(wi), vsamples) + + # Second draw a subsample size offset so that all times are covered + tct = numpy.random.uniform(-snr.delta_t / 2.0, + snr.delta_t / 2.0, + size=len(ti)) + + tc = tct + iref[ti] * snr.delta_t + float(sref.start_time) - dtc[ifos[0]] + + # Update the current proposed times and the marginalization values + # There's an overall normalization here which may introduce a constant + # factor at the moment. + logw_sky = -mcweight[ti] + numpy.log(wi) - numpy.log(resize_factor) + + self.marginalize_vector_params['tc'] = tc + self.marginalize_vector_params['ra'] = ra + self.marginalize_vector_params['dec'] = dec + self.marginalize_vector_params['logw_partial'] = logw_sky + + if self._current_params is not None: + # Update the importance weights for each vector sample + self._current_params.update(self.marginalize_vector_params) + self.marginalize_vector_weights += logw_sky + + return self.marginalize_vector_params
+ + +
+[docs] + def get_precalc_antenna_factors(self, ifo): + """ Get the antenna factors for marginalized samples if they exist """ + ix = self.sample_idx + fp, fc, dtc = self.precalc_antenna_factors + return fp[ifo][ix], fc[ifo][ix], dtc[ifo][ix]
+ + +
+[docs] + def setup_peak_lock(self, + sample_rate=4096, + snrs=None, + peak_lock_snr=None, + peak_lock_ratio=1e4, + peak_lock_region=4, + **kwargs): + """ Determine where to constrain marginalization based on + the observed reference SNR peaks. + + Parameters + ---------- + sample_rate : float + The SNR sample rate + snrs : Dict of SNR time series + Either provide this or the model needs a function + to get the reference SNRs. + peak_lock_snr: float + The minimum SNR to bother restricting from the prior range + peak_lock_ratio: float + The likelihood ratio (not log) relative to the peak to + act as a threshold bounding region. + peak_lock_region: int + Number of samples to inclue beyond the strict region + determined by the relative likelihood + """ + + if 'tc' not in self.marginalized_vector_priors: + return + + tcmin, tcmax = self.marginalized_vector_priors['tc'].bounds['tc'] + tstart = tcmin - EARTH_RADIUS + tmax = tcmax - tcmin + EARTH_RADIUS * 2.0 + num_samples = int(tmax * sample_rate) + self.tstart = {ifo: tstart for ifo in self.data} + self.num_samples = {ifo: num_samples for ifo in self.data} + + if snrs is None: + if not hasattr(self, 'ref_snr'): + raise ValueError("Model didn't have a reference SNR!") + snrs = self.ref_snr + + # Restrict the time range for constructing SNR time series + # to identifiable peaks + if peak_lock_snr is not None: + peak_lock_snr = float(peak_lock_snr) + peak_lock_ratio = float(peak_lock_ratio) + peak_lock_region = int(peak_lock_region) + + for ifo in snrs: + s = max(tstart, snrs[ifo].start_time) + e = min(tstart + tmax, snrs[ifo].end_time) + z = snrs[ifo].time_slice(s, e, mode='nearest') + peak_snr, imax = z.abs_max_loc() + times = z.sample_times + peak_time = times[imax] + + logging.info('%s: Max Ref SNR Peak of %s at %s', + ifo, peak_snr, peak_time) + + if peak_snr > peak_lock_snr: + target = peak_snr ** 2.0 / 2.0 - numpy.log(peak_lock_ratio) + target = (target * 2.0) ** 0.5 + + region = numpy.where(abs(z) > target)[0] + ts = times[region[0]] - peak_lock_region / sample_rate + te = times[region[-1]] + peak_lock_region / sample_rate + self.tstart[ifo] = ts + self.num_samples[ifo] = int((te - ts) * sample_rate) + + # Check times are commensurate with each other + for ifo in snrs: + ts = self.tstart[ifo] + te = ts + self.num_samples[ifo] / sample_rate + + for ifo2 in snrs: + if ifo == ifo2: + continue + ts2 = self.tstart[ifo2] + te2 = ts2 + self.num_samples[ifo2] / sample_rate + det = Detector(ifo) + dt = Detector(ifo2).light_travel_time_to_detector(det) + + ts = max(ts, ts2 - dt) + te = min(te, te2 + dt) + + self.tstart[ifo] = ts + self.num_samples[ifo] = int((te - ts) * sample_rate) + 1 + logging.info('%s: use region %s-%s, %s points', + ifo, ts, te, self.num_samples[ifo]) + + self.tend = self.tstart.copy() + for ifo in snrs: + self.tend[ifo] += self.num_samples[ifo] / sample_rate
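For a sense of scale (numbers purely illustrative): with a reference peak SNR of 20 and the default ``peak_lock_ratio`` of 1e4, the retained window is where the SNR magnitude exceeds roughly 19.5, padded by ``peak_lock_region`` samples on either side.

    import numpy

    peak_snr, peak_lock_ratio = 20.0, 1e4
    target = (peak_snr ** 2.0 - 2.0 * numpy.log(peak_lock_ratio)) ** 0.5
    print(target)  # ~19.5; samples with |SNR| above this bound the window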
+ + +
+[docs] + def draw_ifos(self, snrs, peak_snr_threshold=4.0, log=True, + precalculate_marginalization_points=False, + **kwargs): + """ Helper utility to determine which ifos we should use based on the + reference SNR time series. + """ + if 'tc' not in self.marginalized_vector_priors: + return + + peak_snr_threshold = float(peak_snr_threshold) + + tcmin, tcmax = self.marginalized_vector_priors['tc'].bounds['tc'] + ifos = list(snrs.keys()) + keep_ifos = [] + psnrs = [] + for ifo in ifos: + snr = snrs[ifo] + start = max(tcmin - EARTH_RADIUS, snr.start_time) + end = min(tcmax + EARTH_RADIUS, snr.end_time) + snr = snr.time_slice(start, end, mode='nearest') + psnr = abs(snr).max() + if psnr > peak_snr_threshold: + keep_ifos.append(ifo) + psnrs.append(psnr) + + if log: + logging.info("Ifos used for SNR based draws:" + " %s, snrs: %s, peak_snr_threshold=%s", + keep_ifos, psnrs, peak_snr_threshold) + + self.keep_ifos = keep_ifos + + if precalculate_marginalization_points: + num_points = int(float(precalculate_marginalization_points)) + self.premarg = self.snr_draw(size=num_points, snrs=snrs).copy() + self.premarg['sample_idx'] = self.sample_idx + + return keep_ifos
+ + + @property + def current_params(self): + """ The current parameters + + If a parameter has been vector marginalized, the likelihood should + expect an array for the given parameter. This allows transparent + vectorization for many models. + """ + params = self._current_params + for k in self.marginalize_vector_params: + if k not in params: + params[k] = self.marginalize_vector_params[k] + self.marginalize_vector_weights = - numpy.log(self.vsamples) + return params + +
+[docs] + def reconstruct(self, rec=None, seed=None, set_loglr=None): + """ Reconstruct the distance or vectored marginalized parameter + of this class. + """ + if seed: + numpy.random.seed(seed) + + if rec is None: + rec = {} + + if set_loglr is None: + def get_loglr(): + p = self.current_params.copy() + p.update(rec) + self.update(**p) + return self.loglr + else: + get_loglr = set_loglr + + if self.marginalize_vector_params: + logging.debug('Reconstruct vector') + self.reconstruct_vector = True + self.reset_vector_params() + loglr = get_loglr() + xl = draw_sample(loglr + self.marginalize_vector_weights) + for k in self.marginalize_vector_params: + rec[k] = self.marginalize_vector_params[k][xl] + self.reconstruct_vector = False + + if self.distance_marginalization: + logging.debug('Reconstruct distance') + # call likelihood to get vector output + self.reconstruct_distance = True + _, weights = self.distance_marginalization + loglr = get_loglr() + xl = draw_sample(loglr + numpy.log(weights)) + rec['distance'] = self.dist_locs[xl] + self.reconstruct_distance = False + + if self.marginalize_phase: + logging.debug('Reconstruct phase') + self.reconstruct_phase = True + s, h = get_loglr() + phasev = numpy.linspace(0, numpy.pi*2.0, int(1e4)) + # This assumes that the template was conjugated in inner products + loglr = (numpy.exp(-2.0j * phasev) * s).real + h + xl = draw_sample(loglr) + rec['coa_phase'] = phasev[xl] + self.reconstruct_phase = False + + rec['loglr'] = loglr[xl] + rec['loglikelihood'] = self.lognl + rec['loglr'] + return rec
+
+ + + +
+[docs] +def setup_distance_marg_interpolant(dist_marg, + phase=False, + snr_range=(1, 50), + density=(1000, 1000)): + """ Create the interpolant for distance marginalization + + Parameters + ---------- + dist_marg: tuple of two arrays + The (dist_loc, dist_weight) tuple which defines the grid + for integrating over distance + snr_range: tuple of (float, float) + Tuple of min, max SNR that the interpolant is expected to work + for. + density: tuple of (float, float) + The number of samples in either dimension of the 2d interpolant + + Returns + ------- + interp: function + Function which returns the precalculated likelihood for a given + inner product sh/hh. + """ + dist_rescale, _ = dist_marg + logging.info("Interpolator valid for SNRs in %s", snr_range) + logging.info("Interpolator using grid %s", density) + # approximate maximum shr and hhr values, assuming the true SNR is + # within the indicated range (and neglecting noise fluctuations) + snr_min, snr_max = snr_range + smax = dist_rescale.max() + smin = dist_rescale.min() + shr_max = snr_max ** 2.0 / smin + hhr_max = snr_max ** 2.0 / smin / smin + + shr_min = snr_min ** 2.0 / smax + hhr_min = snr_min ** 2.0 / smax / smax + + shr = numpy.geomspace(shr_min, shr_max, density[0]) + hhr = numpy.geomspace(hhr_min, hhr_max, density[1]) + lvals = numpy.zeros((len(shr), len(hhr))) + logging.info('Setup up likelihood interpolator') + for i, sh in enumerate(tqdm.tqdm(shr)): + for j, hh in enumerate(hhr): + lvals[i, j] = marginalize_likelihood(sh, hh, + distance=dist_marg, + phase=phase) + interp = RectBivariateSpline(shr, hhr, lvals) + + def interp_wrapper(x, y, bounds_check=True): + k = None + if bounds_check: + if isinstance(x, float): + if x > shr_max or x < shr_min or y > hhr_max or y < hhr_min: + return -numpy.inf + else: + k = (x > shr_max) | (x < shr_min) + k = k | (y > hhr_max) | (y < hhr_min) + + v = interp(x, y, grid=False) + if k is not None: + v[k] = -numpy.inf + return v + return interp_wrapper
+ + + +
+[docs] +def marginalize_likelihood(sh, hh, + logw=None, + phase=False, + distance=False, + skip_vector=False, + interpolator=None, + return_peak=False, + return_complex=False, + ): + """ Return the marginalized likelihood. + + Apply various marginalizations to the data, including phase, distance, + and brute-force vector marginalizations. Several options relate + to how the distance marginalization is approximated and others allow for + special return products to aid in parameter reconstruction. + + Parameters + ---------- + sh: complex float or numpy.ndarray + The data-template inner product + hh: complex float or numpy.ndarray + The template-template inner product + logw: + log weighting factors if vector marginalization is used; if not + given, each sample is assumed to be equally weighted + phase: bool, False + Enable phase marginalization. Only use if orbital phase can be related + to just a single overall phase (e.g. not true for waveforms with + sub-dominant modes) + skip_vector: bool, False + Don't apply marginalization of vector component of input (i.e. leave + as vector). + interpolator: function, None + If provided, internal calculation is skipped in favor of a + precalculated interpolating function which takes in sh/hh + and returns the likelihood. + return_peak: bool, False + Return the peak likelihood and index if passing an array as + input, in addition to the likelihood marginalized over the array. + return_complex: bool, False + Return the sh / hh data products before applying phase marginalization. + This option is intended to aid in reconstructing phase marginalization + and is unlikely to be useful for other purposes. + + Returns + ------- + loglr: float + The marginalized log likelihood ratio + """ + if distance and not interpolator and not numpy.isscalar(sh): + raise ValueError("Cannot do vector marginalization " + "and distance at the same time") + + if logw is None: + if isinstance(hh, float): + logw = 0 + else: + logw = -numpy.log(len(sh)) + + if return_complex: + pass + elif phase: + sh = abs(sh) + else: + sh = sh.real + + if interpolator: + # pre-calculated result for this function + vloglr = interpolator(sh, hh) + + if skip_vector: + return vloglr + else: + # explicit calculation + if distance: + # brute force distance path + dist_rescale, dist_weights = distance + sh = sh * dist_rescale + hh = hh * dist_rescale ** 2.0 + logw = numpy.log(dist_weights) + + if return_complex: + return sh, -0.5 * hh + + # Apply the phase marginalization + if phase: + sh = numpy.log(i0e(sh)) + sh + + # Calculate loglikelihood ratio + vloglr = sh - 0.5 * hh + + if return_peak: + maxv = vloglr.argmax() + maxl = vloglr[maxv] + + # Do brute-force marginalization if loglr is a vector + if isinstance(vloglr, float): + vloglr = float(vloglr) + elif not skip_vector: + vloglr = float(logsumexp(vloglr, b=numpy.exp(logw))) + + if return_peak: + return vloglr, maxv, maxl + return vloglr
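The phase-marginalized branch above relies on the numerically stable identity log I0(x) = log(i0e(x)) + x, so the scalar result is log I0(|<d|h>|) - <h|h>/2. A quick standalone check with made-up numbers:

    import numpy
    from scipy.special import i0e

    sh, hh = 12.0 + 5.0j, 160.0
    with_phase = numpy.log(i0e(abs(sh))) + abs(sh) - 0.5 * hh
    without_phase = sh.real - 0.5 * hh
    print(with_phase, without_phase)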
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/option_utils.html b/latest/html/_modules/pycbc/inference/option_utils.html new file mode 100644 index 00000000000..e8aa95ca44c --- /dev/null +++ b/latest/html/_modules/pycbc/inference/option_utils.html @@ -0,0 +1,512 @@ + + + + + + pycbc.inference.option_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.inference.option_utils

+# Copyright (C) 2016 Collin Capano, Duncan Brown
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This module contains standard options used for inference-related programs.
+"""
+
+import argparse
+from pycbc import waveform
+
+# -----------------------------------------------------------------------------
+#
+#                Utilities for plotting results
+#
+# -----------------------------------------------------------------------------
+
+
+
+[docs] +class ParseLabelArg(argparse.Action): + """Argparse action that will parse arguments that can accept labels. + + This assumes that the values set on the command line for its assigned + argument are strings formatted like ``PARAM[:LABEL]``. When the arguments + are parsed, the ``LABEL`` bit is stripped off and added to a dictionary + mapping ``PARAM -> LABEL``. This dictionary is stored to the parsed + namespace called ``{dest}_labels``, where ``{dest}`` is the argument's + ``dest`` setting (by default, this is the same as the option string). + Likewise, the argument's ``dest`` in the parsed namespace is updated so + that it is just ``PARAM``. + + If no ``LABEL`` is provided, then ``PARAM`` will be used for ``LABEL``. + + This action can work on arguments that have ``nargs != 0`` and ``type`` set + to ``str``. + """ + def __init__(self, type=str, nargs=None, + **kwargs): # pylint: disable=redefined-builtin + # check that type is string + if type != str: + raise ValueError("the type for this action must be a string") + if nargs == 0: + raise ValueError("nargs must not be 0 for this action") + super(ParseLabelArg, self).__init__(type=type, nargs=nargs, + **kwargs) + + def __call__(self, parser, namespace, values, option_string=None): + singlearg = isinstance(values, str) + if singlearg: + values = [values] + params = [] + labels = {} + for param in values: + psplit = param.split(':') + if len(psplit) == 2: + param, label = psplit + else: + label = param + labels[param] = label + params.append(param) + # update the namespace + if singlearg: + params = params[0] + setattr(namespace, self.dest, params) + setattr(namespace, '{}_labels'.format(self.dest), labels)
+ + + +
+[docs] +class ParseParametersArg(ParseLabelArg): + """Argparse action that will parse parameters and labels from an opton. + + Does the same as ``ParseLabelArg``, with the additional functionality that + if ``LABEL`` is a known parameter in ``pycbc.waveform.parameters``, then + the label attribute there will be used in the labels dictionary. + Otherwise, ``LABEL`` will be used. + + Examples + -------- + Create a parser and add two arguments that use this action (note that the + first argument accepts multiple inputs while the second only accepts a + single input): + + >>> import argparse + >>> parser = argparse.ArgumentParser() + >>> parser.add_argument('--parameters', type=str, nargs="+", + action=ParseParametersArg) + >>> parser.add_argument('--z-arg', type=str, action=ParseParametersArg) + + Parse a command line that uses these options: + + >>> import shlex + >>> cli = "--parameters 'mass1+mass2:mtotal' ra ni --z-arg foo:bar" + >>> opts = parser.parse_args(shlex.split(cli)) + >>> opts.parameters + ['mass1+mass2', 'ra', 'ni'] + >>> opts.parameters_labels + {'mass1+mass2': '$M~(\\mathrm{M}_\\odot)$', 'ni': 'ni', 'ra': '$\\alpha$'} + >>> opts.z_arg + 'foo' + >>> opts.z_arg_labels + {'foo': 'bar'} + + In the above, the first argument to ``--parameters`` was ``mtotal``. Since + this is a recognized parameter in ``pycbc.waveform.parameters``, the label + dictionary contains the latex string associated with the ``mtotal`` + parameter. A label was not provided for the second argument, and so ``ra`` + was used. Since ``ra`` is also a recognized parameter, its associated latex + string was used in the labels dictionary. Since ``ni`` and ``bar`` (the + label for ``z-arg``) are not recognized parameters, they were just used + as-is in the labels dictionaries. + """ + def __call__(self, parser, namespace, values, option_string=None): + super(ParseParametersArg, self).__call__(parser, namespace, values, + option_string=option_string) + # try to replace the labels with a label from waveform.parameters + labels = getattr(namespace, '{}_labels'.format(self.dest)) + for param, label in labels.items(): + try: + label = getattr(waveform.parameters, label).label + labels[param] = label + except AttributeError: + pass
+ + + +
+[docs] +def add_injsamples_map_opt(parser): + """Adds an option to the parser to specify a mapping between injection + parameters and sample parameters. + """ + parser.add_argument('--injection-samples-map', nargs='+', + metavar='INJECTION_PARAM:SAMPLES_PARAM', + help='Rename/apply functions to the injection ' + 'parameters and name them the same as one of the ' + 'parameters in samples. This can be used if the ' + 'injection parameters are not the same as the ' + 'samples parameters. INJECTION_PARAM may be a ' + 'function of the injection parameters; ' + 'SAMPLES_PARAM must be the name of one of the ' + 'parameters in the samples group.')
+ + + +
+[docs] +def add_plot_posterior_option_group(parser): + """Adds the options needed to configure plots of posterior results. + + Parameters + ---------- + parser : object + ArgumentParser instance. + """ + pgroup = parser.add_argument_group("Options for what plots to create and " + "their formats.") + pgroup.add_argument('--plot-marginal', action='store_true', default=False, + help="Plot 1D marginalized distributions on the " + "diagonal axes.") + pgroup.add_argument('--marginal-percentiles', nargs='+', default=None, + type=float, + help="Percentiles to draw lines at on the 1D " + "histograms.") + pgroup.add_argument('--no-marginal-lines', action='store_true', + default=False, + help="Do not add vertical lines in the 1D marginal " + "plots showing the marginal percentiles.") + pgroup.add_argument('--no-marginal-titles', action='store_true', + default=False, + help="Do not add titles giving the 1D credible range " + "over the 1D marginal plots.") + pgroup.add_argument("--plot-scatter", action='store_true', default=False, + help="Plot each sample point as a scatter plot.") + pgroup.add_argument("--plot-density", action="store_true", default=False, + help="Plot the posterior density as a color map.") + pgroup.add_argument("--plot-contours", action="store_true", default=False, + help="Draw contours showing the 50th and 90th " + "percentile confidence regions.") + pgroup.add_argument('--contour-percentiles', nargs='+', default=None, + type=float, + help="Percentiles to draw contours if different " + "than 50th and 90th.") + # add mins, maxs options + pgroup.add_argument('--mins', nargs='+', metavar='PARAM:VAL', default=[], + help="Specify minimum parameter values to plot. This " + "should be done by specifying the parameter name " + "followed by the value. Parameter names must be " + "the same as the PARAM argument in --parameters " + "(or, if no parameters are provided, the same as " + "the parameter name specified in the variable " + "args in the input file. If none provided, " + "the smallest parameter value in the posterior " + "will be used.") + pgroup.add_argument('--maxs', nargs='+', metavar='PARAM:VAL', default=[], + help="Same as mins, but for the maximum values to " + "plot.") + # add expected parameters options + pgroup.add_argument('--expected-parameters', nargs='+', + metavar='PARAM:VAL', + default=[], + help="Specify expected parameter values to plot. If " + "provided, a cross will be plotted in each axis " + "that an expected parameter is provided. " + "Parameter names must be " + "the same as the PARAM argument in --parameters " + "(or, if no parameters are provided, the same as " + "the parameter name specified in the variable " + "args in the input file.") + pgroup.add_argument('--expected-parameters-color', default='r', + help="What to color the expected-parameters cross. " + "Default is red.") + pgroup.add_argument('--plot-injection-parameters', action='store_true', + default=False, + help="Get the expected parameters from the injection " + "in the input file. There must be only a single " + "injection in the file to work. Any values " + "specified by expected-parameters will override " + "the values obtained for the injection.") + pgroup.add_argument('--pick-injection-by-time', action='store_true', + default=False, + help="In the case of multiple injections, pick one" + " for plotting based on its proximity in time.") + add_injsamples_map_opt(pgroup) + return pgroup
+ + + +
+[docs] +def plot_ranges_from_cli(opts): + """Parses the mins and maxs arguments from the `plot_posterior` option + group. + + Parameters + ---------- + opts : ArgumentParser + The parsed arguments from the command line. + + Returns + ------- + mins : dict + Dictionary of parameter name -> specified mins. Only parameters that + were specified in the --mins option will be included; if no parameters + were provided, will return an empty dictionary. + maxs : dict + Dictionary of parameter name -> specified maxs. Only parameters that + were specified in the --mins option will be included; if no parameters + were provided, will return an empty dictionary. + """ + mins = {} + for x in opts.mins: + x = x.split(':') + if len(x) != 2: + raise ValueError("option --mins not specified correctly; see help") + mins[x[0]] = float(x[1]) + maxs = {} + for x in opts.maxs: + x = x.split(':') + if len(x) != 2: + raise ValueError("option --maxs not specified correctly; see help") + maxs[x[0]] = float(x[1]) + return mins, maxs
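For example (hypothetical options object), ``--mins mass1:10 mass2:10 --maxs mass1:80`` parses to ``{'mass1': 10.0, 'mass2': 10.0}`` and ``{'mass1': 80.0}``:

    from types import SimpleNamespace

    opts = SimpleNamespace(mins=['mass1:10', 'mass2:10'], maxs=['mass1:80'])
    mins, maxs = plot_ranges_from_cli(opts)
    print(mins, maxs)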
+ + + +
+[docs] +def expected_parameters_from_cli(opts): + """Parses the --expected-parameters arguments from the `plot_posterior` + option group. + + Parameters + ---------- + opts : ArgumentParser + The parsed arguments from the command line. + + Returns + ------- + dict + Dictionary of parameter name -> expected value. Only parameters that + were specified in the --expected-parameters option will be included; if + no parameters were provided, will return an empty dictionary. + """ + expected = {} + for x in opts.expected_parameters: + x = x.split(':') + if len(x) != 2: + raise ValueError("option --expected-parameters not specified " + "correctly; see help") + expected[x[0]] = float(x[1]) + return expected
+ + + +
+[docs] +def add_scatter_option_group(parser): + """Adds the options needed to configure scatter plots. + + Parameters + ---------- + parser : object + ArgumentParser instance. + """ + scatter_group = parser.add_argument_group("Options for configuring the " + "scatter plot.") + + scatter_group.add_argument( + '--z-arg', type=str, default=None, action=ParseParametersArg, + help='What to color the scatter points by. Syntax is the same as the ' + 'parameters option.') + scatter_group.add_argument( + "--vmin", type=float, help="Minimum value for the colorbar.") + scatter_group.add_argument( + "--vmax", type=float, help="Maximum value for the colorbar.") + scatter_group.add_argument( + "--scatter-cmap", type=str, default='plasma', + help="Specify the colormap to use for points. Default is plasma.") + + return scatter_group
+ + + +
+[docs] +def add_density_option_group(parser): + """Adds the options needed to configure contours and the density color map. + + Parameters + ---------- + parser : object + ArgumentParser instance. + """ + density_group = parser.add_argument_group("Options for configuring the " + "contours and density color map") + + density_group.add_argument( + "--density-cmap", type=str, default='viridis', + help="Specify the colormap to use for the density. " + "Default is viridis.") + density_group.add_argument( + "--contour-color", type=str, default=None, + help="Specify the color to use for the contour lines. Default is " + "white for density plots and black for scatter plots.") + density_group.add_argument( + "--contour-linestyles", type=str, default=None, nargs="+", + help="Specify the linestyles to use for the contour lines. Default " + "is solid for all.") + density_group.add_argument( + "--no-contour-labels", action="store_true", default=False, + help="Don't put labels on the contours.") + density_group.add_argument( + '--use-kombine-kde', default=False, action="store_true", + help="Use kombine's clustered KDE for determining 2D marginal " + "contours and density instead of scipy's gaussian_kde (the " + "default). This is better at distinguishing bimodal " + "distributions, but is much slower than the default. For speed, " + "suggest setting --kde-args 'max_samples:20000' or smaller if " + "using this. Requires kombine to be installed.") + density_group.add_argument( + '--max-kde-samples', type=int, default=None, + help="Limit the number of samples used for KDE construction to the " + "given value. This can substantially speed up plot generation " + "(particularly when plotting multiple parameters). Suggested " + "values: 5000 to 10000.") + density_group.add_argument( + '--kde-args', metavar="ARG:VALUE", nargs='+', default=None, + help="Pass the given argument, value pairs to the KDE function " + "(either scipy's or kombine's) when setting it up.") + return density_group
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler.html b/latest/html/_modules/pycbc/inference/sampler.html new file mode 100644 index 00000000000..bccc24dae17 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler.html @@ -0,0 +1,242 @@ + + + + + + pycbc.inference.sampler — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.inference.sampler

+# Copyright (C) 2016  Christopher M. Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""
+This module provides a list of implemented samplers for parameter estimation.
+"""
+
+import logging
+
+# pylint: disable=unused-import
+from .base import (initial_dist_from_config, create_new_output_file)
+from .multinest import MultinestSampler
+from .ultranest import UltranestSampler
+from .dummy import DummySampler
+from .refine import RefineSampler
+from .snowline import SnowlineSampler
+
+# list of available samplers
+samplers = {cls.name: cls for cls in (
+    MultinestSampler,
+    UltranestSampler,
+    DummySampler,
+    RefineSampler,
+    SnowlineSampler,
+)}
+
+try:
+    from .emcee import EmceeEnsembleSampler
+    from .emcee_pt import EmceePTSampler
+    samplers[EmceeEnsembleSampler.name] = EmceeEnsembleSampler
+    samplers[EmceePTSampler.name] = EmceePTSampler
+except ImportError:
+    pass
+
+try:
+    from .epsie import EpsieSampler
+    samplers[EpsieSampler.name] = EpsieSampler
+except ImportError:
+    pass
+
+try:
+    from .ptemcee import PTEmceeSampler
+    samplers[PTEmceeSampler.name] = PTEmceeSampler
+except ImportError:
+    pass
+
+try:
+    from .cpnest import CPNestSampler
+    samplers[CPNestSampler.name] = CPNestSampler
+except ImportError:
+    pass
+
+try:
+    from .dynesty import DynestySampler
+    samplers[DynestySampler.name] = DynestySampler
+except ImportError:
+    pass
+
+try:
+    from .nessai import NessaiSampler
+    samplers[NessaiSampler.name] = NessaiSampler
+except ImportError:
+    pass
+
+
+
+[docs] +def load_from_config(cp, model, **kwargs): + """Loads a sampler from the given config file. + + This looks for a name in the section ``[sampler]`` to determine which + sampler class to load. That sampler's ``from_config`` is then called. + + Parameters + ---------- + cp : WorkflowConfigParser + Config parser to read from. + model : pycbc.inference.model + Which model to pass to the sampler. + **kwargs : + All other keyword arguments are passed directly to the sampler's + ``from_config`` file. + + Returns + ------- + sampler : + The initialized sampler. + """ + if len(model.variable_params) == 0: + logging.info('No variable params, so assuming Dummy Sampler') + return DummySampler.from_config(cp, model, **kwargs) + + name = cp.get('sampler', 'name') + try: + return samplers[name].from_config(cp, model, **kwargs) + except KeyError: + raise ImportError( + f"No available sampler named {name}. Please check " + "if the name is correct or the required package " + "for this sampler is installed correctly. " + f"Available samplers: {', '.join(list(samplers.keys()))}" + )
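A minimal sketch of the intended call pattern (file name and contents are illustrative): given a configuration file whose ``[sampler]`` section sets, say, ``name = dynesty``, the sampler is built around a model constructed from the same file.

    from pycbc.workflow import WorkflowConfigParser
    from pycbc.inference import models, sampler

    cp = WorkflowConfigParser(['inference.ini'])   # hypothetical config file
    model = models.read_from_config(cp)            # model defined in the same file
    smpl = sampler.load_from_config(cp, model, nprocesses=1, use_mpi=False)
    smpl.run()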
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/base.html b/latest/html/_modules/pycbc/inference/sampler/base.html new file mode 100644 index 00000000000..d776399fa88 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/base.html @@ -0,0 +1,415 @@ + + + + + + pycbc.inference.sampler.base — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.inference.sampler.base

+# Copyright (C) 2016  Christopher M. Biwer, Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+Defines the base sampler class to be inherited by all samplers.
+"""
+
+
+from abc import ABCMeta, abstractmethod, abstractproperty
+import shutil
+import logging
+
+from six import add_metaclass
+
+from pycbc import distributions
+from pycbc.inference.io import validate_checkpoint_files
+
+#
+# =============================================================================
+#
+#                           Base Sampler definition
+#
+# =============================================================================
+#
+
+
+
+[docs] +@add_metaclass(ABCMeta) +class BaseSampler(object): + """Abstract base class for all inference samplers. + + All sampler classes must inherit from this class and implement its abstract + methods. + + Parameters + ---------- + model : Model + An instance of a model from ``pycbc.inference.models``. + """ + name = None + + def __init__(self, model): + self.model = model + self.checkpoint_file = None + self.backup_file = None + self.checkpoint_valid = None + self.new_checkpoint = None + + # @classmethod <--uncomment when we move to python 3.3 +
+[docs] + @abstractmethod + def from_config(cls, cp, model, output_file=None, nprocesses=1, + use_mpi=False): + """This should initialize the sampler given a config file. + """ + pass
+ + + @property + def variable_params(self): + """Returns the parameters varied in the model. + """ + return self.model.variable_params + + @property + def sampling_params(self): + """Returns the sampling params used by the model. + """ + return self.model.sampling_params + + @property + def static_params(self): + """Returns the model's fixed parameters. + """ + return self.model.static_params + + @abstractproperty + def samples(self): + """A dict mapping variable_params to arrays of samples currently + in memory. The dictionary may also contain sampling_params. + + The sample arrays may have any shape, and may or may not be thinned. + """ + pass + + @abstractproperty + def model_stats(self): + """A dict mapping model's metadata fields to arrays of values for + each sample in ``raw_samples``. + + The arrays may have any shape, and may or may not be thinned. + """ + pass + +
+[docs] + @abstractmethod + def run(self): + """This function should run the sampler. + + Any checkpointing should be done internally in this function. + """ + pass
+ + + @abstractproperty + def io(self): + """A class that inherits from ``BaseInferenceFile`` to handle IO with + an hdf file. + + This should be a class, not an instance of class, so that the sampler + can initialize it when needed. + """ + pass + +
+[docs] + @abstractmethod + def checkpoint(self): + """The sampler must have a checkpoint method for dumping raw samples + and stats to the file type defined by ``io``. + """ + pass
+ + +
+[docs] + @abstractmethod + def finalize(self): + """Do any finalization to the samples file before exiting.""" + pass
+ + +
+[docs] + @abstractmethod + def resume_from_checkpoint(self): + """Resume the sampler from the output file. + """ + pass
+
+ + +# +# ============================================================================= +# +# Convenience functions +# +# ============================================================================= +# + + +
+[docs] +def setup_output(sampler, output_file, check_nsamples=True, validate=True): + r"""Sets up the sampler's checkpoint and output files. + + The checkpoint file has the same name as the output file, but with + ``.checkpoint`` appended to the name. A backup file will also be + created. + + Parameters + ---------- + sampler : sampler instance + Sampler + output_file : str + Name of the output file. + """ + # check for backup file(s) + checkpoint_file = output_file + '.checkpoint' + backup_file = output_file + '.bkup' + # check if we have a good checkpoint and/or backup file + logging.info("Looking for checkpoint file") + checkpoint_valid = False + if validate: + checkpoint_valid = validate_checkpoint_files(checkpoint_file, + backup_file, + check_nsamples) + # Create a new file if the checkpoint doesn't exist, or if it is + # corrupted + sampler.new_checkpoint = False # keeps track if this is a new file or not + if not checkpoint_valid: + logging.info("Checkpoint not found or not valid") + create_new_output_file(sampler, checkpoint_file) + # now the checkpoint is valid + sampler.new_checkpoint = True + # copy to backup + shutil.copy(checkpoint_file, backup_file) + # write the command line, startup + for fn in [checkpoint_file, backup_file]: + with sampler.io(fn, "a") as fp: + fp.write_command_line() + + fp.write_resume_point() + fp.write_run_start_time() + # store + sampler.checkpoint_file = checkpoint_file + sampler.backup_file = backup_file
+ + + +
+[docs] +def create_new_output_file(sampler, filename, **kwargs): + r"""Creates a new output file. + + Parameters + ---------- + sampler : sampler instance + Sampler + filename : str + Name of the file to create. + \**kwargs : + All other keyword arguments are passed through to the file's + ``write_metadata`` function. + """ + logging.info("Creating file {}".format(filename)) + with sampler.io(filename, "w") as fp: + # create the samples group and sampler info group + fp.create_group(fp.samples_group) + fp.create_group(fp.sampler_group) + # save the sampler's metadata + fp.write_sampler_metadata(sampler)
+ + + +
+[docs] +def initial_dist_from_config(cp, variable_params, static_params=None): + r"""Loads a distribution for the sampler start from the given config file. + + A distribution will only be loaded if the config file has a [initial-\*] + section(s). + + Parameters + ---------- + cp : Config parser + The config parser to try to load from. + variable_params : list of str + The variable parameters for the distribution. + static_params : dict, optional + The static parameters used to place constraints on the + distribution. + + Returns + ------- + JointDistribution or None : + The initial distribution. If no [initial-\*] section found in the + config file, will just return None. + """ + if len(cp.get_subsections("initial")): + logging.info("Using a different distribution for the starting points " + "than the prior.") + initial_dists = distributions.read_distributions_from_config( + cp, section="initial") + constraints = distributions.read_constraints_from_config( + cp, constraint_section="initial_constraint", + static_args=static_params) + init_dist = distributions.JointDistribution( + variable_params, *initial_dists, + **{"constraints": constraints}) + else: + init_dist = None + return init_dist
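For illustration, a config file might request a distinct starting distribution with ``[initial-*]`` sections along the following lines (the parameter name and bounds are hypothetical; the section syntax mirrors the usual distribution sections):

    # hedged sketch of the relevant config sections (values are illustrative)
    example_ini = """
    [initial-distance]
    name = uniform
    min-distance = 100
    max-distance = 1000
    """
    # parsed with PyCBC's WorkflowConfigParser, initial_dist_from_config would
    # then return a JointDistribution built from the [initial-*] sections
    # instead of returning None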
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/base_cube.html b/latest/html/_modules/pycbc/inference/sampler/base_cube.html new file mode 100644 index 00000000000..83b69fa3644 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/base_cube.html @@ -0,0 +1,242 @@ + + + + + + pycbc.inference.sampler.base_cube — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.base_cube

+# Copyright (C) 2020 Sumit Kumar, Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+Common utilities for samplers that rely on transforming between a unit cube
+and the prior space. This is typical of many nested sampling algorithms.
+"""
+import numpy
+
+from .. import models
+
+
+
+[docs] +def call_global_loglikelihood(cube): + return models._global_instance.log_likelihood(cube)
+ + + +
+[docs] +def call_global_logprior(cube): + return models._global_instance.prior_transform(cube)
+ + + +
+[docs] +def setup_calls(model, loglikelihood_function=None, copy_prior=False): + """ Configure calls for MPI support + """ + model_call = CubeModel(model, loglikelihood_function, + copy_prior=copy_prior) + + # these are used to help parallelize over multiple cores / MPI + models._global_instance = model_call + log_likelihood_call = call_global_loglikelihood + prior_call = call_global_logprior + return log_likelihood_call, prior_call
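A hedged sketch of how the returned callables might be used directly (``model`` is assumed to be an already-constructed PyCBC Inference model; it is not defined in the source shown here):

    import numpy
    # module-level functions are returned so they can be pickled by
    # multiprocessing/MPI pools
    loglike_call, prior_call = setup_calls(model, copy_prior=True)
    u = numpy.random.uniform(size=len(model.sampling_params))  # unit-cube point
    params = prior_call(u)        # map the cube point into the prior space
    logl = loglike_call(params)   # evaluate the likelihood at those parameters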
+ + + +
+[docs] +class CubeModel(object): + """ Wrapper presenting a PyCBC Inference model to samplers that work in a + unit-cube parameter space (e.g. nested samplers). + + Parameters + ---------- + model : inference.BaseModel instance + A model instance from pycbc. + """ + + def __init__(self, model, loglikelihood_function=None, copy_prior=False): + if model.sampling_transforms is not None: + raise ValueError("Ultranest or dynesty do not support sampling transforms") + self.model = model + if loglikelihood_function is None: + loglikelihood_function = 'loglikelihood' + self.loglikelihood_function = loglikelihood_function + self.copy_prior = copy_prior + +
+[docs] + def log_likelihood(self, cube): + """ + Returns the log likelihood of the parameter values in ``cube``. + """ + params = dict(zip(self.model.sampling_params, cube)) + self.model.update(**params) + if self.model.logprior == -numpy.inf: + return -numpy.inf + return getattr(self.model, self.loglikelihood_function)
+ + +
+[docs] + def prior_transform(self, cube): + """ + Prior transform function for unit-cube based samplers (e.g. ultranest). + Maps a point in the unit cube to the prior space by applying the + inverse CDF of the prior to each coordinate. + """ + if self.copy_prior: + cube = cube.copy() + + # we preserve the type of cube to whatever we were given + dict_cube = dict(zip(self.model.variable_params, cube)) + inv = self.model.prior_distribution.cdfinv(**dict_cube) + for i, param in enumerate(self.model.variable_params): + cube[i] = inv[param] + return cube
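For intuition, the inverse-CDF mapping performed here can be checked by hand for a single parameter with a uniform prior on [a, b] (a standalone sketch, not part of the source):

    # for a uniform prior on [a, b], the inverse CDF is a + u * (b - a)
    a, b = 10.0, 50.0
    u = 0.25                  # unit-cube coordinate
    print(a + u * (b - a))    # 20.0: a quarter of the way into the prior range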
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/base_mcmc.html b/latest/html/_modules/pycbc/inference/sampler/base_mcmc.html new file mode 100644 index 00000000000..a5ae0f5e605 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/base_mcmc.html @@ -0,0 +1,1160 @@ + + + + + + pycbc.inference.sampler.base_mcmc — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.base_mcmc

+# Copyright (C) 2016  Christopher M. Biwer, Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Provides constructor classes and convenience functions for MCMC samplers."""
+
+import logging
+from abc import (ABCMeta, abstractmethod, abstractproperty)
+
+import configparser as ConfigParser
+
+import numpy
+
+from pycbc.filter import autocorrelation
+from pycbc.inference.io import (validate_checkpoint_files, loadfile)
+from pycbc.inference.io.base_mcmc import nsamples_in_chain
+from .base import initial_dist_from_config
+
+#
+# =============================================================================
+#
+#                              Convenience functions
+#
+# =============================================================================
+#
+
+
+
+[docs] +def raw_samples_to_dict(sampler, raw_samples): + """Convenience function for converting ND array to a dict of samples. + + The samples are assumed to have dimension + ``[sampler.base_shape x] niterations x len(sampler.sampling_params)``. + + Parameters + ---------- + sampler : sampler instance + An instance of an MCMC sampler. + raw_samples : array + The array of samples to convert. + + Returns + ------- + dict : + A dictionary mapping the raw samples to the variable params. If the + sampling params are not the same as the variable params, they will + also be included. Each array will have shape + ``[sampler.base_shape x] niterations``. + """ + sampling_params = sampler.sampling_params + # convert to dictionary + samples = {param: raw_samples[..., ii] for + ii, param in enumerate(sampling_params)} + # apply boundary conditions + samples = sampler.model.prior_distribution.apply_boundary_conditions( + **samples) + # apply transforms to go to model's variable params space + if sampler.model.sampling_transforms is not None: + samples = sampler.model.sampling_transforms.apply( + samples, inverse=True) + return samples
+ + + +
+[docs] +def blob_data_to_dict(stat_names, blobs): + """Converts list of "blobs" to a dictionary of model stats. + + Samplers like ``emcee`` store the extra tuple returned by ``CallModel`` to + a list called blobs. This is a list of lists of tuples with shape + niterations x nwalkers x nstats, where nstats is the number of stats + returned by the model's ``default_stats``. This converts that list to a + dictionary of arrays keyed by the stat names. + + Parameters + ---------- + stat_names : list of str + The list of the stat names. + blobs : list of list of tuples + The data to convert. + + Returns + ------- + dict : + A dictionary mapping the model's ``default_stats`` to arrays of values. + Each array will have shape ``nwalkers x niterations``. + """ + # get the dtypes of each of the stats; we'll just take this from the + # first iteration and walker + dtypes = [type(val) for val in blobs[0][0]] + assert len(stat_names) == len(dtypes), ( + "number of stat names must match length of tuples in the blobs") + # convert to an array; to ensure that we get the dtypes correct, we'll + # cast to a structured array + raw_stats = numpy.array(blobs, dtype=list(zip(stat_names, dtypes))) + # transpose so that it has shape nwalkers x niterations + raw_stats = raw_stats.transpose() + # now return as a dictionary + return {stat: raw_stats[stat] for stat in stat_names}
+ + + +
+[docs] +def get_optional_arg_from_config(cp, section, arg, dtype=str): + """Convenience function to retrieve an optional argument from a config + file. + + Parameters + ---------- + cp : ConfigParser + Open config parser to retrieve the argument from. + section : str + Name of the section to retrieve from. + arg : str + Name of the argument to retrieve. + dtype : datatype, optional + Cast the retrieved value (if it exists) to the given datatype. Default + is ``str``. + + Returns + ------- + val : None or str + If the argument is present, the value. Otherwise, None. + """ + if cp.has_option(section, arg): + val = dtype(cp.get(section, arg)) + else: + val = None + return val
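A small, self-contained example using the standard-library ``configparser`` (only ``has_option`` and ``get`` are needed, so it behaves the same as with PyCBC's config parser):

    import configparser
    cp = configparser.ConfigParser()
    cp.read_string("[sampler]\ncheckpoint-interval = 2000\n")
    print(get_optional_arg_from_config(cp, 'sampler', 'checkpoint-interval',
                                       dtype=int))                 # 2000
    print(get_optional_arg_from_config(cp, 'sampler', 'nwalkers'))  # None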
+ + + +# +# ============================================================================= +# +# BaseMCMC definition +# +# ============================================================================= +# + + +
+[docs] +class BaseMCMC(object, metaclass=ABCMeta): + """Abstract base class that provides methods common to MCMCs. + + This is not a sampler class itself. Sampler classes can inherit from this + along with ``BaseSampler``. + + This class provides ``set_initial_conditions``, ``run``, and ``checkpoint`` + methods, which are some of the abstract methods required by + ``BaseSampler``. + + This class introduces the following abstract properties and methods: + + * base_shape + [`property`] Should give the shape of the samples arrays used by the + sampler, excluding the iterations dimension. Needed for writing + results. + * run_mcmc(niterations) + Should run the sampler for the given number of iterations. Called by + ``run``. + * clear_samples() + Should clear samples from memory. Called by ``run``. + * set_state_from_file(filename) + Should set the random state of the sampler using the given filename. + Called by ``set_initial_conditions``. + * write_results(filename) + Writes results to the given filename. Called by ``checkpoint``. + * compute_acf(filename, \**kwargs) + [`classmethod`] Should compute the autocorrelation function using + the given filename. Also allows for other keyword arguments. + * compute_acl(filename, \**kwargs) + [`classmethod`] Should compute the autocorrelation length using + the given filename. Also allows for other keyword arguments. + """ + _lastclear = None # the iteration when samples were cleared from memory + _itercounter = None # the number of iterations since the last clear + _pos = None + _p0 = None + _nchains = None + _burn_in = None + _acls = None + _checkpoint_interval = None + _checkpoint_signal = None + _target_niterations = None + _target_eff_nsamples = None + _thin_interval = 1 + _max_samples_per_chain = None + + @abstractproperty + def base_shape(self): + """What shape the sampler's samples arrays are in, excluding + the iterations dimension. + + For example, if a sampler uses 20 chains and 3 temperatures, this + would be ``(3, 20)``. If a sampler only uses a single walker and no + temperatures this would be ``()``. + """ + pass + + @property + def nchains(self): + """The number of chains used.""" + if self._nchains is None: + raise ValueError("number of chains not set") + return self._nchains + + @nchains.setter + def nchains(self, value): + """Sets the number of chains.""" + # we'll actually store it to the _nchains attribute + self._nchains = int(value) + + @property + def niterations(self): + """The current number of iterations.""" + itercounter = self._itercounter + if itercounter is None: + itercounter = 0 + lastclear = self._lastclear + if lastclear is None: + lastclear = 0 + return itercounter + lastclear + + @property + def checkpoint_interval(self): + """The number of iterations to do between checkpoints.""" + return self._checkpoint_interval + + @property + def checkpoint_signal(self): + """The signal to use when checkpointing.""" + return self._checkpoint_signal + + @property + def target_niterations(self): + """The number of iterations the sampler should run for.""" + return self._target_niterations + + @property + def target_eff_nsamples(self): + """The target number of effective samples the sampler should get.""" + return self._target_eff_nsamples + + @property + def thin_interval(self): + """Returns the thin interval being used.""" + return self._thin_interval + + @thin_interval.setter + def thin_interval(self, interval): + """Sets the thin interval to use. + + If ``None`` is provided, will default to 1.
+ """ + if interval is None: + interval = 1 + if interval < 1: + raise ValueError("thin interval must be >= 1") + self._thin_interval = interval + + @property + def thin_safety_factor(self): + """The minimum value that ``max_samples_per_chain`` may be set to.""" + return 100 + + @property + def max_samples_per_chain(self): + """The maximum number of samplers per chain that is written to disk.""" + return self._max_samples_per_chain + + @max_samples_per_chain.setter + def max_samples_per_chain(self, n): + if n is not None: + n = int(n) + if n < self.thin_safety_factor: + raise ValueError("max samples per chain must be >= {}" + .format(self.thin_safety_factor)) + # also check that this is consistent with the target number of + # effective samples + if self.target_eff_nsamples is not None: + target_samps_per_chain = int(numpy.ceil( + self.target_eff_nsamples / self.nchains)) + if n <= target_samps_per_chain: + raise ValueError("max samples per chain must be > target " + "effective number of samples per walker " + "({})".format(target_samps_per_chain)) + self._max_samples_per_chain = n + +
+[docs] + def get_thin_interval(self): + """Gets the thin interval to use. + + If ``max_samples_per_chain`` is set, this will figure out what thin + interval is needed to satisfy that criterion. In that case, the thin + interval used must be a multiple of the currently used thin interval. + """ + if self.max_samples_per_chain is not None: + # the extra factor of 2 is to account for the fact that the thin + # interval will need to be at least twice as large as a previously + # used interval + thinfactor = 2*(self.niterations // self.max_samples_per_chain) + # make sure it's at least 1 + thinfactor = max(thinfactor, 1) + # make sure the new interval is a multiple of the previous, to ensure + # that any samples currently on disk can be thinned accordingly + if thinfactor < self.thin_interval: + thin_interval = self.thin_interval + else: + thin_interval = (thinfactor // self.thin_interval) * \ + self.thin_interval + else: + thin_interval = self.thin_interval + return thin_interval
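The thinning arithmetic can be checked by hand; a hedged numerical sketch assuming 10000 iterations, ``max_samples_per_chain = 1000``, and a current thin interval of 4:

    niterations, max_per_chain, current_thin = 10000, 1000, 4
    thinfactor = max(2 * (niterations // max_per_chain), 1)     # 20
    if thinfactor < current_thin:
        new_thin = current_thin
    else:
        new_thin = (thinfactor // current_thin) * current_thin  # 20, a multiple of 4
    print(new_thin)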
+ + +
+[docs] + def set_target(self, niterations=None, eff_nsamples=None): + """Sets the target niterations/nsamples for the sampler. + + One or the other must be provided, not both. + """ + if niterations is None and eff_nsamples is None: + raise ValueError("Must provide a target niterations or " + "eff_nsamples") + if niterations is not None and eff_nsamples is not None: + raise ValueError("Must provide a target niterations or " + "eff_nsamples, not both") + self._target_niterations = int(niterations) \ + if niterations is not None else None + self._target_eff_nsamples = int(eff_nsamples) \ + if eff_nsamples is not None else None
+ + +
+[docs] + @abstractmethod + def clear_samples(self): + """A method to clear samples from memory.""" + pass
+ + + @property + def pos(self): + """A dictionary of the current walker positions. + + If the sampler hasn't been run yet, returns p0. + """ + pos = self._pos + if pos is None: + return self.p0 + # convert to dict + pos = {param: self._pos[..., k] + for (k, param) in enumerate(self.sampling_params)} + return pos + + @property + def p0(self): + """A dictionary of the initial position of the chains. + + This is set by using ``set_p0``. If not set yet, a ``ValueError`` is + raised when the attribute is accessed. + """ + if self._p0 is None: + raise ValueError("initial positions not set; run set_p0") + # convert to dict + p0 = {param: self._p0[..., k] + for (k, param) in enumerate(self.sampling_params)} + return p0 + +
+[docs] + def set_p0(self, samples_file=None, prior=None): + """Sets the initial position of the chains. + + Parameters + ---------- + samples_file : InferenceFile, optional + If provided, use the last iteration in the given file for the + starting positions. + prior : JointDistribution, optional + Use the given prior to set the initial positions rather than + ``model``'s prior. + + Returns + ------- + p0 : dict + A dictionary mapping sampling params to the starting positions. + """ + # if samples are given then use those as initial positions + if samples_file is not None: + with self.io(samples_file, 'r') as fp: + samples = fp.read_samples(self.variable_params, + iteration=-1, flatten=False) + # remove the (length 1) niterations dimension + samples = samples[..., 0] + # make sure we have the same shape + assert samples.shape == self.base_shape, ( + "samples in file {} have shape {}, but I have shape {}". + format(samples_file, samples.shape, self.base_shape)) + # transform to sampling parameter space + if self.model.sampling_transforms is not None: + samples = self.model.sampling_transforms.apply(samples) + # draw random samples if samples are not provided + else: + nsamples = numpy.prod(self.base_shape) + samples = self.model.prior_rvs(size=nsamples, prior=prior).reshape( + self.base_shape) + # store as ND array with shape [base_shape] x nparams + ndim = len(self.variable_params) + p0 = numpy.ones(list(self.base_shape)+[ndim]) + for i, param in enumerate(self.sampling_params): + p0[..., i] = samples[param] + self._p0 = p0 + return self.p0
+ + +
+[docs] + @abstractmethod + def set_state_from_file(self, filename): + """Sets the state of the sampler to the instance saved in a file. + """ + pass
+ + +
+[docs] + def set_start_from_config(self, cp): + """Sets the initial state of the sampler from config file + """ + if cp.has_option('sampler', 'start-file'): + start_file = cp.get('sampler', 'start-file') + logging.info("Using file %s for initial positions", start_file) + init_prior = None + else: + start_file = None + init_prior = initial_dist_from_config( + cp, self.variable_params, self.static_params) + self.set_p0(samples_file=start_file, prior=init_prior)
+ + +
+[docs] + def resume_from_checkpoint(self): + """Resume the sampler from the checkpoint file + """ + with self.io(self.checkpoint_file, "r") as fp: + self._lastclear = fp.niterations + self.set_p0(samples_file=self.checkpoint_file) + self.set_state_from_file(self.checkpoint_file)
+ + +
+[docs] + def run(self): + """Runs the sampler.""" + if self.target_eff_nsamples and self.checkpoint_interval is None: + raise ValueError("A checkpoint interval must be set if " + "targeting an effective number of samples") + # get the starting number of samples: + # "nsamples" keeps track of the number of samples we've obtained (if + # target_eff_nsamples is not None, this is the effective number of + # samples; otherwise, this is the total number of samples). + # The checkpoint file may already contain samples (either due to + # sampler burn-in, or a previous checkpoint) + if self.new_checkpoint: + self._lastclear = 0 + else: + with self.io(self.checkpoint_file, "r") as fp: + self._lastclear = fp.niterations + self.thin_interval = fp.thinned_by + if self.target_eff_nsamples is not None: + target_nsamples = self.target_eff_nsamples + with self.io(self.checkpoint_file, "r") as fp: + nsamples = fp.effective_nsamples + elif self.target_niterations is not None: + # the number of samples is the number of iterations times the + # number of chains + target_nsamples = self.nchains * self.target_niterations + nsamples = self._lastclear * self.nchains + else: + raise ValueError("must set either target_eff_nsamples or " + "target_niterations; see set_target") + self._itercounter = 0 + # figure out the interval to use + iterinterval = self.checkpoint_interval + if iterinterval is None: + iterinterval = self.target_niterations + # run sampler until we have the desired number of samples + while nsamples < target_nsamples: + # adjust the interval if we would go past the number of iterations + if self.target_niterations is not None and ( + self.niterations + iterinterval > self.target_niterations): + iterinterval = self.target_niterations - self.niterations + # run sampler and set initial values to None so that sampler + # picks up from where it left off next call + logging.info("Running sampler for {} to {} iterations".format( + self.niterations, self.niterations + iterinterval)) + # run the underlying sampler for the desired interval + self.run_mcmc(iterinterval) + # update the itercounter + self._itercounter = self._itercounter + iterinterval + # dump the current results + self.checkpoint() + # update nsamples for next loop + if self.target_eff_nsamples is not None: + nsamples = self.effective_nsamples + logging.info("Have {} effective samples post burn in".format( + nsamples)) + else: + nsamples += iterinterval * self.nchains
+ + + @property + def burn_in(self): + """The class for doing burn-in tests (if specified).""" + return self._burn_in + +
+[docs] + def set_burn_in(self, burn_in): + """Sets the object to use for doing burn-in tests.""" + self._burn_in = burn_in
+ + +
+[docs] + @abstractmethod + def effective_nsamples(self): + """The effective number of samples post burn-in that the sampler has + acquired so far. + """ + pass
+ + +
+[docs] + @abstractmethod + def run_mcmc(self, niterations): + """Run the MCMC for the given number of iterations.""" + pass
+ + +
+[docs] + @abstractmethod + def write_results(self, filename): + """Should write all samples currently in memory to the given file.""" + pass
+ + +
+[docs] + def checkpoint(self): + """Dumps current samples to the checkpoint file.""" + # thin and write new samples + # get the updated thin interval to use + thin_interval = self.get_thin_interval() + for fn in [self.checkpoint_file, self.backup_file]: + with self.io(fn, "a") as fp: + # write the current number of iterations + fp.write_niterations(self.niterations) + # thin samples on disk if it changed + if thin_interval > 1: + # if this is the first time writing, set the file's + # thinned_by + if fp.last_iteration() == 0: + fp.thinned_by = thin_interval + elif thin_interval < fp.thinned_by: + # whatever was done previously resulted in a larger + # thin interval, so we'll set it to the file's + thin_interval = fp.thinned_by + elif thin_interval > fp.thinned_by: + # we need to thin the samples on disk + logging.info("Thinning samples in %s by a factor " + "of %i", fn, int(thin_interval)) + fp.thin(thin_interval) + fp_lastiter = fp.last_iteration() + logging.info("Writing samples to %s with thin interval %i", fn, + thin_interval) + self.write_results(fn) + # update the running thin interval + self.thin_interval = thin_interval + # see if we had anything to write after thinning; if not, don't try + # to compute anything + with self.io(self.checkpoint_file, "r") as fp: + nsamples_written = fp.last_iteration() - fp_lastiter + if nsamples_written == 0: + logging.info("No samples written due to thinning") + else: + # check for burn in, compute the acls + self.raw_acls = None + if self.burn_in is not None: + logging.info("Updating burn in") + self.burn_in.evaluate(self.checkpoint_file) + # write + for fn in [self.checkpoint_file, self.backup_file]: + with self.io(fn, "a") as fp: + self.burn_in.write(fp) + logging.info("Computing autocorrelation time") + self.raw_acls = self.compute_acl(self.checkpoint_file) + # write acts, effective number of samples + for fn in [self.checkpoint_file, self.backup_file]: + with self.io(fn, "a") as fp: + if self.raw_acls is not None: + fp.raw_acls = self.raw_acls + fp.acl = self.acl + # write effective number of samples + fp.write_effective_nsamples(self.effective_nsamples) + # write history + for fn in [self.checkpoint_file, self.backup_file]: + with self.io(fn, "a") as fp: + fp.update_checkpoint_history() + # check validity + logging.info("Validating checkpoint and backup files") + checkpoint_valid = validate_checkpoint_files( + self.checkpoint_file, self.backup_file) + if not checkpoint_valid: + raise IOError("error writing to checkpoint file") + elif self.checkpoint_signal: + # kill myself with the specified signal + logging.info("Exiting with SIG{}".format(self.checkpoint_signal)) + kill_cmd="os.kill(os.getpid(), signal.SIG{})".format( + self.checkpoint_signal) + exec(kill_cmd) + # clear the in-memory chain to save memory + logging.info("Clearing samples from memory") + self.clear_samples()
+ + +
+[docs] + @staticmethod + def checkpoint_from_config(cp, section): + """Gets the checkpoint interval from the given config file. + + This looks for 'checkpoint-interval' in the section. + + Parameters + ---------- + cp : ConfigParser + Open config parser to retrieve the argument from. + section : str + Name of the section to retrieve from. + + Returns + ------- + int or None : + The checkpoint interval, if it is in the section. Otherwise, ``None``. + """ + return get_optional_arg_from_config(cp, section, 'checkpoint-interval', + dtype=int)
+ + +
+[docs] + @staticmethod + def ckpt_signal_from_config(cp, section): + """Gets the checkpoint signal from the given config file. + + This looks for 'checkpoint-signal' in the section. + + Parameters + ---------- + cp : ConfigParser + Open config parser to retrieve the argument from. + section : str + Name of the section to retrieve from. + + Returns + ------- + str or None : + The checkpoint signal, if it is in the section. Otherwise, ``None``. + """ + return get_optional_arg_from_config(cp, section, 'checkpoint-signal', + dtype=str)
+ + +
+[docs] + def set_target_from_config(self, cp, section): + """Sets the target using the given config file. + + This looks for ``niterations`` to set the ``target_niterations``, and + ``effective-nsamples`` to set the ``target_eff_nsamples``. + + Parameters + ---------- + cp : ConfigParser + Open config parser to retrieve the argument from. + section : str + Name of the section to retrieve from. + """ + if cp.has_option(section, "niterations"): + niterations = int(cp.get(section, "niterations")) + else: + niterations = None + if cp.has_option(section, "effective-nsamples"): + nsamples = int(cp.get(section, "effective-nsamples")) + else: + nsamples = None + self.set_target(niterations=niterations, eff_nsamples=nsamples)
+ + +
+[docs] + def set_burn_in_from_config(self, cp): + """Sets the burn in class from the given config file. + + If no burn-in section exists in the file, then this just sets the + burn-in class to None. + """ + try: + bit = self.burn_in_class.from_config(cp, self) + except ConfigParser.Error: + bit = None + self.set_burn_in(bit)
+ + +
+[docs] + def set_thin_interval_from_config(self, cp, section): + """Sets thinning options from the given config file. + """ + if cp.has_option(section, "thin-interval"): + thin_interval = int(cp.get(section, "thin-interval")) + logging.info("Will thin samples using interval %i", thin_interval) + else: + thin_interval = None + if cp.has_option(section, "max-samples-per-chain"): + max_samps_per_chain = int(cp.get(section, "max-samples-per-chain")) + logging.info("Setting max samples per chain to %i", + max_samps_per_chain) + else: + max_samps_per_chain = None + # check for consistency + if thin_interval is not None and max_samps_per_chain is not None: + raise ValueError("provide either thin-interval or " + "max-samples-per-chain, not both") + # check that the thin interval is less than the checkpoint interval + if thin_interval is not None and self.checkpoint_interval is not None \ + and thin_interval >= self.checkpoint_interval: + raise ValueError("thin interval must be less than the checkpoint " + "interval") + self.thin_interval = thin_interval + self.max_samples_per_chain = max_samps_per_chain
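Taken together, the ``*_from_config`` helpers above read options like the following hedged ``[sampler]`` snippet (the sampler name and values are illustrative; ``thin-interval`` and ``max-samples-per-chain`` are mutually exclusive):

    example_sampler_section = """
    [sampler]
    name = emcee
    nwalkers = 200
    effective-nsamples = 4000
    checkpoint-interval = 2000
    max-samples-per-chain = 1000
    """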
+ + + @property + def raw_acls(self): + """Dictionary of parameter names -> autocorrelation lengths. + + Depending on the sampler, the ACLs may be an integer, or an array of + values per chain and/or per temperature. + + Returns ``None`` if no ACLs have been calculated. + """ + return self._acls + + @raw_acls.setter + def raw_acls(self, acls): + """Sets the raw acls.""" + self._acls = acls + +
+[docs] + @abstractmethod + def acl(self): + """The autocorrelation length. + + This method should convert the raw ACLs into an integer or array that + can be used to extract independent samples from a chain. + """ + pass
+ + + @property + def raw_acts(self): + """Dictionary of parameter names -> autocorrelation time(s). + + Returns ``None`` if no ACLs have been calculated. + """ + acls = self.raw_acls + if acls is None: + return None + return {p: acl * self.thin_interval + for (p, acl) in acls.items()} + + @property + def act(self): + """The autocorrelation time(s). + + The autocorrelation time is defined as the autocorrelation length times + the ``thin_interval``. It gives the number of iterations between + independent samples. Depending on the sampler, this may either be + a single integer or an array of values. + + Returns ``None`` if no ACLs have been calculated. + """ + acl = self.acl + if acl is None: + return None + return acl * self.thin_interval + +
+[docs] + @abstractmethod + def compute_acf(cls, filename, **kwargs): + """A method to compute the autocorrelation function of samples in the + given file.""" + pass
+ + +
+[docs] + @abstractmethod + def compute_acl(cls, filename, **kwargs): + """A method to compute the autocorrelation length of samples in the + given file.""" + pass
+
+ + + +
+[docs] +class EnsembleSupport(object): + """Adds support for ensemble MCMC samplers.""" + + @property + def nwalkers(self): + """The number of walkers used. + + Alias of ``nchains``. + """ + return self.nchains + + @nwalkers.setter + def nwalkers(self, value): + """Sets the number of walkers.""" + # we'll actually store it to the nchains attribute + self.nchains = value + + @property + def acl(self): + """The autocorrelation length of the ensemble. + + This is calculated by taking the maximum over all of the ``raw_acls``. + This works for both single and parallel-tempered ensemble samplers. + + Returns ``None`` if no ACLs have been set. + """ + acls = self.raw_acls + if acls is None: + return None + return numpy.array(list(acls.values())).max() + + @property + def effective_nsamples(self): + """The effective number of samples post burn-in that the sampler has + acquired so far. + """ + if self.burn_in is not None and not self.burn_in.is_burned_in: + # not burned in, so there are no effective samples + return 0 + act = self.act + if act is None: + act = numpy.inf + if self.burn_in is None: + start_iter = 0 + else: + start_iter = self.burn_in.burn_in_iteration + nperwalker = nsamples_in_chain(start_iter, act, self.niterations) + if self.burn_in is not None: + # after burn in, we always have at least 1 sample per walker + nperwalker = max(nperwalker, 1) + return int(self.nwalkers * nperwalker)
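A hedged numeric illustration of the effective-sample count: with 100 walkers, an autocorrelation time of 20 iterations, burn-in ending at iteration 500, and 2500 iterations in total,

    nwalkers, act, burn_in_iter, niterations = 100, 20, 500, 2500
    # nsamples_in_chain counts independent samples between burn-in and the
    # current iteration, approximately (niterations - burn_in_iter) / act
    nperwalker = max((niterations - burn_in_iter) // act, 1)   # ~100
    print(nwalkers * nperwalker)                               # ~10000 effective samples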
+ + + +# +# ============================================================================= +# +# Functions for computing autocorrelation lengths +# +# ============================================================================= +# + + +
+[docs] +def ensemble_compute_acf(filename, start_index=None, end_index=None, + per_walker=False, walkers=None, parameters=None): + """Computes the autocorrelation function for an ensemble MCMC. + + By default, parameter values are averaged over all walkers at each + iteration. The ACF is then calculated over the averaged chain. An + ACF per-walker will be returned instead if ``per_walker=True``. + + Parameters + ----------- + filename : str + Name of a samples file to compute ACFs for. + start_index : int, optional + The start index to compute the acl from. If None (the default), will + try to use the number of burn-in iterations in the file; otherwise, + will start at the first sample. + end_index : int, optional + The end index to compute the acl to. If None (the default), will go to + the end of the current iteration. + per_walker : bool, optional + Return the ACF for each walker separately. Default is False. + walkers : int or array, optional + Calculate the ACF using only the given walkers. If None (the + default) all walkers will be used. + parameters : str or array, optional + Calculate the ACF for only the given parameters. If None (the + default) will calculate the ACF for all of the model params. + + Returns + ------- + dict : + Dictionary of arrays giving the ACFs for each parameter. If + ``per-walker`` is True, the arrays will have shape + ``nwalkers x niterations``. + """ + acfs = {} + with loadfile(filename, 'r') as fp: + if parameters is None: + parameters = fp.variable_params + if isinstance(parameters, str): + parameters = [parameters] + for param in parameters: + if per_walker: + # just call myself with a single walker + if walkers is None: + walkers = numpy.arange(fp.nwalkers) + arrays = [ + ensemble_compute_acf(filename, start_index=start_index, + end_index=end_index, + per_walker=False, walkers=ii, + parameters=param)[param] + for ii in walkers] + acfs[param] = numpy.vstack(arrays) + else: + samples = fp.read_raw_samples( + param, thin_start=start_index, thin_interval=1, + thin_end=end_index, walkers=walkers, + flatten=False)[param] + samples = samples.mean(axis=0) + acfs[param] = autocorrelation.calculate_acf( + samples).numpy() + return acfs
+ + + +
+[docs] +def ensemble_compute_acl(filename, start_index=None, end_index=None, + min_nsamples=10): + """Computes the autocorrelation length for an ensemble MCMC. + + Parameter values are averaged over all walkers at each iteration. + The ACL is then calculated over the averaged chain. If an ACL cannot + be calculated because there are not enough samples, it will be set + to ``inf``. + + Parameters + ----------- + filename : str + Name of a samples file to compute ACLs for. + start_index : int, optional + The start index to compute the acl from. If None, will try to use + the number of burn-in iterations in the file; otherwise, will start + at the first sample. + end_index : int, optional + The end index to compute the acl to. If None, will go to the end + of the current iteration. + min_nsamples : int, optional + Require a minimum number of samples to compute an ACL. If the + number of samples per walker is less than this, will just set to + ``inf``. Default is 10. + + Returns + ------- + dict + A dictionary giving the ACL for each parameter. + """ + acls = {} + with loadfile(filename, 'r') as fp: + for param in fp.variable_params: + samples = fp.read_raw_samples( + param, thin_start=start_index, thin_interval=1, + thin_end=end_index, flatten=False)[param] + samples = samples.mean(axis=0) + # if < min number of samples, just set to inf + if samples.size < min_nsamples: + acl = numpy.inf + else: + acl = autocorrelation.calculate_acl(samples) + if acl <= 0: + acl = numpy.inf + acls[param] = acl + maxacl = numpy.array(list(acls.values())).max() + logging.info("ACT: %s", str(maxacl*fp.thinned_by)) + return acls
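A hedged usage sketch for the two ensemble helpers above ('samples.hdf' and the parameter name are hypothetical; the file is assumed to be an existing ensemble MCMC output):

    # ACLs per parameter, averaged over walkers (inf if too few samples)
    acls = ensemble_compute_acl('samples.hdf', min_nsamples=10)
    # per-walker ACF for a single (hypothetical) parameter
    acfs = ensemble_compute_acf('samples.hdf', parameters='srcmass1',
                                per_walker=True)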
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/base_multitemper.html b/latest/html/_modules/pycbc/inference/sampler/base_multitemper.html new file mode 100644 index 00000000000..10465f68a27 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/base_multitemper.html @@ -0,0 +1,569 @@ + + + + + + pycbc.inference.sampler.base_multitemper — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.base_multitemper

+# Copyright (C) 2018  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Provides constructor classes provide support for parallel tempered MCMC
+samplers."""
+
+
+import logging
+import numpy
+import h5py
+from pycbc.filter import autocorrelation
+from pycbc.inference.io import loadfile
+
+
+
+[docs] +class MultiTemperedSupport(object): + """Provides methods for supporting multi-tempered samplers. + """ + _ntemps = None + + @property + def ntemps(self): + """The number of temperatures that are set.""" + return self._ntemps + +
+[docs] + @staticmethod + def betas_from_config(cp, section): + """Loads number of temperatures or betas from a config file. + + This looks in the given section for: + + * ``ntemps`` : + The number of temperatures to use. Either this, or + ``inverse-temperatures-file`` must be provided (but not both). + * ``inverse-temperatures-file`` : + Path to an hdf file containing the inverse temperatures ("betas") + to use. The betas will be retrieved from the file's + ``.attrs['betas']``. Either this or ``ntemps`` must be provided + (but not both). + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file object to parse. + section : str + The name of the section to look in. + + Returns + ------- + ntemps : int or None + The number of temperatures to use, if it was provided. + betas : array + The array of betas to use, if an inverse-temperatures-file was + provided. + """ + if cp.has_option(section, "ntemps") and \ + cp.has_option(section, "inverse-temperatures-file"): + raise ValueError("Must specify either ntemps or " + "inverse-temperatures-file, not both.") + if cp.has_option(section, "inverse-temperatures-file"): + # get the path of the file containing the inverse temperature values + inverse_temperatures_file = cp.get(section, + "inverse-temperatures-file") + betas = read_betas_from_hdf(inverse_temperatures_file) + ntemps = betas.shape[0] + else: + # get the number of temperatures + betas = None + ntemps = int(cp.get(section, "ntemps")) + return ntemps, betas
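As a hedged illustration, the corresponding config section might contain either of the following (the sampler name and values are illustrative, and the two options are mutually exclusive):

    example = """
    [sampler]
    name = emcee_pt
    ntemps = 4
    """
    # or, instead of ntemps:
    #   inverse-temperatures-file = betas.hdf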
+
+ + + +
+[docs] +def read_betas_from_hdf(filename): + """Loads inverse temperatures from the given file. + """ + # open the file containing the inverse temperature values + with h5py.File(filename, "r") as fp: + try: + betas = numpy.array(fp.attrs['betas']) + # betas must be in descending order + betas = numpy.sort(betas)[::-1] + except KeyError: + raise AttributeError("No attribute called betas") + return betas
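A hedged example of producing a compatible file with ``h5py`` (the file name and beta values are illustrative):

    import numpy, h5py
    with h5py.File('betas.hdf', 'w') as fp:
        fp.attrs['betas'] = numpy.array([1.0, 0.5, 0.25, 0.1])   # inverse temperatures
    print(read_betas_from_hdf('betas.hdf'))   # returned sorted in descending order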
+ + + +# +# ============================================================================= +# +# Functions for computing autocorrelation lengths +# +# ============================================================================= +# + + +
+[docs] +def compute_acf(filename, start_index=None, end_index=None, + chains=None, parameters=None, temps=None): + """Computes the autocorrelation function for independent MCMC chains with + parallel tempering. + + Parameters + ----------- + filename : str + Name of a samples file to compute ACFs for. + start_index : int, optional + The start index to compute the acl from. If None (the default), + will try to use the burn in iteration for each chain; + otherwise, will start at the first sample. + end_index : {None, int} + The end index to compute the acl to. If None, will go to the end + of the current iteration. + chains : optional, int or array + Calculate the ACF for only the given chains. If None (the + default) ACFs for all chains will be estimated. + parameters : optional, str or array + Calculate the ACF for only the given parameters. If None (the + default) will calculate the ACF for all of the model params. + temps : optional, (list of) int or 'all' + The temperature index (or list of indices) to retrieve. If None + (the default), the ACF will only be computed for the coldest (= 0) + temperature chain. To compute an ACF for all temperatures pass 'all', + or a list of all of the temperatures. + + Returns + ------- + dict : + Dictionary parameter name -> ACF arrays. The arrays have shape + ``ntemps x nchains x niterations``. + """ + acfs = {} + with loadfile(filename, 'r') as fp: + if parameters is None: + parameters = fp.variable_params + if isinstance(parameters, str): + parameters = [parameters] + temps = _get_temps_idx(fp, temps) + if chains is None: + chains = numpy.arange(fp.nchains) + for param in parameters: + subacfs = [] + for tk in temps: + subsubacfs = [] + for ci in chains: + samples = fp.read_raw_samples( + param, thin_start=start_index, thin_interval=1, + thin_end=end_index, chains=ci, temps=tk)[param] + thisacf = autocorrelation.calculate_acf(samples).numpy() + subsubacfs.append(thisacf) + # stack the chains + subacfs.append(subsubacfs) + # stack the temperatures + acfs[param] = numpy.stack(subacfs) + return acfs
+ + + +
+[docs] +def compute_acl(filename, start_index=None, end_index=None, + min_nsamples=10): + """Computes the autocorrelation length for independent MCMC chains with + parallel tempering. + + ACLs are calculated separately for each chain. + + Parameters + ----------- + filename : str + Name of a samples file to compute ACLs for. + start_index : {None, int} + The start index to compute the acl from. If None, will try to use + the number of burn-in iterations in the file; otherwise, will start + at the first sample. + end_index : {None, int} + The end index to compute the acl to. If None, will go to the end + of the current iteration. + min_nsamples : int, optional + Require a minimum number of samples to compute an ACL. If the + number of samples per walker is less than this, will just set to + ``inf``. Default is 10. + + Returns + ------- + dict + A dictionary of ntemps x nchains arrays of the ACLs of each + parameter. + """ + # following is a convenience function to calculate the acl for each chain + # defined here so that we can use map for this below + def _getacl(si): + # si: the samples loaded for a specific chain; may have nans in it + si = si[~numpy.isnan(si)] + if len(si) < min_nsamples: + acl = numpy.inf + else: + acl = autocorrelation.calculate_acl(si) + if acl <= 0: + acl = numpy.inf + return acl + acls = {} + with loadfile(filename, 'r') as fp: + tidx = numpy.arange(fp.ntemps) + for param in fp.variable_params: + these_acls = numpy.zeros((fp.ntemps, fp.nchains)) + for tk in tidx: + samples = fp.read_raw_samples( + param, thin_start=start_index, thin_interval=1, + thin_end=end_index, temps=tk, flatten=False)[param] + # flatten out the temperature + samples = samples[0, ...] + # samples now has shape nchains x maxiters + if samples.shape[-1] < min_nsamples: + these_acls[tk, :] = numpy.inf + else: + these_acls[tk, :] = list(map(_getacl, samples)) + acls[param] = these_acls + # report the ACTs: take the max over the temps and parameters + act = acl_from_raw_acls(acls)*fp.thinned_by + finite = act[numpy.isfinite(act)] + logging.info("ACTs: min %s, mean (of finite) %s, max %s", + str(act.min()), + str(finite.mean() if finite.size > 0 else numpy.inf), + str(act.max())) + return acls
+ + + +
+[docs] +def acl_from_raw_acls(acls): + """Calculates the ACL for one or more chains from a dictionary of ACLs. + + This is for parallel tempered MCMCs in which the chains are independent + of each other. + + The ACL for each chain is maximized over the temperatures and parameters. + + Parameters + ---------- + acls : dict + Dictionary of parameter names -> ntemps x nchains arrays of ACLs (the + thing returned by :py:func:`compute_acl`). + + Returns + ------- + array + The ACL of each chain. + """ + return numpy.array(list(acls.values())).max(axis=0).max(axis=0)
+ + + +
+[docs] +def ensemble_compute_acf(filename, start_index=None, end_index=None, + per_walker=False, walkers=None, parameters=None, + temps=None): + """Computes the autocorrelation function for a parallel tempered, ensemble + MCMC. + + By default, parameter values are averaged over all walkers at each + iteration. The ACF is then calculated over the averaged chain for each + temperature. An ACF per-walker will be returned instead if + ``per_walker=True``. + + Parameters + ---------- + filename : str + Name of a samples file to compute ACFs for. + start_index : int, optional + The start index to compute the acl from. If None (the default), will + try to use the number of burn-in iterations in the file; otherwise, + will start at the first sample. + end_index : int, optional + The end index to compute the acl to. If None (the default), will go to + the end of the current iteration. + per_walker : bool, optional + Return the ACF for each walker separately. Default is False. + walkers : int or array, optional + Calculate the ACF using only the given walkers. If None (the + default) all walkers will be used. + parameters : str or array, optional + Calculate the ACF for only the given parameters. If None (the + default) will calculate the ACF for all of the model params. + temps : (list of) int or 'all', optional + The temperature index (or list of indices) to retrieve. If None + (the default), the ACF will only be computed for the coldest (= 0) + temperature chain. To compute an ACF for all temperatures pass 'all', + or a list of all of the temperatures. + + Returns + ------- + dict : + Dictionary of arrays giving the ACFs for each parameter. If + ``per-walker`` is True, the arrays will have shape + ``ntemps x nwalkers x niterations``. Otherwise, the returned array + will have shape ``ntemps x niterations``. + """ + acfs = {} + with loadfile(filename, 'r') as fp: + if parameters is None: + parameters = fp.variable_params + if isinstance(parameters, str): + parameters = [parameters] + temps = _get_temps_idx(fp, temps) + for param in parameters: + subacfs = [] + for tk in temps: + if per_walker: + # just call myself with a single walker + if walkers is None: + walkers = numpy.arange(fp.nwalkers) + arrays = [ensemble_compute_acf(filename, + start_index=start_index, + end_index=end_index, + per_walker=False, + walkers=ii, + parameters=param, + temps=tk)[param][0, :] + for ii in walkers] + # we'll stack all of the walker arrays to make a single + # nwalkers x niterations array; when these are stacked + # below, we'll get a ntemps x nwalkers x niterations + # array + subacfs.append(numpy.vstack(arrays)) + else: + samples = fp.read_raw_samples( + param, thin_start=start_index, + thin_interval=1, thin_end=end_index, + walkers=walkers, temps=tk, flatten=False)[param] + # contract the walker dimension using the mean, and + # flatten the (length 1) temp dimension + samples = samples.mean(axis=1)[0, :] + thisacf = autocorrelation.calculate_acf( + samples).numpy() + subacfs.append(thisacf) + # stack the temperatures + acfs[param] = numpy.stack(subacfs) + return acfs
+ + + +
+[docs] +def ensemble_compute_acl(filename, start_index=None, end_index=None, + min_nsamples=10): + """Computes the autocorrelation length for a parallel tempered, ensemble + MCMC. + + Parameter values are averaged over all walkers at each iteration and + temperature. The ACL is then calculated over the averaged chain. + + Parameters + ----------- + filename : str + Name of a samples file to compute ACLs for. + start_index : int, optional + The start index to compute the acl from. If None (the default), will + try to use the number of burn-in iterations in the file; otherwise, + will start at the first sample. + end_index : int, optional + The end index to compute the acl to. If None, will go to the end + of the current iteration. + min_nsamples : int, optional + Require a minimum number of samples to compute an ACL. If the + number of samples per walker is less than this, will just set to + ``inf``. Default is 10. + + Returns + ------- + dict + A dictionary of ntemps-long arrays of the ACLs of each parameter. + """ + acls = {} + with loadfile(filename, 'r') as fp: + if end_index is None: + end_index = fp.niterations + tidx = numpy.arange(fp.ntemps) + for param in fp.variable_params: + these_acls = numpy.zeros(fp.ntemps) + for tk in tidx: + samples = fp.read_raw_samples( + param, thin_start=start_index, thin_interval=1, + thin_end=end_index, temps=tk, flatten=False)[param] + # contract the walker dimension using the mean, and flatten + # the (length 1) temp dimension + samples = samples.mean(axis=1)[0, :] + if samples.size < min_nsamples: + acl = numpy.inf + else: + acl = autocorrelation.calculate_acl(samples) + if acl <= 0: + acl = numpy.inf + these_acls[tk] = acl + acls[param] = these_acls + maxacl = numpy.array(list(acls.values())).max() + logging.info("ACT: %s", str(maxacl*fp.thinned_by)) + return acls
+ + + +def _get_temps_idx(fp, temps): + """Gets the indices of temperatures to load for computing ACF. + """ + if isinstance(temps, int): + temps = [temps] + elif temps == 'all': + temps = numpy.arange(fp.ntemps) + elif temps is None: + temps = [0] + return temps +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/dummy.html b/latest/html/_modules/pycbc/inference/sampler/dummy.html new file mode 100644 index 00000000000..33e6632c303 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/dummy.html @@ -0,0 +1,227 @@ + + + + + + pycbc.inference.sampler.dummy — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.dummy

+""" Dummy class when no actual sampling is needed, but we may want to do
+some reconstruction supported by the likelihood model.
+"""
+
+import numpy
+
+from pycbc.inference.io import PosteriorFile
+from pycbc.inference import models
+from pycbc.pool import choose_pool
+
+from .base import (BaseSampler, setup_output)
+
+
+
+[docs] +def call_reconstruct(iteration): + """ Accessor to update the global model and call its reconstruction + routine. + """ + models._global_instance.update() + return models._global_instance.reconstruct(seed=iteration)
+ + + +
+[docs] +class DummySampler(BaseSampler): + """Dummy sampler that performs no actual sampling; used when only + reconstruction from the likelihood model is needed. + + Parameters + ---------- + model : Model + An instance of a model from ``pycbc.inference.models``. + """ + name = 'dummy' + + def __init__(self, model, *args, nprocesses=1, use_mpi=False, + num_samples=1000, **kwargs): + super().__init__(model, *args) + + models._global_instance = model + self.num_samples = int(num_samples) + self.pool = choose_pool(mpi=use_mpi, processes=nprocesses) + self._samples = {} + +
+[docs] + @classmethod + def from_config(cls, cp, model, output_file=None, nprocesses=1, + use_mpi=False): + """This should initialize the sampler given a config file. + """ + kwargs = {k: cp.get('sampler', k) for k in cp.options('sampler')} + obj = cls(model, nprocesses=nprocesses, use_mpi=use_mpi, **kwargs) + setup_output(obj, output_file, check_nsamples=False, validate=False) + return obj
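A hedged sketch of a config section that would select this sampler; every option in the section is forwarded to the constructor as a keyword argument (the ``num_samples`` value is illustrative):

    example = """
    [sampler]
    name = dummy
    num_samples = 500
    """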
+ + + @property + def samples(self): + """A dict mapping variable_params to arrays of samples currently + in memory. The dictionary may also contain sampling_params. + + The sample arrays may have any shape, and may or may not be thinned. + """ + return self._samples + + @property + def model_stats(self): + pass + +
+[docs] + def run(self): + samples = self.pool.map(call_reconstruct, + range(self.num_samples)) + self._samples = {k: numpy.array([x[k] for x in samples]) + for k in samples[0]}
+ + +
+[docs] + def finalize(self): + with self.io(self.checkpoint_file, "a") as fp: + fp.write_samples(samples=self._samples)
+ + + checkpoint = resume_from_checkpoint = run + + @property + def io(self): + """A class that inherits from ``BaseInferenceFile`` to handle IO with + an hdf file. + + This should be a class, not an instance of class, so that the sampler + can initialize it when needed. + """ + return PosteriorFile
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/dynesty.html b/latest/html/_modules/pycbc/inference/sampler/dynesty.html new file mode 100644 index 00000000000..6e28f7b0a94 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/dynesty.html @@ -0,0 +1,810 @@ + + + + + + pycbc.inference.sampler.dynesty — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.dynesty

+# Copyright (C) 2019  Collin Capano, Sumit Kumar, Prayush Kumar
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides classes and functions for using the dynesty sampler
+package for parameter estimation.
+"""
+
+import logging
+import time
+import numpy
+import dynesty, dynesty.dynesty, dynesty.nestedsamplers
+from pycbc.pool import choose_pool
+from dynesty import utils as dyfunc
+from pycbc.inference.io import (DynestyFile, validate_checkpoint_files,
+                                loadfile)
+from .base import (BaseSampler, setup_output)
+from .base_mcmc import get_optional_arg_from_config
+from .base_cube import setup_calls
+from .. import models
+
+
+#
+# =============================================================================
+#
+#                                   Samplers
+#
+# =============================================================================
+#
+
+
+[docs] +class DynestySampler(BaseSampler): + """This class is used to construct a Dynesty sampler from the dynesty + package. + + Parameters + ---------- + model : model + A model from ``pycbc.inference.models``. + nlive : int + Number of live points to use in sampler. + pool : function with map, Optional + A provider of a map function that allows a function call to be run + over multiple sets of arguments and possibly maps them to + cores/nodes/etc. + """ + name = "dynesty" + _io = DynestyFile + + def __init__(self, model, nlive, nprocesses=1, + checkpoint_time_interval=None, maxcall=None, + loglikelihood_function=None, use_mpi=False, + no_save_state=False, + run_kwds=None, + extra_kwds=None, + internal_kwds=None, + **kwargs): + + self.model = model + self.no_save_state = no_save_state + log_likelihood_call, prior_call = setup_calls( + model, + loglikelihood_function=loglikelihood_function, + copy_prior=True) + # Set up the pool + self.pool = choose_pool(mpi=use_mpi, processes=nprocesses) + + self.maxcall = maxcall + self.checkpoint_time_interval = checkpoint_time_interval + self.run_kwds = {} if run_kwds is None else run_kwds + self.extra_kwds = {} if extra_kwds is None else extra_kwds + self.internal_kwds = {} if internal_kwds is None else internal_kwds + self.nlive = nlive + self.names = model.sampling_params + self.ndim = len(model.sampling_params) + self.checkpoint_file = None + # Enable checkpointing if checkpoint_time_interval is set in config + # file in sampler section + if self.checkpoint_time_interval: + self.run_with_checkpoint = True + if self.maxcall is None: + self.maxcall = 5000 * self.pool.size + logging.info("Checkpointing enabled, will verify every %s calls" + " and try to checkpoint every %s seconds", + self.maxcall, self.checkpoint_time_interval) + else: + self.run_with_checkpoint = False + + # Check for cyclic boundaries + periodic = [] + cyclic = self.model.prior_distribution.cyclic + for i, param in enumerate(self.variable_params): + if param in cyclic: + logging.info('Param: %s will be cyclic', param) + periodic.append(i) + + if len(periodic) == 0: + periodic = None + + # Check for reflected boundaries. Dynesty only supports + # reflection on both min and max of boundary.
+ reflective = [] + reflect = self.model.prior_distribution.well_reflected + for i, param in enumerate(self.variable_params): + if param in reflect: + logging.info("Param: %s will be well reflected", param) + reflective.append(i) + + if len(reflective) == 0: + reflective = None + + if 'sample' in extra_kwds: + if 'rwalk2' in extra_kwds['sample']: + dynesty.dynesty._SAMPLING["rwalk"] = sample_rwalk_mod + dynesty.nestedsamplers._SAMPLING["rwalk"] = sample_rwalk_mod + extra_kwds['sample'] = 'rwalk' + + if self.nlive < 0: + # Interpret a negative input value for the number of live points + # (which is clearly an invalid input in all senses) + # as the desire to dynamically determine that number + self._sampler = dynesty.DynamicNestedSampler(log_likelihood_call, + prior_call, self.ndim, + pool=self.pool, + reflective=reflective, + periodic=periodic, + **extra_kwds) + self.run_with_checkpoint = False + logging.info("Checkpointing not currently supported with" + "DYNAMIC nested sampler") + else: + self._sampler = dynesty.NestedSampler(log_likelihood_call, + prior_call, self.ndim, + nlive=self.nlive, + reflective=reflective, + periodic=periodic, + pool=self.pool, **extra_kwds) + self._sampler.kwargs.update(internal_kwds) + + # properties of the internal sampler which should not be pickled + self.no_pickle = ['loglikelihood', + 'prior_transform', + 'propose_point', + 'update_proposal', + '_UPDATE', '_PROPOSE', + 'evolve_point', 'use_pool', 'queue_size', + 'use_pool_ptform', 'use_pool_logl', + 'use_pool_evolve', 'use_pool_update', + 'pool', 'M'] + +
+[docs] + def run(self): + diff_niter = 1 + if self.run_with_checkpoint is True: + n_checkpointing = 1 + t0 = time.time() + it = self._sampler.it + + logging.info('Starting from iteration: %s', it) + while diff_niter != 0: + self._sampler.run_nested(maxcall=self.maxcall, **self.run_kwds) + + delta_t = time.time() - t0 + diff_niter = self._sampler.it - it + logging.info("Checking if we should checkpoint: %.2f s", delta_t) + + if delta_t >= self.checkpoint_time_interval: + logging.info('Checkpointing N={}'.format(n_checkpointing)) + self.checkpoint() + n_checkpointing += 1 + t0 = time.time() + it = self._sampler.it + else: + self._sampler.run_nested(**self.run_kwds)
+ + + @property + def io(self): + return self._io + + @property + def niterations(self): + return len(tuple(self.samples.values())[0]) + +
+[docs] + @classmethod + def from_config(cls, cp, model, output_file=None, nprocesses=1, + use_mpi=False, loglikelihood_function=None): + """Loads the sampler from the given config file. Many options are + directly passed to the underlying dynesty sampler, see the official + dynesty documentation for more details on these. + + The following options are retrieved in the ``[sampler]`` section: + + * ``name = STR``: + Required. This must match the sampler's name. + * ``maxiter = INT``: + The maximum number of iterations to run. + * ``dlogz = FLOAT``: + The target dlogz stopping condition. + * ``logl_max = FLOAT``: + The maximum logl stopping condition. + * ``n_effective = INT``: + Target effective number of samples stopping condition + * ``sample = STR``: + The method to sample the space. Should be one of 'uniform', + 'rwalk', 'rwalk2' (a modified version of rwalk), or 'slice'. + * ``walk = INT``: + Used for some of the walk methods. Sets the minimum number of + steps to take when evolving a point. + * ``maxmcmc = INT``: + Used for some of the walk methods. Sets the maximum number of steps + to take when evolving a point. + * ``nact = INT``: + used for some of the walk methods. Sets number of autorcorrelation + lengths before terminating evolution of a point. + * ``first_update_min_ncall = INT``: + The minimum number of calls before updating the bounding region + for the first time. + * ``first_update_min_neff = FLOAT``: + Don't update the the bounding region untill the efficiency drops + below this value. + * ``bound = STR``: + The method of bounding of the prior volume. + Should be one of 'single', 'balls', 'cubes', 'multi' or 'none'. + * ``update_interval = INT``: + Number of iterations between updating the bounding regions + * ``enlarge = FLOAT``: + Factor to enlarge the bonding region. + * ``bootstrap = INT``: + The number of bootstrap iterations to determine the enlargement + factor. + * ``maxcall = INT``: + The maximum number of calls before checking if we should checkpoint + * ``checkpoint_time_interval``: + Sets the time in seconds between checkpointing. + * ``loglikelihood-function``: + The attribute of the model to use for the loglikelihood. If + not provided, will default to ``loglikelihood``. + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file object to parse. + model : pycbc.inference.model.BaseModel instance + The model to use. + output_file : str, optional + The name of the output file to checkpoint and write results to. + nprocesses : int, optional + The number of parallel processes to use. Default is 1. + use_mpi : bool, optional + Use MPI for parallelization. Default is False. + + Returns + ------- + DynestySampler : + The sampler instance. 
+ """ + section = "sampler" + # check name + assert cp.get(section, "name") == cls.name, ( + "name in section [sampler] must match mine") + # get the number of live points to use + nlive = int(cp.get(section, "nlive")) + loglikelihood_function = \ + get_optional_arg_from_config(cp, section, 'loglikelihood-function') + + no_save_state = cp.has_option(section, 'no-save-state') + + # optional run_nested arguments for dynesty + rargs = {'maxiter': int, + 'dlogz': float, + 'logl_max': float, + 'n_effective': int, + } + + # optional arguments for dynesty + cargs = {'bound': str, + 'bootstrap': int, + 'enlarge': float, + 'update_interval': float, + 'sample': str, + 'first_update_min_ncall': int, + 'first_update_min_eff': float, + 'walks': int, + } + + # optional arguments that must be set internally + internal_args = { + 'maxmcmc': int, + 'nact': int, + } + + extra = {} + run_extra = {} + internal_extra = {} + for args, argt in [(extra, cargs), + (run_extra, rargs), + (internal_extra, internal_args), + ]: + for karg in argt: + if cp.has_option(section, karg): + args[karg] = argt[karg](cp.get(section, karg)) + + #This arg needs to be a dict + first_update = {} + if 'first_update_min_ncall' in extra: + first_update['min_ncall'] = extra.pop('first_update_min_ncall') + logging.info('First update: min_ncall:%s', + first_update['min_ncall']) + if 'first_update_min_eff' in extra: + first_update['min_eff'] = extra.pop('first_update_min_eff') + logging.info('First update: min_eff:%s', first_update['min_eff']) + extra['first_update'] = first_update + + # populate options for checkpointing + checkpoint_time_interval = None + maxcall = None + if cp.has_option(section, 'checkpoint_time_interval'): + ck_time = float(cp.get(section, 'checkpoint_time_interval')) + checkpoint_time_interval = ck_time + if cp.has_option(section, 'maxcall'): + maxcall = int(cp.get(section, 'maxcall')) + + obj = cls(model, nlive=nlive, nprocesses=nprocesses, + loglikelihood_function=loglikelihood_function, + checkpoint_time_interval=checkpoint_time_interval, + maxcall=maxcall, + no_save_state=no_save_state, + use_mpi=use_mpi, run_kwds=run_extra, + extra_kwds=extra, + internal_kwds=internal_extra,) + setup_output(obj, output_file, check_nsamples=False) + + if not obj.new_checkpoint: + obj.resume_from_checkpoint() + return obj
+ + +
+[docs] + def checkpoint(self): + """Checkpoint function for dynesty sampler + """ + # Dynesty has its own __getstate__ which deletes + # random state information and the pool + saved = {} + for key in self.no_pickle: + if hasattr(self._sampler, key): + saved[key] = getattr(self._sampler, key) + setattr(self._sampler, key, None) + for fn in [self.checkpoint_file, self.backup_file]: + with self.io(fn, "a") as fp: + # Write random state + fp.write_random_state() + + # Write pickled data + fp.write_pickled_data_into_checkpoint_file(self._sampler) + + self.write_results(fn) + + # Restore properties that couldn't be pickled if we are continuing + for key in saved: + setattr(self._sampler, key, saved[key])
+ + +
+[docs] + def resume_from_checkpoint(self): + try: + with loadfile(self.checkpoint_file, 'r') as fp: + sampler = fp.read_pickled_data_from_checkpoint_file() + + for key in sampler.__dict__: + if key not in self.no_pickle: + value = getattr(sampler, key) + setattr(self._sampler, key, value) + + self.set_state_from_file(self.checkpoint_file) + logging.info("Found valid checkpoint file: %s", + self.checkpoint_file) + except Exception as e: + print(e) + logging.info("Failed to load checkpoint file")
+ + +
+[docs] + def set_state_from_file(self, filename): + """Sets the state of the sampler back to the instance saved in a file. + """ + with self.io(filename, 'r') as fp: + state = fp.read_random_state() + # Dynesty handles most randomeness through rstate which is + # pickled along with the class instance + numpy.random.set_state(state)
+ + +
+[docs] + def finalize(self): + """Finalze and write it to the results file + """ + logz = self._sampler.results.logz[-1:][0] + dlogz = self._sampler.results.logzerr[-1:][0] + logging.info("log Z, dlog Z: {}, {}".format(logz, dlogz)) + + if self.no_save_state: + self.write_results(self.checkpoint_file) + else: + self.checkpoint() + logging.info("Validating checkpoint and backup files") + checkpoint_valid = validate_checkpoint_files( + self.checkpoint_file, self.backup_file, check_nsamples=False) + if not checkpoint_valid: + raise IOError("error writing to checkpoint file")
+ + + @property + def samples(self): + """Returns raw nested samples + """ + results = self._sampler.results + samples = results.samples + nest_samp = {} + for i, param in enumerate(self.variable_params): + nest_samp[param] = samples[:, i] + nest_samp['logwt'] = results.logwt + nest_samp['loglikelihood'] = results.logl + return nest_samp + +
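+
+# Hedged note (an addition, not from the PyCBC source): the dict returned by
+# the ``samples`` property above holds raw nested samples together with their
+# ``logwt``.  Equally-weighted posterior draws can be obtained with dynesty's
+# own utilities (imported above as ``dyfunc``), along the lines of:
+#
+#   results = self._sampler.results
+#   weights = numpy.exp(results.logwt - results.logz[-1])
+#   posterior = dyfunc.resample_equal(results.samples, weights)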
+[docs] + def set_initial_conditions(self, initial_distribution=None, + samples_file=None): + """Sets up the starting point for the sampler. + + Should also set the sampler's random state. + """ + pass
+ + +
+[docs] + def write_results(self, filename): + """Writes samples, model stats, acceptance fraction, and random state + to the given file. + + Parameters + ----------- + filename : str + The file to write to. The file is opened using the ``io`` class + in an an append state. + """ + with self.io(filename, 'a') as fp: + # Write nested samples + fp.write_raw_samples(self.samples) + + # Write logz and dlogz + logz = self._sampler.results.logz[-1:][0] + dlogz = self._sampler.results.logzerr[-1:][0] + fp.write_logevidence(logz, dlogz)
+ + + @property + def model_stats(self): + pass + + @property + def logz(self): + """ + return bayesian evidence estimated by + dynesty sampler + """ + return self._sampler.results.logz[-1:][0] + + @property + def logz_err(self): + """ + return error in bayesian evidence estimated by + dynesty sampler + """ + return self._sampler.results.logzerr[-1:][0]
+ + + +
+[docs] +def sample_rwalk_mod(args): + """ Modified version of dynesty.sampling.sample_rwalk + + Adapted from version used in bilby/dynesty + """ + try: + # dynesty <= 1.1 + from dynesty.utils import unitcheck, reflect + + # Unzipping. + (u, loglstar, axes, scale, + prior_transform, loglikelihood, kwargs) = args + + except ImportError: + # dynest >= 1.2 + from dynesty.utils import unitcheck, apply_reflect as reflect + + (u, loglstar, axes, scale, + prior_transform, loglikelihood, _, kwargs) = args + + rstate = numpy.random + + # Bounds + nonbounded = kwargs.get('nonbounded', None) + periodic = kwargs.get('periodic', None) + reflective = kwargs.get('reflective', None) + + # Setup. + n = len(u) + walks = kwargs.get('walks', 10 * n) # minimum number of steps + maxmcmc = kwargs.get('maxmcmc', 2000) # Maximum number of steps + nact = kwargs.get('nact', 5) # Number of ACT + old_act = kwargs.get('old_act', walks) + + # Initialize internal variables + accept = 0 + reject = 0 + nfail = 0 + act = numpy.inf + u_list = [] + v_list = [] + logl_list = [] + + ii = 0 + while ii < nact * act: + ii += 1 + + # Propose a direction on the unit n-sphere. + drhat = rstate.randn(n) + drhat /= numpy.linalg.norm(drhat) + + # Scale based on dimensionality. + dr = drhat * rstate.rand() ** (1.0 / n) + + # Transform to proposal distribution. + du = numpy.dot(axes, dr) + u_prop = u + scale * du + + # Wrap periodic parameters + if periodic is not None: + u_prop[periodic] = numpy.mod(u_prop[periodic], 1) + # Reflect + if reflective is not None: + u_prop[reflective] = reflect(u_prop[reflective]) + + # Check unit cube constraints. + if u.max() < 0: + break + if unitcheck(u_prop, nonbounded): + pass + else: + nfail += 1 + # Only start appending to the chain once a single jump is made + if accept > 0: + u_list.append(u_list[-1]) + v_list.append(v_list[-1]) + logl_list.append(logl_list[-1]) + continue + + # Check proposed point. + v_prop = prior_transform(numpy.array(u_prop)) + logl_prop = loglikelihood(numpy.array(v_prop)) + if logl_prop > loglstar: + u = u_prop + v = v_prop + logl = logl_prop + accept += 1 + u_list.append(u) + v_list.append(v) + logl_list.append(logl) + else: + reject += 1 + # Only start appending to the chain once a single jump is made + if accept > 0: + u_list.append(u_list[-1]) + v_list.append(v_list[-1]) + logl_list.append(logl_list[-1]) + + # If we've taken the minimum number of steps, calculate the ACT + if accept + reject > walks: + act = estimate_nmcmc( + accept_ratio=accept / (accept + reject + nfail), + old_act=old_act, maxmcmc=maxmcmc) + + # If we've taken too many likelihood evaluations then break + if accept + reject > maxmcmc: + logging.warning( + "Hit maximum number of walks {} with accept={}, reject={}, " + "and nfail={} try increasing maxmcmc" + .format(maxmcmc, accept, reject, nfail)) + break + + # If the act is finite, pick randomly from within the chain + if numpy.isfinite(act) and int(.5 * nact * act) < len(u_list): + idx = numpy.random.randint(int(.5 * nact * act), len(u_list)) + u = u_list[idx] + v = v_list[idx] + logl = logl_list[idx] + else: + logging.debug("Unable to find a new point using walk: " + "returning a random point") + u = numpy.random.uniform(size=n) + v = prior_transform(u) + logl = loglikelihood(v) + + blob = {'accept': accept, 'reject': reject, 'fail': nfail, 'scale': scale} + kwargs["old_act"] = act + + ncall = accept + reject + return u, v, logl, ncall, blob
+ + + +
+[docs] +def estimate_nmcmc(accept_ratio, old_act, maxmcmc, safety=5, tau=None): + """Estimate autocorrelation length of chain using acceptance fraction + + Using ACL = (2/acc) - 1 multiplied by a safety margin. Code adapated from + CPNest: + + * https://github.com/johnveitch/cpnest/blob/master/cpnest/sampler.py + * https://github.com/farr/Ensemble.jl + + Parameters + ---------- + accept_ratio: float [0, 1] + Ratio of the number of accepted points to the total number of points + old_act: int + The ACT of the last iteration + maxmcmc: int + The maximum length of the MCMC chain to use + safety: int + A safety factor applied in the calculation + tau: int (optional) + The ACT, if given, otherwise estimated. + """ + if tau is None: + tau = maxmcmc / safety + + if accept_ratio == 0.0: + Nmcmc_exact = (1 + 1 / tau) * old_act + else: + Nmcmc_exact = ( + (1. - 1. / tau) * old_act + + (safety / tau) * (2. / accept_ratio - 1.) + ) + Nmcmc_exact = float(min(Nmcmc_exact, maxmcmc)) + return max(safety, int(Nmcmc_exact))
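+
+# Worked illustration (assumed numbers, not from the PyCBC source): with the
+# defaults safety=5 and maxmcmc=2000, tau = maxmcmc / safety = 400.  For
+# accept_ratio=0.1 and old_act=100 the update above gives
+#   (1 - 1/400) * 100 + (5/400) * (2/0.1 - 1) = 99.75 + 0.2375 ~ 99.99,
+# so ``estimate_nmcmc(0.1, 100, 2000)`` returns max(5, int(99.99)) = 99.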
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/emcee.html b/latest/html/_modules/pycbc/inference/sampler/emcee.html new file mode 100644 index 00000000000..3d3fd3001f7 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/emcee.html @@ -0,0 +1,428 @@ + + + + + + pycbc.inference.sampler.emcee — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.emcee

+# Copyright (C) 2016  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides classes and functions for using the emcee sampler
+package for parameter estimation.
+"""
+
+
+import numpy
+import emcee
+from pycbc.pool import choose_pool
+
+from .base import (BaseSampler, setup_output)
+from .base_mcmc import (BaseMCMC, EnsembleSupport,
+                        ensemble_compute_acf, ensemble_compute_acl,
+                        raw_samples_to_dict,
+                        blob_data_to_dict, get_optional_arg_from_config)
+from ..burn_in import EnsembleMCMCBurnInTests
+from pycbc.inference.io import EmceeFile
+from .. import models
+
+
+#
+# =============================================================================
+#
+#                                   Samplers
+#
+# =============================================================================
+#
+
+if emcee.__version__ >= '3.0.0':
+    raise ImportError("emcee >= 3.0.0 is not supported by this sampler; "
+                      "install emcee < 3.0.0")
+
+
+
+[docs] +class EmceeEnsembleSampler(EnsembleSupport, BaseMCMC, BaseSampler): + """This class is used to construct an MCMC sampler from the emcee + package's EnsembleSampler. + + Parameters + ---------- + model : model + A model from ``pycbc.inference.models``. + nwalkers : int + Number of walkers to use in sampler. + pool : function with map, Optional + A provider of a map function that allows a function call to be run + over multiple sets of arguments and possibly maps them to + cores/nodes/etc. + """ + name = "emcee" + _io = EmceeFile + burn_in_class = EnsembleMCMCBurnInTests + + def __init__(self, model, nwalkers, + checkpoint_interval=None, checkpoint_signal=None, + logpost_function=None, nprocesses=1, use_mpi=False): + + self.model = model + # create a wrapper for calling the model + if logpost_function is None: + logpost_function = 'logposterior' + model_call = models.CallModel(model, logpost_function) + + # these are used to help paralleize over multiple cores / MPI + models._global_instance = model_call + model_call = models._call_global_model + pool = choose_pool(mpi=use_mpi, processes=nprocesses) + + # set up emcee + self.nwalkers = nwalkers + ndim = len(model.variable_params) + self._sampler = emcee.EnsembleSampler(nwalkers, ndim, model_call, + pool=pool) + # emcee uses it's own internal random number generator; we'll set it + # to have the same state as the numpy generator + rstate = numpy.random.get_state() + self._sampler.random_state = rstate + self._checkpoint_interval = checkpoint_interval + self._checkpoint_signal = checkpoint_signal + + @property + def io(self): + return self._io + + @property + def base_shape(self): + return (self.nwalkers,) + + @property + def samples(self): + """A dict mapping ``variable_params`` to arrays of samples currently + in memory. + + The arrays have shape ``nwalkers x niterations``. + """ + # emcee stores samples to it's chain attribute as a + # nwalker x niterations x ndim array + raw_samples = self._sampler.chain + return raw_samples_to_dict(self, raw_samples) + + @property + def model_stats(self): + """A dict mapping the model's ``default_stats`` to arrays of values. + + The returned array has shape ``nwalkers x niterations``. + """ + stats = self.model.default_stats + return blob_data_to_dict(stats, self._sampler.blobs) + +
+[docs] + def clear_samples(self): + """Clears the samples and stats from memory. + """ + # store the iteration that the clear is occuring on + self._lastclear = self.niterations + self._itercounter = 0 + # now clear the chain + self._sampler.reset() + self._sampler.clear_blobs()
+ + +
+[docs] + def set_state_from_file(self, filename): + """Sets the state of the sampler back to the instance saved in a file. + """ + with self.io(filename, 'r') as fp: + rstate = fp.read_random_state() + # set the numpy random state + numpy.random.set_state(rstate) + # set emcee's generator to the same state + self._sampler.random_state = rstate
+ + +
+[docs] + def run_mcmc(self, niterations): + """Advance the ensemble for a number of samples. + + Parameters + ---------- + niterations : int + Number of iterations to run the sampler for. + """ + pos = self._pos + if pos is None: + pos = self._p0 + res = self._sampler.run_mcmc(pos, niterations) + p, _, _ = res[0], res[1], res[2] + # update the positions + self._pos = p
+ + +
+[docs] + def write_results(self, filename): + """Writes samples, model stats, acceptance fraction, and random state + to the given file. + + Parameters + ----------- + filename : str + The file to write to. The file is opened using the ``io`` class + in an an append state. + """ + with self.io(filename, 'a') as fp: + # write samples + fp.write_samples(self.samples, + parameters=self.model.variable_params, + last_iteration=self.niterations) + # write stats + fp.write_samples(self.model_stats, + last_iteration=self.niterations) + # write accpetance + fp.write_acceptance_fraction(self._sampler.acceptance_fraction) + # write random state + fp.write_random_state(state=self._sampler.random_state)
+ + +
+[docs] + def finalize(self): + """All data is written by the last checkpoint in the run method, so + this just passes.""" + pass
+ + +
+[docs] + @staticmethod + def compute_acf(filename, **kwargs): + r"""Computes the autocorrelation function. + + Calls :py:func:`base_mcmc.ensemble_compute_acf`; see that + function for details. + + Parameters + ---------- + filename : str + Name of a samples file to compute ACFs for. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_mcmc.ensemble_compute_acf`. + + Returns + ------- + dict : + Dictionary of arrays giving the ACFs for each parameter. If + ``per-walker`` is True, the arrays will have shape + ``nwalkers x niterations``. + """ + return ensemble_compute_acf(filename, **kwargs)
+ + +
+[docs] + @staticmethod + def compute_acl(filename, **kwargs): + r"""Computes the autocorrelation length. + + Calls :py:func:`base_mcmc.ensemble_compute_acl`; see that + function for details. + + Parameters + ----------- + filename : str + Name of a samples file to compute ACLs for. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_mcmc.ensemble_compute_acf`. + + Returns + ------- + dict + A dictionary giving the ACL for each parameter. + """ + return ensemble_compute_acl(filename, **kwargs)
+ + +
+[docs] + @classmethod + def from_config(cls, cp, model, output_file=None, nprocesses=1, + use_mpi=False): + """Loads the sampler from the given config file.""" + section = "sampler" + # check name + assert cp.get(section, "name") == cls.name, ( + "name in section [sampler] must match mine") + # get the number of walkers to use + nwalkers = int(cp.get(section, "nwalkers")) + # get the checkpoint interval, if it's specified + checkpoint_interval = cls.checkpoint_from_config(cp, section) + checkpoint_signal = cls.ckpt_signal_from_config(cp, section) + # get the logpost function + lnpost = get_optional_arg_from_config(cp, section, 'logpost-function') + obj = cls(model, nwalkers, + checkpoint_interval=checkpoint_interval, + checkpoint_signal=checkpoint_signal, + logpost_function=lnpost, nprocesses=nprocesses, + use_mpi=use_mpi) + # set target + obj.set_target_from_config(cp, section) + # add burn-in if it's specified + obj.set_burn_in_from_config(cp) + # set prethin options + obj.set_thin_interval_from_config(cp, section) + # Set up the output file + setup_output(obj, output_file) + if not obj.new_checkpoint: + obj.resume_from_checkpoint() + else: + obj.set_start_from_config(cp) + return obj
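+
+# Illustrative sketch (values and the checkpoint/target option names are
+# assumptions inferred from the shared base-class helpers, not from this file)
+# of a ``[sampler]`` section that the ``from_config`` method above could read:
+#
+#   [sampler]
+#   name = emcee
+#   nwalkers = 200
+#   niterations = 5000
+#   checkpoint-interval = 1000
+#   logpost-function = logposterior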
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/emcee_pt.html b/latest/html/_modules/pycbc/inference/sampler/emcee_pt.html new file mode 100644 index 00000000000..50a002b53ca --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/emcee_pt.html @@ -0,0 +1,630 @@ + + + + + + pycbc.inference.sampler.emcee_pt — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.emcee_pt

+# Copyright (C) 2016  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+"""
+This module provides classes and functions for using the emcee_pt sampler
+package for parameter estimation.
+"""
+
+import logging
+import numpy
+import emcee
+
+from pycbc.pool import choose_pool
+
+from .base import (BaseSampler, setup_output)
+from .base_mcmc import (BaseMCMC, EnsembleSupport, raw_samples_to_dict,
+                        get_optional_arg_from_config)
+from .base_multitemper import (MultiTemperedSupport,
+                               ensemble_compute_acf, ensemble_compute_acl)
+from ..burn_in import EnsembleMultiTemperedMCMCBurnInTests
+from pycbc.inference.io import EmceePTFile
+from .. import models
+
+
+# This is a hack that will allow us to continue using emcee's abandoned
+# PTSampler, which relied on `numpy.float`, until the end of time.
+numpy.float = float
+
+if emcee.__version__ >= '3.0.0':
+    raise ImportError("emcee >= 3.0.0 no longer provides PTSampler; "
+                      "install emcee < 3.0.0")
+
+
+
+[docs] +class EmceePTSampler(MultiTemperedSupport, EnsembleSupport, BaseMCMC, + BaseSampler): + """This class is used to construct a parallel-tempered MCMC sampler from + the emcee package's PTSampler. + + Parameters + ---------- + model : model + A model from ``pycbc.inference.models``. + ntemps : int + Number of temeratures to use in the sampler. + nwalkers : int + Number of walkers to use in sampler. + betas : array + An array of inverse temperature values to be used in emcee_pt's + temperature ladder. If not provided, ``emcee_pt`` will use the number + of temperatures and the number of dimensions of the parameter space to + construct the ladder with geometrically spaced temperatures. + loglikelihood_function : str, optional + Set the function to call from the model for the ``loglikelihood``. + Default is ``loglikelihood``. + nprocesses : int, optional + The number of parallel processes to use. Default is 1 + (no paralleliztion). + use_mpi : bool, optional + Use MPI for parallelization. Default (False) will use python's + multiprocessing. + """ + name = "emcee_pt" + _io = EmceePTFile + burn_in_class = EnsembleMultiTemperedMCMCBurnInTests + + def __init__(self, model, ntemps, nwalkers, betas=None, + checkpoint_interval=None, checkpoint_signal=None, + loglikelihood_function=None, + nprocesses=1, use_mpi=False): + + self.model = model + + # create a wrapper for calling the model + if loglikelihood_function is None: + loglikelihood_function = 'loglikelihood' + # frustratingly, emcee_pt does not support blob data, so we have to + # turn it off + model_call = models.CallModel(model, loglikelihood_function, + return_all_stats=False) + + # these are used to help paralleize over multiple cores / MPI + models._global_instance = model_call + model_call = models._call_global_model + prior_call = models._call_global_model_logprior + self.pool = choose_pool(mpi=use_mpi, processes=nprocesses) + + # construct the sampler: PTSampler needs the likelihood and prior + # functions separately + ndim = len(model.variable_params) + self._sampler = emcee.PTSampler(ntemps, nwalkers, ndim, + model_call, prior_call, pool=self.pool, + betas=betas) + self.nwalkers = nwalkers + self._ntemps = ntemps + self._checkpoint_interval = checkpoint_interval + self._checkpoint_signal = checkpoint_signal + + @property + def io(self): + return self._io + + @property + def base_shape(self): + return (self.ntemps, self.nwalkers,) + + @property + def betas(self): + return self._sampler.betas + +
+[docs] + @staticmethod + def compute_acf(filename, **kwargs): + r"""Computes the autocorrelation function. + + Calls :py:func:`base_multitemper.ensemble_compute_acf`; see that + function for details. + + Parameters + ---------- + filename : str + Name of a samples file to compute ACFs for. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.ensemble_compute_acf`. + + Returns + ------- + dict : + Dictionary of arrays giving the ACFs for each parameter. If + ``per-walker=True`` is passed as a keyword argument, the arrays + will have shape ``ntemps x nwalkers x niterations``. Otherwise, the + returned array will have shape ``ntemps x niterations``. + """ + return ensemble_compute_acf(filename, **kwargs)
+ + +
+[docs] + @staticmethod + def compute_acl(filename, **kwargs): + r"""Computes the autocorrelation length. + + Calls :py:func:`base_multitemper.ensemble_compute_acl`; see that + function for details. + + Parameters + ----------- + filename : str + Name of a samples file to compute ACLs for. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.ensemble_compute_acl`. + + Returns + ------- + dict + A dictionary of ntemps-long arrays of the ACLs of each parameter. + """ + return ensemble_compute_acl(filename, **kwargs)
+ + +
+[docs] + @classmethod + def from_config(cls, cp, model, output_file=None, nprocesses=1, + use_mpi=False): + """Loads the sampler from the given config file. + + The following options are retrieved in the ``[sampler]`` section: + + * ``name`` : + Required. This must match the samlper's name. + * ``nwalkers`` : + Required. The number of walkers to use. + * ``ntemps`` : + The number of temperatures to use. Either this, or + ``inverse-temperatures-file`` must be provided (but not both). + * ``inverse-temperatures-file`` : + Path to an hdf file containing the inverse temperatures ("betas") + to use. The betas will be retrieved from the file's + ``.attrs['betas']``. Either this or ``ntemps`` must be provided + (but not both). + * ``niterations`` : + The number of iterations to run the sampler for. Either this or + ``effective-nsamples`` must be provided (but not both). + * ``effective-nsamples`` : + Run the sampler until the given number of effective samples are + obtained. A ``checkpoint-interval`` must also be provided in this + case. Either this or ``niterations`` must be provided (but not + both). + * ``thin-interval`` : + Thin the samples by the given value before saving to disk. May + provide this, or ``max-samples-per-chain``, but not both. If + neither options are provided, will save all samples. + * ``max-samples-per-chain`` : + Thin the samples such that the number of samples per chain per + temperature that are saved to disk never exceeds the given value. + May provide this, or ``thin-interval``, but not both. If neither + options are provided, will save all samples. + * ``checkpoint-interval`` : + Sets the checkpoint interval to use. Must be provided if using + ``effective-nsamples``. + * ``checkpoint-signal`` : + Set the checkpoint signal, e.g., "USR2". Optional. + * ``logl-function`` : + The attribute of the model to use for the loglikelihood. If + not provided, will default to ``loglikelihood``. + + Settings for burn-in tests are read from ``[sampler-burn_in]``. In + particular, the ``burn-in-test`` option is used to set the burn in + tests to perform. See + :py:func:`MultiTemperedMCMCBurnInTests.from_config` for details. If no + ``burn-in-test`` is provided, no burn in tests will be carried out. + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file object to parse. + model : pycbc.inference.model.BaseModel instance + The model to use. + output_file : str, optional + The name of the output file to checkpoint and write results to. + nprocesses : int, optional + The number of parallel processes to use. Default is 1. + use_mpi : bool, optional + Use MPI for parallelization. Default is False. + + Returns + ------- + EmceePTSampler : + The sampler instance. 
+ """ + section = "sampler" + # check name + assert cp.get(section, "name") == cls.name, ( + "name in section [sampler] must match mine") + # get the number of walkers to use + nwalkers = int(cp.get(section, "nwalkers")) + # get the temps/betas + ntemps, betas = cls.betas_from_config(cp, section) + # get the checkpoint interval, if it's specified + checkpoint_interval = cls.checkpoint_from_config(cp, section) + checkpoint_signal = cls.ckpt_signal_from_config(cp, section) + # get the loglikelihood function + logl = get_optional_arg_from_config(cp, section, 'logl-function') + obj = cls(model, ntemps, nwalkers, betas=betas, + checkpoint_interval=checkpoint_interval, + checkpoint_signal=checkpoint_signal, + loglikelihood_function=logl, nprocesses=nprocesses, + use_mpi=use_mpi) + # set target + obj.set_target_from_config(cp, section) + # add burn-in if it's specified + obj.set_burn_in_from_config(cp) + # set prethin options + obj.set_thin_interval_from_config(cp, section) + # Set up the output file + setup_output(obj, output_file) + if not obj.new_checkpoint: + obj.resume_from_checkpoint() + else: + obj.set_start_from_config(cp) + return obj
+ + + @property + def samples(self): + """A dict mapping ``variable_params`` to arrays of samples currently + in memory. + + The arrays have shape ``ntemps x nwalkers x niterations``. + """ + # emcee stores samples to it's chain attribute as a + # nwalker x niterations x ndim array + raw_samples = self._sampler.chain + return raw_samples_to_dict(self, raw_samples) + + @property + def model_stats(self): + """Returns the log likelihood ratio and log prior as a dict of arrays. + + The returned array has shape ntemps x nwalkers x niterations. + + Unfortunately, because ``emcee_pt`` does not have blob support, this + will only return the loglikelihood and logprior (with the logjacobian + set to zero) regardless of what stats the model can return. + + + .. warning:: + Since the `logjacobian` is not saved by `emcee_pt`, the `logprior` + returned here is the log of the prior pdf in the sampling + coordinate frame rather than the variable params frame. This + differs from the variable params frame by the log of the Jacobian + of the transform from one frame to the other. If no sampling + transforms were used, then the `logprior` is the same. + """ + # likelihood has shape ntemps x nwalkers x niterations + logl = self._sampler.lnlikelihood + # get prior from posterior + logp = self._sampler.lnprobability - logl + logjacobian = numpy.zeros(logp.shape) + return {'loglikelihood': logl, 'logprior': logp, + 'logjacobian': logjacobian} + +
+[docs] + def clear_samples(self): + """Clears the chain and blobs from memory. + """ + # store the iteration that the clear is occuring on + self._lastclear = self.niterations + self._itercounter = 0 + # now clear the chain + self._sampler.reset()
+ + +
+[docs] + def set_state_from_file(self, filename): + """Sets the state of the sampler back to the instance saved in a file. + """ + with self.io(filename, 'r') as fp: + rstate = fp.read_random_state() + # set the numpy random state + numpy.random.set_state(rstate)
+ + +
+[docs] + def run_mcmc(self, niterations): + """Advance the ensemble for a number of samples. + + Parameters + ---------- + niterations : int + Number of samples to get from sampler. + """ + pos = self._pos + if pos is None: + pos = self._p0 + res = self._sampler.run_mcmc(pos, niterations) + p, _, _ = res[0], res[1], res[2] + # update the positions + self._pos = p
+ + +
+[docs] + def write_results(self, filename): + """Writes samples, model stats, acceptance fraction, and random state + to the given file. + + Parameters + ----------- + filename : str + The file to write to. The file is opened using the ``io`` class + in an an append state. + """ + with self.io(filename, 'a') as fp: + # write samples + fp.write_samples(self.samples, + parameters=self.model.variable_params, + last_iteration=self.niterations) + # write stats + fp.write_samples(self.model_stats, last_iteration=self.niterations) + # write accpetance + fp.write_acceptance_fraction(self._sampler.acceptance_fraction) + # write random state + fp.write_random_state()
+ + +
+[docs] + @classmethod + def calculate_logevidence(cls, filename, thin_start=None, thin_end=None, + thin_interval=None): + """Calculates the log evidence from the given file using ``emcee_pt``'s + thermodynamic integration. + + Parameters + ---------- + filename : str + Name of the file to read the samples from. Should be an + ``EmceePTFile``. + thin_start : int + Index of the sample to begin returning stats. Default is to read + stats after burn in. To start from the beginning set thin_start + to 0. + thin_interval : int + Interval to accept every i-th sample. Default is to use the + `fp.acl`. If `fp.acl` is not set, then use all stats + (set thin_interval to 1). + thin_end : int + Index of the last sample to read. If not given then + `fp.niterations` is used. + + Returns + ------- + lnZ : float + The estimate of log of the evidence. + dlnZ : float + The error on the estimate. + """ + with cls._io(filename, 'r') as fp: + logls = fp.read_raw_samples(['loglikelihood'], + thin_start=thin_start, + thin_interval=thin_interval, + thin_end=thin_end, + temps='all', flatten=False) + logls = logls['loglikelihood'] + # we need the betas that were used + betas = fp.betas + # annoyingly, theromdynaimc integration in PTSampler is an instance + # method, so we'll implement a dummy one + ntemps = fp.ntemps + nwalkers = fp.nwalkers + ndim = len(fp.variable_params) + dummy_sampler = emcee.PTSampler(ntemps, nwalkers, ndim, None, + None, betas=betas) + return dummy_sampler.thermodynamic_integration_log_evidence( + logls=logls, fburnin=0.)
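+
+# Hedged usage sketch (the file name is an assumption): because
+# ``calculate_logevidence`` above is a classmethod, it can be applied to an
+# existing EmceePTFile checkpoint without re-running the sampler, e.g.
+#
+#   lnz, dlnz = EmceePTSampler.calculate_logevidence("emcee_pt_samples.hdf")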
+ + + def _correctjacobian(self, samples): + """Corrects the log jacobian values stored on disk. + + Parameters + ---------- + samples : dict + Dictionary of the samples. + """ + # flatten samples for evaluating + orig_shape = list(samples.values())[0].shape + flattened_samples = {p: arr.ravel() + for p, arr in list(samples.items())} + # convert to a list of tuples so we can use map function + params = list(flattened_samples.keys()) + size = flattened_samples[params[0]].size + logj = numpy.zeros(size) + for ii in range(size): + these_samples = {p: flattened_samples[p][ii] for p in params} + these_samples = self.model.sampling_transforms.apply(these_samples) + self.model.update(**these_samples) + logj[ii] = self.model.logjacobian + return logj.reshape(orig_shape) + +
+[docs] + def finalize(self): + """Calculates the log evidence and writes to the checkpoint file. + + If sampling transforms were used, this also corrects the jacobian + stored on disk. + + The thin start/interval/end for calculating the log evidence are + retrieved from the checkpoint file's thinning attributes. + """ + if self.model.sampling_transforms is not None: + # fix the lobjacobian values stored on disk + logging.info("Correcting logjacobian values on disk") + with self.io(self.checkpoint_file, 'r') as fp: + samples = fp.read_raw_samples(self.variable_params, + thin_start=0, + thin_interval=1, thin_end=None, + temps='all', flatten=False) + logjacobian = self._correctjacobian(samples) + # write them back out + for fn in [self.checkpoint_file, self.backup_file]: + with self.io(fn, "a") as fp: + fp[fp.samples_group]['logjacobian'][()] = logjacobian + logging.info("Calculating log evidence") + # get the thinning settings + with self.io(self.checkpoint_file, 'r') as fp: + thin_start = fp.thin_start + thin_interval = fp.thin_interval + thin_end = fp.thin_end + # calculate + logz, dlogz = self.calculate_logevidence( + self.checkpoint_file, thin_start=thin_start, thin_end=thin_end, + thin_interval=thin_interval) + logging.info("log Z, dlog Z: {}, {}".format(logz, dlogz)) + # write to both the checkpoint and backup + for fn in [self.checkpoint_file, self.backup_file]: + with self.io(fn, "a") as fp: + fp.write_logevidence(logz, dlogz)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/epsie.html b/latest/html/_modules/pycbc/inference/sampler/epsie.html new file mode 100644 index 00000000000..22a38010784 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/epsie.html @@ -0,0 +1,653 @@ + + + + + + pycbc.inference.sampler.epsie — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.epsie

+# Copyright (C) 2019  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This module provides classes for interacting with epsie samplers.
+"""
+
+
+import numpy
+
+import epsie
+from epsie.samplers import ParallelTemperedSampler
+
+# we'll use emcee_pt's default beta ladder for temperature levels
+from emcee.ptsampler import default_beta_ladder
+
+from pycbc.pool import choose_pool
+
+from .base import (BaseSampler, setup_output)
+from .base_mcmc import (BaseMCMC, get_optional_arg_from_config,
+                        nsamples_in_chain)
+from .base_multitemper import (MultiTemperedSupport, compute_acf, compute_acl,
+                               acl_from_raw_acls)
+from ..burn_in import MultiTemperedMCMCBurnInTests
+from ..jump import epsie_proposals_from_config
+from ..io import EpsieFile
+from .. import models
+
+
+
+[docs] +class EpsieSampler(MultiTemperedSupport, BaseMCMC, BaseSampler): + """Constructs an MCMC sampler using epsie's parallel-tempered sampler. + + Parameters + ---------- + model : model + A model from ``pycbc.inference.models``. + nchains : int + Number of chains to use in the sampler. + ntemps : int, optional + Number of temperatures to use in the sampler. A geometrically-spaced + temperature ladder with the gievn number of levels will be constructed + based on the number of parameters. If not provided, must provide + ``betas``. + betas : array, optional + An array of inverse temperature values to be used in for the + temperature ladder. If not provided, must provide ``ntemps``. + proposals : list, optional + List of proposals to use. Any parameters that do not have a proposal + provided will use the ``default_propsal``. **Note:** proposals should + be specified for the sampling parameters, not the + variable parameters. + default_proposal : an epsie.Proposal class, optional + The default proposal to use for parameters not in ``proposals``. + Default is :py:class:`epsie.proposals.Normal`. + default_proposal_args : dict, optional + Dictionary of arguments to pass to the default proposal. + swap_interval : int, optional + The number of iterations between temperature swaps. Default is 1. + seed : int, optional + Seed for epsie's random number generator. If None provided, will create + one. + checkpoint_interval : int, optional + Specify the number of iterations to do between checkpoints. If not + provided, no checkpointin will be done. + checkpoint_signal : str, optional + Set the signal to use when checkpointing. For example, 'USR2'. + loglikelihood_function : str, optional + Set the function to call from the model for the ``loglikelihood``. + Default is ``loglikelihood``. + nprocesses : int, optional + The number of parallel processes to use. Default is 1 + (no paralleliztion). + use_mpi : bool, optional + Use MPI for parallelization. Default (False) will use python's + multiprocessing. 
+ """ + name = "epsie" + _io = EpsieFile + burn_in_class = MultiTemperedMCMCBurnInTests + + def __init__(self, model, nchains, ntemps=None, betas=None, + proposals=None, default_proposal=None, + default_proposal_args=None, seed=None, + swap_interval=1, + checkpoint_interval=None, checkpoint_signal=None, + loglikelihood_function=None, + nprocesses=1, use_mpi=False): + + # create the betas if not provided + if betas is None: + betas = default_beta_ladder(len(model.variable_params), + ntemps=ntemps) + self.model = model + # create a wrapper for calling the model + model_call = _EpsieCallModel(model, loglikelihood_function) + + # these are used to help paralleize over multiple cores / MPI + models._global_instance = model_call + model_call = models._call_global_model + + # Set up the pool + pool = choose_pool(mpi=use_mpi, processes=nprocesses) + + # initialize the sampler + self._sampler = ParallelTemperedSampler( + model.sampling_params, model_call, nchains, betas=betas, + swap_interval=swap_interval, + proposals=proposals, default_proposal=default_proposal, + default_proposal_args=default_proposal_args, + seed=seed, pool=pool) + # set other parameters + self.nchains = nchains + self._ntemps = ntemps + self._checkpoint_interval = checkpoint_interval + self._checkpoint_signal = checkpoint_signal + + @property + def io(self): + return self._io + + @property + def base_shape(self): + return (self.ntemps, self.nchains,) + + @property + def betas(self): + """The inverse temperatures being used.""" + return self._sampler.betas + + @property + def seed(self): + """The seed used for epsie's random bit generator. + + This is not the same as the seed used for the prior distributions. + """ + return self._sampler.seed + + @property + def swap_interval(self): + """Number of iterations between temperature swaps.""" + return self._sampler.swap_interval + +
+[docs] + @staticmethod + def compute_acf(filename, **kwargs): + r"""Computes the autocorrelation function. + + Calls :py:func:`base_multitemper.compute_acf`; see that + function for details. + + Parameters + ---------- + filename : str + Name of a samples file to compute ACFs for. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.compute_acf`. + + Returns + ------- + dict : + Dictionary of arrays giving the ACFs for each parameter. The arrays + will have shape ``ntemps x nchains x niterations``. + """ + return compute_acf(filename, **kwargs)
+ + +
+[docs] + @staticmethod + def compute_acl(filename, **kwargs): + r"""Computes the autocorrelation length. + + Calls :py:func:`base_multitemper.compute_acl`; see that + function for details. + + Parameters + ----------- + filename : str + Name of a samples file to compute ACLs for. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.compute_acl`. + + Returns + ------- + dict + A dictionary of ntemps-long arrays of the ACLs of each parameter. + """ + return compute_acl(filename, **kwargs)
+ + + @property + def acl(self): # pylint: disable=invalid-overridden-method + """The autocorrelation lengths of the chains. + """ + return acl_from_raw_acls(self.raw_acls) + + @property + def effective_nsamples(self): # pylint: disable=invalid-overridden-method + """The effective number of samples post burn-in that the sampler has + acquired so far. + """ + act = self.act + if act is None: + act = numpy.inf + if self.burn_in is None: + start_iter = 0 + else: + start_iter = self.burn_in.burn_in_iteration + nperchain = nsamples_in_chain(start_iter, act, self.niterations) + if self.burn_in is not None: + # ensure that any chain not burned in has zero samples + nperchain[~self.burn_in.is_burned_in] = 0 + # and that any chain that is burned in has at least one sample + nperchain[self.burn_in.is_burned_in & (nperchain < 1)] = 1 + return int(nperchain.sum()) + + @property + def samples(self): + """A dict mapping ``variable_params`` to arrays of samples currently + in memory. + + The arrays have shape ``ntemps x nchains x niterations``. + + The dictionary also contains sampling parameters. + """ + samples = epsie.array2dict(self._sampler.positions) + # apply boundary conditions + samples = self.model.prior_distribution.apply_boundary_conditions( + **samples) + # apply transforms to go to model's variable params space + if self.model.sampling_transforms is not None: + samples = self.model.sampling_transforms.apply( + samples, inverse=True) + return samples + + @property + def model_stats(self): + """A dict mapping the model's ``default_stats`` to arrays of values. + + The arrays have shape ``ntemps x nchains x niterations``. + """ + return epsie.array2dict(self._sampler.blobs) + +
+[docs] + def clear_samples(self): + """Clears the chain and blobs from memory. + """ + # store the iteration that the clear is occuring on + self._lastclear = self.niterations + self._itercounter = 0 + # now clear the sampler + self._sampler.clear()
+ + +
+[docs] + def set_state_from_file(self, filename): + """Sets the state of the sampler back to the instance saved in a file. + """ + with self.io(filename, 'r') as fp: + # get the numpy state + numpy_rstate_group = '/'.join([fp.sampler_group, + 'numpy_random_state']) + rstate = fp.read_random_state(group=numpy_rstate_group) + # set the sampler state for epsie + self._sampler.set_state_from_checkpoint(fp, path=fp.sampler_group) + # set the global numpy random state for pycbc + numpy.random.set_state(rstate)
+ + +
+[docs] + def set_p0(self, samples_file=None, prior=None): + p0 = super(EpsieSampler, self).set_p0(samples_file=samples_file, + prior=prior) + self._sampler.start_position = p0
+ + + @property + def pos(self): + """A dictionary of the current chain positions.""" + # we override BaseMCMC's pos property because this can be directly + # retrieved from epsie + return self._sampler.current_positions + +
+[docs] + def run_mcmc(self, niterations): + """Advance the chains for a number of iterations. + + Parameters + ---------- + niterations : int + Number of samples to get from sampler. + """ + self._sampler.run(niterations)
+ + +
+[docs] + def write_results(self, filename): + """Writes samples, model stats, acceptance ratios, and random state + to the given file. + + Parameters + ----------- + filename : str + The file to write to. The file is opened using the ``io`` class + in an an append state. + """ + with self.io(filename, 'a') as fp: + # write samples + fp.write_samples(self.samples, + parameters=self.model.variable_params, + last_iteration=self.niterations) + # write stats + fp.write_samples(self.model_stats, last_iteration=self.niterations) + # write accpetance ratio + acceptance = self._sampler.acceptance + fp.write_acceptance_ratio(acceptance['acceptance_ratio'], + last_iteration=self.niterations) + # write temperature data + if self.ntemps > 1: + temp_ar = self._sampler.temperature_acceptance + temp_swaps = self._sampler.temperature_swaps + fp.write_temperature_data(temp_swaps, temp_ar, + self.swap_interval, + last_iteration=self.niterations) + # write numpy's global state (for the distributions) + numpy_rstate_group = '/'.join([fp.sampler_group, + 'numpy_random_state']) + fp.write_random_state(group=numpy_rstate_group) + # write the sampler's state + self._sampler.checkpoint(fp, path=fp.sampler_group)
+ + +
+[docs] + def finalize(self): + pass
+ + +
+[docs] + @classmethod + def from_config(cls, cp, model, output_file=None, nprocesses=1, + use_mpi=False): + """Loads the sampler from the given config file. + + The following options are retrieved in the ``[sampler]`` section: + + * ``name`` : + (required) must match the samlper's name + * ``nchains`` : + (required) the number of chains to use + * ``ntemps`` : + The number of temperatures to use. Either this, or + ``inverse-temperatures-file`` must be provided (but not both). + * ``inverse-temperatures-file`` : + Path to an hdf file containing the inverse temperatures ("betas") + to use. The betas will be retrieved from the file's + ``.attrs['betas']``. Either this or ``ntemps`` must be provided + (but not both). + * ``niterations`` : + The number of iterations to run the sampler for. Either this or + ``effective-nsamples`` must be provided (but not both). + * ``effective-nsamples`` : + Run the sampler until the given number of effective samples are + obtained. A ``checkpoint-interval`` must also be provided in this + case. Either this or ``niterations`` must be provided (but not + both). + * ``thin-interval`` : + Thin the samples by the given value before saving to disk. May + provide this, or ``max-samples-per-chain``, but not both. If + neither options are provided, will save all samples. + * ``max-samples-per-chain`` : + Thin the samples such that the number of samples per chain per + temperature that are saved to disk never exceeds the given value. + May provide this, or ``thin-interval``, but not both. If neither + options are provided, will save all samples. + * ``checkpoint-interval`` : + Sets the checkpoint interval to use. Must be provided if using + ``effective-nsamples``. + * ``checkpoint-signal`` : + Set the checkpoint signal, e.g., "USR2". Optional. + * ``seed`` : + The seed to use for epsie's random number generator. If not + provided, epsie will create one. + * ``logl-function`` : + The attribute of the model to use for the loglikelihood. If + not provided, will default to ``loglikelihood``. + * ``swap-interval`` : + The number of iterations between temperature swaps. Default is 1. + + Jump proposals must be provided for every sampling + parameter. These are retrieved from subsections + ``[jump_proposal-{params}]``, where params is a + :py:const:`pycbc.VARARGS_DELIM` separated list of parameters the + proposal should be used for. See + :py:func:`inference.jump.epsie_proposals_from_config` for + details. + + .. note:: + Jump proposals should be specified for **sampling parameters**, + not **variable parameters**. + + Settings for burn-in tests are read from ``[sampler-burn_in]``. In + particular, the ``burn-in-test`` option is used to set the burn in + tests to perform. See + :py:func:`MultiTemperedMCMCBurnInTests.from_config` for details. If no + ``burn-in-test`` is provided, no burn in tests will be carried out. + + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file object to parse. + model : pycbc.inference.model.BaseModel instance + The model to use. + output_file : str, optional + The name of the output file to checkpoint and write results to. + nprocesses : int, optional + The number of parallel processes to use. Default is 1. + use_mpi : bool, optional + Use MPI for parallelization. Default is False. + + Returns + ------- + EpsiePTSampler : + The sampler instance. 
+ """ + section = "sampler" + # check name + assert cp.get(section, "name") == cls.name, ( + "name in section [sampler] must match mine") + nchains = int(cp.get(section, "nchains")) + seed = get_optional_arg_from_config(cp, section, 'seed', dtype=int) + ntemps, betas = cls.betas_from_config(cp, section) + # get the swap interval + swap_interval = get_optional_arg_from_config(cp, section, + 'swap-interval', + dtype=int) + if swap_interval is None: + swap_interval = 1 + # get the checkpoint interval, if it's specified + checkpoint_interval = cls.checkpoint_from_config(cp, section) + checkpoint_signal = cls.ckpt_signal_from_config(cp, section) + # get the loglikelihood function + logl = get_optional_arg_from_config(cp, section, 'logl-function') + # get the proposals + proposals = epsie_proposals_from_config(cp) + # check that all of the sampling parameters have a specified + # proposal + sampling_params = set(model.sampling_params) + proposal_params = set(param for prop in proposals + for param in prop.parameters) + missing = sampling_params - proposal_params + if missing: + raise ValueError("Missing jump proposals for sampling parameters " + "{}".format(', '.join(missing))) + # initialize + obj = cls(model, nchains, + ntemps=ntemps, betas=betas, proposals=proposals, + swap_interval=swap_interval, seed=seed, + checkpoint_interval=checkpoint_interval, + checkpoint_signal=checkpoint_signal, + loglikelihood_function=logl, + nprocesses=nprocesses, use_mpi=use_mpi) + # set target + obj.set_target_from_config(cp, section) + # add burn-in if it's specified + obj.set_burn_in_from_config(cp) + # set prethin options + obj.set_thin_interval_from_config(cp, section) + # Set up the output file + setup_output(obj, output_file) + if obj.new_checkpoint: + obj.set_start_from_config(cp) + else: + obj.resume_from_checkpoint() + return obj
+
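For reference, the options documented above map onto a ``[sampler]`` section of the configuration file. The following is an illustrative sketch only: the values are placeholders, it assumes the sampler's registered ``name`` is ``epsie``, and a ``[jump_proposal-{params}]`` section is still required for every sampling parameter.

    [sampler]
    name = epsie
    nchains = 200
    ntemps = 4
    effective-nsamples = 1000
    checkpoint-interval = 500
    swap-interval = 1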
+ + + +class _EpsieCallModel(object): + """Model wrapper for epsie. + + Allows model to be called like a function. Returns the loglikelihood + function, logprior, and the model's default stats. + """ + + def __init__(self, model, loglikelihood_function=None): + self.model = model + if loglikelihood_function is None: + loglikelihood_function = 'loglikelihood' + self.loglikelihood_function = loglikelihood_function + + def __call__(self, **kwargs): + """Calls update, then calls the loglikelihood and logprior.""" + self.model.update(**kwargs) + logp = self.model.logprior + if logp == -numpy.inf: + # don't try to call the log likelihood if the prior rules it out + logl = numpy.nan + else: + logl = getattr(self.model, self.loglikelihood_function) + return logl, logp, self.model.current_stats +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/multinest.html b/latest/html/_modules/pycbc/inference/sampler/multinest.html new file mode 100644 index 00000000000..6dee7438973 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/multinest.html @@ -0,0 +1,568 @@ + + + + + + pycbc.inference.sampler.multinest — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.multinest

+# Copyright (C) 2018  Daniel Finstad
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides classes and functions for using the Multinest sampler
+package for parameter estimation.
+"""
+
+
+import logging
+import sys
+import numpy
+
+from pycbc.inference.io import (MultinestFile, validate_checkpoint_files)
+from pycbc.distributions import read_constraints_from_config
+from pycbc.pool import is_main_process
+from pycbc.transforms import apply_transforms
+from .base import (BaseSampler, setup_output)
+from .base_mcmc import get_optional_arg_from_config
+
+
+#
+# =============================================================================
+#
+#                                   Samplers
+#
+# =============================================================================
+#
+
+
+[docs] +class MultinestSampler(BaseSampler): + """This class is used to construct a nested sampler from + the Multinest package. + + Parameters + ---------- + model : model + A model from ``pycbc.inference.models``. + nlivepoints : int + Number of live points to use in sampler. + """ + name = "multinest" + _io = MultinestFile + + def __init__(self, model, nlivepoints, checkpoint_interval=1000, + importance_nested_sampling=False, + evidence_tolerance=0.1, sampling_efficiency=0.01, + constraints=None): + try: + loglevel = logging.getLogger().getEffectiveLevel() + logging.getLogger().setLevel(logging.WARNING) + from pymultinest import Analyzer, run + self.run_multinest = run + self.analyzer = Analyzer + logging.getLogger().setLevel(loglevel) + except ImportError: + raise ImportError("pymultinest is not installed.") + + super(MultinestSampler, self).__init__(model) + + self._constraints = constraints + self._nlivepoints = nlivepoints + self._ndim = len(model.variable_params) + self._random_state = numpy.random.get_state() + self._checkpoint_interval = checkpoint_interval + self._ztol = evidence_tolerance + self._eff = sampling_efficiency + self._ins = importance_nested_sampling + self._samples = None + self._itercount = None + self._logz = None + self._dlogz = None + self._importance_logz = None + self._importance_dlogz = None + self.is_main_process = is_main_process() + + @property + def io(self): + return self._io + + @property + def niterations(self): + """Get the current number of iterations. + """ + itercount = self._itercount + if itercount is None: + itercount = 0 + return itercount + + @property + def checkpoint_interval(self): + """Get the number of iterations between checkpoints. + """ + return self._checkpoint_interval + + @property + def nlivepoints(self): + """Get the number of live points used in sampling. + """ + return self._nlivepoints + + @property + def logz(self): + """Get the current estimate of the log evidence. + """ + return self._logz + + @property + def dlogz(self): + """Get the current error estimate of the log evidence. + """ + return self._dlogz + + @property + def importance_logz(self): + """Get the current importance weighted estimate of the log + evidence. + """ + return self._importance_logz + + @property + def importance_dlogz(self): + """Get the current error estimate of the importance + weighted log evidence. + """ + return self._importance_dlogz + + @property + def samples(self): + """A dict mapping ``variable_params`` to arrays of samples currently + in memory. + """ + samples_dict = {p: self._samples[:, i] for i, p in + enumerate(self.model.variable_params)} + return samples_dict + + @property + def model_stats(self): + """A dict mapping the model's ``default_stats`` to arrays of values. + """ + stats = [] + for sample in self._samples: + params = dict(zip(self.model.variable_params, sample)) + if self.model.sampling_transforms is not None: + params = self.model.sampling_transforms.apply(params) + self.model.update(**params) + self.model.logposterior + stats.append(self.model.get_current_stats()) + stats = numpy.array(stats) + return {s: stats[:, i] for i, s in enumerate(self.model.default_stats)} + +
+[docs] + def get_posterior_samples(self): + """Read posterior samples from ASCII output file created by + multinest. + """ + post_file = self.backup_file[:-9]+'-post_equal_weights.dat' + return numpy.loadtxt(post_file, ndmin=2)
+ + +
+[docs] + def check_if_finished(self): + """Estimate remaining evidence to see if desired evidence-tolerance + stopping criterion has been reached. + """ + resume_file = self.backup_file[:-9] + '-resume.dat' + current_vol, _, _ = numpy.loadtxt( + resume_file, skiprows=6, unpack=True) + maxloglike = max(self.get_posterior_samples()[:, -1]) + logz_remain = numpy.exp(maxloglike + + numpy.log(current_vol) - self.logz) + logging.info("Estimate of remaining logZ is %s", logz_remain) + done = logz_remain < self._ztol + return done
+ + +
+[docs] + def set_initial_conditions(self, initial_distribution=None, + samples_file=None): + """Sets the initial starting point for the sampler. + + If a starting samples file is provided, will also load the random + state from it. + """ + # use samples file to set the state of the sampler + if samples_file is not None: + self.set_state_from_file(samples_file)
+ + +
+[docs] + def resume_from_checkpoint(self): + """Resume sampler from checkpoint + """ + pass
+ + +
+[docs] + def set_state_from_file(self, filename): + """Sets the state of the sampler back to the instance saved in a file. + """ + with self.io(filename, 'r') as f_p: + rstate = f_p.read_random_state() + # set the numpy random state + numpy.random.set_state(rstate) + # set sampler's generator to the same state + self._random_state = rstate
+ + +
+[docs] + def loglikelihood(self, cube, *extra_args): + """Log likelihood evaluator that gets passed to multinest. + """ + params = {p: v for p, v in zip(self.model.variable_params, cube)} + # apply transforms + if self.model.sampling_transforms is not None: + params = self.model.sampling_transforms.apply(params) + if self.model.waveform_transforms is not None: + params = apply_transforms(params, self.model.waveform_transforms) + # apply constraints + if (self._constraints is not None and + not all([c(params) for c in self._constraints])): + return -numpy.inf + self.model.update(**params) + return self.model.loglikelihood
+ + +
+[docs] + def transform_prior(self, cube, *extra_args): + """Transforms the unit hypercube that multinest makes its draws + from, into the prior space defined in the config file. + """ + dict_cube = dict(zip(self.model.variable_params, cube)) + inv = self.model.prior_distribution.cdfinv(**dict_cube) + for i, param in enumerate(self.model.variable_params): + cube[i] = inv[param] + return cube
+ + +
+[docs] + def run(self): + """Runs the sampler until the specified evidence tolerance + is reached. + """ + if self.new_checkpoint: + self._itercount = 0 + else: + self.set_initial_conditions(samples_file=self.checkpoint_file) + with self.io(self.checkpoint_file, "r") as f_p: + self._itercount = f_p.niterations + outputfiles_basename = self.backup_file[:-9] + '-' + analyzer = self.analyzer(self._ndim, + outputfiles_basename=outputfiles_basename) + iterinterval = self.checkpoint_interval + done = False + while not done: + logging.info("Running sampler for %s to %s iterations", + self.niterations, self.niterations + iterinterval) + # run multinest + self.run_multinest(self.loglikelihood, self.transform_prior, + self._ndim, n_live_points=self.nlivepoints, + evidence_tolerance=self._ztol, + sampling_efficiency=self._eff, + importance_nested_sampling=self._ins, + max_iter=iterinterval, + n_iter_before_update=iterinterval, + seed=numpy.random.randint(0, 1e6), + outputfiles_basename=outputfiles_basename, + multimodal=False, verbose=True) + # parse results from multinest output files + nest_stats = analyzer.get_mode_stats() + self._logz = nest_stats["nested sampling global log-evidence"] + self._dlogz = nest_stats[ + "nested sampling global log-evidence error"] + if self._ins: + self._importance_logz = nest_stats[ + "nested importance sampling global log-evidence"] + self._importance_dlogz = nest_stats[ + "nested importance sampling global log-evidence error"] + self._samples = self.get_posterior_samples()[:, :-1] + logging.info("Have %s posterior samples", self._samples.shape[0]) + # update the itercounter + self._itercount += iterinterval + # make sure there's at least 1 posterior sample + if self._samples.shape[0] == 0: + continue + # dump the current results + if self.is_main_process: + self.checkpoint() + # check if we're finished + done = self.check_if_finished() + if not self.is_main_process: + sys.exit()
+ + +
+[docs]
+    def write_results(self, filename):
+        """Writes samples, model stats, acceptance fraction, and random state
+        to the given file.
+
+        Parameters
+        ----------
+        filename : str
+            The file to write to. The file is opened using the ``io`` class
+            in an append state.
+        """
+        with self.io(filename, 'a') as f_p:
+            # write samples
+            f_p.write_samples(self.samples, self.model.variable_params)
+            # write stats
+            f_p.write_samples(self.model_stats)
+            # write evidence
+            f_p.write_logevidence(self.logz, self.dlogz,
+                                  self.importance_logz,
+                                  self.importance_dlogz)
+            # write random state (use default numpy.random_state)
+            f_p.write_random_state()
+ + +
+[docs] + def checkpoint(self): + """Dumps current samples to the checkpoint file.""" + logging.info("Writing samples to files") + for f_n in [self.checkpoint_file, self.backup_file]: + self.write_results(f_n) + with self.io(f_n, "a") as f_p: + f_p.write_niterations(self.niterations) + logging.info("Validating checkpoint and backup files") + checkpoint_valid = validate_checkpoint_files( + self.checkpoint_file, self.backup_file, check_nsamples=False) + if not checkpoint_valid: + raise IOError("error writing to checkpoint file")
+ + +
+[docs]
+    def setup_output(self, output_file):
+        """Sets up the sampler's checkpoint and output files.
+
+        The checkpoint file has the same name as the output file, but with
+        ``.checkpoint`` appended to the name. A backup file will also be
+        created.
+
+        Parameters
+        ----------
+        output_file : str
+            Name of the output file.
+        """
+        if self.is_main_process:
+            setup_output(self, output_file)
+        else:
+            # child processes just store filenames
+            checkpoint_file = output_file + '.checkpoint'
+            backup_file = output_file + '.bkup'
+            self.checkpoint_file = checkpoint_file
+            self.backup_file = backup_file
+            self.checkpoint_valid = True
+            self.new_checkpoint = True
+ + +
+[docs] + def finalize(self): + """All data is written by the last checkpoint in the run method, so + this just passes.""" + pass
+ + +
+[docs] + @classmethod + def from_config(cls, cp, model, output_file=None, nprocesses=1, + use_mpi=False): + """Loads the sampler from the given config file.""" + section = "sampler" + # check name + assert cp.get(section, "name") == cls.name, ( + "name in section [sampler] must match mine") + # get the number of live points to use + nlivepoints = int(cp.get(section, "nlivepoints")) + # get the checkpoint interval, if it's specified + checkpoint = get_optional_arg_from_config( + cp, section, 'checkpoint-interval', dtype=int) + # get the evidence tolerance, if specified + ztol = get_optional_arg_from_config(cp, section, 'evidence-tolerance', + dtype=float) + # get the sampling efficiency, if specified + eff = get_optional_arg_from_config(cp, section, 'sampling-efficiency', + dtype=float) + # get importance nested sampling setting, if specified + ins = get_optional_arg_from_config(cp, section, + 'importance-nested-sampling', + dtype=bool) + # get constraints since we can't use the joint prior distribution + constraints = read_constraints_from_config(cp) + # build optional kwarg dict + kwarg_names = ['evidence_tolerance', 'sampling_efficiency', + 'importance_nested_sampling', + 'checkpoint_interval'] + optional_kwargs = {k: v for k, v in + zip(kwarg_names, [ztol, eff, ins, checkpoint]) if + v is not None} + obj = cls(model, nlivepoints, constraints=constraints, + **optional_kwargs) + obj.setup_output(output_file) + return obj
+
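As a sketch of how the options parsed above appear in a configuration file (illustrative values only; ``name`` and ``nlivepoints`` are required by the code above, the remaining options are optional):

    [sampler]
    name = multinest
    nlivepoints = 2000
    checkpoint-interval = 1000
    evidence-tolerance = 0.1
    sampling-efficiency = 0.01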
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/nessai.html b/latest/html/_modules/pycbc/inference/sampler/nessai.html new file mode 100644 index 00000000000..628b0cf6d5d --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/nessai.html @@ -0,0 +1,560 @@ + + + + + + pycbc.inference.sampler.nessai — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.nessai

+"""
+This module provides a class for using the nessai sampler package for
+parameter estimation.
+
+Documentation for nessai: https://nessai.readthedocs.io/en/latest/
+"""
+import ast
+import logging
+import os
+
+import nessai.flowsampler
+import nessai.model
+import nessai.livepoint
+import nessai.utils.multiprocessing
+import nessai.utils.settings
+import numpy
+import numpy.lib.recfunctions as rfn
+
+from .base import BaseSampler, setup_output
+from .base_mcmc import get_optional_arg_from_config
+from ..io import NessaiFile, loadfile
+from ...pool import choose_pool
+
+
+
+[docs] +class NessaiSampler(BaseSampler): + """Class to construct a FlowSampler from the nessai package.""" + + name = "nessai" + _io = NessaiFile + + def __init__( + self, + model, + loglikelihood_function, + nlive=1000, + nprocesses=1, + use_mpi=False, + run_kwds=None, + extra_kwds=None, + ): + super().__init__(model) + + self.nlive = nlive + self.model_call = NessaiModel(self.model, loglikelihood_function) + + self.extra_kwds = extra_kwds if extra_kwds is not None else {} + self.run_kwds = run_kwds if run_kwds is not None else {} + + nessai.utils.multiprocessing.initialise_pool_variables(self.model_call) + self.pool = choose_pool(mpi=use_mpi, processes=nprocesses) + self.nprocesses = nprocesses + + self._sampler = None + self._nested_samples = None + self._posterior_samples = None + self._logz = None + self._dlogz = None + self.checkpoint_file = None + self.resume_data = None + + @property + def io(self): + return self._io + + @property + def model_stats(self): + pass + + @property + def samples(self): + """The raw nested samples including the corresponding weights""" + if self._sampler.ns.nested_samples: + ns = numpy.array(self._sampler.ns.nested_samples) + samples = nessai.livepoint.live_points_to_dict( + ns, + self.model.sampling_params, + ) + samples["logwt"] = self._sampler.ns.state.log_posterior_weights + samples["loglikelihood"] = ns["logL"] + samples["logprior"] = ns["logP"] + samples["it"] = ns["it"] + else: + samples = {} + return samples + +
+[docs] + def run(self, **kwargs): + """Run the sampler""" + default_kwds, default_run_kwds = self.get_default_kwds( + importance_nested_sampler=self.extra_kwds.get( + "importance_nested_sampler", False + ) + ) + + extra_kwds = self.extra_kwds.copy() + run_kwds = self.run_kwds.copy() + + # Output in kwargs takes priority of extra kwds. + output = kwargs.pop("output", extra_kwds.pop("output", None)) + # If neither have been specified, use the path from the checkpoint file + if output is None: + output = os.path.join( + os.path.dirname(os.path.abspath(self.checkpoint_file)), + "outdir_nessai", + ) + + if kwargs is not None: + logging.info("Updating keyword arguments with %s", kwargs) + extra_kwds.update( + {k: v for k, v in kwargs.items() if k in default_kwds} + ) + run_kwds.update( + {k: v for k, v in kwargs.items() if k in default_run_kwds} + ) + + if self._sampler is None: + logging.info("Initialising nessai FlowSampler") + self._sampler = nessai.flowsampler.FlowSampler( + self.model_call, + output=output, + pool=self.pool, + n_pool=self.nprocesses, + close_pool=False, + signal_handling=False, + resume_data=self.resume_data, + checkpoint_callback=self.checkpoint_callback, + **extra_kwds, + ) + logging.info("Starting sampling with nessai") + self._sampler.run(**run_kwds)
+ + +
+[docs] + @staticmethod + def get_default_kwds(importance_nested_sampler=False): + """Return lists of all allowed keyword arguments for nessai. + + Returns + ------- + default_kwds : list + List of keyword arguments that can be passed to FlowSampler + run_kwds: list + List of keyword arguments that can be passed to FlowSampler.run + """ + return nessai.utils.settings.get_all_kwargs( + importance_nested_sampler=importance_nested_sampler, + split_kwargs=True, + )
+ + +
+[docs] + @classmethod + def from_config( + cls, cp, model, output_file=None, nprocesses=1, use_mpi=False + ): + """ + Loads the sampler from the given config file. + """ + section = "sampler" + # check name + assert ( + cp.get(section, "name") == cls.name + ), "name in section [sampler] must match mine" + + if cp.has_option(section, "importance_nested_sampler"): + importance_nested_sampler = cp.get( + section, + "importance_nested_sampler", + ) + else: + importance_nested_sampler = False + + # Requires additional development work, see the model class below + if importance_nested_sampler is True: + raise NotImplementedError( + "Importance nested sampler is not currently supported" + ) + + default_kwds, default_run_kwds = cls.get_default_kwds( + importance_nested_sampler + ) + + # Keyword arguments the user cannot configure via the config + remove_kwds = [ + "pool", + "n_pool", + "close_pool", + "signal_handling", + "checkpoint_callback", + ] + + for kwd in remove_kwds: + default_kwds.pop(kwd, None) + default_run_kwds.pop(kwd, None) + + kwds = {} + run_kwds = {} + + # ast.literal_eval is used here since specifying a dictionary with all + # various types would be difficult. However, one may wish to revisit + # this in future, e.g. if evaluating code is a concern. + for d_out, d_defaults in zip( + [kwds, run_kwds], [default_kwds, default_run_kwds] + ): + for k in d_defaults.keys(): + if cp.has_option(section, k): + option = cp.get(section, k) + try: + # This will fail for e.g. a string with an underscore + option = ast.literal_eval(option) + except ValueError: + pass + d_out[k] = option + + # Specified kwds + ignore_kwds = {"nlive", "name"} + invalid_kwds = ( + cp[section].keys() + - set().union(kwds.keys(), run_kwds.keys()) + - ignore_kwds + ) + + if invalid_kwds: + raise RuntimeError( + f"Config contains unknown options: {invalid_kwds}" + ) + logging.info("nessai keyword arguments: %s", kwds) + logging.info("nessai run keyword arguments: %s", run_kwds) + + loglikelihood_function = get_optional_arg_from_config( + cp, section, "loglikelihood-function" + ) + + obj = cls( + model, + loglikelihood_function=loglikelihood_function, + nprocesses=nprocesses, + use_mpi=use_mpi, + run_kwds=run_kwds, + extra_kwds=kwds, + ) + + # Do not need to check number of samples for a nested sampler + setup_output(obj, output_file, check_nsamples=False) + if not obj.new_checkpoint: + obj.resume_from_checkpoint() + return obj
+ + +
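Because the remaining options are discovered dynamically from nessai's own defaults, the configuration section can stay minimal. A hedged sketch (placeholder values; any additional keys are assumed to match keyword arguments of nessai's ``FlowSampler`` or ``FlowSampler.run``):

    [sampler]
    name = nessai
    nlive = 2000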
+[docs] + def set_initial_conditions( + self, + initial_distribution=None, + samples_file=None, + ): + """Sets up the starting point for the sampler. + + This is not used for nessai. + """
+ + +
+[docs] + def checkpoint_callback(self, state): + """Callback for checkpointing. + + This will be called periodically by nessai. + """ + for fn in [self.checkpoint_file, self.backup_file]: + with self.io(fn, "a") as fp: + fp.write_pickled_data_into_checkpoint_file(state) + self.write_results(fn)
+ + +
+[docs] + def checkpoint(self): + """Checkpoint the sampler""" + self.checkpoint_callback(self._sampler.ns)
+ + +
+[docs] + def resume_from_checkpoint(self): + """Reads the resume data from the checkpoint file.""" + try: + with loadfile(self.checkpoint_file, "r") as fp: + self.resume_data = fp.read_pickled_data_from_checkpoint_file() + logging.info( + "Found valid checkpoint file: %s", self.checkpoint_file + ) + except Exception as e: + logging.info("Failed to load checkpoint file with error: %s", e)
+ + +
+[docs] + def finalize(self): + """Finalize sampling""" + logz = self._sampler.ns.log_evidence + dlogz = self._sampler.ns.log_evidence_error + logging.info("log Z, dlog Z: %s, %s", logz, dlogz) + self.checkpoint()
+ + +
+[docs] + def write_results(self, filename): + """Write the results to a given file. + + Writes the nested samples, log-evidence and log-evidence error. + """ + with self.io(filename, "a") as fp: + fp.write_raw_samples(self.samples) + fp.write_logevidence( + self._sampler.ns.log_evidence, + self._sampler.ns.log_evidence_error, + )
+
+ + + +
+[docs] +class NessaiModel(nessai.model.Model): + """Wrapper for PyCBC Inference model class for use with nessai. + + Parameters + ---------- + model : inference.BaseModel instance + A model instance from PyCBC. + loglikelihood_function : str + Name of the log-likelihood method to call. + """ + + def __init__(self, model, loglikelihood_function=None): + self.model = model + self.names = list(model.sampling_params) + + # Configure the log-likelihood function + if loglikelihood_function is None: + loglikelihood_function = "loglikelihood" + self.loglikelihood_function = loglikelihood_function + + # Configure the priors bounds + bounds = {} + for dist in model.prior_distribution.distributions: + bounds.update( + **{ + k: [v.min, v.max] + for k, v in dist.bounds.items() + if k in self.names + } + ) + self.bounds = bounds + # Prior and likelihood are not vectorised + self.vectorised_likelihood = False + self.vectorised_prior = False + # Use the pool for computing the prior + self.parallelise_prior = True + +
+[docs] + def to_dict(self, x): + """Convert a nessai live point array to a dictionary""" + return {n: x[n].item() for n in self.names}
+ + +
+[docs] + def to_live_points(self, x): + """Convert to the structured arrays used by nessai""" + # It is possible this could be made faster + return nessai.livepoint.numpy_array_to_live_points( + rfn.structured_to_unstructured(x), + self.names, + )
+ + +
+[docs] + def new_point(self, N=1): + """Draw a new point""" + return self.to_live_points(self.model.prior_rvs(size=N))
+ + +
+[docs] + def new_point_log_prob(self, x): + """Log-probability for the ``new_point`` method""" + return self.batch_evaluate_log_prior(x)
+ + +
+[docs] + def log_prior(self, x): + """Compute the log-prior""" + self.model.update(**self.to_dict(x)) + return self.model.logprior
+ + +
+[docs] + def log_likelihood(self, x): + """Compute the log-likelihood""" + self.model.update(**self.to_dict(x)) + return getattr(self.model, self.loglikelihood_function)
+ + +
+[docs] + def from_unit_hypercube(self, x): + """Map from the unit-hypercube to the prior.""" + # Needs to be implemented for importance nested sampler + # This method is already available in pycbc but the inverse is not + raise NotImplementedError
+ + +
+[docs] + def to_unit_hypercube(self, x): + """Map to the unit-hypercube to the prior.""" + # Needs to be implemented for importance nested sampler + raise NotImplementedError
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/ptemcee.html b/latest/html/_modules/pycbc/inference/sampler/ptemcee.html new file mode 100644 index 00000000000..b5a006f6536 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/ptemcee.html @@ -0,0 +1,798 @@ + + + + + + pycbc.inference.sampler.ptemcee — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.ptemcee

+# Copyright (C) 2016  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+"""
+This module provides classes and functions for using the ptemcee sampler
+package for parameter estimation.
+"""
+
+
+import shlex
+import numpy
+import ptemcee
+import logging
+from pycbc.pool import choose_pool
+
+from .base import (BaseSampler, setup_output)
+from .base_mcmc import (BaseMCMC, EnsembleSupport, raw_samples_to_dict,
+                        get_optional_arg_from_config)
+from .base_multitemper import (read_betas_from_hdf,
+                               ensemble_compute_acf, ensemble_compute_acl)
+from ..burn_in import EnsembleMultiTemperedMCMCBurnInTests
+from pycbc.inference.io import PTEmceeFile
+from .. import models
+
+
+
+[docs] +class PTEmceeSampler(EnsembleSupport, BaseMCMC, BaseSampler): + """This class is used to construct the parallel-tempered ptemcee sampler. + + Parameters + ---------- + model : model + A model from ``pycbc.inference.models``. + nwalkers : int + Number of walkers to use in sampler. + ntemps : int, optional + Specify the number of temps to use. Either this, ``Tmax``, or ``betas`` + must be specified. + Tmax : float, optional + Specify the maximum temperature to use. This may be used with + ``ntemps``; see :py:func:`ptemcee.make_ladder` for details. Either + this, ``ntemps``, or ``betas`` must be specified. + betas : list of float, optional + Specify the betas to use. Must be provided if ``ntemps`` and ``Tmax`` + are not given. Will override ``ntemps`` and ``Tmax`` if provided. + adaptive : bool, optional + Whether or not to use adaptive temperature levels. Default is False. + adaptation_lag : int, optional + Only used if ``adaptive`` is True; see :py:mod:`ptemcee.Sampler` for + details. If not provided, will use ``ptemcee``'s default. + adaptation_time : int, optional + Only used if ``adaptive`` is True; see :py:mod:`ptemcee.Sampler` for + details. If not provided, will use ``ptemcee``'s default. + scale_factor : float, optional + Scale factor used for the stretch proposal; see + :py:mod:`ptemcee.Sampler` for details. If not provided, will use + ``ptemcee``'s default. + loglikelihood_function : str, optional + Set the function to call from the model for the ``loglikelihood``. + Default is ``loglikelihood``. + nprocesses : int, optional + The number of parallel processes to use. Default is 1 + (no paralleliztion). + use_mpi : bool, optional + Use MPI for parallelization. Default (False) will use python's + multiprocessing. + """ + name = "ptemcee" + _io = PTEmceeFile + burn_in_class = EnsembleMultiTemperedMCMCBurnInTests + + def __init__(self, model, nwalkers, ntemps=None, Tmax=None, betas=None, + adaptive=False, adaptation_lag=None, adaptation_time=None, + scale_factor=None, + loglikelihood_function=None, + checkpoint_interval=None, checkpoint_signal=None, + nprocesses=1, use_mpi=False): + + self.model = model + ndim = len(model.variable_params) + # create temperature ladder if needed + if ntemps is None and Tmax is None and betas is None: + raise ValueError("must provide either ntemps/Tmax or betas") + if betas is None: + betas = ptemcee.make_ladder(ndim, ntemps=ntemps, Tmax=Tmax) + # construct the keyword arguments to pass; if a kwarg is None, we + # won't pass it, resulting in ptemcee's defaults being used + kwargs = {} + kwargs['adaptive'] = adaptive + kwargs['betas'] = betas + if adaptation_lag is not None: + kwargs['adaptation_lag'] = adaptation_lag + if adaptation_time is not None: + kwargs['adaptation_time'] = adaptation_time + if scale_factor is not None: + kwargs['scale_factor'] = scale_factor + # create a wrapper for calling the model + if loglikelihood_function is None: + loglikelihood_function = 'loglikelihood' + # frustratingly, ptemcee does not support blob data, so we have to + # turn it off + model_call = models.CallModel(model, loglikelihood_function, + return_all_stats=False) + # these are used to help paralleize over multiple cores / MPI + models._global_instance = model_call + model_call = models._call_global_model + prior_call = models._call_global_model_logprior + self.pool = choose_pool(mpi=use_mpi, processes=nprocesses) + # construct the sampler + self._sampler = ptemcee.Sampler(nwalkers=nwalkers, ndim=ndim, + logl=model_call, logp=prior_call, + 
mapper=self.pool.map, **kwargs) + self.nwalkers = nwalkers + self._ntemps = ntemps + self._checkpoint_interval = checkpoint_interval + self._checkpoint_signal = checkpoint_signal + # we'll initialize ensemble and chain to None + self._chain = None + self._ensemble = None + + @property + def io(self): + return self._io + + @property + def ntemps(self): + """The number of temeratures that are set.""" + return self._ntemps + + @property + def base_shape(self): + return (self.ntemps, self.nwalkers,) + + @property + def betas(self): + """Returns the beta history currently in memory.""" + # chain betas has shape niterations x ntemps; transpose to + # ntemps x niterations + return self._chain.betas.transpose() + + @property + def starting_betas(self): + """Returns the betas that were used at startup.""" + # the initial betas that were used + return self._sampler.betas + + @property + def adaptive(self): + """Whether or not the betas are adapted.""" + return self._sampler.adaptive + + @property + def adaptation_lag(self): + """The adaptation lag for the beta evolution.""" + return self._sampler.adaptation_lag + + @property + def adaptation_time(self): + """The adaptation time for the beta evolution.""" + return self._sampler.adaptation_time + + @property + def scale_factor(self): + """The scale factor used by ptemcee.""" + return self._sampler.scale_factor + + @property + def ensemble(self): + """Returns the current ptemcee ensemble. + + The ensemble stores the current location of and temperatures of the + walkers. If the ensemble hasn't been setup yet, will set one up + using p0 for the positions. If set_p0 hasn't been run yet, this will + result in a ValueError. + """ + if self._ensemble is None: + if self._p0 is None: + raise ValueError("initial positions not set; run set_p0") + # use the global numpy random state + rstate = numpy.random.mtrand._rand + # self._p0 has base_shape x ndim = ntemps x nwalkers x ndim (see + # BaseMCMC.set_p0). ptemcee's Ensemble expects + # ntemps x nwalkers x ndim... so we're good + self._ensemble = self._sampler.ensemble(self._p0, rstate) + return self._ensemble + + @property + def _pos(self): + """Uses the ensemble for the position.""" + # BaseMCMC expects _pos to have shape ntemps x nwalkers x ndim, + # which is the same shape as ensemble.x + return self.ensemble.x + + @property + def chain(self): + """The current chain of samples in memory. + The chain is returned as a :py:mod:`ptemcee.chain.Chain` instance. If + no chain has been created yet (``_chain`` is None), then will create + a new chain using the current ``ensemble``. + """ + if self._chain is None: + # create a chain + self._chain = ptemcee.chain.Chain(self.ensemble) + return self._chain + +
+[docs]
+    def clear_samples(self):
+        """Clears the chain and blobs from memory.
+        """
+        # store the iteration that the clear is occurring on
+        self._lastclear = self.niterations
+        self._itercounter = 0
+        # set _chain to None; this will both cause the current chain to
+        # get garbage collected, and will cause a new chain to be created
+        # the next time self.chain is called
+        self._chain = None
+ + + @property + def samples(self): + """A dict mapping ``variable_params`` to arrays of samples currently + in memory. + The arrays have shape ``ntemps x nwalkers x niterations``. + """ + # chain.x has shape niterations x ntemps x nwalkers x ndim + # we'll transpose to ntemps x nwalkers x niterations x ndim + raw_samples = self._chain.x.transpose((1, 2, 0, 3)) + return raw_samples_to_dict(self, raw_samples) + + @property + def model_stats(self): + """Returns the log likelihood ratio and log prior as a dict of arrays. + + The returned array has shape ntemps x nwalkers x niterations. + + Unfortunately, because ``ptemcee`` does not have blob support, this + will only return the loglikelihood and logprior (with the logjacobian + set to zero) regardless of what stats the model can return. + + + .. warning:: + Since the ``logjacobian`` is not saved by ``ptemcee``, the + ``logprior`` returned here is the log of the prior pdf in the + sampling coordinate frame rather than the variable params frame. + This differs from the variable params frame by the log of the + Jacobian of the transform from one frame to the other. If no + sampling transforms were used, then the ``logprior`` is the same. + """ + # log likelihood and prior have shape + # niterations x ntemps x nwalkers; we'll tranpose to have shape + # ntemps x nwalkers x niterations + logl = self._chain.logl.transpose((1, 2, 0)) + logp = self._chain.logP.transpose((1, 2, 0)) + logjacobian = numpy.zeros(logp.shape) + return {'loglikelihood': logl, 'logprior': logp, + 'logjacobian': logjacobian} + +
+[docs] + def set_state_from_file(self, filename): + """Sets the state of the sampler back to the instance saved in a file. + """ + with self.io(filename, 'r') as fp: + rstate = fp.read_random_state() + # set the numpy random state + numpy.random.set_state(rstate) + # set the ensemble to its last state + ensemble = self.ensemble + for attr, val in fp.read_ensemble_attrs().items(): + setattr(ensemble, attr, val) + ensemble.betas = fp.read_betas(iteration=-1) + ensemble.time = fp.niterations
+ + +
+[docs] + def run_mcmc(self, niterations): + """Advance the ensemble for a number of samples. + + Parameters + ---------- + niterations : int + Number of samples to get from sampler. + """ + self.chain.run(niterations)
+ + +
+[docs] + @classmethod + def calculate_logevidence(cls, filename, thin_start=None, thin_end=None, + thin_interval=None): + """Calculates the log evidence from the given file. + This uses ``ptemcee``'s thermodynamic integration. + + Parameters + ---------- + filename : str + Name of the file to read the samples from. Should be an + ``PTEmceeFile``. + thin_start : int + Index of the sample to begin returning stats. Default is to read + stats after burn in. To start from the beginning set thin_start + to 0. + thin_interval : int + Interval to accept every i-th sample. Default is to use the + `fp.acl`. If `fp.acl` is not set, then use all stats + (set thin_interval to 1). + thin_end : int + Index of the last sample to read. If not given then + `fp.niterations` is used. + + Returns + ------- + lnZ : float + The estimate of log of the evidence. + dlnZ : float + The error on the estimate. + """ + with cls._io(filename, 'r') as fp: + logls = fp.read_raw_samples(['loglikelihood'], + thin_start=thin_start, + thin_interval=thin_interval, + thin_end=thin_end, + temps='all', flatten=False) + logls = logls['loglikelihood'] + # we need the betas that were used + betas = fp.read_betas(thin_start=thin_start, + thin_interval=thin_interval, + thin_end=thin_end) + # we'll separate betas out by their unique temperatures + # there's probably a faster way to do this... + mean_logls = [] + unique_betas = [] + ntemps = betas.shape[0] + for ti in range(ntemps): + ubti, idx = numpy.unique(betas[ti, :], return_inverse=True) + unique_idx = numpy.unique(idx) + loglsti = logls[ti, :, :] + for ii in unique_idx: + # average over the walkers and iterations with the same + # betas + getiters = numpy.where(ii == unique_idx)[0] + mean_logls.append(loglsti[:, getiters].mean()) + unique_betas.append(ubti[ii]) + return ptemcee.util.thermodynamic_integration_log_evidence( + numpy.array(unique_betas), numpy.array(mean_logls))
+ + +
+[docs] + @staticmethod + def compute_acf(filename, **kwargs): + r"""Computes the autocorrelation function. + + Calls :py:func:`base_multitemper.ensemble_compute_acf`; see that + function for details. + + Parameters + ---------- + filename : str + Name of a samples file to compute ACFs for. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.ensemble_compute_acf`. + + Returns + ------- + dict : + Dictionary of arrays giving the ACFs for each parameter. If + ``per-walker=True`` is passed as a keyword argument, the arrays + will have shape ``ntemps x nwalkers x niterations``. Otherwise, the + returned array will have shape ``ntemps x niterations``. + """ + return ensemble_compute_acf(filename, **kwargs)
+ + +
+[docs] + @staticmethod + def compute_acl(filename, **kwargs): + r"""Computes the autocorrelation length. + + Calls :py:func:`base_multitemper.ensemble_compute_acl`; see that + function for details. + + Parameters + ---------- + filename : str + Name of a samples file to compute ACLs for. + \**kwargs : + All other keyword arguments are passed to + :py:func:`base_multitemper.ensemble_compute_acl`. + + Returns + ------- + dict + A dictionary of ntemps-long arrays of the ACLs of each parameter. + """ + return ensemble_compute_acl(filename, **kwargs)
+ + +
+[docs] + @classmethod + def from_config(cls, cp, model, output_file=None, nprocesses=1, + use_mpi=False): + """Loads the sampler from the given config file. + + The following options are retrieved in the ``[sampler]`` section: + + * ``name = STR`` : + Required. This must match the sampler's name. + * ``nwalkers = INT`` : + Required. The number of walkers to use. + * ``ntemps = INT`` : + The number of temperatures to use. This may be used in combination + with ``Tmax``. Either this, ``Tmax``, ``betas`` or ``betas-file`` + must be provided. + * ``tmax = FLOAT`` : + The maximum temperature to use. This may be used in combination + with ``ntemps``, or alone. + * ``betas = FLOAT1 FLOAT2 [...]`` : + Space-separated list of (intial) inverse temperatures ("betas") to + use. This sets both the number of temperatures and the tmax. A + ``ValueError`` will be raised if both this and ``ntemps`` or + ``Tmax`` are provided. + * ``betas-file = STR`` : + Path to an hdf file containing the inverse temperatures ("betas") + to use. The betas will be retrieved from the file's + ``.attrs['betas']``. A ``ValueError`` will be raised if both this + and ``betas`` are provided. + * ``adaptive =`` : + If provided, temperature adaptation will be turned on. + * ``adaptation-lag = INT`` : + The adaptation lag to use (see ptemcee for details). + * ``adaptation-time = INT`` : + The adaptation time to use (see ptemcee for details). + * ``scale-factor = FLOAT`` : + The scale factor to use for the emcee stretch. + * ``niterations = INT`` : + The number of iterations to run the sampler for. Either this or + ``effective-nsamples`` must be provided (but not both). + * ``effective-nsamples = INT`` : + Run the sampler until the given number of effective samples are + obtained. A ``checkpoint-interval`` must also be provided in this + case. Either this or ``niterations`` must be provided (but not + both). + * ``thin-interval = INT`` : + Thin the samples by the given value before saving to disk. May + provide this, or ``max-samples-per-chain``, but not both. If + neither options are provided, will save all samples. + * ``max-samples-per-chain = INT`` : + Thin the samples such that the number of samples per chain per + temperature that are saved to disk never exceeds the given value. + May provide this, or ``thin-interval``, but not both. If neither + options are provided, will save all samples. + * ``checkpoint-interval = INT`` : + Sets the checkpoint interval to use. Must be provided if using + ``effective-nsamples``. + * ``checkpoint-signal = STR`` : + Set the checkpoint signal, e.g., "USR2". Optional. + * ``logl-function = STR`` : + The attribute of the model to use for the loglikelihood. If + not provided, will default to ``loglikelihood``. + + Settings for burn-in tests are read from ``[sampler-burn_in]``. In + particular, the ``burn-in-test`` option is used to set the burn in + tests to perform. See + :py:func:`EnsembleMultiTemperedMCMCBurnInTests.from_config` for + details. If no ``burn-in-test`` is provided, no burn in tests will be + carried out. + + Parameters + ---------- + cp : WorkflowConfigParser instance + Config file object to parse. + model : pycbc.inference.model.BaseModel instance + The model to use. + output_file : str, optional + The name of the output file to checkpoint and write results to. + nprocesses : int, optional + The number of parallel processes to use. Default is 1. + use_mpi : bool, optional + Use MPI for parallelization. Default is False. 
+ + Returns + ------- + EmceePTSampler : + The sampler instance. + """ + section = "sampler" + # check name + assert cp.get(section, "name") == cls.name, ( + "name in section [sampler] must match mine") + # get the number of walkers to use + nwalkers = int(cp.get(section, "nwalkers")) + # get the checkpoint interval, if it's specified + checkpoint_interval = cls.checkpoint_from_config(cp, section) + checkpoint_signal = cls.ckpt_signal_from_config(cp, section) + optargs = {} + # get the temperature level settings + ntemps = get_optional_arg_from_config(cp, section, 'ntemps', int) + if ntemps is not None: + optargs['ntemps'] = ntemps + tmax = get_optional_arg_from_config(cp, section, 'tmax', float) + if tmax is not None: + optargs['Tmax'] = tmax + betas = get_optional_arg_from_config(cp, section, 'betas') + if betas is not None: + # convert to list sorted in descencding order + betas = numpy.sort(list(map(float, shlex.split(betas))))[::-1] + optargs['betas'] = betas + betas_file = get_optional_arg_from_config(cp, section, 'betas-file') + if betas_file is not None: + optargs['betas'] = read_betas_from_hdf(betas_file) + # check for consistency + if betas is not None and betas_file is not None: + raise ValueError("provide either betas or betas-file, not both") + if 'betas' in optargs and (ntemps is not None or tmax is not None): + raise ValueError("provide either ntemps/tmax or betas/betas-file, " + "not both") + # adaptation parameters + adaptive = get_optional_arg_from_config(cp, section, 'adaptive') + if adaptive is not None: + optargs['adaptive'] = True + else: + optargs['adaptive'] = False + adaptation_lag = get_optional_arg_from_config(cp, section, + 'adaptation-lag', int) + if adaptation_lag is not None: + optargs['adaptation_lag'] = adaptation_lag + adaptation_time = get_optional_arg_from_config(cp, section, + 'adaptation-time', int) + if adaptation_time is not None: + optargs['adaptation_time'] = adaptation_time + scale_factor = get_optional_arg_from_config(cp, section, + 'scale-factor', float) + if scale_factor is not None: + optargs['scale_factor'] = scale_factor + # get the loglikelihood function + logl = get_optional_arg_from_config(cp, section, 'logl-function') + obj = cls(model, nwalkers, + checkpoint_interval=checkpoint_interval, + checkpoint_signal=checkpoint_signal, + loglikelihood_function=logl, nprocesses=nprocesses, + use_mpi=use_mpi, **optargs) + # set target + obj.set_target_from_config(cp, section) + # add burn-in if it's specified + obj.set_burn_in_from_config(cp) + # set prethin options + obj.set_thin_interval_from_config(cp, section) + # Set up the output file + setup_output(obj, output_file) + if not obj.new_checkpoint: + obj.resume_from_checkpoint() + else: + obj.set_start_from_config(cp) + return obj
+ + +
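For reference, a minimal ``[sampler]`` section using the options documented above might look like the following (illustrative values only; ``betas`` or ``betas-file`` could be given instead of ``ntemps``/``tmax``):

    [sampler]
    name = ptemcee
    nwalkers = 200
    ntemps = 4
    effective-nsamples = 2000
    checkpoint-interval = 1000
    max-samples-per-chain = 1000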
+[docs]
+    def write_results(self, filename):
+        """Writes samples, model stats, acceptance fraction, and random state
+        to the given file.
+
+        Parameters
+        ----------
+        filename : str
+            The file to write to. The file is opened using the ``io`` class
+            in an append state.
+        """
+        with self.io(filename, 'a') as fp:
+            # write samples
+            fp.write_samples(self.samples,
+                             parameters=self.model.variable_params,
+                             last_iteration=self.niterations)
+            # write stats
+            fp.write_samples(self.model_stats, last_iteration=self.niterations)
+            # write random state
+            fp.write_random_state()
+            # write betas
+            fp.write_betas(self.betas, last_iteration=self.niterations)
+            # write random state
+            fp.write_random_state()
+            # write attributes of the ensemble
+            fp.write_ensemble_attrs(self.ensemble)
+ + + def _correctjacobian(self, samples): + """Corrects the log jacobian values stored on disk. + + Parameters + ---------- + samples : dict + Dictionary of the samples. + """ + # flatten samples for evaluating + orig_shape = list(samples.values())[0].shape + flattened_samples = {p: arr.ravel() + for p, arr in list(samples.items())} + # convert to a list of tuples so we can use map function + params = list(flattened_samples.keys()) + size = flattened_samples[params[0]].size + logj = numpy.zeros(size) + for ii in range(size): + these_samples = {p: flattened_samples[p][ii] for p in params} + these_samples = self.model.sampling_transforms.apply(these_samples) + self.model.update(**these_samples) + logj[ii] = self.model.logjacobian + return logj.reshape(orig_shape) + +
+[docs] + def finalize(self): + """Calculates the log evidence and writes to the checkpoint file. + + If sampling transforms were used, this also corrects the jacobian + stored on disk. + + The thin start/interval/end for calculating the log evidence are + retrieved from the checkpoint file's thinning attributes. + """ + if self.model.sampling_transforms is not None: + # fix the lobjacobian values stored on disk + logging.info("Correcting logjacobian values on disk") + with self.io(self.checkpoint_file, 'r') as fp: + samples = fp.read_raw_samples(self.variable_params, + thin_start=0, + thin_interval=1, thin_end=None, + temps='all', flatten=False) + logjacobian = self._correctjacobian(samples) + # write them back out + for fn in [self.checkpoint_file, self.backup_file]: + with self.io(fn, "a") as fp: + fp[fp.samples_group]['logjacobian'][()] = logjacobian + logging.info("Calculating log evidence") + # get the thinning settings + with self.io(self.checkpoint_file, 'r') as fp: + thin_start = fp.thin_start + thin_interval = fp.thin_interval + thin_end = fp.thin_end + # calculate + logz, dlogz = self.calculate_logevidence( + self.checkpoint_file, thin_start=thin_start, thin_end=thin_end, + thin_interval=thin_interval) + logging.info("log Z, dlog Z: {}, {}".format(logz, dlogz)) + # write to both the checkpoint and backup + for fn in [self.checkpoint_file, self.backup_file]: + with self.io(fn, "a") as fp: + fp.write_logevidence(logz, dlogz)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/refine.html b/latest/html/_modules/pycbc/inference/sampler/refine.html new file mode 100644 index 00000000000..ae53a411c25 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/refine.html @@ -0,0 +1,511 @@ + + + + + + pycbc.inference.sampler.refine — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.refine

+""" Sampler that uses kde refinement of an existing posterior estimate.
+"""
+
+import logging
+import numpy
+import numpy.random
+
+from scipy.special import logsumexp
+from scipy.stats import gaussian_kde
+from scipy.stats import entropy as sentropy
+
+from pycbc.inference import models
+from pycbc.pool import choose_pool
+from pycbc.inference.io import loadfile
+
+from .base import setup_output, initial_dist_from_config
+from .dummy import DummySampler
+
+
+
+[docs] +def call_model(params): + models._global_instance.update(**params) + return ( + models._global_instance.logposterior, + models._global_instance.loglikelihood, + )
+ + + +
+[docs] +def resample_equal(samples, logwt, seed=0): + weights = numpy.exp(logwt - logsumexp(logwt)) + N = len(weights) + positions = (numpy.random.random() + numpy.arange(N)) / N + idx = numpy.zeros(N, dtype=int) + cumulative_sum = numpy.cumsum(weights) + cumulative_sum /= cumulative_sum[-1] + + i, j = 0, 0 + while i < N: + if positions[i] < cumulative_sum[j]: + idx[i] = j + i += 1 + else: + j += 1 + try: + rng = numpy.random.default_rng(seed) + except AttributeError: + # numpy pre-1.17 uses RandomState + # Py27: delete this after we drop python 2.7 support + rng = numpy.random.RandomState(seed) + rng.shuffle(idx) + return {p: samples[p][idx] for p in samples}
+ + + +
+[docs] +class RefineSampler(DummySampler): + """Sampler for kde drawn refinement of existing posterior estimate + + Parameters + ---------- + model : Model + An instance of a model from ``pycbc.inference.models``. + num_samples: int + The number of samples to draw from the kde at the conclusion + iterative_kde_samples: int + The number of samples to add to the kde during each iterations + min_refinement_steps: int + The minimum number of iterations to take. + max_refinement_steps: The maximum number of refinment steps to take. + entropy: float + The target entropy between iterative kdes + dlogz: float + The target evidence difference between iterative kde updates + kde: kde + The inital kde to use. + """ + + name = "refine" + + def __init__( + self, + model, + *args, + nprocesses=1, + use_mpi=False, + num_samples=int(1e5), + iterative_kde_samples=int(1e3), + min_refinement_steps=5, + max_refinement_steps=40, + offbase_fraction=0.7, + entropy=0.01, + dlogz=0.01, + kde=None, + update_groups=None, + max_kde_samples=int(5e4), + **kwargs + ): + super().__init__(model, *args) + + self.model = model + self.kde = kde + self.vparam = model.variable_params + models._global_instance = model + self.num_samples = int(num_samples) + self.pool = choose_pool(mpi=use_mpi, processes=nprocesses) + self._samples = {} + + self.num_samples = int(num_samples) + self.iterative_kde_samples = int(iterative_kde_samples) + self.max_kde_samples = int(max_kde_samples) + self.min_refinement_steps = int(min_refinement_steps) + self.max_refinement_steps = int(max_refinement_steps) + self.offbase_fraction = float(offbase_fraction) + self.entropy = float(entropy) + self.dlogz_target = float(dlogz) + + self.param_groups = [] + if update_groups is None: + self.param_groups.append(self.vparam) + else: + for gname in update_groups.split(): + gvalue = kwargs[gname] + if gvalue == "all": + self.param_groups.append(self.vparam) + else: + self.param_groups.append(gvalue.split()) + +
+[docs] + def draw_samples(self, size, update_params=None): + """Draw new samples within the model priors""" + logging.info("getting from kde") + + params = {} + ksamples = self.kde.resample(size=size) + j = 0 + for i, k in enumerate(self.vparam): + if update_params is not None and k not in update_params: + params[k] = numpy.ones(size) * self.fixed_samples[i] + else: + params[k] = ksamples[j, :] + j += 1 + + logging.info("checking prior") + keep = self.model.prior_distribution.contains(params) + logging.info("done checking") + r = numpy.array([params[k][keep] for k in self.vparam]) + return r
+ + +
+[docs] + @staticmethod + def compare_kde(kde1, kde2, size=int(1e4)): + """Calculate information difference between two kde distributions""" + s = kde1.resample(size=size) + return sentropy(kde1.pdf(s), kde2.pdf(s))
+ + +
+[docs] + def converged(self, step, kde_new, factor, logp): + """Check that kde is converged by comparing to previous iteration""" + logging.info("checking convergence") + if not hasattr(self, "old_logz"): + self.old_logz = numpy.inf + + entropy_diff = -1 + if self.entropy < 1: + entropy_diff = self.compare_kde(self.kde, kde_new) + + # Compare how the logz changes when adding new samples + # this is guaranteed to decrease as old samples included + logz = logsumexp(factor) - numpy.log(len(factor)) + dlogz = logz - self.old_logz + self.old_logz = logz + + # compare evidence subsets agree + choice2 = numpy.random.choice(factor, len(factor) // 2) + logz2 = logsumexp(choice2) - numpy.log(len(choice2)) + choice3 = numpy.random.choice(factor, len(factor) // 2) + logz3 = logsumexp(choice3) - numpy.log(len(choice3)) + dlogz2 = logz3 - logz2 + + # If kde matches posterior, the weights should be uniform + # check fraction that are significant deviation from peak + frac_offbase = (logp < logp.max() - 5.0).sum() / len(logp) + + logging.info( + "%s: dlogz_iter=%.4f," + "dlogz_half=%.4f, entropy=%.4f offbase fraction=%.4f", + step, + dlogz, + dlogz2, + entropy_diff, + frac_offbase, + ) + if ( + entropy_diff < self.entropy + and step >= self.min_refinement_steps + and max(abs(dlogz), abs(dlogz2)) < self.dlogz_target + and frac_offbase < self.offbase_fraction + ): + return True + else: + return False
+ + +
+[docs] + @classmethod + def from_config( + cls, cp, model, output_file=None, nprocesses=1, use_mpi=False + ): + """This should initialize the sampler given a config file.""" + kwargs = {k: cp.get("sampler", k) for k in cp.options("sampler")} + obj = cls(model, nprocesses=nprocesses, use_mpi=use_mpi, **kwargs) + obj.set_start_from_config(cp) + setup_output(obj, output_file, check_nsamples=False, validate=False) + return obj
+ + +
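Since ``from_config`` above forwards every option in the ``[sampler]`` section directly as a constructor keyword, a configuration sketch simply mirrors the ``__init__`` arguments. The option names below are an assumption based on that mapping and the values are placeholders:

    [sampler]
    name = refine
    num_samples = 100000
    iterative_kde_samples = 1000
    min_refinement_steps = 5
    max_refinement_steps = 40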
+[docs] + def set_start_from_config(self, cp): + """Sets the initial state of the sampler from config file""" + num_samples = self.iterative_kde_samples + if cp.has_option("sampler", "start-file"): + start_file = cp.get("sampler", "start-file") + logging.info("Using file %s for initial positions", start_file) + f = loadfile(start_file, "r") + fsamples = f.read_samples(f["samples"].keys()) + num_samples = len(fsamples) + + init_prior = initial_dist_from_config( + cp, self.model.variable_params, self.model.static_params + ) + if init_prior is not None: + samples = init_prior.rvs(size=num_samples) + else: + p = self.model.prior_distribution + samples = p.rvs(size=num_samples) + + ksamples = [] + for v in self.vparam: + if v in fsamples: + ksamples.append(fsamples[v]) + else: + ksamples.append(samples[v]) + + self.kde = gaussian_kde(numpy.array(ksamples))
+ + +
+[docs] + def run_samples(self, ksamples, update_params=None, iteration=False): + """Calculate the likelihoods and weights for a set of samples""" + # Calculate likelihood for each sample + logging.info("calculating likelihoods...") + args = [] + for i in range(len(ksamples[0])): + param = {k: ksamples[j][i] for j, k in enumerate(self.vparam)} + args.append(param) + + result = self.pool.map(call_model, args) + logging.info("..done with likelihood") + + logp = numpy.array([r[0] for r in result]) + logl = numpy.array([r[1] for r in result]) + + if update_params is not None: + ksamples = numpy.array( + [ + ksamples[i, :] + for i, k in enumerate(self.vparam) + if k in update_params + ] + ) + + # Weights for iteration + if iteration: + logw = logp - numpy.log(self.kde.pdf(ksamples)) + logw = logw - logsumexp(logw) + + # To avoid single samples dominating the weighting kde before + # we will put a cap on the minimum and maximum logw + sort = logw.argsort() + cap = logw[sort[-len(sort) // 5]] + low = logw[sort[len(sort) // 5]] + logw[logw > cap] = cap + logw[logw < low] = low + else: + # Weights for monte-carlo selection + logw = logp - numpy.log(self.kde.pdf(ksamples)) + logw = logw - logsumexp(logw) + + k = logp != -numpy.inf + ksamples = ksamples[:, k] + logp, logl, logw = logp[k], logl[k], logw[k] + return ksamples, logp, logl, logw
+ + +
+[docs] + def run(self): + """Iterative sample from kde and update based on likelihood values""" + self.group_kde = self.kde + for param_group in self.param_groups: + total_samples = None + total_logp = None + total_logw = None + total_logl = None + + gsample = self.group_kde.resample(int(1e5)) + gsample = [ + gsample[i, :] + for i, k in enumerate(self.vparam) + if k in param_group + ] + self.kde = gaussian_kde(numpy.array(gsample)) + self.fixed_samples = self.group_kde.resample(1) + + logging.info("updating: %s", param_group) + for r in range(self.max_refinement_steps): + ksamples = self.draw_samples( + self.iterative_kde_samples, update_params=param_group + ) + ksamples, logp, logl, logw = self.run_samples( + ksamples, update_params=param_group, iteration=True + ) + + if total_samples is not None: + total_samples = numpy.concatenate( + [total_samples, ksamples], axis=1 + ) + total_logp = numpy.concatenate([total_logp, logp]) + total_logw = numpy.concatenate([total_logw, logw]) + total_logl = numpy.concatenate([total_logl, logl]) + else: + total_samples = ksamples + total_logp = logp + total_logw = logw + total_logl = logl + + logging.info("setting up next kde iteration..") + ntotal_logw = total_logw - logsumexp(total_logw) + kde_new = gaussian_kde( + total_samples, weights=numpy.exp(ntotal_logw) + ) + + if self.converged(r, kde_new, total_logl + total_logw, logp): + break + + self.kde = kde_new + + full_samples = [] + gsample = self.group_kde.resample(len(total_samples[0])) + i = 0 + for j, k in enumerate(self.vparam): + if k in param_group: + full_samples.append(total_samples[i]) + i += 1 + else: + full_samples.append(gsample[j]) + + self.group_kde = gaussian_kde(numpy.array(full_samples)) + + logging.info("Drawing final samples") + ksamples = self.draw_samples(self.num_samples) + logging.info("Calculating final likelihoods") + ksamples, logp, logl, logw = self.run_samples(ksamples) + self._samples = {k: ksamples[j, :] for j, k in enumerate(self.vparam)} + self._samples["loglikelihood"] = logl + logging.info("Reweighting to equal samples") + + self._samples = resample_equal(self._samples, logw)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/snowline.html b/latest/html/_modules/pycbc/inference/sampler/snowline.html new file mode 100644 index 00000000000..f13265bada1 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/snowline.html @@ -0,0 +1,317 @@ + + + + + + pycbc.inference.sampler.snowline — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.snowline

+# Copyright (C) 2023  Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides classes and functions for using the snowline sampler
+package for parameter estimation.
+"""
+
+import sys
+import logging
+
+from pycbc.inference.io.snowline import SnowlineFile
+from pycbc.io.hdf import dump_state
+from pycbc.pool import use_mpi
+from .base import (BaseSampler, setup_output)
+from .base_cube import setup_calls
+
+
+#
+# =============================================================================
+#
+#                                   Samplers
+#
+# =============================================================================
+#
+
+
+[docs] +class SnowlineSampler(BaseSampler): + """This class is used to construct a Snowline sampler from the snowline + package. + + Parameters + ---------- + model : model + A model from ``pycbc.inference.models``. + """ + name = "snowline" + _io = SnowlineFile + + def __init__(self, model, **kwargs): + super().__init__(model) + + import snowline + log_likelihood_call, prior_call = setup_calls(model, copy_prior=True) + + self._sampler = snowline.ReactiveImportanceSampler( + list(self.model.variable_params), + log_likelihood_call, + transform=prior_call) + + do_mpi, _, rank = use_mpi() + self.main = (not do_mpi) or (rank == 0) + self.nlive = 0 + self.ndim = len(self.model.variable_params) + self.kwargs = kwargs + +
+[docs] + def run(self): + self.result = self._sampler.run(**self.kwargs) + if not self.main: + sys.exit(0) + self._sampler.print_results()
+ + + @property + def io(self): + return self._io + + @property + def niterations(self): + return self.result['niter'] + +
+[docs] + @classmethod + def from_config(cls, cp, model, output_file=None, **kwds): + """ + Loads the sampler from the given config file. + """ + skeys = {} + opts = {'num_global_samples': int, + 'num_gauss_samples': int, + 'max_ncalls': int, + 'min_ess': int, + 'max_improvement_loops': int + } + for opt_name in opts: + if cp.has_option('sampler', opt_name): + value = cp.get('sampler', opt_name) + skeys[opt_name] = opts[opt_name](value) + inst = cls(model, **skeys) + + do_mpi, _, rank = use_mpi() + if not do_mpi or (rank == 0): + setup_output(inst, output_file) + return inst
+ + +
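As a rough usage sketch (parameter values are illustrative only, and ``model`` is assumed to be an already-constructed instance from ``pycbc.inference.models``), the same keyword arguments that ``from_config`` reads from the ``[sampler]`` section can be passed directly:

from pycbc.inference.sampler.snowline import SnowlineSampler

# model: an already-constructed pycbc.inference.models instance (assumed);
# option values below are placeholders, not recommendations.
sampler = SnowlineSampler(model, num_global_samples=10000,
                          num_gauss_samples=1000, min_ess=400)
sampler.run()
print(sampler.logz, sampler.logz_err)   # evidence estimate from snowline
posterior = sampler.samples             # dict of posterior arrays per parameter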
+[docs] + def checkpoint(self): + """ There is currently no checkpointing implemented""" + pass
+ + +
+[docs] + def resume_from_checkpoint(self): + """ There is currently no checkpointing implemented""" + pass
+ + +
+[docs] + def finalize(self): + logging.info("Writing samples to files") + for fn in [self.checkpoint_file, self.backup_file]: + self.write_results(fn)
+ + + @property + def model_stats(self): + return {} + + @property + def samples(self): + samples = self.result['samples'] + params = list(self.model.variable_params) + samples_dict = {p: samples[:, i] for i, p in enumerate(params)} + return samples_dict + +
+[docs] + def write_results(self, filename): + """Writes samples, model stats, acceptance fraction, and random state + to the given file. + + Parameters + ---------- + filename : str + The file to write to. The file is opened using the ``io`` class + in an append state. + """ + with self.io(filename, 'a') as fp: + # write samples + fp.write_samples(self.samples, self.samples.keys()) + # write log evidence + fp.write_logevidence(self.logz, self.logz_err) + + # write full results + dump_state(self.result, fp, + path='sampler_info', + dsetname='presult')
+ + + @property + def logz(self): + """Return bayesian evidence estimated by snowline sampler. + """ + return self.result['logz'] + + @property + def logz_err(self): + """Return error in bayesian evidence estimated by snowline sampler. + """ + return self.result['logzerr']
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inference/sampler/ultranest.html b/latest/html/_modules/pycbc/inference/sampler/ultranest.html new file mode 100644 index 00000000000..02025cfb270 --- /dev/null +++ b/latest/html/_modules/pycbc/inference/sampler/ultranest.html @@ -0,0 +1,373 @@ + + + + + + pycbc.inference.sampler.ultranest — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inference.sampler.ultranest

+# Copyright (C) 2020  Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides classes and functions for using the ultranest sampler
+package for parameter estimation.
+"""
+
+import sys
+import logging
+import numpy
+
+from pycbc.inference.io.ultranest import UltranestFile
+from pycbc.io.hdf import dump_state
+from pycbc.pool import use_mpi
+from .base import (BaseSampler, setup_output)
+from .base_cube import setup_calls
+
+
+#
+# =============================================================================
+#
+#                                   Samplers
+#
+# =============================================================================
+#
+
+
+[docs] +class UltranestSampler(BaseSampler): + """This class is used to construct an Ultranest sampler from the ultranest + package. + + Parameters + ---------- + model : model + A model from ``pycbc.inference.models``. + log_dir : str + Folder where files should be stored for resuming (optional). + stepsampling : bool + If false, uses rejection sampling. If true, uses + hit-and-run sampler, which scales better with dimensionality. + """ + name = "ultranest" + _io = UltranestFile + + def __init__(self, model, log_dir=None, + stepsampling=False, + enable_plots=False, + **kwargs): + super(UltranestSampler, self).__init__(model) + + import ultranest + log_likelihood_call, prior_call = setup_calls(model, copy_prior=True) + + # Check for cyclic boundaries + periodic = [] + cyclic = self.model.prior_distribution.cyclic + for param in self.variable_params: + if param in cyclic: + logging.info('Param: %s will be cyclic', param) + periodic.append(True) + else: + periodic.append(False) + + self._sampler = ultranest.ReactiveNestedSampler( + list(self.model.variable_params), + log_likelihood_call, + prior_call, log_dir=log_dir, + wrapped_params=periodic, + resume=True) + + if stepsampling: + import ultranest.stepsampler + self._sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler( + nsteps=100, adaptive_nsteps='move-distance', + region_filter=True) + + self.enable_plots = enable_plots + self.nlive = 0 + self.ndim = len(self.model.variable_params) + self.result = None + self.kwargs = kwargs # Keywords for the run method of ultranest + + do_mpi, _, rank = use_mpi() + self.main = (not do_mpi) or (rank == 0) + +
+[docs] + def run(self): + self.result = self._sampler.run(**self.kwargs) + if not self.main: + sys.exit(0) + self._sampler.print_results() + + if self.enable_plots: + self._sampler.plot()
+ + + @property + def io(self): + return self._io + + @property + def niterations(self): + return self.result['niter'] + +
+[docs] + @classmethod + def from_config(cls, cp, model, output_file=None, **kwds): + """ + Loads the sampler from the given config file. + """ + skeys = {} + opts = {'update_interval_iter_fraction': float, + 'update_interval_ncall': int, + 'log_interval': int, + 'show_status': bool, + 'dlogz': float, + 'dKL': float, + 'frac_remain': float, + 'Lepsilon': float, + 'min_ess': int, + 'max_iters': int, + 'max_ncalls': int, + 'log_dir': str, + 'stepsampling': bool, + 'enable_plots': bool, + 'max_num_improvement_loops': int, + 'min_num_live_points': int, + 'cluster_num_live_points': int} + for opt_name in opts: + if cp.has_option('sampler', opt_name): + value = cp.get('sampler', opt_name) + skeys[opt_name] = opts[opt_name](value) + inst = cls(model, **skeys) + + do_mpi, _, rank = use_mpi() + if not do_mpi or (rank == 0): + setup_output(inst, output_file) + return inst
+ + +
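A rough usage sketch, mirroring the options that ``from_config`` reads (``model`` is assumed to be an already-constructed ``pycbc.inference.models`` instance and the option values are placeholders):

from pycbc.inference.sampler.ultranest import UltranestSampler

# model: an already-constructed pycbc.inference.models instance (assumed);
# log_dir enables resuming, stepsampling switches to the slice sampler.
sampler = UltranestSampler(model, log_dir='ultranest_run',
                           stepsampling=True,
                           min_num_live_points=400, dlogz=0.5)
sampler.run()
print(sampler.logz, sampler.logz_err)
posterior = sampler.samples   # equal-weight samples, including 'loglikelihood'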
+[docs] + def checkpoint(self): + pass
+ + +
+[docs] + def resume_from_checkpoint(self): + pass
+ + +
+[docs] + def finalize(self): + logging.info("Writing samples to files") + for fn in [self.checkpoint_file, self.backup_file]: + self.write_results(fn)
+ + + @property + def model_stats(self): + return {} + + @property + def samples(self): + from ultranest.utils import resample_equal + + # we'll do the resampling ourselves so we can pick up + # additional parameters + try: # Remove me on next ultranest release + wsamples = self.result['weighted_samples']['v'] + weights = self.result['weighted_samples']['w'] + logl = self.result['weighted_samples']['L'] + except KeyError: + wsamples = self.result['weighted_samples']['points'] + weights = self.result['weighted_samples']['weights'] + logl = self.result['weighted_samples']['logl'] + + wsamples = numpy.column_stack((wsamples, logl)) + params = list(self.model.variable_params) + ['loglikelihood'] + samples = resample_equal(wsamples, weights / weights.sum()) + samples_dict = {p: samples[:, i] for i, p in enumerate(params)} + return samples_dict + +
+[docs] + def write_results(self, filename): + """Writes samples, model stats, acceptance fraction, and random state + to the given file. + + Parameters + ---------- + filename : str + The file to write to. The file is opened using the ``io`` class + in an append state. + """ + with self.io(filename, 'a') as fp: + # write samples + fp.write_samples(self.samples, self.samples.keys()) + # write log evidence + fp.write_logevidence(self.logz, self.logz_err) + + # write full ultranest formatted results + dump_state(self.result, fp, + path='sampler_info', + dsetname='presult')
+ + + @property + def logz(self): + """Return bayesian evidence estimated by ultranest sampler. + """ + return self.result['logz'] + + @property + def logz_err(self): + """Return error in bayesian evidence estimated by ultranest sampler. + """ + return self.result['logzerr']
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inject/inject.html b/latest/html/_modules/pycbc/inject/inject.html new file mode 100644 index 00000000000..9ef91db908a --- /dev/null +++ b/latest/html/_modules/pycbc/inject/inject.html @@ -0,0 +1,1480 @@ + + + + + + pycbc.inject.inject — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inject.inject

+# Copyright (C) 2012  Alex Nitz, Tito Dal Canton
+#
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""This module provides utilities for injecting signals into data"""
+
+import os
+import numpy as np
+import copy
+import logging
+from abc import ABCMeta, abstractmethod
+
+import lal
+from ligo.lw import utils as ligolw_utils, ligolw, lsctables
+
+from pycbc import waveform, frame, libutils
+from pycbc.opt import LimitedSizeDict
+from pycbc.waveform import (get_td_waveform, fd_det,
+                            get_td_det_waveform_from_fd_det)
+from pycbc.waveform import utils as wfutils
+from pycbc.waveform import ringdown_td_approximants
+from pycbc.types import float64, float32, TimeSeries, load_timeseries
+from pycbc.detector import Detector
+from pycbc.conversions import tau0_from_mass1_mass2
+from pycbc.filter import resample_to_delta_t
+import pycbc.io
+from pycbc.io.ligolw import LIGOLWContentHandler
+
+logger = logging.getLogger('pycbc.inject.inject')
+
+sim = libutils.import_optional('lalsimulation')
+
+injection_func_map = {
+    np.dtype(float32): lambda *args: sim.SimAddInjectionREAL4TimeSeries(*args),
+    np.dtype(float64): lambda *args: sim.SimAddInjectionREAL8TimeSeries(*args),
+}
+
+# Map parameter names used in pycbc to names used in the sim_inspiral
+# table, if they are different
+sim_inspiral_map = {
+    'ra': 'longitude',
+    'dec': 'latitude',
+    'approximant': 'waveform',
+    }
+
+
+[docs] +def set_sim_data(inj, field, data): + """Sets data of a SimInspiral instance.""" + try: + sim_field = sim_inspiral_map[field] + except KeyError: + sim_field = field + # for tc, map to geocentric times + if sim_field == 'tc': + inj.geocent_end_time = int(data) + inj.geocent_end_time_ns = int(1e9*(data % 1)) + # for spin1 and spin2 we need data to be an array + if sim_field in ['spin1', 'spin2']: + setattr(inj, sim_field, [0, 0, data]) + else: + setattr(inj, sim_field, data)
+ + + +
+[docs] +def projector(detector_name, inj, hp, hc, distance_scale=1): + """ Use the injection row to project the polarizations into the + detector frame + """ + detector = Detector(detector_name) + + hp /= distance_scale + hc /= distance_scale + + try: + tc = inj.tc + ra = inj.ra + dec = inj.dec + except: + tc = inj.time_geocent + ra = inj.longitude + dec = inj.latitude + + hp.start_time += tc + hc.start_time += tc + + # taper the polarizations + try: + hp_tapered = wfutils.taper_timeseries(hp, inj.taper) + hc_tapered = wfutils.taper_timeseries(hc, inj.taper) + except AttributeError: + hp_tapered = hp + hc_tapered = hc + + projection_method = 'lal' + if hasattr(inj, 'detector_projection_method'): + projection_method = inj.detector_projection_method + + logger.info('Injecting at %s, method is %s', tc, projection_method) + + # compute the detector response and add it to the strain + signal = detector.project_wave(hp_tapered, hc_tapered, + ra, dec, inj.polarization, + method=projection_method, + reference_time=tc,) + return signal
+ + +
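A minimal sketch of calling ``projector`` directly: the injection row is mocked here with a ``SimpleNamespace`` carrying only the attributes the function reads, and all parameter values are illustrative.

from types import SimpleNamespace
from pycbc.waveform import get_td_waveform
from pycbc.inject.inject import projector

# Mock injection row with tc, ra, dec and polarization; there is no 'taper'
# attribute, so the tapering step is skipped via the AttributeError branch.
inj = SimpleNamespace(tc=1187008882.4, ra=3.45, dec=-0.41, polarization=0.0)
hp, hc = get_td_waveform(approximant='IMRPhenomD', mass1=30.0, mass2=30.0,
                         distance=400.0, f_lower=20.0, delta_t=1.0 / 4096)
h1_signal = projector('H1', inj, hp, hc)   # strain as seen by LIGO-Hanford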
+[docs] +def legacy_approximant_name(apx): + """Convert the old style xml approximant name to a name + and phase_order. Alex: I hate this function. Please delete this when we + use Collin's new tables. + """ + apx = str(apx) + try: + order = sim.GetOrderFromString(apx) + except: + print("Warning: Could not read phase order from string, using default") + order = -1 + name = sim.GetStringFromApproximant(sim.GetApproximantFromString(apx)) + return name, order
+ + + +class _XMLInjectionSet(object): + + """Manages sets of injections: reads injections from LIGOLW XML files + and injects them into time series. + + Parameters + ---------- + sim_file : string + Path to a LIGOLW XML file containing a SimInspiralTable + with injection definitions. + + Attributes + ---------- + indoc + table + """ + + def __init__(self, sim_file, **kwds): + self.indoc = ligolw_utils.load_filename( + sim_file, False, contenthandler=LIGOLWContentHandler) + self.table = lsctables.SimInspiralTable.get_table(self.indoc) + self.extra_args = kwds + + def apply(self, strain, detector_name, f_lower=None, distance_scale=1, + simulation_ids=None, + inj_filter_rejector=None, + injection_sample_rate=None,): + """Add injections (as seen by a particular detector) to a time series. + + Parameters + ---------- + strain : TimeSeries + Time series to inject signals into, of type float32 or float64. + detector_name : string + Name of the detector used for projecting injections. + f_lower : {None, float}, optional + Low-frequency cutoff for injected signals. If None, use value + provided by each injection. + distance_scale: {1, float}, optional + Factor to scale the distance of an injection with. The default is + no scaling. + simulation_ids: iterable, optional + If given, only inject signals with the given simulation IDs. + inj_filter_rejector: InjFilterRejector instance; optional, default=None + If given send each injected waveform to the InjFilterRejector + instance so that it can store a reduced representation of that + injection if necessary. + injection_sample_rate: float, optional + The sample rate to generate the signal before injection + + Returns + ------- + None + + Raises + ------ + TypeError + For invalid types of `strain`. + """ + if strain.dtype not in (float32, float64): + raise TypeError("Strain dtype must be float32 or float64, not " \ + + str(strain.dtype)) + + lalstrain = strain.lal() + earth_travel_time = lal.REARTH_SI / lal.C_SI + t0 = float(strain.start_time) - earth_travel_time + t1 = float(strain.end_time) + earth_travel_time + + # pick lalsimulation injection function + add_injection = injection_func_map[strain.dtype] + + delta_t = strain.delta_t + if injection_sample_rate is not None: + delta_t = 1.0 / injection_sample_rate + + injections = self.table + if simulation_ids: + injections = [inj for inj in injections \ + if inj.simulation_id in simulation_ids] + injection_parameters = [] + for inj in injections: + f_l = inj.f_lower if f_lower is None else f_lower + # roughly estimate if the injection may overlap with the segment + # Add 2s to end_time to account for ringdown and light-travel delay + end_time = inj.time_geocent + 2 + inj_length = tau0_from_mass1_mass2(inj.mass1, inj.mass2, f_l) + # Start time is taken as twice approx waveform length with a 1s + # safety buffer + start_time = inj.time_geocent - 2 * (inj_length + 1) + if end_time < t0 or start_time > t1: + continue + signal = self.make_strain_from_inj_object(inj, delta_t, + detector_name, f_lower=f_l, distance_scale=distance_scale) + signal = resample_to_delta_t(signal, strain.delta_t, method='ldas') + if float(signal.start_time) > t1: + continue + + signal = signal.astype(strain.dtype) + signal_lal = signal.lal() + add_injection(lalstrain, signal_lal, None) + injection_parameters.append(inj) + if inj_filter_rejector is not None: + sid = inj.simulation_id + inj_filter_rejector.generate_short_inj_from_inj(signal, sid) + + strain.data[:] = lalstrain.data.data[:] + + injected = copy.copy(self) + 
injected.table = lsctables.SimInspiralTable() + injected.table += injection_parameters + if inj_filter_rejector is not None: + inj_filter_rejector.injection_params = injected + return injected + + def make_strain_from_inj_object(self, inj, delta_t, detector_name, + f_lower=None, distance_scale=1): + """Make a h(t) strain time-series from an injection object as read from + a sim_inspiral table, for example. + + Parameters + ----------- + inj : injection object + The injection object to turn into a strain h(t). + delta_t : float + Sample rate to make injection at. + detector_name : string + Name of the detector used for projecting injections. + f_lower : {None, float}, optional + Low-frequency cutoff for injected signals. If None, use value + provided by each injection. + distance_scale: {1, float}, optional + Factor to scale the distance of an injection with. The default is + no scaling. + + Returns + -------- + signal : float + h(t) corresponding to the injection. + """ + f_l = inj.f_lower if f_lower is None else f_lower + + name, phase_order = legacy_approximant_name(inj.waveform) + + # compute the waveform time series + hp, hc = get_td_waveform( + inj, approximant=name, delta_t=delta_t, + phase_order=phase_order, + f_lower=f_l, distance=inj.distance, + **self.extra_args) + return projector(detector_name, + inj, hp, hc, distance_scale=distance_scale) + + def end_times(self): + """Return the end times of all injections""" + return [inj.time_geocent for inj in self.table] + + @staticmethod + def write(filename, samples, write_params=None, static_args=None): + """Writes the injection samples to the given xml. + + Parameters + ---------- + filename : str + The name of the file to write to. + samples : io.FieldArray + FieldArray of parameters. + write_params : list, optional + Only write the given parameter names. All given names must be keys + in ``samples``. Default is to write all parameters in ``samples``. + static_args : dict, optional + Dictionary mapping static parameter names to values. These are + written to the ``attrs``. + """ + xmldoc = ligolw.Document() + xmldoc.appendChild(ligolw.LIGO_LW()) + simtable = lsctables.New(lsctables.SimInspiralTable) + xmldoc.childNodes[0].appendChild(simtable) + if static_args is None: + static_args = {} + if write_params is None: + write_params = samples.fieldnames + for ii in range(samples.size): + sim = lsctables.SimInspiral() + # initialize all elements to None + for col in sim.__slots__: + setattr(sim, col, None) + for field in write_params: + data = samples[ii][field] + set_sim_data(sim, field, data) + # set any static args + for (field, value) in static_args.items(): + set_sim_data(sim, field, value) + simtable.append(sim) + ligolw_utils.write_filename(xmldoc, filename, compress='auto') + + +# ----------------------------------------------------------------------------- + + +class _HDFInjectionSet(metaclass=ABCMeta): + """Manages sets of injections: reads injections from hdf files + and injects them into time series. + + Parameters + ---------- + sim_file : string + Path to an hdf file containing injections. + \**kwds : + The rest of the keyword arguments are passed to the waveform generation + function when generating injections. + + Attributes + ---------- + table + static_args + extra_args + required_params : tuple + Parameter names that must exist in the injection HDF file in order to + create an injection of that type. 
+ """ + + _tableclass = pycbc.io.FieldArray + injtype = None + required_params = () + + def __init__(self, sim_file, hdf_group=None, **kwds): + # open the file + fp = pycbc.io.HFile(sim_file, 'r') + group = fp if hdf_group is None else fp[hdf_group] + # get parameters + parameters = list(group.keys()) + # get all injection parameter values + injvals = {param: group[param][()] for param in parameters} + # make sure Numpy S strings are loaded as strings and not bytestrings + # (which could mess with approximant names, for example) + for k in injvals: + if injvals[k].dtype.kind == 'S': + injvals[k] = injvals[k].astype('U') + # if there were no variable args, then we only have a single injection + if len(parameters) == 0: + numinj = 1 + else: + numinj = tuple(injvals.values())[0].size + # add any static args in the file + try: + # ensure parameter names are string types + self.static_args = group.attrs['static_args'].astype('U') + except KeyError: + self.static_args = [] + parameters.extend(self.static_args) + # we'll expand the static args to be arrays with the same size as + # the other values + for param in self.static_args: + val = group.attrs[param] + # if val is a list or numpy array, we need to store it as an + # object; otherwise, we'll get a shape mismatch between fields + if isinstance(val, (np.ndarray, list, tuple)): + arr = np.empty(numinj, dtype=object) + for ii in range(numinj): + arr[ii] = val + else: + # otherwise, we can just repeat the value the needed number of + # times + arr = np.repeat(val, numinj) + # make sure any byte strings are stored as strings instead + if arr.dtype.char == 'S': + arr = arr.astype('U') + injvals[param] = arr + # make sure required parameters are provided + missing = set(self.required_params) - set(injvals.keys()) + if missing: + raise ValueError("required parameter(s) {} not found in the given " + "injection file".format(', '.join(missing))) + # initialize the table + self.table = self._tableclass.from_kwargs(**injvals) + # save the extra arguments + self.extra_args = kwds + fp.close() + + @abstractmethod + def apply(self, strain, detector_name, distance_scale=1, + simulation_ids=None, inj_filter_rejector=None, + **kwargs): + """Adds injections to a detector's time series.""" + pass + + @abstractmethod + def make_strain_from_inj_object(self, inj, delta_t, detector_name, + distance_scale=1, **kwargs): + """Make a h(t) strain time-series from an injection object. + """ + pass + + @abstractmethod + def end_times(self): + """Return the end times of all injections""" + pass + + @abstractmethod + def supported_approximants(self): + """Return a list of the supported approximants.""" + pass + + @classmethod + def write(cls, filename, samples, write_params=None, static_args=None, + **metadata): + """Writes the injection samples to the given hdf file. + + Parameters + ---------- + filename : str + The name of the file to write to. + samples : io.FieldArray + FieldArray of parameters. + write_params : list, optional + Only write the given parameter names. All given names must be keys + in ``samples``. Default is to write all parameters in ``samples``. + static_args : dict, optional + Dictionary mapping static parameter names to values. These are + written to the ``attrs``. + \**metadata : + All other keyword arguments will be written to the file's attrs. 
+ """ + with pycbc.io.HFile(filename, 'w') as fp: + # write metadata + if static_args is None: + static_args = {} + fp.attrs["static_args"] = list(map(str, static_args.keys())) + fp.attrs['injtype'] = cls.injtype + for key, val in metadata.items(): + fp.attrs[key] = val + if write_params is None: + write_params = samples.fieldnames + for arg, val in static_args.items(): + try: + fp.attrs[arg] = val + except TypeError: + # can get this in python 3 if the val was numpy.str_ type + # try decoding it and writing + fp.attrs[arg] = str(val) + for field in write_params: + try: + fp[field] = samples[field] + except TypeError as e: + # can get this in python 3 if the val was a numpy.str_ type + # we'll try again as a string type + if samples[field].dtype.char == 'U': + fp[field] = samples[field].astype('S') + else: + raise e + + +
+[docs] +class CBCHDFInjectionSet(_HDFInjectionSet): + """Manages CBC injections. + """ + _tableclass = pycbc.io.WaveformArray + injtype = 'cbc' + required_params = ('tc',) + +
+[docs] + def apply(self, strain, detector_name, f_lower=None, distance_scale=1, + simulation_ids=None, + inj_filter_rejector=None, + injection_sample_rate=None,): + """Add injections (as seen by a particular detector) to a time series. + + Parameters + ---------- + strain : TimeSeries + Time series to inject signals into, of type float32 or float64. + detector_name : string + Name of the detector used for projecting injections. + f_lower : {None, float}, optional + Low-frequency cutoff for injected signals. If None, use value + provided by each injection. + distance_scale: {1, float}, optional + Factor to scale the distance of an injection with. The default is + no scaling. + simulation_ids: iterable, optional + If given, only inject signals with the given simulation IDs. + inj_filter_rejector: InjFilterRejector instance; optional, default=None + If given send each injected waveform to the InjFilterRejector + instance so that it can store a reduced representation of that + injection if necessary. + injection_sample_rate: float, optional + The sample rate to generate the signal before injection + + Returns + ------- + None + + Raises + ------ + TypeError + For invalid types of `strain`. + """ + if strain.dtype not in (float32, float64): + raise TypeError("Strain dtype must be float32 or float64, not " \ + + str(strain.dtype)) + + lalstrain = strain.lal() + if self.table[0]['approximant'] in fd_det: + t0 = float(strain.start_time) + t1 = float(strain.end_time) + else: + earth_travel_time = lal.REARTH_SI / lal.C_SI + t0 = float(strain.start_time) - earth_travel_time + t1 = float(strain.end_time) + earth_travel_time + + # pick lalsimulation injection function + add_injection = injection_func_map[strain.dtype] + + delta_t = strain.delta_t + if injection_sample_rate is not None: + delta_t = 1.0 / injection_sample_rate + + injections = self.table + if simulation_ids: + injections = injections[list(simulation_ids)] + + injected_ids = [] + for ii, inj in enumerate(injections): + f_l = inj.f_lower if f_lower is None else f_lower + # roughly estimate if the injection may overlap with the segment + # Add 2s to end_time to account for ringdown and light-travel delay + end_time = inj.tc + 2 + inj_length = tau0_from_mass1_mass2(inj.mass1, inj.mass2, f_l) + # Start time is taken as twice approx waveform length with a 1s + # safety buffer + start_time = inj.tc - 2 * (inj_length + 1) + if end_time < t0 or start_time > t1: + continue + signal = self.make_strain_from_inj_object(inj, delta_t, + detector_name, f_lower=f_l, + distance_scale=distance_scale) + signal = resample_to_delta_t(signal, strain.delta_t, method='ldas') + if float(signal.start_time) > t1: + continue + + signal = signal.astype(strain.dtype) + signal_lal = signal.lal() + add_injection(lalstrain, signal_lal, None) + injected_ids.append(ii) + if inj_filter_rejector is not None: + inj_filter_rejector.generate_short_inj_from_inj(signal, ii) + + strain.data[:] = lalstrain.data.data[:] + + injected = copy.copy(self) + injected.table = injections[np.array(injected_ids).astype(int)] + if inj_filter_rejector is not None: + if hasattr(inj_filter_rejector, 'injected'): + prev_p = inj_filter_rejector.injection_params + prev_id = inj_filter_rejector.injection_ids + injected = np.concatenate([prev_p, injected]) + injected_ids = np.concatenate([prev_id, injected_ids]) + + inj_filter_rejector.injection_params = injected + inj_filter_rejector.injection_ids = injected_ids + return injected
+ + +
+[docs] + def make_strain_from_inj_object(self, inj, delta_t, detector_name, + f_lower=None, distance_scale=1): + """Make a h(t) strain time-series from an injection object. + + Parameters + ----------- + inj : injection object + The injection object to turn into a strain h(t). Can be any + object which has waveform parameters as attributes, such as an + element in a ``WaveformArray``. + delta_t : float + Sample rate to make injection at. + detector_name : string + Name of the detector used for projecting injections. + f_lower : {None, float}, optional + Low-frequency cutoff for injected signals. If None, use value + provided by each injection. + distance_scale: {1, float}, optional + Factor to scale the distance of an injection with. The default is + no scaling. + + Returns + -------- + signal : float + h(t) corresponding to the injection. + """ + if f_lower is None: + f_l = inj.f_lower + else: + f_l = f_lower + + if inj['approximant'] in fd_det: + strain = get_td_det_waveform_from_fd_det( + inj, delta_t=delta_t, f_lower=f_l, + ifos=detector_name, **self.extra_args)[detector_name] + strain /= distance_scale + else: + # compute the waveform time series + hp, hc = get_td_waveform(inj, delta_t=delta_t, f_lower=f_l, + **self.extra_args) + strain = projector(detector_name, + inj, hp, hc, distance_scale=distance_scale) + return strain
+ + +
+[docs] + def end_times(self): + """Return the end times of all injections""" + return self.table.tc
+ + +
+[docs] + @staticmethod + def supported_approximants(): + all_apprxs = [] + for d in [waveform.waveform.td_wav, waveform.waveform.fd_wav]: + for key in d: + all_apprxs.extend(d[key]) + all_apprxs.extend(waveform.waveform.fd_det) + return list(set(all_apprxs))
+
+ + + +
+[docs] +class RingdownHDFInjectionSet(_HDFInjectionSet): + """Manages a ringdown injection: reads injection from hdf file + and injects it into time series. + """ + injtype = 'ringdown' + required_params = ('tc',) + +
+[docs] + def apply(self, strain, detector_name, distance_scale=1, + simulation_ids=None, inj_filter_rejector=None, + injection_sample_rate=None): + """Add injection (as seen by a particular detector) to a time series. + + Parameters + ---------- + strain : TimeSeries + Time series to inject signals into, of type float32 or float64. + detector_name : string + Name of the detector used for projecting injections. + distance_scale: float, optional + Factor to scale the distance of an injection with. The default (=1) + is no scaling. + simulation_ids: iterable, optional + If given, only inject signals with the given simulation IDs. + inj_filter_rejector: InjFilterRejector instance, optional + Not implemented. If not ``None``, a ``NotImplementedError`` will + be raised. + injection_sample_rate: float, optional + The sample rate to generate the signal before injection + + Returns + ------- + None + + Raises + ------ + NotImplementedError + If an ``inj_filter_rejector`` is provided. + TypeError + For invalid types of `strain`. + """ + if inj_filter_rejector is not None: + raise NotImplementedError("Ringdown injections do not support " + "inj_filter_rejector") + if strain.dtype not in (float32, float64): + raise TypeError("Strain dtype must be float32 or float64, not " \ + + str(strain.dtype)) + + lalstrain = strain.lal() + + # pick lalsimulation injection function + add_injection = injection_func_map[strain.dtype] + + delta_t = strain.delta_t + if injection_sample_rate is not None: + delta_t = 1.0 / injection_sample_rate + + injections = self.table + if simulation_ids: + injections = injections[list(simulation_ids)] + for ii in range(injections.size): + injection = injections[ii] + signal = self.make_strain_from_inj_object( + injection, delta_t, detector_name, + distance_scale=distance_scale) + signal = resample_to_delta_t(signal, strain.delta_t, method='ldas') + signal = signal.astype(strain.dtype) + signal_lal = signal.lal() + add_injection(lalstrain, signal_lal, None) + + strain.data[:] = lalstrain.data.data[:]
+ + +
+[docs] + def make_strain_from_inj_object(self, inj, delta_t, detector_name, + distance_scale=1): + """Make a h(t) strain time-series from an injection object as read from + an hdf file. + + Parameters + ----------- + inj : injection object + The injection object to turn into a strain h(t). + delta_t : float + Sample rate to make injection at. + detector_name : string + Name of the detector used for projecting injections. + distance_scale: float, optional + Factor to scale the distance of an injection with. The default (=1) + is no scaling. + + Returns + -------- + signal : float + h(t) corresponding to the injection. + """ + # compute the waveform time series + hp, hc = ringdown_td_approximants[inj['approximant']]( + inj, delta_t=delta_t, **self.extra_args) + return projector(detector_name, + inj, hp, hc, distance_scale=distance_scale)
+ + +
+[docs] + def end_times(self): + """Return the approximate end times of all injections. + + Currently, this just assumes all ringdowns are 2 seconds long. + """ + # the start times are the tcs + tcs = self.table.tc + # FIXME: this could be figured out using the ringdown module + return tcs + 2
+ + +
+[docs] + @staticmethod + def supported_approximants(): + return list(waveform.ringdown_td_approximants.keys())
+
+ + + +
+[docs] +class IncoherentFromFileHDFInjectionSet(_HDFInjectionSet): + """Manages injecting an arbitrary time series loaded from a file. + + The injections must have the following attributes set: + + * ``filename``: (str) the name of the file to load containing the time + series. The file type and format can be a frame file or anything + understood by :py:func:`pycbc.types.timeseries.load_timeseries`. If a + frame file (ends in ``.gwf``) is specified, a ``channel`` attribute must + also be set. + + * ``DETECTOR_gps_time``: (float) The GPS time at which the time series + should be added to the ``DETECTOR`` data, where ``DETECTOR`` is the name + of the instrument to inject into (e.g., ``h1_gps_time``). **The time + series will only be injected into a detector if a GPS time is given for + that detector.** Set to -inf, nan, or do not provide a GPS time for a + particular detector if you do not want to inject into that detector. + + * ``ref_point``: (str or float) What to use as the reference time of the + injected time series. The time series will be injected into the detector + such that the ``ref_point`` in the time series occurs at the specifed + ``DETECTOR_gps_time``. Options are: ``'start'``, ``'end'``, ``'center'``, + ``'absmax'``, or a float giving the number of seconds into the time + series. + + In addition, the following attributes may optionally be provided: + + * ``channel``: (str): If the filename points to a frame file, the channel + to load in that file. Must be provided for frame files. + + * ``DETECTOR_phase_shift``: (float) Apply a phase shift to the time series + before adding it to detector ``DETECTOR``. + + * ``DETECTOR_amp_scale``: (float) Scale the amplitude by the given amount + before adding it to detector ``DETECTOR``. + + * ``slice_start``: (float) Slice the time series starting at + ``ref_point + slice_start`` before injecting into the data. Measured in + seconds. + + * ``slice_end``: (float) Slice the time series ending at + ``ref_point + slice_end`` before injecting into the data. Measured in + seconds. + + * ``left_taper_width``: (float) Taper the start of the time series (after + slicing) using half a kaiser window over the given number of seconds. + See `:py:func:waveform.utils.td_taper` for more details. + + * ``right_taper_width``: (float) Taper the end of the time series (after + slicing) using half a kaiser window over the given number of seconds. + See `:py:func:waveform.utils.td_taper` for more details. + + The signal will be resampled to the same sample rate as the data it is + being injected into. + + In order to use with ``pycbc_create_injections``, set the ``approximant`` + name to ``'incoherent_from_file'``. + """ + injtype = 'incoherent_from_file' + required_params = ('filename', 'ref_point') + _buffersize = 10 + _buffer = None + _rtbuffer = None + +
+[docs] + def end_times(self): + raise NotImplementedError("IncoherentFromFile times cannot be " + "determined without loading time series")
+ + +
+[docs] + @staticmethod + def supported_approximants(): + return ['incoherent_from_file']
+ + +
+[docs] + def loadts(self, inj): + """Loads an injection time series. + + After the first time a time series is loaded it will be added to an + internal buffer for faster access in case another injection uses the + same series. + """ + if self._buffer is None: + # create the buffer + self._buffer = LimitedSizeDict(size_limit=self._buffersize) + try: + return self._buffer[inj.filename] + except KeyError: + pass + # not in buffer, so load + if inj.filename.endswith('.gwf'): + try: + channel = inj.channel + except AttributeError as _err: + # Py3.XX: uncomment the "from _err" when we drop 2.7 + raise ValueError("Must provide a channel for " + "frame files") #from _err + ts = frame.read_frame(inj.filename, channel) + else: + ts = load_timeseries(inj.filename) + # cache + self._buffer[inj.filename] = ts + return ts
+ + +
+[docs] + def set_ref_time(self, inj, ts): + """Sets t=0 of the given time series based on what the given + injection's ``ref_point`` is. + """ + try: + ref_point = inj.ref_point + except AttributeError as _err: + # Py3.XX: uncomment the "from _err" when we drop 2.7 + raise ValueError("Must provide a ref_point for {} injections" + .format(self.injtype)) #from _err + # try to get from buffer + if self._rtbuffer is None: + self._rtbuffer = LimitedSizeDict(size_limit=self._buffersize) + try: + reftime = self._rtbuffer[inj.filename, ref_point] + except KeyError: + if ref_point == "start": + reftime = 0. + elif ref_point == "end": + reftime = -len(ts)*ts.delta_t + elif ref_point == "center": + reftime = -len(ts)*ts.delta_t/2. + elif ref_point == "absmax": + reftime = -ts.abs_arg_max()*ts.delta_t + elif isinstance(ref_point, (float, int)): + reftime = -float(ref_point) + else: + raise ValueError("Unrecognized ref_point {} provided" + .format(ref_point)) + self._rtbuffer[inj.filename, ref_point] = reftime + ts._epoch = reftime
+ + +
+[docs] + @staticmethod + def slice_and_taper(inj, ts): + """Slices and tapers a timeseries based on the injection settings. + + This assumes that ``set_ref_time`` has been applied to the timeseries + first. A copy of the time series will be returned even if no slicing + or tapering is done. + """ + try: + tstart = inj.slice_start + except AttributeError: + tstart = ts.start_time + try: + tend = inj.slice_end + except AttributeError: + tend = ts.end_time + ts = ts.time_slice(tstart, tend).copy() + # now taper + try: + twidth = inj.left_taper_width + except AttributeError: + twidth = 0 + if twidth: + ts = wfutils.td_taper(ts, ts.start_time, ts.start_time+twidth, + side='left') + try: + twidth = inj.right_taper_width + except AttributeError: + twidth = 0 + if twidth: + ts = wfutils.td_taper(ts, ts.end_time-twidth, ts.end_time, + side='right') + return ts
+ + +
+[docs] + def apply(self, strain, detector_name, distance_scale=1, + injection_sample_rate=None, inj_filter_rejector=None): + if inj_filter_rejector is not None: + raise NotImplementedError("IncoherentFromFile injections do not " + "support inj_filter_rejector") + if injection_sample_rate is not None: + delta_t = 1./injection_sample_rate + else: + delta_t = strain.delta_t + injections = self.table + for inj in injections: + # Check if we should inject or not... + # loading the time series like this is a bit brute-force, since + # we really only need to know the delta_t and length of the + # timeseries if the ref_point is anything but absmax, but that + # would require adding logic to figure out how to get that metadata + # based on the filetype and ref_point + ts = self.loadts(inj) + # set the ref time + self.set_ref_time(inj, ts) + # determine if we inject or not based on the times + try: + injtime = inj['{}_gps_time'.format(detector_name).lower()] + except ValueError: + injtime = -np.inf + if np.isnan(injtime): + # nan means don't inject + injtime = -np.inf + start_time = injtime + ts.start_time + end_time = injtime + ts.end_time + inject = (start_time < strain.end_time and + end_time > strain.start_time) + if inject: + ts = self.make_strain_from_inj_object( + inj, delta_t, detector_name, + distance_scale=distance_scale, ts=ts) + if ts.delta_t != strain.delta_t: + ts = resample_to_delta_t(ts, strain.delta_t, method='ldas') + strain.inject(ts, copy=False)
+ + +
+[docs] + def make_strain_from_inj_object(self, inj, delta_t, detector_name, + distance_scale=1, ts=None): + if ts is None: + ts = load_timeseries(inj.filename) + self.set_ref_time(inj, ts) + # slice and taper + ts = self.slice_and_taper(inj, ts) + # shift reference to the detector time + ts._epoch += inj['{}_gps_time'.format(detector_name).lower()] + # resample + ts = resample_to_delta_t(ts, delta_t, method='ldas') + # apply any phase shift + try: + phase_shift = inj[ + '{}_phase_shift'.format(detector_name).lower()] + except ValueError: + phase_shift = 0 + if phase_shift: + fs = ts.to_frequencyseries() + fs *= np.exp(1j*phase_shift) + ts = fs.to_timeseries() + # apply any scaling + try: + amp_scale = inj[ + '{}_amp_scale'.format(detector_name).lower()] + except ValueError: + amp_scale = 1. + amp_scale /= distance_scale + ts *= amp_scale + return ts
+
+ + + +hdfinjtypes = { + CBCHDFInjectionSet.injtype: CBCHDFInjectionSet, + RingdownHDFInjectionSet.injtype: RingdownHDFInjectionSet, + IncoherentFromFileHDFInjectionSet.injtype: + IncoherentFromFileHDFInjectionSet, +} + + +
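A hedged sketch of writing an ``incoherent_from_file`` injection set through the parent ``InjectionSet.write`` interface; the file names and GPS time are placeholders, and ``burst.hdf`` is assumed to hold a time series readable by ``load_timeseries``.

import numpy as np
from pycbc.io import FieldArray
from pycbc.inject import InjectionSet

# One injection, added to H1 only, centred on the given GPS time.
samples = FieldArray.from_kwargs(
    filename=np.array(['burst.hdf']),        # assumed existing time-series file
    ref_point=np.array(['center']),
    h1_gps_time=np.array([1187008882.0]))
InjectionSet.write('fromfile_injections.hdf', samples,
                   static_args={'approximant': 'incoherent_from_file'})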
+[docs] +def get_hdf_injtype(sim_file): + """Gets the HDFInjectionSet class to use with the given file. + + This looks for the ``injtype`` in the given file's top level ``attrs``. If + that attribute isn't set, will default to :py:class:`CBCHDFInjectionSet`. + + Parameters + ---------- + sim_file : str + Name of the file. The file must already exist. + + Returns + ------- + HDFInjectionSet : + The type of HDFInjectionSet to use. + """ + with pycbc.io.HFile(sim_file, 'r') as fp: + try: + ftype = fp.attrs['injtype'] + except KeyError: + ftype = CBCHDFInjectionSet.injtype + try: + return hdfinjtypes[ftype] + except KeyError: + # may get a key error if the file type was stored as unicode instead + # of string; if so, try decoding it + try: + ftype = str(ftype.decode()) + except AttributeError: + # not actually a byte error; passing will reraise the KeyError + pass + return hdfinjtypes[ftype]
+ + + +
+[docs] +def hdf_injtype_from_approximant(approximant): + """Gets the HDFInjectionSet class to use with the given approximant. + + Parameters + ---------- + approximant : str + Name of the approximant. + + Returns + ------- + HDFInjectionSet : + The type of HDFInjectionSet to use. + """ + retcls = None + for cls in hdfinjtypes.values(): + if approximant in cls.supported_approximants(): + retcls = cls + if retcls is None: + # none were found, raise an error + raise ValueError("Injection file type unknown for approximant {}" + .format(approximant)) + return retcls
+ + + +
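For example, assuming ``'IMRPhenomD'`` appears in the CBC approximant list returned by ``CBCHDFInjectionSet.supported_approximants()``:

injcls = hdf_injtype_from_approximant('IMRPhenomD')  # expected to resolve to CBCHDFInjectionSet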
+[docs] +class InjectionSet(object): + """Manages sets of injections and injects them into time series. + + Injections are read from either LIGOLW XML files or HDF files. + + Parameters + ---------- + sim_file : string + Path to an hdf file or a LIGOLW XML file that contains a + SimInspiralTable. + \**kwds : + The rest of the keyword arguments are passed to the waveform generation + function when generating injections. + + Attributes + ---------- + table + """ + + def __init__(self, sim_file, **kwds): + ext = os.path.basename(sim_file) + if ext.endswith(('.xml', '.xml.gz', '.xmlgz')): + self._injhandler = _XMLInjectionSet(sim_file, **kwds) + self.indoc = self._injhandler.indoc + else: + # assume hdf file + self._injhandler = get_hdf_injtype(sim_file)(sim_file, **kwds) + self.table = self._injhandler.table + self.extra_args = self._injhandler.extra_args + self.apply = self._injhandler.apply + self.make_strain_from_inj_object = \ + self._injhandler.make_strain_from_inj_object + self.end_times = self._injhandler.end_times + +
+[docs] + @staticmethod + def write(filename, samples, write_params=None, static_args=None, + injtype=None, **metadata): + """Writes the injection samples to the given hdf file. + + Parameters + ---------- + filename : str + The name of the file to write to. + samples : io.FieldArray + FieldArray of parameters. + write_params : list, optional + Only write the given parameter names. All given names must be keys + in ``samples``. Default is to write all parameters in ``samples``. + static_args : dict, optional + Dictionary mapping static parameter names to values. These are + written to the ``attrs``. + injtype : str, optional + Specify which `HDFInjectionSet` class to use for writing. If not + provided, will try to determine it by looking for an approximant in + the ``static_args``, followed by the ``samples``. + \**metadata : + All other keyword arguments will be written to the file's attrs. + """ + # DELETE the following "if" once xml is dropped + ext = os.path.basename(filename) + if ext.endswith(('.xml', '.xml.gz', '.xmlgz')): + _XMLInjectionSet.write(filename, samples, write_params, + static_args) + else: + # try determine the injtype if it isn't given + if injtype is None: + if static_args is not None and 'approximant' in static_args: + injcls = hdf_injtype_from_approximant( + static_args['approximant']) + elif 'approximant' in samples.fieldnames: + apprxs = np.unique(samples['approximant']) + # make sure they all correspond to the same injection type + injcls = [hdf_injtype_from_approximant(a) for a in apprxs] + if not all(c == injcls[0] for c in injcls): + raise ValueError("injections must all be of the same " + "type") + injcls = injcls[0] + else: + raise ValueError("Could not find an approximant in the " + "static args or samples to determine the " + "injection type. Please specify an " + "injtype instead.") + else: + injcls = hdfinjtypes[injtype] + injcls.write(filename, samples, write_params, static_args, + **metadata)
+ + +
+[docs] + @staticmethod + def from_cli(opt): + """Return an instance of InjectionSet configured as specified + on the command line. + """ + if opt.injection_file is None: + return None + + kwa = {} + if opt.injection_f_ref is not None: + kwa['f_ref'] = opt.injection_f_ref + if opt.injection_f_final is not None: + kwa['f_final'] = opt.injection_f_final + return InjectionSet(opt.injection_file, **kwa)
+
+ + + +
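As an end-to-end sketch under assumed parameter values (masses, sky location and times below are placeholders, not taken from the source), a CBC injection file can be written with ``InjectionSet.write`` and then applied to a strain time series:

import numpy as np
from pycbc.io import FieldArray
from pycbc.types import TimeSeries
from pycbc.inject import InjectionSet

# One illustrative injection; 'approximant' and 'f_lower' are passed as
# static args so every row shares them.
samples = FieldArray.from_kwargs(
    mass1=np.array([30.0]), mass2=np.array([25.0]),
    tc=np.array([1187008882.4]),
    ra=np.array([1.2]), dec=np.array([-0.3]),
    polarization=np.array([0.0]), distance=np.array([400.0]))
InjectionSet.write('injections.hdf', samples,
                   static_args={'approximant': 'IMRPhenomD', 'f_lower': 20.0})

injset = InjectionSet('injections.hdf')
strain = TimeSeries(np.zeros(64 * 4096), delta_t=1.0 / 4096,
                    epoch=1187008850)   # 64 s of zeros spanning the injection
injset.apply(strain, 'H1')              # f_lower falls back to the file value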
+[docs] +class SGBurstInjectionSet(object): + """Manages sets of sine-Gaussian burst injections: reads injections + from LIGOLW XML files and injects them into time series. + + Parameters + ---------- + sim_file : string + Path to a LIGOLW XML file containing a SimBurstTable + with injection definitions. + + Attributes + ---------- + indoc + table + """ + + def __init__(self, sim_file, **kwds): + self.indoc = ligolw_utils.load_filename( + sim_file, False, contenthandler=LIGOLWContentHandler) + self.table = lsctables.SimBurstTable.get_table(self.indoc) + self.extra_args = kwds + +
+[docs] + def apply(self, strain, detector_name, f_lower=None, distance_scale=1): + """Add injections (as seen by a particular detector) to a time series. + + Parameters + ---------- + strain : TimeSeries + Time series to inject signals into, of type float32 or float64. + detector_name : string + Name of the detector used for projecting injections. + f_lower : {None, float}, optional + Low-frequency cutoff for injected signals. If None, use value + provided by each injection. + distance_scale: {1, foat}, optional + Factor to scale the distance of an injection with. The default is + no scaling. + + Returns + ------- + None + + Raises + ------ + TypeError + For invalid types of `strain`. + """ + if strain.dtype not in (float32, float64): + raise TypeError("Strain dtype must be float32 or float64, not " \ + + str(strain.dtype)) + + lalstrain = strain.lal() + #detector = Detector(detector_name) + earth_travel_time = lal.REARTH_SI / lal.C_SI + t0 = float(strain.start_time) - earth_travel_time + t1 = float(strain.end_time) + earth_travel_time + + # pick lalsimulation injection function + add_injection = injection_func_map[strain.dtype] + + for inj in self.table: + # roughly estimate if the injection may overlap with the segment + end_time = inj.time_geocent + #CHECK: This is a hack (10.0s); replace with an accurate estimate + inj_length = 10.0 + eccentricity = 0.0 + polarization = 0.0 + start_time = end_time - 2 * inj_length + if end_time < t0 or start_time > t1: + continue + + # compute the waveform time series + hp, hc = sim.SimBurstSineGaussian(float(inj.q), + float(inj.frequency),float(inj.hrss),float(eccentricity), + float(polarization),float(strain.delta_t)) + hp = TimeSeries(hp.data.data[:], delta_t=hp.deltaT, epoch=hp.epoch) + hc = TimeSeries(hc.data.data[:], delta_t=hc.deltaT, epoch=hc.epoch) + hp._epoch += float(end_time) + hc._epoch += float(end_time) + if float(hp.start_time) > t1: + continue + + # compute the detector response, taper it if requested + # and add it to the strain + strain = wfutils.taper_timeseries(strain, inj.taper) + signal_lal = hp.astype(strain.dtype).lal() + add_injection(lalstrain, signal_lal, None) + + strain.data[:] = lalstrain.data.data[:]
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/inject/injfilterrejector.html b/latest/html/_modules/pycbc/inject/injfilterrejector.html new file mode 100644 index 00000000000..c84d83b75b1 --- /dev/null +++ b/latest/html/_modules/pycbc/inject/injfilterrejector.html @@ -0,0 +1,538 @@ + + + + + + pycbc.inject.injfilterrejector — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.inject.injfilterrejector

+# Copyright (C) 2013 Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module contains functions to filter injections with only useful templates.
+
+This module implements a set of checks to test for each segment and template
+combination whether injections contained within the segment are sufficiently
+"similar" to the template to require a matched-filter. There are a few ways of
+testing the "similarity" of templates and injections.
+
+* A chirp time threshold rejects templates if chirp time difference is large
+* A coarse match threshold rejects templates if a coarse overlap is small
+
+"""
+
+import numpy as np
+from pycbc import DYN_RANGE_FAC
+from pycbc.filter import match
+from pycbc.pnutils import nearest_larger_binary_number
+from pycbc.pnutils import mass1_mass2_to_tau0_tau3
+from pycbc.types import FrequencySeries, zeros
+from pycbc.types import MultiDetOptionAction
+
+_injfilterrejector_group_help = \
+    ("Options that, if injections are present in "
+     "this run, are responsible for performing pre-checks between injections "
+     "in the data being filtered and the current search template to determine "
+     "if the template has any chance of actually detecting the injection. "
+     "The parameters of this test are given by the various options below. "
+     "The --injection-filter-rejector-chirp-time-window and "
+     "--injection-filter-rejector-match-threshold options need to be provided "
+     "if those tests are desired. Other options will take default values "
+     "unless overriden. More details on these options follow.")
+
+_injfilterer_cthresh_help = \
+    ("If this value is not None and an "
+     "injection file is given then we will calculate the difference in "
+     "chirp time (tau_0) between the template and each injection in the "
+     "analysis segment. If the difference is greate than this threshold for "
+     "all injections then filtering is not performed. By default this will "
+     "be None.")
+_injfilterer_mthresh_help = \
+    ("If this value is not None and an "
+     "injection file is provided then we will calculate a 'coarse match' "
+     "between the template and each injection in the analysis segment. If the "
+     "match is less than this threshold for all injections then filtering is "
+     "not performed. Parameters for the 'coarse match' follow. By default "
+     "this value will be None.")
+_injfilterer_deltaf_help = \
+    ("If injections are present and a match threshold is "
+     "provided, this option specifies the frequency spacing that will be used "
+     "for injections, templates and PSD when computing the 'coarse match'. "
+     "Templates will be generated directly with this spacing. The PSD and "
+     "injections will be resampled.")
+_injfilterer_fmax_help = \
+    ("If injections are present and a match threshold is "
+     "provided, this option specifies the maximum frequency that will be used "
+     "for injections, templates and PSD when computing the 'coarse match'. "
+     "Templates will be generated directly with this max frequency. The PSD "
+     "and injections' frequency series will be truncated.")
+_injfilterer_buffer_help = \
+    ("If injections are present and either a match "
+     "threshold or a chirp-time window is given, we will determine if "
+     "injections are 'in' the specified analysis chunk by using the end "
+     "times. If this value is non-zero the analysis chunk is extended on both "
+     "sides by this amount before determining if injections are within the "
+     "given window.")
+_injfilterer_flower_help = \
+    ("If injections are present and either a match "
+     "threshold or a chirp-time window is given, this value is used to set "
+     "the lower frequency for determine chirp times or for calculating "
+     "matches. If this value is None the lower frequency used for the full "
+     "matched-filter is used. Otherwise this value is used.")
+
+
+
+[docs] +def insert_injfilterrejector_option_group(parser): + """Add options for injfilterrejector to executable.""" + injfilterrejector_group = \ + parser.add_argument_group(_injfilterrejector_group_help) + curr_arg = "--injection-filter-rejector-chirp-time-window" + injfilterrejector_group.add_argument(curr_arg, type=float, default=None, + help=_injfilterer_cthresh_help) + curr_arg = "--injection-filter-rejector-match-threshold" + injfilterrejector_group.add_argument(curr_arg, type=float, default=None, + help=_injfilterer_mthresh_help) + curr_arg = "--injection-filter-rejector-coarsematch-deltaf" + injfilterrejector_group.add_argument(curr_arg, type=float, default=1., + help=_injfilterer_deltaf_help) + curr_arg = "--injection-filter-rejector-coarsematch-fmax" + injfilterrejector_group.add_argument(curr_arg, type=float, default=256., + help=_injfilterer_fmax_help) + curr_arg = "--injection-filter-rejector-seg-buffer" + injfilterrejector_group.add_argument(curr_arg, type=int, default=10, + help=_injfilterer_buffer_help) + curr_arg = "--injection-filter-rejector-f-lower" + injfilterrejector_group.add_argument(curr_arg, type=int, default=None, + help=_injfilterer_flower_help)
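+# Hedged usage sketch of the option group defined above: wire the options into
+# an argparse parser and read one of them back (the argv values here are
+# illustrative only).
+import argparse
+
+_example_parser = argparse.ArgumentParser()
+insert_injfilterrejector_option_group(_example_parser)
+_example_opts = _example_parser.parse_args(
+    ['--injection-filter-rejector-chirp-time-window', '2'])
+assert _example_opts.injection_filter_rejector_chirp_time_window == 2.0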
+ + + +
+[docs] +def insert_injfilterrejector_option_group_multi_ifo(parser): + """Add options for injfilterrejector to executable.""" + injfilterrejector_group = \ + parser.add_argument_group(_injfilterrejector_group_help) + curr_arg = "--injection-filter-rejector-chirp-time-window" + injfilterrejector_group.add_argument( + curr_arg, type=float, default=None, nargs='+', metavar='IFO:VALUE', + action=MultiDetOptionAction, help=_injfilterer_cthresh_help) + curr_arg = "--injection-filter-rejector-match-threshold" + injfilterrejector_group.add_argument( + curr_arg, type=float, default=None, nargs='+', metavar='IFO:VALUE', + action=MultiDetOptionAction, help=_injfilterer_mthresh_help) + curr_arg = "--injection-filter-rejector-coarsematch-deltaf" + injfilterrejector_group.add_argument( + curr_arg, type=float, default=1., nargs='+', metavar='IFO:VALUE', + action=MultiDetOptionAction, help=_injfilterer_deltaf_help) + curr_arg = "--injection-filter-rejector-coarsematch-fmax" + injfilterrejector_group.add_argument( + curr_arg, type=float, default=256., nargs='+', metavar='IFO:VALUE', + action=MultiDetOptionAction, help=_injfilterer_fmax_help) + curr_arg = "--injection-filter-rejector-seg-buffer" + injfilterrejector_group.add_argument( + curr_arg, type=int, default=10, nargs='+', metavar='IFO:VALUE', + action=MultiDetOptionAction, help=_injfilterer_buffer_help) + curr_arg = "--injection-filter-rejector-f-lower" + injfilterrejector_group.add_argument( + curr_arg, type=int, default=None, help=_injfilterer_flower_help, + metavar='IFO:VALUE', action=MultiDetOptionAction, nargs='+')
+ + + +
+[docs] +class InjFilterRejector(object): + """Class for holding parameters for using injection/template pre-filtering. + + This class is responsible for identifying where a matched-filter operation + between templates and data is unnecessary because the injections contained + in the data will not match well with the given template. + """ + + def __init__(self, injection_file, chirp_time_window, + match_threshold, f_lower, coarsematch_deltaf=1., + coarsematch_fmax=256, seg_buffer=10): + """Initialise InjFilterRejector instance.""" + # Determine if InjFilterRejector is to be enabled + if injection_file is None or injection_file == 'False' or\ + (chirp_time_window is None and match_threshold is None): + self.enabled = False + return + else: + self.enabled = True + + # Store parameters + self.chirp_time_window = chirp_time_window + self.match_threshold = match_threshold + self.coarsematch_deltaf = coarsematch_deltaf + self.coarsematch_fmax = coarsematch_fmax + self.seg_buffer = seg_buffer + self.f_lower = f_lower + assert(self.f_lower is not None) + + # Variables for storing arrays (reduced injections, memory + # for templates, reduced PSDs ...) + self.short_injections = {} + self._short_template_mem = None + self._short_psd_storage = {} + self._short_template_id = None +
+[docs] + @classmethod + def from_cli(cls, opt): + """Create an InjFilterRejector instance from command-line options.""" + injection_file = opt.injection_file + chirp_time_window = \ + opt.injection_filter_rejector_chirp_time_window + match_threshold = opt.injection_filter_rejector_match_threshold + coarsematch_deltaf = opt.injection_filter_rejector_coarsematch_deltaf + coarsematch_fmax = opt.injection_filter_rejector_coarsematch_fmax + seg_buffer = opt.injection_filter_rejector_seg_buffer + if opt.injection_filter_rejector_f_lower is not None: + f_lower = opt.injection_filter_rejector_f_lower + else: + # NOTE: Uses main low-frequency cutoff as default option. This may + # need some editing if using this in multi_inspiral, which I + # leave for future work, or if this is being used in another + # code which doesn't have --low-frequency-cutoff + f_lower = opt.low_frequency_cutoff + return cls(injection_file, chirp_time_window, match_threshold, + f_lower, coarsematch_deltaf=coarsematch_deltaf, + coarsematch_fmax=coarsematch_fmax, + seg_buffer=seg_buffer)
+ + +
+[docs] + @classmethod + def from_cli_single_ifo(cls, opt, ifo): + """Create an InjFilterRejector instance from command-line options.""" + injection_file = opt.injection_file[ifo] + chirp_time_window = \ + opt.injection_filter_rejector_chirp_time_window[ifo] + match_threshold = opt.injection_filter_rejector_match_threshold[ifo] + coarsematch_deltaf = \ + opt.injection_filter_rejector_coarsematch_deltaf[ifo] + coarsematch_fmax = opt.injection_filter_rejector_coarsematch_fmax[ifo] + seg_buffer = opt.injection_filter_rejector_seg_buffer[ifo] + if opt.injection_filter_rejector_f_lower[ifo] is not None: + f_lower = opt.injection_filter_rejector_f_lower[ifo] + else: + # NOTE: Uses main low-frequency cutoff as default option. This may + # need some editing if using this in multi_inspiral, which I + # leave for future work, or if this is being used in another + # code which doesn't have --low-frequency-cutoff + f_lower = opt.low_frequency_cutoff + return cls(injection_file, chirp_time_window, + match_threshold, f_lower, + coarsematch_deltaf, coarsematch_fmax, + seg_buffer=seg_buffer)
+ + +
+[docs] + @classmethod + def from_cli_multi_ifos(cls, opt, ifos): + """Create an InjFilterRejector instance from command-line options.""" + inj_filter_rejectors = {} + for ifo in ifos: + inj_filter_rejectors[ifo] = cls.from_cli_single_ifo(opt, ifo) + return inj_filter_rejectors
+ + +
+[docs] + def generate_short_inj_from_inj(self, inj_waveform, simulation_id): + """Generate and store a truncated representation of inj_waveform.""" + if not self.enabled or not self.match_threshold: + # Do nothing! + return + if simulation_id in self.short_injections: + err_msg = "An injection with simulation id " + err_msg += str(simulation_id) + err_msg += " has already been added. This suggests " + err_msg += "that your injection file contains injections with " + err_msg += "duplicate simulation_ids. This is not allowed." + raise ValueError(err_msg) + curr_length = len(inj_waveform) + new_length = int(nearest_larger_binary_number(curr_length)) + # Don't want length less than 1/delta_f + while new_length * inj_waveform.delta_t < 1./self.coarsematch_deltaf: + new_length = new_length * 2 + inj_waveform.resize(new_length) + inj_tilde = inj_waveform.to_frequencyseries() + # Dynamic range is important here! + inj_tilde_np = inj_tilde.numpy() * DYN_RANGE_FAC + delta_f = inj_tilde.get_delta_f() + new_freq_len = int(self.coarsematch_fmax / delta_f + 1) + # This shouldn't be a problem if injections are generated at + # 16384 Hz ... It is only a problem if the injection sample rate + # gives a lower Nyquist than the trunc_f_max. If this error is + # ever raised one could consider zero-padding the injection. + assert(new_freq_len <= len(inj_tilde)) + df_ratio = int(self.coarsematch_deltaf/delta_f) + inj_tilde_np = inj_tilde_np[:new_freq_len:df_ratio] + new_inj = FrequencySeries(inj_tilde_np, dtype=np.complex64, + delta_f=self.coarsematch_deltaf) + self.short_injections[simulation_id] = new_inj
+ + +
+[docs] + def template_segment_checker(self, bank, t_num, segment): + """Test if injections in segment are worth filtering with template. + + Using the current template, the current segment, and the injections within + that segment, test if the injections are sufficiently "similar" to the + template to justify actually performing a matched-filter call. + There are two parts to this test: First we check if the chirp time of + the template is within a provided window of any of the injections. If + not then stop here, it is not worth filtering this template, segment + combination for this injection set. If this check passes we compute a + match between a coarse representation of the template and a coarse + representation of each of the injections. If that match is above a + user-provided value for any of the injections then filtering can + proceed. This is currently only available if using frequency-domain + templates. + + Parameters + ----------- + FIXME + + Returns + -------- + FIXME + """ + if not self.enabled: + # If disabled, always filter (ie. return True) + return True + + # Get times covered by the segment being analyzed and add buffer + seg_start_time = segment.start_time - self.seg_buffer + seg_end_time = segment.end_time + self.seg_buffer + + # Chirp time test + if self.chirp_time_window is not None: + m1 = bank.table[t_num]['mass1'] + m2 = bank.table[t_num]['mass2'] + tau0_temp, _ = mass1_mass2_to_tau0_tau3(m1, m2, self.f_lower) + for inj in self.injection_params.table: + if isinstance(inj, np.record): + # hdf format file + end_time = inj['tc'] + else: + # must be an xml file originally + end_time = inj.geocent_end_time + \ + 1E-9 * inj.geocent_end_time_ns + + if not(seg_start_time <= end_time <= seg_end_time): + continue + tau0_inj, _ = \ + mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2, + self.f_lower) + tau_diff = abs(tau0_temp - tau0_inj) + if tau_diff <= self.chirp_time_window: + break + else: + # Gets here if all injections are outside the chirp-time window + return False + + # Coarse match test + if self.match_threshold: + if self._short_template_mem is None: + # Set the memory for the short templates + wav_len = 1 + int(self.coarsematch_fmax / + self.coarsematch_deltaf) + self._short_template_mem = zeros(wav_len, dtype=np.complex64) + + # Set the current short PSD to red_psd + try: + red_psd = self._short_psd_storage[id(segment.psd)] + except KeyError: + # PSD doesn't exist yet, so make it!
+ curr_psd = segment.psd.numpy() + step_size = int(self.coarsematch_deltaf / segment.psd.delta_f) + max_idx = int(self.coarsematch_fmax / segment.psd.delta_f) + 1 + red_psd_data = curr_psd[:max_idx:step_size] + red_psd = FrequencySeries(red_psd_data, #copy=False, + delta_f=self.coarsematch_deltaf) + self._short_psd_storage[id(curr_psd)] = red_psd + + # Set htilde to be the current short template + if not t_num == self._short_template_id: + # Set the memory for the short templates if unset + if self._short_template_mem is None: + wav_len = 1 + int(self.coarsematch_fmax / + self.coarsematch_deltaf) + self._short_template_mem = zeros(wav_len, + dtype=np.complex64) + # Generate short waveform + htilde = bank.generate_with_delta_f_and_max_freq( + t_num, self.coarsematch_fmax, self.coarsematch_deltaf, + low_frequency_cutoff=bank.table[t_num].f_lower, + cached_mem=self._short_template_mem) + self._short_template_id = t_num + self._short_template_wav = htilde + else: + htilde = self._short_template_wav + + for ii, inj in enumerate(self.injection_params.table): + if isinstance(inj, np.record): + # hdf format file + end_time = inj['tc'] + sim_id = self.injection_ids[ii] + else: + # must be an xml file originally + end_time = inj.geocent_end_time + \ + 1E-9 * inj.geocent_end_time_ns + sim_id = inj.simulation_id + + if not(seg_start_time < end_time < seg_end_time): + continue + curr_inj = self.short_injections[sim_id] + o, _ = match(htilde, curr_inj, psd=red_psd, + low_frequency_cutoff=self.f_lower) + if o > self.match_threshold: + break + else: + # Get's here if all injections are outside match threshold + return False + + return True
+
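+# Hedged sketch of the coarse-match test used by template_segment_checker
+# above: both waveforms are generated directly at the coarse frequency
+# resolution and compared with pycbc.filter.match. The approximant, masses,
+# cutoffs and the 0.95 threshold are illustrative only.
+from pycbc.filter import match
+from pycbc.waveform import get_fd_waveform
+
+coarse_df, coarse_fmax, f_low = 1.0, 256.0, 30.0
+tmplt, _ = get_fd_waveform(approximant='IMRPhenomD', mass1=30.0, mass2=30.0,
+                           delta_f=coarse_df, f_lower=f_low,
+                           f_final=coarse_fmax)
+injwf, _ = get_fd_waveform(approximant='IMRPhenomD', mass1=32.0, mass2=28.0,
+                           delta_f=coarse_df, f_lower=f_low,
+                           f_final=coarse_fmax)
+injwf.resize(len(tmplt))  # ensure equal lengths before matching
+coarse_overlap, _ = match(tmplt, injwf, psd=None, low_frequency_cutoff=f_low)
+worth_filtering = coarse_overlap > 0.95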
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/io.html b/latest/html/_modules/pycbc/io.html new file mode 100644 index 00000000000..ea0079e8465 --- /dev/null +++ b/latest/html/_modules/pycbc/io.html @@ -0,0 +1,155 @@ + + + + + + pycbc.io — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.io

+import logging
+from astropy.utils.data import download_file
+from .hdf import *
+from .record import *
+from .gracedb import *
+
+logger = logging.getLogger('pycbc.io')
+
+
+
+[docs] +def get_file(url, retry=5, **args): + """ Retrieve file with retry upon failure + + Uses the astropy download_file but adds a retry feature for flaky + connections. See astropy for full options + """ + i = 0 + while True: + i += 1 + try: + return download_file(url, **args) + except Exception as e: + logger.warning("Failed on attempt %d to download %s", i, url) + if i >= retry: + logger.error("Giving up on %s", url) + raise e
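+# Hedged usage sketch (the URL is illustrative; extra keyword arguments such
+# as `cache` are passed straight through to astropy's download_file):
+# local_path = get_file('https://example.org/data/PSD.hdf', retry=3, cache=True)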
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/io/gracedb.html b/latest/html/_modules/pycbc/io/gracedb.html new file mode 100644 index 00000000000..c84f13ddc49 --- /dev/null +++ b/latest/html/_modules/pycbc/io/gracedb.html @@ -0,0 +1,725 @@ + + + + + + pycbc.io.gracedb — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.io.gracedb

+"""
+Class and function for use in dealing with GraceDB uploads
+"""
+
+import logging
+import os
+import numpy
+import json
+import copy
+from multiprocessing.dummy import threading
+
+import lal
+from ligo.lw import ligolw
+from ligo.lw import lsctables
+from ligo.lw import utils as ligolw_utils
+
+import pycbc
+from pycbc import version as pycbc_version
+from pycbc import pnutils
+from pycbc.io.ligolw import (
+    return_empty_sngl,
+    create_process_table,
+    make_psd_xmldoc,
+    snr_series_to_xml
+)
+from pycbc.results import generate_asd_plot, generate_snr_plot
+from pycbc.results import source_color
+from pycbc.mchirp_area import calc_probabilities
+
+logger = logging.getLogger('pycbc.io.gracedb')
+
+
+
+[docs] +class CandidateForGraceDB(object): + """This class provides an interface for uploading candidates to GraceDB. + """ + + def __init__(self, coinc_ifos, ifos, coinc_results, **kwargs): + """Initialize a representation of a zerolag candidate for upload to + GraceDB. + + Parameters + ---------- + coinc_ifos: list of strs + A list of the originally triggered ifos with SNR above threshold + for this candidate, before possible significance followups. + ifos: list of strs + A list of ifos which may have triggers identified in coinc_results + for this candidate: ifos potentially contributing to significance + coinc_results: dict of values + A dictionary of values. The format is defined in + `pycbc/events/coinc.py` and matches the on-disk representation in + the hdf file for this time. + psds: dict of FrequencySeries + Dictionary providing PSD estimates for all detectors observing. + low_frequency_cutoff: float + Minimum valid frequency for the PSD estimates. + high_frequency_cutoff: float, optional + Maximum frequency considered for the PSD estimates. Default None. + skyloc_data: dict of dicts, optional + Dictionary providing SNR time series for each detector, to be used + in sky localization with BAYESTAR. The format should be + `skyloc_data['H1']['snr_series']`. More detectors can be present + than in `ifos`; if so, extra detectors will only be used for sky + localization. + channel_names: dict of strings, optional + Strain channel names for each detector. Will be recorded in the + `sngl_inspiral` table. + padata: PAstroData instance + Organizes info relevant to the astrophysical probability of the + candidate. + mc_area_args: dict of dicts, optional + Dictionary providing arguments to be used in source probability + estimation with `pycbc/mchirp_area.py`. 
+ """ + self.coinc_results = coinc_results + self.psds = kwargs['psds'] + self.basename = None + if kwargs.get('gracedb'): + self.gracedb = kwargs['gracedb'] + + # Determine if the candidate should be marked as HWINJ + self.is_hardware_injection = ('HWINJ' in coinc_results + and coinc_results['HWINJ']) + + # We may need to apply a time offset for premerger search + self.time_offset = 0 + rtoff = f'foreground/{ifos[0]}/time_offset' + if rtoff in coinc_results: + self.time_offset = coinc_results[rtoff] + + # Check for ifos with SNR peaks in coinc_results + self.et_ifos = [i for i in ifos if f'foreground/{i}/end_time' in + coinc_results] + + if 'skyloc_data' in kwargs: + sld = kwargs['skyloc_data'] + assert len({sld[ifo]['snr_series'].delta_t for ifo in sld}) == 1, \ + "delta_t for all ifos do not match" + snr_ifos = sld.keys() # Ifos with SNR time series calculated + self.snr_series = {ifo: sld[ifo]['snr_series'] for ifo in snr_ifos} + # Extra ifos have SNR time series but not sngl inspiral triggers + + for ifo in snr_ifos: + # Ifos used for sky loc must have a PSD + assert ifo in self.psds + self.snr_series[ifo].start_time += self.time_offset + else: + self.snr_series = None + snr_ifos = self.et_ifos + + # Set up the bare structure of the xml document + outdoc = ligolw.Document() + outdoc.appendChild(ligolw.LIGO_LW()) + + proc_id = create_process_table(outdoc, program_name='pycbc', + detectors=snr_ifos).process_id + + # Set up coinc_definer table + coinc_def_table = lsctables.New(lsctables.CoincDefTable) + coinc_def_id = lsctables.CoincDefID(0) + coinc_def_row = lsctables.CoincDef() + coinc_def_row.search = "inspiral" + coinc_def_row.description = "sngl_inspiral<-->sngl_inspiral coincs" + coinc_def_row.coinc_def_id = coinc_def_id + coinc_def_row.search_coinc_type = 0 + coinc_def_table.append(coinc_def_row) + outdoc.childNodes[0].appendChild(coinc_def_table) + + # Set up coinc inspiral and coinc event tables + coinc_id = lsctables.CoincID(0) + coinc_event_table = lsctables.New(lsctables.CoincTable) + coinc_event_row = lsctables.Coinc() + coinc_event_row.coinc_def_id = coinc_def_id + coinc_event_row.nevents = len(snr_ifos) + coinc_event_row.instruments = ','.join(snr_ifos) + coinc_event_row.time_slide_id = lsctables.TimeSlideID(0) + coinc_event_row.process_id = proc_id + coinc_event_row.coinc_event_id = coinc_id + coinc_event_row.likelihood = 0. 
+ coinc_event_table.append(coinc_event_row) + outdoc.childNodes[0].appendChild(coinc_event_table) + + # Set up sngls + sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable) + coinc_event_map_table = lsctables.New(lsctables.CoincMapTable) + + # Marker variable recording template info from a valid sngl trigger + sngl_populated = None + network_snrsq = 0 + for sngl_id, ifo in enumerate(snr_ifos): + sngl = return_empty_sngl(nones=True) + sngl.event_id = lsctables.SnglInspiralID(sngl_id) + sngl.process_id = proc_id + sngl.ifo = ifo + names = [n.split('/')[-1] for n in coinc_results + if f'foreground/{ifo}' in n] + for name in names: + val = coinc_results[f'foreground/{ifo}/{name}'] + if name == 'end_time': + val += self.time_offset + sngl.end = lal.LIGOTimeGPS(val) + else: + # Sngl inspirals have a restricted set of attributes + try: + setattr(sngl, name, val) + except AttributeError: + pass + if sngl.mass1 and sngl.mass2: + sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta( + sngl.mass1, sngl.mass2) + sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta( + sngl.mass1, sngl.mass2) + sngl_populated = sngl + if sngl.snr: + sngl.eff_distance = sngl.sigmasq ** 0.5 / sngl.snr + network_snrsq += sngl.snr ** 2.0 + if 'channel_names' in kwargs and ifo in kwargs['channel_names']: + sngl.channel = kwargs['channel_names'][ifo] + sngl_inspiral_table.append(sngl) + + # Set up coinc_map entry + coinc_map_row = lsctables.CoincMap() + coinc_map_row.table_name = 'sngl_inspiral' + coinc_map_row.coinc_event_id = coinc_id + coinc_map_row.event_id = sngl.event_id + coinc_event_map_table.append(coinc_map_row) + + if self.snr_series is not None: + snr_series_to_xml(self.snr_series[ifo], outdoc, sngl.event_id) + + # Set merger time to the mean of trigger peaks over coinc_results ifos + self.merger_time = \ + numpy.mean([coinc_results[f'foreground/{ifo}/end_time'] for ifo in + self.et_ifos]) \ + + self.time_offset + + outdoc.childNodes[0].appendChild(coinc_event_map_table) + outdoc.childNodes[0].appendChild(sngl_inspiral_table) + + # Set up the coinc inspiral table + coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable) + coinc_inspiral_row = lsctables.CoincInspiral() + # This seems to be used as FAP, which should not be in gracedb + coinc_inspiral_row.false_alarm_rate = 0. + coinc_inspiral_row.minimum_duration = 0. 
+ coinc_inspiral_row.instruments = tuple(snr_ifos) + coinc_inspiral_row.coinc_event_id = coinc_id + coinc_inspiral_row.mchirp = sngl_populated.mchirp + coinc_inspiral_row.mass = sngl_populated.mtotal + coinc_inspiral_row.end_time = sngl_populated.end_time + coinc_inspiral_row.end_time_ns = sngl_populated.end_time_ns + coinc_inspiral_row.snr = network_snrsq ** 0.5 + far = 1.0 / (lal.YRJUL_SI * coinc_results['foreground/ifar']) + coinc_inspiral_row.combined_far = far + coinc_inspiral_table.append(coinc_inspiral_row) + outdoc.childNodes[0].appendChild(coinc_inspiral_table) + + # Append the PSDs + psds_lal = {} + for ifo, psd in self.psds.items(): + kmin = int(kwargs['low_frequency_cutoff'] / psd.delta_f) + fseries = lal.CreateREAL8FrequencySeries( + "psd", psd.epoch, kwargs['low_frequency_cutoff'], psd.delta_f, + lal.StrainUnit**2 / lal.HertzUnit, len(psd) - kmin) + fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC ** 2.0 + psds_lal[ifo] = fseries + make_psd_xmldoc(psds_lal, outdoc) + + # P astro calculation + if 'padata' in kwargs: + if 'p_terr' in kwargs: + raise RuntimeError( + "Both p_astro calculation data and a " + "previously calculated p_terr value were provided, this " + "doesn't make sense!" + ) + assert len(coinc_ifos) < 3, \ + f"p_astro can't handle {coinc_ifos} coinc ifos!" + trigger_data = { + 'mass1': sngl_populated.mass1, + 'mass2': sngl_populated.mass2, + 'spin1z': sngl_populated.spin1z, + 'spin2z': sngl_populated.spin2z, + 'network_snr': network_snrsq ** 0.5, + 'far': far, + 'triggered': coinc_ifos, + # Consider all ifos potentially relevant to detection, + # ignore those that only contribute to sky loc + 'sensitive': self.et_ifos} + horizons = {i: self.psds[i].dist for i in self.et_ifos} + self.p_astro, self.p_terr = \ + kwargs['padata'].do_pastro_calc(trigger_data, horizons) + elif 'p_terr' in kwargs: + self.p_astro, self.p_terr = 1 - kwargs['p_terr'], kwargs['p_terr'] + else: + self.p_astro, self.p_terr = None, None + + # Source probabilities and hasmassgap estimation + self.probabilities = None + self.hasmassgap = None + if 'mc_area_args' in kwargs: + eff_distances = [sngl.eff_distance for sngl in sngl_inspiral_table] + self.probabilities = calc_probabilities( + coinc_inspiral_row.mchirp, + coinc_inspiral_row.snr, + min(eff_distances), + kwargs['mc_area_args'] + ) + if 'embright_mg_max' in kwargs['mc_area_args']: + hasmg_args = copy.deepcopy(kwargs['mc_area_args']) + hasmg_args['mass_gap'] = True + hasmg_args['mass_bdary']['gap_max'] = \ + kwargs['mc_area_args']['embright_mg_max'] + self.hasmassgap = calc_probabilities( + coinc_inspiral_row.mchirp, + coinc_inspiral_row.snr, + min(eff_distances), + hasmg_args + )['Mass Gap'] + + # Combine p astro and source probs + if self.p_astro is not None and self.probabilities is not None: + self.astro_probs = {cl: pr * self.p_astro for + cl, pr in self.probabilities.items()} + self.astro_probs['Terrestrial'] = self.p_terr + else: + self.astro_probs = None + + self.outdoc = outdoc + self.time = sngl_populated.end + +
+[docs] + def save(self, fname): + """Write a file representing this candidate in a LIGOLW XML format + compatible with GraceDB. + + Parameters + ---------- + fname: str + Name of file to write to disk. + """ + kwargs = {} + if threading.current_thread() is not threading.main_thread(): + # avoid an error due to no ability to do signal handling in threads + kwargs['trap_signals'] = None + ligolw_utils.write_filename(self.outdoc, fname, \ + compress='auto', **kwargs) + + save_dir = os.path.dirname(fname) + # Save EMBright properties info as json + if self.hasmassgap is not None: + self.embright_file = os.path.join(save_dir, 'pycbc.em_bright.json') + with open(self.embright_file, 'w') as embrightf: + json.dump({'HasMassGap': self.hasmassgap}, embrightf) + logger.info('EM Bright file saved as %s', self.embright_file) + + # Save multi-cpt p astro as json + if self.astro_probs is not None: + self.multipa_file = os.path.join(save_dir, 'pycbc.p_astro.json') + with open(self.multipa_file, 'w') as multipaf: + json.dump(self.astro_probs, multipaf) + logger.info('Multi p_astro file saved as %s', self.multipa_file) + + # Save source probabilities in a json file + if self.probabilities is not None: + self.prob_file = os.path.join(save_dir, 'src_probs.json') + with open(self.prob_file, 'w') as probf: + json.dump(self.probabilities, probf) + logger.info('Source probabilities file saved as %s', self.prob_file) + # Don't save any other files! + return + + # Save p astro / p terr as json + if self.p_astro is not None: + self.pastro_file = os.path.join(save_dir, 'pa_pterr.json') + with open(self.pastro_file, 'w') as pastrof: + json.dump({'p_astro': self.p_astro, 'p_terr': self.p_terr}, + pastrof) + logger.info('P_astro file saved as %s', self.pastro_file)
+ + +
+[docs] + def upload(self, fname, gracedb_server=None, testing=True, + extra_strings=None, search='AllSky', labels=None): + """Upload this candidate to GraceDB, and annotate it with a few useful + plots and comments. + + Parameters + ---------- + fname: str + The name to give the xml file associated with this trigger + gracedb_server: string, optional + URL to the GraceDB web API service for uploading the event. + If omitted, the default will be used. + testing: bool + Switch to determine if the upload should be sent to gracedb as a + test trigger (True) or a production trigger (False). + search: str + String going into the "search" field of the GraceDB event. + labels: list + Optional list of labels to tag the new event with. + """ + import pylab as pl + + if fname.endswith('.xml.gz'): + self.basename = fname.replace('.xml.gz', '') + elif fname.endswith('.xml'): + self.basename = fname.replace('.xml', '') + else: + raise ValueError("Upload filename must end in .xml or .xml.gz, got" + " %s" % fname) + + # First make sure the event is saved on disk + # as GraceDB operations can fail later + self.save(fname) + + # hardware injections need to be maked with the INJ tag + if self.is_hardware_injection: + labels = (labels or []) + ['INJ'] + + # connect to GraceDB if we are not reusing a connection + if not hasattr(self, 'gracedb'): + logger.info('Connecting to GraceDB') + gdbargs = {'reload_certificate': True, 'reload_buffer': 300} + if gracedb_server: + gdbargs['service_url'] = gracedb_server + try: + from ligo.gracedb.rest import GraceDb + self.gracedb = GraceDb(**gdbargs) + except Exception as exc: + logger.error('Failed to create GraceDB client') + logger.error(exc) + + # create GraceDB event + logger.info('Uploading %s to GraceDB', fname) + group = 'Test' if testing else 'CBC' + gid = None + try: + response = self.gracedb.create_event( + group, + "pycbc", + fname, + search=search, + labels=labels + ) + gid = response.json()["graceid"] + logger.info("Uploaded event %s", gid) + except Exception as exc: + logger.error('Failed to create GraceDB event') + logger.error(str(exc)) + + # Upload em_bright properties JSON + if self.hasmassgap is not None and gid is not None: + try: + self.gracedb.write_log( + gid, 'EM Bright properties JSON file upload', + filename=self.embright_file, + tag_name=['em_bright'] + ) + logger.info('Uploaded em_bright properties for %s', gid) + except Exception as exc: + logger.error( + 'Failed to upload em_bright properties file ' + 'for %s', + gid + ) + logger.error(str(exc)) + + # Upload multi-cpt p_astro JSON + if self.astro_probs is not None and gid is not None: + try: + self.gracedb.write_log( + gid, 'Multi-component p_astro JSON file upload', + filename=self.multipa_file, + tag_name=['p_astro'], + label='PASTRO_READY' + ) + logger.info('Uploaded multi p_astro for %s', gid) + except Exception as exc: + logger.error( + 'Failed to upload multi p_astro file for %s', + gid + ) + logger.error(str(exc)) + + # If there is p_astro but no probabilities, upload p_astro JSON + if hasattr(self, 'pastro_file') and gid is not None: + try: + self.gracedb.write_log( + gid, '2-component p_astro JSON file upload', + filename=self.pastro_file, + tag_name=['sig_info'] + ) + logger.info('Uploaded p_astro for %s', gid) + except Exception as exc: + logger.error('Failed to upload p_astro file for %s', gid) + logger.error(str(exc)) + + # plot the SNR timeseries and noise PSDs + if self.snr_series is not None: + snr_series_fname = self.basename + '.hdf' + snr_series_plot_fname = self.basename 
+ '_snr.png' + asd_series_plot_fname = self.basename + '_asd.png' + + triggers = { + ifo: (self.coinc_results[f'foreground/{ifo}/end_time'] + + self.time_offset, + self.coinc_results[f'foreground/{ifo}/snr']) + for ifo in self.et_ifos + } + ref_time = int(self.merger_time) + generate_snr_plot(self.snr_series, snr_series_plot_fname, + triggers, ref_time) + + generate_asd_plot(self.psds, asd_series_plot_fname) + + for ifo in sorted(self.snr_series): + curr_snrs = self.snr_series[ifo] + curr_snrs.save(snr_series_fname, group='%s/snr' % ifo) + + # Additionally save the PSDs into the snr_series file + for ifo in sorted(self.psds): + # Undo dynamic range factor + curr_psd = self.psds[ifo].astype(numpy.float64) + curr_psd /= pycbc.DYN_RANGE_FAC ** 2.0 + curr_psd.save(snr_series_fname, group='%s/psd' % ifo) + + # Upload SNR series in HDF format and plots + if self.snr_series is not None and gid is not None: + try: + self.gracedb.write_log( + gid, 'SNR timeseries HDF file upload', + filename=snr_series_fname + ) + self.gracedb.write_log( + gid, 'SNR timeseries plot upload', + filename=snr_series_plot_fname, + tag_name=['background'], + displayName=['SNR timeseries'] + ) + self.gracedb.write_log( + gid, 'ASD plot upload', + filename=asd_series_plot_fname, + tag_name=['psd'], displayName=['ASDs'] + ) + except Exception as exc: + logger.error( + 'Failed to upload SNR timeseries and ASD for %s', + gid + ) + logger.error(str(exc)) + + # If 'self.prob_file' exists, make pie plot and do uploads. + # The pie plot only shows relative astrophysical source + # probabilities, not p_astro vs p_terrestrial + if hasattr(self, 'prob_file'): + self.prob_plotf = self.prob_file.replace('.json', '.png') + # Don't try to plot zero probabilities + prob_plot = {k: v for (k, v) in self.probabilities.items() + if v != 0.0} + labels, sizes = zip(*prob_plot.items()) + colors = [source_color(label) for label in labels] + fig, ax = pl.subplots() + ax.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', + textprops={'fontsize': 15}) + ax.axis('equal') + fig.savefig(self.prob_plotf) + pl.close() + if gid is not None: + try: + self.gracedb.write_log( + gid, + 'Source probabilities JSON file upload', + filename=self.prob_file, + tag_name=['pe'] + ) + logger.info('Uploaded source probabilities for %s', gid) + self.gracedb.write_log( + gid, + 'Source probabilities plot upload', + filename=self.prob_plotf, + tag_name=['pe'] + ) + logger.info( + 'Uploaded source probabilities pie chart for %s', + gid + ) + except Exception as exc: + logger.error( + 'Failed to upload source probability results for %s', + gid + ) + logger.error(str(exc)) + + if gid is not None: + try: + # Add code version info + gracedb_tag_with_version(self.gracedb, gid) + # Add any annotations to the event log + for text in (extra_strings or []): + self.gracedb.write_log( + gid, text, tag_name=['analyst_comments']) + except Exception as exc: + logger.error( + 'Something failed during annotation of analyst ' + 'comments for event %s on GraceDB.', + fname + ) + logger.error(str(exc)) + + return gid
+
+ + + +
+[docs] +def gracedb_tag_with_version(gracedb, event_id): + """Add a GraceDB log entry reporting PyCBC's version and install location. + """ + version_str = 'Using PyCBC version {}{} at {}' + version_str = version_str.format( + pycbc_version.version, + ' (release)' if pycbc_version.release else '', + os.path.dirname(pycbc.__file__) + ) + gracedb.write_log(event_id, version_str)
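+# Hedged usage sketch of gracedb_tag_with_version: the stand-in client defined
+# here replaces a real ligo.gracedb.rest.GraceDb instance so the call can run
+# without network access or credentials; the event id is illustrative only.
+class _FakeGraceDb:
+    def write_log(self, event_id, message, **kwargs):
+        print('log for %s: %s' % (event_id, message))
+
+gracedb_tag_with_version(_FakeGraceDb(), 'G000000')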
+ + + +__all__ = [ + 'CandidateForGraceDB', 'gracedb_tag_with_version', +] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/io/hdf.html b/latest/html/_modules/pycbc/io/hdf.html new file mode 100644 index 00000000000..d9506e78a73 --- /dev/null +++ b/latest/html/_modules/pycbc/io/hdf.html @@ -0,0 +1,2007 @@ + + + + + + pycbc.io.hdf — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.io.hdf

+"""
+Convenience classes for accessing hdf5 trigger files
+"""
+
+import h5py
+import numpy as np
+import logging
+import inspect
+import pickle
+
+from itertools import chain
+from io import BytesIO
+from lal import LIGOTimeGPS
+
+from ligo.lw import ligolw
+from ligo.lw import lsctables
+from ligo.lw import utils as ligolw_utils
+
+from pycbc.io.ligolw import (
+    return_search_summary,
+    return_empty_sngl,
+    create_process_table
+)
+from pycbc import events, conversions, pnutils
+from pycbc.events import ranking, veto
+from pycbc.events import mean_if_greater_than_zero
+
+logger = logging.getLogger('pycbc.io.hdf')
+
+
+class HGroup(h5py.Group):
+    """ Low level extensions to the h5py group object
+    """
+    def create_group(self, name, track_order=None):
+        """
+        Wrapper around h5py's create_group in order to redirect to the
+        manual HGroup object defined here
+        """
+        if track_order is None:
+            track_order = h5py.h5.get_config().track_order
+
+        with h5py._objects.phil:
+            name, lcpl = self._e(name, lcpl=True)
+            gcpl = HGroup._gcpl_crt_order if track_order else None
+            gid = h5py.h5g.create(self.id, name, lcpl=lcpl, gcpl=gcpl)
+            return HGroup(gid)
+
+    def create_dataset(self, name, shape=None, dtype=None, data=None, **kwds):
+        """
+        Wrapper around h5py's create_dataset so that checksums are used
+        """
+        if isinstance(data, np.ndarray) and not data.dtype == object:
+            kwds['fletcher32'] = True
+        return h5py.Group.create_dataset(
+            self,
+            name,
+            shape=shape,
+            dtype=dtype,
+            data=data,
+            **kwds
+        )
+
+
+
+[docs] +class HFile(HGroup, h5py.File): + """ Low level extensions to the capabilities of reading an hdf5 File + """ +
+[docs] + def select(self, fcn, *args, chunksize=10**6, derived=None, group='', + return_data=True, premask=None): + """ Return arrays from an hdf5 file that satisfy the given function + + Parameters + ---------- + fcn : a function + A function that accepts the same number of argument as keys given + and returns a boolean array of the same length. + + args : strings + A variable number of strings that are keys into the hdf5. These must + refer to arrays of equal length. + + chunksize : {10**6, int}, optional + Number of elements to read and process at a time. + + derived : dictionary + Dictionary keyed on argument name (must be given in args), values + are a tuple of: the function to be computed, and the required + datasets. The function must take in a dictionary keyed on those + dataset names. + + group : string, optional + The group within the h5py file containing the datasets, e.g. in + standard offline merged trigger files, this would be the IFO. This + can be included in the args manually, but is required in the case + of derived functions, e.g. newsnr. + + return_data : bool, optional, default True + If True, return the data for elements passing the function. + + premask : array of boolean values, optional + The pre-mask to apply to the triggers at read-in. + + Returns + ------- + indices: np.ndarray + An array of indices of elements passing the function. + + return_tuple : tuple of np.ndarrays + A variable number of arrays depending on the number of + args provided, + If return_data is True, arrays are the values of each + arg. + If return_data is False, this is None. + + >>> f = HFile(filename) + >>> snr = f.select(lambda snr: snr > 6, 'H1/snr') + """ + + # Required datasets are the arguments requested and datasets given + # for any derived functions + derived = derived if derived is not None else {} + dsets = [a for a in list(args) if a not in derived] + for _, rqd_list in derived.values(): + dsets += rqd_list + + # remove any duplicates from req_dsets + dsets = list(set(dsets)) + + # Get the pointers to the h5py Datasets, + # check they can all be used together + refs = {} + size = None + for ds in dsets: + refs[ds] = self[group + '/' + ds] + if (size is not None) and (refs[ds].size != size): + raise RuntimeError(f"Dataset {ds} is {self[ds].size} " + "entries long, which does not match " + f"previous input datasets ({size}).") + size = refs[ds].size + + # Apply any pre-masks + if premask is None: + mask = np.ones(size, dtype=bool) + else: + mask = premask + + if not mask.dtype == bool: + # mask is an array of indices rather than booleans, + # make it a bool array + new_mask = np.zeros(size, dtype=bool) + new_mask[mask] = True + mask = new_mask + + if not mask.size == size: + # You get here if you are using a boolean premask which + # isn't the same size as the arrays + raise RuntimeError(f"Using premask of size {mask.size} which " + f"does not match the input datasets ({size}).") + + # datasets being returned (possibly) + data = {} + indices = np.array([], dtype=np.uint64) + for arg in args: + data[arg] = [] + + # Loop through the chunks: + i = 0 + while i < size: + r = i + chunksize if i + chunksize < size else size + + if not any(mask[i:r]): + # Nothing allowed through the mask in this chunk + i += chunksize + continue + + if all(mask[i:r]): + # Everything allowed through the mask in this chunk + submask = np.arange(r - i) + else: + submask = np.flatnonzero(mask[i:r]) + + # Read each chunk's worth of data + partial_data = {arg: refs[arg][i:r][mask[i:r]] + for arg in dsets} + 
partial = [] + for a in args: + if a in derived.keys(): + # If this is a derived dataset, calculate it + derived_fcn = derived[a][0] + partial += [derived_fcn(partial_data)] + else: + # otherwise, just read from the file + partial += [partial_data[a]] + + # Find where it passes the function + keep = fcn(*partial) + + # Keep the indices which pass the function: + indices = np.concatenate([indices, submask[keep] + i]) + + if return_data: + # Store the dataset results that pass the function + for arg, part in zip(args, partial): + data[arg].append(part[keep]) + + i += chunksize + + if return_data: + return_tuple = tuple(np.concatenate(data[arg]) + for arg in args) + else: + return_tuple = None + + return indices.astype(np.uint64), return_tuple
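+# Hedged sketch of select() with a derived argument: the ranking function and
+# its required datasets are looked up from pycbc.events.ranking, mirroring how
+# SingleDetTriggers (defined later in this module) uses this method. The file
+# name, detector group and the 8.0 threshold are illustrative; 'newsnr' is
+# assumed to be an available ranking key.
+from pycbc.events import ranking
+
+newsnr_fcn_dsets = (ranking.sngls_ranking_function_dict['newsnr'],
+                    ranking.required_datasets['newsnr'])
+with HFile('H1-TRIGGERS.hdf', 'r') as trig_file:
+    idx, (newsnr_vals,) = trig_file.select(
+        lambda newsnr: newsnr > 8.0,
+        'newsnr',
+        derived={'newsnr': newsnr_fcn_dsets},
+        group='H1')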
+
+ + + +
+[docs] +class DictArray(object): + """ Utility for organizing sets of arrays of equal length. + + Manages a dictionary of arrays of equal length. This can also + be instantiated with a set of hdf5 files and the key values. The full + data is always in memory and all operations create new instances of the + DictArray. + """ + def __init__(self, data=None, files=None, groups=None): + """ Create a DictArray + + Parameters + ---------- + data: dict, optional + Dictionary of equal length numpy arrays + files: list of filenames, optional + List of hdf5 file filenames. Incompatibile with the `data` option. + groups: list of strings + List of keys into each file. Required by the files option. + """ + # Check that input fits with how the DictArray is set up + if data and files: + raise RuntimeError('DictArray can only have data or files as ' + 'input, not both.') + if data is None and files is None: + raise RuntimeError('DictArray needs either data or files at' + 'initialization. To set up an empty instance' + 'use DictArray(data={})') + if files and not groups: + raise RuntimeError('If files are given then need groups.') + + self.data = data + self.groups = groups + if files: + self.data = {} + for g in groups: + self.data[g] = [] + + for f in files: + d = HFile(f) + for g in groups: + if g in d: + self.data[g].append(d[g][:]) + d.close() + + for k in self.data: + if not len(self.data[k]) == 0: + self.data[k] = np.concatenate(self.data[k]) + + for k in self.data: + setattr(self, k, self.data[k]) + + def _return(self, data): + return self.__class__(data=data) + + def __len__(self): + return len(self.data[tuple(self.data.keys())[0]]) + + def __add__(self, other): + if self.data == {}: + logger.debug('Adding data to a DictArray instance which ' + 'was initialized with an empty dict') + return self._return(data=other) + + data = {} + for k in self.data: + try: + data[k] = np.concatenate([self.data[k], other.data[k]]) + except KeyError: + logger.info('%s does not exist in other data', k) + return self._return(data=data) + +
+[docs] + def select(self, idx): + """ Return a new DictArray containing only the indexed values + """ + data = {} + for k in self.data: + # Make sure each entry is an array (not a scalar) + data[k] = np.array(self.data[k][idx]) + return self._return(data=data)
+ + +
+[docs] + def remove(self, idx): + """ Return a new DictArray that does not contain the indexed values + """ + data = {} + for k in self.data: + data[k] = np.delete(self.data[k], np.array(idx, dtype=int)) + return self._return(data=data)
+ + +
+[docs] + def save(self, outname): + f = HFile(outname, "w") + for k in self.attrs: + f.attrs[k] = self.attrs[k] + + for k in self.data: + f.create_dataset(k, data=self.data[k], + compression='gzip', + compression_opts=9, + shuffle=True) + f.close()
+
+ + + +
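+# Hedged usage sketch of DictArray with in-memory data (values are arbitrary).
+_first = DictArray(data={'stat': np.array([8.0, 9.5, 7.2]),
+                         'time': np.array([100.0, 200.0, 300.0])})
+_second = DictArray(data={'stat': np.array([10.1]),
+                          'time': np.array([400.0])})
+_combined = _first + _second                   # concatenates each named array
+_loudest = _combined.select(np.argsort(_combined.stat)[::-1][:2])
+assert len(_combined) == 4 and len(_loudest) == 2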
+[docs] +class StatmapData(DictArray): + def __init__(self, data=None, seg=None, attrs=None, files=None, + groups=('stat', 'time1', 'time2', 'trigger_id1', + 'trigger_id2', 'template_id', 'decimation_factor', + 'timeslide_id')): + super(StatmapData, self).__init__(data=data, files=files, + groups=groups) + + if data: + self.seg=seg + self.attrs=attrs + elif files: + f = HFile(files[0], "r") + self.seg = f['segments'] + self.attrs = f.attrs + + def _return(self, data): + return self.__class__(data=data, attrs=self.attrs, seg=self.seg) + +
+[docs] + def cluster(self, window): + """ Cluster the dict array, assuming it has the relevant coinc columns, + time1, time2, stat, and timeslide_id + """ + # If no events, do nothing + if len(self.time1) == 0 or len(self.time2) == 0: + return self + from pycbc.events import cluster_coincs + interval = self.attrs['timeslide_interval'] + cid = cluster_coincs(self.stat, self.time1, self.time2, + self.timeslide_id, interval, window) + return self.select(cid)
+ + +
+[docs] + def save(self, outname): + super(StatmapData, self).save(outname) + with HFile(outname, "w") as f: + for key in self.seg.keys(): + f['segments/%s/start' % key] = self.seg[key]['start'][:] + f['segments/%s/end' % key] = self.seg[key]['end'][:]
+
+ + + +
+[docs] +class MultiifoStatmapData(StatmapData): + def __init__(self, data=None, seg=None, attrs=None, + files=None, ifos=None): + groups = ['decimation_factor', 'stat', 'template_id', 'timeslide_id'] + for ifo in ifos: + groups += ['%s/time' % ifo] + groups += ['%s/trigger_id' % ifo] + + super(MultiifoStatmapData, self).__init__(data=data, files=files, + groups=groups, attrs=attrs, + seg=seg) + + def _return(self, data): + ifolist = self.attrs['ifos'].split(' ') + return self.__class__(data=data, attrs=self.attrs, seg=self.seg, + ifos=ifolist) + +
+[docs] + def cluster(self, window): + """ Cluster the dict array, assuming it has the relevant coinc columns, + time1, time2, stat, and timeslide_id + """ + pivot_ifo = self.attrs['pivot'] + fixed_ifo = self.attrs['fixed'] + # If no events, do nothing + if len(self.data['%s/time' % pivot_ifo]) == 0 or len(self.data['%s/time' % fixed_ifo]) == 0: + return self + from pycbc.events import cluster_coincs + interval = self.attrs['timeslide_interval'] + cid = cluster_coincs(self.stat, + self.data['%s/time' % pivot_ifo], + self.data['%s/time' % fixed_ifo], + self.timeslide_id, + interval, + window) + return self.select(cid)
+
+ + + +
+[docs] +class FileData(object): + def __init__(self, fname, group=None, columnlist=None, filter_func=None): + """ + Parameters + ---------- + group : string + Name of group to be read from the file + columnlist : list of strings + Names of columns to be read; if None, use all existing columns + filter_func : string + String should evaluate to a Boolean expression using attributes + of the class instance derived from columns: ex. 'self.snr < 6.5' + """ + if not fname: raise RuntimeError("Didn't get a file!") + + self.fname = fname + self.h5file = HFile(fname, "r") + if group is None: + if len(self.h5file.keys()) == 1: + group, = self.h5file.keys() + else: + raise RuntimeError("Didn't get a group!") + self.group_key = group + self.group = self.h5file[group] + self.columns = columnlist if columnlist is not None \ + else list(self.group.keys()) + self.filter_func = filter_func + self._mask = None + +
+[docs] + def close(self): + self.h5file.close()
+ + + @property + def mask(self): + """ + Create a mask implementing the requested filter on the datasets + + Returns + ------- + array of Boolean + True for dataset indices to be returned by the get_column method + """ + if self.filter_func is None: + raise RuntimeError("Can't get a mask without a filter function!") + else: + # only evaluate if no previous calculation was done + if self._mask is None: + # get required columns into the namespace as numpy arrays + for column in self.columns: + if column in self.filter_func: + setattr(self, column, self.group[column][:]) + self._mask = eval(self.filter_func) + return self._mask + +
+[docs] + def get_column(self, col): + """ + Method designed to be analogous to legacy pylal.SnglInspiralUtils + functionality + + Parameters + ---------- + col : string + Name of the dataset to be returned + + Returns + ------- + numpy array + Values from the dataset, filtered if requested + """ + # catch corner case with an empty file (group with no datasets) + if not len(self.group.keys()): + return np.array([]) + vals = self.group[col] + if self.filter_func: + return vals[self.mask] + else: + return vals[:]
+
+ + + +
+[docs] +class DataFromFiles(object): + + def __init__(self, filelist, group=None, columnlist=None, filter_func=None): + self.files = filelist + self.group = group + self.columns = columnlist + self.filter_func = filter_func + +
+[docs] + def get_column(self, col): + """ + Loop over files getting the requested dataset values from each + + Parameters + ---------- + col : string + Name of the dataset to be returned + + Returns + ------- + numpy array + Values from the dataset, filtered if requested and + concatenated in order of file list + """ + logger.info('getting %s', col) + vals = [] + for f in self.files: + d = FileData(f, group=self.group, columnlist=self.columns, + filter_func=self.filter_func) + vals.append(d.get_column(col)) + # Close each file since h5py has an upper limit on the number of + # open file objects (approx. 1000) + d.close() + logger.info('- got %i values', sum(len(v) for v in vals)) + return np.concatenate(vals)
+
+ + + +
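+# Hedged usage sketch of DataFromFiles (file names, group, columns and the
+# filter expression are illustrative only).
+_dff = DataFromFiles(['H1-TRIGGERS-1.hdf', 'H1-TRIGGERS-2.hdf'], group='H1',
+                     columnlist=['snr', 'end_time'],
+                     filter_func='self.snr > 6.0')
+_snr_values = _dff.get_column('snr')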
+[docs] +class SingleDetTriggers(object): + """ + Provides easy access to the parameters of single-detector CBC triggers. + """ + def __init__(self, trig_file, detector, bank_file=None, veto_file=None, + segment_name=None, premask=None, filter_rank=None, + filter_threshold=None, chunksize=10**6, filter_func=None): + """ + Create a SingleDetTriggers instance + + Parameters + ---------- + trig_file : string or os.pathtype, required + HDF file containing trigger information + + detector : string, required + The detectior being used, this is used to access the + triggers in trig_file + + bank_file: string or os.pathtype, optional + hdf file containing template bank information + + veto_file: string or os.pathtype, optional + File used to define vetoes + + segment_name : string, optional + Segment name being used in the veto_file + + premask : array of indices or boolean, optional + Array of used triggers + + filter_rank : string, optional + The ranking, as defined by ranking.py to compare to + filter_threshold + + filter_threshold: float, required if filter_rank is used + Threshold to filter the ranking values + + chunksize : int , default 10**6 + Size of chunks to read in for the filter_rank / threshold. + """ + logger.info('Loading triggers') + self.trigs_f = HFile(trig_file, 'r') + self.trigs = self.trigs_f[detector] + self.ntriggers = self.trigs['end_time'].size + self.ifo = detector # convenience attributes + self.detector = detector + if bank_file: + logger.info('Loading bank') + self.bank = HFile(bank_file, 'r') + else: + # empty dict in place of non-existent hdf file + self.bank = {} + + # Apply some masks to start off with - here we should try and apply + # them in the order which cuts most things earliest. + + # Apply any pre-masks + if premask is None: + self.mask = np.ones(self.ntriggers, dtype=bool) + else: + self.mask = None + self.apply_mask(premask) + + if filter_rank: + assert filter_threshold is not None + logger.info("Applying threshold of %.3f on %s", + filter_threshold, filter_rank) + fcn_dsets = (ranking.sngls_ranking_function_dict[filter_rank], + ranking.required_datasets[filter_rank]) + idx, _ = self.trigs_f.select( + lambda rank: rank > filter_threshold, + filter_rank, + derived={filter_rank: fcn_dsets}, + return_data=False, + premask=self.mask, + group=detector, + chunksize=chunksize, + ) + logger.info("%d triggers remain", idx.size) + # If self.mask already has values, need to take these into account: + self.and_masks(idx) + + if filter_func: + # Apply a filter on the triggers which is _not_ a ranking statistic + for rank_str in ranking.sngls_ranking_function_dict.keys(): + if f'self.{rank_str}' in filter_func: + logger.warning('Supplying the ranking (%s) in ' + 'filter_func is inefficient, suggest to ' + 'use filter_rank instead.', rank_str) + logger.info('Setting up filter function') + for c in self.trigs.keys(): + if c in filter_func: + setattr(self, '_'+c, self.trigs[c][:]) + for c in self.bank.keys(): + if c in filter_func: + # get template parameters corresponding to triggers + setattr(self, '_'+c, + np.array(self.bank[c])[self.trigs['template_id'][:]]) + + filter_mask = eval(filter_func.replace('self.', 'self._')) + # remove the dummy attributes + for c in chain(self.trigs.keys(), self.bank.keys()): + if c in filter_func: delattr(self, '_'+c) + + self.apply_mask(filter_mask) + logger.info('%i triggers remain after cut on %s', + sum(self.mask), filter_func) + + if veto_file: + logger.info('Applying veto segments') + # veto_mask is an array of indices into the 
trigger arrays + # giving the surviving triggers + logger.info('%i triggers before vetoes', self.mask_size) + veto_mask, _ = events.veto.indices_outside_segments( + self.end_time, [veto_file], + ifo=detector, segment_name=segment_name) + + # Update mask accordingly + self.apply_mask(veto_mask) + logger.info('%i triggers remain after vetoes', + self.mask_size) + + def __getitem__(self, key): + # Is key in the TRIGGER_MERGE file? + try: + return self.get_column(key) + except KeyError: + pass + + # Is key in the bank file? + try: + self.checkbank(key) + return self.bank[key][:][self.template_id] + except (RuntimeError, KeyError) as exc: + err_msg = "Cannot find {} in input files".format(key) + raise ValueError(err_msg) from exc + +
+[docs] + def checkbank(self, param): + if self.bank == {}: + raise RuntimeError("Can't get %s values without a bank file" + % param)
+ + +
+[docs] + def trig_dict(self): + """Returns dict of the masked trigger values""" + mtrigs = {} + for k in self.trigs: + if len(self.trigs[k]) == len(self.trigs['end_time']): + if self.mask is not None: + mtrigs[k] = self.trigs[k][self.mask] + else: + mtrigs[k] = self.trigs[k][:] + mtrigs['ifo'] = self.ifo + return mtrigs
+ + +
+[docs] + @classmethod + def get_param_names(cls): + """Returns a list of plottable CBC parameter variables""" + return [m[0] for m in inspect.getmembers(cls) \ + if type(m[1]) == property]
+ + +
+[docs] + def apply_mask(self, logic_mask): + """Apply a mask over the top of the current mask + + Parameters + ---------- + logic_mask : boolean array or numpy array of indices + """ + if self.mask is None: + self.mask = np.zeros(self.ntriggers, dtype=bool) + self.mask[logic_mask] = True + elif hasattr(self.mask, 'dtype') and (self.mask.dtype == 'bool'): + if hasattr(logic_mask, 'dtype') and (logic_mask.dtype == 'bool'): + # So both new and old masks are boolean, numpy slice assignment + # can be used directly, with no additional memory. + self.mask[self.mask] = logic_mask + else: + # So logic_mask is either an array, or list, of integers. + # This case is a little tricksy, so we begin by converting the + # list/array to a boolean, and then do what we did above. + new_logic_mask = np.zeros(np.sum(self.mask), dtype=bool) + new_logic_mask[logic_mask] = True + self.mask[self.mask] = new_logic_mask + else: + self.mask = list(np.array(self.mask)[logic_mask])
+ + +
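+
+    # The boolean-on-boolean assignment in apply_mask can be illustrated
+    # with plain numpy (sketch): ``mask`` plays the role of ``self.mask``
+    # and ``new`` is a further cut on the currently surviving triggers.
+    #
+    #   >>> import numpy as np
+    #   >>> mask = np.array([True, False, True, True, False])
+    #   >>> new = np.array([True, False, True])    # one entry per survivor
+    #   >>> mask[mask] = new
+    #   >>> mask
+    #   array([ True, False, False,  True, False])
+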
+[docs] + def and_masks(self, logic_mask): + """Apply a mask to be combined as a logical and with the current mask. + + Parameters + ---------- + logic_mask : boolean array or numpy array/list of indices + """ + if self.mask_size == self.ntriggers: + # No mask exists, just update to use the given mask + self.apply_mask(logic_mask) + return + + # Use intersection of the indices of True values in the masks + if hasattr(logic_mask, 'dtype') and (logic_mask.dtype == 'bool'): + new_indices = np.flatnonzero(logic_mask) + else: + new_indices = np.array(logic_mask) + + if hasattr(self.mask, 'dtype') and (self.mask.dtype == 'bool'): + orig_indices = np.flatnonzero(self.mask) + else: + orig_indices = np.array(self.mask) + + self.mask[:] = False + and_indices = np.intersect1d(new_indices, orig_indices) + self.mask[and_indices.astype(np.uint64)] = True
+ + +
+[docs] + def mask_to_n_loudest_clustered_events(self, rank_method, + statistic_threshold=None, + n_loudest=10, + cluster_window=10, + statistic_kwargs=None): + """Edits the mask property of the class to point to the N loudest + single detector events as ranked by ranking statistic. + + Events are clustered so that no more than 1 event within +/- + cluster_window will be considered. Can apply a threshold on the + statistic using statistic_threshold + """ + + if statistic_kwargs is None: + statistic_kwargs = {} + sds = rank_method.single(self.trig_dict()) + stat = rank_method.rank_stat_single( + (self.ifo, sds), + **statistic_kwargs + ) + if len(stat) == 0: + # No triggers at all, so just return here + self.apply_mask(np.array([], dtype=np.uint64)) + self.stat = np.array([], dtype=np.uint64) + return + + times = self.end_time + if statistic_threshold is not None: + # Threshold on statistic + keep = stat >= statistic_threshold + stat = stat[keep] + times = times[keep] + self.apply_mask(keep) + + if len(stat) == 0: + logger.warning("No triggers after thresholding") + return + else: + logger.info("%d triggers after thresholding", len(stat)) + + index = stat.argsort()[::-1] + new_times = [] + new_index = [] + # Loop through triggers - loudest first + for curr_idx in index: + curr_time = times[curr_idx] + for time in new_times: + # Have we already got a louder trigger within the window? + if abs(curr_time - time) < cluster_window: + break + else: + # Store if no other triggers within cluster window + new_index.append(curr_idx) + new_times.append(curr_time) + if len(new_index) >= n_loudest: + # We have as many triggers as we want now + break + + # For indexing, indices need to be a numpy array, in order + index = np.array(new_index) + index.sort() + # Apply to the existing mask + self.apply_mask(index) + self.stat = stat[index]
+ + + @property + def mask_size(self): + if self.mask is None: + return self.ntriggers + if isinstance(self.mask, list): + return len(self.mask) + return np.count_nonzero(self.mask) + + @property + def template_id(self): + return self.get_column('template_id').astype(int) + + @property + def mass1(self): + self.checkbank('mass1') + return self.bank['mass1'][:][self.template_id] + + @property + def mass2(self): + self.checkbank('mass2') + return self.bank['mass2'][:][self.template_id] + + @property + def spin1z(self): + self.checkbank('spin1z') + return self.bank['spin1z'][:][self.template_id] + + @property + def spin2z(self): + self.checkbank('spin2z') + return self.bank['spin2z'][:][self.template_id] + + @property + def spin2x(self): + self.checkbank('spin2x') + return self.bank['spin2x'][:][self.template_id] + + @property + def spin2y(self): + self.checkbank('spin2y') + return self.bank['spin2y'][:][self.template_id] + + @property + def spin1x(self): + self.checkbank('spin1x') + return self.bank['spin1x'][:][self.template_id] + + @property + def spin1y(self): + self.checkbank('spin1y') + return self.bank['spin1y'][:][self.template_id] + + @property + def inclination(self): + self.checkbank('inclination') + return self.bank['inclination'][:][self.template_id] + + @property + def f_lower(self): + self.checkbank('f_lower') + return self.bank['f_lower'][:][self.template_id] + + @property + def approximant(self): + self.checkbank('approximant') + return self.bank['approximant'][:][self.template_id] + + @property + def mtotal(self): + return self.mass1 + self.mass2 + + @property + def mchirp(self): + return conversions.mchirp_from_mass1_mass2(self.mass1, self.mass2) + + @property + def eta(self): + return conversions.eta_from_mass1_mass2(self.mass1, self.mass2) + + @property + def effective_spin(self): + # FIXME assumes aligned spins + return conversions.chi_eff(self.mass1, self.mass2, + self.spin1z, self.spin2z) + + # IMPROVEME: would like to have a way to access all get_freq and/or + # other pnutils.* names rather than hard-coding each one + # - eg make this part of a fancy interface to the bank file ? + @property + def f_seobnrv2_peak(self): + return pnutils.get_freq('fSEOBNRv2Peak', self.mass1, self.mass2, + self.spin1z, self.spin2z) + + @property + def f_seobnrv4_peak(self): + return pnutils.get_freq('fSEOBNRv4Peak', self.mass1, self.mass2, + self.spin1z, self.spin2z) + + @property + def end_time(self): + return self.get_column('end_time') + + @property + def template_duration(self): + return self.get_column('template_duration') + + @property + def snr(self): + return self.get_column('snr') + + @property + def sgchisq(self): + return self.get_column('sg_chisq') + + @property + def u_vals(self): + return self.get_column('u_vals') + + @property + def rchisq(self): + return self.get_column('chisq') \ + / (self.get_column('chisq_dof') * 2 - 2) + + @property + def psd_var_val(self): + return self.get_column('psd_var_val') + + @property + def newsnr(self): + return ranking.newsnr(self.snr, self.rchisq) + + @property + def newsnr_sgveto(self): + return ranking.newsnr_sgveto(self.snr, self.rchisq, self.sgchisq) + + @property + def newsnr_sgveto_psdvar(self): + return ranking.newsnr_sgveto_psdvar(self.snr, self.rchisq, + self.sgchisq, self.psd_var_val) + + @property + def newsnr_sgveto_psdvar_threshold(self): + return ranking.newsnr_sgveto_psdvar_threshold(self.snr, self.rchisq, + self.sgchisq, self.psd_var_val) + +
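+
+    # The derived-parameter properties above are simple combinations of the
+    # template masses and spins; for example the chirp mass returned by
+    # conversions.mchirp_from_mass1_mass2 is the standard combination
+    # (sketch with made-up component masses):
+    #
+    #   >>> m1, m2 = 30.0, 25.0
+    #   >>> (m1 * m2) ** (3. / 5.) / (m1 + m2) ** (1. / 5.)
+    #   23.82...
+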
+[docs] + def get_ranking(self, rank_name, **kwargs): + return ranking.get_sngls_ranking_from_trigs(self, rank_name, **kwargs)
+ + +
+[docs]
+    def get_column(self, cname):
+        """
+        Read columns while applying the mask
+        """
+        # Fiducial value that seems to work, not extensively tuned.
+        MFRAC = 0.3
+
+        # If the mask accesses few enough elements then directly use it.
+        # This can be slower than reading in all the elements if most of them
+        # will be read.
+        if isinstance(self.mask, list) or \
+                self.mask_size < (self.ntriggers * MFRAC):
+            return self.trigs[cname][self.mask]
+
+        # We have a lot of elements to read so we resort to reading in the
+        # entire array before masking.
+        elif self.mask is not None:
+            return self.trigs[cname][:][self.mask]
+        else:
+            return self.trigs[cname][:]
+
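+
+    # The read strategy in get_column can be seen with h5py directly
+    # (sketch; 'scratch.hdf' is a hypothetical throw-away file):
+    #
+    #   >>> import numpy as np, h5py
+    #   >>> with h5py.File('scratch.hdf', 'w') as f:
+    #   ...     f['snr'] = np.random.rand(10**6)
+    #   >>> f = h5py.File('scratch.hdf', 'r')
+    #   >>> sparse = np.zeros(10**6, dtype=bool)
+    #   >>> sparse[::1000] = True
+    #   >>> a = f['snr'][sparse]      # partial read: good for sparse masks
+    #   >>> b = f['snr'][:][sparse]   # full read then mask: good when dense
+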
+ + + +
+[docs] +class ForegroundTriggers(object): + + # Injection files are expected to only have 'exclusive' IFAR/FAP values, + # should use has_inc=False for these. + def __init__(self, coinc_file, bank_file, sngl_files=None, n_loudest=None, + group='foreground', has_inc=True): + self.coinc_file = FileData(coinc_file, group=group) + if 'ifos' in self.coinc_file.h5file.attrs: + self.ifos = self.coinc_file.h5file.attrs['ifos'].split(' ') + else: + raise ValueError("File doesn't have an 'ifos' attribute!", + coinc_file) + self.sngl_files = {} + if sngl_files is not None: + for sngl_file in sngl_files: + curr_dat = FileData(sngl_file) + curr_ifo = curr_dat.group_key + self.sngl_files[curr_ifo] = curr_dat + + if not all([ifo in self.sngl_files.keys() for ifo in self.ifos]): + print("sngl_files: {}".format(sngl_files)) + print("self.ifos: {}".format(self.ifos)) + raise RuntimeError("IFOs in statmap file not all represented " + "by single-detector trigger files.") + if not sorted(self.sngl_files.keys()) == sorted(self.ifos): + logger.warning("WARNING: Single-detector trigger files " + "given for IFOs not in the statmap file") + + self.bank_file = HFile(bank_file, "r") + self.n_loudest = n_loudest + + self._inclusive = has_inc + self._sort_arr = None + self._template_id = None + self._trig_ids = None + self.get_active_segments() + + @property + def sort_arr(self): + if self._sort_arr is None: + if self._inclusive: + try: + ifar = self.coinc_file.get_column('ifar') + except KeyError: + logger.warning("WARNING: Can't find inclusive IFAR!" + "Using exclusive IFAR instead ...") + ifar = self.coinc_file.get_column('ifar_exc') + self._inclusive = False + else: + ifar = self.coinc_file.get_column('ifar_exc') + sorting = ifar.argsort()[::-1] + if self.n_loudest: + sorting = sorting[:self.n_loudest] + self._sort_arr = sorting + return self._sort_arr + + @property + def template_id(self): + if self._template_id is None: + template_id = self.get_coincfile_array('template_id') + self._template_id = template_id.astype(int) + return self._template_id + + @property + def trig_id(self): + if self._trig_ids is not None: + return self._trig_ids + + self._trig_ids = {} + for ifo in self.ifos: + self._trig_ids[ifo] = self.get_coincfile_array(ifo + '/trigger_id') + return self._trig_ids + +
+[docs] + def get_coincfile_array(self, variable): + return self.coinc_file.get_column(variable)[self.sort_arr]
+ + +
+[docs] + def get_bankfile_array(self, variable): + try: + return self.bank_file[variable][:][self.template_id] + except IndexError: + if len(self.template_id) == 0: + return np.array([]) + raise
+ + +
+[docs]
+    def get_snglfile_array_dict(self, variable):
+        return_dict = {}
+        for ifo in self.ifos:
+            try:
+                # Make sure we don't change the internal cached trig_id array
+                tid = np.copy(self.trig_id[ifo])
+                # Put in *some* value for the invalid points to avoid failure
+                lgc = tid == -1
+                tid[lgc] = 0
+                # Get the appropriate variable dataset
+                group = self.sngl_files[ifo].group
+                if not len(group.keys()):
+                    # There are no groups to consider - move on to next IFO
+                    continue
+                dataset = group[variable]
+                # Convert the trigger ids into a boolean array so that we can
+                # read only the triggers we want directly from the file
+                mask = np.zeros(dataset.size, dtype=bool)
+                mask[tid] = True
+                needed_data = dataset[mask]
+                # Get order and duplicate information back that was lost in
+                # the boolean mask assignment
+                _, order_duplicate_index = np.unique(
+                    tid,
+                    return_inverse=True
+                )
+                curr = needed_data[order_duplicate_index]
+            except IndexError:
+                if len(self.trig_id[ifo]) == 0:
+                    curr = np.array([])
+                    lgc = curr == 0
+                else:
+                    raise
+            return_dict[ifo] = (curr, np.logical_not(lgc))
+        return return_dict
+ + +
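+
+    # The boolean-mask plus np.unique bookkeeping used in
+    # get_snglfile_array_dict, in isolation (plain-numpy sketch):
+    #
+    #   >>> import numpy as np
+    #   >>> tid = np.array([7, 2, 7, 5])           # unsorted, with a repeat
+    #   >>> dset = np.arange(10) * 10              # stand-in for the dataset
+    #   >>> mask = np.zeros(dset.size, dtype=bool)
+    #   >>> mask[tid] = True
+    #   >>> needed = dset[mask]                    # sorted, de-duplicated read
+    #   >>> _, inv = np.unique(tid, return_inverse=True)
+    #   >>> needed[inv]                            # back in trigger order
+    #   array([70, 20, 70, 50])
+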
+[docs] + def get_active_segments(self): + self.active_segments = {} + for ifo in self.ifos: + starts = self.sngl_files[ifo].get_column('search/start_time') + ends = self.sngl_files[ifo].get_column('search/end_time') + self.active_segments[ifo] = veto.start_end_to_segments(starts, + ends)
+ + +
+[docs] + def get_end_time(self): + times_gen = (self.get_coincfile_array('{}/time'.format(ifo)) + for ifo in self.ifos) + ref_times = np.array([mean_if_greater_than_zero(t)[0] + for t in zip(*times_gen)]) + return ref_times
+ + +
+[docs] + def get_ifos(self): + """ + Returns + ------- + ifos_list + List of lists of ifo names involved in each foreground event. + Ifos will be listed in the same order as self.ifos + """ + # Ian thinks this could be coded more simply and efficiently + # Note also that effectively the same thing is done as part of the + # to_coinc_hdf_object method + ifo_or_minus = [] + for ifo in self.ifos: + ifo_trigs = np.where(self.get_coincfile_array(ifo + '/time') < 0, + '-', ifo) + ifo_or_minus.append(ifo_trigs) + ifos_list = [list(trig[trig != '-']) + for trig in iter(np.array(ifo_or_minus).T)] + return ifos_list
+ + +
+[docs] + def to_coinc_xml_object(self, file_name): + outdoc = ligolw.Document() + outdoc.appendChild(ligolw.LIGO_LW()) + + ifos = sorted(self.sngl_files) + proc_table = create_process_table( + outdoc, + program_name='pycbc', + detectors=ifos + ) + proc_id = proc_table.process_id + + search_summ_table = lsctables.New(lsctables.SearchSummaryTable) + coinc_h5file = self.coinc_file.h5file + try: + start_time = coinc_h5file['segments']['coinc']['start'][:].min() + end_time = coinc_h5file['segments']['coinc']['end'][:].max() + except KeyError: + start_times = [] + end_times = [] + for ifo_comb in coinc_h5file['segments']: + if ifo_comb == 'foreground_veto': + continue + seg_group = coinc_h5file['segments'][ifo_comb] + start_times.append(seg_group['start'][:].min()) + end_times.append(seg_group['end'][:].max()) + start_time = min(start_times) + end_time = max(end_times) + num_trigs = len(self.sort_arr) + search_summary = return_search_summary(start_time, end_time, + num_trigs, ifos) + search_summ_table.append(search_summary) + outdoc.childNodes[0].appendChild(search_summ_table) + + sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable) + coinc_def_table = lsctables.New(lsctables.CoincDefTable) + coinc_event_table = lsctables.New(lsctables.CoincTable) + coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable) + coinc_event_map_table = lsctables.New(lsctables.CoincMapTable) + time_slide_table = lsctables.New(lsctables.TimeSlideTable) + + # Set up time_slide table + time_slide_id = lsctables.TimeSlideID(0) + for ifo in ifos: + time_slide_row = lsctables.TimeSlide() + time_slide_row.instrument = ifo + time_slide_row.time_slide_id = time_slide_id + time_slide_row.offset = 0 + time_slide_row.process_id = proc_id + time_slide_table.append(time_slide_row) + + # Set up coinc_definer table + coinc_def_id = lsctables.CoincDefID(0) + coinc_def_row = lsctables.CoincDef() + coinc_def_row.search = "inspiral" + coinc_def_row.description = \ + "sngl_inspiral<-->sngl_inspiral coincidences" + coinc_def_row.coinc_def_id = coinc_def_id + coinc_def_row.search_coinc_type = 0 + coinc_def_table.append(coinc_def_row) + + bank_col_names = ['mass1', 'mass2', 'spin1z', 'spin2z'] + bank_col_vals = {} + for name in bank_col_names: + bank_col_vals[name] = self.get_bankfile_array(name) + + coinc_event_names = ['ifar', 'time', 'fap', 'stat'] + coinc_event_vals = {} + for name in coinc_event_names: + if name == 'time': + coinc_event_vals[name] = self.get_end_time() + else: + coinc_event_vals[name] = self.get_coincfile_array(name) + + sngl_col_names = ['snr', 'chisq', 'chisq_dof', 'bank_chisq', + 'bank_chisq_dof', 'cont_chisq', 'cont_chisq_dof', + 'end_time', 'template_duration', 'coa_phase', + 'sigmasq'] + sngl_col_vals = {} + for name in sngl_col_names: + sngl_col_vals[name] = self.get_snglfile_array_dict(name) + + sngl_event_count = 0 + for idx in range(len(self.sort_arr)): + # Set up IDs and mapping values + coinc_id = lsctables.CoincID(idx) + + # Set up sngls + sngl_mchirps = [] + sngl_mtots = [] + net_snrsq = 0 + triggered_ifos = [] + for ifo in ifos: + # If this ifo is not participating in this coincidence then + # ignore it and move on. 
+ if not sngl_col_vals['snr'][ifo][1][idx]: + continue + triggered_ifos += [ifo] + event_id = lsctables.SnglInspiralID(sngl_event_count) + sngl_event_count += 1 + sngl = return_empty_sngl() + sngl.event_id = event_id + sngl.ifo = ifo + net_snrsq += sngl_col_vals['snr'][ifo][0][idx]**2 + for name in sngl_col_names: + val = sngl_col_vals[name][ifo][0][idx] + if name == 'end_time': + sngl.end = LIGOTimeGPS(val) + elif name == 'chisq': + # Use reduced chisquared to be consistent with Live + dof = 2. * sngl_col_vals['chisq_dof'][ifo][0][idx] - 2. + sngl.chisq = val / dof + else: + setattr(sngl, name, val) + for name in bank_col_names: + val = bank_col_vals[name][idx] + setattr(sngl, name, val) + sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta( + sngl.mass1, sngl.mass2) + sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta( + sngl.mass1, sngl.mass2) + sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr + # If exact match is not used, get masses from single triggers + sngl_mchirps += [sngl.mchirp] + sngl_mtots += [sngl.mtotal] + + sngl_inspiral_table.append(sngl) + + # Set up coinc_map entry + coinc_map_row = lsctables.CoincMap() + coinc_map_row.table_name = 'sngl_inspiral' + coinc_map_row.coinc_event_id = coinc_id + coinc_map_row.event_id = event_id + coinc_event_map_table.append(coinc_map_row) + + # Take the mean if exact match is not used + sngl_combined_mchirp = np.mean(sngl_mchirps) + sngl_combined_mtot = np.mean(sngl_mtots) + + # Set up coinc inspiral and coinc event tables + coinc_event_row = lsctables.Coinc() + coinc_inspiral_row = lsctables.CoincInspiral() + coinc_event_row.coinc_def_id = coinc_def_id + coinc_event_row.nevents = len(triggered_ifos) + # NB, `coinc_event_row.instruments = triggered_ifos does not give a + # correct result with ligo.lw 1.7.1 + coinc_event_row.instruments = ','.join(sorted(triggered_ifos)) + coinc_inspiral_row.instruments = triggered_ifos + coinc_event_row.time_slide_id = time_slide_id + coinc_event_row.process_id = proc_id + coinc_event_row.coinc_event_id = coinc_id + coinc_inspiral_row.coinc_event_id = coinc_id + coinc_inspiral_row.mchirp = sngl_combined_mchirp + coinc_inspiral_row.mass = sngl_combined_mtot + coinc_inspiral_row.end = LIGOTimeGPS(coinc_event_vals['time'][idx]) + coinc_inspiral_row.snr = net_snrsq**0.5 + coinc_inspiral_row.false_alarm_rate = coinc_event_vals['fap'][idx] + coinc_inspiral_row.combined_far = 1./coinc_event_vals['ifar'][idx] + # Transform to Hz + coinc_inspiral_row.combined_far = \ + conversions.sec_to_year(coinc_inspiral_row.combined_far) + coinc_event_row.likelihood = coinc_event_vals['stat'][idx] + coinc_inspiral_row.minimum_duration = 0. + coinc_event_table.append(coinc_event_row) + coinc_inspiral_table.append(coinc_inspiral_row) + + outdoc.childNodes[0].appendChild(coinc_def_table) + outdoc.childNodes[0].appendChild(coinc_event_table) + outdoc.childNodes[0].appendChild(coinc_event_map_table) + outdoc.childNodes[0].appendChild(time_slide_table) + outdoc.childNodes[0].appendChild(coinc_inspiral_table) + outdoc.childNodes[0].appendChild(sngl_inspiral_table) + + ligolw_utils.write_filename(outdoc, file_name)
+ + +
+[docs] + def to_coinc_hdf_object(self, file_name): + ofd = HFile(file_name,'w') + + # Some fields are special cases + logger.info("Outputting search results") + time = self.get_end_time() + # time will be used later to determine active ifos + ofd['time'] = time + + if self._inclusive: + ofd['ifar'] = self.get_coincfile_array('ifar') + ofd['p_value'] = self.get_coincfile_array('fap') + + ofd['ifar_exclusive'] = self.get_coincfile_array('ifar_exc') + ofd['p_value_exclusive'] = self.get_coincfile_array('fap_exc') + + # Coinc fields + for field in ['stat']: + ofd[field] = self.get_coincfile_array(field) + + logger.info("Outputting template information") + # Bank fields + for field in ['mass1','mass2','spin1z','spin2z']: + ofd[field] = self.get_bankfile_array(field) + + mass1 = self.get_bankfile_array('mass1') + mass2 = self.get_bankfile_array('mass2') + ofd['chirp_mass'], _ = pnutils.mass1_mass2_to_mchirp_eta(mass1, mass2) + + logger.info("Outputting single-trigger information") + logger.info("reduced chisquared") + chisq_vals_valid = self.get_snglfile_array_dict('chisq') + chisq_dof_vals_valid = self.get_snglfile_array_dict('chisq_dof') + for ifo in self.ifos: + chisq_vals = chisq_vals_valid[ifo][0] + chisq_valid = chisq_vals_valid[ifo][1] + chisq_dof_vals = chisq_dof_vals_valid[ifo][0] + rchisq = chisq_vals / (2. * chisq_dof_vals - 2.) + rchisq[np.logical_not(chisq_valid)] = -1. + ofd[ifo + '_chisq'] = rchisq + + # Single-detector fields + for field in ['sg_chisq', 'end_time', 'sigmasq', + 'psd_var_val']: + logger.info(field) + try: + vals_valid = self.get_snglfile_array_dict(field) + except KeyError: + logger.info("%s is not present in the " + "single-detector files", field) + + for ifo in self.ifos: + # Some of the values will not be valid for all IFOs, + # the `valid` parameter out of get_snglfile_array_dict + # tells us this, and we set the values to -1 + vals = vals_valid[ifo][0] + valid = vals_valid[ifo][1] + vals[np.logical_not(valid)] = -1. + ofd[f'{ifo}_{field}'] = vals + + snr_vals_valid = self.get_snglfile_array_dict('snr') + network_snr_sq = np.zeros_like(snr_vals_valid[self.ifos[0]][0]) + for ifo in self.ifos: + vals = snr_vals_valid[ifo][0] + valid = snr_vals_valid[ifo][1] + vals[np.logical_not(valid)] = -1. + ofd[ifo + '_snr'] = vals + network_snr_sq[valid] += vals[valid] ** 2.0 + ofd['network_snr'] = np.sqrt(network_snr_sq) + + logger.info("Triggered detectors") + # Create a n_ifos by n_events matrix, with the ifo letter if the + # event contains a trigger from the ifo, empty string if not + triggered_matrix = [[ifo[0] if v else '' + for v in snr_vals_valid[ifo][1]] + for ifo in self.ifos] + # Combine the ifo letters to make a single string per event + triggered_detectors = [''.join(triggered).encode('ascii') + for triggered in zip(*triggered_matrix)] + ofd.create_dataset('trig', data=triggered_detectors, + dtype='<S3') + + logger.info("active detectors") + # Create a n_ifos by n_events matrix, with the ifo letter if the + # ifo was active at the event time, empty string if not + active_matrix = [[ifo[0] if t in self.active_segments[ifo] + else '' for t in time] + for ifo in self.ifos] + # Combine the ifo letters to make a single string per event + active_detectors = [''.join(active_at_time).encode('ascii') + for active_at_time in zip(*active_matrix)] + ofd.create_dataset('obs', data=active_detectors, + dtype='<S3') + + ofd.close()
+
+ + + +
+[docs] +class ReadByTemplate(object): + # Default assignment to {} is OK for a variable used only in __init__ + def __init__(self, filename, bank=None, segment_name=None, veto_files=None, + gating_veto_windows={}): + self.filename = filename + self.file = HFile(filename, 'r') + self.ifo = tuple(self.file.keys())[0] + self.valid = None + self.bank = HFile(bank, 'r') if bank else {} + + # Determine the segments which define the boundaries of valid times + # to use triggers + key = '%s/search/' % self.ifo + s, e = self.file[key + 'start_time'][:], self.file[key + 'end_time'][:] + self.segs = veto.start_end_to_segments(s, e).coalesce() + if segment_name is None: + segment_name = [] + if veto_files is None: + veto_files = [] + for vfile, name in zip(veto_files, segment_name): + veto_segs = veto.select_segments_by_definer(vfile, ifo=self.ifo, + segment_name=name) + self.segs = (self.segs - veto_segs).coalesce() + if self.ifo in gating_veto_windows: + gating_veto = gating_veto_windows[self.ifo].split(',') + gveto_before = float(gating_veto[0]) + gveto_after = float(gating_veto[1]) + if gveto_before > 0 or gveto_after < 0: + raise ValueError("Gating veto window values must be negative " + "before gates and positive after gates.") + if not (gveto_before == 0 and gveto_after == 0): + autogate_times = np.unique( + self.file[self.ifo + '/gating/auto/time'][:]) + if self.ifo + '/gating/file' in self.file: + detgate_times = self.file[self.ifo + '/gating/file/time'][:] + else: + detgate_times = [] + gate_times = np.concatenate((autogate_times, detgate_times)) + gating_veto_segs = veto.start_end_to_segments( + gate_times + gveto_before, + gate_times + gveto_after + ).coalesce() + self.segs = (self.segs - gating_veto_segs).coalesce() + self.valid = veto.segments_to_start_end(self.segs) + +
+[docs] + def get_data(self, col, num): + """Get a column of data for template with id 'num'. + + Parameters + ---------- + col: str + Name of column to read + num: int + The template id to read triggers for + + Returns + ------- + data: numpy.ndarray + The requested column of data + """ + ref = self.file['%s/%s_template' % (self.ifo, col)][num] + return self.file['%s/%s' % (self.ifo, col)][ref]
+ + +
+[docs]
+    def set_template(self, num):
+        """Set the active template to read from.
+
+        Parameters
+        ----------
+        num: int
+            The template id to read triggers for.
+
+        Returns
+        -------
+        trigger_id: numpy.ndarray
+            The indices of this template's triggers.
+        """
+        self.template_num = num
+        times = self.get_data('end_time', num)
+
+        # Determine which of these template's triggers are kept after
+        # applying vetoes
+        if self.valid:
+            self.keep = veto.indices_within_times(times, self.valid[0],
+                                                  self.valid[1])
+#            logger.info('applying vetoes')
+        else:
+            self.keep = np.arange(0, len(times))
+
+        if self.bank != {}:
+            self.param = {}
+            if 'parameters' in self.bank.attrs:
+                for col in self.bank.attrs['parameters']:
+                    self.param[col] = self.bank[col][self.template_num]
+            else:
+                for col in self.bank:
+                    self.param[col] = self.bank[col][self.template_num]
+
+        # Calculate the trigger id by adding the relative offset in self.keep
+        # to the absolute beginning index of this template's triggers stored
+        # in 'template_boundaries'
+        trigger_id = self.keep + \
+            self.file['%s/template_boundaries' % self.ifo][num]
+        return trigger_id
+ + + def __getitem__(self, col): + """ Return the column of data for current active template after + applying vetoes + + Parameters + ---------- + col: str + Name of column to read + + Returns + ------- + data: numpy.ndarray + The requested column of data + """ + if self.template_num is None: + raise ValueError('You must call set_template to first pick the ' + 'template to read data from') + data = self.get_data(col, self.template_num) + data = data[self.keep] if self.valid else data + return data
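+
+# A minimal usage sketch of ReadByTemplate (illustrative; the file names are
+# hypothetical), using set_template and __getitem__ above:
+#
+#   >>> from pycbc.io.hdf import ReadByTemplate
+#   >>> trigs = ReadByTemplate('H1-HDF_TRIGGER_MERGE.hdf',
+#   ...                        bank='H1L1-BANK2HDF.hdf')
+#   >>> tids = trigs.set_template(0)   # activate template 0, get trigger ids
+#   >>> snr = trigs['snr']             # vetoed SNR column for that template
+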
+ + + +chisq_choices = ['traditional', 'cont', 'bank', 'max_cont_trad', 'sg', + 'max_bank_cont', 'max_bank_trad', 'max_bank_cont_trad'] + +
+[docs] +def get_chisq_from_file_choice(hdfile, chisq_choice): + f = hdfile + if chisq_choice in ['traditional','max_cont_trad', 'max_bank_trad', + 'max_bank_cont_trad']: + trad_chisq = f['chisq'][:] + # We now need to handle the case where chisq is not actually calculated + # 0 is used as a sentinel value + trad_chisq_dof = f['chisq_dof'][:] + trad_chisq /= (trad_chisq_dof * 2 - 2) + if chisq_choice in ['cont', 'max_cont_trad', 'max_bank_cont', + 'max_bank_cont_trad']: + cont_chisq = f['cont_chisq'][:] + cont_chisq_dof = f['cont_chisq_dof'][:] + cont_chisq /= cont_chisq_dof + if chisq_choice in ['bank', 'max_bank_cont', 'max_bank_trad', + 'max_bank_cont_trad']: + bank_chisq = f['bank_chisq'][:] + bank_chisq_dof = f['bank_chisq_dof'][:] + bank_chisq /= bank_chisq_dof + if chisq_choice == 'sg': + chisq = f['sg_chisq'][:] + elif chisq_choice == 'traditional': + chisq = trad_chisq + elif chisq_choice == 'cont': + chisq = cont_chisq + elif chisq_choice == 'bank': + chisq = bank_chisq + elif chisq_choice == 'max_cont_trad': + chisq = np.maximum(trad_chisq, cont_chisq) + elif chisq_choice == 'max_bank_cont': + chisq = np.maximum(bank_chisq, cont_chisq) + elif chisq_choice == 'max_bank_trad': + chisq = np.maximum(bank_chisq, trad_chisq) + elif chisq_choice == 'max_bank_cont_trad': + chisq = np.maximum(np.maximum(bank_chisq, cont_chisq), trad_chisq) + else: + err_msg = "Do not recognize --chisq-choice %s" % chisq_choice + raise ValueError(err_msg) + return chisq
+ + +
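+
+# Illustrative call (sketch): ``hdfile`` is assumed to be a group holding the
+# chisq datasets at its top level, e.g. the detector group of a merged
+# single-detector trigger file (hypothetical file name):
+#
+#   >>> from pycbc.io.hdf import HFile, get_chisq_from_file_choice
+#   >>> with HFile('H1-HDF_TRIGGER_MERGE.hdf', 'r') as f:
+#   ...     chisq = get_chisq_from_file_choice(f['H1'], 'traditional')
+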
+[docs] +def save_dict_to_hdf5(dic, filename): + """ + Parameters + ---------- + dic: + python dictionary to be converted to hdf5 format + filename: + desired name of hdf5 file + """ + with HFile(filename, 'w') as h5file: + recursively_save_dict_contents_to_group(h5file, '/', dic)
+ + +
+[docs] +def recursively_save_dict_contents_to_group(h5file, path, dic): + """ + Parameters + ---------- + h5file: + h5py file to be written to + path: + path within h5py file to saved dictionary + dic: + python dictionary to be converted to hdf5 format + """ + for key, item in dic.items(): + if isinstance(item, (np.ndarray, np.int64, np.float64, str, int, float, + bytes, tuple, list)): + h5file[path + str(key)] = item + elif isinstance(item, dict): + recursively_save_dict_contents_to_group(h5file, path + key + '/', item) + else: + raise ValueError('Cannot save %s type' % type(item))
+ + +
+[docs] +def load_hdf5_to_dict(h5file, path): + """ + Parameters + ---------- + h5file: + h5py file to be loaded as a dictionary + path: + path within h5py file to load: '/' for the whole h5py file + + Returns + ------- + dic: + dictionary with hdf5 file group content + """ + dic = {} + for key, item in h5file[path].items(): + if isinstance(item, h5py.Dataset): + dic[key] = item[()] + elif isinstance(item, h5py.Group): + dic[key] = load_hdf5_to_dict(h5file, path + key + '/') + else: + raise ValueError('Cannot load %s type' % type(item)) + return dic
+ + +
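+
+# Round-trip sketch for save_dict_to_hdf5 and load_hdf5_to_dict above
+# (hypothetical output path):
+#
+#   >>> import numpy as np
+#   >>> from pycbc.io.hdf import HFile, save_dict_to_hdf5, load_hdf5_to_dict
+#   >>> d = {'a': np.arange(3), 'meta': {'name': 'demo', 'snr': 5.5}}
+#   >>> save_dict_to_hdf5(d, 'demo_dict.hdf')
+#   >>> with HFile('demo_dict.hdf', 'r') as f:
+#   ...     d2 = load_hdf5_to_dict(f, '/')
+#   >>> d2['meta']['snr']    # -> 5.5
+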
+[docs] +def combine_and_copy(f, files, group): + """ Combine the same column from multiple files and save to a third""" + # ensure that the files input is stable for iteration order + assert isinstance(files, (list, tuple)) + f[group] = np.concatenate([fi[group][:] if group in fi else \ + np.array([], dtype=np.uint32) for fi in files])
+ + +
+[docs] +def name_all_datasets(files): + assert isinstance(files, (list, tuple)) + datasets = [] + for fi in files: + datasets += get_all_subkeys(fi, '/') + return set(datasets)
+ + +
+[docs] +def get_all_subkeys(grp, key): + subkey_list = [] + subkey_start = key + if key == '': + grpk = grp + else: + grpk = grp[key] + for sk in grpk.keys(): + path = subkey_start + '/' + sk + if isinstance(grp[path], h5py.Dataset): + subkey_list.append(path.lstrip('/')) + else: + subkey_list += get_all_subkeys(grp, path) + # returns an empty list if there is no dataset or subgroup within the group + return subkey_list
+ + +# +# ============================================================================= +# +# Checkpointing utilities +# +# ============================================================================= +# + + +
+[docs] +def dump_state(state, fp, path=None, dsetname='state', + protocol=pickle.HIGHEST_PROTOCOL): + """Dumps the given state to an hdf5 file handler. + + The state is stored as a raw binary array to ``{path}/{dsetname}`` in the + given hdf5 file handler. If a dataset with the same name and path is + already in the file, the dataset will be resized and overwritten with the + new state data. + + Parameters + ---------- + state : any picklable object + The sampler state to dump to file. Can be the object returned by + any of the samplers' `.state` attribute (a dictionary of dictionaries), + or any picklable object. + fp : h5py.File + An open hdf5 file handler. Must have write capability enabled. + path : str, optional + The path (group name) to store the state dataset to. Default (None) + will result in the array being stored to the top level. + dsetname : str, optional + The name of the dataset to store the binary array to. Default is + ``state``. + protocol : int, optional + The protocol version to use for pickling. See the :py:mod:`pickle` + module for more details. + """ + memfp = BytesIO() + pickle.dump(state, memfp, protocol=protocol) + dump_pickle_to_hdf(memfp, fp, path=path, dsetname=dsetname)
+ + + +
+[docs] +def dump_pickle_to_hdf(memfp, fp, path=None, dsetname='state'): + """Dumps pickled data to an hdf5 file object. + + Parameters + ---------- + memfp : file object + Bytes stream of pickled data. + fp : h5py.File + An open hdf5 file handler. Must have write capability enabled. + path : str, optional + The path (group name) to store the state dataset to. Default (None) + will result in the array being stored to the top level. + dsetname : str, optional + The name of the dataset to store the binary array to. Default is + ``state``. + """ + memfp.seek(0) + bdata = np.frombuffer(memfp.read(), dtype='S1') + if path is not None: + dsetname = path + '/' + dsetname + if dsetname not in fp: + fp.create_dataset(dsetname, shape=bdata.shape, maxshape=(None,), + dtype=bdata.dtype) + elif bdata.size != fp[dsetname].shape[0]: + fp[dsetname].resize((bdata.size,)) + fp[dsetname][:] = bdata
+ + + +
+[docs] +def load_state(fp, path=None, dsetname='state'): + """Loads a sampler state from the given hdf5 file object. + + The sampler state is expected to be stored as a raw bytes array which can + be loaded by pickle. + + Parameters + ---------- + fp : h5py.File + An open hdf5 file handler. + path : str, optional + The path (group name) that the state data is stored to. Default (None) + is to read from the top level. + dsetname : str, optional + The name of the dataset that the state data is stored to. Default is + ``state``. + """ + if path is not None: + fp = fp[path] + bdata = fp[dsetname][()].tobytes() + return pickle.load(BytesIO(bdata))
+ + + +__all__ = ('HFile', 'DictArray', 'StatmapData', 'MultiifoStatmapData', + 'FileData', 'DataFromFiles', 'SingleDetTriggers', + 'ForegroundTriggers', 'ReadByTemplate', 'chisq_choices', + 'get_chisq_from_file_choice', 'save_dict_to_hdf5', + 'recursively_save_dict_contents_to_group', 'load_hdf5_to_dict', + 'combine_and_copy', 'name_all_datasets', 'get_all_subkeys', + 'dump_state', 'dump_pickle_to_hdf', 'load_state') +
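+
+# Checkpointing round-trip sketch for dump_state and load_state above
+# (hypothetical file and group names):
+#
+#   >>> from pycbc.io.hdf import HFile, dump_state, load_state
+#   >>> state = {'iteration': 10, 'rng_state': 42}
+#   >>> with HFile('checkpoint.hdf', 'a') as fp:
+#   ...     dump_state(state, fp, path='sampler_info')
+#   >>> with HFile('checkpoint.hdf', 'r') as fp:
+#   ...     load_state(fp, path='sampler_info')
+#   {'iteration': 10, 'rng_state': 42}
+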
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/io/ligolw.html b/latest/html/_modules/pycbc/io/ligolw.html new file mode 100644 index 00000000000..0400d8e4e11 --- /dev/null +++ b/latest/html/_modules/pycbc/io/ligolw.html @@ -0,0 +1,501 @@ + + + + + + pycbc.io.ligolw — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.io.ligolw

+# Copyright (C) 2020 Leo Singer, 2021 Tito Dal Canton
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""Tools for dealing with LIGOLW XML files."""
+
+import os
+import sys
+import numpy
+from ligo.lw import lsctables
+from ligo.lw import ligolw
+from ligo.lw.ligolw import Param, LIGOLWContentHandler \
+    as OrigLIGOLWContentHandler
+from ligo.lw.lsctables import TableByName
+from ligo.lw.table import Column, TableStream
+from ligo.lw.types import FormatFunc, FromPyType, ToPyType
+from ligo.lw.utils import process as ligolw_process
+from ligo.lw.param import Param as LIGOLWParam
+from ligo.lw.array import Array as LIGOLWArray
+import pycbc.version as pycbc_version
+
+
+__all__ = (
+    'default_null_value',
+    'return_empty_sngl',
+    'return_search_summary',
+    'create_process_table',
+    'legacy_row_id_converter',
+    'get_table_columns',
+    'LIGOLWContentHandler'
+)
+
+ROWID_PYTYPE = int
+ROWID_TYPE = FromPyType[ROWID_PYTYPE]
+ROWID_FORMATFUNC = FormatFunc[ROWID_TYPE]
+IDTypes = set([u"ilwd:char", u"ilwd:char_u"])
+
+
+
+[docs] +def default_null_value(col_name, col_type): + """ + Associate a sensible "null" default value to a given LIGOLW column type. + """ + if col_type in ['real_4', 'real_8']: + return 0. + if col_type in ['int_4s', 'int_8s']: + # this case includes row IDs + return 0 + if col_type == 'lstring': + return '' + raise NotImplementedError(('Do not know how to initialize column ' + '{} of type {}').format(col_name, col_type))
+ + +
+[docs] +def return_empty_sngl(nones=False): + """ + Function to create a SnglInspiral object where all columns are populated + but all are set to values that test False (ie. strings to '', floats/ints + to 0, ...). This avoids errors when you try to create a table containing + columns you don't care about, but which still need populating. NOTE: This + will also produce a process_id and event_id with 0 values. For most + applications these should be set to their correct values. + + Parameters + ---------- + nones : bool (False) + If True, just set all columns to None. + + Returns + -------- + lsctables.SnglInspiral + The "empty" SnglInspiral object. + """ + + sngl = lsctables.SnglInspiral() + cols = lsctables.SnglInspiralTable.validcolumns + for entry in cols: + col_name = Column.ColumnName(entry) + value = None if nones else default_null_value(col_name, cols[entry]) + setattr(sngl, col_name, value) + return sngl
+ + +
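+
+# For example (sketch), the returned row can then be customised field by
+# field:
+#
+#   >>> from pycbc.io.ligolw import return_empty_sngl
+#   >>> sngl = return_empty_sngl()
+#   >>> sngl.snr, sngl.ifo      # numeric columns -> 0, string columns -> ''
+#   (0.0, '')
+#   >>> sngl.mass1, sngl.mass2 = 1.4, 1.4
+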
+[docs] +def return_search_summary(start_time=0, end_time=0, nevents=0, ifos=None): + """ + Function to create a SearchSummary object where all columns are populated + but all are set to values that test False (ie. strings to '', floats/ints + to 0, ...). This avoids errors when you try to create a table containing + columns you don't care about, but which still need populating. NOTE: This + will also produce a process_id with 0 values. For most applications these + should be set to their correct values. + + It then populates columns if given them as options. + + Returns + -------- + lsctables.SeachSummary + The "empty" SearchSummary object. + """ + if ifos is None: + ifos = [] + + # create an empty search summary + search_summary = lsctables.SearchSummary() + cols = lsctables.SearchSummaryTable.validcolumns + for entry in cols: + col_name = Column.ColumnName(entry) + value = default_null_value(col_name, cols[entry]) + setattr(search_summary, col_name, value) + + # fill in columns + if ifos: + search_summary.instruments = ifos + if nevents: + search_summary.nevents = nevents + if start_time and end_time: + search_summary.in_start_time = int(start_time) + search_summary.in_start_time_ns = int(start_time % 1 * 1e9) + search_summary.in_end_time = int(end_time) + search_summary.in_end_time_ns = int(end_time % 1 * 1e9) + search_summary.out_start_time = int(start_time) + search_summary.out_start_time_ns = int(start_time % 1 * 1e9) + search_summary.out_end_time = int(end_time) + search_summary.out_end_time_ns = int(end_time % 1 * 1e9) + + return search_summary
+ + +
+[docs] +def create_process_table(document, program_name=None, detectors=None, + comment=None, options=None): + """Create a LIGOLW process table with sane defaults, add it to a LIGOLW + document, and return it. + """ + + if program_name is None: + program_name = os.path.basename(sys.argv[0]) + if options is None: + options = {} + + # ligo.lw does not like `cvs_entry_time` being an empty string + cvs_entry_time = pycbc_version.date or None + + opts = options.copy() + key_del = [] + for key, value in opts.items(): + if type(value) not in tuple(FromPyType.keys()): + key_del.append(key) + if len(key_del) != 0: + for key in key_del: + opts.pop(key) + + process = ligolw_process.register_to_xmldoc( + document, + program_name, + opts, + version=pycbc_version.version, + cvs_repository='pycbc/'+pycbc_version.git_branch, + cvs_entry_time=cvs_entry_time, + instruments=detectors, + comment=comment + ) + return process
+ + +
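+
+# Sketch of the usual setup, mirroring how ForegroundTriggers.to_coinc_xml_object
+# in pycbc.io.hdf builds its output document:
+#
+#   >>> from ligo.lw import ligolw
+#   >>> from pycbc.io.ligolw import create_process_table
+#   >>> xmldoc = ligolw.Document()
+#   >>> lw = xmldoc.appendChild(ligolw.LIGO_LW())
+#   >>> proc = create_process_table(xmldoc, program_name='pycbc_example',
+#   ...                             detectors=['H1', 'L1'])
+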
+[docs] +def legacy_row_id_converter(ContentHandler): + """Convert from old-style to new-style row IDs on the fly. + + This is loosely adapted from :func:`ligo.lw.utils.ilwd.strip_ilwdchar`. + + Notes + ----- + When building a ContentHandler, this must be the _outermost_ decorator, + outside of :func:`ligo.lw.lsctables.use_in`, :func:`ligo.lw.param.use_in`, + or :func:`ligo.lw.table.use_in`. + """ + + def endElementNS(self, uri_localname, qname, + __orig_endElementNS=ContentHandler.endElementNS): + """Convert values of <Param> elements from ilwdchar to int.""" + if isinstance(self.current, Param) and self.current.Type in IDTypes: + old_type = ToPyType[self.current.Type] + old_val = str(old_type(self.current.pcdata)) + new_value = ROWID_PYTYPE(old_val.split(":")[-1]) + self.current.Type = ROWID_TYPE + self.current.pcdata = ROWID_FORMATFUNC(new_value) + __orig_endElementNS(self, uri_localname, qname) + + remapped = {} + + def startColumn(self, parent, attrs, + __orig_startColumn=ContentHandler.startColumn): + """Convert types in <Column> elements from ilwdchar to int. + + Notes + ----- + This method is adapted from + :func:`ligo.lw.utils.ilwd.strip_ilwdchar`. + + """ + result = __orig_startColumn(self, parent, attrs) + + # If this is an ilwdchar column, then create a function to convert its + # rows' values for use in the startStream method below. + if result.Type in IDTypes: + old_type = ToPyType[result.Type] + + def converter(old_value): + return ROWID_PYTYPE(str(old_type(old_value)).split(":")[-1]) + + remapped[(id(parent), result.Name)] = converter + result.Type = ROWID_TYPE + + # If this is an ilwdchar column, then normalize the column name. + if parent.Name in TableByName: + validcolumns = TableByName[parent.Name].validcolumns + if result.Name not in validcolumns: + stripped_column_to_valid_column = { + Column.ColumnName(name): name for name in validcolumns} + if result.Name in stripped_column_to_valid_column: + result.setAttribute( + 'Name', stripped_column_to_valid_column[result.Name]) + + return result + + def startStream(self, parent, attrs, + __orig_startStream=ContentHandler.startStream): + """Convert values in table <Stream> elements from ilwdchar to int. + + Notes + ----- + This method is adapted from + :meth:`ligo.lw.table.TableStream.config`. + + """ + result = __orig_startStream(self, parent, attrs) + if isinstance(result, TableStream): + loadcolumns = set(parent.columnnames) + if parent.loadcolumns is not None: + # FIXME: convert loadcolumns attributes to sets to + # avoid the conversion. + loadcolumns &= set(parent.loadcolumns) + result._tokenizer.set_types([ + (remapped.pop((id(parent), colname), pytype) + if colname in loadcolumns else None) + for pytype, colname + in zip(parent.columnpytypes, parent.columnnames)]) + return result + + ContentHandler.endElementNS = endElementNS + ContentHandler.startColumn = startColumn + ContentHandler.startStream = startStream + + return ContentHandler
+ + +def _build_series(series, dim_names, comment, delta_name, delta_unit): + Attributes = ligolw.sax.xmlreader.AttributesImpl + elem = ligolw.LIGO_LW( + Attributes({'Name': str(series.__class__.__name__)})) + if comment is not None: + elem.appendChild(ligolw.Comment()).pcdata = comment + elem.appendChild(ligolw.Time.from_gps(series.epoch, 'epoch')) + elem.appendChild(LIGOLWParam.from_pyvalue('f0', series.f0, unit='s^-1')) + delta = getattr(series, delta_name) + if numpy.iscomplexobj(series.data.data): + data = numpy.row_stack(( + numpy.arange(len(series.data.data)) * delta, + series.data.data.real, + series.data.data.imag + )) + else: + data = numpy.row_stack(( + numpy.arange(len(series.data.data)) * delta, + series.data.data + )) + a = LIGOLWArray.build(series.name, data, dim_names=dim_names) + a.Unit = str(series.sampleUnits) + dim0 = a.getElementsByTagName(ligolw.Dim.tagName)[0] + dim0.Unit = delta_unit + dim0.Start = series.f0 + dim0.Scale = delta + elem.appendChild(a) + return elem + +def make_psd_xmldoc(psddict, xmldoc=None): + """Add a set of PSDs to a LIGOLW XML document. If the document is not + given, a new one is created first. + """ + xmldoc = ligolw.Document() if xmldoc is None else xmldoc.childNodes[0] + + # the PSDs must be children of a LIGO_LW with name "psd" + root_name = 'psd' + Attributes = ligolw.sax.xmlreader.AttributesImpl + lw = xmldoc.appendChild( + ligolw.LIGO_LW(Attributes({'Name': root_name}))) + + for instrument, psd in psddict.items(): + xmlseries = _build_series( + psd, + ('Frequency,Real', 'Frequency'), + None, + 'deltaF', + 's^-1' + ) + fs = lw.appendChild(xmlseries) + fs.appendChild(LIGOLWParam.from_pyvalue('instrument', instrument)) + return xmldoc + +def snr_series_to_xml(snr_series, document, sngl_inspiral_id): + """Save an SNR time series into an XML document, in a format compatible + with BAYESTAR. + """ + snr_lal = snr_series.lal() + snr_lal.name = 'snr' + snr_lal.sampleUnits = '' + snr_xml = _build_series( + snr_lal, + ('Time', 'Time,Real,Imaginary'), + None, + 'deltaT', + 's' + ) + snr_node = document.childNodes[-1].appendChild(snr_xml) + eid_param = LIGOLWParam.from_pyvalue('event_id', sngl_inspiral_id) + snr_node.appendChild(eid_param) + +
+[docs] +def get_table_columns(table): + """Return a list of columns that are present in the given table, in a + format that can be passed to `lsctables.New()`. + + The split on ":" is needed for columns like `process:process_id`, which + must be listed as `process:process_id` in `lsctables.New()`, but are + listed as just `process_id` in the `columnnames` attribute of the given + table. + """ + columns = [] + for col in table.validcolumns: + att = col.split(':')[-1] + if att in table.columnnames: + columns.append(col) + return columns
+ + + +
+[docs] +@legacy_row_id_converter +@lsctables.use_in +class LIGOLWContentHandler(OrigLIGOLWContentHandler): + "Dummy class needed for loading LIGOLW files"
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/io/live.html b/latest/html/_modules/pycbc/io/live.html new file mode 100644 index 00000000000..e2291646383 --- /dev/null +++ b/latest/html/_modules/pycbc/io/live.html @@ -0,0 +1,318 @@ + + + + + + pycbc.io.live — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.io.live

+import logging
+import os
+import pathlib
+import datetime
+import numpy
+
+from lal import gpstime as lalgps
+
+logger = logging.getLogger('pycbc.io.live')
+
+
+def maximum_string(numbers):
+    """
+    Find the maximum possible length string to match
+    all values between two numbers
+
+    Parameters
+    ----------
+    numbers : list of integers
+        A list of integers from which to determine the longest
+        common string prefix. E.g. '12345', '12346', '12356'
+        returns '123'
+    """
+    # The max length of the number will be the integer above log10
+    # of the biggest number
+    maxlen = int(numpy.ceil(numpy.log10(max(numbers))))
+    # Convert the numbers to (possibly leading zero-padded) strings
+    strings = [f"{{n:0{maxlen:d}d}}".format(n=n) for n in numbers]
+    # Count how many digits are the same:
+    n_digits = 0
+    for str_digit in zip(*strings):
+        if len(numpy.unique(str_digit)) == 1:
+            # This digit is the same for all numbers
+            n_digits += 1
+        else:
+            break
+    return strings[0][:n_digits]
+
+
+def filter_file(filename, start_time, end_time):
+    """
+    Determine from the filename whether the file lies within the
+    start and end times
+
+    Parameters
+    ----------
+    filename : string
+        Filename which matches the format
+        {id_string}-{start_time}-{duration}.hdf
+    start_time : float
+        Start of search window, i.e. GPS time of when the
+        file cannot end before
+    end_time : float
+        End of search window, i.e. GPS time of when the
+        file cannot start after
+
+    Returns
+    -------
+    boolean
+        Whether any part of the file lies within the start/end times
+    """
+    # FIX ME eventually - this uses the gps time and duration from the filename
+    # Is there a better way? (i.e. trigger gps times in the file or
+    # add an attribute)
+    fend = filename.split('-')[-2:]
+    file_start = float(fend[0])
+    duration = float(fend[1][:-4])
+
+    return ((file_start + duration) >= start_time) and (file_start <= end_time)
+
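+
+# For example (sketch; hypothetical file name following the
+# {id_string}-{start_time}-{duration}.hdf convention):
+#
+#   >>> filter_file('H1L1V1-Live-1400000000-8.hdf', 1400000004, 1400000010)
+#   True
+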
+
+
+[docs] +def add_live_trigger_selection_options(parser): + """ + Add options required for obtaining the right set of PyCBC live triggers + into an argument parser + """ + finding_group = parser.add_argument_group('Trigger Finding') + finding_group.add_argument( + "--trigger-directory", + metavar="PATH", + required=True, + help="Directory containing trigger files, directory " + "can contain subdirectories. Required." + ) + finding_group.add_argument( + "--gps-start-time", + type=int, + required=True, + help="Start time of the analysis. Integer, required" + ) + finding_group.add_argument( + "--gps-end-time", + type=int, + required=True, + help="End time of the analysis. Integer, required" + ) + finding_group.add_argument( + "--date-directories", + action="store_true", + help="Indicate if the trigger files are stored in " + "directories by date." + ) + default_dd_format = "%Y_%m_%d" + finding_group.add_argument( + "--date-directory-format", + default=default_dd_format, + help="Format of date, see datetime strftime " + "documentation for details. Default: " + "%%Y_%%m_%%d" + ) + finding_group.add_argument( + "--file-identifier", + default="H1L1V1-Live", + help="String required in filename to be considered for " + "analysis. Default: 'H1L1V1-Live'." + )
+ + + +
+[docs] +def find_trigger_files(directory, gps_start_time, gps_end_time, + id_string='*', date_directories=False, + date_directory_format="%Y_%m_%d"): + """ + Find a list of PyCBC live trigger files which are between the gps + start and end times given + """ + + # Find the string at the start of the gps time which will match all + # files in this range - this helps to cut which ones we need to + # compare later + num_match = maximum_string([gps_start_time, gps_end_time]) + + # ** means recursive, so for large directories, this is expensive. + # It is not too bad if date_directories is set, as we don't waste time + # in directories where there cant be any files. + glob_string = f'**/*{id_string}*{num_match}*.hdf' + if date_directories: + # convert the GPS times into dates, and only use the directories + # of those dates to search + # Add a day on either side to ensure we get files which straddle + # the boundary + one_day = datetime.timedelta(days=1) + date_check = lalgps.gps_to_utc(gps_start_time) - one_day + date_end = lalgps.gps_to_utc(gps_end_time) + one_day + matching_files = [] + while date_check < date_end: + date_dir = date_check.strftime(date_directory_format) + subdir = os.path.join(directory, date_dir) + matching_files_gen = pathlib.Path(subdir).glob(glob_string) + matching_files += [f.as_posix() for f in matching_files_gen] + date_check += one_day + else: + # Grab all hdf files in the directory + matching_files_gen = pathlib.Path(directory).glob(glob_string) + matching_files = [f.as_posix() for f in matching_files_gen] + + # Is the file in the time window? + matching_files = [f for f in matching_files + if filter_file(f, gps_start_time, gps_end_time)] + + return sorted(matching_files)
+ + + +
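+
+# Illustrative call (sketch; the trigger directory is hypothetical):
+#
+#   >>> from pycbc.io.live import find_trigger_files
+#   >>> files = find_trigger_files('/data/pycbc_live/triggers',
+#   ...                            1400000000, 1400008192,
+#   ...                            id_string='H1L1V1-Live',
+#   ...                            date_directories=True)
+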
+[docs] +def find_trigger_files_from_cli(args): + """ + Wrapper around the find_trigger_files function to use when called using + options from the add_live_trigger_selection_options function + """ + return find_trigger_files( + args.trigger_directory, + args.gps_start_time, + args.gps_end_time, + id_string=args.file_identifier, + date_directories=args.date_directories, + date_directory_format=args.date_directory_format + )
+ + + +__all__ = [ + 'add_live_trigger_selection_options', + 'find_trigger_files', + 'find_trigger_files_from_cli', +] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/io/record.html b/latest/html/_modules/pycbc/io/record.html new file mode 100644 index 00000000000..f7669f8609c --- /dev/null +++ b/latest/html/_modules/pycbc/io/record.html @@ -0,0 +1,2111 @@ + + + + + + pycbc.io.record — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.io.record

+# Copyright (C) 2015  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                           Preamble
+#
+# =============================================================================
+#
+"""
+This module provides definitions of, and helper functions for, FieldArray.
+FieldArray is a wrapper of numpy recarrays with additional functionality
+useful for storing and retrieving data created by a search for gravitational
+waves.
+"""
+
+import types, re, copy, numpy, inspect
+from ligo.lw import types as ligolw_types
+from pycbc import coordinates, conversions, cosmology
+from pycbc.population import population_models
+from pycbc.waveform import parameters
+
+# what functions are given to the eval in FieldArray's __getitem__:
+_numpy_function_lib = {_x: _y for _x,_y in numpy.__dict__.items()
+                       if isinstance(_y, (numpy.ufunc, float))}
+
+#
+# =============================================================================
+#
+#                           Data type mappings
+#
+# =============================================================================
+#
+# add ligolw_types to numpy sctypeDict
+# but don't include bindings that numpy already defines
+numpy.sctypeDict.update({_k: _val
+                         for (_k, _val) in ligolw_types.ToNumPyType.items()
+                         if _k not in numpy.sctypeDict})
+
+# Annoyingly, numpy has no way to store NaNs in an integer field to indicate
+# the equivalent of None. This can be problematic for fields that store ids:
+# if an array has an id field with value 0, it isn't clear if this is because
+# the id is the first element, or if no id was set. To clear up the ambiguity,
+# we define here an integer to indicate 'id not set'.
+ID_NOT_SET = -1
+EMPTY_OBJECT = None
+VIRTUALFIELD_DTYPE = 'VIRTUAL'
+
+def set_default_empty(array):
+    if array.dtype.names is None:
+        # scalar dtype, just set
+        if array.dtype.str[1] == 'i':
+            # integer, set to ID_NOT_SET
+            array[:] = ID_NOT_SET
+        elif array.dtype.str[1] == 'O':
+            # object, set to EMPTY_OBJECT
+            array[:] = EMPTY_OBJECT
+    else:
+        for name in array.dtype.names:
+            set_default_empty(array[name])
+
+def default_empty(shape, dtype):
+    """Numpy's empty array can have random values in it. To prevent that, we
+    define here a default empty array. This default empty is a numpy.zeros
+    array, except that objects are set to None, and all ints to ID_NOT_SET.
+    """
+    default = numpy.zeros(shape, dtype=dtype)
+    set_default_empty(default)
+    return default
+
+# set default data types
+_default_types_status = {
+    'default_strlen': 50,
+    'ilwd_as_int': True,
+    'lstring_as_obj': False
+}
+
+def lstring_as_obj(true_or_false=None):
+    """Toggles whether lstrings should be treated as strings or as objects.
+    When FieldArrays is first loaded, the default is True.
+
+    Parameters
+    ----------
+    true_or_false : {None|bool}
+        Pass True to map lstrings to objects; False otherwise. If None
+        provided, just returns the current state.
+
+    Return
+    ------
+    current_stat : bool
+        The current state of lstring_as_obj.
+
+    Examples
+    --------
+    >>> from pycbc.io import FieldArray
+    >>> FieldArray.lstring_as_obj()
+        True
+    >>> FieldArray.FieldArray.from_arrays([numpy.zeros(10)], dtype=[('foo', 'lstring')])
+    FieldArray([(0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,),
+           (0.0,), (0.0,)],
+          dtype=[('foo', 'O')])
+    >>> FieldArray.lstring_as_obj(False)
+        False
+    >>> FieldArray.FieldArray.from_arrays([numpy.zeros(10)], dtype=[('foo', 'lstring')])
+    FieldArray([('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',),
+           ('0.0',), ('0.0',), ('0.0',), ('0.0',)],
+          dtype=[('foo', 'S50')])
+    """
+    if true_or_false is not None:
+        _default_types_status['lstring_as_obj'] = true_or_false
+        # update the sctypeDict
+        numpy.sctypeDict[u'lstring'] = numpy.object_ \
+            if _default_types_status['lstring_as_obj'] \
+            else 'S%i' % _default_types_status['default_strlen']
+    return _default_types_status['lstring_as_obj']
+
+def ilwd_as_int(true_or_false=None):
+    """Similar to lstring_as_obj, sets whether or not ilwd:chars should be
+    treated as strings or as ints. Default is True.
+    """
+    if true_or_false is not None:
+        _default_types_status['ilwd_as_int'] = true_or_false
+        numpy.sctypeDict[u'ilwd:char'] = int \
+            if _default_types_status['ilwd_as_int'] \
+            else 'S%i' % _default_types_status['default_strlen']
+    return _default_types_status['ilwd_as_int']
+
+def default_strlen(strlen=None):
+    """Sets the default string length for lstring and ilwd:char, if they are
+    treated as strings. Default is 50.
+    """
+    if strlen is not None:
+        _default_types_status['default_strlen'] = strlen
+        # update the sctypeDicts as needed
+        lstring_as_obj(_default_types_status['lstring_as_obj'])
+        ilwd_as_int(_default_types_status['ilwd_as_int'])
+    return _default_types_status['default_strlen']
+
+# set the defaults
+lstring_as_obj(True)
+ilwd_as_int(True)
+
+
+#
+# =============================================================================
+#
+#                           Helper functions
+#
+# =============================================================================
+#
+
+
+#
+#   Argument syntax parsing
+#
+# this parser will pull out subfields as separate identifiers from their parent
+# field; e.g., foo.bar --> ['foo', 'bar']
+_pyparser = re.compile(r'(?P<identifier>[\w_][\w\d_]*)')
+# this parser treats subfields as one identifier with their parent field;
+# e.g., foo.bar --> ['foo.bar']
+_fieldparser = re.compile(r'(?P<identifier>[\w_][.\w\d_]*)')
+def get_vars_from_arg(arg):
+    """Given a python string, gets the names of any identifiers use in it.
+    For example, if ``arg = '3*narf/foo.bar'``, this will return
+    ``set(['narf', 'foo', 'bar'])``.
+    """
+    return set(_pyparser.findall(arg))
+
+def get_fields_from_arg(arg):
+    """Given a python string, gets FieldArray field names used in it. This
+    differs from get_vars_from_arg in that any identifier with a '.' in it
+    will be treated as one identifier. For example, if
+    ``arg = '3*narf/foo.bar'``, this will return ``set(['narf', 'foo.bar'])``.
+    """
+    return set(_fieldparser.findall(arg))
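+
+# For example (a quick sketch of the difference between the two parsers;
+# sets are unordered, so the element order shown is arbitrary):
+#
+#   >>> get_vars_from_arg('narf*foo.bar + troz')
+#   {'narf', 'foo', 'bar', 'troz'}
+#   >>> get_fields_from_arg('narf*foo.bar + troz')
+#   {'narf', 'foo.bar', 'troz'}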
+
+# this parser looks for fields inside a class method function. This is done by
+# looking for variables that start with self.{x} or self["{x}"]; e.g.,
+# self.a.b*3 + self.c, self['a.b']*3 + self.c, and self.a.b*3 + self["c"] all
+# return set(['a.b', 'c']).
+_instfieldparser = re.compile(
+    r'''self(?:\.|(?:\[['"]))(?P<identifier>[\w_][.\w\d_]*)''')
+def get_instance_fields_from_arg(arg):
+    """Given a python string definining a method function on an instance of an
+    FieldArray, returns the field names used in it. This differs from
+    get_fields_from_arg in that it looks for variables that start with 'self'.
+    """
+    return set(_instfieldparser.findall(arg))
+
+def get_needed_fieldnames(arr, names):
+    """Given a FieldArray-like array and a list of names, determines what
+    fields are needed from the array so that using the names does not result
+    in an error.
+
+    Parameters
+    ----------
+    arr : instance of a FieldArray or similar
+        The array from which to determine what fields to get.
+    names : (list of) strings
+        A list of the names that are desired. The names may be either a field,
+        a virtualfield, a property, a method of ``arr``, or any function of
+        these. If a virtualfield/property or a method, the source code of that
+        property/method will be analyzed to pull out what fields are used in
+        it.
+
+    Returns
+    -------
+    set
+        The set of the fields needed to evaluate the names.
+    """
+    fieldnames = set([])
+    # we'll need the class that the array is an instance of to evaluate some
+    # things
+    cls = arr.__class__
+    if isinstance(names, str):
+        names = [names]
+    # parse names for variables, in case some of them are functions of fields
+    parsed_names = set([])
+    for name in names:
+        parsed_names.update(get_fields_from_arg(name))
+    # only include things that are in the array's namespace
+    names = list(parsed_names & (set(dir(arr)) | set(arr.fieldnames)))
+    for name in names:
+        if name in arr.fieldnames:
+            # is a field, just add the name
+            fieldnames.update([name])
+        else:
+            # the name is either a virtualfield, a method, or some other
+            # property; we need to evaluate the source code to figure out what
+            # fields we need
+            try:
+                # the underlying functions of properties need to be retrieved
+                # using their fget attribute
+                func = getattr(cls, name).fget
+            except AttributeError:
+                # no fget attribute, assume is an instance method
+                func = getattr(arr, name)
+            # evaluate the source code of the function
+            try:
+                sourcecode = inspect.getsource(func)
+            except TypeError:
+                # not a function, just pass
+                continue
+            # evaluate the source code for the fields
+            possible_fields = get_instance_fields_from_arg(sourcecode)
+            # some of the variables returned by possible fields may themselves
+            # be methods/properties that depend on other fields. For instance,
+            # mchirp relies on eta and mtotal, which each use mass1 and mass2;
+            # we therefore need to analyze each of the possible fields
+            fieldnames.update(get_needed_fieldnames(arr, possible_fields))
+    return fieldnames
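+
+# A rough sketch of how get_needed_fieldnames resolves a virtual field down
+# to the underlying fields it uses. ``Binary`` is a hypothetical subclass
+# (it must live in an importable module, since inspect.getsource needs to be
+# able to read its source code):
+#
+#   class Binary(FieldArray):
+#       @property
+#       def mtotal(self):
+#           return self['mass1'] + self['mass2']
+#
+#   >>> arr = Binary(3, dtype=[('mass1', float), ('mass2', float)])
+#   >>> sorted(get_needed_fieldnames(arr, 'mtotal'))
+#   ['mass1', 'mass2']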
+
+
+def get_dtype_descr(dtype):
+    """Numpy's ``dtype.descr`` will return empty void fields if a dtype has
+    offsets specified. This function tries to fix that by not including
+    fields that have no names and are void types.
+    """
+    dts = []
+    for dt in dtype.descr:
+        if (dt[0] == '' and dt[1][1] == 'V'):
+            continue
+
+        # Downstream code (numpy, etc.) can't handle metadata in the dtype
+        if isinstance(dt[1], tuple):
+            dt = (dt[0], dt[1][0])
+
+        dts.append(dt)
+    return dts
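+
+# For instance, a dtype with a gap between its fields carries a nameless void
+# padding entry in ``descr`` that this strips (a rough sketch; the padding
+# layout depends on how the dtype was constructed):
+#
+#   >>> dt = numpy.dtype({'names': ['a', 'b'], 'formats': [float, float],
+#   ...                   'offsets': [0, 16]})
+#   >>> dt.descr
+#   [('a', '<f8'), ('', '|V8'), ('b', '<f8')]
+#   >>> get_dtype_descr(dt)
+#   [('a', '<f8'), ('b', '<f8')]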
+
+
+def combine_fields(dtypes):
+    """Combines the fields in the list of given dtypes into a single dtype.
+
+    Parameters
+    ----------
+    dtypes : (list of) numpy.dtype(s)
+        Either a numpy.dtype, or a list of numpy.dtypes.
+
+    Returns
+    -------
+    numpy.dtype
+        A new dtype combining the fields in the list of dtypes.
+    """
+    if not isinstance(dtypes, list):
+        dtypes = [dtypes]
+    # Note: in case any of the dtypes have offsets, we won't include any fields
+    # that have no names and are void
+    new_dt = numpy.dtype([dt for dtype in dtypes \
+        for dt in get_dtype_descr(dtype)])
+    return new_dt
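+
+# For example (a minimal sketch; the field names here are purely
+# illustrative):
+#
+#   >>> dt1 = numpy.dtype([('mass1', float)])
+#   >>> dt2 = numpy.dtype([('mass2', float), ('spin1z', float)])
+#   >>> combine_fields([dt1, dt2])
+#   dtype([('mass1', '<f8'), ('mass2', '<f8'), ('spin1z', '<f8')])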
+
+
+def _ensure_array_list(arrays):
+    """Ensures that every element in a list is an instance of a numpy array."""
+    # Note: the isinstance test is needed below so that instances of FieldArray
+    # are not converted to numpy arrays
+    return [numpy.array(arr, ndmin=1) if not isinstance(arr, numpy.ndarray)
+            else arr for arr in arrays]
+
+
+def merge_arrays(merge_list, names=None, flatten=True, outtype=None):
+    """Merges the given arrays into a single array. The arrays must all have
+    the same shape. If one or more of the given arrays has multiple fields,
+    all of the fields will be included as separate fields in the new array.
+
+    Parameters
+    ----------
+    merge_list : list of arrays
+        The list of arrays to merge.
+    names : {None | sequence of strings}
+        Optional, the names of the fields in the output array. If flatten is
+        True, must be the same length as the total number of fields in
+        merge_list. Otherwise, must be the same length as the number of
+        arrays in merge_list.  If None provided, and flatten is True, names
+        used will be the same as the name of the fields in the given arrays.
+        If the datatype has no name, or flatten is False, the new field will
+        be `fi` where i is the index of the array in arrays.
+    flatten : bool
+        Make all of the fields in the given arrays separate fields in the
+        new array. Otherwise, each array will be added as a field. If an
+        array has fields, they will be subfields in the output array. Default
+        is True.
+    outtype : {None | class}
+        Cast the new array to the given type. Default is to return a
+        numpy structured array.
+
+    Returns
+    -------
+    new array : {numpy.ndarray | outtype}
+        A new array with all of the fields in all of the arrays merged into
+        a single array.
+    """
+    # make sure everything in merge_list is an array
+    merge_list = _ensure_array_list(merge_list)
+    if not all(merge_list[0].shape == arr.shape for arr in merge_list):
+        raise ValueError("all of the arrays in merge_list must have the " +
+            "same shape")
+    if flatten:
+        new_dt = combine_fields([arr.dtype for arr in merge_list])
+    else:
+        new_dt = numpy.dtype([('f%i' %ii, arr.dtype.descr) \
+            for ii,arr in enumerate(merge_list)])
+    new_arr = merge_list[0].__class__(merge_list[0].shape, dtype=new_dt)
+    # ii is a counter to keep track of which fields from the new array
+    # go with which arrays in merge list
+    ii = 0
+    for arr in merge_list:
+        if arr.dtype.names is None:
+            new_arr[new_dt.names[ii]] = arr
+            ii += 1
+        else:
+            for field in arr.dtype.names:
+                new_arr[field] = arr[field]
+                ii += 1
+    # set the names if desired
+    if names is not None:
+        new_arr.dtype.names = names
+    # ditto the outtype
+    if outtype is not None:
+        new_arr = new_arr.view(type=outtype)
+    return new_arr
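+
+# A rough usage sketch for merge_arrays (illustrative field names), merging a
+# structured array with a plain array into a single structured array:
+#
+#   >>> masses = numpy.zeros(3, dtype=[('mass1', float), ('mass2', float)])
+#   >>> snr = numpy.array([5., 6., 7.])
+#   >>> merged = merge_arrays([masses, snr], names=['mass1', 'mass2', 'snr'])
+#   >>> merged.dtype.names
+#   ('mass1', 'mass2', 'snr')
+#   >>> merged['snr']
+#   array([5., 6., 7.])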
+
+def add_fields(input_array, arrays, names=None, assubarray=False):
+    """Adds the given array(s) as new field(s) to the given input array.
+    Returns a new instance of the input_array with the new fields added.
+
+    Parameters
+    ----------
+    input_array : instance of a numpy.ndarray or numpy recarray
+        The array to add the fields to.
+    arrays : (list of) numpy array(s)
+        The arrays to add. If adding multiple arrays, must be a list;
+        if adding a single array, can just be that array.
+    names : (list of) strings
+        Optional, the name(s) of the new fields in the output array. If
+        adding multiple fields, must be a list of strings with the same
+        length as the list of arrays. If None provided, names used will
+        be the same as the name of the datatype in the given arrays.
+        If the datatype has no name, the new field will be ``'fi'`` where
+        i is the index of the array in arrays.
+    assubarray : bool
+        Add the list of arrays as a single subarray field. If True, and names
+        provided, names should be a string or a length-1 sequence. Default is
+        False, in which case each array will be added as a separate field.
+
+    Returns
+    -------
+    new_array : new instance of `input_array`
+        A copy of the `input_array` with the desired fields added.
+    """
+    if not isinstance(arrays, list):
+        arrays = [arrays]
+    # ensure that all arrays in arrays are arrays
+    arrays = _ensure_array_list(arrays)
+    # set the names
+    if names is not None:
+        if isinstance(names, str):
+            names = [names]
+        # check if any names are subarray names; if so, we have to add them
+        # separately
+        subarray_names = [name for name in names if len(name.split('.')) > 1]
+    else:
+        subarray_names = []
+    if any(subarray_names):
+        subarrays = [arrays[ii] for ii,name in enumerate(names) \
+            if name in subarray_names]
+        # group together by subarray
+        groups = {}
+        for name,arr in zip(subarray_names, subarrays):
+            key = name.split('.')[0]
+            subkey = '.'.join(name.split('.')[1:])
+            try:
+                groups[key].append((subkey, arr))
+            except KeyError:
+                groups[key] = [(subkey, arr)]
+        # now cycle over the groups, adding all of the fields in each group
+        # as a subarray
+        for group_name in groups:
+            # we'll create a dictionary out of the subarray field names ->
+            # subarrays
+            thisdict = dict(groups[group_name])
+            # check if the input array has this field; if so, remove it, then
+            # add it back with the other new arrays
+            if group_name in input_array.fieldnames:
+                # get the data
+                new_subarray = input_array[group_name]
+                # add the new fields to the subarray
+                new_subarray = add_fields(new_subarray,
+                    list(thisdict.values()), list(thisdict.keys()))
+                # remove the original from the input array
+                input_array = input_array.without_fields(group_name)
+            else:
+                new_subarray = list(thisdict.values())
+            # add the new subarray to input_array as a subarray
+            input_array = add_fields(input_array, new_subarray,
+                names=group_name, assubarray=True)
+            # set the subarray names
+            input_array[group_name].dtype.names = tuple(thisdict.keys())
+        # remove the subarray names from names
+        keep_idx = [ii for ii,name in enumerate(names) \
+            if name not in subarray_names]
+        names = [names[ii] for ii in keep_idx]
+        # if there's nothing left, just return
+        if names == []:
+            return input_array
+        # also remove the subarray arrays
+        arrays = [arrays[ii] for ii in keep_idx]
+    if assubarray:
+        # merge all of the arrays into a single array
+        if len(arrays) > 1:
+            arrays = [merge_arrays(arrays, flatten=True)]
+        # now merge all the fields as a single subarray
+        merged_arr = numpy.empty(len(arrays[0]),
+            dtype=[('f0', arrays[0].dtype.descr)])
+        merged_arr['f0'] = arrays[0]
+        arrays = [merged_arr]
+    merge_list = [input_array] + arrays
+    if names is not None:
+        names = list(input_array.dtype.names) + names
+    # merge into a single array
+    return merge_arrays(merge_list, names=names, flatten=True,
+        outtype=type(input_array))
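+
+# A rough usage sketch for add_fields (illustrative names; a plain structured
+# numpy array is enough as the input for this simple case):
+#
+#   >>> arr = numpy.zeros(3, dtype=[('mass1', float), ('mass2', float)])
+#   >>> snr = numpy.array([5., 6., 7.])
+#   >>> new = add_fields(arr, snr, names='snr')
+#   >>> new.dtype.names
+#   ('mass1', 'mass2', 'snr')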
+
+
+#
+# =============================================================================
+#
+#                           Base FieldArray definitions
+#
+# =============================================================================
+#
+
+# We'll include functions from various pycbc modules in FieldArray's function
+# library. All modules used must have an __all__ list defined.
+_modules_for_functionlib = [conversions, coordinates, cosmology,
+                            population_models]
+_fieldarray_functionlib = {_funcname : getattr(_mod, _funcname)
+                              for _mod in _modules_for_functionlib
+                              for _funcname in getattr(_mod, '__all__')}
+
+
+[docs] +class FieldArray(numpy.recarray): + """ + Subclass of numpy.recarray that adds additional functionality. + + Initialization is done the same way as numpy.recarray, with the addition + that a "name" attribute can be passed to name the output array. When you + initialize an array it creates a new zeroed array. This is similar to + numpy.recarray, except that ``numpy.recarray(shape)`` will create an empty + array, whereas here the default is to zero all of the elements (see + ``default_zero`` for definition of zero for different data types). If you + prefer an empty array, set ``zero=False`` when initializing. + + You cannot pass an array or sequence as input as you do with numpy.array. + To initialize an FieldArray from an already existing arrays, use the + ``FieldArray.from_arrays`` class method. To initialize from a list of + tuples, use ``FieldArray.from_records``. See the docstring for those methods + for details. For more information on initalizing an empty array, see + ``numpy.recarray`` help. + + Some additional features: + + * **Arbitrary functions**: + + You can retrive functions on fields in the same manner that you access + individual fields. For example, if you have a FieldArray ``x`` with + fields ``a`` and ``b``, you can access each field with + ``x['a'], x['b']``. You can also do ``x['a*b/(a+b)**2.']``, + ``x[cos(a)*sin(b)]``, etc. Boolean operations are also possible, e.g., + ``x['(a < 3) & (b < 2)']``. Syntax for functions is python. Any numpy + ufunc, as well as all functions listed in the functionlib attribute, may + be used. Note that while fields may be accessed as attributes (e.g, + field ``a`` can be accessed via ``x['a']`` or ``x.a``), functions on + multiple fields may not (``x.a+b`` does not work, for obvious reasons). + + * **Subfields and '.' indexing**: + Structured arrays, which are the base class for recarrays and, by + inheritance, FieldArray, allows for fields to themselves have fields. For + example, an array ``x`` may have fields ``a`` and ``b``, with ``b`` having + subfields ``c`` and ``d``. You can access subfields using other index + notation or attribute notation. So, the subfields ``d`` may be retrieved + via ``x['b']['d']``, ``x.b.d``, ``x['b'].d`` or ``x['b.d']``. Likewise, + functions can be carried out on the subfields, as they can on fields. If + ``d`` is a float field, we could get the log of it via ``x['log(b.d)']``. + There is no limit to the number of subfields. So, ``c`` could also have + subfield ``c0``, which would be accessed via ``x.c.c0``, or any of the + other methods. + + .. warning:: + Record arrays also allow you to set values of a field using attribute + notation. However, this can lead to unexpected results if you + accidently misspell the attribute. For example, if ``x`` has field + ``foo``, and you misspell this when setting, e.g., you try to do + ``x.fooo = numpy.arange(x.size)``, ``foo`` will not be set, nor will + you get an error. Instead, the attribute ``fooo`` will be added to + ``x``. If you tried to do this using index notation, however --- + ``x['fooo'] = numpy.arange(x.size)`` --- you will + get an ``AttributeError`` as you might expect. For this reason, it is + recommended that you always use index notation when *setting* values; + you can use either index or attribute notation when *retrieving* + values. + + * **Properties and methods as fields**: + If a propety or instance method is defined for a class that inherits from + FieldArray, those can be accessed in the same way as fields are. 
For + example, define ``Foo`` as: + + .. code-block:: python + + class Foo(FieldArray): + @property + def bar(self): + return self['a']**2. + + def narf(self, y): + return self['a'] + y + + Then if we have an instance: ``foo = Foo(100, dtype=[('a', float)])``. + The ``bar`` and ``narf`` attributes may be accessed via field notation: + ``foo.bar``, ``foo['bar']``, ``foo.narf(10)`` and ``foo['narf(10)']``. + + * **Virtual fields**: + Virtual fields are methods wrapped as properties that operate on one or + more fields, thus returning an array of values. To outside code virtual + fields look the same as fields, and can be called similarily. Internally, + no additional data is stored; the operation is performed on the fly when + the virtual field is called. Virtual fields can be added to an array + instance with the add_virtualfields method. Alternatively, virtual fields + can be defined by sub-classing FieldArray: + + .. code-block:: python + + class Foo(FieldArray): + _virtualfields = ['bar'] + @property + def bar(self): + return self['a']**2. + + The fields property returns the names of both fields and virtual fields. + + .. note:: + + It can happen that a field, virtual field, or function in the + functionlib have that same name. In that case, precedence is: field, + virtual field, function. For example, if a function called 'foo' is in + the function library, and a virtual field is added call 'foo', then + `a['foo']` will return the virtual field rather than the function. + Likewise, if the array is initialized with a field called `foo`, or a + field with that name is added, `a['foo']` will return that field + rather than the virtual field and/or the function. + + Parameters + ---------- + shape : {int | tuple} + The shape of the new array. + name : {None | str} + Optional, what to name the new array. The array's ``name`` attribute + is set to this. + + For details on other keyword arguments, see ``numpy.recarray`` help. + + Attributes + ---------- + name : str + Instance attribute. The name of the array. + + Examples + -------- + .. note:: For some predefined arrays with default fields, see the other + array classes defined below. + + Create an empty array with four rows and two fields called `foo` and + `bar`, both of which are floats: + + >>> x = FieldArray(4, dtype=[('foo', float), ('bar', float)]) + + Set/retrieve a fields using index or attribute syntax: + + >>> x['foo'] = [1.,2.,3.,4.] + >>> x['bar'] = [5.,6.,7.,8.] + >>> x + FieldArray([(1.0, 5.0), (2.0, 6.0), (3.0, 7.0), (4.0, 8.0)], + dtype=[('foo', '<f8'), ('bar', '<f8')]) + >>> x.foo + array([ 1., 2., 3., 4.]) + >>> x['bar'] + array([ 5., 6., 7., 8.]) + + Get the names of the fields: + + >>> x.fieldnames + ('foo', 'bar') + + Rename the fields to `a` and `b`: + + >>> x.dtype.names = ['a', 'b'] + >>> x.fieldnames + ('a', 'b') + + Retrieve a function of the fields as if it were a field: + + >>> x['sin(a/b)'] + array([ 0.19866933, 0.3271947 , 0.41557185, 0.47942554]) + + Add a virtual field: + + >>> def c(self): + ... return self['a'] + self['b'] + ... 
+ >>> x = x.add_virtualfields('c', c) + >>> x.fields + ('a', 'b', 'c') + >>> x['c'] + array([ 6., 8., 10., 12.]) + + Create an array with subfields: + + >>> x = FieldArray(4, dtype=[('foo', [('cat', float), ('hat', int)]), ('bar', float)]) + >>> x.fieldnames + ['foo.cat', 'foo.hat', 'bar'] + + Load from a list of arrays (in this case, from an hdf5 file): + + >>> bankhdf = h5py.File('bank/H1L1-BANK2HDF-1117400416-928800.hdf') + >>> bankhdf.keys() + [u'mass1', u'mass2', u'spin1z', u'spin2z', u'template_hash'] + >>> templates = FieldArray.from_arrays(bankhdf.values(), names=bankhdf.keys()) + >>> templates.fieldnames + ('mass1', 'mass2', 'spin1z', 'spin2z', 'template_hash') + >>> templates.mass1 + array([ 1.71731389, 1.10231435, 2.99999857, ..., 1.67488706, + 1.00531888, 2.11106491], dtype=float32) + + Sort by a field without having to worry about also sorting the other + fields: + + >>> templates[['mass1', 'mass2']] + array([(1.7173138856887817, 1.2124452590942383), + (1.1023143529891968, 1.0074082612991333), + (2.9999985694885254, 1.0578444004058838), ..., + (1.6748870611190796, 1.1758257150650024), + (1.0053188800811768, 1.0020891427993774), + (2.111064910888672, 1.0143394470214844)], + dtype=[('mass1', '<f4'), ('mass2', '<f4')]) + >>> templates.sort(order='mass1') + >>> templates[['mass1', 'mass2']] + array([(1.000025987625122, 1.0000133514404297), + (1.0002814531326294, 1.0002814531326294), + (1.0005437135696411, 1.0005437135696411), ..., + (2.999999523162842, 1.371169090270996), + (2.999999523162842, 1.4072519540786743), (3.0, 1.4617927074432373)], + dtype=[('mass1', '<f4'), ('mass2', '<f4')]) + + Convert a LIGOLW xml table: + + >>> type(sim_table) + ligo.lw.lsctables.SimInspiralTable + >>> sim_array = FieldArray.from_ligolw_table(sim_table) + >>> sim_array.mass1 + array([ 2.27440691, 1.85058105, 1.61507106, ..., 2.0504961 , + 2.33554196, 2.02732205], dtype=float32) + >>> sim_array.waveform + array([u'SpinTaylorT2', u'SpinTaylorT2', u'SpinTaylorT2', ..., + u'SpinTaylorT2', u'SpinTaylorT2', u'SpinTaylorT2'], dtype=object) + + >>> sim_array = FieldArray.from_ligolw_table(sim_table, columns=['simulation_id', 'mass1', 'mass2']) + >>> sim_array + FieldArray([(0, 2.274406909942627, 2.6340370178222656), + (1, 1.8505810499191284, 2.8336880207061768), + (2, 1.6150710582733154, 2.2336490154266357), ..., + (11607, 2.0504961013793945, 2.6019821166992188), + (11608, 2.3355419635772705, 1.2164380550384521), + (11609, 2.0273220539093018, 2.2453839778900146)], + dtype=[('simulation_id', '<i8'), ('mass1', '<f4'), ('mass2', '<f4')]) + + Add a field to the array: + + >>> optimal_snrs = numpy.random.uniform(4.,40., size=len(sim_array)) + >>> sim_array = sim_array.add_fields(optimal_snrs, 'optimal_snrs') + >>> sim_array.fieldnames + ('simulation_id', 'mass1', 'mass2', 'optimal_snrs') + + Notes + ----- + Input arrays with variable-length strings in one or more fields can be + tricky to deal with. Numpy arrays are designed to use fixed-length + datasets, so that quick memory access can be achieved. To deal with + variable-length strings, there are two options: 1. set the data type to + object, or 2. set the data type to a string with a fixed length larger + than the longest string in the input array. + + The first option, using objects, essentially causes the array to store a + pointer to the string. This is the most flexible option, as it allows + strings in the array to be updated to any length. 
However, operations on + object fields are slower, as numpy cannot take advantage of its fast + memory striding abilities (see `this question/answer on stackoverflow + <http://stackoverflow.com/a/14639568/1366472>`_ for details). Also, + numpy's support of object arrays is more limited. In particular, prior + to version 1.9.2, you cannot create a view of an array that changes the + dtype if the array has any fields that are objects, even if the view does + not affect the object fields. (This has since been relaxed.) + + The second option, using strings of a fixed length, solves the issues + with object fields. However, if you try to change one of the strings + after the array is created, the string will be truncated at whatever + string length is used. Additionally, if you choose too large of a string + length, you can substantially increase the memory overhead for large + arrays. + + """ + _virtualfields = [] + _functionlib = _fieldarray_functionlib + __persistent_attributes__ = ['name', '_virtualfields', '_functionlib'] + + def __new__(cls, shape, name=None, zero=True, **kwargs): + """Initializes a new empty array. + """ + obj = super(FieldArray, cls).__new__(cls, shape, **kwargs).view( + type=cls) + obj.name = name + obj.__persistent_attributes__ = [a + for a in cls.__persistent_attributes__] + obj._functionlib = {f: func for f,func in cls._functionlib.items()} + obj._virtualfields = [f for f in cls._virtualfields] + # zero out the array if desired + if zero: + default = default_empty(1, dtype=obj.dtype) + obj[:] = default + return obj + + def __array_finalize__(self, obj): + """Default values are set here. + + See <https://docs.scipy.org/doc/numpy/user/basics.subclassing.html> for + details. + """ + if obj is None: + return + # copy persistent attributes + try: + obj.__copy_attributes__(self) + except AttributeError: + pass + + def __copy_attributes__(self, other, default=None): + """Copies the values of all of the attributes listed in + `self.__persistent_attributes__` to other. + """ + [setattr(other, attr, copy.deepcopy(getattr(self, attr, default))) \ + for attr in self.__persistent_attributes__] + + def __getattribute__(self, attr, no_fallback=False): + """Allows fields to be accessed as attributes. + """ + # first try to get the attribute + try: + return numpy.ndarray.__getattribute__(self, attr) + except AttributeError as e: + # don't try getitem, which might get back here + if no_fallback: + raise(e) + + # might be a field, try to retrive it using getitem + if attr in self.fields: + return self.__getitem__(attr) + # otherwise, unrecognized + raise AttributeError(e) + + def __setitem__(self, item, values): + """Wrap's recarray's setitem to allow attribute-like indexing when + setting values. + """ + if type(item) is int and type(values) is numpy.ndarray: + # numpy >=1.14 only accepts tuples + values = tuple(values) + try: + return super(FieldArray, self).__setitem__(item, values) + except ValueError: + # we'll get a ValueError if a subarray is being referenced using + # '.'; so we'll try to parse it out here + fields = item.split('.') + if len(fields) > 1: + for field in fields[:-1]: + self = self[field] + item = fields[-1] + # now try again + return super(FieldArray, self).__setitem__(item, values) + + def __getbaseitem__(self, item): + """Gets an item assuming item is either an index or a fieldname. 
+ """ + # We cast to a ndarray to avoid calling array_finalize, which can be + # slow + out = self.view(numpy.ndarray)[item] + # if there are no fields, then we can just return + if out.dtype.fields is None: + return out + # if there are fields, but only a single entry, we'd just get a + # record by casting to self, so just cast immediately to recarray + elif out.ndim == 0: + return out.view(numpy.recarray) + # otherwise, cast back to an instance of self + else: + return out.view(type(self)) + + def __getsubitem__(self, item): + """Gets a subfield using `field.subfield` notation. + """ + try: + return self.__getbaseitem__(item) + except ValueError as err: + subitems = item.split('.') + if len(subitems) > 1: + return self.__getbaseitem__(subitems[0] + ).__getsubitem__('.'.join(subitems[1:])) + else: + raise ValueError(err) + + def __getitem__(self, item): + """Wraps recarray's `__getitem__` so that math functions on fields and + attributes can be retrieved. Any function in numpy's library may be + used. + """ + try: + return self.__getsubitem__(item) + except ValueError: + # + # arg isn't a simple argument of row, so we'll have to eval it + # + if not hasattr(self, '_code_cache'): + self._code_cache = {} + + if item not in self._code_cache: + code = compile(item, '<string>', 'eval') + + # get the function library + item_dict = dict(_numpy_function_lib.items()) + item_dict.update(self._functionlib) + + # parse to get possible fields + itemvars_raw = get_fields_from_arg(item) + + itemvars = [] + for it in itemvars_raw: + try: + float(it) + is_num = True + except ValueError: + is_num = False + + if not is_num: + itemvars.append(it) + + self._code_cache[item] = (code, itemvars, item_dict) + + code, itemvars, item_dict = self._code_cache[item] + added = {} + for it in itemvars: + if it in self.fieldnames: + # pull out the fields: note, by getting the parent fields + # we also get the sub fields name + added[it] = self.__getbaseitem__(it) + elif (it in self.__dict__) or (it in self._virtualfields): + # pull out any needed attributes + added[it] = self.__getattribute__(it, no_fallback=True) + else: + # add any aliases + aliases = self.aliases + if it in aliases: + added[it] = self.__getbaseitem__(aliases[it]) + if item_dict is not None: + item_dict.update(added) + + ans = eval(code, {"__builtins__": None}, item_dict) + for k in added: + item_dict.pop(k) + return ans + + def __contains__(self, field): + """Returns True if the given field name is in self's fields.""" + return field in self.fields + +
+[docs] + def sort(self, axis=-1, kind='quicksort', order=None): + """Sort an array, in-place. + + This function extends the standard numpy record array in-place sort + to allow the basic use of Field array virtual fields. Only a single + field is currently supported when referencing a virtual field. + + Parameters + ---------- + axis : int, optional + Axis along which to sort. Default is -1, which means sort along the + last axis. + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + Sorting algorithm. Default is 'quicksort'. + order : list, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + """ + try: + numpy.recarray.sort(self, axis=axis, kind=kind, order=order) + except ValueError: + if isinstance(order, list): + raise ValueError("Cannot process more than one order field") + self[:] = self[numpy.argsort(self[order])]
+ + +
+[docs] + def addattr(self, attrname, value=None, persistent=True): + """Adds an attribute to self. If persistent is True, the attribute will + be made a persistent attribute. Persistent attributes are copied + whenever a view or copy of this array is created. Otherwise, new views + or copies of this will not have the attribute. + """ + setattr(self, attrname, value) + # add as persistent + if persistent and attrname not in self.__persistent_attributes__: + self.__persistent_attributes__.append(attrname)
+ + +
+[docs] + def add_methods(self, names, methods): + """Adds the given method(s) as instance method(s) of self. The + method(s) must take `self` as a first argument. + """ + if isinstance(names, str): + names = [names] + methods = [methods] + for name,method in zip(names, methods): + setattr(self, name, types.MethodType(method, self))
+ + +
+[docs] + def add_properties(self, names, methods): + """Returns a view of self with the given methods added as properties. + + From: <http://stackoverflow.com/a/2954373/1366472>. + """ + cls = type(self) + cls = type(cls.__name__, (cls,), dict(cls.__dict__)) + if isinstance(names, str): + names = [names] + methods = [methods] + for name,method in zip(names, methods): + setattr(cls, name, property(method)) + return self.view(type=cls)
+ + +
+[docs] + def add_virtualfields(self, names, methods): + """Returns a view of this array with the given methods added as virtual + fields. Specifically, the given methods are added using add_properties + and their names are added to the list of virtual fields. Virtual fields + are properties that are assumed to operate on one or more of self's + fields, thus returning an array of values. + """ + if isinstance(names, str): + names = [names] + methods = [methods] + out = self.add_properties(names, methods) + if out._virtualfields is None: + out._virtualfields = [] + out._virtualfields.extend(names) + return out
+ + +
+[docs] + def add_functions(self, names, functions): + """Adds the given functions to the function library. + + Functions are added to this instance of the array; all copies of + and slices of this array will also have the new functions included. + + Parameters + ---------- + names : (list of) string(s) + Name or list of names of the functions. + functions : (list of) function(s) + The function(s) to call. + """ + if isinstance(names, str): + names = [names] + functions = [functions] + if len(functions) != len(names): + raise ValueError("number of provided names must be same as number " + "of functions") + self._functionlib.update(dict(zip(names, functions)))
+ + +
+[docs] + def del_functions(self, names): + """Removes the specified function names from the function library. + + Functions are removed from this instance of the array; all copies + and slices of this array will also have the functions removed. + + Parameters + ---------- + names : (list of) string(s) + Name or list of names of the functions to remove. + """ + if isinstance(names, str): + names = [names] + for name in names: + self._functionlib.pop(name)
+ + +
+[docs] + @classmethod + def from_arrays(cls, arrays, name=None, **kwargs): + """Creates a new instance of self from the given (list of) array(s). + This is done by calling numpy.rec.fromarrays on the given arrays with + the given kwargs. The type of the returned array is cast to this + class, and the name (if provided) is set. + + Parameters + ---------- + arrays : (list of) numpy array(s) + A list of the arrays to create the FieldArray from. + name : {None|str} + What the output array should be named. + + For other keyword parameters, see the numpy.rec.fromarrays help. + + Returns + ------- + array : instance of this class + An array that is an instance of this class in which the field + data is from the given array(s). + """ + obj = numpy.rec.fromarrays(arrays, **kwargs).view(type=cls) + obj.name = name + return obj
+ + +
+[docs] + @classmethod + def from_records(cls, records, name=None, **kwargs): + """Creates a new instance of self from the given (list of) record(s). + + A "record" is a tuple in which each element is the value of one field + in the resulting record array. This is done by calling + `numpy.rec.fromrecords` on the given records with the given kwargs. + The type of the returned array is cast to this class, and the name + (if provided) is set. + + Parameters + ---------- + records : (list of) tuple(s) + A list of the tuples to create the FieldArray from. + name : {None|str} + What the output array should be named. + + Other Parameters + ---------------- + For other keyword parameters, see the `numpy.rec.fromrecords` help. + + Returns + ------- + array : instance of this class + An array that is an instance of this class in which the field + data is from the given record(s). + """ + obj = numpy.rec.fromrecords(records, **kwargs).view( + type=cls) + obj.name = name + return obj
+ + +
+[docs] + @classmethod + def from_kwargs(cls, **kwargs): + """Creates a new instance of self from the given keyword arguments. + Each argument will correspond to a field in the returned array, with + the name of the field given by the keyword, and the value(s) whatever + the keyword was set to. Each keyword may be set to a single value or + a list of values. The number of values that each argument is set to + must be the same; this will be the size of the returned array. + + Examples + -------- + Create an array with fields 'mass1' and 'mass2': + >>> a = FieldArray.from_kwargs(mass1=[1.1, 3.], mass2=[2., 3.]) + >>> a.fieldnames + ('mass1', 'mass2') + >>> a.mass1, a.mass2 + (array([ 1.1, 3. ]), array([ 2., 3.])) + + Create an array with only a single element in it: + >>> a = FieldArray.from_kwargs(mass1=1.1, mass2=2.) + >>> a.mass1, a.mass2 + (array([ 1.1]), array([ 2.])) + """ + arrays = [] + names = [] + for p,vals in kwargs.items(): + if not isinstance(vals, numpy.ndarray): + if not isinstance(vals, list): + vals = [vals] + vals = numpy.array(vals) + arrays.append(vals) + names.append(p) + return cls.from_arrays(arrays, names=names)
+ + + +
+[docs] + @classmethod + def from_ligolw_table(cls, table, columns=None, cast_to_dtypes=None): + """Converts the given ligolw table into an FieldArray. The `tableName` + attribute is copied to the array's `name`. + + Parameters + ---------- + table : LIGOLw table instance + The table to convert. + columns : {None|list} + Optionally specify a list of columns to retrieve. All of the + columns must be in the table's validcolumns attribute. If None + provided, all the columns in the table will be converted. + dtype : {None | dict} + Override the columns' dtypes using the given dictionary. The + dictionary should be keyed by the column names, with the values + a tuple that can be understood by numpy.dtype. For example, to + cast a ligolw column called "foo" to a field called "bar" with + type float, cast_to_dtypes would be: ``{"foo": ("bar", float)}``. + + Returns + ------- + array : FieldArray + The input table as an FieldArray. + """ + name = table.tableName.split(':')[0] + if columns is None: + # get all the columns + columns = table.validcolumns + else: + # note: this will raise a KeyError if one or more columns is + # not in the table's validcolumns + new_columns = {} + for col in columns: + new_columns[col] = table.validcolumns[col] + columns = new_columns + if cast_to_dtypes is not None: + dtype = [cast_to_dtypes[col] for col in columns] + else: + dtype = list(columns.items()) + # get the values + if _default_types_status['ilwd_as_int']: + # columns like `process:process_id` have corresponding attributes + # with names that are only the part after the colon, so we split + input_array = \ + [tuple(getattr(row, col.split(':')[-1]) if dt != 'ilwd:char' + else int(getattr(row, col)) + for col,dt in columns.items()) + for row in table] + else: + input_array = \ + [tuple(getattr(row, col) for col in columns) for row in table] + # return the values as an instance of cls + return cls.from_records(input_array, dtype=dtype, + name=name)
+ + +
+[docs] + def to_array(self, fields=None, axis=0): + """Returns an `numpy.ndarray` of self in which the fields are included + as an extra dimension. + + Parameters + ---------- + fields : {None, (list of) strings} + The fields to get. All of the fields must have the same datatype. + If None, will try to return all of the fields. + axis : {0, int} + Which dimension to put the fields in in the returned array. For + example, if `self` has shape `(l,m,n)` and `k` fields, the + returned array will have shape `(k,l,m,n)` if `axis=0`, `(l,k,m,n)` + if `axis=1`, etc. Setting `axis=-1` will put the fields in the + last dimension. Default is 0. + + Returns + ------- + numpy.ndarray + The desired fields as a numpy array. + """ + if fields is None: + fields = self.fieldnames + if isinstance(fields, str): + fields = [fields] + return numpy.stack([self[f] for f in fields], axis=axis)
+ + + @property + def fieldnames(self): + """Returns a tuple listing the field names in self. Equivalent to + `array.dtype.names`, where `array` is self. + """ + return self.dtype.names + + @property + def virtualfields(self): + """Returns a tuple listing the names of virtual fields in self. + """ + if self._virtualfields is None: + vfs = tuple() + else: + vfs = tuple(self._virtualfields) + return vfs + + @property + def functionlib(self): + """Returns the library of functions that are available when calling + items. + """ + return self._functionlib + + @property + def fields(self): + """Returns a tuple listing the names of fields and virtual fields in + self.""" + return tuple(list(self.fieldnames) + list(self.virtualfields)) + + @property + def aliases(self): + """Returns a dictionary of the aliases, or "titles", of the field names + in self. An alias can be specified by passing a tuple in the name + part of the dtype. For example, if an array is created with + ``dtype=[(('foo', 'bar'), float)]``, the array will have a field + called `bar` that has alias `foo` that can be accessed using + either `arr['foo']` or `arr['bar']`. Note that the first string + in the dtype is the alias, the second the name. This function returns + a dictionary in which the aliases are the keys and the names are the + values. Only fields that have aliases are returned. + """ + return dict(c[0] for c in self.dtype.descr if isinstance(c[0], tuple)) + +
+[docs] + def add_fields(self, arrays, names=None, assubarray=False): + """ + Adds the given arrays as new fields to self. Returns a new instance + with the new fields added. Note: this array does not change; the + returned array is a new copy. + + Parameters + ---------- + arrays : (list of) numpy array(s) + The arrays to add. If adding multiple arrays, must be a list; + if adding a single array, can just be that array. + names : (list of) strings + Optional, the name(s) of the new fields in the output array. If + adding multiple fields, must be a list of strings with the same + length as the list of arrays. If None provided, names used will + be the same as the name of the datatype in the given arrays. + If the datatype has no name, the new field will be ``'fi'`` where + i is the index of the array in arrays. + assubarray : bool + Add the list of arrays as a single subarray field. If True, and + names provided, names should be a string or a length-1 sequence. + Default is False, in which case each array will be added as a + separate field. + + Returns + ------- + new_array : new instance of this array + A copy of this array with the desired fields added. + """ + newself = add_fields(self, arrays, names=names, assubarray=assubarray) + self.__copy_attributes__(newself) + return newself
+ + +
+[docs] + def parse_boolargs(self, args): + """Returns an array populated by given values, with the indices of + those values dependent on given boolen tests on self. + + The given `args` should be a list of tuples, with the first element the + return value and the second argument a string that evaluates to either + True or False for each element in self. + + Each boolean argument is evaluated on elements for which every prior + boolean argument was False. For example, if array `foo` has a field + `bar`, and `args = [(1, 'bar < 10'), (2, 'bar < 20'), (3, 'bar < 30')]`, + then the returned array will have `1`s at the indices for + which `foo.bar < 10`, `2`s where `foo.bar < 20 and not foo.bar < 10`, + and `3`s where `foo.bar < 30 and not (foo.bar < 10 or foo.bar < 20)`. + + The last argument in the list may have "else", an empty string, None, + or simply list a return value. In any of these cases, any element not + yet populated will be assigned the last return value. + + Parameters + ---------- + args : {(list of) tuples, value} + One or more return values and boolean argument determining where + they should go. + + Returns + ------- + return_values : array + An array with length equal to self, with values populated with the + return values. + leftover_indices : array + An array of indices that evaluated to False for all arguments. + These indices will not have been popluated with any value, + defaulting to whatever numpy uses for a zero for the return + values' dtype. If there are no leftovers, an empty array is + returned. + + Examples + -------- + Given the following array: + + >>> arr = FieldArray(5, dtype=[('mtotal', float)]) + >>> arr['mtotal'] = numpy.array([3., 5., 2., 1., 4.]) + + Return `"TaylorF2"` for all elements with `mtotal < 4` (note that the + elements 1 and 4 are leftover): + + >>> arr.parse_boolargs(('TaylorF2', 'mtotal<4')) + (array(['TaylorF2', '', 'TaylorF2', 'TaylorF2', ''], + dtype='|S8'), + array([1, 4])) + + Return `"TaylorF2"` for all elements with `mtotal < 4`, + `"SEOBNR_ROM_DoubleSpin"` otherwise: + + >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', 'else')]) + (array(['TaylorF2', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2', + 'SEOBNRv2_ROM_DoubleSpin'], + dtype='|S23'), + array([], dtype=int64)) + + The following will also return the same: + + >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin',)]) + >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', '')]) + >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin']) + + Return `"TaylorF2"` for all elements with `mtotal < 3`, `"IMRPhenomD"` + for all elements with `3 <= mtotal < 4`, `"SEOBNRv2_ROM_DoubleSpin"` + otherwise: + + >>> arr.parse_boolargs([('TaylorF2', 'mtotal<3'), ('IMRPhenomD', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin']) + (array(['IMRPhenomD', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2', + 'SEOBNRv2_ROM_DoubleSpin'], + dtype='|S23'), + array([], dtype=int64)) + + Just return `"TaylorF2"` for all elements: + + >>> arr.parse_boolargs('TaylorF2') + (array(['TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2'], + dtype='|S8'), + array([], dtype=int64)) + + """ + if not isinstance(args, list): + args = [args] + # format the arguments + return_vals = [] + bool_args = [] + for arg in args: + if not isinstance(arg, tuple): + return_val = arg + bool_arg = None + elif len(arg) == 1: + return_val = arg[0] + bool_arg = None + elif len(arg) == 2: + return_val, bool_arg = arg + else: + raise 
ValueError("argument not formatted correctly") + return_vals.append(return_val) + bool_args.append(bool_arg) + # get the output dtype + outdtype = numpy.array(return_vals).dtype + out = numpy.zeros(self.size, dtype=outdtype) + mask = numpy.zeros(self.size, dtype=bool) + leftovers = numpy.ones(self.size, dtype=bool) + for ii,(boolarg,val) in enumerate(zip(bool_args, return_vals)): + if boolarg is None or boolarg == '' or boolarg.lower() == 'else': + if ii+1 != len(bool_args): + raise ValueError("only the last item may not provide " + "any boolean arguments") + mask = leftovers + else: + mask = leftovers & self[boolarg] + out[mask] = val + leftovers &= ~mask + return out, numpy.where(leftovers)[0]
+ + +
+[docs] + def append(self, other): + """Appends another array to this array. + + The returned array will have all of the class methods and virutal + fields of this array, including any that were added using `add_method` + or `add_virtualfield`. If this array and other array have one or more + string fields, the dtype for those fields are updated to a string + length that can encompass the longest string in both arrays. + + .. note:: + Increasing the length of strings only works for fields, not + sub-fields. + + Parameters + ---------- + other : array + The array to append values from. It must have the same fields and + dtype as this array, modulo the length of strings. If the other + array does not have the same dtype, a TypeError is raised. + + Returns + ------- + array + An array with others values appended to this array's values. The + returned array is an instance of the same class as this array, + including all methods and virtual fields. + """ + try: + return numpy.append(self, other).view(type=self.__class__) + except TypeError: + # see if the dtype error was due to string fields having different + # lengths; if so, we'll make the joint field the larger of the + # two + str_fields = [name for name in self.fieldnames + if _isstring(self.dtype[name])] + # get the larger of the two + new_strlens = dict( + [[name, + max(self.dtype[name].itemsize, other.dtype[name].itemsize)] + for name in str_fields] + ) + # cast both to the new string lengths + new_dt = [] + for dt in self.dtype.descr: + name = dt[0] + if name in new_strlens: + dt = (name, self.dtype[name].type, new_strlens[name]) + new_dt.append(dt) + new_dt = numpy.dtype(new_dt) + return numpy.append( + self.astype(new_dt), + other.astype(new_dt) + ).view(type=self.__class__)
+ + +
+[docs] + @classmethod + def parse_parameters(cls, parameters, possible_fields): + """Parses a list of parameters to get the list of fields needed in + order to evaluate those parameters. + + Parameters + ---------- + parameters : (list of) string(s) + The list of desired parameters. These can be (functions of) fields + or virtual fields. + possible_fields : (list of) string(s) + The list of possible fields. + + Returns + ------- + list : + The list of names of the fields that are needed in order to + evaluate the given parameters. + """ + if isinstance(possible_fields, str): + possible_fields = [possible_fields] + possible_fields = list(map(str, possible_fields)) + # we'll just use float as the dtype, as we just need this for names + arr = cls(1, dtype=list(zip(possible_fields, + len(possible_fields)*[float]))) + # try to perserve order + return list(get_needed_fieldnames(arr, parameters))
+
+ + +def _isstring(dtype): + """Given a numpy dtype, determines whether it is a string. Returns True + if the dtype is string or unicode. + """ + return dtype.type == numpy.unicode_ or dtype.type == numpy.bytes_ + + +def aliases_from_fields(fields): + """Given a dictionary of fields, will return a dictionary mapping the + aliases to the names. + """ + return dict(c for c in fields if isinstance(c, tuple)) + + +def fields_from_names(fields, names=None): + """Given a dictionary of fields and a list of names, will return a + dictionary consisting of the fields specified by names. Names can be + either the names of fields, or their aliases. + """ + + if names is None: + return fields + if isinstance(names, str): + names = [names] + aliases_to_names = aliases_from_fields(fields) + names_to_aliases = dict(zip(aliases_to_names.values(), + aliases_to_names.keys())) + outfields = {} + for name in names: + try: + outfields[name] = fields[name] + except KeyError: + if name in aliases_to_names: + key = (name, aliases_to_names[name]) + elif name in names_to_aliases: + key = (names_to_aliases[name], name) + else: + raise KeyError('default fields has no field %s' % name) + outfields[key] = fields[key] + return outfields + + +# +# ============================================================================= +# +# FieldArray with default fields +# +# ============================================================================= +# + +class _FieldArrayWithDefaults(FieldArray): + """ + Subclasses FieldArray, adding class attribute ``_staticfields``, and + class method ``default_fields``. The ``_staticfields`` should be a + dictionary that defines some field names and corresponding dtype. The + ``default_fields`` method returns a dictionary of the static fields + and any default virtualfields that were added. A field array can then + be initialized in one of 3 ways: + + 1. With just a shape. In this case, the returned array will have all + of the default fields. + + 2. With a shape and a list of names, given by the ``names`` keyword + argument. The names may be default fields, virtual fields, a method or + property of the class, or any python function of these things. If a + virtual field, method, or property is in the names, the needed underlying + fields will be included in the return array. For example, if the class + has a virtual field called 'mchirp', which is a function of fields called + 'mass1' and 'mass2', then 'mchirp' or any function of 'mchirp' may be + included in the list of names (e.g., names=['mchirp**(5/6)']). If so, the + returned array will have fields 'mass1' and 'mass2' even if these were + not specified in names, so that 'mchirp' may be used without error. + names must be names of either default fields or virtualfields, else a + KeyError is raised. + + 3. With a shape and a dtype. Any field specified by the dtype will be + used. The fields need not be in the list of default fields, and/or the + dtype can be different than that specified by the default fields. + + If additional fields are desired beyond the default fields, these can + be specified using the ``additional_fields`` keyword argument; these should + be provided in the same way as ``dtype``; i.e, as a list of (name, dtype) + tuples. + + This class does not define any static fields, and ``default_fields`` just + returns an empty dictionary. This class is mostly meant to be subclassed + by other classes, so they can add their own defaults. 
+ """ + + _staticfields = {} + @classmethod + def default_fields(cls, include_virtual=True, **kwargs): + """The default fields and their dtypes. By default, this returns + whatever the class's ``_staticfields`` and ``_virtualfields`` is set + to as a dictionary of fieldname, dtype (the dtype of virtualfields is + given by VIRTUALFIELD_DTYPE). This function should be overridden by + subclasses to add dynamic fields; i.e., fields that require some input + parameters at initialization. Keyword arguments can be passed to this + to set such dynamic fields. + """ + output = cls._staticfields.copy() + if include_virtual: + output.update({name: VIRTUALFIELD_DTYPE + for name in cls._virtualfields}) + return output + + def __new__(cls, shape, name=None, additional_fields=None, + field_kwargs=None, **kwargs): + """The ``additional_fields`` should be specified in the same way as + ``dtype`` is normally given to FieldArray. The ``field_kwargs`` are + passed to the class's default_fields method as keyword arguments. + """ + if field_kwargs is None: + field_kwargs = {} + if 'names' in kwargs and 'dtype' in kwargs: + raise ValueError("Please provide names or dtype, not both") + default_fields = cls.default_fields(include_virtual=False, + **field_kwargs) + if 'names' in kwargs: + names = kwargs.pop('names') + if isinstance(names, str): + names = [names] + # evaluate the names to figure out what base fields are needed + # to do this, we'll create a small default instance of self (since + # no names are specified in the following initialization, this + # block of code is skipped) + arr = cls(1, field_kwargs=field_kwargs) + # try to perserve order + sortdict = dict([[nm, ii] for ii,nm in enumerate(names)]) + names = list(get_needed_fieldnames(arr, names)) + names.sort(key=lambda x: sortdict[x] if x in sortdict + else len(names)) + # add the fields as the dtype argument for initializing + kwargs['dtype'] = [(fld, default_fields[fld]) for fld in names] + if 'dtype' not in kwargs: + kwargs['dtype'] = list(default_fields.items()) + # add the additional fields + if additional_fields is not None: + if not isinstance(additional_fields, list): + additional_fields = [additional_fields] + if not isinstance(kwargs['dtype'], list): + kwargs['dtype'] = [kwargs['dtype']] + kwargs['dtype'] += additional_fields + return super(_FieldArrayWithDefaults, cls).__new__(cls, shape, + name=name, **kwargs) + + def add_default_fields(self, names, **kwargs): + """ + Adds one or more empty default fields to self. + + Parameters + ---------- + names : (list of) string(s) + The names of the fields to add. Must be a field in self's default + fields. + + Other keyword args are any arguments passed to self's default fields. + + Returns + ------- + new array : instance of this array + A copy of this array with the field added. 
+ """ + if isinstance(names, str): + names = [names] + default_fields = self.default_fields(include_virtual=False, **kwargs) + # parse out any virtual fields + arr = self.__class__(1, field_kwargs=kwargs) + # try to perserve order + sortdict = dict([[nm, ii] for ii,nm in enumerate(names)]) + names = list(get_needed_fieldnames(arr, names)) + names.sort(key=lambda x: sortdict[x] if x in sortdict + else len(names)) + fields = [(name, default_fields[name]) for name in names] + arrays = [] + names = [] + for name,dt in fields: + arrays.append(default_empty(self.size, dtype=[(name, dt)])) + names.append(name) + return self.add_fields(arrays, names) + + @classmethod + def parse_parameters(cls, parameters, possible_fields=None): + """Parses a list of parameters to get the list of fields needed in + order to evaluate those parameters. + + Parameters + ---------- + parameters : (list of) strings + The list of desired parameters. These can be (functions of) fields + or virtual fields. + possible_fields : {None, dict} + Specify the list of possible fields. Must be a dictionary given + the names, and dtype of each possible field. If None, will use this + class's `_staticfields`. + + Returns + ------- + list : + The list of names of the fields that are needed in order to + evaluate the given parameters. + """ + if possible_fields is not None: + # make sure field names are strings and not unicode + possible_fields = dict([[f, dt] + for f,dt in possible_fields.items()]) + class ModifiedArray(cls): + _staticfields = possible_fields + cls = ModifiedArray + return cls(1, names=parameters).fieldnames + +# +# ============================================================================= +# +# WaveformArray +# +# ============================================================================= +# + +
+[docs] +class WaveformArray(_FieldArrayWithDefaults): + """ + A FieldArray with some default fields and properties commonly used + by CBC waveforms. This may be initialized in one of 3 ways: + + 1. With just the size of the array. In this case, the returned array will + have all of the default field names. Example: + + >>> warr = WaveformArray(10) + >>> warr.fieldnames + ('distance', + 'spin2x', + 'mass1', + 'mass2', + 'lambda1', + 'polarization', + 'spin2y', + 'spin2z', + 'spin1y', + 'spin1x', + 'spin1z', + 'inclination', + 'coa_phase', + 'dec', + 'tc', + 'lambda2', + 'ra') + + 2. With some subset of the default field names. Example: + + >>> warr = WaveformArray(10, names=['mass1', 'mass2']) + >>> warr.fieldnames + ('mass1', 'mass2') + + The list of names may include virtual fields, and methods, as well as + functions of these. If one or more virtual fields or methods are specified, + the source code is analyzed to pull out whatever underlying fields are + needed. Example: + + >>> warr = WaveformArray(10, names=['mchirp**(5/6)', 'chi_eff', 'cos(coa_phase)']) + >>> warr.fieldnames + ('spin2z', 'mass1', 'mass2', 'coa_phase', 'spin1z') + + 3. By specifying a dtype. In this case, only the provided fields will + be used, even if they are not in the defaults. Example: + + >>> warr = WaveformArray(10, dtype=[('foo', float)]) + >>> warr.fieldnames + ('foo',) + + Additional fields can also be specified using the additional_fields + keyword argument. Example: + + >>> warr = WaveformArray(10, names=['mass1', 'mass2'], additional_fields=[('bar', float)]) + >>> warr.fieldnames + ('mass1', 'mass2', 'bar') + + .. note:: + If an array is initialized with all of the default fields (case 1, + above), then the names come from waveform.parameters; i.e., they + are actually Parameter instances, not just strings. This means that the + field names carry all of the metadata that a Parameter has. For + example: + + >>> warr = WaveformArray(10) + >>> warr.fields[0] + 'distance' + >>> warr.fields[0].description + 'Luminosity distance to the binary (in Mpc).' 
+        >>> warr.fields[0].label
+        '$d_L$ (Mpc)'
+
+    """
+    _staticfields = (parameters.cbc_intrinsic_params +
+                     parameters.extrinsic_params).dtype_dict
+
+    _virtualfields = [
+        parameters.mchirp, parameters.eta, parameters.mtotal,
+        parameters.q, parameters.primary_mass, parameters.secondary_mass,
+        parameters.chi_eff,
+        parameters.spin_px, parameters.spin_py, parameters.spin_pz,
+        parameters.spin_sx, parameters.spin_sy, parameters.spin_sz,
+        parameters.spin1_a, parameters.spin1_azimuthal, parameters.spin1_polar,
+        parameters.spin2_a, parameters.spin2_azimuthal, parameters.spin2_polar,
+        parameters.remnant_mass]
+
+    @property
+    def primary_mass(self):
+        """Returns the larger of self.mass1 and self.mass2."""
+        return conversions.primary_mass(self.mass1, self.mass2)
+
+    @property
+    def secondary_mass(self):
+        """Returns the smaller of self.mass1 and self.mass2."""
+        return conversions.secondary_mass(self.mass1, self.mass2)
+
+    @property
+    def mtotal(self):
+        """Returns the total mass."""
+        return conversions.mtotal_from_mass1_mass2(self.mass1, self.mass2)
+
+    @property
+    def q(self):
+        """Returns the mass ratio m1/m2, where m1 >= m2."""
+        return conversions.q_from_mass1_mass2(self.mass1, self.mass2)
+
+    @property
+    def eta(self):
+        """Returns the symmetric mass ratio."""
+        return conversions.eta_from_mass1_mass2(self.mass1, self.mass2)
+
+    @property
+    def mchirp(self):
+        """Returns the chirp mass."""
+        return conversions.mchirp_from_mass1_mass2(self.mass1, self.mass2)
+
+    @property
+    def chi_eff(self):
+        """Returns the effective spin."""
+        return conversions.chi_eff(self.mass1, self.mass2, self.spin1z,
+                                   self.spin2z)
+
+    @property
+    def spin_px(self):
+        """Returns the x-component of the spin of the primary mass."""
+        return conversions.primary_spin(self.mass1, self.mass2, self.spin1x,
+                                        self.spin2x)
+
+    @property
+    def spin_py(self):
+        """Returns the y-component of the spin of the primary mass."""
+        return conversions.primary_spin(self.mass1, self.mass2, self.spin1y,
+                                        self.spin2y)
+
+    @property
+    def spin_pz(self):
+        """Returns the z-component of the spin of the primary mass."""
+        return conversions.primary_spin(self.mass1, self.mass2, self.spin1z,
+                                        self.spin2z)
+
+    @property
+    def spin_sx(self):
+        """Returns the x-component of the spin of the secondary mass."""
+        return conversions.secondary_spin(self.mass1, self.mass2, self.spin1x,
+                                          self.spin2x)
+
+    @property
+    def spin_sy(self):
+        """Returns the y-component of the spin of the secondary mass."""
+        return conversions.secondary_spin(self.mass1, self.mass2, self.spin1y,
+                                          self.spin2y)
+
+    @property
+    def spin_sz(self):
+        """Returns the z-component of the spin of the secondary mass."""
+        return conversions.secondary_spin(self.mass1, self.mass2, self.spin1z,
+                                          self.spin2z)
+
+    @property
+    def spin1_a(self):
+        """Returns the dimensionless spin magnitude of mass 1."""
+        return coordinates.cartesian_to_spherical_rho(
+            self.spin1x, self.spin1y, self.spin1z)
+
+    @property
+    def spin1_azimuthal(self):
+        """Returns the azimuthal spin angle of mass 1."""
+        return coordinates.cartesian_to_spherical_azimuthal(
+            self.spin1x, self.spin1y)
+
+    @property
+    def spin1_polar(self):
+        """Returns the polar spin angle of mass 1."""
+        return coordinates.cartesian_to_spherical_polar(
+            self.spin1x, self.spin1y, self.spin1z)
+
+    @property
+    def spin2_a(self):
+        """Returns the dimensionless spin magnitude of mass 2."""
+        return coordinates.cartesian_to_spherical_rho(
+            self.spin2x, self.spin2y, self.spin2z)
+
+    @property
+    def spin2_azimuthal(self):
+        """Returns the azimuthal spin angle of mass 2."""
+        return coordinates.cartesian_to_spherical_azimuthal(
+            self.spin2x, self.spin2y)
+
+    @property
+    def spin2_polar(self):
+        """Returns the polar spin angle of mass 2."""
+        return coordinates.cartesian_to_spherical_polar(
+            self.spin2x, self.spin2y, self.spin2z)
+
+    @property
+    def remnant_mass(self):
+        """Returns the remnant mass for an NS-BH binary."""
+        return conversions.remnant_mass_from_mass1_mass2_cartesian_spin_eos(
+            self.mass1, self.mass2,
+            spin1x=self.spin1x,
+            spin1y=self.spin1y,
+            spin1z=self.spin1z)
+ + + +__all__ = ['FieldArray', 'WaveformArray'] +
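An illustrative sketch (not part of the module source; it assumes the classes above are importable as pycbc.io.record): parse_parameters can be used to work out which base fields a set of derived parameters needs before building an array.

from pycbc.io.record import WaveformArray

# Determine which base fields are needed to evaluate a chirp-mass power
# and the effective spin, then build an array with only those fields.
needed = WaveformArray.parse_parameters(['mchirp**(5/6)', 'chi_eff'])
warr = WaveformArray(100, names=needed)
warr['mass1'] = 10.0
warr['mass2'] = 5.0
warr['spin1z'] = 0.0
warr['spin2z'] = 0.0
# virtual fields are evaluated on the fly from the base fields
print(warr.mchirp, warr.chi_eff)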
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/libutils.html b/latest/html/_modules/pycbc/libutils.html new file mode 100644 index 00000000000..eedb6741f63 --- /dev/null +++ b/latest/html/_modules/pycbc/libutils.html @@ -0,0 +1,391 @@ + + + + + + pycbc.libutils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.libutils

+# Copyright (C) 2014 Josh Willis
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""
+This module provides a simple interface for loading a shared library via ctypes,
+allowing it to be specified in an OS-independent way and searched for preferentially
+according to the paths that pkg-config specifies.
+"""
+
+import importlib
+import logging
+import inspect
+import os
+import fnmatch
+import ctypes
+import sys
+import subprocess
+from ctypes.util import find_library
+from collections import deque
+from subprocess import getoutput
+
+logger = logging.getLogger('pycbc.libutils')
+
+# Be careful setting the mode for opening libraries! Some libraries (e.g.
+# libgomp) seem to require the DEFAULT_MODE is used. Others (e.g. FFTW when
+# MKL is also present) require that os.RTLD_DEEPBIND is used. If seeing
+# segfaults around this code, try changing this mode!
+DEFAULT_RTLD_MODE = ctypes.DEFAULT_MODE
+
+
+
+[docs]
+def pkg_config(pkg_libraries):
+    """Use pkg-config to query for the location of libraries, library
+    directories, and header directories.
+
+    Arguments:
+        pkg_libraries(list): A list of packages as strings
+
+    Returns:
+        libraries(list), library_dirs(list), include_dirs(list)
+    """
+    libraries = []
+    library_dirs = []
+    include_dirs = []
+
+    # Check that we have the packages
+    for pkg in pkg_libraries:
+        if os.system('pkg-config --exists %s 2>/dev/null' % pkg) == 0:
+            pass
+        else:
+            print("Could not find library {0}".format(pkg))
+            sys.exit(1)
+
+    # Get the pkg-config flags
+    if len(pkg_libraries) > 0:
+        # PKG_CONFIG_ALLOW_SYSTEM_CFLAGS explicitly lists system paths.
+        # On system-wide LAL installs, this is needed for swig to find lalswig.i
+        for token in getoutput("PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config --libs --cflags %s" % ' '.join(pkg_libraries)).split():
+            if token.startswith("-l"):
+                libraries.append(token[2:])
+            elif token.startswith("-L"):
+                library_dirs.append(token[2:])
+            elif token.startswith("-I"):
+                include_dirs.append(token[2:])
+
+    return libraries, library_dirs, include_dirs
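A usage sketch (the package names are only examples and must actually be registered with pkg-config on the system; note that pkg_config() exits the process if a package is missing):

from pycbc.libutils import pkg_config

libs, lib_dirs, inc_dirs = pkg_config(['lal', 'lalsimulation'])
print('link against:', libs)
print('library search paths:', lib_dirs)
print('header search paths:', inc_dirs)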
+ + +
+[docs] +def pkg_config_header_strings(pkg_libraries): + """ Returns a list of header strings that could be passed to a compiler + """ + _, _, header_dirs = pkg_config(pkg_libraries) + + header_strings = [] + + for header_dir in header_dirs: + header_strings.append("-I" + header_dir) + + return header_strings
+ + +
+[docs] +def pkg_config_check_exists(package): + return (os.system('pkg-config --exists {0} 2>/dev/null'.format(package)) == 0)
+ + +
+[docs]
+def pkg_config_libdirs(packages):
+    """
+    Returns a list of all library paths that pkg-config says should be included when
+    linking against the list of packages given as 'packages'. An empty return list means
+    that the package may be found in the standard system locations, irrespective of
+    pkg-config.
+    """
+
+    # don't try calling pkg-config if NO_PKGCONFIG is set in environment
+    if os.environ.get("NO_PKGCONFIG", None):
+        return []
+
+    # if calling pkg-config fails, don't continue and don't try again.
+    with open(os.devnull, "w") as FNULL:
+        try:
+            subprocess.check_call(["pkg-config", "--version"], stdout=FNULL)
+        except Exception:
+            print(
+                "PyCBC.libutils: pkg-config call failed, "
+                "setting NO_PKGCONFIG=1",
+                file=sys.stderr,
+            )
+            os.environ['NO_PKGCONFIG'] = "1"
+            return []
+
+    # First, check that we can call pkg-config on each package in the list
+    for pkg in packages:
+        if not pkg_config_check_exists(pkg):
+            raise ValueError("Package {0} cannot be found on the pkg-config search path".format(pkg))
+
+    libdirs = []
+    for token in getoutput("PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 pkg-config --libs-only-L {0}".format(' '.join(packages))).split():
+        if token.startswith("-L"):
+            libdirs.append(token[2:])
+    return libdirs
+ + +
+[docs] +def get_libpath_from_dirlist(libname, dirs): + """ + This function tries to find the architecture-independent library given by libname in the first + available directory in the list dirs. 'Architecture-independent' means omitting any prefix such + as 'lib' or suffix such as 'so' or 'dylib' or version number. Within the first directory in which + a matching pattern can be found, the lexicographically first such file is returned, as a string + giving the full path name. The only supported OSes at the moment are posix and mac, and this + function does not attempt to determine which is being run. So if for some reason your directory + has both '.so' and '.dylib' libraries, who knows what will happen. If the library cannot be found, + None is returned. + """ + dirqueue = deque(dirs) + while (len(dirqueue) > 0): + nextdir = dirqueue.popleft() + possible = [] + # Our directory might be no good, so try/except + try: + for libfile in os.listdir(nextdir): + if fnmatch.fnmatch(libfile,'lib'+libname+'.so*') or \ + fnmatch.fnmatch(libfile,'lib'+libname+'.dylib*') or \ + fnmatch.fnmatch(libfile,'lib'+libname+'.*.dylib*') or \ + fnmatch.fnmatch(libfile,libname+'.dll') or \ + fnmatch.fnmatch(libfile,'cyg'+libname+'-*.dll'): + possible.append(libfile) + except OSError: + pass + # There might be more than one library found, we want the highest-numbered + if (len(possible) > 0): + possible.sort() + return os.path.join(nextdir,possible[-1]) + # If we get here, we didn't find it... + return None
+ + +
+[docs] +def get_ctypes_library(libname, packages, mode=DEFAULT_RTLD_MODE): + """ + This function takes a library name, specified in architecture-independent fashion (i.e. + omitting any prefix such as 'lib' or suffix such as 'so' or 'dylib' or version number) and + a list of packages that may provide that library, and according first to LD_LIBRARY_PATH, + then the results of pkg-config, and falling back to the system search path, will try to + return a CDLL ctypes object. If 'mode' is given it will be used when loading the library. + """ + libdirs = [] + # First try to get from LD_LIBRARY_PATH + if "LD_LIBRARY_PATH" in os.environ: + libdirs += os.environ["LD_LIBRARY_PATH"].split(":") + # Next try to append via pkg_config + try: + libdirs += pkg_config_libdirs(packages) + except ValueError: + pass + # We might be using conda/pip/virtualenv or some combination. This can + # leave lib files in a directory that LD_LIBRARY_PATH or pkg_config + # can miss. + libdirs.append(os.path.join(sys.prefix, "lib")) + + # Note that the function below can accept an empty list for libdirs, in + # which case it will return None + fullpath = get_libpath_from_dirlist(libname, libdirs) + + if fullpath is None: + # This won't actually return a full-path, but it should be something + # that can be found by CDLL + fullpath = find_library(libname) + + if fullpath is None: + # We got nothin' + return None + else: + if mode is None: + return ctypes.CDLL(fullpath) + else: + return ctypes.CDLL(fullpath, mode=mode)
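A minimal usage sketch; the library name 'gomp' is just an example, and whether it resolves depends on what is installed and on LD_LIBRARY_PATH and pkg-config:

import ctypes
from pycbc.libutils import get_ctypes_library

# Search LD_LIBRARY_PATH, then pkg-config results, then the system path.
libgomp = get_ctypes_library('gomp', [], mode=ctypes.RTLD_GLOBAL)
if libgomp is None:
    print('libgomp could not be located')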
+ + +
+[docs]
+def import_optional(library_name):
+    """ Try to import a library and return a stub if it is not found
+
+    Parameters
+    ----------
+    library_name: str
+        The name of the python library to import
+
+    Returns
+    -------
+    library: library or stub
+        Either returns the library if importing is successful or it returns
+        a stub which raises an import error and message when accessed.
+    """
+    try:
+        return importlib.import_module(library_name)
+    except ImportError:
+        # module wasn't found so let's return a stub instead to inform
+        # the user what has happened when they try to use related functions
+        class no_module(object):
+            def __init__(self, library):
+                self.library = library
+
+            def __getattribute__(self, attr):
+                if attr == 'library':
+                    return super().__getattribute__(attr)
+
+                lib = self.library
+
+                curframe = inspect.currentframe()
+                calframe = inspect.getouterframes(curframe, 2)
+                fun = calframe[1][3]
+                msg = """ The function {} tried to access
+                    '{}' of library '{}', however,
+                    '{}' is not currently installed. To enable this
+                    functionality install '{}' (e.g. through pip
+                    / conda / system packages / source).
+                    """.format(fun, attr, lib, lib, lib)
+                raise ImportError(inspect.cleandoc(msg))
+        return no_module(library_name)
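For instance (a sketch; 'pyswarms' is just an example of an optional dependency):

from pycbc.libutils import import_optional

# If pyswarms is installed this is the real module; otherwise it is a
# stub that raises an informative ImportError on first attribute access.
ps = import_optional('pyswarms')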
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/live/significance_fits.html b/latest/html/_modules/pycbc/live/significance_fits.html new file mode 100644 index 00000000000..b120b9dc05e --- /dev/null +++ b/latest/html/_modules/pycbc/live/significance_fits.html @@ -0,0 +1,303 @@ + + + + + + pycbc.live.significance_fits — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.live.significance_fits

+"""
+Functions for defining the live significance fits
+"""
+
+import logging
+import h5py
+import numpy
+
+logger = logging.getLogger('pycbc.live.single_fits')
+
+
+
+[docs] +def add_live_significance_trigger_pruning_options(parser): + """ + Add options used for pruning in live singles significance fits + """ + pruning_group = parser.add_argument_group("Trigger pruning") + pruning_group.add_argument( + "--prune-loudest", + type=int, + help="Maximum number of loudest trigger clusters to " + "remove from each bin." + ) + pruning_group.add_argument( + "--prune-window", + type=float, + help="Window (seconds) either side of the --prune-loudest " + "loudest triggers in each duration bin to remove." + ) + pruning_group.add_argument( + "--prune-stat-threshold", + type=float, + help="Minimum statistic value to consider a " + "trigger for pruning." + )
+ + + +
+[docs] +def verify_live_significance_trigger_pruning_options(args, parser): + """ + Verify options used for pruning in live singles significance fits + """ + # Pruning options are mutually required or not needed + prune_options = [args.prune_loudest, args.prune_window, + args.prune_stat_threshold] + + if any(prune_options) and not all(prune_options): + parser.error("Require all or none of --prune-loudest, " + "--prune-window and --prune-stat-threshold")
+ + + +
+[docs] +def add_live_significance_duration_bin_options(parser): + """ + Add options used to calculate duration bin edges in live + singles significance fits + """ + durbin_group = parser.add_argument_group('Duration Bins') + durbin_group.add_argument( + "--duration-bin-edges", + nargs='+', + type=float, + help="Durations to use for bin edges. " + "Use if specifying exact bin edges, " + "Not compatible with --duration-bin-start, " + "--duration-bin-end and --num-duration-bins" + ) + durbin_group.add_argument( + "--duration-bin-start", + type=float, + help="Shortest duration to use for duration bins." + "Not compatible with --duration-bins, requires " + "--duration-bin-end and --num-duration-bins." + ) + durbin_group.add_argument( + "--duration-bin-end", type=float, + help="Longest duration to use for duration bins." + ) + durbin_group.add_argument( + "--duration-from-bank", + help="Path to the template bank file to get max/min " + "durations from." + ) + durbin_group.add_argument( + "--num-duration-bins", + type=int, + help="How many template duration bins to split the bank " + "into before fitting." + ) + durbin_group.add_argument( + "--duration-bin-spacing", + choices=['linear', 'log'], + default='log', + help="How to set spacing for bank split " + "if using --num-duration-bins and " + "--duration-bin-start + --duration-bin-end " + "or --duration-from-bank." + )
+ + + +
+[docs] +def verify_live_significance_duration_bin_options(args, parser): + """ + Verify options used to calculate duration bin edges in live + singles significance fits + """ + # Check the bin options + if args.duration_bin_edges: + if (args.duration_bin_start or args.duration_bin_end or + args.duration_from_bank or args.num_duration_bins): + parser.error("Cannot use --duration-bin-edges with " + "--duration-bin-start, --duration-bin-end, " + "--duration-from-bank or --num-duration-bins.") + else: + if not args.num_duration_bins: + parser.error("--num-duration-bins must be set if not using " + "--duration-bin-edges.") + if not ((args.duration_bin_start and args.duration_bin_end) or + args.duration_from_bank): + parser.error("--duration-bin-start & --duration-bin-end or " + "--duration-from-bank must be set if not using " + "--duration-bin-edges.") + if args.duration_bin_end and \ + args.duration_bin_end <= args.duration_bin_start: + parser.error("--duration-bin-end must be greater than " + "--duration-bin-start, got " + f"{args.duration_bin_end} and {args.duration_bin_start}")
+ + + +
+[docs] +def duration_bins_from_cli(args): + """Create the duration bins from CLI options. + """ + if args.duration_bin_edges: + # direct bin specification + return numpy.array(args.duration_bin_edges) + # calculate bins from min/max and number + min_dur = args.duration_bin_start + max_dur = args.duration_bin_end + if args.duration_from_bank: + # read min/max duration directly from the bank itself + with h5py.File(args.duration_from_bank, 'r') as bank_file: + temp_durs = bank_file['template_duration'][:] + min_dur, max_dur = min(temp_durs), max(temp_durs) + if args.duration_bin_spacing == 'log': + return numpy.logspace( + numpy.log10(min_dur), + numpy.log10(max_dur), + args.num_duration_bins + 1 + ) + if args.duration_bin_spacing == 'linear': + return numpy.linspace( + min_dur, + max_dur, + args.num_duration_bins + 1 + ) + raise RuntimeError("Invalid duration bin specification")
+ + + +__all__ = [ + 'add_live_significance_trigger_pruning_options', + 'verify_live_significance_trigger_pruning_options', + 'add_live_significance_duration_bin_options', + 'verify_live_significance_duration_bin_options', + 'duration_bins_from_cli', +] +
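A short sketch of how these helpers are intended to fit together; the argument values are illustrative only:

import argparse
from pycbc.live import significance_fits as sngl_fits

parser = argparse.ArgumentParser()
sngl_fits.add_live_significance_trigger_pruning_options(parser)
sngl_fits.add_live_significance_duration_bin_options(parser)

# e.g. eight logarithmically spaced bins between 0.5 s and 128 s
args = parser.parse_args([
    '--duration-bin-start', '0.5',
    '--duration-bin-end', '128',
    '--num-duration-bins', '8',
])
sngl_fits.verify_live_significance_trigger_pruning_options(args, parser)
sngl_fits.verify_live_significance_duration_bin_options(args, parser)
bin_edges = sngl_fits.duration_bins_from_cli(args)  # 9 edges for 8 bins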
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/live/snr_optimizer.html b/latest/html/_modules/pycbc/live/snr_optimizer.html new file mode 100644 index 00000000000..9728ee01acf --- /dev/null +++ b/latest/html/_modules/pycbc/live/snr_optimizer.html @@ -0,0 +1,539 @@ + + + + + + pycbc.live.snr_optimizer — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.live.snr_optimizer

+# Copyright (C) 2023 Arthur Tolley, Gareth Cabourn Davies
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" This module contains functions for optimizing the signal-to-noise ratio
+of triggers produced by PyCBC Live. Also contained within this module are the
+command line arguments required and options group for the SNR optimization.
+This module is primarily used in the pycbc_optimize_snr program.
+"""
+
+import time
+import logging
+import types
+import numpy
+from scipy.optimize import differential_evolution, shgo
+from pycbc import (
+    DYN_RANGE_FAC, waveform
+)
+from pycbc.types import zeros
+import pycbc.waveform.bank
+from pycbc.filter import matched_filter_core
+import pycbc.conversions as cv
+
+logger = logging.getLogger('pycbc.live.snr_optimizer')
+
+try:
+    import pyswarms as ps
+except:
+    ps = None
+
+# Set a minimum mass for points tried in optimization allowing for
+# minimal slop relative to the lightest template
+MIN_CPT_MASS = 0.99
+
+# Set a large maximum total mass
+MAX_MTOTAL = 500.
+
+Nfeval = 0
+start_time = time.time()
+
+
+
+[docs] +def callback_func(Xi, convergence=0): + global Nfeval + logger.info("Currently at %d %s", Nfeval, convergence) + # Time out if the optimization takes longer than 6 minutes + if (time.time() - start_time) > 360: + return True + Nfeval += 1
+ + + +
+[docs] +def compute_network_snr_core(v, data, coinc_times, ifos, flen, approximant, + flow, f_end, delta_f, sample_rate, raise_err=False): + """ + Compute network SNR as a function over mchirp, eta, and two aligned + spin components, stored in that order in the sequence v. + + Parameters + ---------- + v : list + A list containing the input values for mchirp, eta, and spin + components. + data : dict + A dictionary containing keys of ifos ('H1', 'L1') and + values of the frequency series data for those ifos + coinc_times : dict + A dictionary containing the coincidence times for the network. + ifos : list + A list of the ifos, e.g. ['H1', 'L1'] + flen : float + The length of the data. + approximant : str + The approximant used for the waveform model. + flow : float + The lower frequency bound. + f_end : float + The upper frequency bound. + delta_f : float + The frequency spacing. + sample_rate : float + The sampling rate of the data. + raise_err : bool, optional + A flag indicating whether to raise an error if an exception + occurs during the computation. Defaults to False. + + Returns + ------- + network_snr : float + The computed network SNR (Signal-to-Noise Ratio) value. + snr_series_dict : dict + A dictionary containing the snr timeseries from each ifo. + + """ + distance = 1.0 / DYN_RANGE_FAC + mtotal = cv.mtotal_from_mchirp_eta(v[0], v[1]) + mass1 = cv.mass1_from_mtotal_eta(mtotal, v[1]) + mass2 = cv.mass2_from_mtotal_eta(mtotal, v[1]) + + # enforce broadly accepted search space boundaries + if mass1 < MIN_CPT_MASS or mass2 < MIN_CPT_MASS or mtotal > MAX_MTOTAL: + return -numpy.inf, {} + + try: + htilde = waveform.get_waveform_filter( + zeros(flen, dtype=numpy.complex64), + approximant=approximant, + mass1=mass1, mass2=mass2, spin1z=v[2], spin2z=v[3], + f_lower=flow, f_final=f_end, delta_f=delta_f, + delta_t=1./sample_rate, distance=distance) + except RuntimeError: + if raise_err: + raise + # Assume a failure in the waveform approximant + # due to the choice of parameters and carry on + return -numpy.inf, {} + + if not hasattr(htilde, 'params'): + htilde.params = dict(mass1=mass1, mass2=mass2, + spin1z=v[2], spin2z=v[3]) + if not hasattr(htilde, 'end_idx'): + htilde.end_idx = int(f_end / htilde.delta_f) + htilde.approximant = approximant + htilde.sigmasq = types.MethodType(pycbc.waveform.bank.sigma_cached, + htilde) + htilde.min_f_lower = flow + htilde.end_frequency = f_end + htilde.f_lower = flow + network_snrsq = 0 + snr_series_dict = {} + for ifo in ifos: + sigmasq = htilde.sigmasq(data[ifo].psd) + snr, _, norm = matched_filter_core(htilde, data[ifo], + h_norm=sigmasq) + duration = 0.095 + half_dur_samples = int(snr.sample_rate * duration / 2) + onsource_idx = (float(coinc_times[ifo] - snr.start_time) * + snr.sample_rate) + onsource_idx = int(round(onsource_idx)) + onsource_slice = slice(onsource_idx - half_dur_samples, + onsource_idx + half_dur_samples + 1) + snr_series = snr[onsource_slice] * norm + snr_series_dict[ifo] = snr * norm + snr_series_dict['sigmasq_' + ifo] = sigmasq + network_snrsq += max(abs(snr_series._data)) ** 2. + + return network_snrsq ** 0.5, snr_series_dict
+ + + +
+[docs] +def compute_minus_network_snr(v, *argv): + if len(argv) == 1: + argv = argv[0] + nsnr, _ = compute_network_snr_core(v, *argv) + logger.debug('snr: %s', nsnr) + return -nsnr
+ + + +
+[docs] +def compute_minus_network_snr_pso(v, *argv, **kwargs): + argv = kwargs['args'] + nsnr_array = numpy.array([ + compute_network_snr_core(v_i, *argv)[0] + for v_i in v]) + return -nsnr_array
+ + + +
+[docs] +def optimize_di(bounds, cli_args, extra_args, initial_point): + # Convert from dict to array with parameters in a given order + bounds = numpy.array([ + bounds['mchirp'], + bounds['eta'], + bounds['spin1z'], + bounds['spin2z'] + ]) + # Initialize the population with random values within specified bounds + population = numpy.random.uniform( + bounds[:, 0], + bounds[:, 1], + size=(int(cli_args.snr_opt_di_popsize), len(bounds)) + ) + if cli_args.snr_opt_include_candidate: + # add the initial point to the population + population = numpy.concatenate((population[:-1], + initial_point)) + logger.debug('Initial population: %s', population) + + results = differential_evolution( + compute_minus_network_snr, + bounds, + maxiter=int(cli_args.snr_opt_di_maxiter), + workers=(cli_args.cores or -1), + popsize=int(cli_args.snr_opt_di_popsize), + mutation=(0.5, 1), + recombination=0.7, + callback=callback_func, + args=extra_args, + init=population + ) + return results.x
+ + + +
+[docs] +def optimize_shgo(bounds, cli_args, extra_args, initial_point): # pylint: disable=unused-argument + bounds = [ + bounds['mchirp'], + bounds['eta'], + bounds['spin1z'], + bounds['spin2z'] + ] + results = shgo( + compute_minus_network_snr, + bounds=bounds, + args=extra_args, + iters=cli_args.snr_opt_shgo_iters, + n=cli_args.snr_opt_shgo_samples, + sampling_method="sobol" + ) + return results.x
+ + + +
+[docs] +def optimize_pso(bounds, cli_args, extra_args, initial_point): + options = { + 'c1': cli_args.snr_opt_pso_c1, + 'c2': cli_args.snr_opt_pso_c2, + 'w': cli_args.snr_opt_pso_w + } + min_bounds = numpy.array([ + bounds['mchirp'][0], + bounds['eta'][0], + bounds['spin1z'][0], + bounds['spin2z'][0] + ]) + max_bounds = numpy.array([ + bounds['mchirp'][1], + bounds['eta'][1], + bounds['spin1z'][1], + bounds['spin2z'][1] + ]) + + # Initialize the population with random values within specified bounds + population = numpy.random.uniform( + min_bounds, + max_bounds, + size=(int(cli_args.snr_opt_pso_particles), len(bounds)) + ) + + if cli_args.snr_opt_include_candidate: + # add the initial point to the population + population = numpy.concatenate((population[:-1], + initial_point)) + logger.debug('Initial population: %s', population) + + optimizer = ps.single.GlobalBestPSO( + n_particles=int(cli_args.snr_opt_pso_particles), + dimensions=4, + options=options, + bounds=(min_bounds, max_bounds), + init_pos=population + ) + _, results = optimizer.optimize( + compute_minus_network_snr_pso, + iters=int(cli_args.snr_opt_pso_iters), + n_processes=cli_args.cores, + args=extra_args + ) + return results
+ + + +optimize_funcs = { + 'differential_evolution': optimize_di, + 'shgo': optimize_shgo, + 'pso': optimize_pso +} + +# The following sets the default values of the options, but allows us to check +# if the option has been given on the command line + +# For each optimizer, we have a dictionary of the options, its help +# message and default value + +option_dict = { + 'differential_evolution': { + 'maxiter': ('The maximum number of generations over which the entire ' + 'population is evolved.', 50), + 'popsize': ('A multiplier for setting the total population size.', + 100), + }, + 'shgo': { + 'samples': ('Number of sampling points used in the construction of ' + 'the simplicial complex.', 76), + 'iters': ('Number of iterations used in the construction of the ' + 'simplicial complex.', 3), + }, + 'pso': { + 'iters': ('Number of iterations used in the particle swarm ' + 'optimization.', 5), + 'particles': ('Number of particles used in the swarm.', 250), + 'c1': ('The hyperparameter c1: the cognitive parameter.', 0.5), + 'c2': ('The hyperparameter c2: the social parameter.', 2.0), + 'w': ('The hyperparameter w: the inertia parameter.', 0.01), + } +} + +
+[docs] +def insert_snr_optimizer_options(parser): + opt_opt_group = parser.add_argument_group("SNR optimizer configuration " + "options.") + # Option to choose which optimizer to use: + optimizer_choices = sorted(list(option_dict.keys())) + opt_opt_group.add_argument('--snr-opt-method', + default='differential_evolution', + choices=optimizer_choices, + help='SNR Optimizer choices: ' + ', '.join(optimizer_choices)) + + # Add the generic options + opt_opt_group.add_argument('--snr-opt-include-candidate', + action='store_true', + help='Include parameters of the candidate event in the initialized ' + 'array for the optimizer. Only relevant for --snr-opt-method pso ' + 'or differential_evolution') + opt_opt_group.add_argument('--snr-opt-seed', + default='42', + help='Seed to supply to the random generation of initial array to ' + 'pass to the optimizer. Only relevant for --snr-opt-method pso ' + 'or differential_evolution. Set to ''random'' for a random seed') + + # For each optimizer, add the possible options + for optimizer, option_subdict in option_dict.items(): + optimizer_name = optimizer.replace('_', '-') + if optimizer_name == 'differential-evolution': + optimizer_name = 'di' + for opt_name, opt_help_default in option_subdict.items(): + option_name = f"--snr-opt-{optimizer_name}-{opt_name}" + opt_opt_group.add_argument(option_name, + type=float, + help=f'Only relevant for --snr-opt-method {optimizer}: ' + + opt_help_default[0] + + f' Default = {opt_help_default[1]}')
+ + + +
+[docs] +def check_snr_optimizer_options(args, parser): + """ + Deal with default options and required parameters given optimizer option + """ + options = {} + options['differential_evolution'] = [args.snr_opt_di_maxiter, + args.snr_opt_di_popsize] + options['shgo'] = [args.snr_opt_shgo_samples, args.snr_opt_shgo_iters] + options['pso'] = [args.snr_opt_pso_iters, args.snr_opt_pso_particles, + args.snr_opt_pso_c1, args.snr_opt_pso_c2, + args.snr_opt_pso_w] + + if args.snr_opt_method == 'pso' and ps is None: + parser.error('You need to install pyswarms to use the pso optimizer.') + + # Check all the options are suitable for the chosen optimizer + for k in options.keys(): + if args.snr_opt_method == k: + continue + if any(options[k]): + parser.error("Argument has been supplied which is not suitable " + + f"for the optimizer given ({args.snr_opt_method})") + + # Give the arguments the default values according to the dictionary + optimizer_name = args.snr_opt_method.replace('_', '-') + if optimizer_name == 'differential-evolution': + optimizer_name = 'di' + for key, value in option_dict[args.snr_opt_method].items(): + key_name = f'snr_opt_{optimizer_name}_{key}' + if not getattr(args, key_name): + setattr(args, key_name, value[1])
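A sketch of how the option machinery expands (only the optimizer choice is given on the command line here; check_snr_optimizer_options then fills in the per-optimizer defaults from option_dict):

import argparse
from pycbc.live import snr_optimizer

parser = argparse.ArgumentParser()
snr_optimizer.insert_snr_optimizer_options(parser)

args = parser.parse_args(['--snr-opt-method', 'differential_evolution'])
snr_optimizer.check_snr_optimizer_options(args, parser)
# unspecified options pick up the stored defaults (maxiter=50, popsize=100)
print(args.snr_opt_di_maxiter, args.snr_opt_di_popsize)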
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/live/supervision.html b/latest/html/_modules/pycbc/live/supervision.html new file mode 100644 index 00000000000..2e747fd7b9f --- /dev/null +++ b/latest/html/_modules/pycbc/live/supervision.html @@ -0,0 +1,299 @@ + + + + + + pycbc.live.supervision — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.live.supervision

+# Copyright (C) 2023 Arthur Tolley, Gareth Cabourn Davies
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""
+This module contains functions for supervising codes to run regularly
+during pycbc_live production, taking input from the search and returning
+files which can be used in the search.
+This module is primarily used in the pycbc_live_supervise_* programs.
+"""
+
+import logging
+import subprocess
+import time
+import os
+from datetime import datetime
+from dateutil.relativedelta import relativedelta
+
+import pycbc
+
+logger = logging.getLogger('pycbc.live.supervision')
+
+
+
+
+
+
+
+[docs] +def dict_to_args(opts_dict): + """ + Convert an option dictionary into a list to be used by subprocess.run + """ + dargs = [] + for option, value in opts_dict.items(): + dargs.append('--' + option.strip()) + if value == '': + # option is a flag, do nothing + continue + if len(value.split()) > 1: + # value is a list, append individually + for v in value.split(): + dargs.append(v) + else: + # Single value option - append once + dargs.append(value) + return dargs
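For example (the option names and values are illustrative):

from pycbc.live.supervision import dict_to_args

opts = {
    'ifos': 'H1 L1',                  # multi-value option: split into tokens
    'output-directory': '/tmp/fits',  # single-value option
    'verbose': '',                    # empty string marks a flag
}
print(dict_to_args(opts))
# ['--ifos', 'H1', 'L1', '--output-directory', '/tmp/fits', '--verbose']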
+ + + +
+[docs]
+def mail_volunteers_error(controls, mail_body_lines, subject):
+    """
+    Email a list of people, defined by the mail-volunteers-file option.
+    To be used for errors or unusual occurrences.
+    """
+    with open(controls['mail-volunteers-file'], 'r') as mail_volunteers_file:
+        volunteers = [volunteer.strip() for volunteer in
+                      mail_volunteers_file.readlines()]
+    logger.info("Emailing %s with warnings", ' '.join(volunteers))
+    mail_command = [
+        'mail',
+        '-s',
+        subject
+    ]
+    mail_command += volunteers
+    mail_body = '\n'.join(mail_body_lines)
+    try:
+        subprocess.run(mail_command, input=mail_body, text=True, check=True)
+    except subprocess.CalledProcessError as sub_err:
+        logger.error("Could not send mail on error")
+        raise sub_err
+ + + +
+[docs] +def run_and_error(command_arguments, controls): + """ + Wrapper around subprocess.run to catch errors and send emails if required + """ + logger.info("Running %s", " ".join(command_arguments)) + command_output = subprocess.run( + command_arguments, + capture_output=True + ) + + if command_output.returncode: + error_contents = [' '.join(command_arguments), '\n', + command_output.stderr.decode()] + if 'mail-volunteers-file' in controls: + mail_volunteers_error( + controls, + error_contents, + f"PyCBC live could not run {command_arguments[0]}" + ) + err_msg = f"Could not run {command_arguments[0]}:\n" + err_msg += ' '.join(error_contents) + raise subprocess.SubprocessError(err_msg)
+ + + +
+[docs] +def wait_for_utc_time(target_str): + """Wait until the UTC time is as given by `target_str`, in HH:MM:SS format. + """ + target_hour, target_minute, target_second = map(int, target_str.split(':')) + now = datetime.utcnow() + # for today's target, take now and replace the time + target_today = now + relativedelta( + hour=target_hour, minute=target_minute, second=target_second + ) + # for tomorrow's target, take now, add one day, and replace the time + target_tomorrow = now + relativedelta( + days=1, hour=target_hour, minute=target_minute, second=target_second + ) + next_target = target_today if now <= target_today else target_tomorrow + sleep_seconds = (next_target - now).total_seconds() + logger.info('Waiting %.0f s', sleep_seconds) + time.sleep(sleep_seconds)
+ + + +
+[docs]
+def ensure_directories(control_values, day_str):
+    """
+    Ensure that the required directories exist
+    """
+    output_dir = os.path.join(
+        control_values['output-directory'],
+        day_str
+    )
+    pycbc.makedir(output_dir)
+    if 'public-dir' in control_values:
+        # The public directory will be in subdirectories for the year, month,
+        # day, e.g. 2024_04_12 will be in 2024/04/12.
+        public_dir = os.path.join(
+            control_values['public-dir'],
+            *day_str.split('_')
+        )
+        pycbc.makedir(public_dir)
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/mchirp_area.html b/latest/html/_modules/pycbc/mchirp_area.html new file mode 100644 index 00000000000..7f5c66ef9fc --- /dev/null +++ b/latest/html/_modules/pycbc/mchirp_area.html @@ -0,0 +1,455 @@ + + + + + + pycbc.mchirp_area — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.mchirp_area

+# Module with utilities for estimating candidate events source probabilities
+# Initial code by A. Curiel Barroso, August 2019
+# Modified by V. Villa-Ortega, January 2020, March 2021
+
+"""Functions to compute the area corresponding to different CBC on the m1 & m2
+plane when given a central mchirp value and uncertainty.
+It also includes a function that calculates the source frame when given the
+detector frame mass and redshift.
+"""
+
+import math
+import logging
+import numpy as np
+
+from scipy.integrate import quad
+from astropy.cosmology import FlatLambdaCDM
+
+from pycbc.cosmology import _redshift
+from pycbc.conversions import mass2_from_mchirp_mass1 as m2mcm1
+
+logger = logging.getLogger('pycbc.mchirp_area')
+
+
+
+[docs] +def insert_args(parser): + mchirp_group = parser.add_argument_group("Arguments for estimating the " + "source probabilities of a " + "candidate event using the snr, " + "mchirp, and effective distance.") + mchirp_group.add_argument('--src-class-mass-limits', type=float, nargs=3, + metavar=('MIN_M2', 'MAX_NS', 'MAX_M1'), + default=[1.0, 3.0, 45.0], + help="Minimum and maximum values for the mass " + "of the binary components and maximum mass " + "of a neutron star, used as limits " + "when computing the area corresponding" + "to different CBC sources.") + mchirp_group.add_argument('--src-class-mass-gap-max', type=float, + metavar=('MAX_GAP'), + help="Upper limit of the mass gap, corresponding" + " to the minimum mass of a black hole. " + "Used as limit of integration of the " + "different CBC regions when considering " + "the MassGap category.") + mchirp_group.add_argument('--src-class-mchirp-to-delta', type=float, + metavar='m0', required=True, + help='Coefficient to estimate the value of the ' + 'mchirp uncertainty by mchirp_delta = ' + 'm0 * mchirp.') + mchirp_group.add_argument('--src-class-eff-to-lum-distance', type=float, + metavar='a0', required=True, + help='Coefficient to estimate the value of the ' + 'luminosity distance from the minimum ' + 'eff distance by D_lum = a0 * min(D_eff).') + mchirp_group.add_argument('--src-class-lum-distance-to-delta', type=float, + nargs=2, metavar=('b0', 'b1'), required=True, + help='Coefficients to estimate the value of the ' + 'uncertainty on the luminosity distance ' + 'from the estimated luminosity distance and' + ' the coinc snr by delta_lum = D_lum * ' + 'exp(b0) * coinc_snr ** b1.') + mchirp_group.add_argument('--src-class-mass-gap-separate', + action='store_true', + help='Gives separate probabilities for each kind' + ' of mass gap CBC sources: GNS, GG, BHG.') + mchirp_group.add_argument('--src-class-lal-cosmology', + action='store_true', + help='Uses the Planck15 cosmology defined in ' + 'lalsuite instead of the astropy Planck15 ' + 'default model.')
+ + + +
+[docs] +def from_cli(args, parser): + mass_limits_sorted = sorted(args.src_class_mass_limits) + if args.src_class_mass_gap_max: + if args.src_class_mass_gap_max < mass_limits_sorted[1]: + parser.error('MAX_GAP value cannot be lower than MAX_NS limit') + return {'mass_limits': + {'max_m1': mass_limits_sorted[2], + 'min_m2': mass_limits_sorted[0]}, + 'mass_bdary': + {'ns_max': mass_limits_sorted[1], + 'gap_max': args.src_class_mass_gap_max}, + 'estimation_coeff': + {'a0': args.src_class_eff_to_lum_distance, + 'b0': args.src_class_lum_distance_to_delta[0], + 'b1': args.src_class_lum_distance_to_delta[1], + 'm0': args.src_class_mchirp_to_delta}, + 'mass_gap': True, + 'mass_gap_separate': args.src_class_mass_gap_separate, + 'lal_cosmology': args.src_class_lal_cosmology} + return {'mass_limits': + {'max_m1': mass_limits_sorted[2], + 'min_m2': mass_limits_sorted[0]}, + 'mass_bdary': + {'ns_max': mass_limits_sorted[1], + 'gap_max': mass_limits_sorted[1]}, + 'estimation_coeff': + {'a0': args.src_class_eff_to_lum_distance, + 'b0': args.src_class_lum_distance_to_delta[0], + 'b1': args.src_class_lum_distance_to_delta[1], + 'm0': args.src_class_mchirp_to_delta}, + 'mass_gap': False, + 'mass_gap_separate': args.src_class_mass_gap_separate, + 'lal_cosmology': args.src_class_lal_cosmology}
+ + + +
+[docs] +def redshift_estimation(distance, distance_std, lal_cosmology): + """Takes values of distance and its uncertainty and returns a + dictionary with estimates of the redshift and its uncertainty. + If the argument 'lal_cosmology' is True, it uses Planck15 cosmology + model as defined in lalsuite instead of the astropy default. + Constants for lal_cosmology taken from Planck15_lal_cosmology() in + https://git.ligo.org/lscsoft/pesummary/-/blob/master/pesummary/gw/ + cosmology.py. + """ + if lal_cosmology: + cosmology = FlatLambdaCDM(H0=67.90, Om0=0.3065) + else: + cosmology = None + z_estimation = _redshift(distance, cosmology=cosmology) + z_est_max = _redshift((distance + distance_std), + cosmology=cosmology) + z_est_min = _redshift((distance - distance_std), + cosmology=cosmology) + z_std_estimation = 0.5 * (z_est_max - z_est_min) + z = {'central': z_estimation, 'delta': z_std_estimation} + return z
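A usage sketch (the numbers are illustrative; distances are assumed to be in Mpc, as elsewhere in PyCBC):

from pycbc.mchirp_area import redshift_estimation

# Redshift estimate for a source at a luminosity distance of 400 +/- 60 Mpc
# using the default astropy cosmology.
z = redshift_estimation(400.0, 60.0, lal_cosmology=False)
print(z['central'], z['delta'])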
+ + + +
+[docs] +def src_mass_from_z_det_mass(z, del_z, mdet, del_mdet): + """Takes values of redshift, redshift uncertainty, detector mass and its + uncertainty and computes the source mass and its uncertainty. + """ + msrc = mdet / (1. + z) + del_msrc = msrc * ((del_mdet / mdet) ** 2. + + (del_z / (1. + z)) ** 2.) ** 0.5 + return (msrc, del_msrc)
+ + + +
+[docs] +def intmc(mc, x_min, x_max): + """Returns the integral of m2 over m1 between x_min and x_max, + assuming that mchirp is fixed. + """ + integral = quad(lambda x, mc: m2mcm1(mc, x), x_min, x_max, args=mc) + return integral[0]
+ + + +
+[docs] +def get_area(trig_mc, lim_h1, lim_h2, lim_v1, lim_v2): + """Returns the area under the chirp mass contour in a region of the m1-m2 + plane (m1 > m2). + + Parameters + ---------- + trig_mc : sequence of two values + first represents central estimate of mchirp in source frame, + second its uncertainty + lim_h1, lim_h2 : floats or the string 'diagonal' + upper and lower horizontal limits of the region (limits on m2) + lim_v1, lim_v2 : floats + right and left vertical limits of the region (limits on m1) + + Returns + ------- + area : float + """ + mc_max = trig_mc[0] + trig_mc[1] + mc_min = trig_mc[0] - trig_mc[1] + # The points where the equal mass line and a chirp mass + # curve intersect is m1 = m2 = 2**0.2 * mchirp + mi_max = (2.**0.2) * mc_max + mi_min = (2.**0.2) * mc_min + + if lim_h1 == 'diagonal': + max_h1 = mi_max + min_h1 = mi_min + fun_sup = lambda x: x + else: + max_h1 = m2mcm1(mc_max, lim_h1) + min_h1 = m2mcm1(mc_min, lim_h1) + fun_sup = lambda x: lim_h1 + + max_h2 = m2mcm1(mc_max, lim_h2) + min_h2 = m2mcm1(mc_min, lim_h2) + fun_inf = lambda x: lim_h2 + + lim_max1 = np.clip(max_h1, lim_v1, lim_v2) + lim_max2 = np.clip(max_h2, lim_v1, lim_v2) + lim_min1 = np.clip(min_h1, lim_v1, lim_v2) + lim_min2 = np.clip(min_h2, lim_v1, lim_v2) + + int_max = intmc(mc_max, lim_max1, lim_max2) + int_min = intmc(mc_min, lim_min1, lim_min2) + intline_sup = quad(fun_sup, lim_min1, lim_max1)[0] + intline_inf = quad(fun_inf, lim_min2, lim_max2)[0] + area = int_max + intline_sup - int_min - intline_inf + return area
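An illustrative call, mirroring how calc_areas evaluates the BNS region (all numbers are example values): the region has both masses between 1 and 3 solar masses, bounded above by the m1 = m2 diagonal, below by m2 = 1, and by m1 = 1 and m1 = 3 on the sides.

from pycbc.mchirp_area import get_area

# Area swept by a source-frame chirp mass of 1.20 +/- 0.04 in the BNS region
bns_area = get_area([1.20, 0.04], 'diagonal', 1.0, 1.0, 3.0)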
+ + + +
+[docs] +def calc_areas( + trig_mc_det, + mass_limits, + mass_bdary, + z, + mass_gap, + mass_gap_separate): + """Computes the area inside the lines of the second component mass as a + function of the first component mass for the two extreme values + of mchirp: mchirp +/- mchirp_uncertainty, for each region of the source + classifying diagram. + """ + trig_mc = src_mass_from_z_det_mass(z["central"], z["delta"], + trig_mc_det["central"], + trig_mc_det["delta"]) + m2_min = mass_limits["min_m2"] + m1_max = mass_limits["max_m1"] + ns_max = mass_bdary["ns_max"] + gap_max = mass_bdary["gap_max"] + + abbh = get_area(trig_mc, 'diagonal', gap_max, gap_max, m1_max) + abhg = get_area(trig_mc, gap_max, ns_max, gap_max, m1_max) + ansbh = get_area(trig_mc, ns_max, m2_min, gap_max, m1_max) + agg = get_area(trig_mc, 'diagonal', ns_max, ns_max, gap_max) + agns = get_area(trig_mc, ns_max, m2_min, ns_max, gap_max) + abns = get_area(trig_mc, 'diagonal', m2_min, m2_min, ns_max) + + if mass_gap: + if mass_gap_separate: + return { + "BNS": abns, + "GNS": agns, + "NSBH": ansbh, + "GG": agg, + "BHG": abhg, + "BBH": abbh + } + return { + "BNS": abns, + "NSBH": ansbh, + "BBH": abbh, + "Mass Gap": agns + agg + abhg + } + return { + "BNS": abns, + "NSBH": ansbh, + "BBH": abbh + }
+ + + +
+[docs] +def calc_probabilities(mchirp, snr, eff_distance, src_args): + """Computes the different probabilities that a candidate event belongs to + each CBC source category taking as arguments the chirp mass, the + coincident SNR and the effective distance, and estimating the + chirp mass uncertainty, the luminosity distance (and its uncertainty) + and the redshift (and its uncertainty). Probability is estimated to be + directly proportional to the area of the corresponding CBC region. + """ + mass_limits = src_args['mass_limits'] + mass_bdary = src_args['mass_bdary'] + coeff = src_args['estimation_coeff'] + trig_mc_det = {'central': mchirp, 'delta': mchirp * coeff['m0']} + dist_estimation = coeff['a0'] * eff_distance + dist_std_estimation = (dist_estimation * math.exp(coeff['b0']) * + snr ** coeff['b1']) + z = redshift_estimation(dist_estimation, dist_std_estimation, + src_args['lal_cosmology']) + mass_gap = src_args['mass_gap'] + mass_gap_separate = src_args['mass_gap_separate'] + + # If the mchirp is greater than the mchirp corresponding to two masses + # equal to the maximum mass, the probability for BBH is 100%. + # If it is less than the mchirp corresponding to two masses equal to the + # minimum mass, the probability for BNS is 100%. + mc_max = mass_limits['max_m1'] / (2 ** 0.2) + mc_min = mass_limits['min_m2'] / (2 ** 0.2) + + if trig_mc_det['central'] > mc_max * (1 + z['central']): + if mass_gap: + if mass_gap_separate: + probabilities = {"BNS": 0.0, "GNS": 0.0, "NSBH": 0.0, + "GG": 0.0, "BHG": 0.0, "BBH": 1.0} + else: + probabilities = {"BNS": 0.0, "NSBH": 0.0, "BBH": 1.0, + "Mass Gap": 0.0} + else: + probabilities = {"BNS": 0.0, "NSBH": 0.0, "BBH": 1.0} + + elif trig_mc_det['central'] < mc_min * (1 + z['central']): + if mass_gap: + if mass_gap_separate: + probabilities = {"BNS": 1.0, "GNS": 0.0, "NSBH": 0.0, + "GG": 0.0, "BHG": 0.0, "BBH": 0.0} + else: + probabilities = {"BNS": 1.0, "NSBH": 0.0, "BBH": 0.0, + "Mass Gap": 0.0} + else: + probabilities = {"BNS": 1.0, "NSBH": 0.0, "BBH": 0.0} + + else: + areas = calc_areas(trig_mc_det, mass_limits, mass_bdary, z, + mass_gap, mass_gap_separate) + total_area = sum(areas.values()) + probabilities = {key: areas[key] / total_area for key in areas} + + return probabilities
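A sketch of a direct call; all coefficient values below are purely illustrative, since in production they come from the --src-class-* command-line options parsed by from_cli():

from pycbc.mchirp_area import calc_probabilities

src_args = {
    'mass_limits': {'min_m2': 1.0, 'max_m1': 45.0},
    'mass_bdary': {'ns_max': 3.0, 'gap_max': 3.0},
    'estimation_coeff': {'a0': 0.75, 'b0': -0.4, 'b1': -1.0, 'm0': 0.01},
    'mass_gap': False,
    'mass_gap_separate': False,
    'lal_cosmology': False,
}
probs = calc_probabilities(mchirp=1.3, snr=12.0, eff_distance=150.0,
                           src_args=src_args)
print(probs)  # e.g. {'BNS': ..., 'NSBH': ..., 'BBH': ...}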
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/neutron_stars/eos_utils.html b/latest/html/_modules/pycbc/neutron_stars/eos_utils.html new file mode 100644 index 00000000000..64f87b69ccb --- /dev/null +++ b/latest/html/_modules/pycbc/neutron_stars/eos_utils.html @@ -0,0 +1,353 @@ + + + + + + pycbc.neutron_stars.eos_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.neutron_stars.eos_utils

+# Copyright (C) 2022 Francesco Pannarale, Andrew Williamson,
+# Samuel Higginbotham
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+Utility functions for handling NS equations of state
+"""
+import os.path
+import numpy as np
+from scipy.interpolate import interp1d
+import lalsimulation as lalsim
+from . import NS_SEQUENCES, NS_DATA_DIRECTORY
+from .pg_isso_solver import PG_ISSO_solver
+
+
+
+[docs] +def load_ns_sequence(eos_name): + """ + Load the data of an NS non-rotating equilibrium sequence generated + using the equation of state (EOS) chosen by the user. + File format is: grav mass (Msun), baryonic mass (Msun), compactness + + Parameters + ----------- + eos_name : string + NS equation of state label ('2H' is the only supported + choice at the moment) + + Returns + ---------- + ns_sequence : numpy.array + contains the sequence data in the form NS gravitational + mass (in solar masses), NS baryonic mass (in solar + masses), NS compactness (dimensionless) + max_ns_g_mass : float + the maximum NS gravitational mass (in solar masses) in + the sequence (this is the mass of the most massive stable + NS) + """ + ns_sequence_file = os.path.join( + NS_DATA_DIRECTORY, 'equil_{}.dat'.format(eos_name)) + if eos_name not in NS_SEQUENCES: + raise NotImplementedError( + f'{eos_name} does not have an implemented NS sequence file! ' + f'To implement, the file {ns_sequence_file} must exist and ' + 'contain: NS gravitational mass (in solar masses), NS baryonic ' + 'mass (in solar masses), NS compactness (dimensionless)') + ns_sequence = np.loadtxt(ns_sequence_file) + max_ns_g_mass = max(ns_sequence[:, 0]) + return (ns_sequence, max_ns_g_mass)
+ + + +
+[docs] +def interp_grav_mass_to_baryon_mass(ns_g_mass, ns_sequence, extrapolate=False): + """ + Determines the baryonic mass of an NS given its gravitational + mass and an NS equilibrium sequence (in solar masses). + + Parameters + ----------- + ns_g_mass : float + NS gravitational mass (in solar masses) + ns_sequence : numpy.array + Contains the sequence data in the form NS gravitational + mass (in solar masses), NS baryonic mass (in solar + masses), NS compactness (dimensionless) + extrapolate : boolean, optional + Invoke extrapolation in scipy.interpolate.interp1d. + Default is False (so ValueError is raised for ns_g_mass out of bounds) + + Returns + ---------- + float + """ + x = ns_sequence[:, 0] + y = ns_sequence[:, 1] + fill_value = "extrapolate" if extrapolate else np.nan + f = interp1d(x, y, fill_value=fill_value) + return f(ns_g_mass)
+ + + +
+[docs] +def interp_grav_mass_to_compactness(ns_g_mass, ns_sequence, extrapolate=False): + """ + Determines the dimensionless compactness parameter of an NS given + its gravitational mass and an NS equilibrium sequence. + + Parameters + ----------- + ns_g_mass : float + NS gravitational mass (in solar masses) + ns_sequence : numpy.array + Contains the sequence data in the form NS gravitational + mass (in solar masses), NS baryonic mass (in solar + masses), NS compactness (dimensionless) + extrapolate : boolean, optional + Invoke extrapolation in scipy.interpolate.interp1d. + Default is False (so ValueError is raised for ns_g_mass out of bounds) + + Returns + ---------- + float + """ + x = ns_sequence[:, 0] + y = ns_sequence[:, 2] + fill_value = "extrapolate" if extrapolate else np.nan + f = interp1d(x, y, fill_value=fill_value) + return f(ns_g_mass)
+ + + +
+[docs]
+def initialize_eos(ns_mass, eos, extrapolate=False):
+    """Load an equation of state and return the compactness and baryonic
+    mass for a given neutron star mass
+
+    Parameters
+    ----------
+    ns_mass : {float, array}
+        The gravitational mass of the neutron star, in solar masses.
+    eos : str
+        Name of the equation of state.
+    extrapolate : boolean, optional
+        Invoke extrapolation in scipy.interpolate.interp1d in the low-mass
+        regime. In the high-mass regime, the maximum NS mass supported by the
+        equation of state is not allowed to be exceeded. Default is False
+        (so ValueError is raised whenever ns_mass is out of bounds).
+
+    Returns
+    -------
+    ns_compactness : float
+        Compactness parameter of the neutron star.
+    ns_b_mass : float
+        Baryonic mass of the neutron star.
+    """
+    input_is_array = isinstance(ns_mass, np.ndarray)
+    if eos in NS_SEQUENCES:
+        ns_seq, ns_max = load_ns_sequence(eos)
+        # Never extrapolate beyond the maximum NS mass allowed by the EOS
+        try:
+            if any(ns_mass > ns_max) and input_is_array:
+                raise ValueError(
+                    f'Maximum NS mass for {eos} is {ns_max}, received masses '
+                    f'up to {max(ns_mass[ns_mass > ns_max])}')
+        except TypeError:
+            if ns_mass > ns_max and not input_is_array:
+                raise ValueError(
+                    f'Maximum NS mass for {eos} is {ns_max}, received '
+                    f'{ns_mass}')
+        # Interpolate NS compactness and rest mass
+        ns_compactness = interp_grav_mass_to_compactness(
+            ns_mass, ns_seq, extrapolate=extrapolate)
+        ns_b_mass = interp_grav_mass_to_baryon_mass(
+            ns_mass, ns_seq, extrapolate=extrapolate)
+    elif eos in lalsim.SimNeutronStarEOSNames:
+        #eos_obj = lalsim.SimNeutronStarEOSByName(eos)
+        #eos_fam = lalsim.CreateSimNeutronStarFamily(eos_obj)
+        #r_ns = lalsim.SimNeutronStarRadius(ns_mass * lal.MSUN_SI, eos_obj)
+        #ns_compactness = lal.G_SI * ns_mass * lal.MSUN_SI / (r_ns * lal.C_SI**2)
+        raise NotImplementedError(
+            'LALSimulation EOS interface not yet implemented!')
+    else:
+        raise NotImplementedError(
+            f'{eos} is not implemented! Available are: '
+            f'{NS_SEQUENCES + list(lalsim.SimNeutronStarEOSNames)}')
+    return (ns_compactness, ns_b_mass)
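For example (a sketch; '2H' is the tabulated equilibrium sequence shipped with PyCBC, as noted in load_ns_sequence):

from pycbc.neutron_stars import eos_utils

# Compactness and baryonic mass of a 1.4 solar-mass neutron star
ns_compactness, ns_b_mass = eos_utils.initialize_eos(1.4, '2H')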
+ + + +
+[docs] +def foucart18( + eta, ns_compactness, ns_b_mass, bh_spin_mag, bh_spin_pol): + """Function that determines the remnant disk mass of an NS-BH system + using the fit to numerical-relativity results discussed in + `Foucart, Hinderer & Nissanke, PRD 98, 081501(R) (2018)`_. + + .. _Foucart, Hinderer & Nissanke, PRD 98, 081501(R) (2018): + https://doi.org/10.1103/PhysRevD.98.081501 + + Parameters + ---------- + eta : {float, array} + The symmetric mass ratio of the system + (note: primary is assumed to be the BH). + ns_compactness : {float, array} + NS compactness parameter. + ns_b_mass : {float, array} + Baryonic mass of the NS. + bh_spin_mag: {float, array} + Dimensionless spin magnitude of the BH. + bh_spin_pol : {float, array} + The tilt angle of the BH spin. + """ + isso = PG_ISSO_solver(bh_spin_mag, bh_spin_pol) + # Fit parameters and tidal correction + alpha = 0.406 + beta = 0.139 + gamma = 0.255 + delta = 1.761 + fit = ( + alpha / eta ** (1/3) * (1 - 2 * ns_compactness) + - beta * ns_compactness / eta * isso + + gamma + ) + return ns_b_mass * np.where(fit > 0.0, fit, 0.0) ** delta
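An illustrative end-to-end call (the system parameters are example values only): a 7 + 1.4 solar-mass NS-BH binary with a BH spin of 0.9 aligned with the orbital angular momentum.

from pycbc.neutron_stars import eos_utils
from pycbc.conversions import eta_from_mass1_mass2

ns_compactness, ns_b_mass = eos_utils.initialize_eos(1.4, '2H')
eta = eta_from_mass1_mass2(7.0, 1.4)
# Remnant disk mass from the Foucart, Hinderer & Nissanke (2018) fit
disk_mass = eos_utils.foucart18(eta, ns_compactness, ns_b_mass, 0.9, 0.0)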
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/neutron_stars/pg_isso_solver.html b/latest/html/_modules/pycbc/neutron_stars/pg_isso_solver.html new file mode 100644 index 00000000000..998246ffa8e --- /dev/null +++ b/latest/html/_modules/pycbc/neutron_stars/pg_isso_solver.html @@ -0,0 +1,502 @@ + + + + + + pycbc.neutron_stars.pg_isso_solver — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.neutron_stars.pg_isso_solver

+# Copyright (C) 2022 Francesco Pannarale, Andrew Williamson,
+# Samuel Higginbotham
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""
+Innermost Stable Spherical Orbit (ISSO) solver in the Perez-Giz (PG)
+formalism. See `Stone, Loeb, Berger, PRD 87, 084053 (2013)`_.
+
+.. _Stone, Loeb, Berger, PRD 87, 084053 (2013):
+    http://dx.doi.org/10.1103/PhysRevD.87.084053
+"""
+
+import numpy as np
+from scipy.optimize import root_scalar
+
+
+
+[docs]
+def ISCO_solution(chi, incl):
+    r"""Analytic solution of the innermost
+    stable circular orbit (ISCO) for the Kerr metric.
+
+    See eq. (2.21) of `Bardeen, Press & Teukolsky, ApJ 178, 347 (1972)`_.
+
+    .. _Bardeen, Press & Teukolsky, ApJ 178, 347 (1972):
+        https://articles.adsabs.harvard.edu/pdf/1972ApJ...178..347B
+
+    Parameters
+    ----------
+    chi: float
+        the BH dimensionless spin parameter
+    incl: float
+        inclination angle between the BH spin and the orbital angular
+        momentum in radians
+
+    Returns
+    -------
+    float
+    """
+    chi2 = chi * chi
+    sgn = np.sign(np.cos(incl))
+    Z1 = 1 + np.cbrt(1 - chi2) * (np.cbrt(1 + chi) + np.cbrt(1 - chi))
+    Z2 = np.sqrt(3 * chi2 + Z1 * Z1)
+    return 3 + Z2 - sgn * np.sqrt((3 - Z1) * (3 + Z1 + 2 * Z2))
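The known Kerr limits give a quick sanity check of this closed-form expression:

    >>> import numpy as np
    >>> from pycbc.neutron_stars.pg_isso_solver import ISCO_solution
    >>> ISCO_solution(0.0, 0.0)      # Schwarzschild: r = 6 M
    >>> ISCO_solution(1.0, 0.0)      # extremal prograde equatorial orbit: r = 1 M
    >>> ISCO_solution(1.0, np.pi)    # extremal retrograde equatorial orbit: r = 9 M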
+ + + +
+[docs] +def ISSO_eq_at_pole(r, chi): + r"""Polynomial that enables the calculation of the Kerr polar + (:math:`\iota = \pm \pi / 2`) innermost stable spherical orbit + (ISSO) radius via the roots of + + .. math:: + + P(r) &= r^3 [r^2 (r - 6) + \chi^2 (3 r + 4)] \\ + &\quad + \chi^4 [3 r (r - 2) + \chi^2] \, , + + where :math:`\chi` is the BH dimensionless spin parameter. Physical + solutions are between 6 and + :math:`1 + \sqrt{3} + \sqrt{3 + 2 \sqrt{3}}`. + + Parameters + ---------- + r: float + the radial coordinate in BH mass units + chi: float + the BH dimensionless spin parameter + + Returns + ------- + float + """ + chi2 = chi * chi + return ( + r**3 * (r**2 * (r - 6) + chi2 * (3 * r + 4)) + + chi2 * chi2 * (3 * r * (r - 2) + chi2))
+ + + +
+[docs] +def ISSO_eq_at_pole_dr(r, chi): + """Partial derivative of :func:`ISSO_eq_at_pole` with respect to r. + + Parameters + ---------- + r: float + the radial coordinate in BH mass units + chi: float + the BH dimensionless spin parameter + + Returns + ------- + float + """ + chi2 = chi * chi + twlvchi2 = 12 * chi2 + sxchi4 = 6 * chi2 * chi2 + return ( + 6 * r**5 - 30 * r**4 + twlvchi2 * r**3 + twlvchi2 * r**2 + sxchi4 * r + + sxchi4)
+ + + +
+[docs] +def ISSO_eq_at_pole_dr2(r, chi): + """Double partial derivative of :func:`ISSO_eq_at_pole` with + respect to r. + + Parameters + ---------- + r: float + the radial coordinate in BH mass units + chi: float + the BH dimensionless spin parameter + + Returns + ------- + float + """ + chi2 = chi * chi + return ( + 30 * r**4 - 120 * r**3 + 36 * chi2 * r**2 + 24 * chi2 * r + + 6 * chi2 * chi2)
+ + + +
+[docs] +def PG_ISSO_eq(r, chi, incl): + r"""Polynomial that enables the calculation of a generic innermost + stable spherical orbit (ISSO) radius via the roots in :math:`r` of + + .. math:: + + S(r) &= r^8 Z(r) + \chi^2 (1 - \cos(\iota)^2) \\ + &\quad * [\chi^2 (1 - \cos(\iota)^2) Y(r) - 2 r^4 X(r)]\,, + + where + + .. math:: + + X(r) &= \chi^2 (\chi^2 (3 \chi^2 + 4 r (2 r - 3)) \\ + &\quad + r^2 (15 r (r - 4) + 28)) - 6 r^4 (r^2 - 4) \, , + + .. math:: + + Y(r) &= \chi^4 (\chi^4 + r^2 [7 r (3 r - 4) + 36]) \\ + &\quad + 6 r (r - 2) \\ + &\qquad * (\chi^6 + 2 r^3 + [\chi^2 (3 r + 2) + 3 r^2 (r - 2)]) \, , + + and :math:`Z(r) =` :func:`ISCO_eq`. Physical solutions are between + the equatorial ISSO (i.e. the ISCO) radius (:func:`ISCO_eq`) and + the polar ISSO radius (:func:`ISSO_eq_at_pole`). + See `Stone, Loeb, Berger, PRD 87, 084053 (2013)`_. + + .. _Stone, Loeb, Berger, PRD 87, 084053 (2013): + http://dx.doi.org/10.1103/PhysRevD.87.084053 + + Parameters + ---------- + r: float + the radial coordinate in BH mass units + chi: float + the BH dimensionless spin parameter + incl: float + inclination angle between the BH spin and the orbital angular + momentum in radians + + Returns + ------- + float + """ + chi2 = chi * chi + chi4 = chi2 * chi2 + r2 = r * r + r4 = r2 * r2 + three_r = 3 * r + r_minus_2 = r - 2 + sin_incl2 = (np.sin(incl))**2 + + X = ( + chi2 * ( + chi2 * (3 * chi2 + 4 * r * (2 * r - 3)) + + r2 * (15 * r * (r - 4) + 28)) + - 6 * r4 * (r2 - 4)) + Y = ( + chi4 * (chi4 + r2 * (7 * r * (three_r - 4) + 36)) + + 6 * r * r_minus_2 * ( + chi4 * chi2 + 2 * r2 * r * ( + chi2 * (three_r + 2) + 3 * r2 * r_minus_2))) + Z = (r * (r - 6))**2 - chi2 * (2 * r * (3 * r + 14) - 9 * chi2) + + return r4 * r4 * Z + chi2 * sin_incl2 * (chi2 * sin_incl2 * Y - 2 * r4 * X)
+ + + +
+[docs] +def PG_ISSO_eq_dr(r, chi, incl): + """Partial derivative of :func:`PG_ISSO_eq` with respect to r. + + Parameters + ---------- + r: float + the radial coordinate in BH mass units + chi: float + the BH dimensionless spin parameter + incl: float + inclination angle between the BH spin and the orbital angular + momentum in radians + + Returns + ------- + float + """ + sini = np.sin(incl) + sin2i = sini * sini + sin4i = sin2i * sin2i + chi2 = chi * chi + chi4 = chi2 * chi2 + chi6 = chi4 * chi2 + chi8 = chi4 * chi4 + chi10 = chi6 * chi4 + return ( + 12 * r**11 - 132 * r**10 + + r**9 * (120 * chi2 * sin2i - 60 * chi2 + 360) - r**8 * 252 * chi2 + + 8 * r**7 * ( + 36 * chi4 * sin4i - 30 * chi4 * sin2i + 9 * chi4 + - 48 * chi2 * sin2i) + + 7 * r**6 * (120 * chi4 * sin2i - 144 * chi4 * sin4i) + + 6 * r**5 * ( + 36 * chi6 * sin4i - 16 * chi6 * sin2i + 144 * chi4 * sin4i + - 56 * chi4 * sin2i) + + r**4 * (120 * chi6 * sin2i - 240 * chi6 * sin4i) + + r**3 * (84 * chi8 * sin4i - 24 * chi8 * sin2i - 192 * chi6 * sin4i) + - 84 * r**2 * chi8 * sin4i + + r * (12 * chi10 * sin4i + 72 * chi8 * sin4i) - 12 * chi10 * sin4i)
+ + + +
+[docs] +def PG_ISSO_eq_dr2(r, chi, incl): + """Second partial derivative of :func:`PG_ISSO_eq` with respect to + r. + + Parameters + ---------- + r: float + the radial coordinate in BH mass units + chi: float + the BH dimensionless spin parameter + incl: float + inclination angle between the BH spin and the orbital angular + momentum in radians + + Returns + ------- + float + """ + sini = np.sin(incl) + sin2i = sini * sini + sin4i = sin2i * sin2i + chi2 = chi * chi + chi4 = chi2 * chi2 + chi6 = chi4 * chi2 + chi8 = chi4 * chi4 + return ( + 132 * r**10 - 1320 * r**9 + + 90 * r**8 * (12 * chi2 * sin2i - 6 * chi2 + 36) - 2016 * chi2 * r**7 + + 56 * r**6 * ( + 36 * chi4 * sin4i - 30 * chi4 * sin2i + 9 * chi4 + - 48 * chi2 * sin2i) + + 42 * r**5 * (120 * chi4 * sin2i - 144 * chi4 * sin4i) + + 30 * r**4 * ( + 36 * chi6 * sin4i - 16 * chi6 * sin2i + 144 * chi4 * sin4i + - 56 * chi4 * sin2i) + + r**3 * (480 * chi6 * sin2i - 960 * chi6 * sin4i) + + r**2 * ( + 252 * chi8 * sin4i - 72 * chi8 * sin2i - 576 * chi6 * sin4i) + - r * 168 * chi8 * sin4i + + 12 * chi8 * chi2 * sin4i + 72 * chi8 * sin4i)
+ + + +
+[docs] +def PG_ISSO_solver(chi, incl): + """Function that determines the radius of the innermost stable + spherical orbit (ISSO) for a Kerr BH and a generic inclination + angle between the BH spin and the orbital angular momentum. + This function finds the appropriate root of :func:`PG_ISSO_eq`. + + Parameters + ---------- + chi: {float, array} + the BH dimensionless spin parameter + incl: {float, array} + the inclination angle between the BH spin and the orbital + angular momentum in radians + + Returns + ------- + solution: array + the radius of the orbit in BH mass units + """ + # Auxiliary variables + if np.isscalar(chi): + chi = np.array(chi, copy=False, ndmin=1) + incl = np.array(incl, copy=False, ndmin=1) + chi = np.abs(chi) + # ISCO radius for the given spin magnitude + rISCO_limit = ISCO_solution(chi, incl) + # If the inclination is 0 or pi, just output the ISCO radius + equatorial = np.isclose(incl, 0) | np.isclose(incl, np.pi) + if all(equatorial): + return rISCO_limit + + # ISSO radius for an inclination of pi/2 + # Initial guess is based on the extrema of the polar ISSO radius equation, + # that are: r=6 (chi=1) and r=1+sqrt(3)+sqrt(3+sqrt(12))=5.274... (chi=0) + initial_guess = [5.27451056440629 if c > 0.5 else 6 for c in chi] + rISSO_at_pole_limit = np.array([ + root_scalar( + ISSO_eq_at_pole, x0=g0, fprime=ISSO_eq_at_pole_dr, + fprime2=ISSO_eq_at_pole_dr2, args=(c)).root + for g0, c in zip(initial_guess, chi)]) + # If the inclination is pi/2, just output the ISSO radius at the pole(s) + polar = np.isclose(incl, 0.5*np.pi) + if all(polar): + return rISSO_at_pole_limit + + # Otherwise, find the ISSO radius for a generic inclination + initial_hi = np.maximum(rISCO_limit, rISSO_at_pole_limit) + initial_lo = np.minimum(rISCO_limit, rISSO_at_pole_limit) + brackets = [ + (bl, bh) if (c != 1 and PG_ISSO_eq(bl, c, inc) * + PG_ISSO_eq(bh, c, inc) < 0) else None + for bl, bh, c, inc in zip(initial_lo, initial_hi, chi, incl)] + solution = np.array([ + root_scalar( + PG_ISSO_eq, x0=g0, fprime=PG_ISSO_eq_dr, bracket=bracket, + fprime2=PG_ISSO_eq_dr2, args=(c, inc), xtol=1e-12).root + for g0, bracket, c, inc in zip(initial_hi, brackets, chi, incl)]) + oob = (solution < 1) | (solution > 9) + if any(oob): + solution = np.array([ + root_scalar( + PG_ISSO_eq, x0=g0, fprime=PG_ISSO_eq_dr, bracket=bracket, + fprime2=PG_ISSO_eq_dr2, args=(c, inc)).root + if ob else sol for g0, bracket, c, inc, ob, sol + in zip(initial_lo, brackets, chi, incl, oob, solution) + ]) + oob = (solution < 1) | (solution > 9) + if any(oob): + raise RuntimeError('Unable to obtain some solutions!') + return solution
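A minimal usage sketch; scalar or array inputs are accepted, and the returned radius is in units of the BH mass:

    >>> import numpy as np
    >>> from pycbc.neutron_stars.pg_isso_solver import PG_ISSO_solver
    >>> PG_ISSO_solver(0.9, 0.0)            # equatorial case: reduces to the ISCO radius
    >>> PG_ISSO_solver(0.9, np.pi / 3)      # generic inclination
    >>> PG_ISSO_solver(np.array([0.0, 0.5, 0.9]), np.array([0.0, 0.3, 1.2]))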
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/noise/gaussian.html b/latest/html/_modules/pycbc/noise/gaussian.html new file mode 100644 index 00000000000..b760d43f4f3 --- /dev/null +++ b/latest/html/_modules/pycbc/noise/gaussian.html @@ -0,0 +1,300 @@ + + + + + + pycbc.noise.gaussian — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.noise.gaussian

+# Copyright (C) 2012  Alex Nitz
+#
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""This module contains functions to generate gaussian noise colored with a
+noise spectrum.
+"""
+
+from pycbc import libutils
+from pycbc.types import TimeSeries, zeros
+from pycbc.types import complex_same_precision_as, FrequencySeries
+import lal
+import numpy.random
+
+lalsimulation = libutils.import_optional('lalsimulation')
+
+
+[docs]
+def frequency_noise_from_psd(psd, seed=None):
+    """ Create noise with a given psd.
+
+    Return noise coloured with the given psd. The returned noise
+    FrequencySeries has the same length and frequency step as the given psd.
+    Note that if unique noise is desired a unique seed should be provided.
+
+    Parameters
+    ----------
+    psd : FrequencySeries
+        The noise weighting to color the noise.
+    seed : {None, int}
+        The seed to generate the noise. If None, the seed will not be reset.
+
+    Returns
+    --------
+    noise : FrequencySeries
+        A FrequencySeries containing gaussian noise colored by the given psd.
+    """
+    sigma = 0.5 * (psd / psd.delta_f) ** (0.5)
+    if seed is not None:
+        numpy.random.seed(seed)
+    sigma = sigma.numpy()
+    dtype = complex_same_precision_as(psd)
+
+    not_zero = (sigma != 0)
+
+    sigma_red = sigma[not_zero]
+    noise_re = numpy.random.normal(0, sigma_red)
+    noise_co = numpy.random.normal(0, sigma_red)
+    noise_red = noise_re + 1j * noise_co
+
+    noise = numpy.zeros(len(sigma), dtype=dtype)
+    noise[not_zero] = noise_red
+
+    return FrequencySeries(noise,
+                           delta_f=psd.delta_f,
+                           dtype=dtype)
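A usage sketch, assuming the analytic aLIGOZeroDetHighPower model is available through pycbc.psd; lengths and cutoffs are illustrative:

    >>> import pycbc.psd
    >>> from pycbc.noise.gaussian import frequency_noise_from_psd
    >>> delta_f = 1.0 / 16
    >>> flen = int(1024 / delta_f) + 1
    >>> psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 10.0)
    >>> noise_f = frequency_noise_from_psd(psd, seed=0)
    >>> len(noise_f) == len(psd)      # same length and frequency step as the input PSD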
+ + +
+[docs] +def noise_from_psd(length, delta_t, psd, seed=None): + """ Create noise with a given psd. + + Return noise with a given psd. Note that if unique noise is desired + a unique seed should be provided. + + Parameters + ---------- + length : int + The length of noise to generate in samples. + delta_t : float + The time step of the noise. + psd : FrequencySeries + The noise weighting to color the noise. + seed : {0, int} + The seed to generate the noise. + + Returns + -------- + noise : TimeSeries + A TimeSeries containing gaussian noise colored by the given psd. + """ + noise_ts = TimeSeries(zeros(length), delta_t=delta_t) + + if seed is None: + seed = numpy.random.randint(2**32) + + randomness = lal.gsl_rng("ranlux", seed) + + N = int (1.0 / delta_t / psd.delta_f) + n = N//2+1 + stride = N//2 + + if n > len(psd): + raise ValueError("PSD not compatible with requested delta_t") + + psd = (psd[0:n]).lal() + psd.data.data[n-1] = 0 + psd.data.data[0] = 0 + + segment = TimeSeries(zeros(N), delta_t=delta_t).lal() + length_generated = 0 + + lalsimulation.SimNoise(segment, 0, psd, randomness) + while (length_generated < length): + if (length_generated + stride) < length: + noise_ts.data[length_generated:length_generated+stride] = segment.data.data[0:stride] + else: + noise_ts.data[length_generated:length] = segment.data.data[0:length-length_generated] + + length_generated += stride + lalsimulation.SimNoise(segment, stride, psd, randomness) + + return noise_ts
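A usage sketch; the PSD is sampled finely enough that its length is compatible with the requested delta_t (the PSD model and durations are illustrative):

    >>> import pycbc.psd
    >>> from pycbc.noise.gaussian import noise_from_psd
    >>> delta_t = 1.0 / 4096
    >>> delta_f = 1.0 / 16
    >>> flen = int(2048 / delta_f) + 1    # resolve up to the 2048 Hz Nyquist frequency
    >>> psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 10.0)
    >>> ts = noise_from_psd(16 * 4096, delta_t, psd, seed=127)   # 16 s of colored noise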
+ + +
+[docs]
+def noise_from_string(psd_name, length, delta_t, seed=None, low_frequency_cutoff=10.0):
+    """ Create noise from an analytic PSD
+
+    Return noise from the chosen PSD. Note that if unique noise is desired
+    a unique seed should be provided.
+
+    Parameters
+    ----------
+    psd_name : str
+        Name of the analytic PSD to use.
+    length : int
+        The length of noise to generate in samples.
+    delta_t : float
+        The time step of the noise.
+    seed : {None, int}
+        The seed to generate the noise.
+    low_frequency_cutoff : {10.0, float}
+        The low frequency cutoff to pass to the PSD generation.
+
+    Returns
+    --------
+    noise : TimeSeries
+        A TimeSeries containing gaussian noise colored by the given psd.
+    """
+    import pycbc.psd
+
+    # We just need enough resolution to resolve lines
+    delta_f = 1.0 / 8
+    flen = int(.5 / delta_t / delta_f) + 1
+    psd = pycbc.psd.from_string(psd_name, flen, delta_f, low_frequency_cutoff)
+    return noise_from_psd(int(length), delta_t, psd, seed=seed)
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/noise/reproduceable.html b/latest/html/_modules/pycbc/noise/reproduceable.html new file mode 100644 index 00000000000..4783e0a1c7a --- /dev/null +++ b/latest/html/_modules/pycbc/noise/reproduceable.html @@ -0,0 +1,363 @@ + + + + + + pycbc.noise.reproduceable — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.noise.reproduceable

+# Copyright (C) 2017  Alex Nitz
+#
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+import numpy, pycbc.psd
+from pycbc.types import TimeSeries, complex_same_precision_as
+from numpy.random import RandomState
+
+# This constant need to be constant to be able to recover identical results.
+BLOCK_SAMPLES = 1638400
+
+
+[docs]
+def block(seed, sample_rate):
+    """ Return block of normal random numbers
+
+    Parameters
+    ----------
+    seed : {None, int}
+        The seed to generate the noise.
+    sample_rate: float
+        Sets the variance of the white noise
+
+    Returns
+    --------
+    noise : numpy.ndarray
+        Array of random numbers
+    """
+    num = BLOCK_SAMPLES
+    rng = RandomState(seed % 2**32)
+    variance = sample_rate / 2
+    return rng.normal(size=num, scale=variance**0.5)
+ + +
+[docs]
+def normal(start, end, sample_rate=16384, seed=0):
+    """ Generate data with a white Gaussian (normal) distribution
+
+    Parameters
+    ----------
+    start : int
+        Start time in GPS seconds to generate noise
+    end : int
+        End time in GPS seconds to generate noise
+    sample_rate : {16384, float}
+        Sample rate to generate the data at. Keep constant if you want to
+        ensure continuity between disjoint time spans.
+    seed : {None, int}
+        The seed to generate the noise.
+
+    Returns
+    --------
+    noise : TimeSeries
+        A TimeSeries containing gaussian noise
+    """
+    # This is reproduceable because we use fixed seeds derived from known values
+    block_dur = BLOCK_SAMPLES / sample_rate
+    s = int(numpy.floor(start / block_dur))
+    e = int(numpy.floor(end / block_dur))
+
+    # The data evenly divides so the last block would be superfluous
+    if end % block_dur == 0:
+        e -= 1
+
+    sv = RandomState(seed).randint(-2**50, 2**50)
+    data = numpy.concatenate([block(i + sv, sample_rate)
+                              for i in numpy.arange(s, e + 1, 1)])
+    ts = TimeSeries(data, delta_t=1.0 / sample_rate, epoch=(s * block_dur))
+    return ts.time_slice(start, end)
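A sketch of the reproducibility guarantee: two calls with the same seed and sample rate agree sample by sample wherever their spans overlap:

    >>> import numpy
    >>> from pycbc.noise.reproduceable import normal
    >>> a = normal(0, 64, sample_rate=4096, seed=1)
    >>> b = normal(32, 96, sample_rate=4096, seed=1)
    >>> numpy.array_equal(a.time_slice(32, 64).numpy(), b.time_slice(32, 64).numpy())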
+ + +
+[docs]
+def colored_noise(psd, start_time, end_time,
+                  seed=0, sample_rate=16384,
+                  low_frequency_cutoff=1.0,
+                  filter_duration=128,
+                  scale=1.0):
+    """ Create noise from a PSD
+
+    Return noise from the chosen PSD. Note that if unique noise is desired
+    a unique seed should be provided.
+
+    Parameters
+    ----------
+    psd : pycbc.types.FrequencySeries
+        PSD to color the noise
+    start_time : int
+        Start time in GPS seconds to generate noise
+    end_time : int
+        End time in GPS seconds to generate noise
+    seed : {None, int}
+        The seed to generate the noise.
+    sample_rate: {16384, float}
+        The sample rate of the output data. Keep constant if you want to
+        ensure continuity between disjoint time spans.
+    low_frequency_cutoff : {1.0, float}
+        The low frequency cutoff to pass to the PSD generation.
+    filter_duration : {128, float}
+        The duration in seconds of the coloring filter
+    scale : {1.0, float}
+        Scale factor applied to the amplitude spectral density before coloring.
+
+    Returns
+    --------
+    noise : TimeSeries
+        A TimeSeries containing gaussian noise colored by the given psd.
+    """
+    psd = psd.copy()
+
+    flen = int(sample_rate / psd.delta_f) // 2 + 1
+    oldlen = len(psd)
+    psd.resize(flen)
+
+    # Want to avoid zeroes in PSD.
+    max_val = psd.max()
+    for i in range(len(psd)):
+        if i >= (oldlen-1):
+            psd.data[i] = psd[oldlen - 2]
+        if psd[i] == 0:
+            psd.data[i] = max_val
+
+    fil_len = int(filter_duration * sample_rate)
+    wn_dur = int(end_time - start_time) + 2 * filter_duration
+    if psd.delta_f >= 1. / (2.*filter_duration):
+        # If the PSD is short enough, this method is less memory intensive than
+        # resizing and then calling inverse_spectrum_truncation
+        psd = pycbc.psd.interpolate(psd, 1.0 / (2. * filter_duration))
+        # inverse_spectrum_truncation truncates the inverted PSD. To truncate
+        # the non-inverted PSD we give it the inverted PSD to truncate and then
+        # invert the output.
+        psd = 1. / pycbc.psd.inverse_spectrum_truncation(
+            1./psd,
+            fil_len,
+            low_frequency_cutoff=low_frequency_cutoff,
+            trunc_method='hann')
+        psd = psd.astype(complex_same_precision_as(psd))
+        # Zero-pad the time-domain PSD to desired length. Zeroes must be added
+        # in the middle, so some rolling between a resize is used.
+        psd = psd.to_timeseries()
+        psd.roll(fil_len)
+        psd.resize(int(wn_dur * sample_rate))
+        psd.roll(-fil_len)
+        # As time series is still mirrored the complex frequency components are
+        # 0. But convert to real by using abs as in inverse_spectrum_truncate
+        psd = psd.to_frequencyseries()
+    else:
+        psd = pycbc.psd.interpolate(psd, 1.0 / wn_dur)
+        psd = 1. / pycbc.psd.inverse_spectrum_truncation(
+            1./psd,
+            fil_len,
+            low_frequency_cutoff=low_frequency_cutoff,
+            trunc_method='hann')
+
+    kmin = int(low_frequency_cutoff / psd.delta_f)
+    psd[:kmin].clear()
+    asd = (psd.squared_norm())**0.25
+    del psd
+
+    white_noise = normal(start_time - filter_duration,
+                         end_time + filter_duration,
+                         seed=seed,
+                         sample_rate=sample_rate)
+    white_noise = white_noise.to_frequencyseries()
+    # Here we color. Do not want to duplicate memory here though so use '*='
+    white_noise *= asd*scale
+    del asd
+    colored = white_noise.to_timeseries(delta_t=1.0/sample_rate)
+    del white_noise
+    return colored.time_slice(start_time, end_time)
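A usage sketch; the GPS times, PSD model and cutoff are illustrative:

    >>> import pycbc.psd
    >>> from pycbc.noise.reproduceable import colored_noise
    >>> delta_f = 1.0 / 128
    >>> flen = int(2048 / delta_f) + 1
    >>> psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 20.0)
    >>> ts = colored_noise(psd, 1126259462, 1126259462 + 16,
    ...                    seed=0, sample_rate=4096,
    ...                    low_frequency_cutoff=20.0)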
+ + +
+[docs]
+def noise_from_string(psd_name, start_time, end_time,
+                      seed=0,
+                      sample_rate=16384,
+                      low_frequency_cutoff=1.0,
+                      filter_duration=128,
+                      scale=1.0):
+    """ Create noise from an analytic PSD
+
+    Return noise from the chosen PSD. Note that if unique noise is desired
+    a unique seed should be provided.
+
+    Parameters
+    ----------
+    psd_name : str
+        Name of the analytic PSD to use.
+    start_time : int
+        Start time in GPS seconds to generate noise
+    end_time : int
+        End time in GPS seconds to generate noise
+    seed : {None, int}
+        The seed to generate the noise.
+    sample_rate: {16384, float}
+        The sample rate of the output data. Keep constant if you want to
+        ensure continuity between disjoint time spans.
+    low_frequency_cutoff : {1.0, float}
+        The low frequency cutoff to pass to the PSD generation.
+    filter_duration : {128, float}
+        The duration in seconds of the coloring filter
+    scale : {1.0, float}
+        Scale factor applied to the amplitude spectral density before coloring.
+
+    Returns
+    --------
+    noise : TimeSeries
+        A TimeSeries containing gaussian noise colored by the given psd.
+    """
+    delta_f = 1.0 / filter_duration
+    flen = int(sample_rate / delta_f) // 2 + 1
+    psd = pycbc.psd.from_string(psd_name, flen, delta_f, low_frequency_cutoff)
+    return colored_noise(psd, start_time, end_time,
+                         seed=seed,
+                         sample_rate=sample_rate,
+                         low_frequency_cutoff=low_frequency_cutoff,
+                         filter_duration=filter_duration,
+                         scale=scale)
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/opt.html b/latest/html/_modules/pycbc/opt.html new file mode 100644 index 00000000000..87486822650 --- /dev/null +++ b/latest/html/_modules/pycbc/opt.html @@ -0,0 +1,297 @@ + + + + + + pycbc.opt — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.opt

+# Copyright (C) 2015 Joshua Willis
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""
+This module defines optimization flags and determines hardware features that some
+other modules and packages may use in addition to some optimized utilities.
+"""
+import os, sys
+import logging
+from collections import OrderedDict
+
+logger = logging.getLogger('pycbc.opt')
+
+# Work around different Python versions to get runtime
+# info on hardware cache sizes
+_USE_SUBPROCESS = False
+HAVE_GETCONF = False
+if os.environ.get("LEVEL2_CACHE_SIZE", None) or os.environ.get("NO_GETCONF", None):
+    HAVE_GETCONF = False
+elif sys.platform == 'darwin':
+    # Mac has getconf, but we can do nothing useful with it
+    HAVE_GETCONF = False
+else:
+    import subprocess
+    _USE_SUBPROCESS = True
+    HAVE_GETCONF = True
+
+if os.environ.get("LEVEL2_CACHE_SIZE", None):
+    LEVEL2_CACHE_SIZE = int(os.environ["LEVEL2_CACHE_SIZE"])
+    logger.info("opt: using LEVEL2_CACHE_SIZE %d from environment",
+                LEVEL2_CACHE_SIZE)
+elif HAVE_GETCONF:
+    if _USE_SUBPROCESS:
+        def getconf(confvar):
+            return int(subprocess.check_output(['getconf', confvar]))
+    else:
+
+[docs] + def getconf(confvar): + retlist = commands.getstatusoutput('getconf ' + confvar) + return int(retlist[1])
+ + + LEVEL1_DCACHE_SIZE = getconf('LEVEL1_DCACHE_SIZE') + LEVEL1_DCACHE_ASSOC = getconf('LEVEL1_DCACHE_ASSOC') + LEVEL1_DCACHE_LINESIZE = getconf('LEVEL1_DCACHE_LINESIZE') + LEVEL2_CACHE_SIZE = getconf('LEVEL2_CACHE_SIZE') + LEVEL2_CACHE_ASSOC = getconf('LEVEL2_CACHE_ASSOC') + LEVEL2_CACHE_LINESIZE = getconf('LEVEL2_CACHE_LINESIZE') + LEVEL3_CACHE_SIZE = getconf('LEVEL3_CACHE_SIZE') + LEVEL3_CACHE_ASSOC = getconf('LEVEL3_CACHE_ASSOC') + LEVEL3_CACHE_LINESIZE = getconf('LEVEL3_CACHE_LINESIZE') + + +
+[docs] +def insert_optimization_option_group(parser): + """ + Adds the options used to specify optimization-specific options. + + Parameters + ---------- + parser : object + OptionParser instance + """ + optimization_group = parser.add_argument_group("Options for selecting " + "optimization-specific settings") + + optimization_group.add_argument("--cpu-affinity", help=""" + A set of CPUs on which to run, specified in a format suitable + to pass to taskset.""") + optimization_group.add_argument("--cpu-affinity-from-env", help=""" + The name of an enivornment variable containing a set + of CPUs on which to run, specified in a format suitable + to pass to taskset.""")
+ + + +
+[docs] +def verify_optimization_options(opt, parser): + """Parses the CLI options, verifies that they are consistent and + reasonable, and acts on them if they are + + Parameters + ---------- + opt : object + Result of parsing the CLI with OptionParser, or any object with the + required attributes + parser : object + OptionParser instance. + """ + + # Pin to specified CPUs if requested + requested_cpus = None + + if opt.cpu_affinity_from_env is not None: + if opt.cpu_affinity is not None: + logger.error( + "Both --cpu_affinity_from_env and --cpu_affinity specified" + ) + sys.exit(1) + + requested_cpus = os.environ.get(opt.cpu_affinity_from_env) + + if requested_cpus is None: + logger.error( + "CPU affinity requested from environment variable %s " + "but this variable is not defined", + opt.cpu_affinity_from_env + ) + sys.exit(1) + + if requested_cpus == '': + logger.error( + "CPU affinity requested from environment variable %s " + "but this variable is empty", + opt.cpu_affinity_from_env + ) + sys.exit(1) + + if requested_cpus is None: + requested_cpus = opt.cpu_affinity + + if requested_cpus is not None: + command = 'taskset -pc %s %d' % (requested_cpus, os.getpid()) + retcode = os.system(command) + + if retcode != 0: + logger.error( + 'taskset command <%s> failed with return code %d', + command, retcode + ) + sys.exit(1) + + logger.info("Pinned to CPUs %s ", requested_cpus)
+ + +
+[docs] +class LimitedSizeDict(OrderedDict): + """ Fixed sized dict for FIFO caching""" + + def __init__(self, *args, **kwds): + self.size_limit = kwds.pop("size_limit", None) + OrderedDict.__init__(self, *args, **kwds) + self._check_size_limit() + + def __setitem__(self, key, value): + OrderedDict.__setitem__(self, key, value) + self._check_size_limit() + + def _check_size_limit(self): + if self.size_limit is not None: + while len(self) > self.size_limit: + self.popitem(last=False)
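A small sketch of the first-in-first-out eviction behaviour:

    >>> from pycbc.opt import LimitedSizeDict
    >>> cache = LimitedSizeDict(size_limit=2)
    >>> cache['a'] = 1
    >>> cache['b'] = 2
    >>> cache['c'] = 3        # oldest entry 'a' is evicted
    >>> list(cache)
    ['b', 'c']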
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/pnutils.html b/latest/html/_modules/pycbc/pnutils.html new file mode 100644 index 00000000000..a5a1cd4f6cb --- /dev/null +++ b/latest/html/_modules/pycbc/pnutils.html @@ -0,0 +1,1419 @@ + + + + + + pycbc.pnutils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.pnutils

+# Copyright (C) 2012  Alex Nitz
+#
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""This module contains convenience pN functions. This includes calculating conversions
+between quantities.
+"""
+import logging
+import numpy
+
+import lal
+from scipy.optimize import bisect, brentq, minimize
+
+from pycbc import conversions, libutils
+
+logger = logging.getLogger('pycbc.pnutils')
+
+lalsim = libutils.import_optional('lalsimulation')
+
+
+[docs] +def nearest_larger_binary_number(input_len): + """ Return the nearest binary number larger than input_len. + """ + return int(2**numpy.ceil(numpy.log2(input_len)))
+ + +
+[docs] +def chirp_distance(dist, mchirp, ref_mass=1.4): + return conversions.chirp_distance(dist, mchirp, ref_mass=ref_mass)
+ + +
+[docs] +def mass1_mass2_to_mtotal_eta(mass1, mass2): + m_total = conversions.mtotal_from_mass1_mass2(mass1, mass2) + eta = conversions.eta_from_mass1_mass2(mass1, mass2) + return m_total,eta
+ + +
+[docs] +def mtotal_eta_to_mass1_mass2(m_total, eta): + mass1 = conversions.mass1_from_mtotal_eta(m_total, eta) + mass2 = conversions.mass2_from_mtotal_eta(m_total, eta) + return mass1,mass2
+ + +
+[docs] +def mass1_mass2_to_mchirp_eta(mass1, mass2): + m_chirp = conversions.mchirp_from_mass1_mass2(mass1, mass2) + eta = conversions.eta_from_mass1_mass2(mass1, mass2) + return m_chirp,eta
+ + +
+[docs] +def mchirp_eta_to_mass1_mass2(m_chirp, eta): + mtotal = conversions.mtotal_from_mchirp_eta(m_chirp, eta) + mass1 = conversions.mass1_from_mtotal_eta(mtotal, eta) + mass2 = conversions.mass2_from_mtotal_eta(mtotal, eta) + return mass1, mass2
+ + +
+[docs] +def mchirp_mass1_to_mass2(mchirp, mass1): + """ + This function takes a value of mchirp and one component mass and returns + the second component mass. As this is a cubic equation this requires + finding the roots and returning the one that is real. + Basically it can be shown that: + + m2^3 - a(m2 + m1) = 0 + + where + + a = Mc^5 / m1^3 + + this has 3 solutions but only one will be real. + """ + return conversions.mass2_from_mchirp_mass1(mchirp, mass1)
+ + +
+[docs] +def eta_mass1_to_mass2(eta, mass1, return_mass_heavier=False, force_real=True): + """ + This function takes values for eta and one component mass and returns the + second component mass. Similar to mchirp_mass1_to_mass2 this requires + finding the roots of a quadratic equation. Basically: + + eta m2^2 + (2 eta - 1)m1 m2 + eta m1^2 = 0 + + This has two solutions which correspond to mass1 being the heavier mass + or it being the lighter mass. By default the value corresponding to + mass1 > mass2 is returned. Use the return_mass_heavier kwarg to invert this + behaviour. + """ + return conversions.mass_from_knownmass_eta(mass1, eta, + known_is_secondary=return_mass_heavier, force_real=force_real)
+ + +
+[docs] +def mchirp_q_to_mass1_mass2(mchirp, q): + """ This function takes a value of mchirp and the mass ratio + mass1/mass2 and returns the two component masses. + + The map from q to eta is + + eta = (mass1*mass2)/(mass1+mass2)**2 = (q)/(1+q)**2 + + Then we can map from (mchirp,eta) to (mass1,mass2). + """ + eta = conversions.eta_from_q(q) + mass1 = conversions.mass1_from_mchirp_eta(mchirp, eta) + mass2 = conversions.mass2_from_mchirp_eta(mchirp, eta) + return mass1, mass2
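A quick round-trip check of the (mchirp, q) to (mass1, mass2) mapping:

    >>> from pycbc.pnutils import mass1_mass2_to_mchirp_eta, mchirp_q_to_mass1_mass2
    >>> mc, _ = mass1_mass2_to_mchirp_eta(1.4, 1.4)
    >>> mchirp_q_to_mass1_mass2(mc, 1.0)   # recovers (1.4, 1.4) up to floating-point error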
+ + +
+[docs] +def A0(f_lower): + """used in calculating chirp times: see Cokelaer, arxiv.org:0706.4437 + appendix 1, also lalinspiral/python/sbank/tau0tau3.py + """ + return conversions._a0(f_lower)
+ + +
+[docs] +def A3(f_lower): + """another parameter used for chirp times""" + return conversions._a3(f_lower)
+ + +
+[docs] +def mass1_mass2_to_tau0_tau3(mass1, mass2, f_lower): + tau0 = conversions.tau0_from_mass1_mass2(mass1, mass2, f_lower) + tau3 = conversions.tau3_from_mass1_mass2(mass1, mass2, f_lower) + return tau0,tau3
+ + +
+[docs] +def tau0_tau3_to_mtotal_eta(tau0, tau3, f_lower): + mtotal = conversions.mtotal_from_tau0_tau3(tau0, tau3, f_lower) + eta = conversions.eta_from_tau0_tau3(tau0, tau3, f_lower) + return mtotal, eta
+ + +
+[docs] +def tau0_tau3_to_mass1_mass2(tau0, tau3, f_lower): + m_total,eta = tau0_tau3_to_mtotal_eta(tau0, tau3, f_lower) + return mtotal_eta_to_mass1_mass2(m_total, eta)
+ + +
+[docs] +def mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(mass1, mass2, + spin1z, spin2z): + _, eta = mass1_mass2_to_mtotal_eta(mass1, mass2) + # get_beta_sigma_from_aligned_spins() takes + # the spin of the heaviest body first + heavy_spin = numpy.where(mass2 <= mass1, spin1z, spin2z) + light_spin = numpy.where(mass2 > mass1, spin1z, spin2z) + beta, sigma, gamma = get_beta_sigma_from_aligned_spins( + eta, heavy_spin, light_spin) + return beta, sigma, gamma
+ + +
+[docs] +def get_beta_sigma_from_aligned_spins(eta, spin1z, spin2z): + """ + Calculate the various PN spin combinations from the masses and spins. + See <http://arxiv.org/pdf/0810.5336v3.pdf>. + + Parameters + ----------- + eta : float or numpy.array + Symmetric mass ratio of the input system(s) + spin1z : float or numpy.array + Spin(s) parallel to the orbit of the heaviest body(ies) + spin2z : float or numpy.array + Spin(s) parallel to the orbit of the smallest body(ies) + + Returns + -------- + beta : float or numpy.array + The 1.5PN spin combination + sigma : float or numpy.array + The 2PN spin combination + gamma : float or numpy.array + The 2.5PN spin combination + chis : float or numpy.array + (spin1z + spin2z) / 2. + """ + chiS = 0.5 * (spin1z + spin2z) + chiA = 0.5 * (spin1z - spin2z) + delta = (1 - 4 * eta) ** 0.5 + spinspin = spin1z * spin2z + beta = (113. / 12. - 19. / 3. * eta) * chiS + beta += 113. / 12. * delta * chiA + sigma = eta / 48. * (474 * spinspin) + sigma += (1 - 2 * eta) * (81. / 16. * (chiS * chiS + chiA * chiA)) + sigma += delta * (81. / 8. * (chiS * chiA)) + gamma = (732985. / 2268. - 24260. / 81. * eta - \ + 340. / 9. * eta * eta) * chiS + gamma += (732985. / 2268. + 140. / 9. * eta) * delta * chiA + return beta, sigma, gamma
+ + +
+[docs] +def solar_mass_to_kg(solar_masses): + return solar_masses * lal.MSUN_SI
+ + +
+[docs] +def parsecs_to_meters(distance): + return distance * lal.PC_SI
+ + +
+[docs] +def megaparsecs_to_meters(distance): + return parsecs_to_meters(distance) * 1e6
+ + +
+[docs] +def velocity_to_frequency(v, M): + return conversions.velocity_to_frequency(v, M)
+ + +
+[docs] +def frequency_to_velocity(f, M): + return conversions.frequency_to_velocity(f, M)
+ + +
+[docs] +def f_SchwarzISCO(M): + """ + Innermost stable circular orbit (ISCO) for a test particle + orbiting a Schwarzschild black hole + + Parameters + ---------- + M : float or numpy.array + Total mass in solar mass units + + Returns + ------- + f : float or numpy.array + Frequency in Hz + """ + return conversions.f_schwarzchild_isco(M)
+ + +
+[docs] +def f_BKLISCO(m1, m2): + """ + Mass ratio dependent ISCO derived from estimates of the final spin + of a merged black hole in a paper by Buonanno, Kidder, Lehner + (arXiv:0709.3839). See also arxiv:0801.4297v2 eq.(5) + + Parameters + ---------- + m1 : float or numpy.array + First component mass in solar mass units + m2 : float or numpy.array + Second component mass in solar mass units + + Returns + ------- + f : float or numpy.array + Frequency in Hz + """ + # q is defined to be in [0,1] for this formula + q = numpy.minimum(m1/m2, m2/m1) + return f_SchwarzISCO(m1+m2) * ( 1 + 2.8*q - 2.6*q*q + 0.8*q*q*q )
+ + +
+[docs] +def f_LightRing(M): + """ + Gravitational wave frequency corresponding to the light-ring orbit, + equal to 1/(3**(3/2) pi M) : see InspiralBankGeneration.c + + Parameters + ---------- + M : float or numpy.array + Total mass in solar mass units + + Returns + ------- + f : float or numpy.array + Frequency in Hz + """ + return 1.0 / (3.0**(1.5) * lal.PI * M * lal.MTSUN_SI)
+ + +
+[docs] +def f_ERD(M): + """ + Effective RingDown frequency studied in Pan et al. (arXiv:0704.1964) + found to give good fit between stationary-phase templates and + numerical relativity waveforms [NB equal-mass & nonspinning!] + Equal to 1.07*omega_220/2*pi + + Parameters + ---------- + M : float or numpy.array + Total mass in solar mass units + + Returns + ------- + f : float or numpy.array + Frequency in Hz + """ + return 1.07 * 0.5326 / (2*lal.PI * 0.955 * M * lal.MTSUN_SI)
+ + +
+[docs] +def f_FRD(m1, m2): + """ + Fundamental RingDown frequency calculated from the Berti, Cardoso and + Will (gr-qc/0512160) value for the omega_220 QNM frequency using + mass-ratio dependent fits to the final BH mass and spin from Buonanno + et al. (arXiv:0706.3732) : see also InspiralBankGeneration.c + + Parameters + ---------- + m1 : float or numpy.array + First component mass in solar mass units + m2 : float or numpy.array + Second component mass in solar mass units + + Returns + ------- + f : float or numpy.array + Frequency in Hz + """ + m_total, eta = mass1_mass2_to_mtotal_eta(m1, m2) + tmp = ( (1. - 0.63*(1. - 3.4641016*eta + 2.9*eta**2)**(0.3)) / + (1. - 0.057191*eta - 0.498*eta**2) ) + return tmp / (2.*lal.PI * m_total*lal.MTSUN_SI)
+ + +
+[docs] +def f_LRD(m1, m2): + """ + Lorentzian RingDown frequency = 1.2*FRD which captures part of + the Lorentzian tail from the decay of the QNMs + + Parameters + ---------- + m1 : float or numpy.array + First component mass in solar mass units + m2 : float or numpy.array + Second component mass in solar mass units + + Returns + ------- + f : float or numpy.array + Frequency in Hz + """ + return 1.2 * f_FRD(m1, m2)
+ + +def _get_freq(freqfunc, m1, m2, s1z, s2z): + """Wrapper of the LALSimulation function returning the frequency + for a given frequency function and template parameters. + + Parameters + ---------- + freqfunc : lalsimulation FrequencyFunction wrapped object e.g. + lalsimulation.fEOBNRv2RD + m1 : float-ish, i.e. castable to float + First component mass in solar masses + m2 : float-ish + Second component mass in solar masses + s1z : float-ish + First component dimensionless spin S_1/m_1^2 projected onto L + s2z : float-ish + Second component dimensionless spin S_2/m_2^2 projected onto L + + Returns + ------- + f : float + Frequency in Hz + """ + return lalsim.SimInspiralGetFrequency( + solar_mass_to_kg(m1), + solar_mass_to_kg(m2), + 0, + 0, + float(s1z), + 0, + 0, + float(s2z), + int(freqfunc) + ) + +# vectorize to enable calls with numpy arrays +_vec_get_freq = numpy.vectorize(_get_freq) + +
+[docs]
+def get_freq(freqfunc, m1, m2, s1z, s2z):
+    """
+    Evaluate the LALSimulation frequency function named by `freqfunc`
+    for the given template parameters and return the frequency.
+
+    Parameters
+    ----------
+    freqfunc : string
+        Name of the frequency function to use, e.g., 'fEOBNRv2RD'
+    m1 : float or numpy.array
+        First component mass in solar masses
+    m2 : float or numpy.array
+        Second component mass in solar masses
+    s1z : float or numpy.array
+        First component dimensionless spin S_1/m_1^2 projected onto L
+    s2z : float or numpy.array
+        Second component dimensionless spin S_2/m_2^2 projected onto L
+
+    Returns
+    -------
+    f : float or numpy.array
+        Frequency in Hz
+    """
+    lalsim_ffunc = getattr(lalsim, freqfunc)
+    return _vec_get_freq(lalsim_ffunc, m1, m2, s1z, s2z)
+ + +def _get_final_freq(approx, m1, m2, s1z, s2z): + """Wrapper of the LALSimulation function returning the final (highest) + frequency for a given approximant an template parameters + + Parameters + ---------- + approx : lalsimulation approximant wrapped object e.g. + lalsimulation.EOBNRv2 + m1 : float-ish, i.e. castable to float + First component mass in solar masses + m2 : float-ish + Second component mass in solar masses + s1z : float-ish + First component dimensionless spin S_1/m_1^2 projected onto L + s2z : float-ish + Second component dimensionless spin S_2/m_2^2 projected onto L + + Returns + ------- + f : float + Frequency in Hz + """ + return lalsim.SimInspiralGetFinalFreq( + solar_mass_to_kg(m1), + solar_mass_to_kg(m2), + 0, + 0, + float(s1z), + 0, + 0, + float(s2z), + int(approx) + ) + +# vectorize to enable calls with numpy arrays +_vec_get_final_freq = numpy.vectorize(_get_final_freq) + +
+[docs] +def get_final_freq(approx, m1, m2, s1z, s2z): + """Returns the final (highest) frequency for a given approximant using + given template parameters. + + NOTE: TaylorTx and TaylorFx are currently all given an ISCO cutoff !! + + Parameters + ---------- + approx : string + Name of the approximant e.g. 'EOBNRv2' + m1 : float or numpy.array + First component mass in solar masses + m2 : float or numpy.array + Second component mass in solar masses + s1z : float or numpy.array + First component dimensionless spin S_1/m_1^2 projected onto L + s2z : float or numpy.array + Second component dimensionless spin S_2/m_2^2 projected onto L + + Returns + ------- + f : float or numpy.array + Frequency in Hz + """ + # Unfortunately we need a few special cases (quite hacky in the case of + # IMRPhenomXAS) because some useful approximants are not understood by + # GetApproximantFromString(). + if approx in ['IMRPhenomD', 'IMRPhenomXAS']: + return frequency_cutoff_from_name('IMRPhenomDPeak', m1, m2, s1z, s2z) + if approx == 'SEOBNRv5': + return frequency_cutoff_from_name('SEOBNRv5RD', m1, m2, s1z, s2z) + lalsim_approx = lalsim.GetApproximantFromString(approx) + return _vec_get_final_freq(lalsim_approx, m1, m2, s1z, s2z)
+ + +# Dictionary of functions with uniform API taking a +# parameter dict indexed on mass1, mass2, spin1z, spin2z +named_frequency_cutoffs = { + # functions depending on the total mass alone + "SchwarzISCO": lambda p: f_SchwarzISCO(p["mass1"]+p["mass2"]), + "LightRing" : lambda p: f_LightRing(p["mass1"]+p["mass2"]), + "ERD" : lambda p: f_ERD(p["mass1"]+p["mass2"]), + # functions depending on the 2 component masses + "BKLISCO" : lambda p: f_BKLISCO(p["mass1"], p["mass2"]), + "FRD" : lambda p: f_FRD(p["mass1"], p["mass2"]), + "LRD" : lambda p: f_LRD(p["mass1"], p["mass2"]), + # functions depending on 2 component masses and aligned spins + "MECO" : lambda p: meco_frequency(p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "HybridMECO" : lambda p: hybrid_meco_frequency( + p["mass1"], p["mass2"], p["spin1z"], p["spin2z"], qm1=None, qm2=None), + "IMRPhenomBFinal": lambda p: get_freq("fIMRPhenomBFinal", + p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "IMRPhenomCFinal": lambda p: get_freq("fIMRPhenomCFinal", + p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "IMRPhenomDPeak": lambda p: get_freq("fIMRPhenomDPeak", + p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "EOBNRv2RD" : lambda p: get_freq("fEOBNRv2RD", p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "EOBNRv2HMRD" : lambda p: get_freq("fEOBNRv2HMRD", p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "SEOBNRv1RD" : lambda p: get_freq("fSEOBNRv1RD", p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "SEOBNRv1Peak": lambda p: get_freq("fSEOBNRv1Peak", p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "SEOBNRv2RD": lambda p: get_freq("fSEOBNRv2RD", p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "SEOBNRv2Peak": lambda p: get_freq("fSEOBNRv2Peak", p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "SEOBNRv4RD": lambda p: get_freq("fSEOBNRv4RD", p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "SEOBNRv4Peak": lambda p: get_freq("fSEOBNRv4Peak", p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "SEOBNRv5RD": lambda p: get_freq("fSEOBNRv5RD", p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]), + "SEOBNRv5Peak": lambda p: get_freq("fSEOBNRv5Peak", p["mass1"], p["mass2"], + p["spin1z"], p["spin2z"]) +} + +
+[docs] +def frequency_cutoff_from_name(name, m1, m2, s1z, s2z): + """ + Returns the result of evaluating the frequency cutoff function + specified by 'name' on a template with given parameters. + + Parameters + ---------- + name : string + Name of the cutoff function + m1 : float or numpy.array + First component mass in solar masses + m2 : float or numpy.array + Second component mass in solar masses + s1z : float or numpy.array + First component dimensionless spin S_1/m_1^2 projected onto L + s2z : float or numpy.array + Second component dimensionless spin S_2/m_2^2 projected onto L + + Returns + ------- + f : float or numpy.array + Frequency in Hz + """ + params = {"mass1": m1, "mass2": m2, "spin1z": s1z, "spin2z": s2z} + return named_frequency_cutoffs[name](params)
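A minimal usage sketch; 'SchwarzISCO' and 'MECO' depend only on quantities computed in pure Python, while names such as 'IMRPhenomDPeak' additionally require lalsimulation to be importable:

    >>> from pycbc.pnutils import frequency_cutoff_from_name
    >>> frequency_cutoff_from_name('SchwarzISCO', 10.0, 1.4, 0.0, 0.0)  # ISCO frequency in Hz
    >>> frequency_cutoff_from_name('MECO', 10.0, 1.4, 0.0, 0.0)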
+ + +def _get_imr_duration(m1, m2, s1z, s2z, f_low, approximant="SEOBNRv4"): + """Wrapper of lalsimulation template duration approximate formula""" + m1, m2, s1z, s2z, f_low = float(m1), float(m2), float(s1z), float(s2z),\ + float(f_low) + if approximant == "SEOBNRv2": + chi = lalsim.SimIMRPhenomBComputeChi(m1, m2, s1z, s2z) + time_length = lalsim.SimIMRSEOBNRv2ChirpTimeSingleSpin( + m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, chi, f_low) + elif approximant == "IMRPhenomXAS": + time_length = lalsim.SimIMRPhenomXASDuration( + m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z, f_low) + elif approximant == "IMRPhenomD": + time_length = lalsim.SimIMRPhenomDChirpTime( + m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z, f_low) + elif approximant in ["SEOBNRv4", "SEOBNRv4_ROM"]: + # NB the LALSim function has f_low as first argument + time_length = lalsim.SimIMRSEOBNRv4ROMTimeOfFrequency( + f_low, m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z) + elif approximant in ["SEOBNRv5", "SEOBNRv5_ROM"]: + time_length = lalsim.SimIMRSEOBNRv5ROMTimeOfFrequency( + f_low, m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z) + elif approximant in ["SPAtmplt", "TaylorF2"]: + chi = lalsim.SimInspiralTaylorF2ReducedSpinComputeChi( + m1, m2, s1z, s2z + ) + time_length = lalsim.SimInspiralTaylorF2ReducedSpinChirpTime( + f_low, m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, chi, -1 + ) + else: + raise RuntimeError("I can't calculate a duration for %s" % approximant) + # FIXME Add an extra factor of 1.1 for 'safety' since the duration + # functions are approximate + return time_length * 1.1 + +get_imr_duration = numpy.vectorize(_get_imr_duration) + +
+[docs] +def get_inspiral_tf(tc, mass1, mass2, spin1, spin2, f_low, n_points=100, + pn_2order=7, approximant='TaylorF2'): + """Compute the time-frequency evolution of an inspiral signal. + + Return a tuple of time and frequency vectors tracking the evolution of an + inspiral signal in the time-frequency plane. + """ + # handle param-dependent approximant specification + class Params: + pass + params = Params() + params.mass1 = mass1 + params.mass2 = mass2 + params.spin1z = spin1 + params.spin2z = spin2 + try: + approximant = eval(approximant, {'__builtins__': None}, + dict(params=params)) + except (NameError, TypeError): + pass + + if approximant in ['TaylorF2', 'SPAtmplt']: + from pycbc.waveform.spa_tmplt import findchirp_chirptime + + # FIXME spins are not taken into account + f_high = f_SchwarzISCO(mass1 + mass2) + def tof_func(f): + return findchirp_chirptime( + float(mass1), + float(mass2), + float(f), + pn_2order + ) + elif approximant.startswith('SEOBNRv'): + approximant_prefix = approximant[:len('SEOBNRv*')] + f_high = get_final_freq(approximant_prefix, mass1, mass2, spin1, spin2) + f_high *= 0.999 # avoid errors due to rounding + tof_func_map = { + # use HI function for v2 as it has wider freq range validity + 'SEOBNRv2': lalsim.SimIMRSEOBNRv2ROMDoubleSpinHITimeOfFrequency, + 'SEOBNRv4': lalsim.SimIMRSEOBNRv4ROMTimeOfFrequency, + 'SEOBNRv5': lalsim.SimIMRSEOBNRv5ROMTimeOfFrequency + } + def tof_func(f): + return tof_func_map[approximant_prefix]( + f, + solar_mass_to_kg(mass1), + solar_mass_to_kg(mass2), + float(spin1), + float(spin2) + ) + elif approximant in ['IMRPhenomD', 'IMRPhenomXAS']: + f_high = get_final_freq(approximant, mass1, mass2, spin1, spin2) + tof_func_map = { + 'IMRPhenomD': lalsim.SimIMRPhenomDChirpTime, + 'IMRPhenomXAS': lalsim.SimIMRPhenomXASDuration + } + def tof_func(f): + return tof_func_map[approximant]( + solar_mass_to_kg(mass1), + solar_mass_to_kg(mass2), + float(spin1), + float(spin2), + f + ) + else: + raise ValueError(f'Approximant {approximant} not supported') + track_f = numpy.logspace(numpy.log10(f_low), numpy.log10(f_high), n_points) + tof_func_vec = numpy.vectorize(tof_func) + track_t = tc - tof_func_vec(track_f) + return (track_t, track_f)
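A usage sketch with illustrative parameters; with tc = 0 the returned times are negative, i.e. seconds before coalescence:

    >>> from pycbc.pnutils import get_inspiral_tf
    >>> track_t, track_f = get_inspiral_tf(0, 30.0, 30.0, 0.0, 0.0, 20.0,
    ...                                    approximant='TaylorF2')
    >>> track_t[0], track_f[0]   # time before coalescence at the 20 Hz starting frequency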
+ + + +##############################This code was taken from Andy ########### + + +def _energy_coeffs(m1, m2, chi1, chi2): + """ Return the center-of-mass energy coefficients up to 3.0pN (2.5pN spin) + """ + mtot = m1 + m2 + eta = m1*m2 / (mtot*mtot) + chi = (m1*chi1 + m2*chi2) / mtot + chisym = (chi1 + chi2) / 2. + beta = (113.*chi - 76.*eta*chisym)/12. + sigma12 = 79.*eta*chi1*chi2/8. + sigmaqm = 81.*m1*m1*chi1*chi1/(16.*mtot*mtot) \ + + 81.*m2*m2*chi2*chi2/(16.*mtot*mtot) + + energy0 = -0.5*eta + energy2 = -0.75 - eta/12. + energy3 = 0. + energy4 = -3.375 + (19*eta)/8. - pow(eta,2)/24. + energy5 = 0. + energy6 = -10.546875 - (155*pow(eta,2))/96. - (35*pow(eta,3))/5184. \ + + eta*(59.80034722222222 - (205*pow(lal.PI,2))/96.) + + energy3 += (32*beta)/113. + (52*chisym*eta)/113. + + energy4 += (-16*sigma12)/79. - (16*sigmaqm)/81. + energy5 += (96*beta)/113. + ((-124*beta)/339. - (522*chisym)/113.)*eta \ + - (710*chisym*pow(eta,2))/339. + + return (energy0, energy2, energy3, energy4, energy5, energy6) + +
+[docs] +def meco_velocity(m1, m2, chi1, chi2): + """ + Returns the velocity of the minimum energy cutoff for 3.5pN (2.5pN spin) + + Parameters + ---------- + m1 : float + First component mass in solar masses + m2 : float + Second component mass in solar masses + chi1 : float + First component dimensionless spin S_1/m_1^2 projected onto L + chi2 : float + Second component dimensionless spin S_2/m_2^2 projected onto L + + Returns + ------- + v : float + Velocity (dimensionless) + """ + _, energy2, energy3, energy4, energy5, energy6 = \ + _energy_coeffs(m1, m2, chi1, chi2) + def eprime(v): + return 2. + v * v * (4.*energy2 + v * (5.*energy3 \ + + v * (6.*energy4 + + v * (7.*energy5 + 8.*energy6 * v)))) + return bisect(eprime, 0.05, 1.0)
+ + +def _meco_frequency(m1, m2, chi1, chi2): + """Returns the frequency of the minimum energy cutoff for 3.5pN (2.5pN spin) + """ + return velocity_to_frequency(meco_velocity(m1, m2, chi1, chi2), m1+m2) + +meco_frequency = numpy.vectorize(_meco_frequency) + +def _dtdv_coeffs(m1, m2, chi1, chi2): + """ Returns the dt/dv coefficients up to 3.5pN (2.5pN spin) + """ + mtot = m1 + m2 + eta = m1*m2 / (mtot*mtot) + chi = (m1*chi1 + m2*chi2) / mtot + chisym = (chi1 + chi2) / 2. + beta = (113.*chi - 76.*eta*chisym)/12. + sigma12 = 79.*eta*chi1*chi2/8. + sigmaqm = 81.*m1*m1*chi1*chi1/(16.*mtot*mtot) \ + + 81.*m2*m2*chi2*chi2/(16.*mtot*mtot) + + dtdv0 = 1. # FIXME: Wrong but doesn't matter for now. + dtdv2 = (1./336.) * (743. + 924.*eta) + dtdv3 = -4. * lal.PI + beta + dtdv4 = (3058673. + 5472432.*eta + 4353552.*eta*eta)/1016064. - sigma12 - sigmaqm + dtdv5 = (1./672.) * lal.PI * (-7729. + 1092.*eta) + (146597.*beta/18984. + 42.*beta*eta/113. - 417307.*chisym*eta/18984. - 1389.*chisym*eta*eta/226.) + dtdv6 = 22.065 + 165.416*eta - 2.20067*eta*eta + 4.93152*eta*eta*eta + dtdv6log = 1712./315. + dtdv7 = (lal.PI/1016064.) * (-15419335. - 12718104.*eta + 4975824.*eta*eta) + + return (dtdv0, dtdv2, dtdv3, dtdv4, dtdv5, dtdv6, dtdv6log, dtdv7) + +def _dtdv_cutoff_velocity(m1, m2, chi1, chi2): + _, dtdv2, dtdv3, dtdv4, dtdv5, dtdv6, dtdv6log, dtdv7 = _dtdv_coeffs(m1, m2, chi1, chi2) + + def dtdv_func(v): + x = dtdv7 + x = v * x + dtdv6 + dtdv6log * 3. * numpy.log(v) + x = v * x + dtdv5 + x = v * x + dtdv4 + x = v * x + dtdv3 + x = v * x + dtdv2 + return v * v * x + 1. + + if dtdv_func(1.0) < 0.: + return bisect(dtdv_func, 0.05, 1.0) + else: + return 1.0 + +
+[docs] +def energy_coefficients(m1, m2, s1z=0, s2z=0, phase_order=-1, spin_order=-1): + """ Return the energy coefficients. This assumes that the system has aligned spins only. + """ + implemented_phase_order = 7 + implemented_spin_order = 7 + if phase_order > implemented_phase_order: + raise ValueError("pN coeffiecients of that order have not been implemented") + elif phase_order == -1: + phase_order = implemented_phase_order + + if spin_order > implemented_spin_order: + raise ValueError("pN coeffiecients of that order have not been implemented") + elif spin_order == -1: + spin_order = implemented_spin_order + + qmdef1 = 1.0 + qmdef2 = 1.0 + + M = m1 + m2 + dm = (m1-m2)/M + m1M = m1 / M + m2M = m2 / M + + s1z = s1z * m1M * m1M + s2z = s2z * m2M * m2M + + _, eta = mass1_mass2_to_mchirp_eta(m1, m2) + + ecof = numpy.zeros(phase_order+1) + # Orbital terms + if phase_order >= 0: + ecof[0] = 1.0 + if phase_order >= 1: + ecof[1] = 0 + if phase_order >= 2: + ecof[2] = -(1.0/12.0) * (9.0 + eta) + if phase_order >= 3: + ecof[3] = 0 + if phase_order >= 4: + ecof[4] = (-81.0 + 57.0*eta - eta*eta) / 24.0 + if phase_order >= 5: + ecof[5] = 0 + if phase_order >= 6: + ecof[6] = - 675.0/64.0 + ( 34445.0/576.0 \ + - 205.0/96.0 * lal.PI * lal.PI ) * eta \ + - (155.0/96.0) *eta * eta - 35.0/5184.0 * eta * eta + # Spin terms + + ESO15s1 = 8.0/3.0 + 2.0*m2/m1 + ESO15s2 = 8.0/3.0 + 2.0*m1/m2 + + ESS2 = 1.0 / eta + EQM2s1 = qmdef1/2.0/m1M/m1M + EQM2s1L = -qmdef1*3.0/2.0/m1M/m1M + #EQM2s2 = qmdef2/2.0/m2M/m2M + EQM2s2L = -qmdef2*3.0/2.0/m2M/m2M + + ESO25s1 = 11.0 - 61.0*eta/9.0 + (dm/m1M) * (-3.0 + 10.*eta/3.0) + ESO25s2 = 11.0 - 61.0*eta/9.0 + (dm/m2M) * (3.0 - 10.*eta/3.0) + + ESO35s1 = 135.0/4.0 - 367.0*eta/4.0 + 29.0*eta*eta/12.0 + (dm/m1M) * (-27.0/4.0 + 39.0*eta - 5.0*eta*eta/4.0) + ESO35s2 = 135.0/4.0 - 367.0*eta/4.0 + 29.0*eta*eta/12.0 - (dm/m2M) * (-27.0/4.0 + 39.0*eta - 5.0*eta*eta/4.0) + + if spin_order >=3: + ecof[3] += ESO15s1 * s1z + ESO15s2 * s2z + if spin_order >=4: + ecof[4] += ESS2 * (s1z*s2z - 3.0*s1z*s2z) + ecof[4] += EQM2s1*s1z*s1z + EQM2s1*s2z*s2z + EQM2s1L*s1z*s1z + EQM2s2L*s2z*s2z + if spin_order >=5: + ecof[5] = ESO25s1*s1z + ESO25s2*s2z + if spin_order >=7: + ecof[7] += ESO35s1*s1z + ESO35s2*s2z + + return ecof
+ + +
+[docs] +def energy(v, mass1, mass2, s1z=0, s2z=0, phase_order=-1, spin_order=-1): + ecof = energy_coefficients(mass1, mass2, s1z, s2z, phase_order, spin_order) + _, eta = mass1_mass2_to_mchirp_eta(mass1, mass2) + amp = - (1.0/2.0) * eta + e = 0.0 + for i in numpy.arange(0, len(ecof), 1): + e += v**(i+2.0) * ecof[i] + + return e * amp
+ + +
+[docs] +def meco2(m1, m2, s1z=0, s2z=0, phase_order=-1, spin_order=-1): + ecof = energy_coefficients(m1, m2, s1z, s2z, phase_order, spin_order) + + def test(v): + de = 0 + for i in numpy.arange(0, len(ecof), 1): + de += v**(i+1.0)* ecof[i] * (i + 2) + + return de + + return bisect(test, 0.001, 1.0)
+ + + +
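+# Illustrative cross-check (not part of the module; values are made up):
+# meco2 returns the PN MECO velocity, i.e. the root of d(energy)/dv, so the
+# orbital energy from energy() should be near its minimum at that velocity.
+#
+#   v_meco = meco2(1.4, 1.4)               # equal-mass non-spinning binary
+#   e_meco = energy(v_meco, 1.4, 1.4)      # energy at the MECO velocity
+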
+[docs] +def t2_cutoff_velocity(m1, m2, chi1, chi2): + return min(meco_velocity(m1,m2,chi1,chi2), _dtdv_cutoff_velocity(m1,m2,chi1,chi2))
+ + +
+[docs] +def t2_cutoff_frequency(m1, m2, chi1, chi2): + return velocity_to_frequency(t2_cutoff_velocity(m1, m2, chi1, chi2), m1 + m2)
+ + +t4_cutoff_velocity = meco_velocity +t4_cutoff_frequency = meco_frequency + +# Hybrid MECO in arXiv:1602.03134 +# To obtain the MECO, find minimum in v of eq. (6) + + +
+[docs] +def kerr_lightring(v, chi): + """Return the function whose first root defines the Kerr light ring""" + return 1 + chi * v**3 - 3 * v**2 * (1 - chi * v**3)**(1./3)
+ + + +
+[docs]
+def kerr_lightring_velocity(chi):
+    """Return the velocity at the Kerr light ring"""
+    # For chi >= 0.9996 the root finder fails, so cap the spin at that value
+    if chi >= 0.9996:
+        return brentq(kerr_lightring, 0, 0.8, args=(0.9996,))
+    else:
+        return brentq(kerr_lightring, 0, 0.8, args=(chi,))
+ + + +
+[docs] +def hybridEnergy(v, m1, m2, chi1, chi2, qm1, qm2): + """Return hybrid MECO energy. + + Return the hybrid energy [eq. (6)] whose minimum defines the hybrid MECO + up to 3.5PN (including the 3PN spin-spin) + + Parameters + ---------- + m1 : float + Mass of the primary object in solar masses. + m2 : float + Mass of the secondary object in solar masses. + chi1: float + Dimensionless spin of the primary object. + chi2: float + Dimensionless spin of the secondary object. + qm1: float + Quadrupole-monopole term of the primary object (1 for black holes). + qm2: float + Quadrupole-monopole term of the secondary object (1 for black holes). + + Returns + ------- + h_E: float + The hybrid energy as a function of v + """ + pi_sq = numpy.pi**2 + v2, v3, v4, v5, v6, v7 = v**2, v**3, v**4, v**5, v**6, v**7 + chi1_sq, chi2_sq = chi1**2, chi2**2 + m1, m2 = float(m1), float(m2) + M = float(m1 + m2) + M_2, M_4 = M**2, M**4 + eta = m1 * m2 / M_2 + eta2, eta3 = eta**2, eta**3 + m1_2, m1_4 = m1**2, m1**4 + m2_2, m2_4 = m2**2, m2**4 + + chi = (chi1 * m1 + chi2 * m2) / M + Kerr = -1. + (1. - 2. * v2 * (1. - chi * v3)**(1./3.)) / \ + numpy.sqrt((1. - chi * v3) * (1. + chi * v3 - 3. * v2 * (1 - chi * v3)**(1./3.))) + + h_E = Kerr - \ + (v2 / 2.) * \ + ( + - eta * v2 / 12. - 2 * (chi1 + chi2) * eta * v3 / 3. + + (19. * eta / 8. - eta2 / 24. + chi1_sq * m1_2 * (1 - qm1) / M_2 + + chi2_sq * m2_2 * (1 - qm2) / M_2) * v4 + - 1. / 9. * (120. * (chi1 + chi2) * eta2 + + (76. * chi1 + 45. * chi2) * m1_2 * eta / M_2 + + (45. * chi1 + 76. * chi2) * m2_2 * eta / M_2) * v5 + + (34445. * eta / 576. - 205. * pi_sq * eta / 96. - 155. * eta2 / 96. - + 35. * eta3 / 5184. + + 5. / 18. * (21. * chi1_sq * (1. - qm1) * m1_4 / M_4 + + 21. * chi2_sq * (1. - qm2) * m2_4 / M_4 + + (chi1_sq * (56. - 27. * qm1) + 20. * chi1 * chi2) * eta * m1_2 / M_2 + + (chi2_sq * (56. - 27. * qm2) + 20. * chi1 * chi2) * eta * m2_2 / M_2 + + (chi1_sq * (31. - 9. * qm1) + 38. * chi1 * chi2 + + chi2_sq * (31. - 9. * qm2)) * eta2)) * v6 + - eta / 12. * (3. * (292. * chi1 + 81. * chi2) * m1_4 / M_4 + + 3. * (81. * chi1 + 292. * chi2) * m2_4 / M_4 + + 4. * (673. * chi1 + 360. * chi2) * eta * m1_2 / M_2 + + 4. * (360. * chi1 + 673. * chi2) * eta * m2_2 / M_2 + + 3012. * eta2 * (chi1 + chi2)) * v7 + ) + + return h_E
+ + + +
+[docs] +def hybrid_meco_velocity(m1, m2, chi1, chi2, qm1=None, qm2=None): + """Return the velocity of the hybrid MECO + + Parameters + ---------- + m1 : float + Mass of the primary object in solar masses. + m2 : float + Mass of the secondary object in solar masses. + chi1: float + Dimensionless spin of the primary object. + chi2: float + Dimensionless spin of the secondary object. + qm1: {None, float}, optional + Quadrupole-monopole term of the primary object (1 for black holes). + If None, will be set to qm1 = 1. + qm2: {None, float}, optional + Quadrupole-monopole term of the secondary object (1 for black holes). + If None, will be set to qm2 = 1. + + Returns + ------- + v: float + The velocity (dimensionless) of the hybrid MECO + """ + + if qm1 is None: + qm1 = 1 + if qm2 is None: + qm2 = 1 + + # Set bounds at 0.1 to skip v=0 and at the lightring velocity + chi = (chi1 * m1 + chi2 * m2) / (m1 + m2) + vmax = kerr_lightring_velocity(chi) - 0.01 + + return minimize(hybridEnergy, 0.2, args=(m1, m2, chi1, chi2, qm1, qm2), + bounds=[(0.1, vmax)]).x.item()
+ + + +
+[docs] +def hybrid_meco_frequency(m1, m2, chi1, chi2, qm1=None, qm2=None): + """Return the frequency of the hybrid MECO + + Parameters + ---------- + m1 : float + Mass of the primary object in solar masses. + m2 : float + Mass of the secondary object in solar masses. + chi1: float + Dimensionless spin of the primary object. + chi2: float + Dimensionless spin of the secondary object. + qm1: {None, float}, optional + Quadrupole-monopole term of the primary object (1 for black holes). + If None, will be set to qm1 = 1. + qm2: {None, float}, optional + Quadrupole-monopole term of the secondary object (1 for black holes). + If None, will be set to qm2 = 1. + + Returns + ------- + f: float + The frequency (in Hz) of the hybrid MECO + """ + if qm1 is None: + qm1 = 1 + if qm2 is None: + qm2 = 1 + + return velocity_to_frequency(hybrid_meco_velocity(m1, m2, chi1, chi2, qm1, qm2), m1 + m2)
+ + + +
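+# Usage sketch (illustrative; the parameters are made up): the hybrid-MECO
+# cutoff frequency in Hz for a 30 + 25 solar mass binary with aligned spins.
+# qm1 and qm2 default to 1, i.e. black-hole quadrupole moments.
+#
+#   f_cut = hybrid_meco_frequency(30., 25., 0.7, -0.3)
+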
+[docs]
+def jframe_to_l0frame(mass1, mass2, f_ref, phiref=0., thetajn=0., phijl=0.,
+                      spin1_a=0., spin2_a=0.,
+                      spin1_polar=0., spin2_polar=0.,
+                      spin12_deltaphi=0.):
+    """Converts J-frame parameters into L0 frame.
+
+    Parameters
+    ----------
+    mass1 : float
+        The mass of the first component object in the
+        binary (in solar masses)
+    mass2 : float
+        The mass of the second component object in the
+        binary (in solar masses)
+    f_ref : float
+        The reference frequency.
+    phiref : float
+        The orbital phase at ``f_ref``.
+    thetajn : float
+        Angle between the line of sight and the total angular momentum J.
+    phijl : float
+        Azimuthal angle of L on its cone about J.
+    spin1_a : float
+        The dimensionless spin magnitude :math:`|\\vec{{s}}_1/m^2_1|`.
+    spin2_a : float
+        The dimensionless spin magnitude :math:`|\\vec{{s}}_2/m^2_2|`.
+    spin1_polar : float
+        Angle between L and the spin of the larger object.
+    spin2_polar : float
+        Angle between L and the spin of the smaller object.
+    spin12_deltaphi : float
+        Difference between the azimuthal angles of the spin of the larger
+        object (S1) and the spin of the smaller object (S2).
+
+    Returns
+    -------
+    dict :
+        Dictionary of:
+
+        * inclination : float
+            Inclination (rad), defined as the angle between
+            the orbital angular momentum L and the
+            line-of-sight at the reference frequency.
+        * spin1x : float
+            The x component of the first binary component's
+            dimensionless spin.
+        * spin1y : float
+            The y component of the first binary component's
+            dimensionless spin.
+        * spin1z : float
+            The z component of the first binary component's
+            dimensionless spin.
+        * spin2x : float
+            The x component of the second binary component's
+            dimensionless spin.
+        * spin2y : float
+            The y component of the second binary component's
+            dimensionless spin.
+        * spin2z : float
+            The z component of the second binary component's
+            dimensionless spin.
+    """
+    inclination, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z = \
+        lalsim.SimInspiralTransformPrecessingNewInitialConditions(
+            thetajn, phijl, spin1_polar, spin2_polar, spin12_deltaphi,
+            spin1_a, spin2_a, mass1*lal.MSUN_SI, mass2*lal.MSUN_SI, f_ref,
+            phiref)
+    out = {'inclination': inclination,
+           'spin1x': spin1x,
+           'spin1y': spin1y,
+           'spin1z': spin1z,
+           'spin2x': spin2x,
+           'spin2y': spin2y,
+           'spin2z': spin2z}
+    return out
+ + +
+[docs]
+def l0frame_to_jframe(mass1, mass2, f_ref, phiref=0., inclination=0.,
+                      spin1x=0., spin1y=0., spin1z=0.,
+                      spin2x=0., spin2y=0., spin2z=0.):
+    """Converts L0-frame parameters to J-frame.
+
+    Parameters
+    ----------
+    mass1 : float
+        The mass of the first component object in the
+        binary (in solar masses)
+    mass2 : float
+        The mass of the second component object in the
+        binary (in solar masses)
+    f_ref : float
+        The reference frequency.
+    phiref : float
+        The orbital phase at ``f_ref``.
+    inclination : float
+        Inclination (rad), defined as the angle between
+        the orbital angular momentum L and the
+        line-of-sight at the reference frequency.
+    spin1x : float
+        The x component of the first binary component's
+        dimensionless spin.
+    spin1y : float
+        The y component of the first binary component's
+        dimensionless spin.
+    spin1z : float
+        The z component of the first binary component's
+        dimensionless spin.
+    spin2x : float
+        The x component of the second binary component's
+        dimensionless spin.
+    spin2y : float
+        The y component of the second binary component's
+        dimensionless spin.
+    spin2z : float
+        The z component of the second binary component's
+        dimensionless spin.
+
+    Returns
+    -------
+    dict :
+        Dictionary of:
+
+        * thetajn : float
+            Angle between the line of sight and the total angular momentum J.
+        * phijl : float
+            Azimuthal angle of L on its cone about J.
+        * spin1_a : float
+            The dimensionless spin magnitude :math:`|\\vec{{s}}_1/m^2_1|`.
+        * spin2_a : float
+            The dimensionless spin magnitude :math:`|\\vec{{s}}_2/m^2_2|`.
+        * spin1_polar : float
+            Angle between L and the spin of the larger object.
+        * spin2_polar : float
+            Angle between L and the spin of the smaller object.
+        * spin12_deltaphi : float
+            Difference between the azimuthal angles of the spin of the larger
+            object (S1) and the spin of the smaller object (S2).
+    """
+    # Note: unlike other LALSimulation functions, this one takes masses in
+    # solar masses
+    thetajn, phijl, s1pol, s2pol, s12_deltaphi, spin1_a, spin2_a = \
+        lalsim.SimInspiralTransformPrecessingWvf2PE(
+            inclination, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z,
+            mass1, mass2, f_ref, phiref)
+    out = {'thetajn': thetajn,
+           'phijl': phijl,
+           'spin1_polar': s1pol,
+           'spin2_polar': s2pol,
+           'spin12_deltaphi': s12_deltaphi,
+           'spin1_a': spin1_a,
+           'spin2_a': spin2_a}
+    return out
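+# Illustrative round trip (made-up parameters; requires lalsimulation at
+# runtime): convert J-frame angles to L0-frame spin components and back.
+# The keys returned by jframe_to_l0frame match the keyword arguments of
+# l0frame_to_jframe, so the dictionary can be unpacked directly.
+#
+#   l0 = jframe_to_l0frame(30., 20., 20., thetajn=0.5, phijl=1.0,
+#                          spin1_a=0.6, spin1_polar=0.3)
+#   jf = l0frame_to_jframe(30., 20., 20., **l0)
+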
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/pool.html b/latest/html/_modules/pycbc/pool.html new file mode 100644 index 00000000000..9a56046653e --- /dev/null +++ b/latest/html/_modules/pycbc/pool.html @@ -0,0 +1,373 @@ + + + + + + pycbc.pool — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.pool

+""" Tools for creating pools of worker processes
+"""
+import multiprocessing.pool
+import functools
+from multiprocessing import TimeoutError, cpu_count, get_context
+import types
+import signal
+import atexit
+import logging
+
+logger = logging.getLogger('pycbc.pool')
+
+
+[docs] +def is_main_process(): + """ Check if this is the main control process and may handle one time tasks + """ + try: + from mpi4py import MPI + comm = MPI.COMM_WORLD + rank = comm.Get_rank() + return rank == 0 + except (ImportError, ValueError, RuntimeError): + return True
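+# Illustrative use: guard one-off actions so that only the main (rank-0)
+# process performs them when running under MPI; in a non-MPI run this simply
+# returns True.  write_summary() is a placeholder for such a task.
+#
+#   if is_main_process():
+#       write_summary()
+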
+ + +# Allow the pool to be interrupted, need to disable the children processes +# from intercepting the keyboard interrupt +def _noint(init, *args): + signal.signal(signal.SIGINT, signal.SIG_IGN) + if init is not None: + return init(*args) + +_process_lock = None +_numdone = None +def _lockstep_fcn(values): + """ Wrapper to ensure that all processes execute together """ + numrequired, fcn, args = values + with _process_lock: + _numdone.value += 1 + # yep this is an ugly busy loop, do something better please + # when we care about the performance of this call and not just the + # guarantee it provides (ok... maybe never) + while 1: + if _numdone.value == numrequired: + return fcn(args) + +def _shutdown_pool(p): + p.terminate() + p.join() + +
+[docs] +class BroadcastPool(multiprocessing.pool.Pool): + """ Multiprocessing pool with a broadcast method + """ + def __init__(self, processes=None, initializer=None, initargs=(), + context=None, **kwds): + global _process_lock + global _numdone + _process_lock = multiprocessing.Lock() + _numdone = multiprocessing.Value('i', 0) + noint = functools.partial(_noint, initializer) + + # Default is fork to preserve child memory inheritance and + # copy on write + if context is None: + context = get_context("fork") + super(BroadcastPool, self).__init__(processes, noint, initargs, + context=context, **kwds) + atexit.register(_shutdown_pool, self) + + def __len__(self): + return len(self._pool) + +
+[docs]
+    def broadcast(self, fcn, args):
+        """ Do a function call on every worker.
+
+        Parameters
+        ----------
+        fcn: function
+            Function to call.
+        args: tuple
+            The argument object passed to ``fcn`` on every worker.
+        """
+        results = self.map(_lockstep_fcn, [(len(self), fcn, args)] * len(self))
+        _numdone.value = 0
+        return results
+ + +
+[docs]
+    def allmap(self, fcn, args):
+        """ Do a function call on every worker with different arguments
+
+        Parameters
+        ----------
+        fcn: function
+            Function to call.
+        args: tuple
+            The arguments to distribute, one entry per worker.
+        """
+        results = self.map(_lockstep_fcn,
+                           [(len(self), fcn, arg) for arg in args])
+        _numdone.value = 0
+        return results
+ + +
+[docs] + def map(self, func, items, chunksize=None): + """ Catch keyboard interrupts to allow the pool to exit cleanly. + + Parameters + ---------- + func: function + Function to call + items: list of tuples + Arguments to pass + chunksize: int, Optional + Number of calls for each process to handle at once + """ + results = self.map_async(func, items, chunksize) + while True: + try: + return results.get(1800) + except TimeoutError: + pass + except KeyboardInterrupt: + self.terminate() + self.join() + raise KeyboardInterrupt
+ + +
+[docs] + def close_pool(self): + """ Close the pool and remove the reference + """ + self.close() + self.join() + atexit.unregister(_shutdown_pool)
+
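+# Usage sketch (illustrative): a pool of 4 fork-based workers; map distributes
+# work as usual, while broadcast runs one call on every worker (e.g. to set
+# up per-process state).
+#
+#   pool = BroadcastPool(4)
+#   results = pool.map(abs, range(-4, 4))
+#   pool.broadcast(print, 'initialising worker')   # one call per worker
+#   pool.close_pool()
+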
+ + +def _dummy_broadcast(self, f, args): + self.map(f, [args] * self.size) + +
+[docs] +class SinglePool(object): +
+[docs] + def broadcast(self, fcn, args): + return self.map(fcn, [args])
+ + +
+[docs] + def map(self, f, items): + return [f(a) for a in items]
+ + + # This is single core, so imap and map + # would not behave differently. This is defined + # so that the general pool interfaces can use + # imap irrespective of the pool type. + imap = map + imap_unordered = map + +
+[docs] + def close_pool(self): + ''' Dummy function to be consistent with BroadcastPool + ''' + pass
+
+ + +
+[docs] +def use_mpi(require_mpi=False, log=True): + """ Get whether MPI is enabled and if so the current size and rank + """ + use_mpi = False + try: + from mpi4py import MPI + comm = MPI.COMM_WORLD + size = comm.Get_size() + rank = comm.Get_rank() + if size > 1: + use_mpi = True + if log: + logger.info( + 'Running under mpi with size: %s, rank: %s', + size, rank + ) + except ImportError as e: + if require_mpi: + print(e) + raise ValueError("Failed to load mpi, ensure mpi4py is installed") + if not use_mpi: + size = rank = 0 + return use_mpi, size, rank
+ + + +
+[docs]
+def choose_pool(processes, mpi=False):
+    """ Get a processing pool
+    """
+    do_mpi, size, rank = use_mpi(require_mpi=mpi)
+    if do_mpi:
+        try:
+            import schwimmbad
+            pool = schwimmbad.choose_pool(mpi=do_mpi,
+                                          processes=(size - 1))
+            pool.broadcast = types.MethodType(_dummy_broadcast, pool)
+            atexit.register(pool.close)
+
+            if processes:
+                logger.info('NOTE: for MPI the pool size is determined by '
+                            'the MPI launch size, not the processes argument')
+
+            if do_mpi and not mpi:
+                logger.info('NOTE: using MPI as this process was launched '
+                            'under MPI')
+        except ImportError:
+            raise ValueError("Failed to start up an MPI pool, "
+                             "install mpi4py / schwimmbad")
+    elif processes == 1:
+        pool = SinglePool()
+    else:
+        if processes == -1:
+            processes = cpu_count()
+        pool = BroadcastPool(processes)
+
+    pool.size = processes
+    if size:
+        pool.size = size
+    return pool
+ + +
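+# Usage sketch (illustrative): choose_pool returns a SinglePool, a
+# BroadcastPool, or (when launched under MPI with schwimmbad installed) an
+# MPI pool, all sharing the same map/broadcast interface.
+#
+#   pool = choose_pool(processes=4)     # 4-process BroadcastPool
+#   serial = choose_pool(processes=1)   # in-process SinglePool
+#   out = pool.map(abs, range(-4, 4))
+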
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/population/fgmc_functions.html b/latest/html/_modules/pycbc/population/fgmc_functions.html new file mode 100644 index 00000000000..d999dd4df69 --- /dev/null +++ b/latest/html/_modules/pycbc/population/fgmc_functions.html @@ -0,0 +1,941 @@ + + + + + + pycbc.population.fgmc_functions — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.population.fgmc_functions

+# Copyright (C) 2021 Thomas Dent
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+
+"""
+A set of helper functions for evaluating event rates, densities etc.
+
+See https://dcc.ligo.org/LIGO-T2100060/public for technical explanations
+"""
+
+from os.path import basename
+import bisect
+from itertools import chain as it_chain, combinations as it_comb
+import numpy as np
+
+from pycbc import conversions as conv
+from pycbc import events
+from pycbc.events.coinc import mean_if_greater_than_zero as coinc_meanigz
+from pycbc.events import triggers
+from pycbc.io.hdf import HFile
+
+
+
+[docs] +def filter_bin_lo_hi(values, lo, hi): + in_bin = np.sign((values - lo) * (hi - values)) + if np.any(in_bin == 0): + raise RuntimeError('Edge case! Bin edges', lo, hi, + 'value(s)', values[in_bin == 0]) + return in_bin == 1
+ + + +
+[docs] +def filter_tmplt_mchirp(bankf, lo_mchirp, hi_mchirp): + with HFile(bankf) as bank: + mchirp = conv.mchirp_from_mass1_mass2(bank['mass1'][:], bank['mass2'][:]) + # Boolean over template id + return filter_bin_lo_hi(mchirp, lo_mchirp, hi_mchirp)
+ + + +
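+# Usage sketch (illustrative; the file name is a placeholder): Boolean mask
+# over template ids selecting chirp masses between 1.0 and 1.5 solar masses.
+#
+#   in_bin = filter_tmplt_mchirp('H1L1-BANK.hdf', 1.0, 1.5)
+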
+[docs]
+def read_full_data(fullf, rhomin, tmplt_filter=None):
+    """Read the zero- and time-lagged triggers identified by a specific
+       set of templates.
+
+    Parameters
+    ----------
+    fullf: string
+        Path to the file that stores zerolag and slide triggers
+    rhomin: float
+        Ranking statistic threshold
+    tmplt_filter: array of Booleans
+        Filter over the templates in the bank, in template_id order
+
+    Returns
+    -------
+    dictionary
+        containing foreground triggers and background information
+    """
+    with HFile(fullf, 'r') as full_data:
+        # apply template filter
+        tid_bkg = full_data['background_exc/template_id'][:]
+        tid_fg = full_data['foreground/template_id'][:]
+        bkg_inbin = tmplt_filter[tid_bkg]  # Boolean over bg events
+        fg_inbin = tmplt_filter[tid_fg]  # Boolean over fg events
+        zerolagstat = full_data['foreground/stat'][:][fg_inbin]
+        zerolagifar = full_data['foreground/ifar'][:][fg_inbin]
+        # arbitrarily choose time from one of the ifos
+        zerolagtime = full_data['foreground/time1'][:][fg_inbin]
+
+        cstat_back_exc = full_data['background_exc/stat'][:][bkg_inbin]
+        dec_factors = full_data['background_exc/decimation_factor'][:][bkg_inbin]
+
+    # filter on stat value
+    above = zerolagstat > rhomin
+    back_above = cstat_back_exc > rhomin
+    return {'zerolagstat': zerolagstat[above],
+            'zerolagifar': zerolagifar[above],
+            'zerolagtime': zerolagtime[above],
+            'dec_factors': dec_factors[back_above],
+            'cstat_back_exc': cstat_back_exc[back_above],
+            'file_name': fullf}
+ + + +
+[docs] +def read_full_data_mchirp(fullf, bankf, rhomin, mc_lo, mc_hi): + tmp_filter = filter_tmplt_mchirp(bankf, mc_lo, mc_hi) + return read_full_data(fullf, rhomin, tmp_filter)
+ + + +
+[docs]
+def log_rho_bg(trigs, counts, bins):
+    """
+    trigs: zerolag event statistic values
+    counts: background histogram
+    bins: bin edges of the background histogram
+
+    Returns:
+    log of background PDF at the zerolag statistic values,
+    fractional uncertainty due to Poisson count (set to 100% for empty bins)
+    """
+    trigs = np.atleast_1d(trigs)
+    if len(trigs) == 0:  # corner case
+        return np.array([]), np.array([])
+
+    assert np.all(trigs >= np.min(bins)), "can't have triggers below bin lower limit"
+
+    N = sum(counts)
+    log_rhos = []
+    fracerr = []
+
+    # If any zerolag trigger is louder than the max bin, a fictitious count
+    # will be placed in an overflow bin (see below), so add it to the
+    # normalization here.
+    if np.any(trigs >= np.max(bins)):
+        N = N + 1
+
+    for t in trigs:
+        if t >= np.max(bins):
+            # For a trigger louder than the max bin, put one fictitious count in
+            # a bin that extends from the limits of the slide triggers out to the
+            # loudest trigger. Fractional error is 100%
+            log_rhos.append(-np.log(N) - np.log(np.max(trigs) - bins[-1]))
+            fracerr.append(1.)
+        else:
+            i = bisect.bisect(bins, t) - 1
+            # If there are no counts for a foreground trigger put a fictitious
+            # count in the background bin
+            if counts[i] == 0:
+                counts[i] = 1
+            log_rhos.append(np.log(counts[i]) - np.log(bins[i+1] - bins[i])
+                            - np.log(N))
+            fracerr.append(counts[i] ** -0.5)
+    return np.array(log_rhos), np.array(fracerr)
+ + + +
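+# Illustrative call (toy numbers): background log-PDF of two zerolag
+# statistic values given a 3-bin background histogram.
+#
+#   counts = np.array([100., 10., 1.])
+#   edges = np.array([8., 9., 10., 11.])
+#   logpdf, frac_err = log_rho_bg([8.5, 10.7], counts, edges)
+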
+[docs] +def log_rho_fg_analytic(trigs, rhomin): + # PDF of a rho^-4 distribution defined above the threshold rhomin + return np.log(3.) + 3. * np.log(rhomin) - 4 * np.log(trigs)
+ + + +
+[docs] +def log_rho_fg(trigs, injstats, bins): + """ + trigs: zerolag event statistic values + injstats: injection event statistic values + bins: histogram bins + + Returns: + log of signal PDF at the zerolag statistic values, + fractional uncertainty from Poisson count + """ + trigs = np.atleast_1d(trigs) + if len(trigs) == 0: # corner case + return np.array([]) + + assert np.min(trigs) >= np.min(bins) + # allow 'very loud' triggers + bmax = np.max(bins) + if np.max(trigs) >= bmax: + print('Replacing stat values lying above highest bin') + print(str(bmax)) + trigs = np.where(trigs >= bmax, bmax - 1e-6, trigs) + assert np.max(trigs) < np.max(bins) # check it worked + + counts, bins = np.histogram(injstats, bins) + N = sum(counts) + dens = counts / np.diff(bins) / float(N) + fracerr = counts ** -0.5 + + tinds = np.searchsorted(bins, trigs) - 1 + return np.log(dens[tinds]), fracerr[tinds]
+ + + +
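+# Illustrative call (toy numbers): signal log-PDF of a trigger at statistic
+# 9.5, estimated from injection statistic values, compared with the analytic
+# rho^-4 form above a threshold of 8.
+#
+#   inj = np.array([8.1, 8.7, 9.2, 10.5, 11.3, 12.0])
+#   edges = np.linspace(8., 13., 6)
+#   logpdf, frac_err = log_rho_fg(9.5, inj, edges)
+#   logpdf_analytic = log_rho_fg_analytic(9.5, 8.)
+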
+[docs] +def get_start_dur(path): + fname = basename(path) # remove directory path + # file name is IFOS-DESCRIPTION-START-DURATION.type + pieces = fname.split('.')[0].split('-') + return pieces[2], pieces[3]
+ + + +
+[docs] +def in_coinc_time_incl(f, cstring, test_times): + """ filter to all times where coincs of type given by cstring exist + """ + in_time = np.zeros(len(test_times)) + starts = np.array(f['segments/%s/start' % cstring][:]) + ends = np.array(f['segments/%s/end' % cstring][:]) + idx_within_segment = events.indices_within_times(test_times, starts, ends) + in_time[idx_within_segment] = np.ones_like(idx_within_segment) + return in_time
+ + + +# what to change for more/fewer ifos +_ifoset = ('H1', 'L1', 'V1') + + +# all combinations of ifos with length mincount or more +# each returned as a tuple in same order as ifos +
+[docs] +def alltimes(ifos, mincount=1): + assert mincount <= len(ifos) + assert len(set(ifos)) == len(ifos) # can't work with duplicates + return it_chain.from_iterable(it_comb(ifos, r) for r in + np.arange(mincount, len(ifos) + 1))
+ + + +_alltimes = frozenset(alltimes(_ifoset, mincount=1)) +_alltimestring = frozenset([''.join(t) for t in _alltimes]) +_allctimes = frozenset(alltimes(_ifoset, mincount=2)) + + +
+[docs] +def ifos_from_combo(ct): + # extract ifos in alphabetical order from a coinc time string + return sorted([ct[i:i + 2] for i in range(0, len(ct), 2)])
+ + + +
+[docs] +def type_in_time(ct, cty): + # returns True if the given coinc type can exist in the coinc time ct + return all(i in ct for i in cty)
+ + + +
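+# Illustrative values: with the default three-detector network,
+# alltimes(('H1', 'L1', 'V1'), mincount=2) yields
+# ('H1', 'L1'), ('H1', 'V1'), ('L1', 'V1'), ('H1', 'L1', 'V1'),
+# and type_in_time('H1L1V1', 'H1L1') is True since H1L1 coincs can occur
+# while all three detectors are observing.
+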
+[docs] +class EventRate(object): + def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp', + bin_lo=None, bin_hi=None): + """ + coinc_times: iterable of strings indicating combinations of ifos operating + coinc_types: list of strings indicating coinc event types to be considered + + """ + # allow for single-ifo time although not supported in pipeline yet + if hasattr(args, 'min_ifos'): + self.mincount = args.min_ifos + else: + self.mincount = 2 + if hasattr(args, 'network') and sorted(args.network) != list(_ifoset): + self.ifos = sorted(args.network) + else: + self.ifos = _ifoset + self.allctimes = frozenset(alltimes(self.ifos, mincount=self.mincount)) + self.allctimestring = frozenset([''.join(t) for t in self.allctimes]) + for ct in coinc_times: + assert ct in list(self.allctimestring) + self.ctimes = coinc_times + + if coinc_types is None: + # all types possible during given times + self.coinc_types = self.allctimestring + else: + # any coinc type must also be a time (?) + for ty in coinc_types: + assert ty in list(self.allctimes) + self.coinc_types = frozenset([''.join(t) for t in coinc_types]) + if args.verbose: + print('Using', self.coinc_types, 'coincs in', + self.allctimestring, 'times') + self.args = args + self.thr = self.args.stat_threshold + self.bin_param = bin_param + self.lo = bin_lo + self.hi = bin_hi + self.bank = None + self.massspins = None + self.tpars = None + self.tmplt_filter = None + self.in_bin = None + self.incl_livetimes = {} + self.livetimes = {} + +
+[docs] + def add_bank(self, bank_file): + self.bank = bank_file + with HFile(self.bank, 'r') as b: + tids = np.arange(len(b['mass1'][:])) + # tuples of m1, m2, s1z, s2z in template id order + self.massspins = triggers.get_mass_spin(b, tids)
+ + +
+[docs] + def filter_templates(self): + """ + calculate array of Booleans in template id order to filter events + """ + assert self.massspins is not None + assert self.lo is not None + assert self.hi is not None + if self.args.verbose: + print('Cutting on %s between %f - %f' % + (self.bin_param, self.lo, self.hi)) + self.tpars = triggers.get_param(self.bin_param, None, *self.massspins) + self.in_bin = filter_bin_lo_hi(self.tpars, self.lo, self.hi)
+ + +
+[docs] + def make_bins(self, maxval, choice='bg'): + # allow options to be strings describing bin formulae as well as floats? + try: + linbw = getattr(self.args, choice + '_bin_width') + logbw = getattr(self.args, choice + '_log_bin_width') + except AttributeError: + pass + if linbw is not None: + n_bins = int((maxval - self.thr) / float(linbw)) + bins = np.linspace(self.thr - 0.0001, maxval, n_bins + 1) + elif logbw is not None: + n_bins = int(np.log(maxval / self.thr) / float(logbw)) + bins = np.logspace(np.log10(self.thr) - 0.0001, np.log10(maxval), + n_bins + 1) + else: + raise RuntimeError("Can't make bins without a %s bin width option!" + % choice) + if self.args.verbose: + print(str(n_bins) + ' ' + choice + ' stat bins') + return bins
+ + +
+[docs] + def get_ctypes(self, tdict): + # tdict is a ifo -> trigger time dictionary + ifotimes = zip(*[tdict[i] for i in self.ifos]) + cty = [] + for ts in ifotimes: + # if an ifo doesn't participate, time is sentinel value -1 + cty.append(''.join([i for i, t in zip(self.ifos, ts) if t > 0])) + # return is array of coinc types strings + return np.array(cty)
+ + +
+[docs] + def moreifotimes(self, ctstring): + # get list of coinc times with more ifos than ctstring + allctime_moreifos = [ct for ct in self.allctimestring if + len(ct) > len(ctstring)] + # only return those when at least the same ifos are operating + ret = [] + ifos = ifos_from_combo(ctstring) + for act in allctime_moreifos: + if all(i in act for i in ifos): + ret.append(act) + return ret
+ + +
+[docs] + def in_coinc_time_excl(self, f, cstring, test_times): + """ filter to all times where exactly the ifos in cstring are observing + """ + if len(cstring) == max(len(s) for s in self.allctimestring): + # ctime string already uniquely specifies time + return in_coinc_time_incl(f, cstring, test_times) + in_time = in_coinc_time_incl(f, cstring, test_times) + # if 'more-ifo' coinc times exist, exclude them + for combo in self.moreifotimes(cstring): + in_moreifo_time = in_coinc_time_incl(f, combo, test_times) + # subtract one if in more-ifo time + in_time -= in_moreifo_time + # if subtraction yields anything other than 1, set to 0 + np.putmask(in_time, in_time != 1, 0) + return in_time
+ + +
+[docs] + def get_livetimes(self, fi): + with HFile(fi, 'r') as f: + for ct in self.ctimes: + # 'inclusive' time when at least the ifos specified by ct are on + fgt = conv.sec_to_year(f[ct].attrs['foreground_time']) + # index dict on chunk start time / coinc type + self.incl_livetimes[(get_start_dur(fi)[0], ct)] = fgt + # subtract times during which 1 more ifo was on, + # ie subtract H1L1* time from H1L1; subtract H1* time from H1; etc + for combo in self.moreifotimes(ct): + if len(combo) == len(ct) + 2: + fgt -= conv.sec_to_year(f[combo].attrs['foreground_time']) + # index dict on chunk start time / coinc time + self.livetimes[(get_start_dur(fi)[0], ct)] = fgt
+
+ + + +
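+# Construction sketch (illustrative; the attribute names mirror those
+# accessed above, and the values and file name are made up).  The subclasses
+# below are constructed the same way.
+#
+#   from types import SimpleNamespace
+#   args = SimpleNamespace(stat_threshold=8., verbose=True, min_ifos=2,
+#                          bg_bin_width=0.2, bg_log_bin_width=None,
+#                          inj_bin_width=None, inj_log_bin_width=0.05)
+#   rates = EventRate(args, ['H1L1', 'H1L1V1'], bin_param='mchirp',
+#                     bin_lo=1.0, bin_hi=1.5)
+#   rates.add_bank('H1L1-BANK.hdf')
+#   rates.filter_templates()
+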
+[docs] +class ForegroundEvents(EventRate): + def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp', + bin_lo=None, bin_hi=None): + EventRate.__init__(self, args, coinc_times, coinc_types=coinc_types, + bin_param=bin_param, bin_lo=bin_lo, bin_hi=bin_hi) + self.thr = self.args.stat_threshold + # set of arrays in parallel containing zerolag event properties + self.starttimes = [] + self.gpst = np.array([]) + self.stat = np.array([]) + self.ifar = np.array([]) + self.masspars = np.array([]) + self.start = np.array([]) + self.ctime = np.array([], dtype=object) # allow unequal length strings + self.ctype = np.array([], dtype=object) + self.bg_pdf = np.array([]) + self.sg_pdf = np.array([]) + +
+[docs] + def add_zerolag(self, full_file): + start = get_start_dur(full_file)[0] + self.starttimes.append(start) + with HFile(full_file, 'r') as f: + # get stat values & threshold + _stats = f['foreground/stat'][:] + _keepstat = _stats > self.thr + + # get templates & apply filter + _tids = f['foreground/template_id'][:] + # we need the template filter to have already been made + assert self.in_bin is not None + _keep = np.logical_and(_keepstat, self.in_bin[_tids]) + massp = self.tpars[_tids][_keep] # filtered template params + + # assign times and coinc types + _times = {} + for i in self.ifos: + _times[i] = f['foreground/' + i + '/time'][:][_keep] + # if an ifo doesn't participate, time is sentinel value -1 + # event time is mean of remaining positive GPS times + meantimes = np.array([coinc_meanigz(ts)[0] + for ts in zip(*_times.values())]) + _ctype = self.get_ctypes(_times) + if len(_ctype) == 0: + if self.args.verbose: + print('No events in ' + start) + return + # filter events + in_ctypes = np.array([cty in self.coinc_types for cty in _ctype]) + meantimes = meantimes[in_ctypes] + # get coinc time as strings + # (strings may have different lengths) + _ctime = np.repeat(np.array([''], dtype=object), len(meantimes)) + for ct in self.allctimestring: + intime = self.in_coinc_time_excl(f, ct, meantimes) + _ctime[intime == 1] = ct + if self.args.verbose: + print('Got %i events in %s time' % (len(_ctime[intime == 1]), ct)) + # store + self.stat = np.append(self.stat, _stats[_keep][in_ctypes]) + try: # injection analyses only have 'ifar_exc', not 'ifar' + self.ifar = np.append(self.ifar, + f['foreground/ifar'][:][_keep][in_ctypes]) + except KeyError: + self.ifar = np.append(self.ifar, + f['foreground/ifar_exc'][:][_keep][in_ctypes]) + self.gpst = np.append(self.gpst, meantimes) + self.masspars = np.append(self.masspars, massp) + self.start = np.append(self.start, int(start) * + np.ones_like(meantimes)) + self.ctime = np.append(self.ctime, _ctime) + self.ctype = np.append(self.ctype, _ctype[in_ctypes])
+ + +
+[docs] + def get_bg_pdf(self, bg_rate): + assert isinstance(bg_rate, BackgroundEventRate) + self.bg_pdf = np.zeros_like(self.stat) # initialize + + # do the calculation by chunk / coinc time / coinc type + for st in self.starttimes: + for ct in self.allctimestring: + for cty in self.coinc_types: + if not type_in_time(ct, cty): + continue + _idx = np.logical_and((self.ctime == ct), (self.ctype == cty)) + _idx = np.logical_and(_idx, (self.start == int(st))) + _vals = self.stat[_idx] + if len(_vals) == 0: + continue + # evaluate bg pdf for specific chunk, coinc time & type + _pdf = bg_rate.eval_pdf(st, ct, cty, _vals) + # store + self.bg_pdf[_idx] = _pdf + if self.args.verbose: + print('Found bg PDFs for ' + cty + ' coincs from ' + st)
+ + +
+[docs] + def get_sg_pdf(self, sg_rate): + assert isinstance(sg_rate, SignalEventRate) + self.sg_pdf = np.zeros_like(self.stat) + + for st in self.starttimes: + for ct in self.allctimestring: + for cty in self.coinc_types: + if not type_in_time(ct, cty): + continue + _idx = np.logical_and((self.ctime == ct), (self.ctype == cty)) + _idx = np.logical_and(_idx, (self.start == int(st))) + _vals = self.stat[_idx] + if len(_vals) == 0: + continue + # norm of PDF is chunk-dependent so need the chunk start time + _pdf = sg_rate.eval_pdf(st, ct, cty, _vals) + # store + self.sg_pdf[_idx] = _pdf + if self.args.verbose: + print('Found sg PDFs for %s coincs in %s time from %s' % + (cty, ct, st))
+
+ + + +
+[docs] +class BackgroundEventRate(EventRate): + def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp', + bin_lo=None, bin_hi=None): + EventRate.__init__(self, args, coinc_times, coinc_types=coinc_types, + bin_param=bin_param, bin_lo=bin_lo, bin_hi=bin_hi) + self.thr = self.args.stat_threshold + # BG values in dict indexed on tuple (chunk start, coinc type) + self.bg_vals = {} + self.bg_dec = {} + # BG livetimes + self.bg_livetimes = {} + # BG hist stored as bin heights / edges + self.bg_hist = {} + # Expected counts of BG events + self.exp_bg = {} + # Total expected BG count + self.norm = 0 + +
+[docs] + def add_background(self, full_file): + start = get_start_dur(full_file)[0] + self.get_livetimes(full_file) + + with HFile(full_file, 'r') as ff: + # get stat values and threshold + _bgstat = ff['background_exc/stat'][:] + _keepstat = _bgstat > self.thr + + # get template ids and filter + _bgtid = ff['background_exc/template_id'][:] + # need the template filter to have already been made + assert self.in_bin is not None + _keep = np.logical_and(_keepstat, self.in_bin[_bgtid]) + _bgstat = _bgstat[_keep] + _bgdec = ff['background_exc/decimation_factor'][:][_keep] + + # assign coinc types + _times = {} + for i in self.ifos: + # NB times are time-shifted between ifos + _times[i] = ff['background_exc/' + i + '/time'][:][_keep] + _ctype = self.get_ctypes(_times) + for cty in self.coinc_types: + self.bg_vals[(start, cty)] = _bgstat[_ctype == cty] + self.bg_dec[(start, cty)] = _bgdec[_ctype == cty] + # get bg livetime for noise rate estimate + # - convert to years + self.bg_livetimes[(start, cty)] = conv.sec_to_year( + ff[cty].attrs['background_time_exc']) + + # make histogram + bins = self.make_bins(np.max(_bgstat[_ctype == cty]), 'bg') + # hack to make larger bins for H1L1V1 + if cty == 'H1L1V1': + if self.args.verbose: + print('Halving bg bins for triple bg hist') + bins = bins[::2].copy() # take every 2nd bin edge + self.bg_hist[(start, cty)] = \ + np.histogram(_bgstat[_ctype == cty], + weights=_bgdec[_ctype == cty], bins=bins) + # get expected number of bg events for this chunk and coinc type + self.exp_bg[(start, cty)] = _bgdec[_ctype == cty].sum() * \ + self.incl_livetimes[(start, cty)] / \ + self.bg_livetimes[(start, cty)]
+ + +
+[docs] + def plot_bg(self): + from matplotlib import pyplot as plt + for chunk_type, hist in self.bg_hist.items(): + print('Plotting', chunk_type, 'background PDF ...') + xplot = np.linspace(self.thr, self.args.plot_max_stat, 500) + heights, bins = hist[0], hist[1] + logpdf, _ = log_rho_bg(xplot, heights, bins) + plt.plot(xplot, np.exp(logpdf)) + # plot error bars at bin centres + lpdf, fracerr = log_rho_bg(0.5 * (bins[:-1] + bins[1:]), heights, bins) + plt.errorbar(0.5 * (bins[:-1] + bins[1:]), np.exp(lpdf), + yerr=np.exp(lpdf) * fracerr, fmt='none') + plt.semilogy() + plt.grid(True) + plt.xlim(xmax=self.args.plot_max_stat + 0.5) + plt.ylim(ymin=0.7 * np.exp(logpdf.min())) + plt.xlabel('Ranking statistic') + plt.ylabel('Background PDF') + plt.savefig(self.args.plot_dir + '%s-bg_pdf-%s' % + (chunk_type[1], chunk_type[0]) + '.png') + plt.close()
+ + +
+[docs] + def get_norms(self): + for count in self.exp_bg.values(): + self.norm += count
+ + +
+[docs] + def eval_pdf(self, chunk, ctime, ctype, statvals): + # given statistic values all in the same data chunk and coinc type, + # evaluate the background pdf normalized over all chunks & types + assert self.norm > 0 + chunk_type = (chunk, ctype) + # fraction of expected noise events in given chunk & coinc type + frac_chunk_type = self.exp_bg[chunk_type] / self.norm + # fraction of inj in specified chunk, coinc type *and* time + frac_in_time = self.livetimes[(chunk, ctime)] /\ + self.incl_livetimes[chunk_type] + # unpack heights / bins from bg hist object + local_pdfs, _ = log_rho_bg(statvals, *self.bg_hist[chunk_type]) + return local_pdfs + np.log(frac_chunk_type * frac_in_time)
+
+ + + +
+[docs] +class SignalEventRate(EventRate): + def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp', + bin_lo=None, bin_hi=None): + EventRate.__init__(self, args, coinc_times, coinc_types=coinc_types, + bin_param=bin_param, bin_lo=bin_lo, bin_hi=bin_hi) + self.thr = self.args.stat_threshold + self.starts = [] # bookkeeping + # for the moment roll all inj chunks together + # but sort both by coinc time and coinc type + self.inj_vals = {} # dict indexed on tuple (coinc time, coinc type) + self.fg_bins = {} + self.norm = 0 + +
+[docs] + def add_injections(self, inj_file, fg_file): + # fg_file only needed for coinc time info :/ + self.starts.append(get_start_dur(inj_file)[0]) + self.get_livetimes(inj_file) + + with HFile(inj_file, 'r') as jf: + # get stat values and threshold + _injstat = jf['found_after_vetoes/stat'][:] + _keepstat = _injstat > self.thr + + # get template ids and filter + _injtid = jf['found_after_vetoes/template_id'][:] + assert self.in_bin is not None + _keep = np.logical_and(_keepstat, self.in_bin[_injtid]) + _injstat = _injstat[_keep] + + # assign coinc types + _times = {} + for i in self.ifos: + _times[i] = jf['found_after_vetoes/' + i + '/time'][:][_keep] + meantimes = np.array([coinc_meanigz(ts)[0] + for ts in zip(*_times.values())]) + _ctype = self.get_ctypes(_times) + # get coinc time as strings + # (strings may have different lengths) + _ctime = np.repeat(np.array([''], dtype=object), len(meantimes)) + for ct in self.allctimestring: + # get coinc time info from segments in fg file + intime = self.in_coinc_time_excl( + HFile(fg_file, 'r'), ct, meantimes) + _ctime[intime == 1] = ct # do we need this? + if self.args.verbose: + print('Got %i ' % (intime == 1).sum() + 'inj in %s time' % ct) + # filter by coinc type and add to array + for cty in self.coinc_types: + if not type_in_time(ct, cty): + continue + my_vals = _injstat[np.logical_and(_ctype == cty, intime == 1)] + if self.args.verbose: + print('%d ' % len(my_vals) + 'are %s coincs' % cty) + if (ct, cty) not in self.inj_vals: # initialize + self.inj_vals[(ct, cty)] = np.array([]) + if len(my_vals) > 0: + self.inj_vals[(ct, cty)] = \ + np.append(self.inj_vals[(ct, cty)], my_vals) + del intime, my_vals
+ + +
+[docs] + def make_all_bins(self): + for ct in self.allctimestring: + for cty in self.coinc_types: + if not type_in_time(ct, cty): + continue + vals = self.inj_vals[(ct, cty)] + # get norm of fg histogram by taking bins out to max injection stat + binmax = vals.max() * 1.01 + self.fg_bins[(ct, cty)] = self.make_bins(binmax, 'inj')
+ + +
+[docs] + def plot_inj(self): + from matplotlib import pyplot as plt + for ct in self.allctimestring: + for cty in self.coinc_types: + if not type_in_time(ct, cty): + continue + print('Plotting ' + cty + ' signal PDF in ' + ct + ' time ...') + samples = self.inj_vals[(ct, cty)] + bins = self.fg_bins[(ct, cty)] + xplot = np.logspace(np.log10(self.thr), + np.log10(samples.max()), 500) + logpdf, _ = log_rho_fg(xplot, samples, bins) + plt.plot(xplot, np.exp(logpdf)) + # plot error bars at bin centres + lpdf, fracerr = log_rho_fg(0.5 * (bins[:-1] + bins[1:]), + samples, bins) + plt.errorbar(0.5 * (bins[:-1] + bins[1:]), np.exp(lpdf), + yerr=np.exp(lpdf) * fracerr, fmt='none') + plt.semilogy() + plt.grid(True) + # zoom in on the 'interesting' range + plt.xlim(xmin=self.thr, xmax=2. * self.args.plot_max_stat) + plt.ylim(ymin=0.7 * np.exp(logpdf.min())) + plt.title(r'%i injs plotted, \# of bins %i' % + (len(samples), len(bins) - 1)) + plt.xlabel('Ranking statistic') + plt.ylabel('Signal PDF') + plt.savefig(self.args.plot_dir + '%s-fg_pdf-%s' % (ct, cty) + + '.png') + plt.close()
+ + +
+[docs] + def get_norms(self): + for vals in self.inj_vals.values(): + # injections don't have weights/decimation + self.norm += float(len(vals))
+ + +
+[docs] + def eval_pdf(self, chunk, ctime, ctype, statvals): + # given statistic values in the same chunk, coinc time and coinc type, + # evaluate the signal pdf normalized over all chunks, times and types + assert self.norm > 0 + time_type = (ctime, ctype) + # fraction of inj in specified coinc time and type + frac_time_type = float(len(self.inj_vals[time_type])) / self.norm + # total livetime for specified coinc time + total_coinc_time = sum([self.livetimes[(ch, ctime)] for ch in self.starts]) + # fraction of inj in specified chunk *and* coinc time/type + this_norm = frac_time_type * self.livetimes[(chunk, ctime)] / \ + total_coinc_time + local_pdfs, _ = log_rho_fg(statvals, self.inj_vals[time_type], + self.fg_bins[time_type]) + return local_pdfs + np.log(this_norm)
+
+ + +__all__ = ['filter_bin_lo_hi', 'filter_tmplt_mchirp', 'read_full_data', + 'read_full_data_mchirp', 'log_rho_bg', 'log_rho_fg_analytic', + 'log_rho_fg', 'get_start_dur', 'in_coinc_time_incl', 'alltimes', + 'ifos_from_combo', 'type_in_time', 'EventRate', 'ForegroundEvents', + 'BackgroundEventRate', 'SignalEventRate'] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/population/fgmc_laguerre.html b/latest/html/_modules/pycbc/population/fgmc_laguerre.html new file mode 100644 index 00000000000..f16f6ee6734 --- /dev/null +++ b/latest/html/_modules/pycbc/population/fgmc_laguerre.html @@ -0,0 +1,324 @@ + + + + + + pycbc.population.fgmc_laguerre — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.population.fgmc_laguerre

+# Copyright (C) 2016 Jolien Creighton
+#           (C) 2021 Jolien Creighton & Thomas Dent
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+
+"""
+Based ultimately on code used for O1 rate calculations, see
+https://git.ligo.org/RatesAndPopulations/lvc-rates-and-pop/-/blob/master/bin/O1_scripts/lvc_rates_calc_posterior
+and technical documentation at https://dcc.ligo.org/LIGO-T1700029/public
+"""
+
+import numpy
+import scipy.stats as sst
+import scipy.special as ssp
+import scipy.integrate as sig
+import scipy.optimize as sop
+
+
+
+[docs] +class augmented_rv_continuous(sst.rv_continuous): + + def __init__(self, unit='dimensionless', texunit=r'\mbox{dimensionless}', + texsymb=r'x', **kwargs): + ''' + Parameters + ---------- + unit : string, optional + units of independent variable + texunit : string, optional + units of independent variable, in tex format + texsymb : string, optional + symbol of independent variable, in tex format + ''' + + super(augmented_rv_continuous, self).__init__(**kwargs) + self._hpd_interval_vec = numpy.vectorize(self._hpd_interval_scalar) + self.unit = unit + self.texunit = texunit + self.texsymb = texsymb + + def _hpd_interval_scalar(self, alpha): + + def width(a): + return self.ppf(alpha + self.cdf(a)) - a + + a = self.ppf(1e-6) # a is displaced slightly from 0 + b = self.ppf(alpha) + if self.pdf(a) >= self.pdf(b): # upper limit + return self.a, b + a = sop.fminbound(width, a, self.ppf(1.0 - alpha)) + b = self.ppf(alpha + self.cdf(a)) + return a, b + +
+[docs] + def hpd_interval(self, alpha): + ''' + Confidence interval of highest probability density. + + Parameters + ---------- + alpha : array_like of float + Probability that an rv will be drawn from the returned range. + Each value should be in the range [0, 1]. + + Returns + ------- + a, b : ndarray of float + end-points of range that contain ``100 * alpha %`` of the rv's + possible values. + ''' + if isinstance(alpha, (float, numpy.number)): + a, b = self._hpd_interval_scalar(alpha) + else: + a, b = self._hpd_interval_vec(alpha) + return a, b
+
+ + + +
+[docs] +class count_posterior(augmented_rv_continuous): + ''' + Count posterior distribution. + ''' + + def __init__(self, logbf, laguerre_n, Lambda0, prior=-0.5, + name='count posterior', unit='signals/experiment', + texunit=r'\mathrm{signals}/\mathrm{experiment}', + texsymb=r'\Lambda_1'): + ''' + Parameters + ---------- + logbf : array_like + logs of normalized foreground over background pdf ratios of events + laguerre_n: int + degree of generalized Laguerre polynomial for quadrature formula + Lambda0 : float + background rate (default=len(bayesfac)) + prior : float or count_posterior, optional + prior distribution power law of improper prior if float + or count posterior distribution if count_posterior + (default=-0.5: Jeffreys prior) + ''' + super(count_posterior, self).__init__(a=0.0, b=numpy.inf, name=name, + unit=unit, texunit=texunit, + texsymb=texsymb) + self.Lambda0 = Lambda0 + # weighted Bayes factor + self.k = numpy.exp(numpy.array(logbf)) / self.Lambda0 + + # power-law priors + self.alpha = prior + if prior == 0: + self.prior = lambda x: 1.0 + elif prior > 0: + self.prior = lambda x: x ** prior + else: + # regularize at x = 0 + self.prior = lambda x: (x + self.xtol) ** prior + + # pre-compute Gaussian-Generalized-Laguerre quadrature + # abscissas and weights, along with pdf at these abscissas + self.x, w = ssp.la_roots(laguerre_n, self.alpha) + self.p = numpy.array([ww * numpy.prod(1.0 + self.k * xx) + for xx, ww in zip(self.x, w)]) + self.norm = 1.0 / sum(self.p) + self.p *= self.norm + + def _pdf(self, x): + # discourage underflows by evaluating ln L and using ln(1+x) function + logL = -x + numpy.sum(numpy.log1p(self.k * x)) + P = numpy.exp(logL) * self.prior(x) + return self.norm * P + + def _cdf(self, x): + return sig.quad(self._pdf, 0.0, x) + +
+[docs] + def expect(self, func): + ''' + Calculate expected value of a function with respect to the + distribution. + + The expected value of a function ``f(x)`` with respect to a + distribution ``dist`` is defined as:: + + E[x] = Integral(f(x) * dist.pdf(x)) + + Parameters + ---------- + func : callable + Function for which integral is calculated. Takes only one argument. + + Returns + ------- + expect : float + The calculated expected value. + ''' + # FIXME: not as feature rich as the expect method this overrides + return sum(pp * func(xx) for xx, pp in zip(self.x, self.p))
+ + + def _munp(self, n): + return self.expect(lambda x: x**n) + +
+[docs] + def p_bg(self, logbf): + ''' + Calculate the false alarm probabilities of the events. + + Parameters + ---------- + logbf : array_like + Logs of foreground over background probability ratios of events. + ''' + # get weighted bayes factor + k = numpy.exp(numpy.asarray(logbf)) / self.Lambda0 + + P0 = numpy.dot(1./(1. + numpy.outer(k, self.x)), self.p) + if isinstance(k, (float, int, numpy.number)): + return P0.item() + if isinstance(k, numpy.ndarray) and k.ndim == 0: + return P0.item() + # except in special cases above, return array of values + return P0
+
+ + +__all__ = ['augmented_rv_continuous', 'count_posterior'] +
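+# Usage sketch (illustrative, toy numbers): posterior on the astrophysical
+# count given log Bayes factors for three candidates and an expected
+# background count of 2; p_bg gives each candidate's probability of
+# terrestrial origin.
+#
+#   post = count_posterior([4.0, 1.5, -0.5], laguerre_n=50, Lambda0=2.)
+#   mean_count = post.mean()
+#   p_terr = post.p_bg([4.0, 1.5, -0.5])
+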
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/population/fgmc_plots.html b/latest/html/_modules/pycbc/population/fgmc_plots.html new file mode 100644 index 00000000000..5ee169cf3d8 --- /dev/null +++ b/latest/html/_modules/pycbc/population/fgmc_plots.html @@ -0,0 +1,428 @@ + + + + + + pycbc.population.fgmc_plots — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.population.fgmc_plots

+# Copyright (C) 2021 Jolien Creighton & Thomas Dent
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+
+import json
+import numpy
+
+from matplotlib import figure
+from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+
+
+def plot_setup(*args):
+    # reduce scale of codeclimate complaints
+    fig = figure.Figure()
+    FigureCanvas(fig)
+    ax = fig.gca()
+    ax.grid(True)
+    return fig, ax
+
+
+def plotodds(rankstats, p_b):
+    # odds vs ranking stat
+    fig, ax = plot_setup()
+    ax.loglog()
+    ax.plot(rankstats, (1.0 - p_b) / p_b, 'k.')
+    ax.plot([rankstats.min(), rankstats.max()], [1.0, 1.0], 'c--')
+    ax.set_title(r'Foreground/Background Odds')
+    ax.set_xlabel(r'ranking statistic')
+    ax.set_ylabel(r'$P_1/P_0$')
+    ax.set_xlim(0.99 * rankstats.min(), 1.2 * rankstats.max())
+    return fig
+
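+# Illustrative call (toy numbers): odds vs ranking statistic for three
+# candidates, saved to a placeholder file name.
+#
+#   stats = numpy.array([9.0, 10.5, 13.2])
+#   p_terr = numpy.array([0.7, 0.2, 0.001])
+#   fig = plotodds(stats, p_terr)
+#   fig.savefig('odds_vs_stat.png')
+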
+
+def plotpbg(rankstats, p_b):
+    # p_terr vs ranking stat
+    fig, ax = plot_setup()
+    ax.loglog()
+    ax.plot(rankstats, p_b, 'k.')
+    ax.set_title(r'Probability of background origin')
+    ax.set_xlabel(r'ranking statistic')
+    ax.set_ylabel(r'$P_0$')
+    ax.set_xlim(0.99 * rankstats.min(), 1.2 * rankstats.max())
+    return fig
+
+
+def plotoddsifar(ifar, p_b):
+    # odds vs IFAR
+    fig, ax = plot_setup()
+    ax.loglog()
+    ax.plot(ifar, (1.0 - p_b) / p_b, 'k.')
+    ax.plot([ifar.min(), ifar.max()], [1.0, 1.0], 'c--')
+    ax.set_title(r'Foreground/Background Odds')
+    ax.set_xlabel(r'IFAR')
+    ax.set_ylabel(r'$P_1/P_0$')
+    ax.set_xlim(0.9 * ifar.min(), 1.1 * ifar.max())
+    return fig
+
+
+def plotfdr(p_b, ntop):
+    # False dismissal rate vs p_terr
+    fig, ax = plot_setup()
+    # get smallest N p_terr values
+    p_b = numpy.sort(p_b)[:ntop]
+    # cumulative probable noise/signal counts
+    cum_false = p_b.cumsum()
+    cum_true = (1. - p_b).cumsum()
+    ax.semilogy()
+    ax.plot(p_b, cum_false / cum_true, 'b+')
+    ax.plot(p_b, 1. / (numpy.arange(len(p_b)) + 1), 'c--', label=r'1 noise event')
+    ax.legend()
+    ax.set_xlabel(r'$p_{\rm terr}$')
+    ax.set_ylabel(r'Cumulative $p_{\rm terr}$ / Cumulative $p_{\rm astro}$')
+    ax.set_xlim(0., 1.05 * p_b.max())
+    return fig
+
+
+def finalize_plot(fig, args, extensions, name, pltype, tag):
+    # Helper function
+    for extn in extensions:
+        filename = args.pldir + '_'.join(name.split()) + '_' + pltype + tag + extn
+        if args.verbose:
+            print('writing %s ...' % filename)
+        fig.savefig(filename)
+
+
+
+[docs] +def odds_summary(args, rankstats, ifars, p_b, ntop, times=None, mchirps=None, + name='events', plot_extensions=None): + + print('\nSummary of Top %i %s' % (ntop, name.title())) + + # do sort in reverse order + statsort = numpy.argsort(1. / numpy.array(rankstats)) + topn = statsort[:ntop] # indices giving top n + topgps = [] + topstat = [] + topifar = [] + toppastro = [] + for n, i in enumerate(topn): + gps = times[i] if times is not None else '' + ifar = ifars[i] + stat = rankstats[i] + mchirpstring = 'mchirp %.3F' % mchirps[i] if mchirps is not None else '' + topgps.append(gps) + topstat.append(stat) + topifar.append(ifar) + print('#%d event:' % (n + 1), str(gps), mchirpstring) + print(' rankstat = %-8.3f' % stat) + print(' IFAR = %.2f' % ifar) + print(' odds = %g' % ((1. - p_b[i]) / p_b[i])) + toppastro.append(1. - p_b[i]) + + if args.p_astro_txt is not None: + numpy.savetxt(args.p_astro_txt, + numpy.column_stack((topgps, topstat, topifar, toppastro)), + fmt=['%.3F', '%.2F', '%.2F', '%.5F'], + delimiter=',', + header='GPS seconds, stat, IFAR/yr, p_astro') + + if hasattr(args, 'json_tag') and args.json_tag is not None: + # save to catalog-style files + def dump_json(gps, p_a, p_b): + jfile = args.plot_dir + 'H1L1V1-PYCBC_%s-%s-1.json' % \ + (args.json_tag, str(int(gps))) # truncate to integer GPS + with open(jfile, 'w') as jf: + json.dump({'Astro': p_a, 'Terrestrial': p_b}, jf) + if hasattr(args, 'json_min_ifar') and args.json_min_ifar is not None: + for g, ifar, pt in zip(times, ifars, p_b): + if ifar < args.json_min_ifar: + continue + dump_json(g, 1. - pt, pt) + else: + for g, pa in zip(topgps, toppastro): + dump_json(g, pa, 1. - pa) + + if plot_extensions is not None: + plottag = args.plot_tag or '' + if plottag != '': + plottag = '_' + plottag + fig = plotodds(rankstats, p_b) + finalize_plot(fig, args, plot_extensions, name, 'odds', plottag) + fig = plotpbg(rankstats, p_b) + finalize_plot(fig, args, plot_extensions, name, 'pbg', plottag) + fig = plotoddsifar(ifars, p_b) + finalize_plot(fig, args, plot_extensions, name, 'ifarodds', plottag) + fig = plotfdr(p_b, ntop) + finalize_plot(fig, args, plot_extensions, name, 'fdr', plottag)
+ + + +
+[docs] +def plotdist(rv, plot_lim=None, middle=None, credible_intervals=None, style='linear'): + + fig = figure.Figure() + FigureCanvas(fig) + ax = fig.gca() + + name = rv.name if hasattr(rv, 'name') else None + symb = rv.texsymb if hasattr(rv, 'texsymb') else r'x' + unit = rv.texunit if hasattr(rv, 'texunit') else None + + xlabel = r'$' + symb + '$' + if unit is not None: + xlabel += r' ($' + unit + r'$)' + + a, b = rv.interval(0.9999) + + if style == 'loglog': + ax.loglog() + ylabel = r'$p(' + symb + r')$' + space = lambda a, b: numpy.logspace(numpy.log10(a), numpy.log10(b), 100) + func = numpy.vectorize(rv.pdf) + xmin = a + ymin = rv.pdf(b) + + elif style == 'semilogx': + ax.semilogx() + ylabel = r'$' + symb + r'\,p(' + symb + r')$' + space = lambda a, b: numpy.logspace(numpy.log10(a), numpy.log10(b), 100) + func = numpy.vectorize(lambda x: x * rv.pdf(x)) + xmin = a + ymin = 0.0 + + else: # linear + ax.yaxis.set_ticklabels([]) + ylabel = r'$p(' + symb + r')$' + space = lambda a, b: numpy.linspace(a, b, 100) + func = numpy.vectorize(rv.pdf) + xmin = 0.0 + ymin = 0.0 + + x = space(a, b) + y = func(x) + + ax.plot(x, y, color='k', linestyle='-') + if plot_lim is not None: + xmin, xmax = plot_lim + ax.set_xlim(xmin=xmin, xmax=xmax) + else: + ax.set_xlim(xmin=xmin) + ax.set_ylim(ymin=ymin) + + if y.max() < 2 and y.max() > 1: + ax.set_ylim(ymax=2.) + + if name is not None: + ax.set_title(name.title()) + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + + if middle is not None: + ax.plot([middle, middle], [ymin, func(middle)], 'k--') + + if credible_intervals is not None: + # alpha : density of fill shading + alpha = 1.0 / (1.0 + len(credible_intervals)) + for lo, hi in credible_intervals.values(): + lo = max(a, lo) + hi = min(b, hi) + x = space(lo, hi) + y = func(x) + ax.fill_between(x, y, ymin, color='k', alpha=alpha) + + return fig
+ + + +
+[docs] +def dist_summary(args, rv, plot_styles=('linear', 'loglog', 'semilogx'), + plot_extensions=None, middle=None, credible_intervals=None): + + name = rv.name if hasattr(rv, 'name') else 'posterior' + unit = rv.unit if hasattr(rv, 'unit') else '' + median = rv.median() + mode = rv.mode() if hasattr(rv, 'mode') else None + + print('Summary of ' + name.title()) + print('mean =', rv.mean(), unit) + print('median =', median, unit) + if mode is not None: + print('mode =', mode, unit) + print('stddev =', rv.std(), unit) + + if credible_intervals is not None and len(credible_intervals) > 0: + print('equal-tailed credible intervals:') + equal_tailed_credible_intervals = {} + for cred in credible_intervals: + lo, hi = rv.interval(cred) + equal_tailed_credible_intervals[cred] = (lo, hi) + print('%g%%' % (cred * 100), 'credible interval =', '[%g, %g]' % + (lo, hi), unit) + + if hasattr(rv, 'hpd_interval'): + print('highest probability density credible intervals:') + hpd_credible_intervals = {} + for cred in credible_intervals: + hpdlo, hpdhi = rv.hpd_interval(cred) + hpd_credible_intervals[cred] = (hpdlo, hpdhi) + print('%g%%' % (cred * 100), 'credible interval =', '[%g, %g]' % + (hpdlo, hpdhi), unit) + else: + hpd_credible_intervals = None + + if len(credible_intervals) == 0: + credible_intervals = None + intervals = None + + if middle == 'mode' and mode is not None: + middle = mode + if credible_intervals is not None: + # use hpd intervals with mode + intervals = hpd_credible_intervals + else: + middle = median + if credible_intervals is not None: + # use equal tailed intervals with median + intervals = equal_tailed_credible_intervals + + # plot distributions + if plot_extensions is not None: + plottag = args.plot_tag or '' + if plottag != '': + plottag = '_' + plottag + for style in plot_styles: + fig = plotdist(rv, plot_lim=args.plot_limits, middle=middle, + credible_intervals=intervals, style=style) + finalize_plot(fig, args, plot_extensions, name, style, plottag) + + if credible_intervals is not None and len(credible_intervals) == 1: + return median, lo - median, hi - median + # keep codeclimate happy with explicit return statement + return None
+ + +__all__ = ['plotdist', 'odds_summary', 'dist_summary'] +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/population/live_pastro.html b/latest/html/_modules/pycbc/population/live_pastro.html new file mode 100644 index 00000000000..b3286974e2d --- /dev/null +++ b/latest/html/_modules/pycbc/population/live_pastro.html @@ -0,0 +1,454 @@ + + + + + + pycbc.population.live_pastro — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.population.live_pastro

+import logging
+import h5py
+import numpy
+
+from pycbc.tmpltbank import bank_conversions as bankconv
+from pycbc.events import triggers
+from pycbc import conversions as conv
+from . import fgmc_functions as fgmcfun
+
+_s_per_yr = 1. / conv.sec_to_year(1.)
+
+logger = logging.getLogger('pycbc.population.live_pastro')
+
+
+
+[docs] +def check_template_param_bin_data(spec_json): + """ + Parameters + ---------- + spec_json: JSON dictionary-like object + Result of parsing json file containing static data + + Returns + ------- + spec_json: dictionary + """ + # Check the necessary data are present + assert 'param' in spec_json + assert 'bin_edges' in spec_json # should be a list of floats + assert 'sig_per_yr_binned' in spec_json # signal rate per bin (per year) + # Do the lengths of bin arrays match? + assert len(spec_json['bin_edges']) == \ + len(spec_json['sig_per_yr_binned']) + 1 + assert 'ref_bns_horizon' in spec_json # float + assert 'netsnr_thresh' in spec_json # float + + return spec_json
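For illustration, a spec dictionary of the kind this function validates could look as follows; the parameter choice and all numbers are placeholders, not calibrated values.

    spec = {
        'param': 'mtotal',                    # template parameter used for binning
        'bin_edges': [2., 4., 10., 100.],     # three bins
        'sig_per_yr_binned': [10., 5., 20.],  # one signal rate (per year) per bin
        'ref_bns_horizon': 150.,              # Mpc
        'netsnr_thresh': 6.,
    }
    spec = check_template_param_bin_data(spec)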
+ + + +def check_template_param_bin_farlim_data(spec_json): + """ + Parameters + ---------- + spec_json: JSON dictionary-like object + Result of parsing json file containing static data + + Returns + ------- + spec_json: dictionary + """ + # Standard template param bin checks + check_template_param_bin_data(spec_json) + + # In addition, need limiting FAR and SNR values + assert 'limit_far' in spec_json + assert 'limit_snr' in spec_json + + return spec_json + + +
+[docs] +def read_template_bank_param(spec_d, bankf): + """ + Parameters + ---------- + spec_d: dictionary + Prerequisite data for p astro calc + bankf: string + Path to HDF5 template bank file + + Returns + ------- + bank_data: dictionary + Template counts binned over specified param + """ + with h5py.File(bankf, 'r') as bank: + # All the templates + tids = numpy.arange(len(bank['mass1'])) + # Get param vals + logger.info('Getting %s values from bank', spec_d['param']) + parvals = bankconv.get_bank_property(spec_d['param'], bank, tids) + counts, edges = numpy.histogram(parvals, bins=spec_d['bin_edges']) + bank_data = {'bin_edges': edges, 'tcounts': counts, 'num_t': counts.sum()} + logger.info('Binned template counts: %s', counts) + + return bank_data
+ + + +
+[docs] +def noise_density_from_far(far, exp_fac): + """ + Exponential model of noise rate density per time per (reweighted) SNR + b(rho) ~ k exp(-alpha * rho), + where alpha is the 'index', yields the relation + b(rho) = alpha * FAR(rho), + where FAR is the integral of b(rho) from rho to infinity. + E.g. fits to single-ifo noise typically yield alpha ~ 6 + """ + return exp_fac * far
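As a sketch, a FAR of 1e-7 Hz combined with the exponential index of 6 assumed elsewhere in this module gives a noise rate density per year per unit SNR:

    far_hz = 1e-7                                        # illustrative FAR in Hz
    dnoise = noise_density_from_far(far_hz, 6.) * _s_per_yr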
+ + + +def trials_type(ntriggered, nactive): + """ + The trials factor previously applied to an individual event type FAR + For single triggers, the factor is the number of active ifos + For coincs, the factor is either 1 (in double time) or 6 (in triple time) + 6 accounts for both the trials over coinc type and pvalue (non-)followup + %%% NOTE - ONLY VALID FOR 2- OR 3-IFO SEARCH %%% + """ + if ntriggered == 1: + return nactive + if ntriggered == 2 and nactive == 2: + return 1 + if ntriggered == 2 and nactive == 3: + return 6 + # All valid inputs are exhausted, throw an error + raise ValueError(f"I don't know what to do with {ntriggered} triggered and" + f" {nactive} active ifos!") + + +
+[docs] +def signal_pdf_from_snr(netsnr, thresh): + """ FGMC approximate signal distribution ~ SNR ** -4 + """ + return numpy.exp(fgmcfun.log_rho_fg_analytic(netsnr, thresh))
+ + + +
+[docs] +def signal_rate_rescale(horizons, ref_dhor): + """ + Compute a factor proportional to the rate of signals with given network SNR + to account for network sensitivity variation relative to a reference state + """ + # Combine sensitivities over ifos in a way analogous to network SNR + net_horizon = sum(hor ** 2. for hor in horizons.values()) ** 0.5 + # signal rate is proportional to horizon distance cubed + return net_horizon ** 3. / ref_dhor ** 3.
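Sketch of a call, with made-up BNS horizon distances in Mpc keyed on ifo and a reference horizon of 130 Mpc:

    horizons = {'H1': 120., 'L1': 150., 'V1': 50.}
    rescale = signal_rate_rescale(horizons, 130.)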
+ + + +
+[docs] +def signal_rate_trig_type(horizons, sens_ifos, trig_ifos): + """ + Compute a factor accounting for the fraction of signals seen as a given + trigger type + """ + # Single-ifo time + if len(sens_ifos) == 1: + assert len(trig_ifos) == 1 + return 1. + # Single trigger in multi-ifo time + if len(trig_ifos) == 1: + # Sensitive volume scales with horizon^3 + # Suppress horizon by sqrt(2) wrt coincs + return (horizons[trig_ifos[0]] / 2**0.5) ** 3. /\ + sum([horizons[i] ** 3. for i in sens_ifos]) + # Double coinc : volume determined by less sensitive ifo + # Compare to 2nd most sensitive ifo over the observing network + return sorted([horizons[i] for i in trig_ifos])[0] ** 3. /\ + sorted([horizons[i] for i in sens_ifos])[-2] ** 3.
+ + + +
+[docs] +def template_param_bin_pa(padata, trdata, horizons): + """ + Parameters + ---------- + padata: PAstroData instance + Static information on p astro calculation + trdata: dictionary + Trigger properties + horizons: dictionary + BNS horizon distances keyed on ifo + + Returns + ------- + p_astro, p_terr: tuple of floats + """ + massspin = (trdata['mass1'], trdata['mass2'], + trdata['spin1z'], trdata['spin2z']) + trig_param = triggers.get_param(padata.spec['param'], None, *massspin) + # NB digitize gives '1' for first bin, '2' for second etc. + bind = numpy.digitize(trig_param, padata.bank['bin_edges']) - 1 + logger.debug('Trigger %s is in bin %i', padata.spec['param'], bind) + + # Get noise rate density + if 'bg_fac' not in padata.spec: + expfac = 6. + else: + expfac = padata.spec['bg_fac'] + + # FAR is in Hz, therefore convert to rate per year (per SNR) + dnoise = noise_density_from_far(trdata['far'], expfac) * _s_per_yr + logger.debug('FAR %.3g, noise density per yr per SNR %.3g', + trdata['far'], dnoise) + # Scale by fraction of templates in bin + dnoise *= padata.bank['tcounts'][bind] / padata.bank['num_t'] + logger.debug('Noise density in bin %.3g', dnoise) + + # Get signal rate density per year at given SNR + dsig = signal_pdf_from_snr(trdata['network_snr'], + padata.spec['netsnr_thresh']) + logger.debug('SNR %.3g, signal pdf %.3g', trdata['network_snr'], dsig) + dsig *= padata.spec['sig_per_yr_binned'][bind] + logger.debug('Signal density per yr per SNR in bin %.3g', dsig) + # Scale by network sensitivity accounting for BNS horizon distances + dsig *= signal_rate_rescale(horizons, padata.spec['ref_bns_horizon']) + logger.debug('After horizon rescaling %.3g', dsig) + + p_astro = dsig / (dsig + dnoise) + logger.debug('p_astro %.4g', p_astro) + return p_astro, 1 - p_astro
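Sketch of the inputs this function expects, with placeholder numbers; padata is assumed to be a PAstroData instance already built from a spec file and template bank.

    trigger_data = {'mass1': 30., 'mass2': 25., 'spin1z': 0., 'spin2z': 0.,
                    'far': 1e-8, 'network_snr': 12.}
    horizons = {'H1': 120., 'L1': 150.}
    p_astro, p_terr = template_param_bin_pa(padata, trigger_data, horizons)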
+ + + +
+[docs] +def template_param_bin_types_pa(padata, trdata, horizons): + """ + Parameters + ---------- + padata: PAstroData instance + Static information on p astro calculation + trdata: dictionary + Trigger properties + horizons: dictionary + BNS horizon distances keyed on ifo + + Returns + ------- + p_astro, p_terr: tuple of floats + """ + massspin = (trdata['mass1'], trdata['mass2'], + trdata['spin1z'], trdata['spin2z']) + trig_param = triggers.get_param(padata.spec['param'], None, *massspin) + # NB digitize gives '1' for first bin, '2' for second etc. + bind = numpy.digitize(trig_param, padata.bank['bin_edges']) - 1 + logger.debug('Trigger %s is in bin %i', padata.spec['param'], bind) + + # Get noise rate density + if 'bg_fac' not in padata.spec: + expfac = 6. + else: + expfac = padata.spec['bg_fac'] + + # List of ifos over trigger threshold + tr_ifos = trdata['triggered'] + + # FAR is in Hz, therefore convert to rate per year (per SNR) + dnoise = noise_density_from_far(trdata['far'], expfac) * _s_per_yr + logger.debug('FAR %.3g, noise density per yr per SNR %.3g', + trdata['far'], dnoise) + # Scale by fraction of templates in bin + dnoise *= padata.bank['tcounts'][bind] / padata.bank['num_t'] + logger.debug('Noise density in bin %.3g', dnoise) + # Back out trials factor to give noise density for triggered event type + dnoise /= float(trials_type(len(tr_ifos), len(trdata['sensitive']))) + logger.debug('Divide by previously applied trials factor: %.3g', dnoise) + + # Get signal rate density per year at given SNR + dsig = signal_pdf_from_snr(trdata['network_snr'], + padata.spec['netsnr_thresh']) + logger.debug('SNR %.3g, signal pdf %.3g', trdata['network_snr'], dsig) + dsig *= padata.spec['sig_per_yr_binned'][bind] + logger.debug('Total signal density per yr per SNR in bin %.3g', dsig) + # Scale by network sensitivity accounting for BNS horizons + dsig *= signal_rate_rescale(horizons, padata.spec['ref_bns_horizon']) + logger.debug('After network horizon rescaling %.3g', dsig) + # Scale by relative signal rate in triggered ifos + dsig *= signal_rate_trig_type(horizons, trdata['sensitive'], tr_ifos) + logger.debug('After triggered ifo rate rescaling %.3g', dsig) + + p_astro = dsig / (dsig + dnoise) + logger.debug('p_astro %.4g', p_astro) + return p_astro, 1 - p_astro
+ + + +
+[docs] +def template_param_bin_types_farlim_pa(padata, trdata, horizons): + """ + Parameters + ---------- + padata: PAstroData instance + Static information on p astro calculation + trdata: dictionary + Trigger properties + horizons: dictionary + BNS horizon distances keyed on ifo + + Returns + ------- + p_astro, p_terr: tuple of floats + """ + # If the network SNR and FAR indicate saturation of the FAR estimate, + # set them to specified fixed values + trdata = padata.apply_significance_limits(trdata) + + # Now perform standard calculation with event types + return template_param_bin_types_pa(padata, trdata, horizons)
+ + + +__all__ = [ + "check_template_param_bin_data", + "read_template_bank_param", + "noise_density_from_far", + "signal_pdf_from_snr", + "signal_rate_rescale", + "signal_rate_trig_type", + "template_param_bin_pa", + "template_param_bin_types_pa", + "template_param_bin_types_farlim_pa", +] +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/population/live_pastro_utils.html b/latest/html/_modules/pycbc/population/live_pastro_utils.html new file mode 100644 index 00000000000..ce03d04a147 --- /dev/null +++ b/latest/html/_modules/pycbc/population/live_pastro_utils.html @@ -0,0 +1,259 @@ + + + + + + pycbc.population.live_pastro_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.population.live_pastro_utils

+import logging
+import json
+from . import live_pastro as livepa
+
+logger = logging.getLogger('pycbc.population.live_pastro_utils')
+
+
+
+[docs] +def insert_live_pastro_option_group(parser): + """ Add low-latency p astro options to the argparser object. + + Parameters + ---------- + parser : object + ArgumentParser instance. + + Returns + ------- + live_pastro_group : + Argument group object + """ + + live_pastro_group = parser.add_argument_group('Options for live p_astro') + live_pastro_group.add_argument('--p-astro-spec', + help='File containing information to set ' + 'up p_astro calculation') + + return live_pastro_group
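A sketch of wiring this option group into an existing ArgumentParser; the spec and bank paths are hypothetical and must exist for PAstroData to read them.

    import argparse

    parser = argparse.ArgumentParser()
    insert_live_pastro_option_group(parser)
    args = parser.parse_args(['--p-astro-spec', 'p_astro_spec.json'])
    padata = PAstroData(args.p_astro_spec, 'bank.hdf')   # reads both files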
+ + + +# Choices of p astro calc method +_check_spec = { + 'template_param_bins': livepa.check_template_param_bin_data, + 'template_param_bins_types': livepa.check_template_param_bin_data, + 'template_param_bins_types_farlim': + livepa.check_template_param_bin_farlim_data +} + +_read_bank = { + 'template_param_bins': livepa.read_template_bank_param, + 'template_param_bins_types': livepa.read_template_bank_param, + 'template_param_bins_types_farlim': livepa.read_template_bank_param +} + +_do_calc = { + 'template_param_bins': livepa.template_param_bin_pa, + 'template_param_bins_types': livepa.template_param_bin_types_pa, + 'template_param_bins_types_farlim': + livepa.template_param_bin_types_farlim_pa +} + + +
+[docs] +class PAstroData(): + """ Class for managing live p_astro calculation persistent info """ + def __init__(self, specfile, bank): + """ + Read in spec file and extract relevant info from bank + + Parameters + ---------- + specfile: str + Path to file giving method and static data used in calculation + bank: str + Path to hdf template bank file + """ + if specfile is None: + self.do = False + else: + self.do = True + + with open(specfile) as specf: + self.spec_json = json.load(specf) + try: + self.method = self.spec_json['method'] + except KeyError as ke: + raise ValueError("Can't find 'method' in p_astro spec file!") \ + from ke + logger.info('Setting up p_astro data with method %s', self.method) + self.spec = _check_spec[self.method](self.spec_json) + self.bank = _read_bank[self.method](self.spec, bank) + +
+[docs] + def apply_significance_limits(self, trigger_data): + """ + If the network SNR and FAR indicate saturation of the FAR estimate, + set them to the fixed values given in the specification. + """ + # This only happens for double or triple events + if len(trigger_data['triggered']) == 1: + return trigger_data + + if len(trigger_data['triggered']) > 1: + farlim = self.spec['limit_far'] + snrlim = self.spec['limit_snr'] + # Only do anything if FAR and SNR are beyond given limits + if trigger_data['far'] > farlim or \ + trigger_data['network_snr'] < snrlim: + return trigger_data + + logger.debug('Truncating FAR and SNR from %f, %f to %f, %f', + trigger_data['far'], trigger_data['network_snr'], + farlim, snrlim) + trigger_data['network_snr'] = snrlim + trigger_data['far'] = farlim + return trigger_data + + raise RuntimeError('Number of triggered ifos must be >0 !')
+ + +
+[docs] + def do_pastro_calc(self, trigger_data, horizons): + """ No-op, or call the despatch dictionary to evaluate p_astro """ + if not self.do: + return None, None + + logger.info('Computing p_astro') + p_astro, p_terr = _do_calc[self.method](self, trigger_data, horizons) + return p_astro, p_terr
+
+ + + +__all__ = [ + "insert_live_pastro_option_group", + "PAstroData" +] +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/population/population_models.html b/latest/html/_modules/pycbc/population/population_models.html new file mode 100644 index 00000000000..e42fa8aaadc --- /dev/null +++ b/latest/html/_modules/pycbc/population/population_models.html @@ -0,0 +1,673 @@ + + + + + + pycbc.population.population_models — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.population.population_models

+# Copyright (C) 2021  Shichao Wu, Alex Nitz, Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides functions for star formation rate models, time delay
+models, merger rate density, and population models of BBH/BNS/NSBH.
+"""
+
+from functools import partial
+import numpy as np
+import scipy.integrate as scipy_integrate
+import scipy.interpolate as scipy_interpolate
+from astropy import units
+from pycbc.cosmology import get_cosmology
+from pycbc.cosmology import cosmological_quantity_from_redshift
+
+
+
+[docs] +def sfr_grb_2008(z): + r""" The star formation rate (SFR) calibrated by high-z GRBs data. + + Parameters + ---------- + z : float + The redshift. + + Returns + ------- + rho_z : float + The SFR at redshift z, in unit of "Msolar/Mpc^3/yr". + + Note + ---- + Please see Eq.(5) in <arXiv:0804.4008> for more details. + """ + + rho_local = 0.02 # Msolar/yr/Mpc^3 + eta = -10 + + rho_z = rho_local*((1+z)**(3.4*eta) + ((1+z)/5000)**(-0.3*eta) + + ((1+z)/9)**(-3.5*eta))**(1./eta) + return rho_z
+ + + +
+[docs] +def sfr_madau_dickinson_2014(z): + r""" The Madau-Dickinson 2014 star formation rate (SFR). + + Parameters + ---------- + z : float + The redshift. + + Returns + ------- + rho_z : float + The SFR at redshift z, in unit of "Msolar/Mpc^3/yr". + + Notes + ----- + Please see Eq.(15) in <arXiv:1403.0007> for more details. + """ + + rho_z = 0.015 * (1+z)**2.7 / (1 + ((1+z)/2.9)**5.6) + return rho_z
+ + + +
+[docs] +def sfr_madau_fragos_2017(z, k_imf=0.66, mode='high'): + r""" The Madau-Fragos 2017 star formation rate (SFR), + which updates the Madau-Dickinson 2014 SFR by better reproducing + a number of recent 4 < z < 10 results. + + Parameters + ---------- + z : float + The redshift. + k_imf : float + The correction factor k_imf adjusts the SFR for the assumed IMF: + for the Salpeter IMF, k_imf=1.0; for the three-component broken + power-law Kroupa IMF, k_imf=0.66, which is the default here. + mode : string + The SFR model, choose from 'high' and 'low'. Default is 'high'. + + Returns + ------- + rho_z : float + The SFR at redshift z, in unit of "Msolar/Mpc^3/yr". + + Notes + ----- + Please see <arXiv:1606.07887> and <arXiv:1706.07053> for more details. + """ + + if mode == 'low': + factor_a = 2.6 + factor_b = 3.2 + factor_c = 6.2 + elif mode == 'high': + factor_a = 2.7 + factor_b = 3.0 + factor_c = 5.35 + else: + raise ValueError("'mode' must choose from 'high' or 'low'.") + rho_z = k_imf * 0.015 * (1+z)**factor_a / (1 + ((1+z)/factor_b)**factor_c) + + return rho_z
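As a quick sanity check (a sketch), the three SFR models above can be compared at a single redshift; the returned values are in Msolar/Mpc^3/yr.

    z = 2.0
    print(sfr_grb_2008(z), sfr_madau_dickinson_2014(z), sfr_madau_fragos_2017(z))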
+ + + +
+[docs] +def diff_lookback_time(z, **kwargs): + r""" The derivative of lookback time t(z) + with respect to redshift z. + + Parameters + ---------- + z : float + The redshift. + \**kwargs : + All other keyword args are passed to :py:func:`get_cosmology` to + select a cosmology. If none provided, will use + :py:attr:`DEFAULT_COSMOLOGY`. + + Returns + ------- + dt_dz : float + The value of dt/dz at the redshift z. + + Notes + ----- + Please see Eq.(A3) in <arXiv:2011.02717v3> for more details. + """ + from sympy import sqrt + + cosmology = get_cosmology(**kwargs) + H0 = cosmology.H0.value * \ + (3.0856776E+19)**(-1)/(1/24/3600/365*1e-9) # Gyr^-1 + dt_dz = 1/H0/(1+z)/sqrt((cosmology.Ode0+cosmology.Om0*(1+z)**3)) + return dt_dz
+ + + +
+[docs] +def p_tau(tau, td_model="inverse"): + r""" The probability distribution of the time delay. + + Parameters + ---------- + tau : float + The merger delay time from the + formation of the binary system and the orbital + decay timescale through gravitational wave radiation. + td_model : str + The time delay model. + + Returns + ------- + p_t : float + The probability at time delay tau. + + Notes + ----- + Pease see the Appendix in <arXiv:2011.02717v3> for more details. + """ + from sympy import sqrt, exp, log + + if td_model == "log_normal": + t_ln = 2.9 # Gyr + sigma_ln = 0.2 + p_t = exp(-(log(tau)-log(t_ln))**2/(2*sigma_ln**2)) / \ + (sqrt(2*np.pi)*sigma_ln) + elif td_model == "gaussian": + t_g = 2 # Gyr + sigma_g = 0.3 + p_t = exp(-(tau-t_g)**2/(2*sigma_g**2)) / (sqrt(2*np.pi)*sigma_g) + elif td_model == "power_law": + alpha_t = 0.81 + p_t = tau**(-alpha_t) + elif td_model == "inverse": + p_t = tau**(-0.999) # Try to avoid dividing zero. + else: + raise ValueError("'model' must choose from \ + ['log_normal', 'gaussian', 'power_law', 'inverse'].") + + return p_t
+ + + +def convolution_trans(sfr, diff_lookback_t, model_td, **kwargs): + r""" This function is used in a symbolic integral, which to calculate + the merger rate density of CBC sources. This function converts the + convolution of the star formation rate SFR(tau) and the time delay + probability P(tau) on the time delay 'tau' into the convolution on + the redshift 'z'. + + Parameters + ---------- + sfr : function + The star formation rate function used in the convolution. + diff_lookback_t : function + The derivative of lookback time t(z) + with respect to redshit z. + model_td : str + The name of time delay model. + \**kwargs : + All other keyword args are passed to :py:func:`get_cosmology` to + select a cosmology. If none provided, will use + :py:attr:`DEFAULT_COSMOLOGY`. + + Returns + ------- + func : sympy.core.symbol.Symbol + The product of SFR(z), P(tau(z)) and dt(z)/dz. + + Notes + ----- + Pease see Eq.(A2) in <arXiv:2011.02717v3> for more details. + """ + from sympy import integrate, symbols + + if model_td not in ['log_normal', 'gaussian', 'power_law', 'inverse']: + raise ValueError("'model_td' must choose from \ + ['log_normal', 'gaussian', 'power_law', 'inverse'].") + + # Fix the cosmology, set 'z/z_0' to be the only + # parameter in the symbolic integration. + diff_lookback_time_z = partial(diff_lookback_t, **kwargs) + z = symbols('z') + z_0 = symbols('z_0') + tau = integrate(diff_lookback_time_z(z), (z, z_0, z)) + func = sfr(z) * p_tau(tau, model_td) * diff_lookback_time_z(z) + return func + + +
+[docs] +def merger_rate_density(sfr_func, td_model, rho_local, maxz=10.0, + npoints=10000, z_array=None, **kwargs): + r""" This function uses the symbolic integral to calculate + the merger rate density of CBC sources. This function converts the + convolution of the star formation rate SFR(tau) and the time delay + probability P(tau) on the time delay 'tau' into the convolution on + the redshift 'z'. This function relies on `convolution_trans`. + + Parameters + ---------- + sfr_func : function + The star formation rate function used in the convolution. + td_model : str + The name of time delay model. + rho_local : float + The local merger rate of a certain type of CBC source. + In the unit of "Mpc^-3yr^-1". + maxz : float + The max redshift. The default value is 10. + npoints : int + The number of points used in the interpolation. The default + value is 10000. + z_array : numpy.array + The array of redshift. The default value is None. + \**kwargs : + All other keyword args are passed to :py:func:`get_cosmology` to + select a cosmology. If none provided, will use + :py:attr:`DEFAULT_COSMOLOGY`. + + Returns + ------- + rho_z : scipy.interpolate.interp1d + The merger rate density. + + Notes + ----- + Pease see Eq.(A1), Eq.(A2) in <arXiv:2011.02717v3> for more details. + """ + from sympy import symbols, lambdify + + if z_array is None: + z_array = np.linspace(0, maxz, npoints) + + if td_model not in ['log_normal', 'gaussian', 'power_law', 'inverse']: + raise ValueError("'td_model' must choose from \ + ['log_normal', 'gaussian', 'power_law', 'inverse'].") + + z = symbols('z') + z_0 = symbols('z_0') + f_z = np.zeros(len(z_array)) + + func_1 = convolution_trans( + sfr=sfr_func, diff_lookback_t=diff_lookback_time, + model_td=td_model, **kwargs) + for i in range(len(z_array)): + func_2 = lambdify(z, func_1.subs(z_0, z_array[i]), 'scipy') + f_z[i] = scipy_integrate.quad( + func_2, z_array[i], np.inf, epsabs=1.49e-3)[0] + + f_z = f_z/f_z[0]*rho_local # Normalize & Rescale + rho_z = scipy_interpolate.interp1d(z_array, f_z) + return rho_z
+ + + +
+[docs] +def coalescence_rate(rate_den, maxz=10.0, npoints=10000, + z_array=None, **kwargs): + r""" This function calculates the coalescence(merger) rate at the redshift z. + + Parameters + ---------- + rate_den : function or scipy.interpolate.interp1d + The merger rate density as a function of redshift. In the unit of + "Mpc^-3yr^-1". Use `merger_rate_density` function to calculate. + maxz : float + The max redshift. The default value is 10. + npoints : int + The number of points used in the interpolation. The default + value is 10000. + z_array : numpy.array or list + The redshift range. The default value is None. + \**kwargs : + All other keyword args are passed to :py:func:`get_cosmology` to + select a cosmology. If none provided, will use + :py:attr:`DEFAULT_COSMOLOGY`. + + Returns + ------- + coalescence_rate_interp : scipy.interpolate.interp1d + The coalescence rate. + + Notes + ----- + Pease see Eq.(1) in <arXiv:2011.02717v3> for more details. + """ + + if z_array is None: + z_array = np.linspace(0, maxz, npoints) + + dr_dz = [] + cosmology = get_cosmology(**kwargs) + + for z in z_array: + dr = cosmology.differential_comoving_volume(z) / (1+z) + dr_dz.append((dr*4*np.pi*units.sr*rate_den(z)*(units.Mpc)**(-3)).value) + + coalescence_rate_interp = scipy_interpolate.interp1d( + z_array, dr_dz, fill_value='extrapolate') + + return coalescence_rate_interp
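Sketch of chaining the two functions above: build a merger rate density from the Madau-Dickinson SFR with the inverse time-delay model, then convert it into a coalescence rate. The local rate and grid size are placeholders, and the symbolic integration can be slow.

    rho_z = merger_rate_density(sfr_madau_dickinson_2014, 'inverse',
                                rho_local=2e-8, maxz=10., npoints=200)
    rate = coalescence_rate(rho_z, maxz=10., npoints=200)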
+ + + +
+[docs] +def total_rate_upto_redshift(z, merger_rate): + r"""Total rate of occurrences out to some redshift. + + Parameters + ---------- + z : int, float, tuple, numpy.ndarray or list + The redshift. + merger_rate : scipy.interpolate.interp1d or function + The merger or coalescence rate. Function should take the + redshift as a single argument. Provided by users or + calculated by the `coalescence_rate` function. + + Returns + ------- + rate: float or list + The total rate of occurrences out to some redshift. In the + unit of "yr^-1". + """ + + if isinstance(z, (float, int)): + total_rate = scipy_integrate.quad( + merger_rate, 0, z, + epsabs=2.00e-4, epsrel=2.00e-4, limit=1000)[0] + elif isinstance(z, (tuple, np.ndarray, list)): + total_rate = [] + for redshift in z: + total_rate.append( + scipy_integrate.quad( + merger_rate, 0, redshift, + epsabs=2.00e-4, epsrel=2.00e-4, limit=1000)[0] + ) + else: + raise ValueError("'z' must be 'int', 'float', 'tuple', \ + 'numpy.ndarray' or 'list'.") + + return total_rate
+ + + +
+[docs] +def average_time_between_signals(z_array, merger_rate): + r""" This function calculates the average time interval + of a certain type of CBC source. + + Parameters + ---------- + z_array : numpy.array + The array of redshift. + merger_rate : scipy.interpolate.interp1d or function + The coalescence rate. Provided by users or calculated by + the `coalescence_rate` function. + + Returns + ------- + average_time : float + The average time interval (s). + """ + + total_rate = total_rate_upto_redshift( + z_array[-1], merger_rate) # yr^-1 + average_time = 1./total_rate * 365*24*3600 # s + return average_time
+ + + +
+[docs] +def norm_redshift_distribution(z_array, merger_rate): + r""" This function calculates the normalized redshift distribution + of a certain type of CBC source. + + Parameters + ---------- + z_array : numpy.array + The array of redshift. + merger_rate : scipy.interpolate.interp1d or function + The coalescence rate. Provided by users or calculated by + the `coalescence_rate` function. + + Returns + ------- + norm_coalescence_rate : numpy.array + The normalized redshift distribution. + + Notes + ----- + The can be used as a population-informed prior for redshift + and luminosity distance of CBC sources. + """ + + lambd = average_time_between_signals(z_array, merger_rate) + norm_coalescence_rate = lambd/(365*24*3600) * merger_rate(z_array) + return norm_coalescence_rate
+ + + +
+[docs] +def distance_from_rate( + total_rate, merger_rate, maxz=10, npoints=10000, **kwargs): + r"""Returns the luminosity distance from the given total rate value. + + Parameters + ---------- + total_rate : float + The total rate. + merger_rate : scipy.interpolate.interp1d or function + The coalescence rate. Provided by users or calculated by + the `coalescence_rate` function. + maxz : float + The max redshift in the interpolation, the default value is 10. + Please use the same `maxz` as in `merger_rate`. + npoints : int + The number of points used in the interpolation, the default value + is 10000. + \**kwargs : + All other keyword args are passed to :py:func:`get_cosmology` to + select a cosmology. If none provided, will use + :py:attr:`DEFAULT_COSMOLOGY`. + + Returns + ------- + dl : float + The luminosity distance at the given total rate value. + In the unit of "Mpc". + + Notes + ----- + This can be used in a population-informed prior for redshift + and luminosity distance of CBC sources. When this used in + high redshift range, please first use the `total_rate_upto_redshift` + function to plot the curve and find the point where the curve + starts to stay almost horizontal, then set `maxz` to the + corresponding value and change `npoints` to a reasonable value. + """ + cosmology = get_cosmology(**kwargs) + + if not hasattr(merger_rate, 'dist_interp'): + merger_rate.dist_interp = {} + + if ((cosmology.name not in merger_rate.dist_interp) or + (len(merger_rate.dist_interp[cosmology.name].x) != npoints)): + def rate_func(redshift): + return total_rate_upto_redshift(redshift, merger_rate) + + z_array = np.linspace(0, maxz, npoints) + dists = cosmological_quantity_from_redshift( + z_array, 'luminosity_distance', **kwargs) + total_rates = rate_func(z_array) + interp = scipy_interpolate.interp1d(total_rates, dists) + merger_rate.dist_interp[cosmology.name] = interp + + dl = merger_rate.dist_interp[cosmology.name](total_rate) + if np.isscalar(dl): + dl = float(dl) + return dl
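Sketch with a toy, constant-in-redshift coalescence rate standing in for a real interpolant from coalescence_rate; the numbers are arbitrary and only illustrate inverting a cumulative rate into a luminosity distance.

    import numpy as np
    from scipy.interpolate import interp1d

    z_grid = np.linspace(0., 10., 101)
    toy_rate = interp1d(z_grid, np.full_like(z_grid, 5.))   # 5 per year per unit redshift (toy)
    dl = distance_from_rate(20., toy_rate, maxz=10, npoints=200)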
+ + + +__all__ = ['sfr_grb_2008', 'sfr_madau_dickinson_2014', + 'sfr_madau_fragos_2017', 'diff_lookback_time', + 'p_tau', 'merger_rate_density', 'coalescence_rate', + 'norm_redshift_distribution', 'total_rate_upto_redshift', + 'distance_from_rate', 'average_time_between_signals'] +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/population/rates_functions.html b/latest/html/_modules/pycbc/population/rates_functions.html new file mode 100644 index 00000000000..ba3d81ab4c8 --- /dev/null +++ b/latest/html/_modules/pycbc/population/rates_functions.html @@ -0,0 +1,666 @@ + + + + + + pycbc.population.rates_functions — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.population.rates_functions

+"""
+A set of helper functions for evaluating rates.
+"""
+
+import numpy as np
+from numpy import log
+from scipy import integrate, optimize
+import scipy.stats as ss
+
+from pycbc.conversions import mchirp_from_mass1_mass2
+from pycbc.io.hdf import HFile
+
+
+
+[docs] +def process_full_data(fname, rhomin, mass1, mass2, lo_mchirp, hi_mchirp): + """Read the zero-lag and time-lag triggers identified by templates in + a specified range of chirp mass. + + Parameters + ---------- + fname: string + File that stores all the triggers + rhomin: float + Minimum value of SNR threshold (will need including ifar) + mass1: array + First mass of the waveform in the template bank + mass2: array + Second mass of the waveform in the template bank + lo_mchirp: float + Minimum chirp mass for the template + hi_mchirp: float + Maximum chirp mass for the template + + Returns + ------- + dictionary + containing foreground triggers and background information + """ + with HFile(fname, 'r') as bulk: + + id_bkg = bulk['background_exc/template_id'][:] + id_fg = bulk['foreground/template_id'][:] + + mchirp_bkg = mchirp_from_mass1_mass2(mass1[id_bkg], mass2[id_bkg]) + bound = np.sign((mchirp_bkg - lo_mchirp) * (hi_mchirp - mchirp_bkg)) + idx_bkg = np.where(bound == 1) + mchirp_fg = mchirp_from_mass1_mass2(mass1[id_fg], mass2[id_fg]) + bound = np.sign((mchirp_fg - lo_mchirp) * (hi_mchirp - mchirp_fg)) + idx_fg = np.where(bound == 1) + + zerolagstat = bulk['foreground/stat'][:][idx_fg] + cstat_back_exc = bulk['background_exc/stat'][:][idx_bkg] + dec_factors = bulk['background_exc/decimation_factor'][:][idx_bkg] + + return {'zerolagstat': zerolagstat[zerolagstat > rhomin], + 'dec_factors': dec_factors[cstat_back_exc > rhomin], + 'cstat_back_exc': cstat_back_exc[cstat_back_exc > rhomin]}
+ + + +
+[docs] +def save_bkg_falloff(fname_statmap, fname_bank, path, rhomin, lo_mchirp, hi_mchirp): + ''' Read the STATMAP files to derive snr falloff for the background events. + Save the output to a txt file + Bank file is also provided to restrict triggers to BBH templates. + + Parameters + ---------- + fname_statmap: string + STATMAP file containing trigger information + fname_bank: string + File name of the template bank + path: string + Destination where txt file is saved + rhomin: float + Minimum value of SNR threhold (will need including ifar) + lo_mchirp: float + Minimum chirp mass for the template + hi_mchirp: float + Maximum chirp mass for template + ''' + + with HFile(fname_bank, 'r') as bulk: + mass1_bank = bulk['mass1'][:] + mass2_bank = bulk['mass2'][:] + full_data = process_full_data(fname_statmap, rhomin, + mass1_bank, mass2_bank, lo_mchirp, hi_mchirp) + + max_bg_stat = np.max(full_data['cstat_back_exc']) + bg_bins = np.linspace(rhomin, max_bg_stat, 76) + bg_counts = np.histogram(full_data['cstat_back_exc'], + weights=full_data['dec_factors'], bins=bg_bins)[0] + + zerolagstat = full_data['zerolagstat'] + coincs = zerolagstat[zerolagstat >= rhomin] + + bkg = (bg_bins[:-1], bg_bins[1:], bg_counts) + + return bkg, coincs
+ + + +
+[docs] +def log_rho_fgmc(t, injstats, bins): + counts, bins = np.histogram(injstats, bins) + + N = sum(counts) + dens = counts / np.diff(bins) / float(N) + + assert np.min(t) >= np.min(bins) + assert np.max(t) < np.max(bins) + + tinds = np.searchsorted(bins, t) - 1 + + return log(dens[tinds])
+ + + +
+[docs] +def fgmc(log_fg_ratios, mu_log_vt, sigma_log_vt, Rf, maxfg): + ''' + Function to fit the likelihood Fixme + ''' + + Lb = np.random.uniform(0., maxfg, len(Rf)) + pquit = 0 + + while pquit < 0.1: + # quit when the posterior on Lf is very close to its prior + + nsamp = len(Lb) + Rf_sel = np.random.choice(Rf, nsamp) + vt = np.random.lognormal(mu_log_vt, sigma_log_vt, len(Rf_sel)) + + Lf = Rf_sel * vt + + log_Lf, log_Lb = log(Lf), log(Lb) + + plR = 0 + for lfr in log_fg_ratios: + plR += np.logaddexp(lfr + log_Lf, log_Lb) + + plR -= (Lf + Lb) + plRn = plR - max(plR) + + idx = np.exp(plRn) > np.random.random(len(plRn)) + + pquit = ss.stats.ks_2samp(Lb, Lb[idx])[1] + + Lb = Lb[idx] + + return Rf_sel[idx], Lf[idx], Lb
+ + + +def _optm(x, alpha, mu, sigma): + '''Return probability density of skew-lognormal + See scipy.optimize.curve_fit + ''' + return ss.skewnorm.pdf(x, alpha, mu, sigma) + + +
+[docs] +def fit(R): + ''' Fit a skew-lognormal to the rate samples obtained from a prior analysis + Parameters + ---------- + R: array + Rate samples + Returns + ------- + ff[0]: float + The skewness + ff[1]: float + The mean + ff[2]: float + The standard deviation + ''' + + lR = np.log(R) + mu_norm, sigma_norm = np.mean(lR), np.std(lR) + + xs = np.linspace(min(lR), max(lR), 200) + kde = ss.gaussian_kde(lR) + pxs = kde(xs) + + # Initial guess has been taken as the mean and std-dev of the data + # And a guess assuming small skewness + ff = optimize.curve_fit(_optm, xs, pxs, p0 = [0.1, mu_norm, sigma_norm])[0] + return ff[0], ff[1], ff[2]
+ + + +
+[docs] +def skew_lognormal_samples(alpha, mu, sigma, minrp, maxrp): + ''' Returns a large number of Skew lognormal samples + Parameters + ---------- + alpha: float + Skewness of the distribution + mu: float + Mean of the distribution + sigma: float + Scale of the distribution + minrp: float + Minimum value for the samples + maxrp: float + Maximum value for the samples + Returns + ------- + Rfs: array + Large number of samples (may need fixing) + ''' + + nsamp = 100000000 + lRu = np.random.uniform(minrp, maxrp, nsamp) + plRu = ss.skewnorm.pdf(lRu, alpha, mu, sigma) + rndn = np.random.random(nsamp) + maxp = max(plRu) + idx = np.where(plRu/maxp > rndn) + log_Rf = lRu[idx] + Rfs = np.exp(log_Rf) + + return Rfs
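Sketch of the intended round trip: fit the skew-lognormal to toy rate samples, then redraw from the fit. The limits passed to skew_lognormal_samples are in log space, and the function draws a very large number of internal proposals, so it is memory-hungry.

    import numpy as np

    rate_samples = np.random.lognormal(mean=3., sigma=0.5, size=10000)  # toy posterior samples
    alpha, mu, sigma = fit(rate_samples)
    new_rates = skew_lognormal_samples(alpha, mu, sigma,
                                       np.log(rate_samples.min()),
                                       np.log(rate_samples.max()))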
+ + + +# The flat in log and power-law mass distribution models # + +# PDF for the two canonical models plus flat in mass model +
+[docs] +def prob_lnm(m1, m2, s1z, s2z, **kwargs): + ''' Return probability density for uniform in log + Parameters + ---------- + m1: array + Component masses 1 + m2: array + Component masses 2 + s1z: array + Aligned spin 1(Not in use currently) + s2z: + Aligned spin 2(Not in use currently) + **kwargs: string + Keyword arguments as model parameters + Returns + ------- + p_m1_m2: array + The probability density for m1, m2 pair + ''' + + min_mass = kwargs.get('min_mass', 5.) + max_mass = kwargs.get('max_mass', 95.) + max_mtotal = min_mass + max_mass + m1, m2 = np.array(m1), np.array(m2) + + C_lnm = integrate.quad(lambda x: (log(max_mtotal - x) - log(min_mass))/x, min_mass, max_mass)[0] + + xx = np.minimum(m1, m2) + m1 = np.maximum(m1, m2) + m2 = xx + + bound = np.sign(max_mtotal - m1 - m2) + bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass) + idx = np.where(bound != 2) + + p_m1_m2 = (1/C_lnm)*(1./m1)*(1./m2) + p_m1_m2[idx] = 0 + + return p_m1_m2
+ + + +
+[docs] +def prob_imf(m1, m2, s1z, s2z, **kwargs): + ''' Return probability density for power-law + Parameters + ---------- + m1: array + Component masses 1 + m2: array + Component masses 2 + s1z: array + Aligned spin 1(Not in use currently) + s2z: + Aligned spin 2(Not in use currently) + **kwargs: string + Keyword arguments as model parameters + + Returns + ------- + p_m1_m2: array + the probability density for m1, m2 pair + ''' + + min_mass = kwargs.get('min_mass', 5.) + max_mass = kwargs.get('max_mass', 95.) + alpha = kwargs.get('alpha', -2.35) + max_mtotal = min_mass + max_mass + m1, m2 = np.array(m1), np.array(m2) + + C_imf = max_mass**(alpha + 1)/(alpha + 1) + C_imf -= min_mass**(alpha + 1)/(alpha + 1) + + xx = np.minimum(m1, m2) + m1 = np.maximum(m1, m2) + m2 = xx + + bound = np.sign(max_mtotal - m1 - m2) + bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass) + idx = np.where(bound != 2) + + p_m1_m2 = np.zeros_like(m1) + idx = np.where(m1 <= max_mtotal/2.) + p_m1_m2[idx] = (1./C_imf) * m1[idx]**alpha /(m1[idx] - min_mass) + idx = np.where(m1 > max_mtotal/2.) + p_m1_m2[idx] = (1./C_imf) * m1[idx]**alpha /(max_mass - m1[idx]) + p_m1_m2[idx] = 0 + + return p_m1_m2/2.
+ + + +
+[docs] +def prob_flat(m1, m2, s1z, s2z, **kwargs): + ''' Return probability density for uniform in component mass + Parameters + ---------- + m1: array + Component masses 1 + m2: array + Component masses 2 + s1z: array + Aligned spin 1 (not in use currently) + s2z: + Aligned spin 2 (not in use currently) + **kwargs: string + Keyword arguments as model parameters + + Returns + ------- + p_m1_m2: array + the probability density for m1, m2 pair + ''' + + min_mass = kwargs.get('min_mass', 1.) + max_mass = kwargs.get('max_mass', 2.) + + bound = np.sign(m1 - m2) + bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass) + idx = np.where(bound != 2) + + p_m1_m2 = 2. / (max_mass - min_mass)**2 + p_m1_m2[idx] = 0 + + return p_m1_m2
+ + + +# Generate samples for the two canonical models plus flat in mass model +
+[docs] +def draw_imf_samples(**kwargs): + ''' Draw samples for power-law model + + Parameters + ---------- + **kwargs: string + Keyword arguments as model parameters and number of samples + + Returns + ------- + array + The first mass + array + The second mass + ''' + + alpha_salpeter = kwargs.get('alpha', -2.35) + nsamples = kwargs.get('nsamples', 1) + min_mass = kwargs.get('min_mass', 5.) + max_mass = kwargs.get('max_mass', 95.) + max_mtotal = min_mass + max_mass + + a = (max_mass/min_mass)**(alpha_salpeter + 1.0) - 1.0 + beta = 1.0 / (alpha_salpeter + 1.0) + + k = nsamples * int(1.5 + log(1 + 100./nsamples)) + aa = min_mass * (1.0 + a * np.random.random(k))**beta + bb = np.random.uniform(min_mass, aa, k) + + idx = np.where(aa + bb < max_mtotal) + m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx] + + return np.resize(m1, nsamples), np.resize(m2, nsamples)
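Sketch of drawing component masses from the power-law model; the mass range and index written out here match the function's own defaults.

    m1, m2 = draw_imf_samples(nsamples=1000, min_mass=5., max_mass=95., alpha=-2.35)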
+ + + +
+[docs] +def draw_lnm_samples(**kwargs): + ''' Draw samples for uniform-in-log model + + Parameters + ---------- + **kwargs: string + Keyword arguments as model parameters and number of samples + + Returns + ------- + array + The first mass + array + The second mass + ''' + + #PDF doesnt match with sampler + nsamples = kwargs.get('nsamples', 1) + min_mass = kwargs.get('min_mass', 5.) + max_mass = kwargs.get('max_mass', 95.) + max_mtotal = min_mass + max_mass + lnmmin = log(min_mass) + lnmmax = log(max_mass) + + k = nsamples * int(1.5 + log(1 + 100./nsamples)) + aa = np.exp(np.random.uniform(lnmmin, lnmmax, k)) + bb = np.exp(np.random.uniform(lnmmin, lnmmax, k)) + + idx = np.where(aa + bb < max_mtotal) + m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx] + + return np.resize(m1, nsamples), np.resize(m2, nsamples)
+ + + +
+[docs] +def draw_flat_samples(**kwargs): + ''' Draw samples for uniform in mass + + Parameters + ---------- + **kwargs: string + Keyword arguments as model parameters and number of samples + + Returns + ------- + array + The first mass + array + The second mass + ''' + + #PDF doesnt match with sampler + nsamples = kwargs.get('nsamples', 1) + min_mass = kwargs.get('min_mass', 1.) + max_mass = kwargs.get('max_mass', 2.) + + m1 = np.random.uniform(min_mass, max_mass, nsamples) + m2 = np.random.uniform(min_mass, max_mass, nsamples) + + return np.maximum(m1, m2), np.minimum(m1, m2)
+ + + +# Functions to generate chirp mass samples for the two canonical models +
+[docs] +def mchirp_sampler_lnm(**kwargs): + ''' Draw chirp mass samples for uniform-in-log model + + Parameters + ---------- + **kwargs: string + Keyword arguments as model parameters and number of samples + + Returns + ------- + mchirp-astro: array + The chirp mass samples for the population + ''' + m1, m2 = draw_lnm_samples(**kwargs) + mchirp_astro = mchirp_from_mass1_mass2(m1, m2) + + return mchirp_astro
+ + + +
+[docs] +def mchirp_sampler_imf(**kwargs): + ''' Draw chirp mass samples for power-law model + + Parameters + ---------- + **kwargs: string + Keyword arguments as model parameters and number of samples + + Returns + ------- + mchirp-astro: array + The chirp mass samples for the population + ''' + m1, m2 = draw_imf_samples(**kwargs) + mchirp_astro = mchirp_from_mass1_mass2(m1, m2) + + return mchirp_astro
+ + + +
+[docs] +def mchirp_sampler_flat(**kwargs): + ''' Draw chirp mass samples for flat in mass model + + Parameters + ---------- + **kwargs: string + Keyword arguments as model parameters and number of samples + + Returns + ------- + mchirp-astro: array + The chirp mass samples for the population + ''' + m1, m2 = draw_flat_samples(**kwargs) + mchirp_astro = mchirp_from_mass1_mass2(m1, m2) + + return mchirp_astro
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/population/scale_injections.html b/latest/html/_modules/pycbc/population/scale_injections.html new file mode 100644 index 00000000000..52548cdcf18 --- /dev/null +++ b/latest/html/_modules/pycbc/population/scale_injections.html @@ -0,0 +1,665 @@ + + + + + + pycbc.population.scale_injections — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.population.scale_injections

+import numpy as np
+from numpy import log
+import copy
+from scipy.interpolate import interp1d
+from scipy.integrate import quad
+from astropy.cosmology import WMAP9 as cosmo
+
+from pycbc.conversions import mchirp_from_mass1_mass2 as m1m2tomch
+from pycbc.io.hdf import HFile
+
+_mch_BNS = 1.4/2**.2
+_redshifts, _d_lum, _I = np.arange(0., 5., 0.01), [], []
+_save_params = ['mass1', 'mass2', 'spin1z', 'spin2z', 'spin1y', 'spin2y',
+                                'spin1x', 'spin2x', 'distance', 'end_time']
+
+for zz in _redshifts:
+    _d_lum.append(cosmo.luminosity_distance(zz).value)
+_dlum_interp = interp1d(_d_lum, _redshifts)
+
+
+[docs] +def read_injections(sim_files, m_dist, s_dist, d_dist): + ''' Read all the injections from the files in the provided folder. + The files must belong to individual set i.e. no files that combine + all the injections in a run. + Identify injection strategies and finds parameter boundaries. + Collect injection according to GPS. + + Parameters + ---------- + sim_files: list + List containign names of the simulation files + m_dist: list + The mass distribution used in the simulation runs + s_dist: list + The spin distribution used in the simulation runs + d_dist: list + The distance distribution used in the simulation runs + + Returns + ------- + injections: dictionary + Contains the organized information about the injections + ''' + + injections = {} + min_d, max_d = 1e12, 0 + nf = len(sim_files) + for i in range(nf): + + key = str(i) + injections[key] = process_injections(sim_files[i]) + injections[key]['file_name'] = sim_files[i] + injections[key]['m_dist'] = m_dist[i] + injections[key]['s_dist'] = s_dist[i] + injections[key]['d_dist'] = d_dist[i] + + mass1, mass2 = injections[key]['mass1'], injections[key]['mass2'] + distance = injections[key]['distance'] + + mchirp = m1m2tomch(mass1, mass2) + injections[key]['chirp_mass'] = mchirp + injections[key]['total_mass'] = mass1 + mass2 + + injections[key]['mtot_range'] = [min(mass1 + mass2), max(mass1 + mass2)] + injections[key]['m1_range'] = [min(mass1), max(mass1)] + injections[key]['m2_range'] = [min(mass2), max(mass2)] + injections[key]['d_range'] = [min(distance), max(distance)] + + min_d, max_d = min(min_d, min(distance)), max(max_d, max(distance)) + + injections['z_range'] = [dlum_to_z(min_d), dlum_to_z(max_d)] + + return injections
+ + +
+[docs] +def estimate_vt(injections, mchirp_sampler, model_pdf, **kwargs): + #Try including ifar threshold + '''Based on injection strategy and the desired astro model estimate the injected volume. + Scale injections and estimate sensitive volume. + + Parameters + ---------- + injections: dictionary + Dictionary obtained after reading injections from read_injections + mchirp_sampler: function + Sampler for producing chirp mass samples for the astro model. + model_pdf: function + The PDF for astro model in mass1-mass2-spin1z-spin2z space. + This is easily extendible to include precession + kwargs: key words + Inputs for thresholds and astrophysical models + + Returns + ------- + injection_chunks: dictionary + The input dictionary with VT and VT error included with the injections + ''' + + thr_var = kwargs.get('thr_var') + thr_val = kwargs.get('thr_val') + + nsamples = 1000000 #Used to calculate injected astro volume + injections = copy.deepcopy(injections) + min_z, max_z = injections['z_range'] + V = quad(contracted_dVdc, 0., max_z)[0] + + z_astro = astro_redshifts(min_z, max_z, nsamples) + astro_lum_dist = cosmo.luminosity_distance(z_astro).value + + mch_astro = np.array(mchirp_sampler(nsamples = nsamples, **kwargs)) + mch_astro_det = mch_astro * (1. + z_astro) + idx_within = np.zeros(nsamples) + + for key in injections.keys(): + + if key == 'z_range': + # This is repeated down again and is so + continue + + mchirp = injections[key]['chirp_mass'] + min_mchirp, max_mchirp = min(mchirp), max(mchirp) + distance = injections[key]['distance'] + + if injections[key]['d_dist'] == 'uniform': + d_min, d_max = min(distance), max(distance) + elif injections[key]['d_dist'] == 'dchirp': + d_fid_min = min(distance / (mchirp/_mch_BNS)**(5/6.)) + d_fid_max = max(distance / (mchirp/_mch_BNS)**(5/6.)) + + d_min = d_fid_min * (mch_astro_det/_mch_BNS)**(5/6.) + d_max = d_fid_max * (mch_astro_det/_mch_BNS)**(5/6.) 
+ + bound = np.sign((max_mchirp-mch_astro_det)*(mch_astro_det-min_mchirp)) + bound += np.sign((d_max - astro_lum_dist)*(astro_lum_dist - d_min)) + + idx = np.where(bound == 2) + idx_within[idx] = 1 + + inj_V0 = 4*np.pi*V*len(idx_within[idx_within == 1])/float(nsamples) + injections['inj_astro_vol'] = inj_V0 + + # Estimate the sensitive volume + z_range = injections['z_range'] + V_min = quad(contracted_dVdc, 0., z_range[0])[0] + V_max = quad(contracted_dVdc, 0., z_range[1])[0] + + thr_falloff, i_inj, i_det, i_det_sq = [], 0, 0, 0 + gps_min, gps_max = 1e15, 0 + keys = injections.keys() + for key in keys: + + if key == 'z_range' or key == 'inj_astro_vol': + continue + + data = injections[key] + distance = data['distance'] + mass1, mass2 = data['mass1'], data['mass2'] + spin1z, spin2z = data['spin1z'], data['spin2z'] + mchirp = data['chirp_mass'] + gps_min = min(gps_min, min(data['end_time'])) + gps_max = max(gps_max, max(data['end_time'])) + + z_inj = dlum_to_z(distance) + m1_sc, m2_sc = mass1/(1 + z_inj), mass2/(1 + z_inj) + p_out = model_pdf(m1_sc, m2_sc, spin1z, spin2z) + p_out *= pdf_z_astro(z_inj, V_min, V_max) + + p_in = 0 + J = cosmo.luminosity_distance(z_inj + 0.0005).value + J -= cosmo.luminosity_distance(z_inj - 0.0005).value + J = abs(J)/0.001 # A quick way to get dD_l/dz + + # Sum probability of injections from j-th set for all the strategies + for key2 in keys: + + if key2 == 'z_range' or key2 == 'inj_astro_vol': + continue + + dt_j = injections[key2] + dist_j = dt_j['distance'] + m1_j, m2_j = dt_j['mass1'], dt_j['mass2'] + s1x_2, s2x_2 = dt_j['spin1x'], dt_j['spin2x'] + s1y_2, s2y_2 = dt_j['spin1y'], dt_j['spin2y'] + s1z_2, s2z_2 = dt_j['spin1z'], dt_j['spin2z'] + s1 = np.sqrt(s1x_2**2 + s1y_2**2 + s1z_2**2) + s2 = np.sqrt(s2x_2**2 + s2y_2**2 + s2z_2**2) + mch_j = dt_j['chirp_mass'] + + #Get probability density for injections in mass-distance space + if dt_j['m_dist'] == 'totalMass': + lomass, himass = min(min(m1_j), min(m2_j), max(max(m1_j), max(m2_j))) + lomass_2, himass_2 = lomass, himass + elif dt_j['m_dist'] == 'componentMass' or dt_j['m_dist'] == 'log': + lomass, himass = min(m1_j), max(m1_j) + lomass_2, himass_2 = min(m2_j), max(m2_j) + + if dt_j['d_dist'] == 'dchirp': + l_dist = min(dist_j / (mch_j/_mch_BNS)**(5/6.)) + h_dist = max(dist_j / (mch_j/_mch_BNS)**(5/6.)) + elif dt_j['d_dist'] == 'uniform': + l_dist, h_dist = min(dist_j), max(dist_j) + + mdist = dt_j['m_dist'] + prob_mass = inj_mass_pdf(mdist, mass1, mass2, + lomass, himass, lomass_2, himass_2) + + ddist = dt_j['d_dist'] + prob_dist = inj_distance_pdf(ddist, distance, l_dist, + h_dist, mchirp) + + hspin1, hspin2 = max(s1), max(s2) + prob_spin = inj_spin_pdf(dt_j['s_dist'], hspin1, spin1z) + prob_spin *= inj_spin_pdf(dt_j['s_dist'], hspin2, spin2z) + + p_in += prob_mass * prob_dist * prob_spin * J * (1 + z_inj)**2 + + p_in[p_in == 0] = 1e12 + p_out_in = p_out/p_in + + i_inj += np.sum(p_out_in) + i_det += np.sum((p_out_in)[data[thr_var] > thr_val]) + i_det_sq += np.sum((p_out_in)[data[thr_var] > thr_val]**2) + + idx_thr = np.where(data[thr_var] > thr_val) + thrs = data[thr_var][idx_thr] + ratios = p_out_in[idx_thr]/max(p_out_in[idx_thr]) + rndn = np.random.uniform(0, 1, len(ratios)) + idx_ratio = np.where(ratios > rndn) + thr_falloff.append(thrs[idx_ratio]) + + inj_V0 = injections['inj_astro_vol'] + injections['ninj'] = i_inj + injections['ndet'] = i_det + injections['ndetsq'] = i_det_sq + injections['VT'] = ((inj_V0*i_det/i_inj) * (gps_max - gps_min)/31557600) + injections['VT_err'] = injections['VT'] * 
np.sqrt(i_det_sq)/i_det + injections['thr_falloff'] = np.hstack(np.array(thr_falloff).flat) + + return injections
+ + +
+[docs] +def process_injections(hdffile): + """Function to read in the injection file and + extract the found injections and all injections + + Parameters + ---------- + hdffile: hdf file + File for which injections are to be processed + + Returns + ------- + data: dictionary + Dictionary containing injection read from the input file + """ + data = {} + + with HFile(hdffile, 'r') as inp: + found_index = inp['found_after_vetoes/injection_index'][:] + + for param in _save_params: + data[param] = inp['injections/'+param][:] + + ifar = np.zeros_like(data[_save_params[0]]) + ifar[found_index] = inp['found_after_vetoes/ifar'][:] + + data['ifar'] = ifar + + stat = np.zeros_like(data[_save_params[0]]) + stat[found_index] = inp['found_after_vetoes/stat'][:] + + data['stat'] = stat + + return data
+ + +
+[docs] +def dlum_to_z(dl): + ''' Get the redshift for a luminosity distance + + Parameters + ---------- + dl: array + The array of luminosity distances + + Returns + ------- + array + The redshift values corresponding to the luminosity distances + ''' + + return _dlum_interp(dl)
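For example (a sketch), the redshift of a source at 400 Mpc under the module's WMAP9 cosmology; the interpolant covers luminosity distances tabulated for 0 < z < 5.

    import numpy as np

    z_single = dlum_to_z(400.)                          # scalar distance in Mpc
    z_array = dlum_to_z(np.array([100., 400., 1000.]))  # arrays also work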
+ + +
+[docs] +def astro_redshifts(min_z, max_z, nsamples): + '''Sample the redshifts for sources, with redshift + independent rate, using standard cosmology + + Parameters + ---------- + min_z: float + Minimum redshift + max_z: float + Maximum redshift + nsamples: int + Number of samples + + Returns + ------- + z_astro: array + nsamples of redshift, between min_z, max_z, by standard cosmology + ''' + + dz, fac = 0.001, 3.0 + # use interpolation instead of directly estimating all the pdfz for rndz + V = quad(contracted_dVdc, 0., max_z)[0] + zbins = np.arange(min_z, max_z + dz/2., dz) + zcenter = (zbins[:-1] + zbins[1:]) / 2 + pdfz = cosmo.differential_comoving_volume(zcenter).value/(1+zcenter)/V + + int_pdf = interp1d(zcenter, pdfz, bounds_error=False, fill_value=0) + + rndz = np.random.uniform(min_z, max_z, int(fac*nsamples)) + pdf_zs = int_pdf(rndz) + maxpdf = max(pdf_zs) + rndn = np.random.uniform(0, 1, int(fac*nsamples)) * maxpdf + diff = pdf_zs - rndn + idx = np.where(diff > 0) + z_astro = rndz[idx] + + np.random.shuffle(z_astro) + z_astro.resize(nsamples) + + return z_astro
+ + +
+[docs] +def pdf_z_astro(z, V_min, V_max): + ''' Get the probability density for the rate of events + at a redshift assuming standard cosmology + ''' + return contracted_dVdc(z)/(V_max - V_min)
+ + +
+[docs] +def contracted_dVdc(z): + #Return the time-dilated differential comoving volume + return cosmo.differential_comoving_volume(z).value/(1+z)
+ + +##### Defining current standard strategies used for making injections ##### + +
+[docs] +def inj_mass_pdf(key, mass1, mass2, lomass, himass, lomass_2 = 0, himass_2 = 0): + + '''Estimate the probability density based on the injection strategy + + Parameters + ---------- + key: string + Injection strategy + mass1: array + First mass of the injections + mass2: array + Second mass of the injections + lomass: float + Lower value of the mass distributions + himass: float + higher value of the mass distribution + + Returns + ------- + pdf: array + Probability density of the injections + ''' + + mass1, mass2 = np.array(mass1), np.array(mass2) + + if key == 'totalMass': + # Returns the PDF of mass when total mass is uniformly distributed. + # Both the component masses have the same distribution for this case. + + # Parameters + # ---------- + # lomass: lower component mass + # himass: higher component mass + + bound = np.sign((lomass + himass) - (mass1 + mass2)) + bound += np.sign((himass - mass1)*(mass1 - lomass)) + bound += np.sign((himass - mass2)*(mass2 - lomass)) + idx = np.where(bound != 3) + pdf = 1./(himass - lomass)/(mass1 + mass2 - 2 * lomass) + pdf[idx] = 0 + + return pdf + + if key == 'componentMass': + # Returns the PDF of mass when component mass is uniformly + # distributed. Component masses are independent for this case. + + # Parameters + # ---------- + # lomass: lower component mass + # himass: higher component mass + + bound = np.sign((himass - mass1)*(mass1 - lomass)) + bound += np.sign((himass_2 - mass2)*(mass2 - lomass_2)) + idx = np.where(bound != 2) + pdf = np.ones_like(mass1) / (himass - lomass) / (himass_2 - lomass_2) + pdf[idx] = 0 + + return pdf + + if key == 'log': + # Returns the PDF of mass when component mass is uniform in log. + # Component masses are independent for this case. + + # Parameters + # ---------- + # lomass: lower component mass + # himass: higher component mass + + bound = np.sign((himass - mass1)*(mass1 - lomass)) + bound += np.sign((himass_2 - mass2)*(mass2 - lomass_2)) + idx = np.where(bound != 2) + pdf = 1 / (log(himass) - log(lomass)) / (log(himass_2) - log(lomass_2)) + pdf /= (mass1 * mass2) + pdf[idx] = 0 + + return pdf
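Sketch of evaluating the density for a uniform-in-component-mass strategy; note the second-component bounds must be passed explicitly for this key.

    import numpy as np

    m1 = np.array([10., 30., 80.])
    m2 = np.array([8., 20., 60.])
    pdf = inj_mass_pdf('componentMass', m1, m2, 5., 95., lomass_2=5., himass_2=95.)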
+ + +
+[docs] +def inj_spin_pdf(key, high_spin, spinz): + ''' Estimate the probability density of the + injections for the spin distribution. + + Parameters + ---------- + key: string + Injections strategy + high_spin: float + Maximum spin used in the strategy + spinz: array + Spin of the injections (for one component) + ''' + + # If the data comes from disable_spin simulation + if spinz[0] == 0: + return np.ones_like(spinz) + + spinz = np.array(spinz) + + bound = np.sign(np.absolute(high_spin) - np.absolute(spinz)) + bound += np.sign(1 - np.absolute(spinz)) + + if key == 'precessing': + # Returns the PDF of spins when total spin is + # isotropically distributed. Both the component + # masses have the same distribution for this case. + + pdf = (np.log(high_spin - np.log(abs(spinz)))/high_spin/2) + idx = np.where(bound != 2) + pdf[idx] = 0 + + return pdf + + if key == 'aligned': + # Returns the PDF of mass when spins are aligned and uniformly + # distributed. Component spins are independent for this case. + + pdf = (np.ones_like(spinz) / 2 / high_spin) + idx = np.where(bound != 2) + pdf[idx] = 0 + + return pdf + + if key == 'disable_spin': + # Returns unit array + + pdf = np.ones_like(spinz) + + return pdf
+ + +
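Similarly for the 'aligned' branch: spins uniform on [-high_spin, high_spin] give a flat density 1/(2*high_spin), zeroed where |spin| exceeds the strategy's maximum (again assuming the function above is in scope):

    import numpy as np

    spinz = np.array([0.1, -0.5, 0.95])
    pdf = inj_spin_pdf('aligned', 0.9, spinz)
    # pdf -> [0.555..., 0.555..., 0.0], i.e. 1/(2*0.9) inside, 0 for |spin| > 0.9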
+[docs] +def inj_distance_pdf(key, distance, low_dist, high_dist, mchirp = 1): + ''' Estimate the probability density of the + injections for the distance distribution. + + Parameters + ---------- + key: string + Injections strategy + distance: array + Array of distances + low_dist: float + Lower value of distance used in the injection strategy + high_dist: float + Higher value of distance used in the injection strategy + ''' + + distance = np.array(distance) + + if key == 'uniform': + # Returns the PDF at a distance when + # distance is uniformly distributed. + + pdf = np.ones_like(distance)/(high_dist - low_dist) + bound = np.sign((high_dist - distance)*(distance - low_dist)) + idx = np.where(bound != 1) + pdf[idx] = 0 + return pdf + + if key == 'dchirp': + # Returns the PDF at a distance when distance is uniformly + # distributed but scaled by the chirp mass + + weight = (mchirp/_mch_BNS)**(5./6) + pdf = np.ones_like(distance) / weight / (high_dist - low_dist) + bound = np.sign((weight*high_dist - distance)*(distance - weight*low_dist)) + idx = np.where(bound != 1) + pdf[idx] = 0 + return pdf
+ +
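And for the 'uniform' distance strategy, the density is simply 1/(high_dist - low_dist) within the injected range:

    import numpy as np

    dist = np.array([50., 200., 900.])
    pdf = inj_distance_pdf('uniform', dist, low_dist=10., high_dist=500.)
    # pdf -> [1/490, 1/490, 0.0]; the 900 Mpc injection lies outside the strategy's range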
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/psd.html b/latest/html/_modules/pycbc/psd.html new file mode 100644 index 00000000000..70c2208bb26 --- /dev/null +++ b/latest/html/_modules/pycbc/psd.html @@ -0,0 +1,749 @@ + + + + + + pycbc.psd — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.psd

+#!/usr/bin/python
+# Copyright (C) 2014 Alex Nitz, Andrew Miller, Tito Dal Canton
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+import copy
+from ligo import segments
+from pycbc.psd.read import *
+from pycbc.psd.analytical import *
+from pycbc.psd.analytical_space import *
+from pycbc.psd.estimate import *
+from pycbc.psd.variation import *
+from pycbc.types import float32,float64
+from pycbc.types import MultiDetOptionAppendAction, MultiDetOptionAction
+from pycbc.types import DictOptionAction, MultiDetDictOptionAction
+from pycbc.types import copy_opts_for_single_ifo
+from pycbc.types import required_opts, required_opts_multi_ifo
+from pycbc.types import ensure_one_opt, ensure_one_opt_multi_ifo
+
+
+[docs] +def from_cli(opt, length, delta_f, low_frequency_cutoff, + strain=None, dyn_range_factor=1, precision=None): + """Parses the CLI options related to the noise PSD and returns a + FrequencySeries with the corresponding PSD. If necessary, the PSD is + linearly interpolated to achieve the resolution specified in the CLI. + + Parameters + ---------- + opt : object + Result of parsing the CLI with OptionParser, or any object with the + required attributes (psd_model, psd_file, asd_file, psd_estimation, + psd_segment_length, psd_segment_stride, psd_inverse_length, + psd_output). + length : int + The length in samples of the output PSD. + delta_f : float + The frequency step of the output PSD. + low_frequency_cutoff: float + The low frequncy cutoff to use when calculating the PSD. + strain : {None, TimeSeries} + Time series containing the data from which the PSD should be measured, + when psd_estimation is in use. + dyn_range_factor : {1, float} + For PSDs taken from models or text files, if `dyn_range_factor` is + not None, then the PSD is multiplied by `dyn_range_factor` ** 2. + precision : str, choices (None,'single','double') + If not specified, or specified as None, the precision of the returned + PSD will match the precision of the data, if measuring a PSD, or will + match the default precision of the model if using an analytical PSD. + If 'single' the PSD will be converted to float32, if not already in + that precision. If 'double' the PSD will be converted to float64, if + not already in that precision. + Returns + ------- + psd : FrequencySeries + The frequency series containing the PSD. + """ + f_low = low_frequency_cutoff + sample_rate = (length -1) * 2 * delta_f + + try: + psd_estimation = opt.psd_estimation is not None + except AttributeError: + psd_estimation = False + + exclusive_opts = [opt.psd_model, opt.psd_file, opt.asd_file, + psd_estimation] + if sum(map(bool, exclusive_opts)) != 1: + err_msg = "You must specify exactly one of '--psd-file', " + err_msg += "'--psd-model', '--asd-file', '--psd-estimation'" + raise ValueError(err_msg) + + if (opt.psd_model or opt.psd_file or opt.asd_file): + # PSD from lalsimulation or file + if opt.psd_model: + psd = from_string(opt.psd_model, length, delta_f, f_low, + **opt.psd_extra_args) + elif opt.psd_file or opt.asd_file: + if opt.asd_file: + psd_file_name = opt.asd_file + else: + psd_file_name = opt.psd_file + if psd_file_name.endswith(('.dat', '.txt')): + is_asd_file = bool(opt.asd_file) + psd = from_txt(psd_file_name, length, + delta_f, f_low, is_asd_file=is_asd_file) + elif opt.asd_file: + err_msg = "ASD files are only valid as ASCII files (.dat or " + err_msg += ".txt). 
Supplied {}.".format(psd_file_name) + elif psd_file_name.endswith(('.xml', '.xml.gz')): + psd = from_xml(psd_file_name, length, delta_f, f_low, + ifo_string=opt.psd_file_xml_ifo_string, + root_name=opt.psd_file_xml_root_name) + # Set values < flow to the value at flow (if flow > 0) + kmin = int(low_frequency_cutoff / psd.delta_f) + if kmin > 0: + psd[0:kmin] = psd[kmin] + + psd *= dyn_range_factor ** 2 + + elif psd_estimation: + # estimate PSD from data + psd = welch(strain, avg_method=opt.psd_estimation, + seg_len=int(opt.psd_segment_length * sample_rate + 0.5), + seg_stride=int(opt.psd_segment_stride * sample_rate + 0.5), + num_segments=opt.psd_num_segments, + require_exact_data_fit=False) + + if delta_f != psd.delta_f: + psd = interpolate(psd, delta_f, length) + + else: + # Shouldn't be possible to get here + raise ValueError("Shouldn't be possible to raise this!") + + if opt.psd_inverse_length: + psd = inverse_spectrum_truncation(psd, + int(opt.psd_inverse_length * sample_rate), + low_frequency_cutoff=f_low, + trunc_method=opt.invpsd_trunc_method) + + if hasattr(opt, 'psd_output') and opt.psd_output: + (psd.astype(float64) / (dyn_range_factor ** 2)).save(opt.psd_output) + + if precision is None: + return psd + elif precision == 'single': + return psd.astype(float32) + elif precision == 'double': + return psd.astype(float64) + else: + err_msg = "If provided the precision kwarg must be either 'single' " + err_msg += "or 'double'. You provided %s." %(precision) + raise ValueError(err_msg)
+ + +
+[docs] +def from_cli_single_ifo(opt, length, delta_f, low_frequency_cutoff, ifo, + **kwargs): + """ + Get the PSD for a single ifo when using the multi-detector CLI + """ + single_det_opt = copy_opts_for_single_ifo(opt, ifo) + return from_cli(single_det_opt, length, delta_f, low_frequency_cutoff, + **kwargs)
+ + +
+[docs] +def from_cli_multi_ifos(opt, length_dict, delta_f_dict, + low_frequency_cutoff_dict, ifos, strain_dict=None, + **kwargs): + """ + Get the PSD for all ifos when using the multi-detector CLI + """ + psd = {} + for ifo in ifos: + if strain_dict is not None: + strain = strain_dict[ifo] + else: + strain = None + psd[ifo] = from_cli_single_ifo(opt, length_dict[ifo], delta_f_dict[ifo], + low_frequency_cutoff_dict[ifo], ifo, + strain=strain, **kwargs) + return psd
+ + +
+[docs] +def insert_psd_option_group(parser, output=True, include_data_options=True): + """ + Adds the options used to call the pycbc.psd.from_cli function to an + optparser as an OptionGroup. This should be used if you + want to use these options in your code. + + Parameters + ----------- + parser : object + OptionParser instance. + """ + psd_options = parser.add_argument_group( + "Options to select the method of PSD generation", + "The options --psd-model, --psd-file, --asd-file, " + "and --psd-estimation are mutually exclusive.") + psd_options.add_argument("--psd-model", + help="Get PSD from given analytical model. ", + choices=get_psd_model_list()) + psd_options.add_argument("--psd-extra-args", + nargs='+', action=DictOptionAction, + metavar='PARAM:VALUE', default={}, type=float, + help="(optional) Extra arguments passed to " + "the PSD models.") + psd_options.add_argument("--psd-file", + help="Get PSD using given PSD ASCII file") + psd_options.add_argument("--asd-file", + help="Get PSD using given ASD ASCII file") + psd_options.add_argument("--psd-inverse-length", type=float, + help="(Optional) The maximum length of the " + "impulse response of the overwhitening " + "filter (s)") + psd_options.add_argument("--invpsd-trunc-method", default=None, + choices=["hann"], + help="(Optional) What truncation method to use " + "when applying psd-inverse-length. If not " + "provided, a hard truncation will be used.") + # Options specific to XML PSD files + psd_options.add_argument("--psd-file-xml-ifo-string", + help="If using an XML PSD file, use the PSD in " + "the file's PSD dictionary with this " + "ifo string. If not given and only one " + "PSD present in the file return that, if " + "not given and multiple (or zero) PSDs " + "present an exception will be raised.") + psd_options.add_argument("--psd-file-xml-root-name", default='psd', + help="If given use this as the root name for " + "the PSD XML file. 
If this means nothing " + "to you, then it is probably safe to " + "ignore this option.") + # Options for PSD variation + psd_options.add_argument("--psdvar-segment", type=float, + metavar="SECONDS", help="Length of segment " + "for mean square calculation of PSD variation.") + psd_options.add_argument("--psdvar-short-segment", type=float, + metavar="SECONDS", help="Length of short segment " + "for outliers removal in PSD variability " + "calculation.") + psd_options.add_argument("--psdvar-long-segment", type=float, + metavar="SECONDS", help="Length of long segment " + "when calculating the PSD variability.") + psd_options.add_argument("--psdvar-psd-duration", type=float, + metavar="SECONDS", help="Duration of short " + "segments for PSD estimation.") + psd_options.add_argument("--psdvar-psd-stride", type=float, + metavar="SECONDS", help="Separation between PSD " + "estimation segments.") + psd_options.add_argument("--psdvar-low-freq", type=float, metavar="HERTZ", + help="Minimum frequency to consider in strain " + "bandpass.") + psd_options.add_argument("--psdvar-high-freq", type=float, metavar="HERTZ", + help="Maximum frequency to consider in strain " + "bandpass.") + + if include_data_options : + psd_options.add_argument("--psd-estimation", + help="Measure PSD from the data, using " + "given average method.", + choices=["mean", "median", "median-mean"]) + psd_options.add_argument("--psd-segment-length", type=float, + help="(Required for --psd-estimation) The " + "segment length for PSD estimation (s)") + psd_options.add_argument("--psd-segment-stride", type=float, + help="(Required for --psd-estimation) " + "The separation between consecutive " + "segments (s)") + psd_options.add_argument("--psd-num-segments", type=int, default=None, + help="(Optional, used only with " + "--psd-estimation). If given, PSDs will " + "be estimated using only this number of " + "segments. If more data is given than " + "needed to make this number of segments " + "then excess data will not be used in " + "the PSD estimate. If not enough data " + "is given, the code will fail.") + if output: + psd_options.add_argument("--psd-output", + help="(Optional) Write PSD to specified file") + + return psd_options
+ + +
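A hedged end-to-end sketch of how this option group is meant to be used together with from_cli and verify_psd_options defined earlier (placeholder numbers; 'aLIGOZeroDetHighPower' assumes lalsimulation is installed):

    import argparse
    import pycbc.psd

    parser = argparse.ArgumentParser()
    pycbc.psd.insert_psd_option_group(parser)
    args = parser.parse_args(['--psd-model', 'aLIGOZeroDetHighPower'])
    pycbc.psd.verify_psd_options(args, parser)

    # for 4096 Hz data and delta_f = 0.25 Hz: length = 4096 / (2 * 0.25) + 1 = 8193
    psd = pycbc.psd.from_cli(args, length=8193, delta_f=0.25,
                             low_frequency_cutoff=20.0)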
+[docs] +def insert_psd_option_group_multi_ifo(parser): + """ + Adds the options used to call the pycbc.psd.from_cli function to an + optparser as an OptionGroup. This should be used if you + want to use these options in your code. + + Parameters + ----------- + parser : object + OptionParser instance. + """ + psd_options = parser.add_argument_group( + "Options to select the method of PSD generation", + "The options --psd-model, --psd-file, --asd-file, " + "and --psd-estimation are mutually exclusive.") + psd_options.add_argument("--psd-model", nargs="+", + action=MultiDetOptionAction, metavar='IFO:MODEL', + help="Get PSD from given analytical model. " + "Choose from %s" %(', '.join(get_psd_model_list()),)) + psd_options.add_argument("--psd-extra-args", + nargs='+', action=MultiDetDictOptionAction, + metavar='DETECTOR:PARAM:VALUE', default={}, + type=float, help="(optional) Extra arguments " + "passed to the PSD models.") + psd_options.add_argument("--psd-file", nargs="+", + action=MultiDetOptionAction, metavar='IFO:FILE', + help="Get PSD using given PSD ASCII file") + psd_options.add_argument("--asd-file", nargs="+", + action=MultiDetOptionAction, metavar='IFO:FILE', + help="Get PSD using given ASD ASCII file") + psd_options.add_argument("--psd-estimation", nargs="+", + action=MultiDetOptionAction, metavar='IFO:FILE', + help="Measure PSD from the data, using given " + "average method. Choose from " + "mean, median or median-mean.") + psd_options.add_argument("--psd-segment-length", type=float, nargs="+", + action=MultiDetOptionAction, metavar='IFO:LENGTH', + help="(Required for --psd-estimation) The segment " + "length for PSD estimation (s)") + psd_options.add_argument("--psd-segment-stride", type=float, nargs="+", + action=MultiDetOptionAction, metavar='IFO:STRIDE', + help="(Required for --psd-estimation) The separation" + " between consecutive segments (s)") + psd_options.add_argument("--psd-num-segments", type=int, nargs="+", + default=None, + action=MultiDetOptionAction, metavar='IFO:NUM', + help="(Optional, used only with --psd-estimation). " + "If given PSDs will be estimated using only " + "this number of segments. If more data is " + "given than needed to make this number of " + "segments than excess data will not be used in " + "the PSD estimate. If not enough data is given " + "the code will fail.") + psd_options.add_argument("--psd-inverse-length", type=float, nargs="+", + action=MultiDetOptionAction, metavar='IFO:LENGTH', + help="(Optional) The maximum length of the impulse" + " response of the overwhitening filter (s)") + psd_options.add_argument("--invpsd-trunc-method", default=None, + choices=["hann"], + help="(Optional) What truncation method to use " + "when applying psd-inverse-length. 
If not " + "provided, a hard truncation will be used.") + psd_options.add_argument("--psd-output", nargs="+", + action=MultiDetOptionAction, metavar='IFO:FILE', + help="(Optional) Write PSD to specified file") + + # Options for PSD variation + psd_options.add_argument("--psdvar-segment", type=float, + metavar="SECONDS", help="Length of segment " + "when calculating the PSD variability.") + psd_options.add_argument("--psdvar-short-segment", type=float, + metavar="SECONDS", help="Length of short segment " + "for outliers removal in PSD variability " + "calculation.") + psd_options.add_argument("--psdvar-long-segment", type=float, + metavar="SECONDS", help="Length of long segment " + "when calculating the PSD variability.") + psd_options.add_argument("--psdvar-psd-duration", type=float, + metavar="SECONDS", help="Duration of short " + "segments for PSD estimation.") + psd_options.add_argument("--psdvar-psd-stride", type=float, + metavar="SECONDS", help="Separation between PSD " + "estimation segments.") + psd_options.add_argument("--psdvar-low-freq", type=float, metavar="HERTZ", + help="Minimum frequency to consider in strain " + "bandpass.") + psd_options.add_argument("--psdvar-high-freq", type=float, metavar="HERTZ", + help="Maximum frequency to consider in strain " + "bandpass.") + + return psd_options
+ + +ensure_one_opt_groups = [] +ensure_one_opt_groups.append(['--psd-file', '--psd-model', + '--psd-estimation', '--asd-file']) + +
+[docs] +def verify_psd_options(opt, parser): + """Parses the CLI options and verifies that they are consistent and + reasonable. + + Parameters + ---------- + opt : object + Result of parsing the CLI with OptionParser, or any object with the + required attributes (psd_model, psd_file, asd_file, psd_estimation, + psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output). + parser : object + OptionParser instance. + """ + try: + psd_estimation = opt.psd_estimation is not None + except AttributeError: + psd_estimation = False + + for opt_group in ensure_one_opt_groups: + ensure_one_opt(opt, parser, opt_group) + + if psd_estimation: + required_opts(opt, parser, + ['--psd-segment-stride', '--psd-segment-length'], + required_by = "--psd-estimation")
+ + +
+[docs] +def verify_psd_options_multi_ifo(opt, parser, ifos): + """Parses the CLI options and verifies that they are consistent and + reasonable. + + Parameters + ---------- + opt : object + Result of parsing the CLI with OptionParser, or any object with the + required attributes (psd_model, psd_file, asd_file, psd_estimation, + psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output). + parser : object + OptionParser instance. + """ + for ifo in ifos: + for opt_group in ensure_one_opt_groups: + ensure_one_opt_multi_ifo(opt, parser, ifo, opt_group) + + if opt.psd_estimation[ifo]: + required_opts_multi_ifo(opt, parser, ifo, + ['--psd-segment-stride', '--psd-segment-length'], + required_by = "--psd-estimation")
+ + +
+[docs] +def generate_overlapping_psds(opt, gwstrain, flen, delta_f, flow, + dyn_range_factor=1., precision=None): + """Generate a set of overlapping PSDs to cover a stretch of data. This + allows one to analyse a long stretch of data with PSD measurements that + change with time. + + Parameters + ----------- + opt : object + Result of parsing the CLI with OptionParser, or any object with the + required attributes (psd_model, psd_file, asd_file, psd_estimation, + psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output). + gwstrain : Strain object + The timeseries of raw data on which to estimate PSDs. + flen : int + The length in samples of the output PSDs. + delta_f : float + The frequency step of the output PSDs. + flow: float + The low frequncy cutoff to use when calculating the PSD. + dyn_range_factor : {1, float} + For PSDs taken from models or text files, if `dyn_range_factor` is + not None, then the PSD is multiplied by `dyn_range_factor` ** 2. + precision : str, choices (None,'single','double') + If not specified, or specified as None, the precision of the returned + PSD will match the precision of the data, if measuring a PSD, or will + match the default precision of the model if using an analytical PSD. + If 'single' the PSD will be converted to float32, if not already in + that precision. If 'double' the PSD will be converted to float64, if + not already in that precision. + + Returns + -------- + psd_and_times : list of (start, end, PSD) tuples + This is a list of tuples containing one entry for each PSD. The first + and second entries (start, end) in each tuple represent the index + range of the gwstrain data that was used to estimate that PSD. The + third entry (psd) contains the PSD estimate between that interval. + """ + if not opt.psd_estimation: + psd = from_cli(opt, flen, delta_f, flow, strain=gwstrain, + dyn_range_factor=dyn_range_factor, precision=precision) + psds_and_times = [ (0, len(gwstrain), psd) ] + return psds_and_times + + # Figure out the data length used for PSD generation + seg_stride = int(opt.psd_segment_stride * gwstrain.sample_rate) + seg_len = int(opt.psd_segment_length * gwstrain.sample_rate) + input_data_len = len(gwstrain) + + if opt.psd_num_segments is None: + # FIXME: Should we make --psd-num-segments mandatory? + # err_msg = "You must supply --num-segments." + # raise ValueError(err_msg) + num_segments = int(input_data_len // seg_stride) - 1 + else: + num_segments = int(opt.psd_num_segments) + + psd_data_len = (num_segments - 1) * seg_stride + seg_len + + # How many unique PSD measurements is this? + psds_and_times = [] + if input_data_len < psd_data_len: + err_msg = "Input data length must be longer than data length needed " + err_msg += "to estimate a PSD. You specified that a PSD should be " + err_msg += "estimated with %d seconds. " %(psd_data_len) + err_msg += "Input data length is %d seconds. 
" %(input_data_len) + raise ValueError(err_msg) + elif input_data_len == psd_data_len: + num_psd_measurements = 1 + psd_stride = 0 + else: + num_psd_measurements = int(2 * (input_data_len-1) / psd_data_len) + psd_stride = int((input_data_len - psd_data_len) / num_psd_measurements) + + for idx in range(num_psd_measurements): + if idx == (num_psd_measurements - 1): + start_idx = input_data_len - psd_data_len + end_idx = input_data_len + else: + start_idx = psd_stride * idx + end_idx = psd_data_len + psd_stride * idx + strain_part = gwstrain[start_idx:end_idx] + psd = from_cli(opt, flen, delta_f, flow, strain=strain_part, + dyn_range_factor=dyn_range_factor, precision=precision) + psds_and_times.append( (start_idx, end_idx, psd) ) + return psds_and_times
+ + +
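The return value is easiest to consume as a loop over (start, end, psd) triples; a sketch assuming `args` holds the PSD-estimation options (--psd-estimation, --psd-segment-length, --psd-segment-stride) and `strain` is the TimeSeries being analysed:

    psds_and_times = pycbc.psd.generate_overlapping_psds(
        args, strain, flen=8193, delta_f=0.25, flow=20.0)

    for start_idx, end_idx, psd in psds_and_times:
        # each PSD was estimated from, and applies to, strain[start_idx:end_idx]
        print(start_idx, end_idx, psd.delta_f)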
+[docs] +def associate_psds_to_segments(opt, fd_segments, gwstrain, flen, delta_f, flow, + dyn_range_factor=1., precision=None): + """Generate a set of overlapping PSDs covering the data in GWstrain. + Then associate these PSDs with the appropriate segment in strain_segments. + + Parameters + ----------- + opt : object + Result of parsing the CLI with OptionParser, or any object with the + required attributes (psd_model, psd_file, asd_file, psd_estimation, + psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output). + fd_segments : StrainSegments.fourier_segments() object + The fourier transforms of the various analysis segments. The psd + attribute of each segment is updated to point to the appropriate PSD. + gwstrain : Strain object + The timeseries of raw data on which to estimate PSDs. + flen : int + The length in samples of the output PSDs. + delta_f : float + The frequency step of the output PSDs. + flow: float + The low frequncy cutoff to use when calculating the PSD. + dyn_range_factor : {1, float} + For PSDs taken from models or text files, if `dyn_range_factor` is + not None, then the PSD is multiplied by `dyn_range_factor` ** 2. + precision : str, choices (None,'single','double') + If not specified, or specified as None, the precision of the returned + PSD will match the precision of the data, if measuring a PSD, or will + match the default precision of the model if using an analytical PSD. + If 'single' the PSD will be converted to float32, if not already in + that precision. If 'double' the PSD will be converted to float64, if + not already in that precision. + """ + psds_and_times = generate_overlapping_psds(opt, gwstrain, flen, delta_f, + flow, dyn_range_factor=dyn_range_factor, + precision=precision) + + for fd_segment in fd_segments: + best_psd = None + psd_overlap = 0 + inp_seg = segments.segment(fd_segment.seg_slice.start, + fd_segment.seg_slice.stop) + for start_idx, end_idx, psd in psds_and_times: + psd_seg = segments.segment(start_idx, end_idx) + if psd_seg.intersects(inp_seg): + curr_overlap = abs(inp_seg & psd_seg) + if curr_overlap > psd_overlap: + psd_overlap = curr_overlap + best_psd = psd + if best_psd is None: + err_msg = "No PSDs found intersecting segment!" + raise ValueError(err_msg) + fd_segment.psd = best_psd
+ + +
+[docs] +def associate_psds_to_single_ifo_segments(opt, fd_segments, gwstrain, flen, + delta_f, flow, ifo, + dyn_range_factor=1., precision=None): + """ + Associate PSDs to segments for a single ifo when using the multi-detector + CLI + """ + single_det_opt = copy_opts_for_single_ifo(opt, ifo) + associate_psds_to_segments(single_det_opt, fd_segments, gwstrain, flen, + delta_f, flow, dyn_range_factor=dyn_range_factor, + precision=precision)
+ + +
+[docs] +def associate_psds_to_multi_ifo_segments(opt, fd_segments, gwstrain, flen, + delta_f, flow, ifos, + dyn_range_factor=1., precision=None): + """ + Associate PSDs to segments for all ifos when using the multi-detector CLI + """ + for ifo in ifos: + if gwstrain is not None: + strain = gwstrain[ifo] + else: + strain = None + + if fd_segments is not None: + segments = fd_segments[ifo] + else: + segments = None + + associate_psds_to_single_ifo_segments(opt, segments, strain, flen, + delta_f, flow, ifo, dyn_range_factor=dyn_range_factor, + precision=precision)
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/psd/analytical.html b/latest/html/_modules/pycbc/psd/analytical.html new file mode 100644 index 00000000000..34a13602dcb --- /dev/null +++ b/latest/html/_modules/pycbc/psd/analytical.html @@ -0,0 +1,335 @@ + + + + + + pycbc.psd.analytical — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.psd.analytical

+#!/usr/bin/python
+# Copyright (C) 2012-2016 Alex Nitz, Tito Dal Canton, Leo Singer
+#               2022 Shichao Wu
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""Provides reference PSDs from LALSimulation and pycbc.psd.analytical_space.
+
+More information about how to use these ground-based detectors' PSDs can be
+found in the guide :ref:`Analytic PSDs from lalsimulation`. For
+space-borne detectors, see the `pycbc.psd.analytical_space` module.
+"""
+import numbers
+from pycbc.types import FrequencySeries
+from pycbc.psd.analytical_space import (
+    analytical_psd_lisa_tdi_XYZ, analytical_psd_lisa_tdi_AE,
+    analytical_psd_lisa_tdi_T, sh_transformed_psd_lisa_tdi_XYZ,
+    analytical_psd_lisa_tdi_AE_confusion,
+    analytical_psd_tianqin_tdi_XYZ, analytical_psd_tianqin_tdi_AE,
+    analytical_psd_tianqin_tdi_T, analytical_psd_tianqin_tdi_AE_confusion,
+    analytical_psd_taiji_tdi_XYZ, analytical_psd_taiji_tdi_AE,
+    analytical_psd_taiji_tdi_T, analytical_psd_taiji_tdi_AE_confusion,
+    )
+import lal
+import numpy
+
+# build a list of usable PSD functions from lalsimulation
+_name_prefix = 'SimNoisePSD'
+_name_suffix = 'Ptr'
+_name_blacklist = ('FromFile', 'MirrorTherm', 'Quantum', 'Seismic', 'Shot', 'SuspTherm')
+_psd_list = []
+
+try:
+    import lalsimulation
+    for _name in lalsimulation.__dict__:
+        if _name != _name_prefix and _name.startswith(_name_prefix) and not _name.endswith(_name_suffix):
+            _name = _name[len(_name_prefix):]
+            if _name not in _name_blacklist:
+                _psd_list.append(_name)
+except ImportError:
+    pass
+
+_psd_list = sorted(_psd_list)
+
+# add functions wrapping lalsimulation PSDs
+for _name in _psd_list:
+    exec("""
+def %s(length, delta_f, low_freq_cutoff):
+    \"\"\"Return a FrequencySeries containing the %s PSD from LALSimulation.
+    \"\"\"
+    return from_string("%s", length, delta_f, low_freq_cutoff)
+""" % (_name, _name, _name))
+
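The exec loop above generates one thin wrapper per usable LALSimulation noise model, so each curve becomes a module-level function; for example (assuming lalsimulation is installed and the wrapper was therefore generated):

    from pycbc.psd.analytical import aLIGOZeroDetHighPower

    psd = aLIGOZeroDetHighPower(length=8193, delta_f=0.25, low_freq_cutoff=10.0)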
+
+[docs] +def get_psd_model_list(): + """ Returns a list of available reference PSD functions. + + Returns + ------- + list + Returns a list of names of reference PSD functions. + """ + return get_lalsim_psd_list() + get_pycbc_psd_list()
+ + +
+[docs] +def get_lalsim_psd_list(): + """Return a list of available reference PSD functions from LALSimulation. + """ + return _psd_list
+ + +
+[docs] +def get_pycbc_psd_list(): + """ Return a list of available reference PSD functions coded in PyCBC. + + Returns + ------- + list + Returns a list of names of all reference PSD functions coded in PyCBC. + """ + pycbc_analytical_psd_list = pycbc_analytical_psds.keys() + pycbc_analytical_psd_list = sorted(pycbc_analytical_psd_list) + return pycbc_analytical_psd_list
+ + +
+[docs] +def from_string(psd_name, length, delta_f, low_freq_cutoff, **kwargs): + """Generate a frequency series containing a LALSimulation or + built-in space-borne detectors' PSD specified by name. + + Parameters + ---------- + psd_name : string + PSD name as found in LALSimulation (minus the SimNoisePSD prefix) + or pycbc.psd.analytical_space. + length : int + Length of the frequency series in samples. + delta_f : float + Frequency resolution of the frequency series. + low_freq_cutoff : float + Frequencies below this value are set to zero. + **kwargs : + All other keyword arguments are passed to the PSD model. + + Returns + ------- + psd : FrequencySeries + The generated frequency series. + """ + + # check if valid PSD model + if psd_name not in get_psd_model_list(): + raise ValueError(psd_name + ' not found among analytical ' + 'PSD functions.') + + # make sure length has the right type for CreateREAL8FrequencySeries + if not isinstance(length, numbers.Integral) or length <= 0: + raise TypeError('length must be a positive integer') + length = int(length) + + # if PSD model is in LALSimulation + if psd_name in get_lalsim_psd_list(): + lalseries = lal.CreateREAL8FrequencySeries( + '', lal.LIGOTimeGPS(0), 0, delta_f, lal.DimensionlessUnit, length) + try: + func = lalsimulation.__dict__[ + _name_prefix + psd_name + _name_suffix] + except KeyError: + func = lalsimulation.__dict__[_name_prefix + psd_name] + func(lalseries, low_freq_cutoff) + else: + lalsimulation.SimNoisePSD(lalseries, 0, func) + psd = FrequencySeries(lalseries.data.data, delta_f=delta_f) + + # if PSD model is coded in PyCBC + else: + func = pycbc_analytical_psds[psd_name] + psd = func(length, delta_f, low_freq_cutoff, **kwargs) + + # zero-out content below low-frequency cutoff + kmin = int(low_freq_cutoff / delta_f) + psd.data[:kmin] = 0 + + return psd
+ + +
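from_string also forwards extra keyword arguments to the PyCBC-coded models registered in pycbc_analytical_psds below, which is how e.g. the TDI generation is chosen for the space-borne curves; a sketch with illustrative numbers:

    from pycbc.psd.analytical import from_string

    # kwargs such as tdi are passed through to analytical_psd_lisa_tdi_AE
    psd = from_string('analytical_psd_lisa_tdi_AE', length=10000, delta_f=1e-6,
                      low_freq_cutoff=1e-4, tdi='1.5')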
+[docs] +def flat_unity(length, delta_f, low_freq_cutoff): + """ Returns a FrequencySeries of ones above the low_frequency_cutoff. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : int + Low-frequency cutoff for output FrequencySeries. + + Returns + ------- + FrequencySeries + Returns a FrequencySeries containing the unity PSD model. + """ + fseries = FrequencySeries(numpy.ones(length), delta_f=delta_f) + kmin = int(low_freq_cutoff / fseries.delta_f) + fseries.data[:kmin] = 0 + return fseries
+ + +# dict of analytical PSDs coded in PyCBC +pycbc_analytical_psds = { + 'flat_unity' : flat_unity, + + 'analytical_psd_lisa_tdi_XYZ' : analytical_psd_lisa_tdi_XYZ, + 'analytical_psd_lisa_tdi_AE' : analytical_psd_lisa_tdi_AE, + 'analytical_psd_lisa_tdi_T' : analytical_psd_lisa_tdi_T, + 'sh_transformed_psd_lisa_tdi_XYZ' : sh_transformed_psd_lisa_tdi_XYZ, + 'analytical_psd_lisa_tdi_AE_confusion' : analytical_psd_lisa_tdi_AE_confusion, + + 'analytical_psd_tianqin_tdi_XYZ' : analytical_psd_tianqin_tdi_XYZ, + 'analytical_psd_tianqin_tdi_AE' : analytical_psd_tianqin_tdi_AE, + 'analytical_psd_tianqin_tdi_T' : analytical_psd_tianqin_tdi_T, + 'analytical_psd_tianqin_tdi_AE_confusion' : analytical_psd_tianqin_tdi_AE_confusion, + + 'analytical_psd_taiji_tdi_XYZ' : analytical_psd_taiji_tdi_XYZ, + 'analytical_psd_taiji_tdi_AE' : analytical_psd_taiji_tdi_AE, + 'analytical_psd_taiji_tdi_T' : analytical_psd_taiji_tdi_T, + 'analytical_psd_taiji_tdi_AE_confusion' : analytical_psd_taiji_tdi_AE_confusion, +} +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/psd/analytical_space.html b/latest/html/_modules/pycbc/psd/analytical_space.html new file mode 100644 index 00000000000..02721edd9ea --- /dev/null +++ b/latest/html/_modules/pycbc/psd/analytical_space.html @@ -0,0 +1,2158 @@ + + + + + + pycbc.psd.analytical_space — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.psd.analytical_space

+# Copyright (C) 2022  Shichao Wu, Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides (semi-)analytical PSDs and sensitivity curves for
+space-borne detectors such as LISA, Taiji, and TianQin. They are based on
+the LISA technical note <LISA-LCST-SGS-TN-001>, the LDC manual
+<LISA-LCST-SGS-MAN-001>, and the papers <10.1088/1361-6382/ab1101>,
+<10.1088/0264-9381/33/3/035010>, <10.1103/PhysRevD.102.063021>,
+<10.1103/PhysRevD.100.043003>, and <10.1103/PhysRevD.107.064021>.
+"""
+
+import numpy as np
+from scipy.interpolate import interp1d
+from astropy.constants import c
+from pycbc.psd.read import from_numpy_arrays
+
+
+def _psd_acc_noise(f, acc_noise_level=None):
+    """ The PSD of TDI-based space-borne GW
+    detectors' acceleration noise. Note that
+    this form is suitable for LISA and Taiji;
+    TianQin has a different form.
+
+    Parameters
+    ----------
+    f : float or numpy.array
+        The frequency or frequency range, in the unit of "Hz".
+    acc_noise_level : float
+        The level of acceleration noise.
+
+    Returns
+    -------
+    s_acc_nu : float or numpy.array
+        The PSD value or array for acceleration noise.
+
+    Notes
+    -----
+    Please see Eq.(11-13) in <LISA-LCST-SGS-TN-001> for more details.
+    """
+    s_acc = acc_noise_level**2 * (1+(4e-4/f)**2)*(1+(f/8e-3)**4)
+    s_acc_d = s_acc * (2*np.pi*f)**(-4)
+    s_acc_nu = (2*np.pi*f/c.value)**2 * s_acc_d
+
+    return s_acc_nu
+
+
+
+[docs] +def psd_lisa_acc_noise(f, acc_noise_level=3e-15): + """ The PSD of LISA's acceleration noise. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + acc_noise_level : float + The level of acceleration noise. + + Returns + ------- + s_acc_nu : float or numpy.array + The PSD value or array for acceleration noise. + Notes + ----- + Please see Eq.(11-13) in <LISA-LCST-SGS-TN-001> for more details. + """ + s_acc_nu = _psd_acc_noise(f, acc_noise_level) + + return s_acc_nu
+ + + +
+[docs] +def psd_tianqin_acc_noise(f, acc_noise_level=1e-15): + """ The PSD of TianQin's acceleration noise. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + acc_noise_level : float + The level of acceleration noise. + + Returns + ------- + s_acc_nu : float or numpy.array + The PSD value or array for acceleration noise. + Notes + ----- + Please see Table(1) in <10.1088/0264-9381/33/3/035010> + and that paper for more details. + """ + s_acc_d = acc_noise_level**2 * (2*np.pi*f)**(-4) * (1+1e-4/f) + s_acc_nu = (2*np.pi*f/c.value)**2 * s_acc_d + + return s_acc_nu
+ + + +
+[docs] +def psd_taiji_acc_noise(f, acc_noise_level=3e-15): + """ The PSD of Taiji's acceleration noise. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + acc_noise_level : float + The level of acceleration noise. + + Returns + ------- + s_acc_nu : float or numpy.array + The PSD value or array for acceleration noise. + Notes + ----- + Please see Eq.(2) in <10.1103/PhysRevD.107.064021> for more details. + """ + s_acc_nu = _psd_acc_noise(f, acc_noise_level) + + return s_acc_nu
+ + + +def _psd_oms_noise(f, oms_noise_level=None): + """ The PSD of TDI-based space-borne GW detectors' OMS noise. + Note that this is suitable for LISA and Taiji, TianQin + has a different form. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + oms_noise_level : float + The level of OMS noise. + + Returns + ------- + s_oms_nu : float or numpy.array + The PSD value or array for OMS noise. + Notes + ----- + Please see Eq.(9-10) in <LISA-LCST-SGS-TN-001> for more details. + """ + s_oms_d = oms_noise_level**2 * (1+(2e-3/f)**4) + s_oms_nu = s_oms_d * (2*np.pi*f/c.value)**2 + + return s_oms_nu + + +
+[docs] +def psd_lisa_oms_noise(f, oms_noise_level=15e-12): + """ The PSD of LISA's OMS noise. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + oms_noise_level : float + The level of OMS noise. + + Returns + ------- + s_oms_nu : float or numpy.array + The PSD value or array for OMS noise. + Notes + ----- + Please see Eq.(9-10) in <LISA-LCST-SGS-TN-001> for more details. + """ + s_oms_nu = _psd_oms_noise(f, oms_noise_level) + + return s_oms_nu
+ + + +
+[docs] +def psd_tianqin_oms_noise(f, oms_noise_level=1e-12): + """ The PSD of TianQin's OMS noise. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + oms_noise_level : float + The level of OMS noise. + + Returns + ------- + s_oms_nu : float or numpy.array + The PSD value or array for OMS noise. + Notes + ----- + Please see Table(1) in <10.1088/0264-9381/33/3/035010> + and that paper for more details. + """ + s_oms_d = oms_noise_level**2 + s_oms_nu = s_oms_d * (2*np.pi*f/c.value)**2 + + return s_oms_nu
+ + + +
+[docs] +def psd_taiji_oms_noise(f, oms_noise_level=8e-12): + """ The PSD of Taiji's OMS noise. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + oms_noise_level : float + The level of OMS noise. + + Returns + ------- + s_oms_nu : float or numpy.array + The PSD value or array for OMS noise. + Notes + ----- + Please see Eq.(1) in <10.1103/PhysRevD.107.064021> for more details. + """ + s_oms_nu = _psd_oms_noise(f, oms_noise_level) + + return s_oms_nu
+ + + +
+[docs] +def lisa_psd_components(f, acc_noise_level=3e-15, oms_noise_level=15e-12): + """ The PSD of LISA's acceleration and OMS noise. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + + Returns + ------- + low_freq_component, high_freq_component : + The PSD value or array for acceleration and OMS noise. + """ + acc_noise_level = np.float64(acc_noise_level) + oms_noise_level = np.float64(oms_noise_level) + low_freq_component = psd_lisa_acc_noise(f, acc_noise_level) + high_freq_component = psd_lisa_oms_noise(f, oms_noise_level) + + return low_freq_component, high_freq_component
+ + + +
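For reference, the two per-link noise components combined by the TDI transfer functions below can be evaluated directly on a frequency grid (default LISA noise levels):

    import numpy as np
    from pycbc.psd.analytical_space import lisa_psd_components

    f = np.logspace(-4, 0, 500)              # Hz
    s_acc_nu, s_oms_nu = lisa_psd_components(f)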
+[docs] +def tianqin_psd_components(f, acc_noise_level=1e-15, oms_noise_level=1e-12): + """ The PSD of TianQin's acceleration and OMS noise. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + + Returns + ------- + low_freq_component, high_freq_component : + The PSD value or array for acceleration and OMS noise. + """ + acc_noise_level = np.float64(acc_noise_level) + oms_noise_level = np.float64(oms_noise_level) + low_freq_component = psd_tianqin_acc_noise(f, acc_noise_level) + high_freq_component = psd_tianqin_oms_noise(f, oms_noise_level) + + return low_freq_component, high_freq_component
+ + + +
+[docs] +def taiji_psd_components(f, acc_noise_level=3e-15, oms_noise_level=8e-12): + """ The PSD of Taiji's acceleration and OMS noise. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + + Returns + ------- + low_freq_component, high_freq_component : + The PSD value or array for acceleration and OMS noise. + """ + acc_noise_level = np.float64(acc_noise_level) + oms_noise_level = np.float64(oms_noise_level) + low_freq_component = psd_taiji_acc_noise(f, acc_noise_level) + high_freq_component = psd_taiji_oms_noise(f, oms_noise_level) + + return low_freq_component, high_freq_component
+ + + +
+[docs] +def omega_length(f, len_arm=None): + """ The function to calculate 2*pi*f*arm_length. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + len_arm : float + The arm length of LISA, TianQin, or Taiji. The default + value here is None. + + Returns + ------- + omega_len : float or numpy.array + The value of 2*pi*f*arm_length. + """ + omega_len = 2*np.pi*f * len_arm/c.value + + return omega_len
+ + + +def _analytical_psd_tdi_XYZ(length, delta_f, low_freq_cutoff, + len_arm=None, psd_components=None, tdi=None): + """ The TDI-1.5/2.0 analytical PSD (X,Y,Z channel) for TDI-based + space-borne GW detectors. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of the detector, in the unit of "m". + psd_components : numpy.array + The PSDs of acceleration noise and OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (X,Y,Z channel). + Notes + ----- + Please see Eq.(19-20) in <LISA-LCST-SGS-TN-001> for more details. + """ + len_arm = np.float64(len_arm) + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + s_acc_nu, s_oms_nu = psd_components + omega_len = omega_length(fr, len_arm) + psd = 16*(np.sin(omega_len))**2 * (s_oms_nu + + s_acc_nu*(3+np.cos(2*omega_len))) + if str(tdi) not in ["1.5", "2.0"]: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + if str(tdi) == "2.0": + tdi2_factor = 4*(np.sin(2*omega_len))**2 + psd *= tdi2_factor + fseries = from_numpy_arrays(fr, psd, length, delta_f, low_freq_cutoff) + + return fseries + + +
+[docs] +def analytical_psd_lisa_tdi_XYZ(length, delta_f, low_freq_cutoff, + len_arm=2.5e9, acc_noise_level=3e-15, + oms_noise_level=15e-12, tdi=None): + """ The TDI-1.5/2.0 analytical PSD (X,Y,Z channel) for LISA. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of LISA, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (X,Y,Z channel) for LISA. + Notes + ----- + Please see Eq.(19-20) in <LISA-LCST-SGS-TN-001> for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + psd_components = np.array(lisa_psd_components( + fr, acc_noise_level, oms_noise_level)) + fseries = _analytical_psd_tdi_XYZ(length, delta_f, low_freq_cutoff, + len_arm, psd_components, tdi) + + return fseries
+ + + +
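A short sketch contrasting the TDI-1.5 and TDI-2.0 variants of the X/Y/Z-channel PSD above; per the private helper, the only difference is the extra 4*sin^2(2*omega*L) factor (numbers are illustrative):

    from pycbc.psd.analytical_space import analytical_psd_lisa_tdi_XYZ

    kwargs = dict(length=10000, delta_f=1e-6, low_freq_cutoff=1e-4, len_arm=2.5e9)
    psd_tdi15 = analytical_psd_lisa_tdi_XYZ(tdi='1.5', **kwargs)
    psd_tdi20 = analytical_psd_lisa_tdi_XYZ(tdi='2.0', **kwargs)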
+[docs] +def analytical_psd_tianqin_tdi_XYZ(length, delta_f, low_freq_cutoff, + len_arm=np.sqrt(3)*1e8, + acc_noise_level=1e-15, + oms_noise_level=1e-12, tdi=None): + """ The TDI-1.5/2.0 analytical PSD (X,Y,Z channel) for TianQin. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of TianQin, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (X,Y,Z channel) for TianQin. + Notes + ----- + Please see Table(1) in <10.1088/0264-9381/33/3/035010> + for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + psd_components = np.array(tianqin_psd_components( + fr, acc_noise_level, oms_noise_level)) + fseries = _analytical_psd_tdi_XYZ(length, delta_f, low_freq_cutoff, + len_arm, psd_components, tdi) + + return fseries
+ + + +
+[docs] +def analytical_psd_taiji_tdi_XYZ(length, delta_f, low_freq_cutoff, + len_arm=3e9, acc_noise_level=3e-15, + oms_noise_level=8e-12, tdi=None): + """ The TDI-1.5/2.0 analytical PSD (X,Y,Z channel) for Taiji. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of Taiji, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (X,Y,Z channel) for Taiji. + Notes + ----- + Please see <10.1103/PhysRevD.107.064021> for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + psd_components = np.array(taiji_psd_components( + fr, acc_noise_level, oms_noise_level)) + fseries = _analytical_psd_tdi_XYZ(length, delta_f, low_freq_cutoff, + len_arm, psd_components, tdi) + + return fseries
+ + + +def _analytical_csd_tdi_XY(length, delta_f, low_freq_cutoff, + len_arm=None, psd_components=None, tdi=None): + """ The cross-spectrum density between TDI channel X and Y. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of the detector, in the unit of "m". + psd_components : numpy.array + The PSDs of acceleration noise and OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The CSD between TDI-1.5/2.0 channel X and Y. + Notes + ----- + Please see Eq.(56) in <LISA-LCST-SGS-MAN-001(Radler)> for more details. + """ + len_arm = np.float64(len_arm) + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + s_acc_nu, s_oms_nu = psd_components + omega_len = omega_length(fr, len_arm) + csd = (-8*np.sin(omega_len)**2 * np.cos(omega_len) * + (s_oms_nu+4*s_acc_nu)) + if str(tdi) not in ["1.5", "2.0"]: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + if str(tdi) == "2.0": + tdi2_factor = 4*(np.sin(2*omega_len))**2 + csd *= tdi2_factor + fseries = from_numpy_arrays(fr, csd, length, delta_f, low_freq_cutoff) + + return fseries + + +
+[docs] +def analytical_csd_lisa_tdi_XY(length, delta_f, low_freq_cutoff, + len_arm=2.5e9, acc_noise_level=3e-15, + oms_noise_level=15e-12, tdi=None): + """ The cross-spectrum density between LISA's TDI channel X and Y. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of LISA, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The CSD between LISA's TDI-1.5/2.0 channel X and Y. + Notes + ----- + Please see Eq.(56) in <LISA-LCST-SGS-MAN-001(Radler)> for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + psd_components = np.array(lisa_psd_components( + fr, acc_noise_level, oms_noise_level)) + fseries = _analytical_csd_tdi_XY(length, delta_f, low_freq_cutoff, + len_arm, psd_components, tdi) + + return fseries
+ + + +def _analytical_psd_tdi_AE(length, delta_f, low_freq_cutoff, + len_arm=None, psd_components=None, tdi=None): + """ The PSD of TDI-1.5/2.0 channel A and E. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of the detector, in the unit of "m". + psd_components : numpy.array + The PSDs of acceleration noise and OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The PSD of TDI-1.5/2.0 channel A and E. + Notes + ----- + Please see Eq.(58) in <LISA-LCST-SGS-MAN-001(Radler)> for more details. + """ + len_arm = np.float64(len_arm) + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + s_acc_nu, s_oms_nu = psd_components + omega_len = omega_length(fr, len_arm) + psd = (8*(np.sin(omega_len))**2 * + (4*(1+np.cos(omega_len)+np.cos(omega_len)**2)*s_acc_nu + + (2+np.cos(omega_len))*s_oms_nu)) + if str(tdi) not in ["1.5", "2.0"]: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + if str(tdi) == "2.0": + tdi2_factor = 4*(np.sin(2*omega_len))**2 + psd *= tdi2_factor + fseries = from_numpy_arrays(fr, psd, length, delta_f, low_freq_cutoff) + + return fseries + + +
+[docs] +def analytical_psd_lisa_tdi_AE(length, delta_f, low_freq_cutoff, + len_arm=2.5e9, acc_noise_level=3e-15, + oms_noise_level=15e-12, tdi=None): + """ The PSD of LISA's TDI-1.5/2.0 channel A and E. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of LISA, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The PSD of LISA's TDI-1.5/2.0 channel A and E. + Notes + ----- + Please see Eq.(58) in <LISA-LCST-SGS-MAN-001(Radler)> for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + psd_components = np.array(lisa_psd_components( + fr, acc_noise_level, oms_noise_level)) + fseries = _analytical_psd_tdi_AE(length, delta_f, low_freq_cutoff, + len_arm, psd_components, tdi) + + return fseries
+ + + +
+[docs] +def analytical_psd_tianqin_tdi_AE(length, delta_f, low_freq_cutoff, + len_arm=np.sqrt(3)*1e8, + acc_noise_level=1e-15, + oms_noise_level=1e-12, tdi=None): + """ The PSD of TianQin's TDI-1.5/2.0 channel A and E. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of TianQin, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The PSD of TianQin's TDI-1.5/2.0 channel A and E. + Notes + ----- + Please see Table(1) in <10.1088/0264-9381/33/3/035010> + for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + psd_components = np.array(tianqin_psd_components( + fr, acc_noise_level, oms_noise_level)) + fseries = _analytical_psd_tdi_AE(length, delta_f, low_freq_cutoff, + len_arm, psd_components, tdi) + + return fseries
+ + + +
+[docs] +def analytical_psd_taiji_tdi_AE(length, delta_f, low_freq_cutoff, + len_arm=3e9, acc_noise_level=3e-15, + oms_noise_level=8e-12, tdi=None): + """ The PSD of Taiji's TDI-1.5/2.0 channel A and E. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of Taiji, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The PSD of Taiji's TDI-1.5/2.0 channel A and E. + Notes + ----- + Please see <10.1103/PhysRevD.107.064021> for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + psd_components = np.array(taiji_psd_components( + fr, acc_noise_level, oms_noise_level)) + fseries = _analytical_psd_tdi_AE(length, delta_f, low_freq_cutoff, + len_arm, psd_components, tdi) + + return fseries
+ + + +def _analytical_psd_tdi_T(length, delta_f, low_freq_cutoff, + len_arm=None, psd_components=None, tdi=None): + """ The PSD of TDI-1.5/2.0 channel T. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of the detector, in the unit of "m". + psd_components : numpy.array + The PSDs of acceleration noise and OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The PSD of TDI-1.5/2.0 channel T. + Notes + ----- + Please see Eq.(59) in <LISA-LCST-SGS-MAN-001(Radler)> for more details. + """ + len_arm = np.float64(len_arm) + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + s_acc_nu, s_oms_nu = psd_components + omega_len = omega_length(fr, len_arm) + psd = (32*np.sin(omega_len)**2 * np.sin(omega_len/2)**2 * + (4*s_acc_nu*np.sin(omega_len/2)**2 + s_oms_nu)) + if str(tdi) not in ["1.5", "2.0"]: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + if str(tdi) == "2.0": + tdi2_factor = 4*(np.sin(2*omega_len))**2 + psd *= tdi2_factor + fseries = from_numpy_arrays(fr, psd, length, delta_f, low_freq_cutoff) + + return fseries + + +
+[docs] +def analytical_psd_lisa_tdi_T(length, delta_f, low_freq_cutoff, + len_arm=2.5e9, acc_noise_level=3e-15, + oms_noise_level=15e-12, tdi=None): + """ The PSD of LISA's TDI-1.5/2.0 channel T. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of LISA, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The PSD of LISA's TDI-1.5/2.0 channel T. + Notes + ----- + Please see Eq.(59) in <LISA-LCST-SGS-MAN-001(Radler)> for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + psd_components = np.array(lisa_psd_components( + fr, acc_noise_level, oms_noise_level)) + fseries = _analytical_psd_tdi_T(length, delta_f, low_freq_cutoff, + len_arm, psd_components, tdi) + + return fseries
+ + + +
+[docs] +def analytical_psd_tianqin_tdi_T(length, delta_f, low_freq_cutoff, + len_arm=np.sqrt(3)*1e8, + acc_noise_level=1e-15, + oms_noise_level=1e-12, tdi=None): + """ The PSD of TianQin's TDI-1.5/2.0 channel T. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of TianQin, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The PSD of TianQin's TDI-1.5/2.0 channel T. + Notes + ----- + Please see Table(1) in <10.1088/0264-9381/33/3/035010> + for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + psd_components = np.array(tianqin_psd_components( + fr, acc_noise_level, oms_noise_level)) + fseries = _analytical_psd_tdi_T(length, delta_f, low_freq_cutoff, + len_arm, psd_components, tdi) + + return fseries
+ + + +
+[docs] +def analytical_psd_taiji_tdi_T(length, delta_f, low_freq_cutoff, + len_arm=3e9, acc_noise_level=3e-15, + oms_noise_level=8e-12, tdi=None): + """ The PSD of Taiji's TDI-1.5/2.0 channel T. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of Taiji, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The PSD of Taiji's TDI-1.5/2.0 channel T. + Notes + ----- + Please see <10.1103/PhysRevD.107.064021> for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + psd_components = np.array(taiji_psd_components( + fr, acc_noise_level, oms_noise_level)) + fseries = _analytical_psd_tdi_T(length, delta_f, low_freq_cutoff, + len_arm, psd_components, tdi) + + return fseries
+ + + +
+[docs] +def averaged_lisa_fplus_sq_numerical(f, len_arm=2.5e9): + """ A numerical fit for LISA's squared antenna response function, + averaged over sky and polarization angle. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + len_arm : float + The arm length of LISA, in the unit of "m". + + Returns + ------- + fp_sq_numerical : float or numpy.array + The sky and polarization angle averaged squared antenna response. + Notes + ----- + Please see Eq.(36) in <LISA-LCST-SGS-TN-001> for more details. + """ + from astropy.utils.data import download_file + + if len_arm != 2.5e9: + raise ValueError("Currently only support 'len_arm=2.5e9'.") + # Download the numerical LISA averaged response. + url = "https://zenodo.org/record/7497853/files/AvFXp2_Raw.npy" + file_path = download_file(url, cache=True) + freqs, fp_sq = np.load(file_path) + # Padding the end. + freqs = np.append(freqs, 2) + fp_sq = np.append(fp_sq, 0.0012712348970728724) + fp_sq_interp = interp1d(freqs, fp_sq, kind='linear', + fill_value="extrapolate") + fp_sq_numerical = fp_sq_interp(f)/16 + + return fp_sq_numerical
+ + + +
+[docs] +def averaged_fplus_sq_approximated(f, len_arm=None): + r""" A simplified fit for TDI-based space-borne GW detectors' + squared antenna response function, averaged over sky and + polarization angle. + + .. math:: + <\left(F_{X}^{+}\right)^{2}>\approx \frac{3}{20} \frac{1}{ + 1+0.6(\omega L)^{2}} + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + len_arm : float + The arm length of the detector, in the unit of "m". + + Returns + ------- + fp_sq_approx : float or numpy.array + The sky and polarization angle averaged squared antenna response. + Notes + ----- + Please see Eq.(9) in <10.1088/1361-6382/ab1101> for more details. + """ + fp_sq_approx = (3./20.)*(1./(1.+0.6*omega_length(f, len_arm)**2)) + + return fp_sq_approx
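The approximation above is simple enough to evaluate directly. The sketch below reproduces Eq.(9) with plain NumPy; it assumes omega_length(f, L) is the dimensionless phase 2*pi*f*L/c, which is what the expression requires.

import numpy as np

C_SI = 299792458.0  # speed of light, m/s

def omega_times_arm(f, len_arm):
    # stand-in for the module's omega_length helper: 2*pi*f*L/c
    return 2.0 * np.pi * np.asarray(f) * len_arm / C_SI

f = np.logspace(-4, 0, 5)  # Hz
fp_sq_approx = 3.0 / 20.0 / (1.0 + 0.6 * omega_times_arm(f, 2.5e9)**2)
print(fp_sq_approx)  # tends to 3/20 at low frequency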
+ + + +
+[docs] +def averaged_tianqin_fplus_sq_numerical(f, len_arm=np.sqrt(3)*1e8): + """ A numerical fit for TianQin's squared antenna response function, + averaged over sky and polarization angle. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + len_arm : float + The arm length of TianQin, in the unit of "m". + + Returns + ------- + fp_sq_numerical : float or numpy.array + The sky and polarization angle averaged squared antenna response. + Notes + ----- + Please see Eq.(15-16) in <10.1103/PhysRevD.100.043003> + for more details. + """ + base = averaged_fplus_sq_approximated(f, len_arm) + a = [1, 1e-4, 2639e-4, 231/5*1e-4, -2093/1.25*1e-4, 2173e-5, + 2101e-6, 3027/2*1e-5, -42373/5*1e-6, 176087e-8, + -8023/5*1e-7, 5169e-9] + omega_len = omega_length(f, len_arm) + omega_len_low_f = omega_len[omega_len < 4.1] + omega_len_high_f = omega_len[omega_len >= 4.1] + base_low_f = base[:len(omega_len_low_f)] + base_high_f = base[len(omega_len_low_f):] + low_f_modulation = np.polyval(a[::-1], omega_len_low_f) + high_f_modulation = np.exp( + -0.322 * np.sin(2*omega_len_high_f-4.712) + 0.078 + ) + low_f_result = np.multiply(base_low_f, low_f_modulation) + high_f_result = np.multiply(base_high_f, high_f_modulation) + fp_sq_numerical = np.concatenate((low_f_result, high_f_result)) + + return fp_sq_numerical
+ + + +
+[docs] +def averaged_response_lisa_tdi(f, len_arm=2.5e9, tdi=None): + """ LISA's TDI-1.5/2.0 response function to GW, + averaged over sky and polarization angle. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + len_arm : float + The arm length of LISA, in the unit of "m". + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + response_tdi : float or numpy.array + The sky and polarization angle averaged TDI-1.5/2.0 response to GW. + Notes + ----- + Please see Eq.(39-40) in <LISA-LCST-SGS-TN-001> for more details. + """ + omega_len = omega_length(f, len_arm) + ave_fp2 = averaged_lisa_fplus_sq_numerical(f, len_arm) + response_tdi = (4*omega_len)**2 * np.sin(omega_len)**2 * ave_fp2 + if str(tdi) not in ["1.5", "2.0"]: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + if str(tdi) == "2.0": + tdi2_factor = 4*(np.sin(2*omega_len))**2 + response_tdi *= tdi2_factor + + return response_tdi
+ + + +
+[docs] +def averaged_response_tianqin_tdi(f, len_arm=np.sqrt(3)*1e8, tdi=None): + """ TianQin's TDI-1.5/2.0 response function to GW, + averaged over sky and polarization angle. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + len_arm : float + The arm length of TianQin, in the unit of "m". + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + response_tdi : float or numpy.array + The sky and polarization angle averaged TDI-1.5/2.0 response to GW. + """ + omega_len = omega_length(f, len_arm) + ave_fp2 = averaged_tianqin_fplus_sq_numerical(f, len_arm) + response_tdi = (4*omega_len)**2 * np.sin(omega_len)**2 * ave_fp2 + if str(tdi) not in ["1.5", "2.0"]: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + if str(tdi) == "2.0": + tdi2_factor = 4*(np.sin(2*omega_len))**2 + response_tdi *= tdi2_factor + + return response_tdi
+ + + +
+[docs] +def averaged_response_taiji_tdi(f, len_arm=3e9, tdi=None): + """ Taiji's TDI-1.5/2.0 response function to GW, + averaged over sky and polarization angle. + + Parameters + ---------- + f : float or numpy.array + The frequency or frequency range, in the unit of "Hz". + len_arm : float + The arm length of Taiji, in the unit of "m". + tdi : str + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + response_tdi : float or numpy.array + The sky and polarization angle averaged TDI-1.5/2.0 response to GW. + """ + omega_len = omega_length(f, len_arm) + ave_fp2 = averaged_fplus_sq_approximated(f, len_arm) + response_tdi = (4*omega_len)**2 * np.sin(omega_len)**2 * ave_fp2 + if str(tdi) not in ["1.5", "2.0"]: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + if str(tdi) == "2.0": + tdi2_factor = 4*(np.sin(2*omega_len))**2 + response_tdi *= tdi2_factor + + return response_tdi
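The three response functions above share one structure: the averaged squared antenna pattern times (4*omega*L)^2 * sin^2(omega*L), with an extra factor 4*sin^2(2*omega*L) for TDI-2.0. A standalone sketch of that structure, using the approximated antenna pattern and assuming omega_length = 2*pi*f*L/c:

import numpy as np

C_SI = 299792458.0  # speed of light, m/s

def averaged_response_tdi_sketch(f, len_arm, tdi="1.5"):
    # common structure of the averaged TDI responses above
    omega_len = 2.0 * np.pi * np.asarray(f) * len_arm / C_SI
    ave_fp2 = 3.0 / 20.0 / (1.0 + 0.6 * omega_len**2)    # approximated pattern
    response = (4.0 * omega_len)**2 * np.sin(omega_len)**2 * ave_fp2
    if tdi == "2.0":
        response *= 4.0 * np.sin(2.0 * omega_len)**2      # extra TDI-2.0 factor
    return response

print(averaged_response_tdi_sketch(np.logspace(-4, -1, 4), 3e9, tdi="2.0"))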
+ + + +
+[docs] +def sensitivity_curve_lisa_semi_analytical(length, delta_f, low_freq_cutoff, + len_arm=2.5e9, + acc_noise_level=3e-15, + oms_noise_level=15e-12): + """ The semi-analytical LISA's sensitivity curve (6-links), + averaged over sky and polarization angle. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of LISA, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + + Returns + ------- + fseries : FrequencySeries + The sky and polarization angle averaged semi-analytical + LISA's sensitivity curve (6-links). + Notes + ----- + Please see Eq.(42-43) in <LISA-LCST-SGS-TN-001> for more details. + """ + len_arm = np.float64(len_arm) + acc_noise_level = np.float64(acc_noise_level) + oms_noise_level = np.float64(oms_noise_level) + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + fp_sq = averaged_lisa_fplus_sq_numerical(fr, len_arm) + s_acc_nu, s_oms_nu = lisa_psd_components( + fr, acc_noise_level, oms_noise_level) + omega_len = omega_length(fr, len_arm) + sense_curve = ((s_oms_nu + s_acc_nu*(3+np.cos(2*omega_len))) / + (omega_len**2*fp_sq)) + fseries = from_numpy_arrays(fr, sense_curve/2, + length, delta_f, low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def sensitivity_curve_tianqin_analytical(length, delta_f, low_freq_cutoff, + len_arm=np.sqrt(3)*1e8, + acc_noise_level=1e-15, + oms_noise_level=1e-12): + """ The analytical TianQin's sensitivity curve (6-links), + averaged over sky and polarization angle. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of TianQin, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + + Returns + ------- + fseries : FrequencySeries + The sky and polarization angle averaged analytical + TianQin's sensitivity curve (6-links). + """ + len_arm = np.float64(len_arm) + acc_noise_level = np.float64(acc_noise_level) + oms_noise_level = np.float64(oms_noise_level) + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + fp_sq = averaged_tianqin_fplus_sq_numerical(fr, len_arm) + s_acc_nu, s_oms_nu = tianqin_psd_components( + fr, acc_noise_level, oms_noise_level) + omega_len = omega_length(fr, len_arm) + sense_curve = ((s_oms_nu + s_acc_nu*(3+np.cos(2*omega_len))) / + (omega_len**2*fp_sq)) + fseries = from_numpy_arrays(fr, sense_curve/2, + length, delta_f, low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def sensitivity_curve_taiji_analytical(length, delta_f, low_freq_cutoff, + len_arm=3e9, acc_noise_level=3e-15, + oms_noise_level=8e-12): + """ The analytical Taiji's sensitivity curve (6-links), + averaged over sky and polarization angle. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of Taiji, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + + Returns + ------- + fseries : FrequencySeries + The sky and polarization angle averaged analytical + Taiji's sensitivity curve (6-links). + """ + len_arm = np.float64(len_arm) + acc_noise_level = np.float64(acc_noise_level) + oms_noise_level = np.float64(oms_noise_level) + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + fp_sq = averaged_fplus_sq_approximated(fr, len_arm) + s_acc_nu, s_oms_nu = taiji_psd_components( + fr, acc_noise_level, oms_noise_level) + omega_len = omega_length(fr, len_arm) + sense_curve = ((s_oms_nu + s_acc_nu*(3+np.cos(2*omega_len))) / + (omega_len**2*fp_sq)) + fseries = from_numpy_arrays(fr, sense_curve/2, + length, delta_f, low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def sensitivity_curve_lisa_SciRD(length, delta_f, low_freq_cutoff): + """ The analytical LISA's sensitivity curve in SciRD, + averaged over sky and polarization angle. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + + Returns + ------- + fseries : FrequencySeries + The sky and polarization angle averaged analytical + LISA's sensitivity curve in SciRD. + Notes + ----- + Please see Eq.(114) in <LISA-LCST-SGS-TN-001> for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + s_I = 5.76e-48 * (1+(4e-4/fr)**2) + s_II = 3.6e-41 + R = 1 + (fr/2.5e-2)**2 + sense_curve = 10/3 * (s_I/(2*np.pi*fr)**4+s_II) * R + fseries = from_numpy_arrays(fr, sense_curve, length, + delta_f, low_freq_cutoff) + + return fseries
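Since the SciRD curve is a closed-form expression, the same formula can be evaluated with plain NumPy as a quick check against the function above:

import numpy as np

f = np.logspace(-4, 0, 200)                      # Hz
s_I = 5.76e-48 * (1.0 + (4e-4 / f)**2)           # low-frequency (acceleration) term
s_II = 3.6e-41                                   # flat high-frequency term
R = 1.0 + (f / 2.5e-2)**2                        # response roll-up
sh_scird = 10.0 / 3.0 * (s_I / (2.0 * np.pi * f)**4 + s_II) * R
print(sh_scird.min())                            # the bucket sits around a few mHz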
+ + + +
+[docs] +def confusion_fit_lisa(length, delta_f, low_freq_cutoff, duration=1.0): + """ The LISA's sensitivity curve for Galactic confusion noise, + averaged over sky and polarization angle. No instrumental noise. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + duration : float + The duration of observation, between 0 and 10, in the unit of years. + + Returns + ------- + fseries : FrequencySeries + The sky and polarization angle averaged + LISA's sensitivity curve for Galactic confusion noise. + No instrumental noise. + Notes + ----- + Please see Eq.(85-86) in <LISA-LCST-SGS-TN-001> for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + f1 = 10**(-0.25*np.log10(duration)-2.7) + fk = 10**(-0.27*np.log10(duration)-2.47) + sh_confusion = (0.5*1.14e-44*fr**(-7/3)*np.exp(-(fr/f1)**1.8) * + (1.0+np.tanh((fk-fr)/0.31e-3))) + fseries = from_numpy_arrays(fr, sh_confusion, length, delta_f, + low_freq_cutoff) + + return fseries
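The confusion fit itself is a short closed-form expression in frequency and observation time; the sketch below evaluates it directly for a 4-year mission, mirroring the body of the function above:

import numpy as np

f = np.logspace(-4, -2, 200)                     # Hz
t_obs = 4.0                                      # years
f1 = 10.0**(-0.25 * np.log10(t_obs) - 2.7)
fk = 10.0**(-0.27 * np.log10(t_obs) - 2.47)
sh_conf = (0.5 * 1.14e-44 * f**(-7.0 / 3.0) * np.exp(-(f / f1)**1.8) *
           (1.0 + np.tanh((fk - f) / 0.31e-3)))
print(f1, fk, sh_conf.max())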
+ + + +
+[docs] +def confusion_fit_tianqin(length, delta_f, low_freq_cutoff, duration=1.0): + """ The TianQin's sensitivity curve for Galactic confusion noise, + averaged over sky and polarization angle. No instrumental noise. + Only valid for 0.5 mHz < f < 10 mHz. Note that the results between + 0.5, 1, 2, 4, and 5 years are extrapolated, might be non-physical. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + duration : float + The duration of observation, between 0 and 5, in the unit of years. + + Returns + ------- + fseries : FrequencySeries + The sky and polarization angle averaged + TianQin's sensitivity curve for Galactic confusion noise. + No instrumental noise. + Notes + ----- + Please see Table(II) in <10.1103/PhysRevD.102.063021> + for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + t_obs = [0.5, 1, 2, 4, 5] + a0 = [-18.6, -18.6, -18.6, -18.6, -18.6] + a1 = [-1.22, -1.13, -1.45, -1.43, -1.51] + a2 = [0.009, -0.945, 0.315, -0.687, -0.710] + a3 = [-1.87, -1.02, -1.19, 0.24, -1.13] + a4 = [0.65, 4.05, -4.48, -0.15, -0.83] + a5 = [3.6, -4.5, 10.8, -1.8, 13.2] + a6 = [-4.6, -0.5, -9.4, -3.2, -19.1] + fit_a0 = interp1d(t_obs, a0, kind='cubic', fill_value="extrapolate") + fit_a1 = interp1d(t_obs, a1, kind='cubic', fill_value="extrapolate") + fit_a2 = interp1d(t_obs, a2, kind='cubic', fill_value="extrapolate") + fit_a3 = interp1d(t_obs, a3, kind='cubic', fill_value="extrapolate") + fit_a4 = interp1d(t_obs, a4, kind='cubic', fill_value="extrapolate") + fit_a5 = interp1d(t_obs, a5, kind='cubic', fill_value="extrapolate") + fit_a6 = interp1d(t_obs, a6, kind='cubic', fill_value="extrapolate") + if duration not in t_obs: + raise Warning("Note that the results between " + + "0.5, 1, 2, 4, and 5 years are extrapolated, " + + "might be non-physical.") + # 10/3 is the factor for sky-average, the original fit in the paper + # is not sky-averaged. + sh_confusion = 10./3 * np.power( + 10, + fit_a0(duration) + + fit_a1(duration) * np.log10(fr*1e3) + + fit_a2(duration) * np.log10(fr*1e3)**2 + + fit_a3(duration) * np.log10(fr*1e3)**3 + + fit_a4(duration) * np.log10(fr*1e3)**4 + + fit_a5(duration) * np.log10(fr*1e3)**5 + + fit_a6(duration) * np.log10(fr*1e3)**6 + )**2 + # avoid the jump of values + sh_confusion[(fr > 3e-4) & (fr < 5e-4)] = \ + sh_confusion[(np.abs(fr - 5e-4)).argmin()] + sh_confusion[(fr < 3e-4) | (fr > 1e-2)] = 0 + fseries = from_numpy_arrays(fr, sh_confusion, length, delta_f, + low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def confusion_fit_taiji(length, delta_f, low_freq_cutoff, duration=1.0): + """ The Taiji's sensitivity curve for Galactic confusion noise, + averaged over sky and polarization angle. No instrumental noise. + Only valid for 0.1 mHz < f < 10 mHz. Note that the results between + 0.5, 1, 2, and 4 years are extrapolated, might be non-physical. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + duration : float + The duration of observation, between 0 and 4, in the unit of years. + + Returns + ------- + fseries : FrequencySeries + The sky and polarization angle averaged + Taiji's sensitivity curve for Galactic confusion noise. + No instrumental noise. + Notes + ----- + Please see Eq.(6) and Table(I) in <10.1103/PhysRevD.107.064021> + for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + t_obs = [0.5, 1, 2, 4] + a0 = [-85.3498, -85.4336, -85.3919, -85.5448] + a1 = [-2.64899, -2.46276, -2.69735, -3.23671] + a2 = [-0.0699707, -0.183175, -0.749294, -1.64187] + a3 = [-0.478447, -0.884147, -1.15302, -1.14711] + a4 = [-0.334821, -0.427176, -0.302761, 0.0325887] + a5 = [0.0658353, 0.128666, 0.175521, 0.187854] + fit_a0 = interp1d(t_obs, a0, kind='cubic', fill_value="extrapolate") + fit_a1 = interp1d(t_obs, a1, kind='cubic', fill_value="extrapolate") + fit_a2 = interp1d(t_obs, a2, kind='cubic', fill_value="extrapolate") + fit_a3 = interp1d(t_obs, a3, kind='cubic', fill_value="extrapolate") + fit_a4 = interp1d(t_obs, a4, kind='cubic', fill_value="extrapolate") + fit_a5 = interp1d(t_obs, a5, kind='cubic', fill_value="extrapolate") + if duration not in t_obs: + raise Warning("Note that the results between " + + "0.5, 1, 2, and 4 years are extrapolated, " + + "might be non-physical.") + sh_confusion = np.exp( + fit_a0(duration) + + fit_a1(duration) * np.log(fr*1e3) + + fit_a2(duration) * np.log(fr*1e3)**2 + + fit_a3(duration) * np.log(fr*1e3)**3 + + fit_a4(duration) * np.log(fr*1e3)**4 + + fit_a5(duration) * np.log(fr*1e3)**5 + ) + sh_confusion[(fr < 1e-4) | (fr > 1e-2)] = 0 + fseries = from_numpy_arrays(fr, sh_confusion, length, delta_f, + low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def sensitivity_curve_lisa_confusion(length, delta_f, low_freq_cutoff, + len_arm=2.5e9, acc_noise_level=3e-15, + oms_noise_level=15e-12, + base_model="semi", duration=1.0): + """ The LISA's sensitivity curve with Galactic confusion noise, + averaged over sky and polarization angle. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of LISA, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + base_model : string + The base model of sensitivity curve, chosen from "semi" or "SciRD". + duration : float + The duration of observation, between 0 and 10, in the unit of years. + + Returns + ------- + fseries : FrequencySeries + The sky and polarization angle averaged + LISA's sensitivity curve with Galactic confusion noise. + Notes + ----- + Please see Eq.(85-86) in <LISA-LCST-SGS-TN-001> for more details. + """ + if base_model == "semi": + base_curve = sensitivity_curve_lisa_semi_analytical( + length, delta_f, low_freq_cutoff, + len_arm, acc_noise_level, oms_noise_level) + elif base_model == "SciRD": + base_curve = sensitivity_curve_lisa_SciRD( + length, delta_f, low_freq_cutoff) + else: + raise ValueError("Must choose from 'semi' or 'SciRD'.") + if duration < 0 or duration > 10: + raise ValueError("Must between 0 and 10.") + fseries_confusion = confusion_fit_lisa( + length, delta_f, low_freq_cutoff, duration) + fseries = from_numpy_arrays(base_curve.sample_frequencies, + base_curve+fseries_confusion, + length, delta_f, low_freq_cutoff) + + return fseries
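A minimal usage sketch (hedged: it assumes the functions on this page are importable from pycbc.psd.analytical_space). The "SciRD" base model is used here because the "semi" model downloads the numerical response file on first use.

from pycbc.psd.analytical_space import sensitivity_curve_lisa_confusion

# SciRD base curve plus 4 years of Galactic confusion noise
sens = sensitivity_curve_lisa_confusion(length=100000, delta_f=1e-5,
                                        low_freq_cutoff=1e-4,
                                        base_model="SciRD", duration=4.0)
print(sens.sample_frequencies[100], sens[100])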
+ + + +
+[docs] +def sensitivity_curve_tianqin_confusion(length, delta_f, low_freq_cutoff, + len_arm=np.sqrt(3)*1e8, + acc_noise_level=1e-15, + oms_noise_level=1e-12, duration=1.0): + """ The TianQin's sensitivity curve with Galactic confusion noise, + averaged over sky and polarization angle. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of TianQin, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + duration : float + The duration of observation, between 0 and 5, in the unit of years. + + Returns + ------- + fseries : FrequencySeries + The sky and polarization angle averaged + TianQin's sensitivity curve with Galactic confusion noise. + """ + base_curve = sensitivity_curve_tianqin_analytical( + length, delta_f, low_freq_cutoff, + len_arm, acc_noise_level, oms_noise_level) + if duration < 0 or duration > 5: + raise ValueError("Must between 0 and 5.") + fseries_confusion = confusion_fit_tianqin( + length, delta_f, low_freq_cutoff, duration) + fseries = from_numpy_arrays(base_curve.sample_frequencies, + base_curve+fseries_confusion, + length, delta_f, low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def sensitivity_curve_taiji_confusion(length, delta_f, low_freq_cutoff, + len_arm=3e9, acc_noise_level=3e-15, + oms_noise_level=8e-12, duration=1.0): + """ The Taiji's sensitivity curve with Galactic confusion noise, + averaged over sky and polarization angle. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of Taiji, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + duration : float + The duration of observation, between 0 and 4, in the unit of years. + + Returns + ------- + fseries : FrequencySeries + The sky and polarization angle averaged + Taiji's sensitivity curve with Galactic confusion noise. + """ + base_curve = sensitivity_curve_taiji_analytical( + length, delta_f, low_freq_cutoff, + len_arm, acc_noise_level, oms_noise_level) + if duration < 0 or duration > 4: + raise ValueError("Must between 0 and 4.") + fseries_confusion = confusion_fit_taiji( + length, delta_f, low_freq_cutoff, duration) + fseries = from_numpy_arrays(base_curve.sample_frequencies, + base_curve+fseries_confusion, + length, delta_f, low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def sh_transformed_psd_lisa_tdi_XYZ(length, delta_f, low_freq_cutoff, + len_arm=2.5e9, acc_noise_level=3e-15, + oms_noise_level=15e-12, + base_model="semi", duration=1.0, + tdi=None): + """ The TDI-1.5/2.0 PSD (X,Y,Z channel) for LISA + with Galactic confusion noise, transformed from LISA sensitivity curve. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of LISA, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + base_model : string + The base model of sensitivity curve, chosen from "semi" or "SciRD". + duration : float + The duration of observation, between 0 and 10, in the unit of years. + tdi : string + The version of TDI, currently only for 1.5 or 2.0. + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (X,Y,Z channel) for LISA with Galactic confusion + noise, transformed from LISA sensitivity curve. + Notes + ----- + Please see Eq.(7,41-43) in <LISA-LCST-SGS-TN-001> for more details. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + if str(tdi) in ["1.5", "2.0"]: + response = averaged_response_lisa_tdi(fr, len_arm, tdi) + else: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + fseries_response = from_numpy_arrays(fr, np.array(response), + length, delta_f, low_freq_cutoff) + sh = sensitivity_curve_lisa_confusion(length, delta_f, low_freq_cutoff, + len_arm, acc_noise_level, + oms_noise_level, base_model, + duration) + psd = 2*sh.data * fseries_response.data + fseries = from_numpy_arrays(sh.sample_frequencies, psd, + length, delta_f, low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def semi_analytical_psd_lisa_confusion_noise(length, delta_f, low_freq_cutoff, + len_arm=2.5e9, duration=1.0, + tdi=None): + """ The TDI-1.5/2.0 PSD (X,Y,Z channel) for LISA Galactic confusion noise, + no instrumental noise. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of LISA, in the unit of "m". + duration : float + The duration of observation, between 0 and 10, in the unit of years. + tdi : string + The version of TDI, currently only for 1.5 or 2.0. + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (X,Y,Z channel) for LISA Galactic confusion + noise, no instrumental noise. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + if str(tdi) in ["1.5", "2.0"]: + response = averaged_response_lisa_tdi(fr, len_arm, tdi) + else: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + fseries_response = from_numpy_arrays(fr, np.array(response), + length, delta_f, low_freq_cutoff) + fseries_confusion = confusion_fit_lisa( + length, delta_f, low_freq_cutoff, duration) + psd_confusion = 2*fseries_confusion.data * fseries_response.data + fseries = from_numpy_arrays(fseries_confusion.sample_frequencies, + psd_confusion, length, delta_f, + low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def analytical_psd_tianqin_confusion_noise(length, delta_f, low_freq_cutoff, + len_arm=np.sqrt(3)*1e8, + duration=1.0, tdi=None): + """ The TDI-1.5/2.0 PSD (X,Y,Z channel) for TianQin Galactic confusion + noise, no instrumental noise. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of TianQin, in the unit of "m". + duration : float + The duration of observation, between 0 and 5, in the unit of years. + tdi : string + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (X,Y,Z channel) for TianQin Galactic confusion + noise, no instrumental noise. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + if str(tdi) in ["1.5", "2.0"]: + response = averaged_response_tianqin_tdi(fr, len_arm, tdi) + else: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + fseries_response = from_numpy_arrays(fr, np.array(response), + length, delta_f, low_freq_cutoff) + fseries_confusion = confusion_fit_tianqin( + length, delta_f, low_freq_cutoff, duration) + psd_confusion = 2*fseries_confusion.data * fseries_response.data + fseries = from_numpy_arrays(fseries_confusion.sample_frequencies, + psd_confusion, length, delta_f, + low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def analytical_psd_taiji_confusion_noise(length, delta_f, low_freq_cutoff, + len_arm=3e9, duration=1.0, + tdi=None): + """ The TDI-1.5/2.0 PSD (X,Y,Z channel) for Taiji Galactic confusion + noise, no instrumental noise. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of Taiji, in the unit of "m". + duration : float + The duration of observation, between 0 and 4, in the unit of years. + tdi : string + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (X,Y,Z channel) for Taiji Galactic confusion + noise, no instrumental noise. + """ + fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length) + if str(tdi) in ["1.5", "2.0"]: + response = averaged_response_taiji_tdi(fr, len_arm, tdi) + else: + raise ValueError("The version of TDI, currently only for 1.5 or 2.0.") + fseries_response = from_numpy_arrays(fr, np.array(response), + length, delta_f, low_freq_cutoff) + fseries_confusion = confusion_fit_taiji( + length, delta_f, low_freq_cutoff, duration) + psd_confusion = 2*fseries_confusion.data * fseries_response.data + fseries = from_numpy_arrays(fseries_confusion.sample_frequencies, + psd_confusion, length, delta_f, + low_freq_cutoff) + + return fseries
+ + + +
+[docs] +def analytical_psd_lisa_tdi_AE_confusion(length, delta_f, low_freq_cutoff, + len_arm=2.5e9, acc_noise_level=3e-15, + oms_noise_level=15e-12, + duration=1.0, tdi=None): + """ The TDI-1.5/2.0 PSD (A,E channel) for LISA + with Galactic confusion noise. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of LISA, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + duration : float + The duration of observation, between 0 and 10, in the unit of years. + tdi : string + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (A,E channel) for LISA with Galactic confusion + noise. + """ + psd_AE = analytical_psd_lisa_tdi_AE(length, delta_f, low_freq_cutoff, + len_arm, acc_noise_level, + oms_noise_level, tdi) + psd_X_confusion = semi_analytical_psd_lisa_confusion_noise( + length, delta_f, low_freq_cutoff, + len_arm, duration, tdi) + # S_A = S_E = S_X - S_XY, confusion noise's contribution to + # S_XY is -0.5 * psd_X_confusion, while for S_X is psd_X_confusion. + # S_T = S_X + 2*S_XY, so S_T keeps the same. + fseries = psd_AE + 1.5 * psd_X_confusion + + return fseries
+ + + +
+[docs] +def analytical_psd_tianqin_tdi_AE_confusion(length, delta_f, low_freq_cutoff, + len_arm=np.sqrt(3)*1e8, + acc_noise_level=1e-15, + oms_noise_level=1e-12, + duration=1.0, tdi=None): + """ The TDI-1.5/2.0 PSD (A,E channel) for TianQin + with Galactic confusion noise. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of TianQin, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + duration : float + The duration of observation, between 0 and 5, in the unit of years. + tdi : string + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (A,E channel) for TianQin with Galactic confusion + noise. + """ + psd_AE = analytical_psd_tianqin_tdi_AE(length, delta_f, + low_freq_cutoff, + len_arm, acc_noise_level, + oms_noise_level, tdi) + psd_X_confusion = analytical_psd_tianqin_confusion_noise( + length, delta_f, low_freq_cutoff, + len_arm, duration, tdi) + # S_A = S_E = S_X - S_XY, confusion noise's contribution to + # S_XY is -0.5 * psd_X_confusion, while for S_X is psd_X_confusion. + # S_T = S_X + 2*S_XY, so S_T keeps the same. + fseries = psd_AE + 1.5 * psd_X_confusion + + return fseries
+ + + +
+[docs] +def analytical_psd_taiji_tdi_AE_confusion(length, delta_f, low_freq_cutoff, + len_arm=3e9, acc_noise_level=3e-15, + oms_noise_level=8e-12, + duration=1.0, tdi=None): + """ The TDI-1.5/2.0 PSD (A,E channel) for Taiji + with Galactic confusion noise. + + Parameters + ---------- + length : int + Length of output Frequencyseries. + delta_f : float + Frequency step for output FrequencySeries. + low_freq_cutoff : float + Low-frequency cutoff for output FrequencySeries. + len_arm : float + The arm length of Taiji, in the unit of "m". + acc_noise_level : float + The level of acceleration noise. + oms_noise_level : float + The level of OMS noise. + duration : float + The duration of observation, between 0 and 4, in the unit of years. + tdi : string + The version of TDI. Choose from "1.5" or "2.0". + + Returns + ------- + fseries : FrequencySeries + The TDI-1.5/2.0 PSD (A,E channel) for Taiji with Galactic confusion + noise. + """ + psd_AE = analytical_psd_taiji_tdi_AE(length, delta_f, low_freq_cutoff, + len_arm, acc_noise_level, + oms_noise_level, tdi) + psd_X_confusion = analytical_psd_taiji_confusion_noise( + length, delta_f, low_freq_cutoff, + len_arm, duration, tdi) + # S_A = S_E = S_X - S_XY, confusion noise's contribution to + # S_XY is -0.5 * psd_X_confusion, while for S_X is psd_X_confusion. + # S_T = S_X + 2*S_XY, so S_T keeps the same. + fseries = psd_AE + 1.5 * psd_X_confusion + + return fseries
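The comment repeated in the three A/E functions above can be checked with trivial arithmetic: if the confusion noise contributes c to S_X and -c/2 to S_XY, then S_A = S_X - S_XY gains 1.5*c while S_T = S_X + 2*S_XY is unchanged. A toy check of that bookkeeping:

# toy bookkeeping check for the comment above
c = 1.0                          # confusion contribution to S_X
dS_X, dS_XY = c, -0.5 * c        # and to the cross-spectrum S_XY
assert dS_X - dS_XY == 1.5 * c   # extra term added to S_A = S_E
assert dS_X + 2 * dS_XY == 0.0   # S_T is unaffected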
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/psd/estimate.html b/latest/html/_modules/pycbc/psd/estimate.html new file mode 100644 index 00000000000..7b6f65057e8 --- /dev/null +++ b/latest/html/_modules/pycbc/psd/estimate.html @@ -0,0 +1,498 @@ + + + + + + pycbc.psd.estimate — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.psd.estimate

+# Copyright (C) 2012 Tito Dal Canton
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""Utilites to estimate PSDs from data.
+"""
+
+import numpy
+from pycbc.types import Array, FrequencySeries, TimeSeries, zeros
+from pycbc.types import real_same_precision_as, complex_same_precision_as
+from pycbc.fft import fft, ifft
+
+# Change to True in front-end if you want this function to use caching
+# This is a mostly-hidden optimization option that most users will not want
+# to use. It is used in PyCBC Live
+USE_CACHING_FOR_WELCH_FFTS = False
+USE_CACHING_FOR_INV_SPEC_TRUNC = False
+# If using caching we want output to be unique if called at different places
+# (and if called from different modules/functions), these unique IDs achieve
+# that. The numbers are not significant, only that they are unique.
+WELCH_UNIQUE_ID = 438716587
+INVSPECTRUNC_UNIQUE_ID = 100257896
+
+
+[docs] +def median_bias(n): + """Calculate the bias of the median average PSD computed from `n` segments. + + Parameters + ---------- + n : int + Number of segments used in PSD estimation. + + Returns + ------- + ans : float + Calculated bias. + + Raises + ------ + ValueError + For non-integer or non-positive `n`. + + Notes + ----- + See arXiv:gr-qc/0509116 appendix B for details. + """ + if type(n) is not int or n <= 0: + raise ValueError('n must be a positive integer') + if n >= 1000: + return numpy.log(2) + ans = 1 + for i in range(1, (n - 1) // 2 + 1): + ans += 1.0 / (2*i + 1) - 1.0 / (2*i) + return ans
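A quick demonstration of the bias factor (hedged: it assumes median_bias is importable from pycbc.psd.estimate, the module rendered on this page):

import numpy
from pycbc.psd.estimate import median_bias

for n in (1, 3, 15, 127, 999):
    print(n, median_bias(n))
print("log(2) =", numpy.log(2))   # the bias approaches log(2) for many segments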
+ + +
+[docs] +def welch(timeseries, seg_len=4096, seg_stride=2048, window='hann', + avg_method='median', num_segments=None, require_exact_data_fit=False): + """PSD estimator based on Welch's method. + + Parameters + ---------- + timeseries : TimeSeries + Time series for which the PSD is to be estimated. + seg_len : int + Segment length in samples. + seg_stride : int + Separation between consecutive segments, in samples. + window : {'hann', numpy.ndarray} + Function used to window segments before Fourier transforming, or + a `numpy.ndarray` that specifies the window. + avg_method : {'median', 'mean', 'median-mean'} + Method used for averaging individual segment PSDs. + + Returns + ------- + psd : FrequencySeries + Frequency series containing the estimated PSD. + + Raises + ------ + ValueError + For invalid choices of `seg_len`, `seg_stride` `window` and + `avg_method` and for inconsistent combinations of len(`timeseries`), + `seg_len` and `seg_stride`. + + Notes + ----- + See arXiv:gr-qc/0509116 for details. + """ + from pycbc.strain.strain import execute_cached_fft + + window_map = { + 'hann': numpy.hanning + } + + # sanity checks + if isinstance(window, numpy.ndarray) and window.size != seg_len: + raise ValueError('Invalid window: incorrect window length') + if not isinstance(window, numpy.ndarray) and window not in window_map: + raise ValueError('Invalid window: unknown window {!r}'.format(window)) + if avg_method not in ('mean', 'median', 'median-mean'): + raise ValueError('Invalid averaging method') + if type(seg_len) is not int or type(seg_stride) is not int \ + or seg_len <= 0 or seg_stride <= 0: + raise ValueError('Segment length and stride must be positive integers') + + if timeseries.precision == 'single': + fs_dtype = numpy.complex64 + elif timeseries.precision == 'double': + fs_dtype = numpy.complex128 + + num_samples = len(timeseries) + if num_segments is None: + num_segments = int(num_samples // seg_stride) + # NOTE: Is this not always true? + if (num_segments - 1) * seg_stride + seg_len > num_samples: + num_segments -= 1 + + if not require_exact_data_fit: + data_len = (num_segments - 1) * seg_stride + seg_len + + # Get the correct amount of data + if data_len < num_samples: + diff = num_samples - data_len + start = diff // 2 + end = num_samples - diff // 2 + # Want this to be integers so if diff is odd, catch it here. + if diff % 2: + start = start + 1 + + timeseries = timeseries[start:end] + num_samples = len(timeseries) + if data_len > num_samples: + err_msg = "I was asked to estimate a PSD on %d " %(data_len) + err_msg += "data samples. However the data provided only contains " + err_msg += "%d data samples." %(num_samples) + + if num_samples != (num_segments - 1) * seg_stride + seg_len: + raise ValueError('Incorrect choice of segmentation parameters') + + if not isinstance(window, numpy.ndarray): + window = window_map[window](seg_len) + w = Array(window.astype(timeseries.dtype)) + + # calculate psd of each segment + delta_f = 1. 
/ timeseries.delta_t / seg_len + if not USE_CACHING_FOR_WELCH_FFTS: + segment_tilde = FrequencySeries( + numpy.zeros(int(seg_len / 2 + 1)), + delta_f=delta_f, + dtype=fs_dtype, + ) + + segment_psds = [] + for i in range(num_segments): + segment_start = i * seg_stride + segment_end = segment_start + seg_len + segment = timeseries[segment_start:segment_end] + assert len(segment) == seg_len + if not USE_CACHING_FOR_WELCH_FFTS: + fft(segment * w, segment_tilde) + else: + segment_tilde = execute_cached_fft(segment * w, + uid=WELCH_UNIQUE_ID) + seg_psd = abs(segment_tilde * segment_tilde.conj()).numpy() + + #halve the DC and Nyquist components to be consistent with TO10095 + seg_psd[0] /= 2 + seg_psd[-1] /= 2 + + segment_psds.append(seg_psd) + + segment_psds = numpy.array(segment_psds) + + if avg_method == 'mean': + psd = numpy.mean(segment_psds, axis=0) + elif avg_method == 'median': + psd = numpy.median(segment_psds, axis=0) / median_bias(num_segments) + elif avg_method == 'median-mean': + odd_psds = segment_psds[::2] + even_psds = segment_psds[1::2] + odd_median = numpy.median(odd_psds, axis=0) / \ + median_bias(len(odd_psds)) + even_median = numpy.median(even_psds, axis=0) / \ + median_bias(len(even_psds)) + psd = (odd_median + even_median) / 2 + + psd *= 2 * delta_f * seg_len / (w*w).sum() + + return FrequencySeries(psd, delta_f=delta_f, dtype=timeseries.dtype, + epoch=timeseries.start_time)
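A minimal usage sketch for the estimator above (hedged: welch and TimeSeries are assumed importable from pycbc.psd and pycbc.types, as in released PyCBC versions). Unit-variance white noise sampled at 1/delta_t should give a roughly flat one-sided PSD close to 2*delta_t:

import numpy
from pycbc.types import TimeSeries
from pycbc.psd import welch

delta_t = 1.0 / 4096
noise = TimeSeries(numpy.random.normal(size=4096 * 64), delta_t=delta_t)
psd = welch(noise, seg_len=4096, seg_stride=2048, avg_method='median')
print(psd.delta_f, float(psd[100]), 2 * delta_t)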
+ + +
+[docs] +def inverse_spectrum_truncation(psd, max_filter_len, low_frequency_cutoff=None, trunc_method=None): + """Modify a PSD such that the impulse response associated with its inverse + square root is no longer than `max_filter_len` time samples. In practice + this corresponds to a coarse graining or smoothing of the PSD. + + Parameters + ---------- + psd : FrequencySeries + PSD whose inverse spectrum is to be truncated. + max_filter_len : int + Maximum length of the time-domain filter in samples. + low_frequency_cutoff : {None, int} + Frequencies below `low_frequency_cutoff` are zeroed in the output. + trunc_method : {None, 'hann'} + Function used for truncating the time-domain filter. + None produces a hard truncation at `max_filter_len`. + + Returns + ------- + psd : FrequencySeries + PSD whose inverse spectrum has been truncated. + + Raises + ------ + ValueError + For invalid types or values of `max_filter_len` and `low_frequency_cutoff`. + + Notes + ----- + See arXiv:gr-qc/0509116 for details. + """ + from pycbc.strain.strain import execute_cached_fft, execute_cached_ifft + + # sanity checks + if type(max_filter_len) is not int or max_filter_len <= 0: + raise ValueError('max_filter_len must be a positive integer') + if low_frequency_cutoff is not None and \ + (low_frequency_cutoff < 0 or + low_frequency_cutoff > psd.sample_frequencies[-1]): + raise ValueError('low_frequency_cutoff must be within the bandwidth of the PSD') + + N = (len(psd)-1)*2 + + inv_asd = FrequencySeries(zeros(len(psd)), delta_f=psd.delta_f, \ + dtype=complex_same_precision_as(psd)) + + kmin = 1 + if low_frequency_cutoff: + kmin = int(low_frequency_cutoff / psd.delta_f) + + inv_asd[kmin:N//2] = (1.0 / psd[kmin:N//2]) ** 0.5 + if not USE_CACHING_FOR_INV_SPEC_TRUNC: + q = TimeSeries( + numpy.zeros(N), + delta_t=(N / psd.delta_f), + dtype=real_same_precision_as(psd) + ) + ifft(inv_asd, q) + else: + q = execute_cached_ifft(inv_asd, copy_output=False, + uid=INVSPECTRUNC_UNIQUE_ID) + + trunc_start = max_filter_len // 2 + trunc_end = N - max_filter_len // 2 + if trunc_end < trunc_start: + raise ValueError('Invalid value in inverse_spectrum_truncation') + + if trunc_method == 'hann': + trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype) + q[0:trunc_start] *= trunc_window[-trunc_start:] + q[trunc_end:N] *= trunc_window[0:max_filter_len//2] + + if trunc_start < trunc_end: + q[trunc_start:trunc_end] = 0 + if not USE_CACHING_FOR_INV_SPEC_TRUNC: + psd_trunc = FrequencySeries( + numpy.zeros(len(psd)), + delta_f=psd.delta_f, + dtype=complex_same_precision_as(psd) + ) + fft(q, psd_trunc) + else: + psd_trunc = execute_cached_fft(q, copy_output=False, + uid=INVSPECTRUNC_UNIQUE_ID) + psd_trunc *= psd_trunc.conj() + psd_out = 1. / abs(psd_trunc) + + return psd_out
+ + +
+[docs] +def interpolate(series, delta_f, length=None): + """Return a new PSD that has been interpolated to the desired delta_f. + + Parameters + ---------- + series : FrequencySeries + Frequency series to be interpolated. + delta_f : float + The desired delta_f of the output + length : None or int + The desired number of frequency samples. The default is None, + so it will be calculated from the given `series` and `delta_f`. + But this will cause an inconsistency issue of length sometimes, + so if `length` is given, then just use it. + + Returns + ------- + interpolated series : FrequencySeries + A new FrequencySeries that has been interpolated. + """ + if length is None: + new_n = (len(series)-1) * series.delta_f / delta_f + 1 + else: + new_n = length + samples = numpy.arange(0, numpy.rint(new_n)) * delta_f + interpolated_series = numpy.interp(samples, series.sample_frequencies.numpy(), series.numpy()) + return FrequencySeries(interpolated_series, epoch=series.epoch, + delta_f=delta_f, dtype=series.dtype)
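In a typical PyCBC workflow the functions above are chained: estimate with welch, match the analysis resolution with interpolate, then smooth with inverse_spectrum_truncation. A hedged sketch of that chain, assuming the usual pycbc.psd exports:

import numpy
from pycbc.types import TimeSeries
from pycbc.psd import welch, interpolate, inverse_spectrum_truncation

delta_t = 1.0 / 4096
strain = TimeSeries(numpy.random.normal(size=4096 * 64), delta_t=delta_t)
psd = welch(strain, seg_len=4096, seg_stride=2048)
# match the PSD resolution to a 16 s analysis segment ...
psd = interpolate(psd, delta_f=1.0 / 16)
# ... and limit the inverse-spectrum impulse response to 4 s of data
psd = inverse_spectrum_truncation(psd, max_filter_len=4 * 4096,
                                  low_frequency_cutoff=15.0,
                                  trunc_method='hann')
print(len(psd), psd.delta_f)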
+ + +
+[docs] +def bandlimited_interpolate(series, delta_f): + """Return a new PSD that has been interpolated to the desired delta_f. + + Parameters + ---------- + series : FrequencySeries + Frequency series to be interpolated. + delta_f : float + The desired delta_f of the output + + Returns + ------- + interpolated series : FrequencySeries + A new FrequencySeries that has been interpolated. + """ + series = FrequencySeries(series, dtype=complex_same_precision_as(series), delta_f=series.delta_f) + + N = (len(series) - 1) * 2 + delta_t = 1.0 / series.delta_f / N + + new_N = int(1.0 / (delta_t * delta_f)) + new_n = new_N // 2 + 1 + + series_in_time = TimeSeries(zeros(N), dtype=real_same_precision_as(series), delta_t=delta_t) + ifft(series, series_in_time) + + padded_series_in_time = TimeSeries(zeros(new_N), dtype=series_in_time.dtype, delta_t=delta_t) + padded_series_in_time[0:N//2] = series_in_time[0:N//2] + padded_series_in_time[new_N-N//2:new_N] = series_in_time[N//2:N] + + interpolated_series = FrequencySeries(zeros(new_n), dtype=series.dtype, delta_f=delta_f) + fft(padded_series_in_time, interpolated_series) + + return interpolated_series
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/psd/read.html b/latest/html/_modules/pycbc/psd/read.html new file mode 100644 index 00000000000..1009039001e --- /dev/null +++ b/latest/html/_modules/pycbc/psd/read.html @@ -0,0 +1,326 @@ + + + + + + pycbc.psd.read — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.psd.read

+#!/usr/bin/python
+# Copyright (C) 2012 Alex Nitz, Tito Dal Canton
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""Utilities to read PSDs from files.
+"""
+
+import logging
+import numpy
+import scipy.interpolate
+from pycbc.types import FrequencySeries
+
+logger = logging.getLogger('pycbc.psd.read')
+
+
+[docs] +def from_numpy_arrays(freq_data, noise_data, length, delta_f, low_freq_cutoff): + """Interpolate n PSD (as two 1-dimensional arrays of frequency and data) + to the desired length, delta_f and low frequency cutoff. + + Parameters + ---------- + freq_data : array + Array of frequencies. + noise_data : array + PSD values corresponding to frequencies in freq_arr. + length : int + Length of the frequency series in samples. + delta_f : float + Frequency resolution of the frequency series in Herz. + low_freq_cutoff : float + Frequencies below this value are set to zero. + + Returns + ------- + psd : FrequencySeries + The generated frequency series. + """ + # Only include points above the low frequency cutoff + if freq_data[0] > low_freq_cutoff: + raise ValueError('Lowest frequency in input data ' + ' is higher than requested low-frequency cutoff ' + str(low_freq_cutoff)) + + kmin = int(low_freq_cutoff / delta_f) + flow = kmin * delta_f + + data_start = (0 if freq_data[0]==low_freq_cutoff else numpy.searchsorted(freq_data, flow) - 1) + data_start = max(0, data_start) + # If the cutoff is exactly in the file, start there + if freq_data[data_start+1] == low_freq_cutoff: + data_start += 1 + + freq_data = freq_data[data_start:] + noise_data = noise_data[data_start:] + + if (length - 1) * delta_f > freq_data[-1]: + logger.warning('Requested number of samples exceeds the highest ' + 'available frequency in the input data, ' + 'will use max available frequency instead. ' + '(requested %f Hz, available %f Hz)', + (length - 1) * delta_f, freq_data[-1]) + length = int(freq_data[-1]/delta_f + 1) + + flog = numpy.log(freq_data) + slog = numpy.log(noise_data) + + psd_interp = scipy.interpolate.interp1d( + flog, slog, fill_value=(slog[0], slog[-1]), bounds_error=False) + psd = numpy.zeros(length, dtype=numpy.float64) + + vals = numpy.log(numpy.arange(kmin, length) * delta_f) + psd[kmin:] = numpy.exp(psd_interp(vals)) + + return FrequencySeries(psd, delta_f=delta_f)
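A minimal usage sketch (hedged: it assumes from_numpy_arrays is importable from pycbc.psd.read, the module rendered on this page):

import numpy
from pycbc.psd.read import from_numpy_arrays

freqs = numpy.array([5.0, 10.0, 100.0, 2048.0])
vals = numpy.array([1e-40, 1e-44, 1e-46, 1e-45])
psd = from_numpy_arrays(freqs, vals, length=2049, delta_f=1.0,
                        low_freq_cutoff=10.0)
print(len(psd), psd.delta_f, float(psd[10]))   # psd[10] interpolates to ~1e-44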
+ + + +
+[docs] +def from_txt(filename, length, delta_f, low_freq_cutoff, is_asd_file=True): + """Read an ASCII file containing one-sided ASD or PSD data and generate + a frequency series with the corresponding PSD. The ASD or PSD data is + interpolated in order to match the desired resolution of the + generated frequency series. + + Parameters + ---------- + filename : string + Path to a two-column ASCII file. The first column must contain + the frequency (positive frequencies only) and the second column + must contain the amplitude density OR power spectral density. + length : int + Length of the frequency series in samples. + delta_f : float + Frequency resolution of the frequency series in Herz. + low_freq_cutoff : float + Frequencies below this value are set to zero. + is_asd_file : Boolean + If false assume that the second column holds power spectral density. + If true assume that the second column holds amplitude spectral density. + Default: True + + Returns + ------- + psd : FrequencySeries + The generated frequency series. + + Raises + ------ + ValueError + If the ASCII file contains negative, infinite or NaN frequencies + or amplitude densities. + """ + file_data = numpy.loadtxt(filename) + if (file_data < 0).any() or \ + numpy.logical_not(numpy.isfinite(file_data)).any(): + raise ValueError('Invalid data in ' + filename) + + freq_data = file_data[:, 0] + noise_data = file_data[:, 1] + if is_asd_file: + noise_data = noise_data ** 2 + + return from_numpy_arrays(freq_data, noise_data, length, delta_f, + low_freq_cutoff)
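A minimal usage sketch for reading a two-column ASD file (hedged: from_txt is assumed to be exported as pycbc.psd.from_txt; the file name is just an example):

import numpy
from pycbc.psd import from_txt

freqs = numpy.linspace(10.0, 2048.0, 500)
asd = 1e-23 * numpy.ones_like(freqs)            # flat toy ASD
numpy.savetxt('toy_asd.txt', numpy.column_stack([freqs, asd]))

psd = from_txt('toy_asd.txt', length=2049, delta_f=1.0,
               low_freq_cutoff=15.0, is_asd_file=True)
print(float(psd[100]))                          # ~ (1e-23)**2 since the input is an ASD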
+ + +
+[docs] +def from_xml(filename, length, delta_f, low_freq_cutoff, ifo_string=None, + root_name='psd'): + """Read an ASCII file containing one-sided ASD or PSD data and generate + a frequency series with the corresponding PSD. The ASD or PSD data is + interpolated in order to match the desired resolution of the + generated frequency series. + + Parameters + ---------- + filename : string + Path to a two-column ASCII file. The first column must contain + the frequency (positive frequencies only) and the second column + must contain the amplitude density OR power spectral density. + length : int + Length of the frequency series in samples. + delta_f : float + Frequency resolution of the frequency series in Herz. + low_freq_cutoff : float + Frequencies below this value are set to zero. + ifo_string : string + Use the PSD in the file's PSD dictionary with this ifo string. + If not given and only one PSD present in the file return that, if not + given and multiple (or zero) PSDs present an exception will be raised. + root_name : string (default='psd') + If given use this as the root name for the PSD XML file. If this means + nothing to you, then it is probably safe to ignore this option. + + Returns + ------- + psd : FrequencySeries + The generated frequency series. + + """ + import lal.series + from ligo.lw import utils as ligolw_utils + + with open(filename, 'rb') as fp: + ct_handler = lal.series.PSDContentHandler + xml_doc = ligolw_utils.load_fileobj(fp, compress='auto', + contenthandler=ct_handler) + psd_dict = lal.series.read_psd_xmldoc(xml_doc, root_name=root_name) + + if ifo_string is not None: + psd_freq_series = psd_dict[ifo_string] + elif len(psd_dict.keys()) == 1: + psd_freq_series = psd_dict[tuple(psd_dict.keys())[0]] + else: + err_msg = "No ifo string given and input XML file contains not " + err_msg += "exactly one PSD. Specify which PSD you want to use." + raise ValueError(err_msg) + + noise_data = psd_freq_series.data.data[:] + freq_data = numpy.arange(len(noise_data)) * psd_freq_series.deltaF + + return from_numpy_arrays(freq_data, noise_data, length, delta_f, + low_freq_cutoff)
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/psd/variation.html b/latest/html/_modules/pycbc/psd/variation.html new file mode 100644 index 00000000000..2be5ee3a6e9 --- /dev/null +++ b/latest/html/_modules/pycbc/psd/variation.html @@ -0,0 +1,552 @@ + + + + + + pycbc.psd.variation — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.psd.variation

+""" PSD Variation """
+
+import numpy
+from numpy.fft import rfft, irfft
+import scipy.signal as sig
+from scipy.interpolate import interp1d
+
+import pycbc.psd
+from pycbc.types import TimeSeries
+
+
+
+[docs] +def create_full_filt(freqs, filt, plong, srate, psd_duration): + """Create a filter to convolve with strain data to find PSD variation. + + Parameters + ---------- + freqs : numpy.ndarray + Array of sample frequencies of the PSD. + filt : numpy.ndarray + A bandpass filter. + plong : numpy.ndarray + The estimated PSD. + srate : float + The sample rate of the data. + psd_duration : float + The duration of the estimated PSD. + + Returns + ------- + full_filt : numpy.ndarray + The full filter used to calculate PSD variation. + """ + + # Make the weighting filter - bandpass, which weight by f^-7/6, + # and whiten. The normalization is chosen so that the variance + # will be one if this filter is applied to white noise which + # already has a variance of one. + fweight = freqs ** (-7./6.) * filt / numpy.sqrt(plong) + fweight[0] = 0. + norm = (sum(abs(fweight) ** 2) / (len(fweight) - 1.)) ** -0.5 + fweight = norm * fweight + fwhiten = numpy.sqrt(2. / srate) / numpy.sqrt(plong) + fwhiten[0] = 0. + full_filt = sig.windows.hann(int(psd_duration * srate)) * numpy.roll( + irfft(fwhiten * fweight), int(psd_duration / 2) * srate) + + return full_filt
+ + + +
+[docs] +def mean_square(data, delta_t, srate, short_stride, stride): + """ Calculate mean square of given time series once per stride + + First of all this function calculate the mean square of given time + series once per short_stride. This is used to find and remove + outliers due to short glitches. Here an outlier is defined as any + element which is greater than two times the average of its closest + neighbours. Every outlier is substituted with the average of the + corresponding adjacent elements. + Then, every second the function compute the mean square of the + smoothed time series, within the stride. + + Parameters + ---------- + data : numpy.ndarray + delta_t : float + Duration of the time series + srate : int + Sample rate of the data were it given as a TimeSeries + short_stride : float + Stride duration for outlier removal + stride ; float + Stride duration + + Returns + ------- + m_s: List + Mean square of given time series + """ + + # Calculate mean square of data once per short stride and replace + # outliers + short_ms = numpy.mean(data.reshape(-1, int(srate * short_stride)) ** 2, + axis=1) + # Define an array of averages that is used to substitute outliers + ave = 0.5 * (short_ms[2:] + short_ms[:-2]) + outliers = short_ms[1:-1] > (2. * ave) + short_ms[1:-1][outliers] = ave[outliers] + + # Calculate mean square of data every step within a window equal to + # stride seconds + m_s = [] + inv_time = int(1. / short_stride) + for index in range(int(delta_t - stride + 1)): + m_s.append(numpy.mean(short_ms[inv_time * index:inv_time * + int(index+stride)])) + return m_s
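A toy call of the outlier-removing mean square (hedged: it assumes mean_square is importable from pycbc.psd.variation, the module rendered on this page). Note that delta_t here is the duration of the data in seconds, as used in the loop above:

import numpy
from pycbc.psd.variation import mean_square

srate, dur = 256, 16
data = numpy.random.normal(size=srate * dur)    # 16 s of unit-variance noise
data[1000:1064] += 50.0                         # one loud 0.25 s glitch
ms = mean_square(data, delta_t=dur, srate=srate, short_stride=0.25, stride=8)
print(len(ms), ms[:3])                          # the glitch is clipped, not spread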
+ + + +
+[docs] +def calc_filt_psd_variation(strain, segment, short_segment, psd_long_segment, + psd_duration, psd_stride, psd_avg_method, low_freq, + high_freq): + """ Calculates time series of PSD variability + + This function first splits the segment up into 512 second chunks. It + then calculates the PSD over this 512 second. The PSD is used to + to create a filter that is the composition of three filters: + 1. Bandpass filter between f_low and f_high. + 2. Weighting filter which gives the rough response of a CBC template. + 3. Whitening filter. + Next it makes the convolution of this filter with the stretch of data. + This new time series is given to the "mean_square" function, which + computes the mean square of the timeseries within an 8 seconds window, + once per second. + The result, which is the variance of the S/N in that stride for the + Parseval theorem, is then stored in a timeseries. + + Parameters + ---------- + strain : TimeSeries + Input strain time series to estimate PSDs + segment : {float, 8} + Duration of the segments for the mean square estimation in seconds. + short_segment : {float, 0.25} + Duration of the short segments for the outliers removal. + psd_long_segment : {float, 512} + Duration of the long segments for PSD estimation in seconds. + psd_duration : {float, 8} + Duration of FFT segments for long term PSD estimation, in seconds. + psd_stride : {float, 4} + Separation between FFT segments for long term PSD estimation, in + seconds. + psd_avg_method : {string, 'median'} + Method for averaging PSD estimation segments. + low_freq : {float, 20} + Minimum frequency to consider the comparison between PSDs. + high_freq : {float, 480} + Maximum frequency to consider the comparison between PSDs. + + Returns + ------- + psd_var : TimeSeries + Time series of the variability in the PSD estimation + """ + # Calculate strain precision + if strain.precision == 'single': + fs_dtype = numpy.float32 + elif strain.precision == 'double': + fs_dtype = numpy.float64 + + # Convert start and end times immediately to floats + start_time = float(strain.start_time) + end_time = float(strain.end_time) + srate = int(strain.sample_rate) + + # Fix the step for the PSD estimation and the time to remove at the + # edge of the time series. + step = 1.0 + strain_crop = 8.0 + + # Find the times of the long segments + times_long = numpy.arange(start_time, end_time, + psd_long_segment - 2 * strain_crop + - segment + step) + + # Create a bandpass filter between low_freq and high_freq + filt = sig.firwin(4 * srate, [low_freq, high_freq], pass_zero=False, + window='hann', fs=srate) + filt.resize(int(psd_duration * srate)) + # Fourier transform the filter and take the absolute value to get + # rid of the phase. 
+ filt = abs(rfft(filt)) + + psd_var_list = [] + for tlong in times_long: + # Calculate PSD for long segment + if tlong + psd_long_segment <= float(end_time): + astrain = strain.time_slice(tlong, tlong + psd_long_segment) + plong = pycbc.psd.welch( + astrain, + seg_len=int(psd_duration * strain.sample_rate), + seg_stride=int(psd_stride * strain.sample_rate), + avg_method=psd_avg_method) + else: + astrain = strain.time_slice(tlong, end_time) + plong = pycbc.psd.welch( + strain.time_slice(end_time - psd_long_segment, + end_time), + seg_len=int(psd_duration * strain.sample_rate), + seg_stride=int(psd_stride * strain.sample_rate), + avg_method=psd_avg_method) + astrain = astrain.numpy() + freqs = numpy.array(plong.sample_frequencies, dtype=fs_dtype) + plong = plong.numpy() + + full_filt = create_full_filt(freqs, filt, plong, srate, psd_duration) + # Convolve the filter with long segment of data + wstrain = sig.fftconvolve(astrain, full_filt, mode='same') + wstrain = wstrain[int(strain_crop * srate):-int(strain_crop * srate)] + # compute the mean square of the chunk of data + delta_t = len(wstrain) * strain.delta_t + variation = mean_square(wstrain, delta_t, srate, short_segment, segment) + psd_var_list.append(numpy.array(variation, dtype=wstrain.dtype)) + + # Package up the time series to return + psd_var = TimeSeries(numpy.concatenate(psd_var_list), delta_t=step, + epoch=start_time + strain_crop + segment) + + return psd_var
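A hedged usage sketch, feeding 512 s of Gaussian noise as stand-in strain with the default parameter values quoted in the docstring (again assuming the pycbc.psd.variation import path):

import numpy
from pycbc.types import TimeSeries
from pycbc.psd.variation import calc_filt_psd_variation  # assumed import path

srate = 2048
strain = TimeSeries(numpy.random.normal(size=512 * srate),
                    delta_t=1.0 / srate, epoch=0)

psd_var = calc_filt_psd_variation(
    strain, segment=8.0, short_segment=0.25, psd_long_segment=512.0,
    psd_duration=8.0, psd_stride=4.0, psd_avg_method='median',
    low_freq=20.0, high_freq=480.0)
print(psd_var.delta_t, len(psd_var))   # one variation value per second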
+ + + +
+[docs] +def find_trigger_value(psd_var, idx, start, sample_rate): + """ Find the PSD variation value at a particular time with the filter + method. If the time is outside the time series bounds, 1. is returned. + + Parameters + ---------- + psd_var : TimeSeries + Time series of the variability in the PSD estimation + idx : numpy.ndarray + Time indices of the triggers + start : float + GPS start time + sample_rate : float + Sample rate defined in ini file + + Returns + ------- + vals : Array + PSD variation values at the trigger times + """ + # Find gps time of the trigger + time = start + idx / sample_rate + # Extract the PSD variation at trigger time through linear + # interpolation + if not hasattr(psd_var, 'cached_psd_var_interpolant'): + psd_var.cached_psd_var_interpolant = \ + interp1d(psd_var.sample_times.numpy(), + psd_var.numpy(), + fill_value=1.0, + bounds_error=False) + vals = psd_var.cached_psd_var_interpolant(time) + + return vals
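A small sketch of evaluating the variation at trigger sample indices; the last trigger falls outside the time series, so it picks up the fill value of 1 (import path assumed as above):

import numpy
from pycbc.types import TimeSeries
from pycbc.psd.variation import find_trigger_value  # assumed import path

# Toy PSD-variation series: one value per second starting at GPS time 100.
psd_var = TimeSeries(numpy.array([1.0, 1.2, 0.9, 1.1, 1.0]),
                     delta_t=1.0, epoch=100)
idx = numpy.array([2 * 2048, 3 * 2048, 60 * 2048])   # trigger sample indices
vals = find_trigger_value(psd_var, idx, start=100.0, sample_rate=2048.0)
print(vals)   # roughly [0.9, 1.1, 1.0]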
+ + + +
+[docs] +def live_create_filter(psd_estimated, + psd_duration, + sample_rate, + low_freq=20, + high_freq=480): + """ + Create a filter to be used in the calculation of the psd variation for the + PyCBC Live search. This filter combines a bandpass between a lower and + upper frequency and an estimated signal response so that the variance + will be 1 when the filter is applied to white noise. + + Within the PyCBC Live search this filter needs to be recreated every time + the estimated psd is updated and needs to be unique for each detector. + + Parameters + ---------- + psd_estimated : pycbc.frequencyseries + The current PyCBC Live PSD: variations are measured relative to this + estimate. + psd_duration : float + The duration of the estimation of the psd, in seconds. + sample_rate : int + The sample rate of the strain data being searched over. + low_freq : int (default = 20) + The lower frequency to apply in the bandpass filter. + high_freq : int (default = 480) + The upper frequency to apply in the bandpass filter. + + Returns + ------- + full_filt : numpy.ndarray + The complete filter to be convolved with the strain data to + find the psd variation value. + + """ + + # Create a bandpass filter between low_freq and high_freq once + filt = sig.firwin(4 * sample_rate, + [low_freq, high_freq], + pass_zero=False, + window='hann', + fs=sample_rate) + filt.resize(int(psd_duration * sample_rate)) + + # Fourier transform the filter and take the absolute value to get + # rid of the phase. + filt = abs(rfft(filt)) + + # Extract the psd frequencies to create a representative filter. + freqs = numpy.array(psd_estimated.sample_frequencies, dtype=numpy.float32) + plong = psd_estimated.numpy() + full_filt = create_full_filt(freqs, filt, plong, sample_rate, psd_duration) + + return full_filt
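A sketch of building the filter against a flat (white-noise) PSD with the frequency resolution the filter expects, delta_f = 1 / psd_duration (import path assumed as above):

import numpy
from pycbc.types import FrequencySeries
from pycbc.psd.variation import live_create_filter  # assumed import path

sample_rate = 2048
psd_duration = 8.0
flen = int(psd_duration * sample_rate / 2) + 1
flat_psd = FrequencySeries(numpy.ones(flen), delta_f=1.0 / psd_duration)

full_filt = live_create_filter(flat_psd, psd_duration, sample_rate,
                               low_freq=20, high_freq=480)
print(full_filt.shape)   # time-domain filter to convolve with the strain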
+ + + +
+[docs] +def live_calc_psd_variation(strain, + full_filt, + increment, + data_trim=2.0, + short_stride=0.25): + """ + Calculate the psd variation in the PyCBC Live search. + + The Live strain data is convolved with the filter to produce a timeseries + containing the PSD variation values for each sample. The mean square of + the timeseries is calculated over the short_stride to find outliers caused + by short duration glitches. Outliers are replaced with the average of + adjacent elements in the array. This array is then further averaged every + second to produce the PSD variation timeseries. + + Parameters + ---------- + strain : pycbc.timeseries + Live data being searched through by the PyCBC Live search. + full_filt : numpy.ndarray + A filter created by `live_create_filter`. + increment : float + The number of seconds in each increment in the PyCBC Live search. + data_trim : float + The number of seconds to be trimmed from either end of the convolved + timeseries to prevent artefacts. + short_stride : float + The number of seconds to average the PSD variation timeseries over to + remove the effects of short duration glitches. + + Returns + ------- + psd_var : pycbc.timeseries + A timeseries containing the PSD variation values. + + """ + sample_rate = int(strain.sample_rate) + + # Grab the last increment's worth of data, plus padding for edge effects. + astrain = strain.time_slice(strain.end_time - increment - (data_trim * 3), + strain.end_time) + + # Convolve the data and the filter to produce the PSD variation timeseries, + # then trim the beginning and end of the data to prevent edge effects. + wstrain = sig.fftconvolve(astrain, full_filt, mode='same') + wstrain = wstrain[int(data_trim * sample_rate):-int(data_trim * sample_rate)] + + # Create a PSD variation array by taking the mean square of the PSD + # variation timeseries every short_stride + short_ms = numpy.mean( + wstrain.reshape(-1, int(sample_rate * short_stride)) ** 2, axis=1) + + # Define an array of averages that is used to substitute outliers + ave = 0.5 * (short_ms[2:] + short_ms[:-2]) + outliers = short_ms[1:-1] > (2. * ave) + short_ms[1:-1][outliers] = ave[outliers] + + # Calculate the PSD variation every second by a moving window average + # containing (1/short_stride) short_ms samples. + m_s = [] + samples_per_second = 1 / short_stride + for idx in range(int(len(short_ms) / samples_per_second)): + start = int(samples_per_second * idx) + end = int(samples_per_second * (idx + 1)) + m_s.append(numpy.mean(short_ms[start:end])) + + m_s = numpy.array(m_s, dtype=wstrain.dtype) + psd_var = TimeSeries(m_s, + delta_t=1.0, + epoch=strain.end_time - increment - (data_trim * 2)) + + return psd_var
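Continuing the sketch above, the filter can be applied to a buffer of white noise standing in for the live strain; by construction the variation values should hover around 1 (import path assumed as above):

import numpy
from pycbc.types import TimeSeries, FrequencySeries
from pycbc.psd.variation import (live_create_filter,
                                 live_calc_psd_variation)  # assumed import path

sample_rate = 2048
psd_duration = 8.0
flen = int(psd_duration * sample_rate / 2) + 1
flat_psd = FrequencySeries(numpy.ones(flen), delta_f=1.0 / psd_duration)
full_filt = live_create_filter(flat_psd, psd_duration, sample_rate)

# 64 s of white noise standing in for the live strain buffer.
strain = TimeSeries(numpy.random.normal(size=64 * sample_rate),
                    delta_t=1.0 / sample_rate, epoch=0)
psd_var = live_calc_psd_variation(strain, full_filt, increment=8.0)
print(psd_var.start_time, psd_var.numpy())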
+ + + +
+[docs] +def live_find_var_value(triggers, + psd_var_timeseries): + """ + Extract the PSD variation values at trigger times by linear interpolation. + + Parameters + ---------- + triggers : dict + Dictionary containing input trigger times. + psd_var_timeseries : pycbc.timeseries + A timeseries containing the PSD variation value for each second of the + latest increment in PyCBC Live. + + Returns + ------- + psd_var_vals : numpy.ndarray + Array of interpolated PSD variation values at trigger times. + """ + + # Create the interpolator + interpolator = interp1d(psd_var_timeseries.sample_times.numpy(), + psd_var_timeseries.numpy(), + fill_value=1.0, + bounds_error=False) + # Evaluate at the trigger times + psd_var_vals = interpolator(triggers['end_time']) + + return psd_var_vals
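And a sketch of reading the variation back at trigger times; a trigger outside the series maps to 1.0 (import path assumed as above):

import numpy
from pycbc.types import TimeSeries
from pycbc.psd.variation import live_find_var_value  # assumed import path

psd_var = TimeSeries(numpy.array([1.0, 1.3, 0.8, 1.1]), delta_t=1.0, epoch=52)
triggers = {'end_time': numpy.array([52.4, 53.9, 80.0])}
print(live_find_var_value(triggers, psd_var))   # last value falls back to 1.0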
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/rate.html b/latest/html/_modules/pycbc/rate.html new file mode 100644 index 00000000000..ddce04baa62 --- /dev/null +++ b/latest/html/_modules/pycbc/rate.html @@ -0,0 +1,508 @@ + + + + + + pycbc.rate — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.rate

+import numpy
+import bisect
+import logging
+
+from . import bin_utils
+
+logger = logging.getLogger('pycbc.rate')
+
+
+
+[docs] +def integral_element(mu, pdf): + ''' + Returns an array of elements of the integrand dP = p(mu) dmu + for a density p(mu) defined at sample values mu ; samples need + not be equally spaced. Uses a simple trapezium rule. + Number of dP elements is (number of mu samples) - 1. + ''' + dmu = mu[1:] - mu[:-1] + bin_mean = (pdf[1:] + pdf[:-1]) / 2. + return dmu * bin_mean
+ + + +
+[docs] +def normalize_pdf(mu, pofmu): + """ + Takes a function pofmu defined at rate sample values mu and + normalizes it to be a suitable pdf. Both mu and pofmu must be + arrays or lists of the same length. + """ + if min(pofmu) < 0: + raise ValueError("Probabilities cannot be negative, don't ask me to " + "normalize a function with negative values!") + if min(mu) < 0: + raise ValueError("Rates cannot be negative, don't ask me to " + "normalize a function over a negative domain!") + + dp = integral_element(mu, pofmu) + return mu, pofmu/sum(dp)
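For example, a short check of the trapezium elements and the normalization (pycbc.rate is the module shown on this page):

import numpy
from pycbc.rate import integral_element, normalize_pdf

# Unevenly spaced samples of an unnormalized density p(mu) = exp(-mu).
mu = numpy.array([0.0, 0.5, 1.0, 2.0, 4.0])
pofmu = numpy.exp(-mu)

dp = integral_element(mu, pofmu)
print(len(dp), dp.sum())          # 4 trapezium elements, sum ~ integral of exp(-mu)

mu, norm = normalize_pdf(mu, pofmu)
print(integral_element(mu, norm).sum())   # ~1 after normalization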
+ + + +
+[docs] +def compute_upper_limit(mu_in, post, alpha=0.9): + """ + Returns the upper limit mu_high of confidence level alpha for a + posterior distribution post on the given parameter mu. + The posterior need not be normalized. + """ + if 0 < alpha < 1: + dp = integral_element(mu_in, post) + high_idx = bisect.bisect_left(dp.cumsum() / dp.sum(), alpha) + # if alpha is in (0,1] and post is non-negative, bisect_left + # will always return an index in the range of mu since + # post.cumsum()/post.sum() will always begin at 0 and end at 1 + mu_high = mu_in[high_idx] + elif alpha == 1: + mu_high = numpy.max(mu_in[post > 0]) + else: + raise ValueError("Confidence level must be in (0,1].") + + return mu_high
+ + + +
+[docs] +def compute_lower_limit(mu_in, post, alpha=0.9): + """ + Returns the lower limit mu_low of confidence level alpha for a + posterior distribution post on the given parameter mu. + The posterior need not be normalized. + """ + if 0 < alpha < 1: + dp = integral_element(mu_in, post) + low_idx = bisect.bisect_right(dp.cumsum() / dp.sum(), 1 - alpha) + # if alpha is in [0,1) and post is non-negative, bisect_right + # will always return an index in the range of mu since + # post.cumsum()/post.sum() will always begin at 0 and end at 1 + mu_low = mu_in[low_idx] + elif alpha == 1: + mu_low = numpy.min(mu_in[post > 0]) + else: + raise ValueError("Confidence level must be in (0,1].") + + return mu_low
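A quick worked example on an exponential posterior, where the 90% limits can be checked analytically:

import numpy
from pycbc.rate import compute_upper_limit, compute_lower_limit

mu = numpy.linspace(0.0, 20.0, 2001)
post = numpy.exp(-mu)            # unnormalized exponential posterior

print(compute_upper_limit(mu, post, alpha=0.9))   # ~ln(10) ~ 2.30
print(compute_lower_limit(mu, post, alpha=0.9))   # ~-ln(0.9) ~ 0.105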
+ + + +
+[docs] +def confidence_interval_min_width(mu, post, alpha=0.9): + ''' + Returns the minimal-width confidence interval [mu_low, mu_high] of + confidence level alpha for a posterior distribution post on the parameter mu. + ''' + if not 0 < alpha < 1: + raise ValueError("Confidence level must be in (0,1).") + + # choose a step size for the sliding confidence window + alpha_step = 0.01 + + # initialize the lower and upper limits + mu_low = numpy.min(mu) + mu_high = numpy.max(mu) + + # find the smallest window (by delta-mu) stepping by dalpha + for ai in numpy.arange(0, 1 - alpha, alpha_step): + ml = compute_lower_limit(mu, post, 1 - ai) + mh = compute_upper_limit(mu, post, alpha + ai) + if mh - ml < mu_high - mu_low: + mu_low = ml + mu_high = mh + + return mu_low, mu_high
+ + + +
+[docs] +def hpd_coverage(mu, pdf, thresh): + ''' + Integrates a pdf over mu taking only bins where + the mean over the bin is above a given threshold. + This gives the coverage of the HPD interval for + the given threshold. + ''' + dp = integral_element(mu, pdf) + bin_mean = (pdf[1:] + pdf[:-1]) / 2. + + return dp[bin_mean > thresh].sum()
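For instance, for a unit exponential density the region above a threshold of 0.1 carries 90% of the probability:

import numpy
from pycbc.rate import hpd_coverage

mu = numpy.linspace(0.0, 10.0, 1001)
pdf = numpy.exp(-mu)                      # close to normalized on this range
print(hpd_coverage(mu, pdf, thresh=0.1))  # ~0.9, i.e. 1 - 0.1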
+ + + +
+[docs] +def hpd_threshold(mu_in, post, alpha, tol): + ''' + For a PDF post over samples mu_in, find a density + threshold such that the region having higher density + has coverage of at least alpha, and less than alpha + plus a given tolerance. + ''' + # normalize_pdf returns both the samples and the normalized pdf + _, norm_post = normalize_pdf(mu_in, post) + # initialize bisection search + p_minus = 0.0 + p_plus = max(post) + while abs(hpd_coverage(mu_in, norm_post, p_minus) - + hpd_coverage(mu_in, norm_post, p_plus)) >= tol: + p_test = (p_minus + p_plus) / 2. + if hpd_coverage(mu_in, post, p_test) >= alpha: + # test value was too low or just right + p_minus = p_test + else: + # test value was too high + p_plus = p_test + # p_minus never goes above the required threshold and p_plus never goes below + # thus on exiting p_minus is at or below the required threshold and the + # difference in coverage is within tolerance + return p_minus
+ + + +
+[docs] +def hpd_credible_interval(mu_in, post, alpha=0.9, tolerance=1e-3): + ''' + Returns the minimum and maximum rate values of the HPD + (Highest Posterior Density) credible interval for a posterior + post defined at the sample values mu_in. Samples need not be + uniformly spaced and posterior need not be normalized. + + Will not return a correct credible interval if the posterior + is multimodal and the correct interval is not contiguous; + in this case will over-cover by including the whole range from + minimum to maximum mu. + ''' + if alpha == 1: + nonzero_samples = mu_in[post > 0] + mu_low = numpy.min(nonzero_samples) + mu_high = numpy.max(nonzero_samples) + elif 0 < alpha < 1: + # determine the highest PDF for which the region with + # higher density has sufficient coverage + pthresh = hpd_threshold(mu_in, post, alpha, tol=tolerance) + samples_over_threshold = mu_in[post > pthresh] + mu_low = numpy.min(samples_over_threshold) + mu_high = numpy.max(samples_over_threshold) + + return mu_low, mu_high
+ + + +# Following functions are for the old pylal volume vs mass calculations +# These were replaced by 'imr_utils' functions now contained in sensitivity.py +# and bin_utils.py + + +
+[docs] +def integrate_efficiency(dbins, eff, err=0, logbins=False): + + if logbins: + logd = numpy.log(dbins) + dlogd = logd[1:] - logd[:-1] + # use log midpoint of bins + dreps = numpy.exp((numpy.log(dbins[1:]) + numpy.log(dbins[:-1])) / 2.) + vol = numpy.sum(4.*numpy.pi * dreps**3. * eff * dlogd) + # propagate errors in eff to errors in v + verr = numpy.sqrt( + numpy.sum((4.*numpy.pi * dreps**3. * err * dlogd)**2.) + ) + else: + dd = dbins[1:] - dbins[:-1] + dreps = (dbins[1:] + dbins[:-1]) / 2. + vol = numpy.sum(4. * numpy.pi * dreps**2. * eff * dd) + # propagate errors + verr = numpy.sqrt(numpy.sum((4.*numpy.pi * dreps**2. * err * dd)**2.)) + + return vol, verr
+ + + +
+[docs] +def compute_efficiency(f_dist, m_dist, dbins): + ''' + Compute the efficiency as a function of distance for the given sets of found + and missed injection distances. + Note that injections that do not fit into any dbin get lost :( + ''' + efficiency = numpy.zeros(len(dbins) - 1) + error = numpy.zeros(len(dbins) - 1) + for j, dlow in enumerate(dbins[:-1]): + dhigh = dbins[j + 1] + found = numpy.sum((dlow <= f_dist) * (f_dist < dhigh)) + missed = numpy.sum((dlow <= m_dist) * (m_dist < dhigh)) + if found+missed == 0: + # avoid divide by 0 in empty bins + missed = 1. + efficiency[j] = float(found) / (found + missed) + error[j] = numpy.sqrt(efficiency[j] * (1 - efficiency[j]) / + (found + missed)) + + return efficiency, error
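A toy example with a handful of found and missed injection distances, feeding the binned efficiency into integrate_efficiency:

import numpy
from pycbc.rate import compute_efficiency, integrate_efficiency

f_dist = numpy.array([10., 30., 50., 80., 120.])   # found injections (Mpc)
m_dist = numpy.array([90., 150., 180.])            # missed injections (Mpc)
dbins = numpy.array([0., 50., 100., 200.])         # distance bin edges

eff, err = compute_efficiency(f_dist, m_dist, dbins)
print(eff)                                          # [1.0, 0.667, 0.333]
vol, verr = integrate_efficiency(dbins, eff, err)
print(vol)                                          # sensitive volume in Mpc^3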
+ + + +
+[docs] +def mean_efficiency_volume(found, missed, dbins): + + if len(found) == 0: + # no efficiency here + return numpy.zeros(len(dbins) - 1), numpy.zeros(len(dbins) - 1), 0, 0 + + # only need distances + f_dist = numpy.array([l.distance for l in found]) + m_dist = numpy.array([l.distance for l in missed]) + + # compute the efficiency and its variance + eff, err = compute_efficiency(f_dist, m_dist, dbins) + vol, verr = integrate_efficiency(dbins, eff, err) + + return eff, err, vol, verr
+ + + +
+[docs] +def filter_injections_by_mass(injs, mbins, bin_num, bin_type, bin_num2=None): + ''' + For a given set of injections (sim_inspiral rows), return the subset + of injections that fall within the given mass range. + ''' + if bin_type == "Mass1_Mass2": + m1bins = numpy.concatenate((mbins.lower()[0], + numpy.array([mbins.upper()[0][-1]]))) + m1lo = m1bins[bin_num] + m1hi = m1bins[bin_num + 1] + m2bins = numpy.concatenate((mbins.lower()[1], + numpy.array([mbins.upper()[1][-1]]))) + m2lo = m2bins[bin_num2] + m2hi = m2bins[bin_num2 + 1] + newinjs = [l for l in injs if + ((m1lo <= l.mass1 < m1hi and m2lo <= l.mass2 < m2hi) or + (m1lo <= l.mass2 < m1hi and m2lo <= l.mass1 < m2hi))] + return newinjs + + mbins = numpy.concatenate((mbins.lower()[0], + numpy.array([mbins.upper()[0][-1]]))) + mlow = mbins[bin_num] + mhigh = mbins[bin_num + 1] + if bin_type == "Chirp_Mass": + newinjs = [l for l in injs if (mlow <= l.mchirp < mhigh)] + elif bin_type == "Total_Mass": + newinjs = [l for l in injs if (mlow <= l.mass1 + l.mass2 < mhigh)] + elif bin_type == "Component_Mass": + # here it is assumed that m2 is fixed + newinjs = [l for l in injs if (mlow <= l.mass1 < mhigh)] + elif bin_type == "BNS_BBH": + if bin_num in [0, 2]: + # BNS/BBH case + newinjs = [l for l in injs if + (mlow <= l.mass1 < mhigh and mlow <= l.mass2 < mhigh)] + else: + # NSBH + newinjs = [l for l in injs if (mbins[0] <= l.mass1 < mbins[1] and + mbins[2] <= l.mass2 < mbins[3])] + # BHNS + newinjs += [l for l in injs if (mbins[0] <= l.mass2 < mbins[1] and + mbins[2] <= l.mass1 < mbins[3])] + + return newinjs
+ + + +
+[docs] +def compute_volume_vs_mass(found, missed, mass_bins, bin_type, dbins=None): + """ + Compute the average luminosity an experiment was sensitive to + + Assumes that luminosity is uniformly distributed in space. + Input is the sets of found and missed injections. + """ + # mean and std estimate for luminosity + volArray = bin_utils.BinnedArray(mass_bins) + vol2Array = bin_utils.BinnedArray(mass_bins) + + # found/missed stats + foundArray = bin_utils.BinnedArray(mass_bins) + missedArray = bin_utils.BinnedArray(mass_bins) + + # compute the mean luminosity in each mass bin + effvmass = [] + errvmass = [] + # 2D case first + if bin_type == "Mass1_Mass2": + for j, mc1 in enumerate(mass_bins.centres()[0]): + for k, mc2 in enumerate(mass_bins.centres()[1]): + newfound = filter_injections_by_mass( + found, mass_bins, j, bin_type, k) + newmissed = filter_injections_by_mass( + missed, mass_bins, j, bin_type, k) + + foundArray[(mc1, mc2)] = len(newfound) + missedArray[(mc1, mc2)] = len(newmissed) + + # compute the volume using this injection set + meaneff, efferr, meanvol, volerr = mean_efficiency_volume( + newfound, newmissed, dbins) + effvmass.append(meaneff) + errvmass.append(efferr) + volArray[(mc1, mc2)] = meanvol + vol2Array[(mc1, mc2)] = volerr + + return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass + + for j, mc in enumerate(mass_bins.centres()[0]): + + # filter out injections not in this mass bin + newfound = filter_injections_by_mass(found, mass_bins, j, bin_type) + newmissed = filter_injections_by_mass(missed, mass_bins, j, bin_type) + + foundArray[(mc, )] = len(newfound) + missedArray[(mc, )] = len(newmissed) + + # compute the volume using this injection set + meaneff, efferr, meanvol, volerr = mean_efficiency_volume( + newfound, newmissed, dbins) + effvmass.append(meaneff) + errvmass.append(efferr) + volArray[(mc, )] = meanvol + vol2Array[(mc, )] = volerr + + return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/color.html b/latest/html/_modules/pycbc/results/color.html new file mode 100644 index 00000000000..134c398bad7 --- /dev/null +++ b/latest/html/_modules/pycbc/results/color.html @@ -0,0 +1,162 @@ + + + + + + pycbc.results.color — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.color

+""" Utilities for managing matplotlib colors and mapping ifos to color
+"""
+
+_ifo_color_map = {
+    'G1': '#222222',  # dark gray
+    'K1': '#ffb200',  # yellow/orange
+    'H1': '#ee0000',  # red
+    'I1': '#b0dd8b',  # light green
+    'L1': '#4ba6ff',  # blue
+    'V1': '#9b59b6',  # magenta/purple
+}
+
+_source_color_map = {
+    'BNS': '#A2C8F5',   # light blue
+    'NSBH': '#FFB482',  # light orange
+    'BBH': '#FE9F9B',   # light red
+    'Mass Gap': '#8EE5A1',  # light green
+    'GNS': '#98D6CB',   # turquoise
+    'GG': '#79BB87',    # green
+    'BHG': '#C6C29E'    # dark khaki
+}
+
+
+
+[docs] +def ifo_color(ifo): + return _ifo_color_map[ifo]
+ + + +
+[docs] +def source_color(source): + return _source_color_map[source]
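Usage is a simple lookup, e.g.:

from pycbc.results.color import ifo_color, source_color

print(ifo_color('H1'))       # '#ee0000'
print(source_color('BNS'))   # '#A2C8F5'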
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/dq.html b/latest/html/_modules/pycbc/results/dq.html new file mode 100644 index 00000000000..88dde05c4f4 --- /dev/null +++ b/latest/html/_modules/pycbc/results/dq.html @@ -0,0 +1,198 @@ + + + + + + pycbc.results.dq — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.dq

+'''This module contains utilities for following up search triggers'''
+
+# JavaScript for searching the aLOG
+redirect_javascript = """<script type="text/javascript">
+function redirect(form,way)
+{
+        // Set location to form and submit.
+        if(form != '')
+        {
+                document.forms[form].action=way;
+                document.forms[form].submit();
+        }
+        else
+        {
+                window.top.location = way;
+        }
+}
+</script>"""
+
+search_form_string="""<form name="%s_alog_search" id="%s_alog_search" method="post">
+<input type="hidden" name="srcDateFrom" id="srcDateFrom" value="%s" size="20"/>
+<input type="hidden" name="srcDateTo" id="srcDateTo" value="%s" size="20"/>
+</form>"""
+
+data_h1_string = """
+<a href=https://ldas-jobs.ligo-wa.caltech.edu/~detchar/summary/day/%s>
+Summary</a>
+&nbsp;
+<a onclick="redirect('h1_alog_search',
+'https://alog.ligo-wa.caltech.edu/aLOG/includes/search.php?adminType=search');
+return true;">aLOG</a>"""
+
+data_l1_string="""
+<a href=https://ldas-jobs.ligo-la.caltech.edu/~detchar/summary/day/%s>
+Summary</a>
+&nbsp;
+<a onclick="redirect('l1_alog_search',
+'https://alog.ligo-la.caltech.edu/aLOG/includes/search.php?adminType=search');
+return true;">aLOG</a>"""
+
+
+
+
+
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/followup.html b/latest/html/_modules/pycbc/results/followup.html new file mode 100644 index 00000000000..ef2d647afe8 --- /dev/null +++ b/latest/html/_modules/pycbc/results/followup.html @@ -0,0 +1,291 @@ + + + + + + pycbc.results.followup — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.followup

+# Copyright (C) 2014 Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" This module provides functions to generate followup plots and trigger
+time series.
+"""
+import numpy, matplotlib
+# Only if a backend is not already set ... This should really *not* be done
+# here, but in the executables you should set matplotlib.use()
+# This matches the check that matplotlib does internally, but this *may* be
+# version dependent. If this is a problem then remove this and control from
+# the executables directly.
+import sys
+if 'matplotlib.backends' not in sys.modules:
+    matplotlib.use('agg')
+import pylab, mpld3, mpld3.plugins
+from ligo.segments import segment
+from pycbc.io.hdf import HFile
+
+
+[docs] +def columns_from_file_list(file_list, columns, ifo, start, end): + """ Return columns of information stored in single detector trigger + files. + + Parameters + ---------- + file_list : FileList + List of single detector trigger files to read the columns from. + columns : list of strings + The list of columns to read from the trigger files. + ifo : string + The ifo to return triggers for. + start : int + The start time to get triggers from + end : int + The end time to get triggers from + + Returns + ------- + trigger_dict : dict + A dictionary of column vectors with column names as keys. + """ + file_list = file_list.find_output_with_ifo(ifo) + file_list = file_list.find_all_output_in_range(ifo, segment(start, end)) + + trig_dict = {} + for trig_file in file_list: + f = HFile(trig_file.storage_path, 'r') + + time = f['end_time'][:] + pick = numpy.logical_and(time < end, time > start) + pick_loc = numpy.where(pick)[0] + + for col in columns: + if col not in trig_dict: + trig_dict[col] = [] + trig_dict[col] = numpy.concatenate([trig_dict[col], f[col][:][pick_loc]]) + + return trig_dict
+ + +ifo_color = {'H1': 'blue', 'L1':'red', 'V1':'green'} + +
+[docs] +def coinc_timeseries_plot(coinc_file, start, end): + fig = pylab.figure() + f = HFile(coinc_file, 'r') + + stat1 = f['foreground/stat1'] + stat2 = f['foreground/stat2'] + time1 = f['foreground/time1'] + time2 = f['foreground/time2'] + ifo1 = f.attrs['detector_1'] + ifo2 = f.attrs['detector_2'] + + pylab.scatter(time1, stat1, label=ifo1, color=ifo_color[ifo1]) + pylab.scatter(time2, stat2, label=ifo2, color=ifo_color[ifo2]) + + fmt = '.12g' + mpld3.plugins.connect(fig, mpld3.plugins.MousePosition(fmt=fmt)) + pylab.legend() + pylab.xlabel('Time (s)') + pylab.ylabel('NewSNR') + pylab.grid() + return mpld3.fig_to_html(fig)
+ + +
+[docs] +def trigger_timeseries_plot(file_list, ifos, start, end): + + fig = pylab.figure() + for ifo in ifos: + trigs = columns_from_file_list(file_list, + ['snr', 'end_time'], + ifo, start, end) + print(trigs) + pylab.scatter(trigs['end_time'], trigs['snr'], label=ifo, + color=ifo_color[ifo]) + + fmt = '.12g' + mpld3.plugins.connect(fig, mpld3.plugins.MousePosition(fmt=fmt)) + pylab.legend() + pylab.xlabel('Time (s)') + pylab.ylabel('SNR') + pylab.grid() + return mpld3.fig_to_html(fig)
+ + +
+[docs] +def times_to_urls(times, window, tag): + base = '/../followup/%s/%s/%s' + return times_to_links(times, window, tag, base=base)
+ + + + + + + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/layout.html b/latest/html/_modules/pycbc/results/layout.html new file mode 100644 index 00000000000..d56bc28be54 --- /dev/null +++ b/latest/html/_modules/pycbc/results/layout.html @@ -0,0 +1,248 @@ + + + + + + pycbc.results.layout — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.layout

+# Copyright (C) 2015 Alexander Harvey Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+""" This module contains result page layout and numbering helper functions
+"""
+import os.path
+from itertools import zip_longest
+
+
+[docs] +def two_column_layout(path, cols, unique='', **kwargs): + """ Make a well layout in a two column format + + Parameters + ---------- + path: str + Location to make the well html file + unique: str + String to add to end of well name. Used if you want more than one well. + cols: list of tuples + The format of the items on the well result section. Each tuple + contains the two files that are shown in the left and right hand + side of a row in the well.html page. + """ + path = os.path.join(os.getcwd(), path, 'well{}.html'.format(unique)) + from pycbc.results.render import render_workflow_html_template + render_workflow_html_template(path, 'two_column.html', cols, **kwargs)
+ + +
+[docs] +def single_layout(path, files, **kwargs): + """ Make a well layout in single column format + + path: str + Location to make the well html file + files: list of pycbc.workflow.core.Files + This list of images to show in order within the well layout html file. + """ + two_column_layout(path, [(f,) for f in files], **kwargs)
+ + +
+[docs] +def grouper(iterable, n, fillvalue=None): + """ Group items into chunks of n length + """ + args = [iter(iterable)] * n + return zip_longest(*args, fillvalue=fillvalue)
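For example, grouping five image names into rows of two pads the last row with None:

from pycbc.results.layout import grouper

files = ['a.png', 'b.png', 'c.png', 'd.png', 'e.png']
print(list(grouper(files, 2)))
# [('a.png', 'b.png'), ('c.png', 'd.png'), ('e.png', None)]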
+ + +
+[docs] +def group_layout(path, files, **kwargs): + """ Make a well layout in chunks of two from a list of files + + path: str + Location to make the well html file + files: list of pycbc.workflow.core.Files + This list of images to show in order within the well layout html file. + Every two are placed on the same row. + """ + if len(files) > 0: + two_column_layout(path, list(grouper(files, 2)), **kwargs)
+ + +
+[docs] +class SectionNumber(object): + """ Class to help with numbering sections in an output page. + """ + def __init__(self, base, secs): + """ Create section numbering instance + + Parameters + ---------- + base: path + The path of the output html results directory + secs: list of strings + List of the subsections of the output html page + """ + self.base = base + self.secs = secs + self.name = {} + self.count = {} + self.num = {} + + for num, sec in enumerate(secs): + self.name[sec] = '%s._%s' % (num + 1, sec) + self.num[sec] = num + self.count[sec] = 1 + + def __getitem__(self, path): + """ Return the path to use for the given subsection with numbering + included. The numbering increments for each new subsection request. If + a section is re-requested, it gets the original numbering. + """ + if path in self.name: + name = self.name[path] + else: + sec, subsec = path.split('/') + subnum = self.count[sec] + num = self.num[sec] + name = '%s/%s.%02d_%s' % (self.name[sec], num + 1, subnum, subsec) + self.count[sec] += 1 + self.name[path] = name + path = os.path.join(os.getcwd(), self.base, name) + return path
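A short sketch of how the numbering behaves:

from pycbc.results.layout import SectionNumber

secs = SectionNumber('results', ['injections', 'triggers'])
print(secs['injections/found'])   # .../results/1._injections/1.01_found
print(secs['triggers/snr'])       # .../results/2._triggers/2.01_snr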
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/metadata.html b/latest/html/_modules/pycbc/results/metadata.html new file mode 100644 index 00000000000..3a5647d3925 --- /dev/null +++ b/latest/html/_modules/pycbc/results/metadata.html @@ -0,0 +1,313 @@ + + + + + + pycbc.results.metadata — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.metadata

+"""
+This module contains generic utility functions for creating plots within
+PyCBC.
+"""
+import os.path, pycbc.version
+import configparser as ConfigParser
+from html.parser import HTMLParser
+from xml.sax.saxutils import escape, unescape
+
+escape_table = {
+                '"': "&quot;",
+                "'": "&apos;",
+                "@": "&#64;",
+                }
+unescape_table = {
+                  "&#64;" : "@",
+                 }
+for k, v in escape_table.items():
+    unescape_table[v] = k
+
+
+[docs] +def html_escape(text): + """ Sanitize text for html parsing """ + return escape(text, escape_table)
+ + +
+[docs] +class MetaParser(HTMLParser): + def __init__(self): + self.metadata = {} + HTMLParser.__init__(self) + +
+[docs] + def handle_data(self, data): + pass
+ + +
+[docs] + def handle_starttag(self, tag, attrs): + attr= {} + for key, value in attrs: + attr[key] = value + if tag == 'div' and 'class' in attr and attr['class'] == 'pycbc-meta': + self.metadata[attr['key']] = unescape(attr['value'], unescape_table)
+
+ + + +
+[docs] +def save_html_with_metadata(fig, filename, fig_kwds, kwds): + """ Save a html output to file with metadata """ + if isinstance(fig, str): + text = fig + else: + from mpld3 import fig_to_html + text = fig_to_html(fig, **fig_kwds) + + f = open(filename, 'w') + for key, value in kwds.items(): + value = escape(value, escape_table) + line = "<div class=pycbc-meta key=\"%s\" value=\"%s\"></div>" % (str(key), value) + f.write(line) + + f.write(text)
+ + +
+[docs] +def load_html_metadata(filename): + """ Get metadata from html file """ + parser = MetaParser() + data = open(filename, 'r').read() + + if 'pycbc-meta' in data: + print("LOADING HTML FILE %s" % filename) + parser.feed(data) + cp = ConfigParser.ConfigParser(parser.metadata) + cp.add_section(os.path.basename(filename)) + return cp
+ + +
+[docs] +def save_png_with_metadata(fig, filename, fig_kwds, kwds): + """ Save a matplotlib figure to a png with metadata + """ + from PIL import Image, PngImagePlugin + fig.savefig(filename, **fig_kwds) + + im = Image.open(filename) + meta = PngImagePlugin.PngInfo() + + for key in kwds: + meta.add_text(str(key), str(kwds[key])) + + im.save(filename, "png", pnginfo=meta)
+ + +
+[docs] +def save_pdf_with_metadata(fig, filename, fig_kwds, kwds): + """Save a matplotlib figure to a PDF file with metadata. + """ + # https://stackoverflow.com/a/17462125 + from matplotlib.backends.backend_pdf import PdfPages + + with PdfPages(filename) as pdfp: + fig.savefig(pdfp, format='pdf', **fig_kwds) + metadata = pdfp.infodict() + for key in kwds: + if str(key).lower() == 'title': + # map the title to the official PDF keyword (capitalized) + metadata['Title'] = str(kwds[key]) + else: + metadata[str(key)] = str(kwds[key])
+ + +
+[docs] +def load_png_metadata(filename): + from PIL import Image + data = Image.open(filename).info + cp = ConfigParser.ConfigParser(data) + cp.add_section(os.path.basename(filename)) + return cp
+ + +_metadata_saver = {'.png': save_png_with_metadata, + '.html': save_html_with_metadata, + '.pdf': save_pdf_with_metadata, + } +_metadata_loader = {'.png': load_png_metadata, + '.html': load_html_metadata, + } + +
+[docs] +def save_fig_with_metadata(fig, filename, fig_kwds=None, **kwds): + """ Save plot to file with metadata included. Keywords translate to metadata + that is stored directly in the plot file. Limited format types available. + + Parameters + ---------- + fig: matplotlib figure + The matplotlib figure to save to the file + filename: str + Name of file to store the plot. + """ + if fig_kwds is None: + fig_kwds = {} + try: + extension = os.path.splitext(filename)[1] + kwds['version'] = pycbc.version.git_verbose_msg + _metadata_saver[extension](fig, filename, fig_kwds, kwds) + except KeyError: + raise TypeError('Cannot save file %s with metadata, extension %s not ' + 'supported at this time' % (filename, extension))
+ + +
+[docs] +def load_metadata_from_file(filename): + """ Load the plot related metadata saved in a file + + Parameters + ---------- + filename: str + Name of file load metadata from. + + Returns + ------- + cp: ConfigParser + A configparser object containing the metadata + """ + try: + extension = os.path.splitext(filename)[1] + return _metadata_loader[extension](filename) + except KeyError: + raise TypeError('Cannot read metadata from file %s, extension %s not ' + 'supported at this time' % (filename, extension))
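A round-trip sketch: save a PNG with metadata and read it back (the PNG path requires Pillow):

import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from pycbc.results.metadata import (save_fig_with_metadata,
                                    load_metadata_from_file)

fig = pyplot.figure()
pyplot.plot([0, 1], [0, 1])
save_fig_with_metadata(fig, 'example.png',
                       title='Example plot', caption='A straight line.')

cp = load_metadata_from_file('example.png')
print(cp.get('example.png', 'title'))   # 'Example plot'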
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/mpld3_utils.html b/latest/html/_modules/pycbc/results/mpld3_utils.html new file mode 100644 index 00000000000..09492c80fd6 --- /dev/null +++ b/latest/html/_modules/pycbc/results/mpld3_utils.html @@ -0,0 +1,260 @@ + + + + + + pycbc.results.mpld3_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.mpld3_utils

+""" This module provides functionality to extend mpld3
+"""
+import mpld3, mpld3.plugins, mpld3.utils
+
+
+
+
+
+[docs] +class MPLSlide(mpld3.plugins.PluginBase): + JAVASCRIPT = """ + mpld3.Axes.prototype.zoomed = function(propagate) { + propagate = typeof propagate == "undefined" ? true : propagate; + if (propagate) { + var dt0 = this.zoom.translate()[0] - this.zoom.last_t[0]; + var dt1 = this.zoom.translate()[1] - this.zoom.last_t[1]; + var ds = this.zoom.scale() / this.zoom.last_s; + this.zoom_x.translate([ this.zoom_x.translate()[0] + dt0, 0 ]); + this.zoom_x.scale(this.zoom_x.scale() * ds); + + this.zoom.last_t = this.zoom.translate(); + this.zoom.last_s = this.zoom.scale(); + this.sharex.forEach(function(ax) { + ax.zoom_x.translate(this.zoom_x.translate()).scale(this.zoom_x.scale()); + }.bind(this)); + + this.sharex.forEach(function(ax) { + ax.zoomed(false); + }); + } + for (var i = 0; i < this.elements.length; i++) { + this.elements[i].zoomed(); + } + }; + + mpld3.ZoomPlugin = mpld3_ZoomPlugin; + mpld3.register_plugin("zoom", mpld3_ZoomPlugin); + mpld3_ZoomPlugin.prototype = Object.create(mpld3.Plugin.prototype); + mpld3_ZoomPlugin.prototype.constructor = mpld3_ZoomPlugin; + mpld3_ZoomPlugin.prototype.requiredProps = []; + mpld3_ZoomPlugin.prototype.defaultProps = { + button: true, + enabled: null + }; + function mpld3_ZoomPlugin(fig, props) { + mpld3.Plugin.call(this, fig, props); + if (this.props.enabled === null) { + this.props.enabled = !this.props.button; + } + var enabled = this.props.enabled; + if (this.props.button) { + var ZoomButton = mpld3.ButtonFactory({ + buttonID: "zoom", + sticky: true, + actions: [ "scroll", "drag" ], + onActivate: this.activate.bind(this), + onDeactivate: this.deactivate.bind(this), + onDraw: function() { + this.setState(enabled); + }, + icon: function() { + return mpld3.icons["move"]; + } + }); + this.fig.buttons.push(ZoomButton); + } + } + mpld3_ZoomPlugin.prototype.activate = function() { + this.fig.enable_zoom(); + }; + mpld3_ZoomPlugin.prototype.deactivate = function() { + this.fig.disable_zoom(); + }; + mpld3_ZoomPlugin.prototype.draw = function() { + if (this.props.enabled) this.fig.enable_zoom(); else this.fig.disable_zoom(); + }; + """ + def __init__(self, button=True, enabled=None): + if enabled is None: + enabled = not button + self.dict_ = {"type": "zoom", + "button": button, + "enabled": enabled}
+ + +
+[docs] +class Tooltip(mpld3.plugins.PointHTMLTooltip): + JAVASCRIPT = "" + def __init__(self, points, labels=None, + hoffset=0, voffset=10, css=None): + super(Tooltip, self).__init__(points, labels, hoffset, voffset, "")
+ + +
+[docs] +class LineTooltip(mpld3.plugins.LineHTMLTooltip): + JAVASCRIPT = "" + def __init__(self, line, label=None, hoffset=0, voffset=10, css=None): + super(LineTooltip, self).__init__(line, label, hoffset, voffset, "")
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/plot.html b/latest/html/_modules/pycbc/results/plot.html new file mode 100644 index 00000000000..b7782daa0fb --- /dev/null +++ b/latest/html/_modules/pycbc/results/plot.html @@ -0,0 +1,185 @@ + + + + + + pycbc.results.plot — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.plot

+""" Plotting utilities and premade plot configurations
+"""
+
+
+[docs] +def hist_overflow(val, val_max, **kwds): + """ Make a histogram with an overflow bar above val_max """ + import pylab + + overflow = len(val[val>=val_max]) + pylab.hist(val[val<val_max], **kwds) + + if 'color' in kwds: + color = kwds['color'] + else: + color = None + + if overflow > 0: + rect = pylab.bar(val_max+0.05, overflow, .5, color=color)[0] + pylab.text(rect.get_x(), + 1.10*rect.get_height(), '%s+' % val_max)
+ + + +
+[docs] +def add_style_opt_to_parser(parser, default=None): + """Adds an option to set the matplotlib style to a parser. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to add the option to. + default : str, optional + The default style to use. Default, None, will result in the default + matplotlib style to be used. + """ + from matplotlib import pyplot + parser.add_argument('--mpl-style', default=default, + choices=['default']+pyplot.style.available+['xkcd'], + help='Set the matplotlib style to use.')
+ + + +
+[docs] +def set_style_from_cli(opts): + """Uses the mpl-style option to set the style for plots. + + Note: This will change the global rcParams. + """ + from matplotlib import pyplot + if opts.mpl_style == 'xkcd': + # this is treated differently for some reason + pyplot.xkcd() + elif opts.mpl_style is not None: + pyplot.style.use(opts.mpl_style)
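These two helpers are meant to be used together in a plotting executable, e.g.:

import argparse
from pycbc.results.plot import add_style_opt_to_parser, set_style_from_cli

parser = argparse.ArgumentParser()
add_style_opt_to_parser(parser)
opts = parser.parse_args(['--mpl-style', 'ggplot'])
set_style_from_cli(opts)   # changes the global matplotlib rcParams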
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/psd.html b/latest/html/_modules/pycbc/results/psd.html new file mode 100644 index 00000000000..0729428b351 --- /dev/null +++ b/latest/html/_modules/pycbc/results/psd.html @@ -0,0 +1,207 @@ + + + + + + pycbc.results.psd — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.psd

+# Copyright (C) 2022
+# Tito Dal Canton, Gareth Cabourn Davies, Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+Module to generate PSD figures
+"""
+from pycbc.results import ifo_color
+from pycbc import DYN_RANGE_FAC
+
+
+
+[docs] +def generate_asd_plot(psddict, output_filename, f_min=10.): + """ + Generate an ASD plot as used for upload to GraceDB. + + Parameters + ---------- + psddict: dictionary + A dictionary keyed on ifo containing the PSDs as + FrequencySeries objects + + output_filename: string + The filename for the plot to be saved to + + f_min: float + Minimum frequency at which anything should be plotted + + Returns + ------- + None + """ + from matplotlib import pyplot as plt + asd_fig, asd_ax = plt.subplots(1) + asd_min = [1E-24] # Default minimum to plot + + for ifo in sorted(psddict.keys()): + curr_psd = psddict[ifo] + freqs = curr_psd.sample_frequencies + physical = (freqs >= f_min) # Ignore lower frequencies + asd_to_plot = curr_psd[physical] ** 0.5 / DYN_RANGE_FAC + asd_min.append(min(asd_to_plot)) + asd_ax.loglog(freqs[physical], + asd_to_plot, + c=ifo_color(ifo), + label=ifo) + + asd_ax.grid(True) + asd_ax.legend() + asd_ax.set_xlim([f_min, 1300]) + asd_ax.set_ylim([min(asd_min), 1E-20]) + asd_ax.set_xlabel('Frequency (Hz)') + asd_ax.set_ylabel('ASD') + asd_fig.savefig(output_filename)
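A hedged sketch using analytic PSDs; the PSDs are scaled by DYN_RANGE_FAC**2 here because the function divides the ASD by DYN_RANGE_FAC, which assumes dynamic-range-scaled PSDs as stored by the search:

import pycbc.psd
from pycbc import DYN_RANGE_FAC
from pycbc.results.psd import generate_asd_plot

delta_f = 1.0 / 256
flen = int(2048 / delta_f) + 1
# Analytic design-sensitivity PSD, rescaled to the dynamic-range convention.
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 10.0) * DYN_RANGE_FAC ** 2
generate_asd_plot({'H1': psd, 'L1': psd}, 'asds.png')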
+ + + +__all__ = ["generate_asd_plot"] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/pygrb_plotting_utils.html b/latest/html/_modules/pycbc/results/pygrb_plotting_utils.html new file mode 100644 index 00000000000..8edb570a4c7 --- /dev/null +++ b/latest/html/_modules/pycbc/results/pygrb_plotting_utils.html @@ -0,0 +1,371 @@ + + + + + + pycbc.results.pygrb_plotting_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.pygrb_plotting_utils

+# Copyright (C) 2019 Francesco Pannarale, Gino Contestabile, Cameron Mills
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+# =============================================================================
+# Preamble
+# =============================================================================
+
+"""
+Module to generate PyGRB figures: scatter plots and timeseries.
+"""
+
+import copy
+import numpy
+from ligo import segments
+from pycbc.results import save_fig_with_metadata
+
+
+# =============================================================================
+# Used locally: plot contours in a scatter plot with SNR as horizontal axis
+# =============================================================================
+
+[docs] +def contour_plotter(axis, snr_vals, contours, colors, vert_spike=False): + """Plot contours in a scatter plot where SNR is on the horizontal axis""" + + for i, _ in enumerate(contours): + plot_vals_x = [] + plot_vals_y = [] + if vert_spike: + for j, _ in enumerate(snr_vals): + # Workaround to ensure vertical spike is shown on veto plots + if contours[i][j] > 1E-15 and not plot_vals_x: + plot_vals_x.append(snr_vals[j]) + plot_vals_y.append(0.1) + if contours[i][j] > 1E-15 and plot_vals_x: + plot_vals_x.append(snr_vals[j]) + plot_vals_y.append(contours[i][j]) + else: + plot_vals_x = snr_vals + plot_vals_y = contours[i] + axis.plot(plot_vals_x, plot_vals_y, colors[i])
+ + + +# +# Functions used in executables +# + +# ============================================================================= +# Plot trigger time and offsource extent over segments +# Courtesy of Alex Dietz +# ============================================================================= +
+[docs] +def make_grb_segments_plot(wkflow, science_segs, trigger_time, trigger_name, + out_dir, coherent_seg=None, fail_criterion=None): + """Plot trigger time and offsource extent over segments""" + + import matplotlib.pyplot as plt + from matplotlib.patches import Rectangle + from matplotlib.lines import Line2D + from pycbc.results.color import ifo_color + + ifos = wkflow.ifos + if len(science_segs.keys()) == 0: + extent = segments.segment(int(wkflow.cp.get("workflow", "start-time")), + int(wkflow.cp.get("workflow", "end-time"))) + else: + pltpad = [science_segs.extent_all()[1] - trigger_time, + trigger_time - science_segs.extent_all()[0]] + extent = segments.segmentlist([science_segs.extent_all(), + segments.segment(trigger_time + - pltpad[0], + trigger_time + + pltpad[1])]).extent() + + ifo_colors = {} + for ifo in ifos: + ifo_colors[ifo] = ifo_color(ifo) + if ifo not in science_segs.keys(): + science_segs[ifo] = segments.segmentlist([]) + + # Make plot + fig, subs = plt.subplots(len(ifos), sharey=True) + if len(ifos) == 1: + subs = [subs] + plt.xticks(rotation=20, ha='right') + for sub, ifo in zip(subs, ifos): + for seg in science_segs[ifo]: + sub.add_patch(Rectangle((seg[0], 0.1), abs(seg), 0.8, + facecolor=ifo_colors[ifo], + edgecolor='none')) + if coherent_seg: + if len(science_segs[ifo]) > 0 and \ + coherent_seg in science_segs[ifo]: + sub.plot([trigger_time, trigger_time], [0, 1], '-', + c='orange') + sub.add_patch(Rectangle((coherent_seg[0], 0), + abs(coherent_seg), 1, alpha=0.5, + facecolor='orange', edgecolor='none')) + else: + sub.plot([trigger_time, trigger_time], [0, 1], ':', + c='orange') + sub.plot([coherent_seg[0], coherent_seg[0]], [0, 1], '--', + c='orange', alpha=0.5) + sub.plot([coherent_seg[1], coherent_seg[1]], [0, 1], '--', + c='orange', alpha=0.5) + else: + sub.plot([trigger_time, trigger_time], [0, 1], ':k') + if fail_criterion: + if len(science_segs[ifo]) > 0: + style_str = '--' + else: + style_str = '-' + sub.plot([fail_criterion[0], fail_criterion[0]], [0, 1], style_str, + c='black', alpha=0.5) + sub.plot([fail_criterion[1], fail_criterion[1]], [0, 1], style_str, + c='black', alpha=0.5) + + sub.set_frame_on(False) + sub.set_yticks([]) + sub.set_ylabel(ifo, rotation=45) + sub.set_ylim([0, 1]) + sub.set_xlim([float(extent[0]), float(extent[1])]) + sub.get_xaxis().get_major_formatter().set_useOffset(False) + sub.get_xaxis().get_major_formatter().set_scientific(False) + sub.get_xaxis().tick_bottom() + if sub is subs[-1]: + sub.tick_params(labelsize=10, pad=1) + else: + sub.get_xaxis().set_ticks([]) + sub.get_xaxis().set_ticklabels([]) + + xmin, xmax = fig.axes[-1].get_xaxis().get_view_interval() + ymin, _ = fig.axes[-1].get_yaxis().get_view_interval() + fig.axes[-1].add_artist(Line2D((xmin, xmax), (ymin, ymin), color='black', + linewidth=2)) + fig.axes[-1].set_xlabel('GPS Time') + + fig.axes[0].set_title('Science Segments for GRB%s' % trigger_name) + plt.tight_layout() + fig.subplots_adjust(hspace=0) + + plot_name = 'GRB%s_segments.png' % trigger_name + plot_url = 'file://localhost%s/%s' % (out_dir, plot_name) + fig.savefig('%s/%s' % (out_dir, plot_name)) + + return [ifos, plot_name, extent, plot_url]
+ + + +# ============================================================================= +# Given the trigger and injection values of a quantity, determine the maximum +# ============================================================================= +
+[docs] +def axis_max_value(trig_values, inj_values, inj_file): + """Determine the maximum of a quantity in the trigger and injection data""" + + axis_max = trig_values.max() + if inj_file and inj_values.size and inj_values.max() > axis_max: + axis_max = inj_values.max() + + return axis_max
+ + + +# ============================================================================= +# Given the trigger and injection values of a quantity, determine the minimum +# ============================================================================= +
+[docs] +def axis_min_value(trig_values, inj_values, inj_file): + """Determine the minimum of a quantity in the trigger and injection data""" + + axis_min = trig_values.min() + if inj_file and inj_values.size and inj_values.min() < axis_min: + axis_min = inj_values.min() + + return axis_min
+ + + +# ============================================================================= +# Master plotting function: fits all plotting needs in for PyGRB results +# ============================================================================= +
+[docs] +def pygrb_plotter(trigs, injs, xlabel, ylabel, opts, + snr_vals=None, conts=None, shade_cont_value=None, + colors=None, vert_spike=False, cmd=None): + """Master function to plot PyGRB results""" + from matplotlib import pyplot as plt + + # Set up plot + fig = plt.figure() + cax = fig.gca() + # Plot trigger-related and (if present) injection-related quantities + cax_plotter = cax.loglog if opts.use_logs else cax.plot + cax_plotter(trigs[0], trigs[1], 'bx') + if not (injs[0] is None and injs[1] is None): + cax_plotter(injs[0], injs[1], 'r+') + cax.grid() + # Plot contours + if conts is not None: + contour_plotter(cax, snr_vals, conts, colors, vert_spike=vert_spike) + # Add shading above a specific contour (typically used for vetoed area) + if shade_cont_value is not None: + limy = cax.get_ylim()[1] + polyx = copy.deepcopy(snr_vals) + polyy = copy.deepcopy(conts[shade_cont_value]) + polyx = numpy.append(polyx, [max(snr_vals), min(snr_vals)]) + polyy = numpy.append(polyy, [limy, limy]) + cax.fill(polyx, polyy, color='#dddddd') + # Axes: labels and limits + cax.set_xlabel(xlabel) + cax.set_ylabel(ylabel) + if opts.x_lims: + x_lims = map(float, opts.x_lims.split(',')) + cax.set_xlim(x_lims) + if opts.y_lims: + y_lims = map(float, opts.y_lims.split(',')) + cax.set_ylim(y_lims) + # Wrap up + plt.tight_layout() + save_fig_with_metadata(fig, opts.output_file, cmd=cmd, + title=opts.plot_title, + caption=opts.plot_caption) + plt.close()
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/pygrb_postprocessing_utils.html b/latest/html/_modules/pycbc/results/pygrb_postprocessing_utils.html new file mode 100644 index 00000000000..340b99b15b0 --- /dev/null +++ b/latest/html/_modules/pycbc/results/pygrb_postprocessing_utils.html @@ -0,0 +1,1006 @@ + + + + + + pycbc.results.pygrb_postprocessing_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.pygrb_postprocessing_utils

+# Copyright (C) 2019 Francesco Pannarale, Gino Contestabile, Cameron Mills
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+# =============================================================================
+# Preamble
+# =============================================================================
+
+"""
+Module to generate PyGRB figures: scatter plots and timeseries.
+"""
+
+import os
+import logging
+import argparse
+import copy
+import numpy
+import h5py
+
+from scipy import stats
+import ligo.segments as segments
+from pycbc.events.coherent import reweightedsnr_cut
+from pycbc import add_common_pycbc_options
+from pycbc.io.hdf import HFile
+
+logger = logging.getLogger('pycbc.results.pygrb_postprocessing_utils')
+
+# All/most of these final imports will become obsolete with hdf5 switch
+try:
+    from ligo.lw import utils
+    from ligo.lw.table import Table
+    from ligo.segments.utils import fromsegwizard
+    from glue.ligolw import lsctables as glsctables
+    from glue.ligolw.ligolw import LIGOLWContentHandler
+except ImportError:
+    pass
+
+
+# =============================================================================
+# Arguments functions:
+# * Initialize a parser object with arguments shared by all plotting scripts
+# * Add to the parser object the arguments used for Monte-Carlo on distance
+# * Add to the parser object the arguments used for BestNR calculation
+# * Add to the parser object the arguments for found/missed injection files
+# =============================================================================
+
+[docs] +def pygrb_initialize_plot_parser(description=None): + """Sets up a basic argument parser object for PyGRB plotting scripts""" + + formatter_class = argparse.ArgumentDefaultsHelpFormatter + parser = argparse.ArgumentParser(description=description, + formatter_class=formatter_class) + add_common_pycbc_options(parser) + parser.add_argument("-o", "--output-file", default=None, + help="Output file.") + parser.add_argument("--x-lims", action="store", default=None, + help="Comma separated minimum and maximum values " + + "for the horizontal axis. When using negative " + + "values an equal sign after --x-lims is necessary.") + parser.add_argument("--y-lims", action="store", default=None, + help="Comma separated minimum and maximum values " + + "for the vertical axis. When using negative values " + + "an equal sign after --y-lims is necessary.") + parser.add_argument("--use-logs", default=False, action="store_true", + help="Produce a log-log plot") + parser.add_argument("-i", "--ifo", default=None, help="IFO used for IFO " + + "specific plots") + parser.add_argument("-a", "--seg-files", nargs="+", action="store", + default=None, help="The location of the buffer, " + + "onsource and offsource segment files.") + parser.add_argument("-V", "--veto-files", nargs="+", action="store", + default=None, help="The location of the CATX veto " + + "files provided as a list of space-separated values.") + parser.add_argument("-b", "--veto-category", action="store", type=int, + default=None, help="Apply vetoes up to this level " + + "inclusive.") + parser.add_argument('--plot-title', default=None, + help="If provided, use the given string as the plot " + + "title.") + parser.add_argument('--plot-caption', default=None, + help="If provided, use the given string as the plot " + + "caption") + return parser
+ + + +
+[docs] +def pygrb_add_slide_opts(parser): + """Add to the parser object arguments related to short timeslides""" + parser.add_argument("--slide-id", type=str, default='0', + help="If all, the plotting scripts will use triggers " + + "from all short slides.")
+ + + +
+[docs] +def slide_opts_helper(args): + """ + Convert the slide_id option to the type expected when loading data + in postprocessing scripts: an integer slide ID, or None to use all + short slides. + """ + if args.slide_id.isdigit(): + args.slide_id = int(args.slide_id) + elif args.slide_id.lower() == "all": + args.slide_id = None + else: + raise ValueError("--slide-id must be the string all or an int")
+ + + +
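+# -----------------------------------------------------------------------------
+# Illustrative usage sketch (not part of the original module): build the shared
+# plotting parser, add the short-slide option and resolve its type. The
+# description string and command-line values below are hypothetical.
+#
+#     parser = pygrb_initialize_plot_parser(description="Example PyGRB plot")
+#     pygrb_add_slide_opts(parser)
+#     args = parser.parse_args(["--slide-id", "all"])
+#     slide_opts_helper(args)   # args.slide_id is now None, i.e. use all slides
+# -----------------------------------------------------------------------------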
+[docs] +def pygrb_add_injmc_opts(parser): + """Add to parser object the arguments used for Monte-Carlo on distance.""" + if parser is None: + parser = argparse.ArgumentParser() + parser.add_argument("-M", "--num-mc-injections", action="store", + type=int, default=100, help="Number of Monte " + + "Carlo injection simulations to perform.") + parser.add_argument("-S", "--seed", action="store", type=int, + default=1234, help="Seed to initialize Monte Carlo.") + parser.add_argument("-U", "--upper-inj-dist", action="store", + type=float, default=1000, help="The upper distance " + + "of the injections in Mpc, if used.") + parser.add_argument("-L", "--lower-inj-dist", action="store", + type=float, default=0, help="The lower distance of " + + "the injections in Mpc, if used.") + parser.add_argument("-n", "--num-bins", action="store", type=int, + default=0, help="The number of bins used to " + + "calculate injection efficiency.") + parser.add_argument("-w", "--waveform-error", action="store", + type=float, default=0, help="The standard deviation " + + "to use when calculating the waveform error.") + for ifo in ["g1", "h1", "k1", "l1", "v1"]: + parser.add_argument(f"--{ifo}-cal-error", action="store", type=float, + default=0, help="The standard deviation to use " + + f"when calculating the {ifo.upper()} " + + "calibration amplitude error.") + parser.add_argument(f"--{ifo}-dc-cal-error", action="store", + type=float, default=1.0, help="The scaling " + + "factor to use when calculating the " + + f"{ifo.upper()} calibration amplitude error.")
+ + + +
+[docs] +def pygrb_add_bestnr_opts(parser): + """Add to the parser object the arguments used for BestNR calculation""" + if parser is None: + parser = argparse.ArgumentParser() + parser.add_argument("-Q", "--chisq-index", action="store", type=float, + default=6.0, help="chisq_index for newSNR " + + "calculation (default: 6)") + parser.add_argument("-N", "--chisq-nhigh", action="store", type=float, + default=2.0, help="chisq_nhigh for newSNR " + + "calculation (default: 2)")
+ + + +
+[docs] +def pygrb_add_null_snr_opts(parser): + """Add to the parser object the arguments used for null SNR calculation + and null SNR cut.""" + parser.add_argument("-A", "--null-snr-threshold", action="store", + default=5.25, + type=float, + help="Null SNR threshold for null SNR cut " + "(default: 5.25)") + parser.add_argument("-T", "--null-grad-thresh", action="store", type=float, + default=20., help="Threshold above which to " + + "increase the values of the null SNR cut") + parser.add_argument("-D", "--null-grad-val", action="store", type=float, + default=0.2, help="Rate at which the null SNR cut will " + + "increase above the threshold")
+ + + +
+[docs] +def pygrb_add_single_snr_cut_opt(parser): + """Add to the parser object an argument to place a threshold on single + detector SNR.""" + parser.add_argument("-B", "--sngl-snr-threshold", action="store", + type=float, default=4.0, help="Single detector SNR " + + "threshold, the two most sensitive detectors " + + "should have SNR above this.")
+ + + +
+[docs] +def pygrb_add_bestnr_cut_opt(parser): + """Add to the parser object an argument to place a threshold on BestNR.""" + if parser is None: + parser = argparse.ArgumentParser() + parser.add_argument("--newsnr-threshold", type=float, metavar='THRESHOLD', + default=0., + help="Cut triggers with NewSNR less than THRESHOLD. " + + "Default 0: all events are considered.")
+ + + +# ============================================================================= +# Wrapper to pick triggers with certain slide_ids +# ============================================================================= +
+[docs] +def slide_filter(trig_file, data, slide_id=None): + """ + This function adds the capability to select triggers with specific + slide_ids during the postprocessing stage of PyGRB. + """ + if slide_id is None: + return data + mask = numpy.where(trig_file['network/slide_id'][:] == slide_id)[0] + return data[mask]
+ + + +# ============================================================================= +# Wrapper to read segment files +# ============================================================================= +def _read_seg_files(seg_files): + """Read the segment txt files""" + + if seg_files is None or len(seg_files) != 3: + err_msg = "The location of three segment files is necessary. " + err_msg += "[bufferSeg.txt, offSourceSeg.txt, onSourceSeg.txt]" + raise RuntimeError(err_msg) + + times = {} + keys = ["buffer", "off", "on"] + + for key, seg_file in zip(keys, seg_files): + segs = fromsegwizard(open(seg_file, 'r')) + if len(segs) > 1: + err_msg = 'More than one segment, an error has occurred.' + raise RuntimeError(err_msg) + times[key] = segs[0] + + return times + + +# ============================================================================= +# Function to load a table from an xml file +# ============================================================================= +
+[docs] +def load_xml_table(file_name, table_name): + """Load xml table from file.""" + + xml_doc = utils.load_filename( + file_name, + compress='auto', + contenthandler=glsctables.use_in(LIGOLWContentHandler) + ) + return Table.get_table(xml_doc, table_name)
+ + + +# ============================================================================= +# Function to load segments from an xml file +# ============================================================================= +def _load_segments_from_xml(xml_doc, return_dict=False, select_id=None): + """Read a ligo.segments.segmentlist from the file object file containing an + xml segment table. + + Parameters + ---------- + xml_doc: name of segment xml file + + Keyword Arguments: + return_dict : [ True | False ] + return a ligo.segments.segmentlistdict containing coalesced + ligo.segments.segmentlists keyed by seg_def.name for each entry + in the contained segment_def_table. Default False + select_id : int + return a ligo.segments.segmentlist object containing only + those segments matching the given segment_def_id integer + + """ + + # Load SegmentDefTable and SegmentTable + seg_def_table = load_xml_table(xml_doc, + glsctables.SegmentDefTable.tableName) + seg_table = load_xml_table(xml_doc, glsctables.SegmentTable.tableName) + + if return_dict: + segs = segments.segmentlistdict() + else: + segs = segments.segmentlist() + + seg_id = {} + for seg_def in seg_def_table: + seg_id[int(seg_def.segment_def_id)] = str(seg_def.name) + if return_dict: + segs[str(seg_def.name)] = segments.segmentlist() + + for seg in seg_table: + if return_dict: + segs[seg_id[int(seg.segment_def_id)]]\ + .append(segments.segment(seg.start_time, seg.end_time)) + continue + if select_id and int(seg.segment_def_id) == select_id: + segs.append(segments.segment(seg.start_time, seg.end_time)) + continue + segs.append(segments.segment(seg.start_time, seg.end_time)) + + if return_dict: + for seg_name in seg_id.values(): + segs[seg_name] = segs[seg_name].coalesce() + else: + segs = segs.coalesce() + + return segs + + +# ============================================================================= +# Function to extract vetoes +# ============================================================================= +def _extract_vetoes(all_veto_files, ifos, veto_cat): + """Extracts vetoes from veto filelist""" + + if all_veto_files and (veto_cat is None): + err_msg = "Must supply veto category to apply vetoes." + raise RuntimeError(err_msg) + + # Initialize veto containers + vetoes = segments.segmentlistdict() + for ifo in ifos: + vetoes[ifo] = segments.segmentlist() + + veto_files = [] + veto_cats = range(2, veto_cat+1) + for cat in veto_cats: + veto_files += [vf for vf in all_veto_files if "CAT"+str(cat) in vf] + n_found = len(veto_files) + n_expected = len(ifos)*len(veto_cats) + if n_found != n_expected: + err_msg = f"Found {n_found} veto files instead of the expected " + err_msg += f"{n_expected}; check the options." 
+ raise RuntimeError(err_msg) + + # Construct veto list from veto filelist + if veto_files: + for veto_file in veto_files: + ifo = os.path.basename(veto_file)[:2] + if ifo in ifos: + # This returns a coalesced list of the vetoes + tmp_veto_segs = _load_segments_from_xml(veto_file) + for entry in tmp_veto_segs: + vetoes[ifo].append(entry) + for ifo in ifos: + vetoes[ifo].coalesce() + + return vetoes + + +# ============================================================================= +# Function to get the ID numbers from a LIGO-LW table +# ============================================================================= +def _get_id_numbers(ligolw_table, column): + """Grab the IDs of a LIGO-LW table""" + + ids = [int(getattr(row, column)) for row in ligolw_table] + + return ids + + +# ============================================================================= +# Function to build a dictionary (indexed by ifo) of time-slid vetoes +# ============================================================================= +def _slide_vetoes(vetoes, slide_dict_or_list, slide_id, ifos): + """Build a dictionary (indexed by ifo) of time-slid vetoes""" + + # Copy vetoes + if vetoes is not None: + slid_vetoes = copy.deepcopy(vetoes) + # Slide them + for ifo in ifos: + slid_vetoes[ifo].shift(-slide_dict_or_list[slide_id][ifo]) + else: + slid_vetoes = {ifo: segments.segmentlist() for ifo in ifos} + + return slid_vetoes + + +# +# Used (also) in executables +# + +# ============================================================================= +# Functions to load triggers +# ============================================================================= +
+[docs] +def dataset_iterator(g, prefix=''): + """Yield all datasets in an HDF file""" + + for key, item in g.items(): + # Avoid slash as first character + path = prefix[1:] + '/' + key + if isinstance(item, h5py.Dataset): + yield (path, item) + elif isinstance(item, h5py.Group): + yield from dataset_iterator(item, path)
+ + + +
+[docs] +def load_triggers(input_file, ifos, vetoes, rw_snr_threshold=None, + slide_id=None): + """Loads triggers from PyGRB output file, returning a dictionary""" + + trigs = HFile(input_file, 'r') + rw_snr = trigs['network/reweighted_snr'][:] + net_ids = trigs['network/event_id'][:] + ifo_ids = {} + for ifo in ifos: + ifo_ids[ifo] = trigs[ifo+'/event_id'][:] + trigs.close() + + if vetoes is not None: + # Developers: see PR 3972 for previous implementation + raise NotImplementedError + + # Apply the reweighted SNR cut on the reweighted SNR + if rw_snr_threshold is not None: + rw_snr = reweightedsnr_cut(rw_snr, rw_snr_threshold) + + # Establish the indices of data not surviving the cut + above_thresh = rw_snr > 0 + num_orig_pts = len(above_thresh) + + # Do not assume that IFO and network datasets are sorted the same way: + # find where each surviving network/event_id is placed in the IFO/event_id + ifo_ids_above_thresh_locations = {} + for ifo in ifos: + ifo_ids_above_thresh_locations[ifo] = \ + numpy.array([numpy.where(ifo_ids[ifo] == net_id)[0][0] + for net_id in net_ids[above_thresh]]) + + # Apply the cut on all the data by remove points with reweighted SNR = 0 + trigs_dict = {} + with HFile(input_file, "r") as trigs: + for (path, dset) in dataset_iterator(trigs): + # The dataset contains information other than trig/inj properties: + # just copy it + if len(dset) != num_orig_pts: + trigs_dict[path] = dset[:] + # The dataset is relative to an IFO: cut with the correct index + elif path[:2] in ifos: + ifo = path[:2] + if ifo_ids_above_thresh_locations[ifo].size != 0: + trigs_dict[path] = \ + dset[:][ifo_ids_above_thresh_locations[ifo]] + else: + trigs_dict[path] = numpy.array([]) + # The dataset is relative to the network: cut it before copying + else: + trigs_dict[path] = dset[above_thresh] + + if trigs_dict[path].size == trigs['network/slide_id'][:].size: + trigs_dict[path] = slide_filter(trigs, trigs_dict[path], + slide_id=slide_id) + + return trigs_dict
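+# -----------------------------------------------------------------------------
+# Illustrative usage sketch (not part of the original module): read triggers
+# from a PyGRB output file, applying a reweighted-SNR cut and keeping only the
+# zero-lag slide. The file name is hypothetical.
+#
+#     ifos = extract_ifos('GRB_TRIGGERS.hdf')          # e.g. ['H1', 'L1']
+#     trigs = load_triggers('GRB_TRIGGERS.hdf', ifos, None,
+#                           rw_snr_threshold=6.0, slide_id=0)
+#     print(trigs['network/reweighted_snr'])
+# -----------------------------------------------------------------------------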
+ + + +# ============================================================================= +# Detector utils: +# * Function to calculate the antenna response F+^2 + Fx^2 +# * Function to calculate the antenna distance factor +# ============================================================================= +def _get_antenna_single_response(antenna, ra, dec, geocent_time): + """Returns the antenna response F+^2 + Fx^2 of an IFO (passed as pycbc + Detector type) at a given sky location and time.""" + + fp, fc = antenna.antenna_pattern(ra, dec, 0, geocent_time) + + return fp**2 + fc**2 + + +# Vectorize the function above on all but the first argument +get_antenna_responses = numpy.vectorize(_get_antenna_single_response, + otypes=[float]) +get_antenna_responses.excluded.add(0) + + +
+[docs] +def get_antenna_dist_factor(antenna, ra, dec, geocent_time, inc=0.0): + """Returns the antenna factors (defined as eq. 4.3 on page 57 of + Duncan Brown's Ph.D.) for an IFO (passed as pycbc Detector type) at + a given sky location and time.""" + + fp, fc = antenna.antenna_pattern(ra, dec, 0, geocent_time) + + return numpy.sqrt(fp ** 2 * (1 + numpy.cos(inc)) ** 2 / 4 + fc ** 2)
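+# -----------------------------------------------------------------------------
+# Illustrative sketch (not part of the original module): evaluate the combined
+# antenna response and the distance factor of a detector at one sky location.
+# The detector name, sky position and GPS time are arbitrary examples.
+#
+#     from pycbc.detector import Detector
+#     det = Detector('H1')
+#     resp = get_antenna_responses(det, numpy.array([1.2]),
+#                                  numpy.array([-0.3]),
+#                                  numpy.array([1126259462.0]))
+#     dist_fac = get_antenna_dist_factor(det, 1.2, -0.3, 1126259462.0, inc=0.5)
+# -----------------------------------------------------------------------------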
+ + + +# ============================================================================= +# Construct sorted triggers from trials +# ============================================================================= +
+[docs] +def sort_trigs(trial_dict, trigs, slide_dict, seg_dict): + """Constructs sorted triggers from a trials dictionary""" + + sorted_trigs = {} + + # Begin by sorting the triggers into each slide + for slide_id in slide_dict: + sorted_trigs[slide_id] = [] + for slide_id, event_id in zip(trigs['network/slide_id'], + trigs['network/event_id']): + sorted_trigs[slide_id].append(event_id) + + for slide_id in slide_dict: + # These can only *reduce* the analysis time + curr_seg_list = seg_dict[slide_id] + + # Check the triggers are all in the analysed segment lists + for event_id in sorted_trigs[slide_id]: + index = numpy.flatnonzero(trigs['network/event_id'] == event_id)[0] + end_time = trigs['network/end_time_gc'][index] + if end_time not in curr_seg_list: + # This can be raised if the trigger is on the segment boundary, + # so check if the trigger is within 1/100 of a second within + # the list + if end_time + 0.01 in curr_seg_list: + continue + if end_time - 0.01 in curr_seg_list: + continue + err_msg = "Triggers found in input files not in the list of " + err_msg += "analysed segments. This should not happen." + raise RuntimeError(err_msg) + # END OF CHECK # + + # Keep triggers that are in trial_dict + sorted_trigs[slide_id] = [event_id for event_id in + sorted_trigs[slide_id] + if trigs['network/end_time_gc'][ + trigs['network/event_id'] == event_id][0] + in trial_dict[slide_id]] + + return sorted_trigs
+ + + +# ============================================================================= +# Extract basic trigger properties and store them as dictionaries +# ============================================================================= +
+[docs] +def extract_basic_trig_properties(trial_dict, trigs, slide_dict, seg_dict, + opts): + """Extract and store as dictionaries time, SNR, and BestNR of + time-slid triggers""" + + # Sort the triggers into each slide + sorted_trigs = sort_trigs(trial_dict, trigs, slide_dict, seg_dict) + logger.info("Triggers sorted.") + + # Build the 3 dictionaries + trig_time = {} + trig_snr = {} + trig_bestnr = {} + for slide_id in slide_dict: + slide_trigs = sorted_trigs[slide_id] + indices = numpy.nonzero( + numpy.isin(trigs['network/event_id'], slide_trigs))[0] + if slide_trigs: + trig_time[slide_id] = trigs['network/end_time_gc'][ + indices] + trig_snr[slide_id] = trigs['network/coherent_snr'][ + indices] + else: + trig_time[slide_id] = numpy.asarray([]) + trig_snr[slide_id] = numpy.asarray([]) + trig_bestnr[slide_id] = reweightedsnr_cut( + trigs['network/reweighted_snr'][indices], + opts.newsnr_threshold) + + logger.info("Time, SNR, and BestNR of triggers extracted.") + + return trig_time, trig_snr, trig_bestnr
+ + + +# ============================================================================= +# Function to extract ifos from hdfs +# ============================================================================= +
+[docs] +def extract_ifos(trig_file): + """Extracts IFOs from hdf file""" + + # Load hdf file + hdf_file = HFile(trig_file, 'r') + + # Extract IFOs + ifos = sorted(list(hdf_file.keys())) + + # Remove 'network' key from list of ifos + if 'network' in ifos: + ifos.remove('network') + + return ifos
+ + + +# ============================================================================= +# Function to extract IFOs and vetoes +# ============================================================================= +
+[docs] +def extract_ifos_and_vetoes(trig_file, veto_files, veto_cat): + """Extracts IFOs from HDF files and vetoes from a directory""" + + logger.info("Extracting IFOs and vetoes.") + + # Extract IFOs + ifos = extract_ifos(trig_file) + + # Extract vetoes + if veto_files is not None: + vetoes = _extract_vetoes(veto_files, ifos, veto_cat) + else: + vetoes = None + + return ifos, vetoes
+ + + +# ============================================================================= +# Function to load timeslides +# ============================================================================= +
+[docs] +def load_time_slides(hdf_file_path): + """Loads timeslides from PyGRB output file as a dictionary""" + hdf_file = HFile(hdf_file_path, 'r') + ifos = extract_ifos(hdf_file_path) + ids = numpy.arange(len(hdf_file[f'{ifos[0]}/search/time_slides'])) + time_slide_dict = { + slide_id: { + ifo: hdf_file[f'{ifo}/search/time_slides'][slide_id] + for ifo in ifos} + for slide_id in ids} + + # Check time_slide_ids are ordered correctly. + if not (numpy.all(ids[1:] == numpy.array(ids[:-1])+1) and ids[0] == 0): + err_msg = "time_slide_ids list should start at zero and increase by " + err_msg += "one for every element" + raise RuntimeError(err_msg) + # Check that the zero-lag slide has time_slide_id == 0. + if not numpy.all(numpy.array(list(time_slide_dict[0].values())) == 0): + err_msg = "The slide with time_slide_id == 0 should be the " + err_msg += "zero-lag-slide but it has non-zero slide values: " + err_msg += f"{time_slide_dict[0]}." + raise RuntimeError(err_msg) + + return time_slide_dict
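+# -----------------------------------------------------------------------------
+# Illustrative sketch (not part of the original module): the returned
+# dictionary maps each slide ID to its per-IFO time offsets, e.g. (file name
+# hypothetical)
+#
+#     time_slide_dict = load_time_slides('GRB_TRIGGERS.hdf')
+#     # {0: {'H1': 0.0, 'L1': 0.0}, 1: {'H1': 0.0, 'L1': 8.0}, ...}
+# -----------------------------------------------------------------------------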
+ + + +# ============================================================================= +# Function to load the segment dictionary +# ============================================================================= +
+[docs] +def load_segment_dict(hdf_file_path): + """ + Loads the segment dictionary with the format + {slide_id: segmentlist(segments analyzed)} + """ + + # Long time slides will require mapping between slides and segments + hdf_file = HFile(hdf_file_path, 'r') + ifos = extract_ifos(hdf_file_path) + # Get slide IDs + slide_ids = numpy.arange(len(hdf_file[f'{ifos[0]}/search/time_slides'])) + # Get segment start/end times + seg_starts = hdf_file['network/search/segments/start_times'][:] + seg_ends = hdf_file['network/search/segments/end_times'][:] + # Write list of segments + seg_list = segments.segmentlist([segments.segment(seg_start, seg_ends[i]) + for i, seg_start in enumerate(seg_starts)]) + + # Write segment_dict in proper format + # At the moment of this comment, there is only one segment + segment_dict = {slide: seg_list.coalesce() for slide in slide_ids} + + return segment_dict
+ + + +# ============================================================================= +# Construct the trials from the timeslides, segments, and vetoes +# ============================================================================= +
+[docs] +def construct_trials(seg_files, seg_dict, ifos, slide_dict, vetoes): + """Constructs trials from triggers, timeslides, segments and vetoes""" + + trial_dict = {} + + # Get segments + segs = _read_seg_files(seg_files) + + # Separate segments + trial_time = abs(segs['on']) + + for slide_id in slide_dict: + # These can only *reduce* the analysis time + curr_seg_list = seg_dict[slide_id] + + # Construct the buffer segment list + seg_buffer = segments.segmentlist() + for ifo in ifos: + slide_offset = slide_dict[slide_id][ifo] + seg_buffer.append(segments.segment(segs['buffer'][0] - + slide_offset, + segs['buffer'][1] - + slide_offset)) + seg_buffer.coalesce() + + # Construct the ifo-indexed dictionary of slid veteoes + slid_vetoes = _slide_vetoes(vetoes, slide_dict, slide_id, ifos) + + # Construct trial list and check against buffer + trial_dict[slide_id] = segments.segmentlist() + for curr_seg in curr_seg_list: + iter_int = 1 + while 1: + trial_end = curr_seg[0] + trial_time*iter_int + if trial_end > curr_seg[1]: + break + curr_trial = segments.segment(trial_end - trial_time, + trial_end) + if not seg_buffer.intersects_segment(curr_trial): + intersect = numpy.any([slid_vetoes[ifo]. + intersects_segment(curr_trial) + for ifo in ifos]) + if not intersect: + trial_dict[slide_id].append(curr_trial) + + iter_int += 1 + + return trial_dict
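+# -----------------------------------------------------------------------------
+# Illustrative sketch (not part of the original module): chain the loaders
+# above to build the trial dictionary and count the trials. All file names are
+# hypothetical.
+#
+#     ifos, vetoes = extract_ifos_and_vetoes('GRB_TRIGGERS.hdf', None, None)
+#     slide_dict = load_time_slides('GRB_TRIGGERS.hdf')
+#     seg_dict = load_segment_dict('GRB_TRIGGERS.hdf')
+#     trial_dict = construct_trials(['bufferSeg.txt', 'offSourceSeg.txt',
+#                                    'onSourceSeg.txt'],
+#                                   seg_dict, ifos, slide_dict, vetoes)
+#     total_trials = sum(len(trial_dict[slide]) for slide in trial_dict)
+# -----------------------------------------------------------------------------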
+ + + +# ============================================================================= +# Sort the loudest SNRs or BestNRs +# ============================================================================= +
+[docs] +def sort_stat(time_veto_max_stat): + """Sort a dictionary of loudest SNRs/BestNRs""" + + full_time_veto_max_stat = list(time_veto_max_stat.values()) + full_time_veto_max_stat = numpy.concatenate(full_time_veto_max_stat) + full_time_veto_max_stat.sort() + + return full_time_veto_max_stat
+ + + +# ============================================================================= +# Find max and median of loudest SNRs or BestNRs +# ============================================================================= +
+[docs] +def max_median_stat(slide_dict, time_veto_max_stat, trig_stat, total_trials): + """Determine the maximum and median of the loudest SNRs/BestNRs""" + + max_stat = max([trig_stat[slide_id].max() if trig_stat[slide_id].size + else 0 for slide_id in slide_dict]) + + full_time_veto_max_stat = sort_stat(time_veto_max_stat) + + if total_trials % 2: + median_stat = full_time_veto_max_stat[(total_trials - 1) // 2] + else: + median_stat = numpy.mean((full_time_veto_max_stat) + [total_trials//2 - 1: total_trials//2 + 1]) + + return max_stat, median_stat, full_time_veto_max_stat
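+# -----------------------------------------------------------------------------
+# Toy example (not part of the original module) with two slides and four
+# trials: the loudest statistic is 9.1 and, with an even number of trials, the
+# median is the mean of the two central values, 7.05.
+#
+#     time_veto_max_stat = {0: numpy.array([5.0, 9.1]),
+#                           1: numpy.array([6.2, 7.9])}
+#     max_stat, median_stat, sorted_stats = max_median_stat(
+#         {0: None, 1: None}, time_veto_max_stat, time_veto_max_stat, 4)
+# -----------------------------------------------------------------------------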
+ + + +# ============================================================================= +# Function to determine calibration and waveform errors for injection sets +# ============================================================================= +
+[docs] +def mc_cal_wf_errs(num_mc_injs, inj_dists, cal_err, wf_err, max_dc_cal_err): + """Includes calibration and waveform errors by running an MC""" + + # The efficiency calculations include calibration and waveform + # errors incorporated by running over each injection num_mc_injs times, + # where each time we draw a random value of distance. + + num_injs = len(inj_dists) + + inj_dist_mc = numpy.ndarray((num_mc_injs+1, num_injs)) + inj_dist_mc[0, :] = inj_dists + for i in range(num_mc_injs): + cal_dist_red = stats.norm.rvs(size=num_injs) * cal_err + wf_dist_red = numpy.abs(stats.norm.rvs(size=num_injs) * wf_err) + inj_dist_mc[i+1, :] = inj_dists / (max_dc_cal_err * + (1 + cal_dist_red) * + (1 + wf_dist_red)) + + return inj_dist_mc
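+# -----------------------------------------------------------------------------
+# Illustrative sketch (not part of the original module): three Monte-Carlo
+# realisations for two injections at 100 and 200 Mpc, with 5% calibration
+# error, 2% waveform error and a DC calibration factor of 1.1. Row 0 of the
+# returned (num_mc_injs + 1, num_injs) array holds the original distances.
+#
+#     dists = numpy.array([100., 200.])
+#     inj_dist_mc = mc_cal_wf_errs(3, dists, 0.05, 0.02, 1.1)
+#     # inj_dist_mc.shape == (4, 2)
+# -----------------------------------------------------------------------------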
+ + + +# ============================================================================= +# Function to calculate the coincident SNR +# ============================================================================= +
+[docs] +def get_coinc_snr(trigs_or_injs): + """ Calculate coincident SNR using coherent and null SNRs""" + + coh_snr_sq = numpy.square(trigs_or_injs['network/coherent_snr'][:]) + null_snr_sq = numpy.square(trigs_or_injs['network/null_snr'][:]) + coinc_snr = numpy.sqrt(coh_snr_sq + null_snr_sq) + + return coinc_snr
+ + + +
+[docs] +def template_hash_to_id(trigger_file, bank_path): + """ + This function converts the template hashes from a trigger file + into 'template_id's, i.e. the indices of the templates within the bank. + Parameters + ---------- + trigger_file: HFile object for trigger file + bank_path: filepath for template bank + """ + with HFile(bank_path, "r") as bank: + hashes = bank['template_hash'][:] + ifos = [k for k in trigger_file.keys() if k != 'network'] + trig_hashes = trigger_file[f'{ifos[0]}/template_hash'][:] + trig_ids = numpy.zeros(trig_hashes.shape[0], dtype=int) + for idx, t_hash in enumerate(hashes): + matches = numpy.where(trig_hashes == t_hash) + trig_ids[matches] = idx + return trig_ids
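+# -----------------------------------------------------------------------------
+# Illustrative sketch (not part of the original module): map the trigger
+# template hashes onto indices of the template bank. File names are
+# hypothetical.
+#
+#     with HFile('GRB_TRIGGERS.hdf', 'r') as trig_file:
+#         template_ids = template_hash_to_id(trig_file, 'H1L1-BANK.hdf')
+# -----------------------------------------------------------------------------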
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/render.html b/latest/html/_modules/pycbc/results/render.html new file mode 100644 index 00000000000..b6861de3072 --- /dev/null +++ b/latest/html/_modules/pycbc/results/render.html @@ -0,0 +1,396 @@ + + + + + + pycbc.results.render — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.render

+# Copyright (C) 2015 Christopher M. Biwer
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+import os.path, types
+import codecs
+
+from configparser import ConfigParser
+from jinja2 import Environment, FileSystemLoader
+from xml.sax.saxutils import unescape
+
+import pycbc.results
+from pycbc.results import unescape_table
+from pycbc.results.metadata import save_html_with_metadata
+from pycbc.workflow.core import SegFile, makedir
+
+
+[docs] +def render_workflow_html_template(filename, subtemplate, filelists, **kwargs): + """ Writes a template given inputs from the workflow generator. Takes + the output filename, the name of the subtemplate to render, and a list + of file lists whose entries are pycbc File objects. + """ + + dirnam = os.path.dirname(filename) + makedir(dirnam) + + try: + filenames = [f.name for filelist in filelists for f in filelist if f is not None] + except TypeError: + filenames = [] + + # render subtemplate + subtemplate_dir = pycbc.results.__path__[0] + '/templates/wells' + env = Environment(loader=FileSystemLoader(subtemplate_dir)) + env.globals.update(get_embedded_config=get_embedded_config) + env.globals.update(path_exists=os.path.exists) + env.globals.update(len=len) + subtemplate = env.get_template(subtemplate) + context = {'filelists' : filelists, + 'dir' : dirnam} + context.update(kwargs) + output = subtemplate.render(context) + + # save as html page + kwds = {'render-function' : 'render_tmplt', + 'filenames' : ','.join(filenames)} + kwds.update(kwargs) + + for key in kwds: + kwds[key] = str(kwds[key]) + + save_html_with_metadata(str(output), filename, None, kwds)
+ + +
+[docs] +def get_embedded_config(filename): + """ Attempt to load config data attached to file + """ + def check_option(self, section, name): + return (self.has_section(section) and + (self.has_option(section, name) or (name in self.defaults()))) + + try: + cp = pycbc.results.load_metadata_from_file(filename) + except TypeError: + cp = ConfigParser() + + cp.check_option = types.MethodType(check_option, cp) + + return cp
+ + +
+[docs] +def setup_template_render(path, config_path): + """ This function is the gateway for rendering a template for a file. + """ + + # initialization + cp = get_embedded_config(path) + output = '' + filename = os.path.basename(path) + + # use meta-data if not empty for rendering + if cp.has_option(filename, 'render-function'): + render_function_name = cp.get(filename, 'render-function') + render_function = eval(render_function_name) + output = render_function(path, cp) + + # read configuration file for rendering + elif os.path.exists(config_path): + cp.read(config_path) + + # render template + if cp.has_option(filename, 'render-function'): + render_function_name = cp.get(filename, 'render-function') + render_function = eval(render_function_name) + output = render_function(path, cp) + else: + output = render_default(path, cp) + + # if no configuration file is present + # then render the default template + else: + output = render_default(path, cp) + + return output
+ + +
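+# -----------------------------------------------------------------------------
+# Illustrative sketch (not part of the original module): render the drop-down
+# HTML snippet for a result file; if neither embedded metadata nor the
+# configuration file name a render-function, the default template is used.
+# Both paths are hypothetical.
+#
+#     html = setup_template_render('plots/H1L1-SNR_TIMESERIES.png',
+#                                  'render_config.ini')
+# -----------------------------------------------------------------------------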
+[docs] +def render_default(path, cp): + """ This is the default function that will render a template to a string of HTML. The + string will be for a drop-down tab that contains a link to the file. + + If the file extension requires information to be read, then that is passed to the + content variable (eg. a segmentlistdict). + """ + + # define filename and slug from path + filename = os.path.basename(path) + slug = filename.replace('.', '_') + + # initializations + content = None + + if path.endswith('.xml') or path.endswith('.xml.gz'): + # segment or veto file return a segmentslistdict instance + try: + wf_file = SegFile.from_segment_xml(path) + # FIXME: This is a dictionary, but the code wants a segmentlist + # for now I just coalesce. + wf_file.return_union_seglist() + except Exception as e: + print('No segment table found in %s : %s' % (path, e)) + + # render template + template_dir = pycbc.results.__path__[0] + '/templates/files' + env = Environment(loader=FileSystemLoader(template_dir)) + env.globals.update(abs=abs) + env.globals.update(open=open) + env.globals.update(path_exists=os.path.exists) + template = env.get_template('file_default.html') + context = {'path' : path, + 'filename' : filename, + 'slug' : slug, + 'cp' : cp, + 'content' : content} + output = template.render(context) + + return output
+ + +
+[docs] +def render_glitchgram(path, cp): + """ Render a glitchgram file template. + """ + + # define filename and slug from path + filename = os.path.basename(path) + slug = filename.replace('.', '_') + + # render template + template_dir = pycbc.results.__path__[0] + '/templates/files' + env = Environment(loader=FileSystemLoader(template_dir)) + env.globals.update(abs=abs) + template = env.get_template(cp.get(filename, 'template')) + context = {'filename' : filename, + 'slug' : slug, + 'cp' : cp} + output = template.render(context) + + return output
+ + +
+[docs] +def render_text(path, cp): + """ Render a file as text. + """ + + # define filename and slug from path + filename = os.path.basename(path) + slug = filename.replace('.', '_') + + # initializations + content = None + + # read file as a string + with codecs.open(path, 'r', encoding='utf-8', errors='replace') as fp: + content = fp.read() + + # replace all the escaped characters + content = unescape(content, unescape_table) + + # render template + template_dir = pycbc.results.__path__[0] + '/templates/files' + env = Environment(loader=FileSystemLoader(template_dir)) + env.globals.update(abs=abs) + env.globals.update(path_exists=os.path.exists) + template = env.get_template('file_pre.html') + context = {'filename' : filename, + 'slug' : slug, + 'cp' : cp, + 'content' : content} + output = template.render(context) + + return output
+ + +
+[docs] +def render_ignore(path, cp): + """ Does not render anything. + """ + + return ''
+ + +
+[docs] +def render_tmplt(path, cp): + """ Render a file as text. + """ + + # define filename and slug from path + filename = os.path.basename(path) + slug = filename.replace('.', '_') + + # initializations + content = None + + # read file as a string + with open(path, 'r') as fp: + content = fp.read() + + # replace all the escaped characters + content = unescape(content, unescape_table) + + # render template + template_dir = '/'.join(path.split('/')[:-1]) + env = Environment(loader=FileSystemLoader(template_dir)) + env.globals.update(setup_template_render=setup_template_render) + env.globals.update(get_embedded_config=get_embedded_config) + env.globals.update(path_exists=os.path.exists) + template = env.get_template(filename) + context = {'filename' : filename, + 'slug' : slug, + 'cp' : cp, + 'content' : content} + output = template.render(context) + + return output
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/scatter_histograms.html b/latest/html/_modules/pycbc/results/scatter_histograms.html new file mode 100644 index 00000000000..b3900733f7f --- /dev/null +++ b/latest/html/_modules/pycbc/results/scatter_histograms.html @@ -0,0 +1,1048 @@ + + + + + + pycbc.results.scatter_histograms — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.scatter_histograms

+# Copyright (C) 2016 Miriam Cabero Mueller, Collin Capano
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+Module to generate figures with scatter plots and histograms.
+"""
+
+import itertools
+import sys
+
+import numpy
+
+import scipy.stats
+
+import matplotlib
+
+# Only if a backend is not already set ... This should really *not* be done
+# here, but in the executables you should set matplotlib.use()
+# This matches the check that matplotlib does internally, but this *may* be
+# version dependent. If this is a problem then remove this and control from
+# the executables directly.
+if 'matplotlib.backends' not in sys.modules:  # nopep8
+    matplotlib.use('agg')
+
+from matplotlib import (offsetbox, pyplot, gridspec, colors)
+
+from pycbc.results import str_utils
+from pycbc.io import FieldArray
+
+
+
+[docs] +def create_axes_grid(parameters, labels=None, height_ratios=None, + width_ratios=None, no_diagonals=False): + """Given a list of parameters, creates a figure with an axis for + every possible combination of the parameters. + + Parameters + ---------- + parameters : list + Names of the variables to be plotted. + labels : {None, dict}, optional + A dictionary of parameters -> parameter labels. + height_ratios : {None, list}, optional + Set the height ratios of the axes; see `matplotlib.gridspec.GridSpec` + for details. + width_ratios : {None, list}, optional + Set the width ratios of the axes; see `matplotlib.gridspec.GridSpec` + for details. + no_diagonals : {False, bool}, optional + Do not produce axes for the same parameter on both axes. + + Returns + ------- + fig : pyplot.figure + The figure that was created. + axis_dict : dict + A dictionary mapping the parameter combinations to the axis and their + location in the subplots grid; i.e., the key, values are: + `{('param1', 'param2'): (pyplot.axes, row index, column index)}` + """ + if labels is None: + labels = {p: p for p in parameters} + elif any(p not in labels for p in parameters): + raise ValueError("labels must be provided for all parameters") + # Create figure with adequate size for number of parameters. + ndim = len(parameters) + if no_diagonals: + ndim -= 1 + if ndim < 3: + fsize = (8, 7) + else: + fsize = (ndim*3 - 1, ndim*3 - 2) + fig = pyplot.figure(figsize=fsize) + # create the axis grid + gs = gridspec.GridSpec(ndim, ndim, width_ratios=width_ratios, + height_ratios=height_ratios, + wspace=0.05, hspace=0.05) + # create grid of axis numbers to easily create axes in the right locations + axes = numpy.arange(ndim**2).reshape((ndim, ndim)) + + # Select possible combinations of plots and establish rows and columns. + combos = list(itertools.combinations(parameters, 2)) + # add the diagonals + if not no_diagonals: + combos += [(p, p) for p in parameters] + + # create the mapping between parameter combos and axes + axis_dict = {} + # cycle over all the axes, setting thing as needed + for nrow in range(ndim): + for ncolumn in range(ndim): + ax = pyplot.subplot(gs[axes[nrow, ncolumn]]) + # map to a parameter index + px = parameters[ncolumn] + if no_diagonals: + py = parameters[nrow+1] + else: + py = parameters[nrow] + if (px, py) in combos: + axis_dict[px, py] = (ax, nrow, ncolumn) + # x labels only on bottom + if nrow + 1 == ndim: + ax.set_xlabel('{}'.format(labels[px]), fontsize=18) + else: + pyplot.setp(ax.get_xticklabels(), visible=False) + # y labels only on left + if ncolumn == 0: + ax.set_ylabel('{}'.format(labels[py]), fontsize=18) + else: + pyplot.setp(ax.get_yticklabels(), visible=False) + else: + # make non-used axes invisible + ax.axis('off') + return fig, axis_dict
+ + + +
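+# -----------------------------------------------------------------------------
+# Illustrative sketch (not part of the original module): set up the axes grid
+# for three hypothetical parameters and fetch the axis of one pair.
+#
+#     fig, axis_dict = create_axes_grid(['mass1', 'mass2', 'distance'])
+#     ax, row, col = axis_dict[('mass1', 'mass2')]
+# -----------------------------------------------------------------------------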
+[docs] +def get_scale_fac(fig, fiducial_width=8, fiducial_height=7): + """Gets a factor to scale fonts by for the given figure. The scale + factor is relative to a figure with dimensions + (`fiducial_width`, `fiducial_height`). + """ + width, height = fig.get_size_inches() + return (width*height/(fiducial_width*fiducial_height))**0.5
+ + + +
+[docs] +def construct_kde(samples_array, use_kombine=False, kdeargs=None): + """Constructs a KDE from the given samples. + + Parameters + ---------- + samples_array : array + Array of values to construct the KDE for. + use_kombine : bool, optional + Use kombine's clustered KDE instead of scipy's. Default is False. + kdeargs : dict, optional + Additional arguments to pass to the KDE. Can be any argument recognized + by :py:func:`scipy.stats.gaussian_kde` or + :py:func:`kombine.clustered_kde.optimized_kde`. In either case, you can + also set ``max_kde_samples`` to limit the number of samples that are + used for KDE construction. + + Returns + ------- + kde : + The KDE. + """ + # make sure samples are randomly sorted + numpy.random.seed(0) + numpy.random.shuffle(samples_array) + # if kde arg specifies a maximum number of samples, limit them + if kdeargs is None: + kdeargs = {} + else: + kdeargs = kdeargs.copy() + max_nsamples = kdeargs.pop('max_kde_samples', None) + samples_array = samples_array[:max_nsamples] + if use_kombine: + try: + import kombine + except ImportError: + raise ImportError("kombine is not installed.") + if kdeargs is None: + kdeargs = {} + # construct the kde + if use_kombine: + kde = kombine.clustered_kde.optimized_kde(samples_array, **kdeargs) + else: + kde = scipy.stats.gaussian_kde(samples_array.T, **kdeargs) + return kde
+ + + +
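+# -----------------------------------------------------------------------------
+# Illustrative sketch (not part of the original module): build a Gaussian KDE
+# from (n_samples, n_dim) samples, limiting the number used, and evaluate it
+# on a few points. The data are random placeholders.
+#
+#     samples = numpy.random.normal(size=(1000, 2))
+#     kde = construct_kde(samples, kdeargs={'max_kde_samples': 500})
+#     density = kde(samples[:10].T)
+# -----------------------------------------------------------------------------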
+[docs] +def create_density_plot(xparam, yparam, samples, plot_density=True, + plot_contours=True, percentiles=None, cmap='viridis', + contour_color=None, label_contours=True, + contour_linestyles=None, + xmin=None, xmax=None, + ymin=None, ymax=None, exclude_region=None, + fig=None, ax=None, use_kombine=False, + kdeargs=None): + """Computes and plots posterior density and confidence intervals using the + given samples. + + Parameters + ---------- + xparam : string + The parameter to plot on the x-axis. + yparam : string + The parameter to plot on the y-axis. + samples : dict, numpy structured array, or FieldArray + The samples to plot. + plot_density : {True, bool} + Plot a color map of the density. + plot_contours : {True, bool} + Plot contours showing the n-th percentiles of the density. + percentiles : {None, float or array} + What percentile contours to draw. If None, will plot the 50th + and 90th percentiles. + cmap : {'viridis', string} + The name of the colormap to use for the density plot. + contour_color : {None, string} + What color to make the contours. Default is white for density + plots and black for other plots. + label_contours : bool, optional + Whether to label the contours. Default is True. + contour_linestyles : list, optional + Linestyles to use for the contours. Default (None) will use solid. + xmin : {None, float} + Minimum value to plot on x-axis. + xmax : {None, float} + Maximum value to plot on x-axis. + ymin : {None, float} + Minimum value to plot on y-axis. + ymax : {None, float} + Maximum value to plot on y-axis. + exclue_region : {None, str} + Exclude the specified region when plotting the density or contours. + Must be a string in terms of `xparam` and `yparam` that is + understandable by numpy's logical evaluation. For example, if + `xparam = m_1` and `yparam = m_2`, and you want to exclude the region + for which `m_2` is greater than `m_1`, then exclude region should be + `'m_2 > m_1'`. + fig : {None, pyplot.figure} + Add the plot to the given figure. If None and ax is None, will create + a new figure. + ax : {None, pyplot.axes} + Draw plot on the given axis. If None, will create a new axis from + `fig`. + use_kombine : {False, bool} + Use kombine's KDE to calculate density. Otherwise, will use + `scipy.stats.gaussian_kde.` Default is False. + kdeargs : dict, optional + Pass the given keyword arguments to the KDE. + + Returns + ------- + fig : pyplot.figure + The figure the plot was made on. + ax : pyplot.axes + The axes the plot was drawn on. + """ + if percentiles is None: + percentiles = numpy.array([50., 90.]) + percentiles = 100. 
- numpy.array(percentiles) + percentiles.sort() + + if ax is None and fig is None: + fig = pyplot.figure() + if ax is None: + ax = fig.add_subplot(111) + + # convert samples to array and construct kde + xsamples = samples[xparam] + ysamples = samples[yparam] + arr = numpy.vstack((xsamples, ysamples)).T + kde = construct_kde(arr, use_kombine=use_kombine, kdeargs=kdeargs) + + # construct grid to evaluate on + if xmin is None: + xmin = xsamples.min() + if xmax is None: + xmax = xsamples.max() + if ymin is None: + ymin = ysamples.min() + if ymax is None: + ymax = ysamples.max() + npts = 100 + X, Y = numpy.mgrid[ + xmin:xmax:complex(0, npts), # pylint:disable=invalid-slice-index + ymin:ymax:complex(0, npts)] # pylint:disable=invalid-slice-index + pos = numpy.vstack([X.ravel(), Y.ravel()]) + if use_kombine: + Z = numpy.exp(kde(pos.T).reshape(X.shape)) + draw = kde.draw + else: + Z = kde(pos).T.reshape(X.shape) + draw = kde.resample + + if exclude_region is not None: + # convert X,Y to a single FieldArray so we can use it's ability to + # evaluate strings + farr = FieldArray.from_kwargs(**{xparam: X, yparam: Y}) + Z[farr[exclude_region]] = 0. + + if plot_density: + ax.imshow(numpy.rot90(Z), extent=[xmin, xmax, ymin, ymax], + aspect='auto', cmap=cmap, zorder=1) + if contour_color is None: + contour_color = 'w' + + if plot_contours: + # compute the percentile values + resamps = kde(draw(int(npts**2))) + if use_kombine: + resamps = numpy.exp(resamps) + s = numpy.percentile(resamps, percentiles) + if contour_color is None: + contour_color = 'k' + # make linewidths thicker if not plotting density for clarity + if plot_density: + lw = 1 + else: + lw = 2 + ct = ax.contour(X, Y, Z, s, colors=contour_color, linewidths=lw, + linestyles=contour_linestyles, zorder=3) + # label contours + if label_contours: + lbls = ['{p}%'.format(p=int(p)) for p in (100. - percentiles)] + fmt = dict(zip(ct.levels, lbls)) + fs = 12 + ax.clabel(ct, ct.levels, inline=True, fmt=fmt, fontsize=fs) + + return fig, ax
+ + + +
+[docs] +def create_marginalized_hist(ax, values, label, percentiles=None, + color='k', fillcolor='gray', linecolor='navy', + linestyle='-', plot_marginal_lines=True, + title=True, expected_value=None, + expected_color='red', rotated=False, + plot_min=None, plot_max=None, log_scale=False): + """Plots a 1D marginalized histogram of the given param from the given + samples. + + Parameters + ---------- + ax : pyplot.Axes + The axes on which to draw the plot. + values : array + The parameter values to plot. + label : str + A label to use for the title. + percentiles : {None, float or array} + What percentiles to draw lines at. If None, will draw lines at + `[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and the + median). + color : {'k', string} + What color to make the histogram; default is black. + fillcolor : {'gray', string, or None} + What color to fill the histogram with. Set to None to not fill the + histogram. Default is 'gray'. + plot_marginal_lines : bool, optional + Put vertical lines at the marginal percentiles. Default is True. + linestyle : str, optional + What line style to use for the histogram. Default is '-'. + linecolor : {'navy', string} + What color to use for the percentile lines. Default is 'navy'. + title : bool, optional + Add a title with a estimated value +/- uncertainty. The estimated value + is the pecentile halfway between the max/min of ``percentiles``, while + the uncertainty is given by the max/min of the ``percentiles``. If no + percentiles are specified, defaults to quoting the median +/- 95/5 + percentiles. + rotated : {False, bool} + Plot the histogram on the y-axis instead of the x. Default is False. + plot_min : {None, float} + The minimum value to plot. If None, will default to whatever `pyplot` + creates. + plot_max : {None, float} + The maximum value to plot. If None, will default to whatever `pyplot` + creates. + scalefac : {1., float} + Factor to scale the default font sizes by. Default is 1 (no scaling). + log_scale : boolean + Should the histogram bins be logarithmically spaced + """ + if fillcolor is None: + htype = 'step' + else: + htype = 'stepfilled' + if rotated: + orientation = 'horizontal' + else: + orientation = 'vertical' + if log_scale: + bins = numpy.logspace( + numpy.log10(numpy.nanmin(values)), + numpy.log10(numpy.nanmax(values)), + 50 + ) + else: + bins = numpy.linspace( + numpy.nanmin(values), + numpy.nanmax(values), + 50, + ) + ax.hist(values, bins=bins, histtype=htype, orientation=orientation, + facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2, + density=True) + if percentiles is None: + percentiles = [5., 50., 95.] + if len(percentiles) > 0: + plotp = numpy.percentile(values, percentiles) + else: + plotp = [] + if plot_marginal_lines: + for val in plotp: + if rotated: + ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3) + else: + ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3) + # plot expected + if expected_value is not None: + if rotated: + ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2) + else: + ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2) + if title: + if len(percentiles) > 0: + minp = min(percentiles) + maxp = max(percentiles) + medp = (maxp + minp) / 2. 
+ else: + minp = 5 + medp = 50 + maxp = 95 + values_min = numpy.percentile(values, minp) + values_med = numpy.percentile(values, medp) + values_max = numpy.percentile(values, maxp) + negerror = values_med - values_min + poserror = values_max - values_med + fmt = '${0}$'.format(str_utils.format_value( + values_med, negerror, plus_error=poserror)) + if rotated: + ax.yaxis.set_label_position("right") + # sets colored title for marginal histogram + set_marginal_histogram_title(ax, fmt, color, + label=label, rotated=rotated) + else: + # sets colored title for marginal histogram + set_marginal_histogram_title(ax, fmt, color, label=label) + # remove ticks and set limits + if rotated: + # Remove x-ticks + ax.set_xticks([]) + # turn off x-labels + ax.set_xlabel('') + # set limits + ymin, ymax = ax.get_ylim() + if plot_min is not None: + ymin = plot_min + if plot_max is not None: + ymax = plot_max + ax.set_ylim(ymin, ymax) + else: + # Remove y-ticks + ax.set_yticks([]) + # turn off y-label + ax.set_ylabel('') + # set limits + xmin, xmax = ax.get_xlim() + if plot_min is not None: + xmin = plot_min + if plot_max is not None: + xmax = plot_max + ax.set_xlim(xmin, xmax)
+ + + +
+[docs] +def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False): + """ Sets the title of the marginal histograms. + + Parameters + ---------- + ax : Axes + The `Axes` instance for the plot. + fmt : str + The string to add to the title. + color : str + The color of the text to add to the title. + label : str + If title does not exist, then include label at beginning of the string. + rotated : bool + If `True` then rotate the text 270 degrees for sideways title. + """ + + # get rotation angle of the title + rotation = 270 if rotated else 0 + + # get how much to displace title on axes + xscale = 1.05 if rotated else 0.0 + if rotated: + yscale = 1.0 + elif len(ax.get_figure().axes) > 1: + yscale = 1.15 + else: + yscale = 1.05 + + # get class that packs text boxes vertical or horizonitally + packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker + + # if no title exists + if not hasattr(ax, "title_boxes"): + + # create a text box + title = "{} = {}".format(label, fmt) + tbox1 = offsetbox.TextArea( + title, + textprops=dict(color=color, size=15, rotation=rotation, + ha='left', va='bottom')) + + # save a list of text boxes as attribute for later + ax.title_boxes = [tbox1] + + # pack text boxes + ybox = packer_class(children=ax.title_boxes, + align="bottom", pad=0, sep=5) + + # else append existing title + else: + + # delete old title + ax.title_anchor.remove() + + # add new text box to list + tbox1 = offsetbox.TextArea( + " {}".format(fmt), + textprops=dict(color=color, size=15, rotation=rotation, + ha='left', va='bottom')) + ax.title_boxes = ax.title_boxes + [tbox1] + + # pack text boxes + ybox = packer_class(children=ax.title_boxes, + align="bottom", pad=0, sep=5) + + # add new title and keep reference to instance as an attribute + anchored_ybox = offsetbox.AnchoredOffsetbox( + loc=2, child=ybox, pad=0., + frameon=False, bbox_to_anchor=(xscale, yscale), + bbox_transform=ax.transAxes, borderpad=0.) + ax.title_anchor = ax.add_artist(anchored_ybox)
+ + + +
+[docs] +def create_multidim_plot(parameters, samples, labels=None, + mins=None, maxs=None, expected_parameters=None, + expected_parameters_color='r', + plot_marginal=True, plot_scatter=True, + plot_maxl=False, + plot_marginal_lines=True, + marginal_percentiles=None, contour_percentiles=None, + marginal_title=True, marginal_linestyle='-', + zvals=None, show_colorbar=True, cbar_label=None, + vmin=None, vmax=None, scatter_cmap='plasma', + scatter_log_cmap=False, log_parameters=None, + plot_density=False, plot_contours=True, + density_cmap='viridis', + contour_color=None, label_contours=True, + contour_linestyles=None, + hist_color='black', + line_color=None, fill_color='gray', + use_kombine=False, kdeargs=None, + fig=None, axis_dict=None): + """Generate a figure with several plots and histograms. + + Parameters + ---------- + parameters: list + Names of the variables to be plotted. + samples : FieldArray + A field array of the samples to plot. + labels: dict, optional + A dictionary mapping parameters to labels. If none provided, will just + use the parameter strings as the labels. + mins : {None, dict}, optional + Minimum value for the axis of each variable in `parameters`. + If None, it will use the minimum of the corresponding variable in + `samples`. + maxs : {None, dict}, optional + Maximum value for the axis of each variable in `parameters`. + If None, it will use the maximum of the corresponding variable in + `samples`. + expected_parameters : {None, dict}, optional + Expected values of `parameters`, as a dictionary mapping parameter + names -> values. A cross will be plotted at the location of the + expected parameters on axes that plot any of the expected parameters. + expected_parameters_color : {'r', string}, optional + What color to make the expected parameters cross. + plot_marginal : {True, bool} + Plot the marginalized distribution on the diagonals. If False, the + diagonal axes will be turned off. + plot_scatter : {True, bool} + Plot each sample point as a scatter plot. + marginal_percentiles : {None, array} + What percentiles to draw lines at on the 1D histograms. + If None, will draw lines at `[5, 50, 95]` (i.e., the bounds on the + upper 90th percentile and the median). + marginal_title : bool, optional + Add a title over the 1D marginal plots that gives an estimated value + +/- uncertainty. The estimated value is the pecentile halfway between + the max/min of ``maginal_percentiles``, while the uncertainty is given + by the max/min of the ``marginal_percentiles. If no + ``marginal_percentiles`` are specified, the median +/- 95/5 percentiles + will be quoted. + marginal_linestyle : str, optional + What line style to use for the marginal histograms. + contour_percentiles : {None, array} + What percentile contours to draw on the scatter plots. If None, + will plot the 50th and 90th percentiles. + zvals : {None, array} + An array to use for coloring the scatter plots. If None, scatter points + will be the same color. + show_colorbar : {True, bool} + Show the colorbar of zvalues used for the scatter points. A ValueError + will be raised if zvals is None and this is True. + cbar_label : {None, str} + Specify a label to add to the colorbar. + vmin: {None, float}, optional + Minimum value for the colorbar. If None, will use the minimum of zvals. + vmax: {None, float}, optional + Maximum value for the colorbar. If None, will use the maxmimum of + zvals. + scatter_cmap : {'plasma', string} + The color map to use for the scatter points. Default is 'plasma'. 
+ scatter_log_cmap : boolean + Should the scatter point coloring be on a log scale? Default False + log_parameters : list or None + Which parameters should be plotted on a log scale + plot_density : {False, bool} + Plot the density of points as a color map. + plot_contours : {True, bool} + Draw contours showing the 50th and 90th percentile confidence regions. + density_cmap : {'viridis', string} + The color map to use for the density plot. + contour_color : {None, string} + The color to use for the contour lines. Defaults to white for + density plots, navy for scatter plots without zvals, and black + otherwise. + label_contours : bool, optional + Whether to label the contours. Default is True. + contour_linestyles : list, optional + Linestyles to use for the contours. Default (None) will use solid. + use_kombine : {False, bool} + Use kombine's KDE to calculate density. Otherwise, will use + `scipy.stats.gaussian_kde.` Default is False. + kdeargs : dict, optional + Pass the given keyword arguments to the KDE. + fig : pyplot.figure + Use the given figure instead of creating one. + axis_dict : dict + Use the given dictionary of axes instead of creating one. + + Returns + ------- + fig : pyplot.figure + The figure that was created. + axis_dict : dict + A dictionary mapping the parameter combinations to the axis and their + location in the subplots grid; i.e., the key, values are: + `{('param1', 'param2'): (pyplot.axes, row index, column index)}` + """ + if labels is None: + labels = {p: p for p in parameters} + if log_parameters is None: + log_parameters = [] + # set up the figure with a grid of axes + # if only plotting 2 parameters, make the marginal plots smaller + nparams = len(parameters) + if nparams == 2 and plot_marginal: + width_ratios = [3, 1] + height_ratios = [1, 3] + else: + width_ratios = height_ratios = None + + if plot_maxl: + # make sure loglikelihood is provide + if 'loglikelihood' not in samples.fieldnames: + raise ValueError("plot-maxl requires loglikelihood") + maxidx = samples['loglikelihood'].argmax() + + # only plot scatter if more than one parameter + plot_scatter = plot_scatter and nparams > 1 + + # Sort zvals to get higher values on top in scatter plots + if plot_scatter: + if zvals is not None: + sort_indices = zvals.argsort() + zvals = zvals[sort_indices] + samples = samples[sort_indices] + if contour_color is None: + contour_color = 'k' + elif show_colorbar: + raise ValueError("must provide z values to create a colorbar") + else: + # just make all scatter points same color + zvals = 'gray' + if plot_contours and contour_color is None: + contour_color = 'navy' + + # create the axis grid + if fig is None and axis_dict is None: + fig, axis_dict = create_axes_grid( + parameters, labels=labels, + width_ratios=width_ratios, height_ratios=height_ratios, + no_diagonals=not plot_marginal) + + # convert samples to a dictionary to avoid re-computing derived parameters + # every time they are needed + # only try to plot what's available + sd = {} + for p in parameters: + try: + sd[p] = samples[p] + except (ValueError, TypeError, IndexError): + continue + samples = sd + parameters = list(sd.keys()) + + # values for axis bounds + if mins is None: + mins = {p: samples[p].min() for p in parameters} + else: + # copy the dict + mins = {p: val for p, val in mins.items()} + if maxs is None: + maxs = {p: samples[p].max() for p in parameters} + else: + # copy the dict + maxs = {p: val for p, val in maxs.items()} + + # Diagonals... 
+ if plot_marginal: + for pi, param in enumerate(parameters): + ax, _, _ = axis_dict[param, param] + # if only plotting 2 parameters and on the second parameter, + # rotate the marginal plot + rotated = nparams == 2 and pi == nparams-1 + # see if there are expected values + if expected_parameters is not None: + try: + expected_value = expected_parameters[param] + except KeyError: + expected_value = None + else: + expected_value = None + create_marginalized_hist( + ax, samples[param], label=labels[param], + color=hist_color, fillcolor=fill_color, + log_scale=param in log_parameters, + plot_marginal_lines=plot_marginal_lines, + linestyle=marginal_linestyle, linecolor=line_color, + title=marginal_title, expected_value=expected_value, + expected_color=expected_parameters_color, + rotated=rotated, plot_min=mins[param], plot_max=maxs[param], + percentiles=marginal_percentiles) + + # Off-diagonals... + for px, py in axis_dict: + if px == py or px not in parameters or py not in parameters: + continue + ax, _, _ = axis_dict[px, py] + if plot_scatter: + if plot_density: + alpha = 0.3 + else: + alpha = 1. + if scatter_log_cmap: + cmap_norm = colors.LogNorm(vmin=vmin, vmax=vmax) + else: + cmap_norm = colors.Normalize(vmin=vmin, vmax=vmax) + + plt = ax.scatter(x=samples[px], y=samples[py], c=zvals, s=5, + edgecolors='none', norm=cmap_norm, + cmap=scatter_cmap, alpha=alpha, zorder=2) + + if plot_contours or plot_density: + # Exclude out-of-bound regions + # this is a bit kludgy; should probably figure out a better + # solution to eventually allow for more than just m_p m_s + if (px == 'm_p' and py == 'm_s') or (py == 'm_p' and px == 'm_s'): + exclude_region = 'm_s > m_p' + else: + exclude_region = None + create_density_plot( + px, py, samples, plot_density=plot_density, + plot_contours=plot_contours, cmap=density_cmap, + percentiles=contour_percentiles, + contour_color=contour_color, label_contours=label_contours, + contour_linestyles=contour_linestyles, + xmin=mins[px], xmax=maxs[px], + ymin=mins[py], ymax=maxs[py], + exclude_region=exclude_region, ax=ax, + use_kombine=use_kombine, kdeargs=kdeargs) + + if plot_maxl: + maxlx = samples[px][maxidx] + maxly = samples[py][maxidx] + ax.scatter(maxlx, maxly, marker='x', s=20, c=contour_color, + zorder=5) + + if expected_parameters is not None: + try: + ax.axvline(expected_parameters[px], lw=1.5, + color=expected_parameters_color, zorder=5) + except KeyError: + pass + try: + ax.axhline(expected_parameters[py], lw=1.5, + color=expected_parameters_color, zorder=5) + except KeyError: + pass + + ax.set_xlim(mins[px], maxs[px]) + ax.set_ylim(mins[py], maxs[py]) + + # adjust tick number for large number of plots + if len(parameters) > 3: + for px, py in axis_dict: + ax, _, _ = axis_dict[px, py] + ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3)) + ax.set_yticks(reduce_ticks(ax, 'y', maxticks=3)) + + if plot_scatter and show_colorbar: + # compute font size based on fig size + scale_fac = get_scale_fac(fig) + fig.subplots_adjust(right=0.85, wspace=0.03) + cbar_ax = fig.add_axes([0.9, 0.1, 0.03, 0.8]) + cb = fig.colorbar(plt, cax=cbar_ax) + if cbar_label is not None: + cb.set_label(cbar_label, fontsize=12*scale_fac) + cb.ax.tick_params(labelsize=8*scale_fac) + + return fig, axis_dict
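+Example (illustrative sketch, not part of the module): a minimal corner-style
+plot of two toy parameters. This assumes ``pycbc.io.FieldArray.from_kwargs``
+is available to build the sample array; any posterior samples stored as a
+FieldArray could be used instead::
+
+    import numpy
+    from pycbc.io import FieldArray
+    from pycbc.results.scatter_histograms import create_multidim_plot
+
+    # toy Gaussian "posterior" samples for two parameters
+    nsamples = 1000
+    samples = FieldArray.from_kwargs(
+        mass1=numpy.random.normal(35., 2., size=nsamples),
+        mass2=numpy.random.normal(30., 2., size=nsamples))
+
+    fig, axis_dict = create_multidim_plot(
+        ['mass1', 'mass2'], samples,
+        labels={'mass1': 'm1', 'mass2': 'm2'},
+        plot_scatter=True, show_colorbar=False,
+        plot_contours=True, plot_marginal=True)
+    fig.savefig('toy_corner.png')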
+ + + +
+[docs] +def remove_common_offset(arr): + """Given an array of data, removes a common offset > 1000, returning the + shifted array and the removed offset. + """ + offset = 0 + isneg = (arr <= 0).all() + # make sure all values have the same sign + if isneg or (arr >= 0).all(): + # only remove offset if the minimum and maximum values are the same + # order of magnitude and > O(1000) + minpwr = numpy.log10(abs(arr).min()) + maxpwr = numpy.log10(abs(arr).max()) + if numpy.floor(minpwr) == numpy.floor(maxpwr) and minpwr > 3: + offset = numpy.floor(10**minpwr) + if isneg: + offset *= -1 + arr = arr - offset + return arr, int(offset)
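+Example (illustrative sketch, not part of the module): stripping a large
+common offset from GPS-like values before plotting::
+
+    import numpy
+    from pycbc.results.scatter_histograms import remove_common_offset
+
+    times = numpy.array([1126259462.1, 1126259462.3, 1126259462.7])
+    shifted, offset = remove_common_offset(times)
+    # offset is of order 1e9 here; `shifted` holds the small residuals,
+    # which are much easier to read on an axis label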
+ + + +
+[docs] +def reduce_ticks(ax, which, maxticks=3): + """Given a pyplot axis, resamples its `which`-axis ticks such that there are + at most `maxticks` left. + + Parameters + ---------- + ax : axis + The axis to adjust. + which : {'x' | 'y'} + Which axis to adjust. + maxticks : {3, int} + Maximum number of ticks to use. + + Returns + ------- + array + An array of the selected ticks. + """ + ticks = getattr(ax, 'get_{}ticks'.format(which))() + if len(ticks) > maxticks: + # make sure the left/right value is not at the edge + minax, maxax = getattr(ax, 'get_{}lim'.format(which))() + dw = abs(maxax-minax)/10. + start_idx, end_idx = 0, len(ticks) + if ticks[0] < minax + dw: + start_idx += 1 + if ticks[-1] > maxax - dw: + end_idx -= 1 + # get reduction factor + fac = int(len(ticks) / maxticks) + ticks = ticks[start_idx:end_idx:fac] + return ticks
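+Example (illustrative sketch, not part of the module): thinning the x-axis
+ticks of a matplotlib axis down to at most three::
+
+    import numpy
+    from matplotlib import pyplot
+    from pycbc.results.scatter_histograms import reduce_ticks
+
+    fig, ax = pyplot.subplots()
+    x = numpy.linspace(0., 10., 100)
+    ax.plot(x, x ** 2)
+    ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3))
+    fig.savefig('reduced_ticks.png')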
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/snr.html b/latest/html/_modules/pycbc/results/snr.html new file mode 100644 index 00000000000..7952131bc1a --- /dev/null +++ b/latest/html/_modules/pycbc/results/snr.html @@ -0,0 +1,203 @@ + + + + + + pycbc.results.snr — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.snr

+# Copyright (C) 2022
+# Tito Dal Canton, Gareth Cabourn Davies, Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+Module to generate SNR figures
+"""
+import pylab as pl
+from pycbc.results import ifo_color
+
+
+
+[docs] +def generate_snr_plot(snrdict, output_filename, triggers, ref_time): + """ + Generate an SNR timeseries plot as used for upload to GraceDB. + + Parameters + ---------- + + snrdict: dictionary + A dictionary, keyed on ifo, containing the SNR + TimeSeries objects. + output_filename: string + The filename to save the plot to. + triggers : dictionary of tuples + A dictionary keyed on IFO, containing (trigger time, trigger snr). + ref_time : number, GPS seconds + Reference time used as the zero point of the plot's time axis. + It is converted to an integer number of GPS seconds before plotting. + + Returns + ------- + None + """ + pl.figure() + ref_time = int(ref_time) + for ifo in sorted(snrdict): + curr_snrs = snrdict[ifo] + + pl.plot(curr_snrs.sample_times - ref_time, abs(curr_snrs), + c=ifo_color(ifo), label=ifo) + if ifo in triggers: + pl.plot(triggers[ifo][0] - ref_time, + triggers[ifo][1], marker='x', c=ifo_color(ifo)) + + pl.legend() + pl.xlabel(f'GPS time from {ref_time:d} (s)') + pl.ylabel('SNR') + pl.savefig(output_filename) + pl.close()
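+Example (illustrative sketch, not part of the module): plotting toy complex
+SNR time series for two detectors around a chosen reference time::
+
+    import numpy
+    from pycbc.types import TimeSeries
+    from pycbc.results.snr import generate_snr_plot
+
+    ref_time = 1126259462
+    snrdict = {}
+    for ifo in ('H1', 'L1'):
+        data = (numpy.random.normal(size=4096)
+                + 1j * numpy.random.normal(size=4096))
+        snrdict[ifo] = TimeSeries(data, delta_t=1. / 2048, epoch=ref_time - 1)
+    # (trigger time, trigger SNR) for each detector
+    triggers = {'H1': (ref_time, 8.5), 'L1': (ref_time + 0.003, 7.9)}
+    generate_snr_plot(snrdict, 'snr_plot.png', triggers, ref_time)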
+ + + +__all__ = ["generate_snr_plot"] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/str_utils.html b/latest/html/_modules/pycbc/results/str_utils.html new file mode 100644 index 00000000000..25f4d9357d2 --- /dev/null +++ b/latest/html/_modules/pycbc/results/str_utils.html @@ -0,0 +1,389 @@ + + + + + + pycbc.results.str_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.str_utils

+# Copyright (C) 2016  Collin Capano
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides functions for formatting values into strings for display.
+"""
+
+import numpy
+
+mjax_header = """
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$']]}});
+</script>
+<script type="text/javascript"
+    src="//cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
+</script>
+"""
+
+
+
+[docs] +def mathjax_html_header(): + """Standard header to use for html pages to display latex math. + + Returns + ------- + header: str + The necessary html head needed to use latex on an html page. + """ + return mjax_header
+ + +
+[docs] +def drop_trailing_zeros(num): + """ + Drops the trailing zeros from the fixed-point string representation of a + float. + """ + txt = '%f' %(num) + txt = txt.rstrip('0') + if txt.endswith('.'): + txt = txt[:-1] + return txt
+ + +
+[docs] +def get_signum(val, err, max_sig=numpy.inf): + """ + Given an error, returns a string for val formatted to the appropriate + number of significant figures. + """ + coeff, pwr = ('%e' % err).split('e') + if pwr.startswith('-'): + pwr = int(pwr[1:]) + if round(float(coeff)) == 10.: + pwr -= 1 + pwr = min(pwr, max_sig) + tmplt = '%.' + str(pwr+1) + 'f' + return tmplt % val + else: + pwr = int(pwr[1:]) + if round(float(coeff)) == 10.: + pwr += 1 + # if the error is large, we can sometimes get 0; + # adjust the round until we don't get 0 (assuming the actual + # value isn't 0) + return_val = round(val, -pwr+1) + if val != 0.: + loop_count = 0 + max_recursion = 100 + while return_val == 0.: + pwr -= 1 + return_val = round(val, -pwr+1) + loop_count += 1 + if loop_count > max_recursion: + raise ValueError("Maximum recursion depth hit! Input " +\ + "values are: val = %f, err = %f" %(val, err)) + return drop_trailing_zeros(return_val)
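+Example (illustrative sketch, not part of the module): rounding a value to the
+precision implied by its uncertainty::
+
+    from pycbc.results.str_utils import drop_trailing_zeros, get_signum
+
+    print(drop_trailing_zeros(1.2300))  # '1.23'
+    print(get_signum(3.14159, 0.02))    # rounded to match the size of the error
+    print(get_signum(1234.5, 200.))     # a large error keeps fewer digits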
+ + +
+[docs] +def format_value(value, error, plus_error=None, use_scientific_notation=3, + include_error=True, use_relative_error=False, ndecs=None): + """Given a numerical value and some bound on it, formats the number into a + string such that the value is rounded to the nearest significant figure, + which is determined by the error = abs(value-bound). + + Note: if either use_scientific_notation or include_error are True, the + returned string will include LaTeX characters. + + Parameters + ---------- + value : float + The value to format. + error : float + The uncertainty in the value. This is used to determine the + number of significant figures to print. If the value has no + uncertainty, you can just do value*1e-k, where k+1 is the number + of significant figures you want. + plus_error : {None, float} + The upper uncertainty on the value; i.e., what you need to add to the + value to get its upper bound. If provided, ``error`` is assumed to be + the negative; i.e., value +plus_error -error. The number of + significant figures printed is determined from min(error, + plus_error). + use_scientific_notation : int, optional + If ``abs(log10(value))`` is greater than the given value, the returned + string will be formatted as "\%.1f \\times 10^{p}", where p is the powers of 10 + needed for the leading number in the value to be in the singles spot. + Otherwise will return "\%.(p+1)f". Default is 3. To turn off, set to + ``numpy.inf``. Note: using scientific notation assumes that the + returned value will be enclosed in LaTeX math mode. + include_error : {True, bool} + Include the error in the return string; the output will be formatted as + val \\pm err, where err is the error rounded to the same + power of 10 as val. Otherwise, just the formatted value will + be returned. If plus_error is provided then the return text will be + formatted as ``val^{+plus_error}_{-error}``. + use_relative_error : {False, bool} + If include_error, the error will be formatted as a percentage of + the value. + ndecs: {None, int} + Number of digits after the decimal point. If not provided, + it will default to the number implied by the error. + + Returns + ------- + string + The value (and error, if include_error is True) formatted as a string. + + + Examples + -------- + Given a value and its uncertainty: + + >>> val, err + (3.9278372067613837e-22, 2.2351435286500487e-23) + + Format with error quoted: + + >>> format_value(val, err) + '3.93 \\pm 0.22\\times 10^{-22}' + + Quote error as a relative error: + + >>> format_value(val, err, use_relative_error=True) + '3.93 \\times 10^{-22} \\pm5.6\\%' + + Format without the error and without scientific notation: + + >>> format_value(val, err, use_scientific_notation=float('inf'), + include_error=False) + '0.000000000000000000000393' + + Given a plus error: + + >>> err_plus + 8.2700310560051804e-24 + + Format with both bounds quoted: + + >>> format_value(val, err, plus_error=err_plus) + '3.928^{+0.083}_{-0.224}\\times 10^{-22}' + + Format with both bounds quoted as a relative error: + + >>> format_value(val, err, plus_error=err_plus, use_relative_error=True) + '3.928\\times 10^{-22}\\,^{+2.1\\%}_{-5.7\\%}' + + """ + minus_sign = '-' if value < 0. else '' + value = abs(value) + minus_err = abs(error) + if plus_error is None: + plus_err = minus_err + else: + plus_err = abs(plus_error) + error = min(minus_err, plus_err) + if value == 0. or abs(numpy.log10(value)) < use_scientific_notation: + conversion_factor = 0.
+ else: + conversion_factor = numpy.floor(numpy.log10(value)) + value = value * 10**(-conversion_factor) + error = error * 10**(-conversion_factor) + if conversion_factor == 0.: + powfactor = '' + elif conversion_factor == 1.: + powfactor = r'\times 10' + else: + powfactor = r'\times 10^{%i}' %(int(conversion_factor)) + + if ndecs is not None: + decs = value * 10**(-ndecs) + else: + decs = error + # now round to the appropriate number of sig figs + valtxt = get_signum(value, decs) + valtxt = '{}{}'.format(minus_sign, valtxt) + + if include_error: + if plus_error is None: + errtxt = get_signum(error, error) + if use_relative_error and float(valtxt) != 0.: + relative_err = 100.*float(errtxt)/float(valtxt) + # we round the relative error to the nearest 1% using + # get_signum; Note that if the relative error is < 1%, + # get_signum will automatically increase the number of digits + # after the decimal until it gets to the first non-zero value + relative_err = get_signum(relative_err, 1.) + txt = r'%s %s \pm%s\%%' %(valtxt, powfactor, relative_err) + else: + txt = r'%s \pm %s%s' %(valtxt, errtxt, powfactor) + else: + plus_err = plus_err * 10**(-conversion_factor) + minus_err = minus_err * 10**(-conversion_factor) + minus_err_txt = get_signum(minus_err, decs) + plus_err_txt = get_signum(plus_err, decs) + if use_relative_error and float(valtxt) != 0.: + # same as above, but with plus and minus + rel_plus_err = get_signum( + 100.*float(plus_err_txt)/float(valtxt), 1.) + rel_minus_err = get_signum( + 100.*float(minus_err_txt)/float(valtxt), 1.) + txt = r'%s%s\,^{+%s\%%}_{-%s\%%}' %(valtxt, powfactor, + rel_plus_err, rel_minus_err) + else: + txt = r'%s^{+%s}_{-%s}%s' %(valtxt, plus_err_txt, + minus_err_txt, powfactor) + else: + txt = r'%s%s' %(valtxt, powfactor) + return txt
+ + + +__all__ = [ + "mathjax_html_header", + "drop_trailing_zeros", + "get_signum", + "format_value" +] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/table_utils.html b/latest/html/_modules/pycbc/results/table_utils.html new file mode 100644 index 00000000000..7f77a2a586d --- /dev/null +++ b/latest/html/_modules/pycbc/results/table_utils.html @@ -0,0 +1,327 @@ + + + + + + pycbc.results.table_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.table_utils

+# Copyright (C) 2014 Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" This module provides functions to generate sortable html tables
+"""
+import mako.template
+import uuid
+import copy
+import numpy
+
+google_table_template = mako.template.Template("""
+    <script type='text/javascript' src='https://www.google.com/jsapi'></script>
+    <script type='text/javascript'>
+      google.load('visualization', '1', {packages:['table']});
+      google.setOnLoadCallback(drawTable);
+      function drawTable() {
+        var data = new google.visualization.DataTable();
+        % for type, name in column_descriptions:
+            data.addColumn('${str(type)}', '${str(name)}');
+        % endfor
+        data.addRows(${data});
+
+        % if format_strings is not None:
+            % for i, format_string in enumerate(format_strings):
+                % if format_string is not None:
+                    var formatter = new google.visualization.NumberFormat({pattern:'${format_string}'});
+                    formatter.format(data, ${i});
+                % endif
+            % endfor
+        % endif
+        var table = new google.visualization.Table(document.getElementById('${div_id}'));
+        table.draw(data, {showRowNumber: 'true',
+                          page: '${page_enable}',
+                          allowHtml: 'true',
+                          pageSize: ${page_size}});
+      }
+    </script>
+    <div id='${div_id}'></div>
+""")
+
+
+[docs] +def html_table(columns, names, page_size=None, format_strings=None): + """ Return an html table of this data + + Parameters + ---------- + columns : list of numpy arrays + The data for each column of the table. + names : list of strings + The list of column names. + page_size : {int, None}, optional + The number of items to show on each page of the table + format_strings : {lists of strings, None}, optional + The ICU format string for each column, None for no formatting. All + columns must have a format string if provided. + + Returns + ------- + html_table : str + A str containing the html code to display a table of this data + """ + if page_size is None: + page = 'disable' + else: + page = 'enable' + + div_id = uuid.uuid4() + + column_descriptions = [] + for column, name in zip(columns, names): + if column.dtype.kind == 'S' or column.dtype.kind == 'U': + ctype = 'string' + else: + ctype = 'number' + column_descriptions.append((ctype, name)) + + data = [] + for item in zip(*columns): + data.append(list(item)) + + return google_table_template.render(div_id=div_id, + page_enable=page, + column_descriptions = column_descriptions, + page_size=page_size, + data=data, + format_strings=format_strings, + )
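+Example (illustrative sketch, not part of the module; the event names and SNR
+values are just placeholders): building a sortable table from two columns::
+
+    import numpy
+    from pycbc.results.table_utils import html_table
+
+    events = numpy.array(['GW150914', 'GW151226'])
+    snrs = numpy.array([24.4, 13.1])
+    page = html_table([events, snrs], ['Event', 'Network SNR'],
+                      page_size=10, format_strings=[None, '#.#'])
+    # `page` is an html/javascript snippet that can be embedded in a results page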
+ + +static_table_template = mako.template.Template(""" + <table class="table"> + % for row in range(n_rows): + % if titles is not None: + <tr> + % if row_labels is not None: + <td> + </td> + % endif + % for i in range(n_columns): + <th> + ${titles[row * n_columns + i]} + </th> + % endfor + </tr> + % endif + + % for i in range(len(data)): + <tr> + % if row_labels is not None: + <td> + ${row_labels[i]} + </td> + % endif + % for j in range(n_columns): + <td> + ${data[i][row * n_columns + j]} + </td> + % endfor + </tr> + % endfor + % endfor + </table> +""") + +
+[docs] +def static_table(data, titles=None, columns_max=None, row_labels=None): + """ Return an html table of this data + + Parameters + ---------- + data : two-dimensional string array + Array containing the cell values + titles : list of str, optional + The column titles; must have one entry per column of ``data``. + columns_max : integer or None + If given, will restrict the number of columns in the table + row_labels : list of strings + Optional list of row labels to be given as the first cell in + each data row. Does not count towards columns_max. + + Returns + ------- + html_table : str + A string containing the html table. + """ + data = copy.deepcopy(data) + titles = copy.deepcopy(titles) + row_labels = copy.deepcopy(row_labels) + drows, dcols = numpy.array(data).shape + if titles is not None and not len(titles) == dcols: + raise ValueError("titles and data lengths do not match") + + if row_labels is not None and not len(row_labels) == drows: + raise ValueError( + "row_labels must have the same number of rows as data" + ) + + if columns_max is not None: + n_rows = int(numpy.ceil(len(data[0]) / columns_max)) + n_columns = min(columns_max, len(data[0])) + if len(data[0]) < n_rows * n_columns: + # Pad the data and titles with empty strings + n_missing = int(n_rows * n_columns - len(data[0])) + data = numpy.hstack((data, numpy.zeros((len(data), n_missing), dtype='U1'))) + if titles is not None: + titles += [' '] * n_missing + else: + n_rows = 1 + n_columns = len(data[0]) + + return static_table_template.render( + data=data, + titles=titles, + n_columns=n_columns, + n_rows=n_rows, + row_labels=row_labels, + )
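+Example (illustrative sketch, not part of the module; the values are
+placeholders): a small static table with column titles and row labels::
+
+    from pycbc.results.table_utils import static_table
+
+    data = [['0.32', '1.2e-5'], ['0.45', '3.4e-6']]
+    html = static_table(data, titles=['Statistic', 'FAR (1/yr)'],
+                        row_labels=['Candidate 1', 'Candidate 2'])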
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/results/versioning.html b/latest/html/_modules/pycbc/results/versioning.html new file mode 100644 index 00000000000..561aa40ad70 --- /dev/null +++ b/latest/html/_modules/pycbc/results/versioning.html @@ -0,0 +1,276 @@ + + + + + + pycbc.results.versioning — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.results.versioning

+# Copyright (C) 2015 Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+import logging
+import subprocess
+import urllib.parse
+
+import lal
+import lalframe
+
+import pycbc.version
+
+logger = logging.getLogger('pycbc.results.versioning')
+
+
+[docs] +def get_library_version_info(): + """This will return a list of dictionaries containing versioning + information about the various LIGO libraries that PyCBC will use in an + analysis run.""" + library_list = [] + + def add_info_new_version(info_dct, curr_module, extra_str): + vcs_object = getattr(curr_module, extra_str +'VCSInfo') + info_dct['ID'] = vcs_object.vcsId + info_dct['Status'] = vcs_object.vcsStatus + info_dct['Version'] = vcs_object.version + info_dct['Tag'] = vcs_object.vcsTag + info_dct['Author'] = vcs_object.vcsAuthor + info_dct['Branch'] = vcs_object.vcsBranch + info_dct['Committer'] = vcs_object.vcsCommitter + info_dct['Date'] = vcs_object.vcsDate + + lalinfo = {} + lalinfo['Name'] = 'LAL' + try: + lalinfo['ID'] = lal.VCSId + lalinfo['Status'] = lal.VCSStatus + lalinfo['Version'] = lal.VCSVersion + lalinfo['Tag'] = lal.VCSTag + lalinfo['Author'] = lal.VCSAuthor + lalinfo['Branch'] = lal.VCSBranch + lalinfo['Committer'] = lal.VCSCommitter + lalinfo['Date'] = lal.VCSDate + except AttributeError: + add_info_new_version(lalinfo, lal, '') + library_list.append(lalinfo) + + lalframeinfo = {} + try: + lalframeinfo['Name'] = 'LALFrame' + lalframeinfo['ID'] = lalframe.FrameVCSId + lalframeinfo['Status'] = lalframe.FrameVCSStatus + lalframeinfo['Version'] = lalframe.FrameVCSVersion + lalframeinfo['Tag'] = lalframe.FrameVCSTag + lalframeinfo['Author'] = lalframe.FrameVCSAuthor + lalframeinfo['Branch'] = lalframe.FrameVCSBranch + lalframeinfo['Committer'] = lalframe.FrameVCSCommitter + lalframeinfo['Date'] = lalframe.FrameVCSDate + except AttributeError: + add_info_new_version(lalframeinfo, lalframe, 'Frame') + library_list.append(lalframeinfo) + + lalsimulationinfo = {} + lalsimulationinfo['Name'] = 'LALSimulation' + try: + import lalsimulation + lalsimulationinfo['ID'] = lalsimulation.SimulationVCSId + lalsimulationinfo['Status'] = lalsimulation.SimulationVCSStatus + lalsimulationinfo['Version'] = lalsimulation.SimulationVCSVersion + lalsimulationinfo['Tag'] = lalsimulation.SimulationVCSTag + lalsimulationinfo['Author'] = lalsimulation.SimulationVCSAuthor + lalsimulationinfo['Branch'] = lalsimulation.SimulationVCSBranch + lalsimulationinfo['Committer'] = lalsimulation.SimulationVCSCommitter + lalsimulationinfo['Date'] = lalsimulation.SimulationVCSDate + except AttributeError: + add_info_new_version(lalsimulationinfo, lalsimulation, 'Simulation') + except ImportError: + pass + library_list.append(lalsimulationinfo) + + pycbcinfo = {} + pycbcinfo['Name'] = 'PyCBC' + pycbcinfo['ID'] = pycbc.version.version + pycbcinfo['Status'] = pycbc.version.git_status + pycbcinfo['Version'] = pycbc.version.release or '' + pycbcinfo['Tag'] = pycbc.version.git_tag + pycbcinfo['Author'] = pycbc.version.git_author + pycbcinfo['Builder'] = pycbc.version.git_builder + pycbcinfo['Branch'] = pycbc.version.git_branch + pycbcinfo['Committer'] = pycbc.version.git_committer + pycbcinfo['Date'] = pycbc.version.git_build_date + library_list.append(pycbcinfo) + + return library_list
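+Example (illustrative sketch, not part of the module): printing the name and
+version of each library PyCBC links against::
+
+    from pycbc.results.versioning import get_library_version_info
+
+    for library in get_library_version_info():
+        print(library['Name'], library.get('Version', 'unknown'))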
+ + +
+[docs] +def get_code_version_numbers(executable_names, executable_files): + """Extract the version information for the given executables. + + Parameters + ---------- + executable_names : list of str + Names identifying each executable. + executable_files : list of str + Locations (paths or URLs) of the corresponding executables. + + Returns + ------- + dict + A dictionary keyed by the executable name with values giving the + version string for each executable. + """ + code_version_dict = {} + for exe_name, value in zip(executable_names, executable_files): + value = urllib.parse.urlparse(value) + logger.info("Getting version info for %s", exe_name) + version_string = None + if value.scheme in ['gsiftp', 'http', 'https']: + code_version_dict[exe_name] = "Using bundle downloaded from %s" % value + elif value.scheme == 'singularity': + txt = ( + "Executable run from a singularity image. See config file " + "and site catalog for details of what image was used." + ) + code_version_dict[exe_name] = txt + else: + try: + version_string = subprocess.check_output( + [value.path, '--version'], + stderr=subprocess.STDOUT + ).decode() + except subprocess.CalledProcessError: + version_string = "Executable fails on {} --version" + version_string = version_string.format(value.path) + except OSError: + version_string = "Executable doesn't seem to exist(!)" + code_version_dict[exe_name] = version_string + return code_version_dict
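+Example (illustrative sketch, not part of the module; the executable paths are
+hypothetical): collecting version strings for a couple of executables::
+
+    from pycbc.results.versioning import get_code_version_numbers
+
+    versions = get_code_version_numbers(
+        ['pycbc_inspiral', 'lalapps_inspinj'],
+        ['/usr/bin/pycbc_inspiral', '/usr/bin/lalapps_inspinj'])
+    for exe_name, version in versions.items():
+        print(exe_name, ':', version)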
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/scheme.html b/latest/html/_modules/pycbc/scheme.html new file mode 100644 index 00000000000..7e58fa21030 --- /dev/null +++ b/latest/html/_modules/pycbc/scheme.html @@ -0,0 +1,507 @@ + + + + + + pycbc.scheme — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.scheme

+# Copyright (C) 2014  Alex Nitz, Andrew Miller
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides Python contexts that set the default behavior for PyCBC
+objects.
+"""
+import os
+import ctypes  # used for ctypes.RTLD_GLOBAL when loading libgomp in CPUScheme
+import pycbc
+from functools import wraps
+import logging
+from .libutils import get_ctypes_library
+
+logger = logging.getLogger('pycbc.scheme')
+
+
+class _SchemeManager(object):
+    _single = None
+
+    def __init__(self):
+
+        if _SchemeManager._single is not None:
+            raise RuntimeError("SchemeManager is a private class")
+        _SchemeManager._single= self
+
+        self.state= None
+        self._lock= False
+
+    def lock(self):
+        self._lock= True
+
+    def unlock(self):
+        self._lock= False
+
+    def shift_to(self, state):
+        if self._lock is False:
+            self.state = state
+        else:
+            raise RuntimeError("The state is locked, cannot shift schemes")
+
+# Create the global processing scheme manager
+mgr = _SchemeManager()
+DefaultScheme = None
+default_context = None
+
+
+
+[docs] +class Scheme(object): + """Context that sets PyCBC objects to use CPU processing. """ + _single = None + def __init__(self): + if DefaultScheme is type(self): + return + if Scheme._single is not None: + raise RuntimeError("Only one processing scheme can be used") + Scheme._single = True + def __enter__(self): + mgr.shift_to(self) + mgr.lock() + def __exit__(self, type, value, traceback): + mgr.unlock() + mgr.shift_to(default_context) + def __del__(self): + if Scheme is not None: + Scheme._single = None
+ + +_cuda_cleanup_list=[] + +
+[docs] +def register_clean_cuda(function): + _cuda_cleanup_list.append(function)
+ + +
+[docs] +def clean_cuda(context): + #Before cuda context is destroyed, all item destructions dependent on cuda + # must take place. This calls all functions that have been registered + # with _register_clean_cuda() in reverse order + #So the last one registered, is the first one cleaned + _cuda_cleanup_list.reverse() + for func in _cuda_cleanup_list: + func() + + context.pop() + from pycuda.tools import clear_context_caches + clear_context_caches()
+ + +
+[docs] +class CUDAScheme(Scheme): + """Context that sets PyCBC objects to use a CUDA processing scheme. """ + def __init__(self, device_num=0): + Scheme.__init__(self) + if not pycbc.HAVE_CUDA: + raise RuntimeError("Install PyCUDA to use CUDA processing") + import pycuda.driver + pycuda.driver.init() + self.device = pycuda.driver.Device(device_num) + self.context = self.device.make_context(flags=pycuda.driver.ctx_flags.SCHED_BLOCKING_SYNC) + import atexit + atexit.register(clean_cuda,self.context)
+ + +
+[docs] +class CPUScheme(Scheme): + def __init__(self, num_threads=1): + if isinstance(num_threads, int): + self.num_threads=num_threads + elif num_threads == 'env' and "PYCBC_NUM_THREADS" in os.environ: + self.num_threads = int(os.environ["PYCBC_NUM_THREADS"]) + else: + import multiprocessing + self.num_threads = multiprocessing.cpu_count() + self._libgomp = None + + def __enter__(self): + Scheme.__enter__(self) + try: + self._libgomp = get_ctypes_library("gomp", ['gomp'], + mode=ctypes.RTLD_GLOBAL) + except: + # Should we fail or give a warning if we cannot import + # libgomp? Seems to work even for MKL scheme, but + # not entirely sure why... + pass + + os.environ["OMP_NUM_THREADS"] = str(self.num_threads) + if self._libgomp is not None: + self._libgomp.omp_set_num_threads( int(self.num_threads) ) + + def __exit__(self, type, value, traceback): + os.environ["OMP_NUM_THREADS"] = "1" + if self._libgomp is not None: + self._libgomp.omp_set_num_threads(1) + Scheme.__exit__(self, type, value, traceback)
+ + +
+[docs] +class MKLScheme(CPUScheme): + def __init__(self, num_threads=1): + CPUScheme.__init__(self, num_threads) + if not pycbc.HAVE_MKL: + raise RuntimeError("Can't find MKL libraries")
+ + +
+[docs] +class NumpyScheme(CPUScheme): + pass
+ + + +scheme_prefix = { + CUDAScheme: "cuda", + CPUScheme: "cpu", + MKLScheme: "mkl", + NumpyScheme: "numpy", +} +_scheme_map = {v: k for (k, v) in scheme_prefix.items()} + +_default_scheme_prefix = os.getenv("PYCBC_SCHEME", "cpu") +try: + _default_scheme_class = _scheme_map[_default_scheme_prefix] +except KeyError as exc: + raise RuntimeError( + "PYCBC_SCHEME={!r} not recognised, please select one of: {}".format( + _default_scheme_prefix, + ", ".join(map(repr, _scheme_map)), + ), + ) + +
+[docs] +class DefaultScheme(_default_scheme_class): + pass
+ + +default_context = DefaultScheme() +mgr.state = default_context +scheme_prefix[DefaultScheme] = _default_scheme_prefix + +
+[docs] +def current_prefix(): + return scheme_prefix[type(mgr.state)]
+ + +_import_cache = {} +
+[docs] +def schemed(prefix): + + def scheming_function(func): + @wraps(func) + def _scheming_function(*args, **kwds): + try: + return _import_cache[mgr.state][func](*args, **kwds) + except KeyError: + exc_errors = [] + for sch in mgr.state.__class__.__mro__[0:-2]: + try: + backend = __import__(prefix + scheme_prefix[sch], + fromlist=[func.__name__]) + schemed_fn = getattr(backend, func.__name__) + except (ImportError, AttributeError) as e: + exc_errors += [e] + continue + + if mgr.state not in _import_cache: + _import_cache[mgr.state] = {} + + _import_cache[mgr.state][func] = schemed_fn + + return schemed_fn(*args, **kwds) + + err = "Failed to find implementation of (%s) for %s scheme." % ( + str(func), current_prefix()) + for emsg in exc_errors: + err += str(emsg) + raise RuntimeError(err) + return _scheming_function + + return scheming_function
+ + +
+[docs] +def cpuonly(func): + @wraps(func) + def _cpuonly(*args, **kwds): + if not issubclass(type(mgr.state), CPUScheme): + raise TypeError(func.__name__ + + " can only be called from a CPU processing scheme.") + else: + return func(*args, **kwds) + return _cpuonly
+ + +
+[docs] +def insert_processing_option_group(parser): + """ + Adds the options used to choose a processing scheme. This should be used + if your program supports the ability to select the processing scheme. + + Parameters + ---------- + parser : object + OptionParser instance + """ + processing_group = parser.add_argument_group("Options for selecting the" + " processing scheme in this program.") + processing_group.add_argument("--processing-scheme", + help="The choice of processing scheme. " + "Choices are " + str(list(set(scheme_prefix.values()))) + + ". (optional for CPU scheme) The number of " + "execution threads " + "can be indicated by cpu:NUM_THREADS, " + "where NUM_THREADS " + "is an integer. The default is a single thread. " + "If the scheme is provided as cpu:env, the number " + "of threads can be provided by the PYCBC_NUM_THREADS " + "environment variable. If the environment variable " + "is not set, the number of threads matches the number " + "of logical cores. ", + default="cpu") + + processing_group.add_argument("--processing-device-id", + help="(optional) ID of GPU to use for accelerated " + "processing", + default=0, type=int)
+ + +
+[docs] +def from_cli(opt): + """Parses the command line options and returns a processing scheme. + + Parameters + ---------- + opt: object + Result of parsing the CLI with OptionParser, or any object with + the required attributes. + + Returns + ------- + ctx: Scheme + Returns the requested processing scheme. + """ + scheme_str = opt.processing_scheme.split(':') + name = scheme_str[0] + + if name == "cuda": + logger.info("Running with CUDA support") + ctx = CUDAScheme(opt.processing_device_id) + elif name == "mkl": + if len(scheme_str) > 1: + numt = scheme_str[1] + if numt.isdigit(): + numt = int(numt) + ctx = MKLScheme(num_threads=numt) + else: + ctx = MKLScheme() + logger.info("Running with MKL support: %s threads" % ctx.num_threads) + else: + if len(scheme_str) > 1: + numt = scheme_str[1] + if numt.isdigit(): + numt = int(numt) + ctx = CPUScheme(num_threads=numt) + else: + ctx = CPUScheme() + logger.info("Running with CPU support: %s threads" % ctx.num_threads) + return ctx
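+Example (illustrative sketch, not part of the module): adding the processing
+scheme options to an argument parser and running inside the chosen scheme::
+
+    import argparse
+    from pycbc import scheme
+
+    parser = argparse.ArgumentParser()
+    scheme.insert_processing_option_group(parser)
+    opts = parser.parse_args(['--processing-scheme', 'cpu:2'])
+
+    ctx = scheme.from_cli(opts)
+    with ctx:
+        pass  # FFTs, filtering, etc. here would use a two-thread CPU scheme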
+ + +
+[docs] +def verify_processing_options(opt, parser): + """Parses the processing scheme options and verifies that they are + reasonable. + + Parameters + ---------- + opt : object + Result of parsing the CLI with OptionParser, or any object with the + required attributes. + parser : object + OptionParser instance. + """ + scheme_types = scheme_prefix.values() + if opt.processing_scheme.split(':')[0] not in scheme_types: + parser.error("(%s) is not a valid scheme type." + % opt.processing_scheme.split(':')[0])
+ + +
+[docs] +class ChooseBySchemeDict(dict): + """ This class represents a dictionary whose purpose is to choose objects + based on their processing scheme. The keys are intended to be processing + schemes. + """ + def __getitem__(self, scheme): + for base in scheme.__mro__[0:-1]: + try: + return dict.__getitem__(self, base) + except: + pass
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/sensitivity.html b/latest/html/_modules/pycbc/sensitivity.html new file mode 100644 index 00000000000..94e5047a891 --- /dev/null +++ b/latest/html/_modules/pycbc/sensitivity.html @@ -0,0 +1,468 @@ + + + + + + pycbc.sensitivity — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.sensitivity

+""" This module contains utilities for calculating search sensitivity
+"""
+import numpy
+import logging
+
+from pycbc.conversions import chirp_distance
+from . import bin_utils
+
+logger = logging.getLogger('pycbc.sensitivity')
+
+
+
+[docs] +def compute_search_efficiency_in_bins( + found, total, ndbins, + sim_to_bins_function=lambda sim: (sim.distance,)): + """ + Calculate search efficiency in the given ndbins. + + The first dimension of ndbins must be bins over injected distance. + sim_to_bins_function must map an object to a tuple indexing the ndbins. + """ + bins = bin_utils.BinnedRatios(ndbins) + + # increment the numerator and denominator with found / found+missed injs + [bins.incnumerator(sim_to_bins_function(sim)) for sim in found] + [bins.incdenominator(sim_to_bins_function(sim)) for sim in total] + + # regularize by setting denoms to 1 to avoid nans + bins.regularize() + + # efficiency array is the ratio + eff = bin_utils.BinnedArray(bin_utils.NDBins(ndbins), array=bins.ratio()) + + # compute binomial uncertainties in each bin + err_arr = numpy.sqrt(eff.array * (1-eff.array)/bins.denominator.array) + err = bin_utils.BinnedArray(bin_utils.NDBins(ndbins), array=err_arr) + + return eff, err
+ + + +
+[docs] +def compute_search_volume_in_bins(found, total, ndbins, sim_to_bins_function): + """ + Calculate search sensitive volume by integrating efficiency in distance bins + + No cosmological corrections are applied: flat space is assumed. + The first dimension of ndbins must be bins over injected distance. + sim_to_bins_function must map an object to a tuple indexing the ndbins. + """ + eff, err = compute_search_efficiency_in_bins( + found, total, ndbins, sim_to_bins_function) + dx = ndbins[0].upper() - ndbins[0].lower() + r = ndbins[0].centres() + + # volume and errors have one fewer dimension than the input NDBins + vol = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:])) + errors = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:])) + + # integrate efficiency to obtain volume + vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx) + + # propagate errors in eff to errors in V + errors.array = numpy.sqrt( + ((4 * numpy.pi * r**2 * err.array.T * dx)**2).sum(axis=-1) + ) + + return vol, errors
+ + + +
+[docs] +def volume_to_distance_with_errors(vol, vol_err): + """ Return the distance corresponding to a sensitive volume, along with + upper and lower error bounds. + + Parameters + ---------- + vol: float + The sensitive volume. + vol_err: float + The standard error on the volume. + + Returns + ------- + dist: float + The radius of a sphere with volume ``vol``. + ehigh: float + The upper error bound on the distance. + elow: float + The lower error bound on the distance. + + """ + dist = (vol * 3.0/4.0/numpy.pi) ** (1.0/3.0) + ehigh = ((vol + vol_err) * 3.0/4.0/numpy.pi) ** (1.0/3.0) - dist + delta = numpy.where(vol >= vol_err, vol - vol_err, 0) + elow = dist - (delta * 3.0/4.0/numpy.pi) ** (1.0/3.0) + return dist, ehigh, elow
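+Example (illustrative sketch, not part of the module; the numbers are
+placeholders): converting a volume estimate to a distance with error bounds::
+
+    from pycbc.sensitivity import volume_to_distance_with_errors
+
+    vol = 4.2e6      # sensitive volume, e.g. in Mpc**3
+    vol_err = 3.0e5  # standard error on the volume
+    dist, ehigh, elow = volume_to_distance_with_errors(vol, vol_err)
+    print('distance = %.0f +%.0f / -%.0f' % (dist, ehigh, elow))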
+ + + +
+[docs] +def volume_montecarlo(found_d, missed_d, found_mchirp, missed_mchirp, + distribution_param, distribution, limits_param, + min_param=None, max_param=None): + """ + Compute sensitive volume and standard error via direct Monte Carlo integral + + Injections should be made over a range of distances such that sensitive + volume due to signals closer than D_min is negligible, and efficiency at + distances above D_max is negligible. + TODO : Replace this function by Collin's formula given in Usman et al. ? + OR get that coded as a new function? + + Parameters + ----------- + found_d: numpy.ndarray + The distances of found injections + missed_d: numpy.ndarray + The distances of missed injections + found_mchirp: numpy.ndarray + Chirp mass of found injections + missed_mchirp: numpy.ndarray + Chirp mass of missed injections + distribution_param: string + Parameter D of the injections used to generate a distribution over + distance, may be 'distance', 'chirp_distance'. + distribution: string + form of the distribution over the parameter, may be + 'log' (uniform in log D) + 'uniform' (uniform in D) + 'distancesquared' (uniform in D**2) + 'volume' (uniform in D**3) + limits_param: string + Parameter Dlim specifying limits inside which injections were made, + may be 'distance', 'chirp_distance' + min_param: float + minimum value of Dlim at which injections were made; only used for + log distribution, then if None the minimum actually injected value + will be used + max_param: float + maximum value of Dlim out to which injections were made; if None + the maximum actually injected value will be used + + Returns + -------- + volume: float + Volume estimate + volume_error: float + The standard error in the volume + """ + d_power = { + 'log' : 3., + 'uniform' : 2., + 'distancesquared' : 1., + 'volume' : 0. + }[distribution] + mchirp_power = { + 'log' : 0., + 'uniform' : 5. / 6., + 'distancesquared' : 5. / 3., + 'volume' : 15. / 6. + }[distribution] + + # establish maximum physical distance: first for chirp distance distribution + if limits_param == 'chirp_distance': + mchirp_standard_bns = 1.4 * 2.**(-1. / 5.) + all_mchirp = numpy.concatenate((found_mchirp, missed_mchirp)) + max_mchirp = all_mchirp.max() + if max_param is not None: + # use largest injected mchirp to convert back to distance + max_distance = max_param * \ + (max_mchirp / mchirp_standard_bns)**(5. / 6.) + else: + max_distance = max(found_d.max(), missed_d.max()) + elif limits_param == 'distance': + if max_param is not None: + max_distance = max_param + else: + # if no max distance given, use max distance actually injected + max_distance = max(found_d.max(), missed_d.max()) + else: + raise NotImplementedError("%s is not a recognized parameter" + % limits_param) + + # volume of sphere + montecarlo_vtot = (4. / 3.) * numpy.pi * max_distance**3.
+ + # arrays of weights for the MC integral + if distribution_param == 'distance': + found_weights = found_d ** d_power + missed_weights = missed_d ** d_power + elif distribution_param == 'chirp_distance': + # weight by a power of mchirp to rescale injection density to the + # target mass distribution + found_weights = found_d ** d_power * \ + found_mchirp ** mchirp_power + missed_weights = missed_d ** d_power * \ + missed_mchirp ** mchirp_power + else: + raise NotImplementedError("%s is not a recognized distance parameter" + % distribution_param) + + all_weights = numpy.concatenate((found_weights, missed_weights)) + + # measured weighted efficiency is w_i for a found inj and 0 for missed + # MC integral is volume of sphere * (sum of found weights)/(sum of all weights) + # over injections covering the sphere + mc_weight_samples = numpy.concatenate((found_weights, 0 * missed_weights)) + mc_sum = sum(mc_weight_samples) + + if limits_param == 'distance': + mc_norm = sum(all_weights) + elif limits_param == 'chirp_distance': + # if injections are made up to a maximum chirp distance, account for + # extra missed injections that would occur when injecting up to + # maximum physical distance : this works out to a 'chirp volume' factor + mc_norm = sum(all_weights * (max_mchirp / all_mchirp) ** (5. / 2.)) + + # take out a constant factor + mc_prefactor = montecarlo_vtot / mc_norm + + # count the samples + if limits_param == 'distance': + Ninj = len(mc_weight_samples) + elif limits_param == 'chirp_distance': + # find the total expected number after extending from maximum chirp + # dist up to maximum physical distance + if distribution == 'log': + # only need minimum distance in this one case + if min_param is not None: + min_distance = min_param * \ + (numpy.min(all_mchirp) / mchirp_standard_bns) ** (5. / 6.) + else: + min_distance = min(numpy.min(found_d), numpy.min(missed_d)) + logrange = numpy.log(max_distance / min_distance) + Ninj = len(mc_weight_samples) + (5. / 6.) * \ + sum(numpy.log(max_mchirp / all_mchirp) / logrange) + else: + Ninj = sum((max_mchirp / all_mchirp) ** mchirp_power) + + # sample variance of efficiency: mean of the square - square of the mean + mc_sample_variance = sum(mc_weight_samples ** 2.) / Ninj - \ + (mc_sum / Ninj) ** 2. + + # return MC integral and its standard deviation; variance of mc_sum scales + # relative to sample variance by Ninj (Bienayme' rule) + vol = mc_prefactor * mc_sum + vol_err = mc_prefactor * (Ninj * mc_sample_variance) ** 0.5 + return vol, vol_err
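+Example (illustrative sketch, not part of the module): a toy Monte Carlo
+volume estimate where injections are uniform in distance and everything
+within 150 (distance units) is found::
+
+    import numpy
+    from pycbc.sensitivity import volume_montecarlo
+
+    rng = numpy.random.default_rng(1)
+    dists = rng.uniform(10., 400., 2000)
+    mchirps = numpy.full_like(dists, 1.2)
+    found = dists < 150.
+
+    vol, vol_err = volume_montecarlo(
+        dists[found], dists[~found], mchirps[found], mchirps[~found],
+        distribution_param='distance', distribution='uniform',
+        limits_param='distance')
+    # vol should come out close to (4/3) * pi * 150**3 for this toy setup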
+ + + +
+[docs] +def chirp_volume_montecarlo( + found_d, missed_d, found_mchirp, missed_mchirp, + distribution_param, distribution, limits_param, min_param, max_param): + + assert distribution_param == 'chirp_distance' + assert limits_param == 'chirp_distance' + + found_dchirp = chirp_distance(found_d, found_mchirp) + missed_dchirp = chirp_distance(missed_d, missed_mchirp) + # treat chirp distances in MC volume estimate as physical distances + return volume_montecarlo(found_dchirp, missed_dchirp, found_mchirp, + missed_mchirp, 'distance', distribution, + 'distance', min_param, max_param)
+ + + +
+[docs] +def volume_binned_pylal(f_dist, m_dist, bins=15): + """ Compute the sensitive volume using a distance binned efficiency estimate + + Parameters + ----------- + f_dist: numpy.ndarray + The distances of found injections + m_dist: numpy.ndarray + The distances of missed injections + + Returns + -------- + volume: float + Volume estimate + volume_error: float + The standard error in the volume + """ + def sims_to_bin(sim): + return (sim, 0) + + total = numpy.concatenate([f_dist, m_dist]) + ndbins = bin_utils.NDBins([bin_utils.LinearBins(min(total), max(total), bins), + bin_utils.LinearBins(0., 1, 1)]) + vol, verr = compute_search_volume_in_bins(f_dist, total, ndbins, sims_to_bin) + return vol.array[0], verr.array[0]
+ + + +
+[docs] +def volume_shell(f_dist, m_dist): + """ Compute the sensitive volume using sum over spherical shells. + + Parameters + ----------- + f_dist: numpy.ndarray + The distances of found injections + m_dist: numpy.ndarray + The distances of missed injections + + Returns + -------- + volume: float + Volume estimate + volume_error: float + The standard error in the volume + """ + f_dist.sort() + m_dist.sort() + distances = numpy.concatenate([f_dist, m_dist]) + dist_sorting = distances.argsort() + distances = distances[dist_sorting] + low = 0 + vol = 0 + vol_err = 0 + for i in range(len(distances)): + if i == len(distances) - 1: + break + + high = (distances[i+1] + distances[i]) / 2 + bin_width = high - low + + if dist_sorting[i] < len(f_dist): + vol += 4 * numpy.pi * distances[i]**2.0 * bin_width + vol_err += (4 * numpy.pi * distances[i]**2.0 * bin_width)**2.0 + + low = high + vol_err = vol_err ** 0.5 + return vol, vol_err
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/strain.html b/latest/html/_modules/pycbc/strain.html new file mode 100644 index 00000000000..9081dcd8880 --- /dev/null +++ b/latest/html/_modules/pycbc/strain.html @@ -0,0 +1,169 @@ + + + + + + pycbc.strain — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.strain

+from .recalibrate import CubicSpline, PhysicalModel
+
+from .strain import detect_loud_glitches
+from .strain import from_cli, from_cli_single_ifo, from_cli_multi_ifos
+from .strain import insert_strain_option_group, insert_strain_option_group_multi_ifo
+from .strain import verify_strain_options, verify_strain_options_multi_ifo
+from .strain import gate_data, StrainSegments, StrainBuffer
+
+from .gate import add_gate_option_group, gates_from_cli
+from .gate import apply_gates_to_td, apply_gates_to_fd, psd_gates_from_cli
+
+models = {
+    CubicSpline.name: CubicSpline,
+    PhysicalModel.name: PhysicalModel
+}
+
+
+
+[docs] +def read_model_from_config(cp, ifo, section="calibration"): + """Returns an instance of the calibration model specified in the + given configuration file. + + Parameters + ---------- + cp : WorkflowConfigParser + An open config file to read. + ifo : string + The detector (H1, L1) whose model will be loaded. + section : {"calibration", string} + Section name from which to retrieve the model. + + Returns + ------- + instance + An instance of the calibration model class. + """ + model = cp.get_opt_tag(section, "{}_model".format(ifo.lower()), None) + recalibrator = models[model].from_config(cp, ifo.lower(), section) + + return recalibrator
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/strain/calibration.html b/latest/html/_modules/pycbc/strain/calibration.html new file mode 100644 index 00000000000..a0ab5cb8ab2 --- /dev/null +++ b/latest/html/_modules/pycbc/strain/calibration.html @@ -0,0 +1,322 @@ + + + + + + pycbc.strain.calibration — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.strain.calibration

+# Copyright (C) 2018 Colm Talbot
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+""" Functions for adding calibration factors to waveform templates.
+"""
+
+import numpy as np
+from scipy.interpolate import UnivariateSpline
+from abc import (ABCMeta, abstractmethod)
+
+
+
+[docs] +class Recalibrate(metaclass=ABCMeta): + name = None + + def __init__(self, ifo_name): + self.ifo_name = ifo_name + self.params = dict() + +
+[docs] + @abstractmethod + def apply_calibration(self, strain): + """Apply calibration model + + This method should be overwritten by subclasses + + Parameters + ---------- + strain : FrequencySeries + The strain to be recalibrated. + + Return + ------ + strain_adjusted : FrequencySeries + The recalibrated strain. + """ + return
+ + +
+[docs] + def map_to_adjust(self, strain, prefix='recalib_', **params): + """Map an input dictionary of sampling parameters to the + adjust_strain function by filtering the dictionary for the + calibration parameters, then calling adjust_strain. + + Parameters + ---------- + strain : FrequencySeries + The strain to be recalibrated. + prefix: str + Prefix for calibration parameter names + params : dict + Dictionary of sampling parameters which includes + calibration parameters. + Return + ------ + strain_adjusted : FrequencySeries + The recalibrated strain. + """ + + self.params.update({ + key[len(prefix):]: params[key] + for key in params if prefix in key and self.ifo_name in key}) + + strain_adjusted = self.apply_calibration(strain) + + return strain_adjusted
+ + +
+[docs] + @classmethod + def from_config(cls, cp, ifo, section): + """Read a config file to get calibration options and transfer + functions which will be used to initialize the model. + + Parameters + ---------- + cp : WorkflowConfigParser + An open config file. + ifo : string + The detector (H1, L1) for which the calibration model will + be loaded. + section : string + The section name in the config file from which to retrieve + the calibration options. + Return + ------ + instance + An instance of the class. + """ + all_params = dict(cp.items(section)) + params = {key[len(ifo)+1:]: all_params[key] + for key in all_params if ifo.lower() in key} + model = params.pop('model') + params['ifo_name'] = ifo.lower() + + return all_models[model](**params)
+
+ + + +
+[docs] +class CubicSpline(Recalibrate): + name = 'cubic_spline' + + def __init__(self, minimum_frequency, maximum_frequency, n_points, + ifo_name): + """ + Cubic spline recalibration + + see https://dcc.ligo.org/LIGO-T1400682/public + + This assumes the spline points follow + np.logspace(np.log10(minimum_frequency), np.log10(maximum_frequency), + n_points) + + Parameters + ---------- + minimum_frequency: float + minimum frequency of spline points + maximum_frequency: float + maximum frequency of spline points + n_points: int + number of spline points + ifo_name: str + name of the detector (ifo) this model applies to + """ + Recalibrate.__init__(self, ifo_name=ifo_name) + minimum_frequency = float(minimum_frequency) + maximum_frequency = float(maximum_frequency) + n_points = int(n_points) + if n_points < 4: + raise ValueError( + 'Use at least 4 spline points for calibration model') + self.n_points = n_points + self.spline_points = np.logspace(np.log10(minimum_frequency), + np.log10(maximum_frequency), n_points)
+[docs] + def apply_calibration(self, strain): + """Apply calibration model + + This applies cubic spline calibration to the strain. + + Parameters + ---------- + strain : FrequencySeries + The strain to be recalibrated. + + Return + ------ + strain_adjusted : FrequencySeries + The recalibrated strain. + """ + amplitude_parameters =\ + [self.params['amplitude_{}_{}'.format(self.ifo_name, ii)] + for ii in range(self.n_points)] + amplitude_spline = UnivariateSpline(self.spline_points, + amplitude_parameters) + delta_amplitude = amplitude_spline(strain.sample_frequencies.numpy()) + + phase_parameters =\ + [self.params['phase_{}_{}'.format(self.ifo_name, ii)] + for ii in range(self.n_points)] + phase_spline = UnivariateSpline(self.spline_points, phase_parameters) + delta_phase = phase_spline(strain.sample_frequencies.numpy()) + + strain_adjusted = strain * (1.0 + delta_amplitude)\ + * (2.0 + 1j * delta_phase) / (2.0 - 1j * delta_phase) + + return strain_adjusted
+
+ + + +all_models = { + CubicSpline.name: CubicSpline +} +
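+Example (illustrative sketch, not part of the module; the parameter values are
+placeholders): applying a cubic-spline calibration model to a toy frequency
+series::
+
+    import numpy
+    from pycbc.types import FrequencySeries
+    from pycbc.strain.calibration import CubicSpline
+
+    model = CubicSpline(minimum_frequency=20., maximum_frequency=1024.,
+                        n_points=5, ifo_name='h1')
+    strain = FrequencySeries(numpy.ones(2049, dtype=complex), delta_f=1.)
+
+    # one amplitude and one phase parameter per spline point, named following
+    # the 'recalib_<quantity>_<ifo>_<index>' convention used by map_to_adjust
+    params = {}
+    for ii in range(5):
+        params['recalib_amplitude_h1_%d' % ii] = 0.01
+        params['recalib_phase_h1_%d' % ii] = 0.0
+    adjusted = model.map_to_adjust(strain, **params)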
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/strain/gate.html b/latest/html/_modules/pycbc/strain/gate.html new file mode 100644 index 00000000000..89e10534c02 --- /dev/null +++ b/latest/html/_modules/pycbc/strain/gate.html @@ -0,0 +1,328 @@ + + + + + + pycbc.strain.gate — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.strain.gate

+# Copyright (C) 2016 Collin Capano
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+""" Functions for applying gates to data.
+"""
+
+from scipy import linalg
+from . import strain
+
+
+def _gates_from_cli(opts, gate_opt):
+    """Parses the given `gate_opt` into something understandable by
+    `strain.gate_data`.
+    """
+    gates = {}
+    if getattr(opts, gate_opt) is None:
+        return gates
+    for gate in getattr(opts, gate_opt):
+        try:
+            ifo, central_time, half_dur, taper_dur = gate.split(':')
+            central_time = float(central_time)
+            half_dur = float(half_dur)
+            taper_dur = float(taper_dur)
+        except ValueError:
+            raise ValueError("--gate {} not formatted correctly; ".format(
+                gate) + "see help")
+        try:
+            gates[ifo].append((central_time, half_dur, taper_dur))
+        except KeyError:
+            gates[ifo] = [(central_time, half_dur, taper_dur)]
+    return gates
+
+
+
+[docs] +def gates_from_cli(opts): + """Parses the --gate option into something understandable by + `strain.gate_data`. + """ + return _gates_from_cli(opts, 'gate')
+ + + +
+[docs] +def psd_gates_from_cli(opts): + """Parses the --psd-gate option into something understandable by + `strain.gate_data`. + """ + return _gates_from_cli(opts, 'psd_gate')
+ + + +
+[docs] +def apply_gates_to_td(strain_dict, gates): + """Applies the given dictionary of gates to the given dictionary of + strain. + + Parameters + ---------- + strain_dict : dict + Dictionary of time-domain strain, keyed by the ifos. + gates : dict + Dictionary of gates. Keys should be the ifo to apply the data to, + values are a tuple giving the central time of the gate, the half + duration, and the taper duration. + + Returns + ------- + dict + Dictionary of time-domain strain with the gates applied. + """ + # copy data to new dictionary + outdict = dict(strain_dict.items()) + for ifo in gates: + outdict[ifo] = strain.gate_data(outdict[ifo], gates[ifo]) + return outdict
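For orientation, a small hand-written example of the gate dictionary consumed by apply_gates_to_td and apply_gates_to_fd (the GPS times below are made up; on the command line the same information is supplied as IFO:CENTRALTIME:HALFDUR:TAPERDUR strings and parsed by gates_from_cli):

# Hypothetical gates: one gate in H1, two in L1. Each entry is
# (central_time, half_duration, taper_duration) in seconds.
gates = {'H1': [(1126259462.0, 0.5, 0.25)],
         'L1': [(1126259461.0, 1.0, 0.25),
                (1126259470.5, 0.5, 0.25)]}
# strain_dict would map ifo names to TimeSeries objects, e.g. as
# returned by pycbc.strain.from_cli_multi_ifos:
# gated = apply_gates_to_td(strain_dict, gates)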
+ + + +
+[docs] +def apply_gates_to_fd(stilde_dict, gates): + """Applies the given dictionary of gates to the given dictionary of + strain in the frequency domain. + + Gates are applied by IFFT-ing the strain data to the time domain, applying + the gate, then FFT-ing back to the frequency domain. + + Parameters + ---------- + stilde_dict : dict + Dictionary of frequency-domain strain, keyed by the ifos. + gates : dict + Dictionary of gates. Keys should be the ifo to apply the data to, + values are a tuple giving the central time of the gate, the half + duration, and the taper duration. + + Returns + ------- + dict + Dictionary of frequency-domain strain with the gates applied. + """ + # copy data to new dictionary + outdict = dict(stilde_dict.items()) + # create a time-domain strain dictionary to apply the gates to + strain_dict = dict([[ifo, outdict[ifo].to_timeseries()] for ifo in gates]) + # apply gates and fft back to the frequency domain + for ifo,d in apply_gates_to_td(strain_dict, gates).items(): + outdict[ifo] = d.to_frequencyseries() + return outdict
+ + + +
+[docs] +def add_gate_option_group(parser): + """Adds the options needed to apply gates to data. + + Parameters + ---------- + parser : object + ArgumentParser instance. + """ + gate_group = parser.add_argument_group("Options for gating data") + + gate_group.add_argument("--gate", nargs="+", type=str, + metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", + help="Apply one or more gates to the data before " + "filtering.") + gate_group.add_argument("--gate-overwhitened", action="store_true", + help="Overwhiten data first, then apply the " + "gates specified in --gate. Overwhitening " + "allows for sharper tapers to be used, " + "since lines are not blurred.") + gate_group.add_argument("--psd-gate", nargs="+", type=str, + metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", + help="Apply one or more gates to the data used " + "for computing the PSD. Gates are applied " + "prior to FFT-ing the data for PSD " + "estimation.") + return gate_group
+ + + +
+[docs] +def gate_and_paint(data, lindex, rindex, invpsd, copy=True): + """Gates and in-paints data. + + Parameters + ---------- + data : TimeSeries + The data to gate. + lindex : int + The start index of the gate. + rindex : int + The end index of the gate. + invpsd : FrequencySeries + The inverse of the PSD. + copy : bool, optional + Copy the data before applying the gate. Otherwise, the gate will + be applied in-place. Default is True. + + Returns + ------- + TimeSeries : + The gated and in-painted time series. + """ + # Uses the hole-filling method of + # https://arxiv.org/pdf/1908.05644.pdf + # Copy the data and zero inside the hole + if copy: + data = data.copy() + # There is some ambiguity about whether the gate end time is rindex-1 or rindex + data[lindex:rindex] = 0 + + # get the over-whitened gated data + tdfilter = invpsd.astype('complex').to_timeseries() * invpsd.delta_t + owhgated_data = (data.to_frequencyseries() * invpsd).to_timeseries() + + # remove the projection into the null space + proj = linalg.solve_toeplitz(tdfilter[:(rindex - lindex)], + owhgated_data[lindex:rindex]) + data[lindex:rindex] -= proj + return data
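A minimal, self-contained sketch of calling gate_and_paint; the data and inverse PSD below are synthetic stand-ins (white noise and a flat unit inverse spectrum), not a realistic detector configuration:

import numpy as np
from pycbc.types import TimeSeries, FrequencySeries
from pycbc.strain.gate import gate_and_paint

# 16 s of white noise at 256 Hz and a flat (unit) inverse PSD of the
# matching length; both are purely illustrative.
data = TimeSeries(np.random.normal(size=4096), delta_t=1.0 / 256)
invpsd = FrequencySeries(np.ones(len(data) // 2 + 1),
                         delta_f=1.0 / data.duration)
# in-paint samples 1000 to 1100 (indices into the time series)
painted = gate_and_paint(data, 1000, 1100, invpsd, copy=True)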
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/strain/lines.html b/latest/html/_modules/pycbc/strain/lines.html new file mode 100644 index 00000000000..aeea0ae3e80 --- /dev/null +++ b/latest/html/_modules/pycbc/strain/lines.html @@ -0,0 +1,366 @@ + + + + + + pycbc.strain.lines — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.strain.lines

+# Copyright (C) 2019 Miriam Cabero
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+""" Functions for removing frequency lines from real data.
+"""
+
+import numpy
+from pycbc.types import TimeSeries, zeros
+
+
+[docs] +def complex_median(complex_list): + """ Get the median value of a list of complex numbers. + + Parameters + ---------- + complex_list: list + List of complex numbers to calculate the median. + + Returns + ------- + a + 1.j*b: complex number + The median of the real and imaginary parts. + """ + median_real = numpy.median([complex_number.real + for complex_number in complex_list]) + median_imag = numpy.median([complex_number.imag + for complex_number in complex_list]) + return median_real + 1.j*median_imag
+ + +
+[docs] +def avg_inner_product(data1, data2, bin_size): + """ Calculate the time-domain inner product averaged over bins. + + Parameters + ---------- + data1: pycbc.types.TimeSeries + First data set. + data2: pycbc.types.TimeSeries + Second data set, with same duration and sample rate as data1. + bin_size: float + Duration of the bins the data will be divided into to calculate + the inner product. + + Returns + ------- + inner_prod: list + The (complex) inner product of data1 and data2 obtained in each bin. + amp: float + The absolute value of the median of the inner product. + phi: float + The angle of the median of the inner product. + """ + assert data1.duration == data2.duration + assert data1.sample_rate == data2.sample_rate + seglen = int(bin_size * data1.sample_rate) + inner_prod = [] + for idx in range(int(data1.duration / bin_size)): + start, end = idx * seglen, (idx+1) * seglen + norm = len(data1[start:end]) + bin_prod = 2 * sum(data1.data[start:end].real * + numpy.conjugate(data2.data[start:end])) / norm + inner_prod.append(bin_prod) + + # Get the median over all bins to avoid outliers due to the presence + # of a signal in a particular bin. + inner_median = complex_median(inner_prod) + return inner_prod, numpy.abs(inner_median), numpy.angle(inner_median)
+ + +
+[docs] +def line_model(freq, data, tref, amp=1, phi=0): + """ Simple time-domain model for a frequency line. + + Parameters + ---------- + freq: float + Frequency of the line. + data: pycbc.types.TimeSeries + Reference data, to get delta_t, start_time, duration and sample_times. + tref: float + Reference time for the line model. + amp: {1., float}, optional + Amplitude of the frequency line. + phi: {0. float}, optional + Phase of the frequency line (radians). + + Returns + ------- + freq_line: pycbc.types.TimeSeries + A timeseries of the line model with frequency 'freq'. The returned + data are complex to allow measuring the amplitude and phase of the + corresponding frequency line in the strain data. For extraction, use + only the real part of the data. + """ + freq_line = TimeSeries(zeros(len(data)), delta_t=data.delta_t, + epoch=data.start_time) + + times = data.sample_times - float(tref) + alpha = 2 * numpy.pi * freq * times + phi + freq_line.data = amp * numpy.exp(1.j * alpha) + + return freq_line
+ + +
+[docs] +def matching_line(freq, data, tref, bin_size=1): + """ Find the parameter of the line with frequency 'freq' in the data. + + Parameters + ---------- + freq: float + Frequency of the line to find in the data. + data: pycbc.types.TimeSeries + Data from which the line wants to be measured. + tref: float + Reference time for the frequency line. + bin_size: {1, float}, optional + Duration of the bins the data will be divided into for averaging. + + Returns + ------- + line_model: pycbc.types.TimeSeries + A timeseries containing the frequency line with the amplitude + and phase measured from the data. + """ + template_line = line_model(freq, data, tref=tref) + # Measure amplitude and phase of the line in the data + _, amp, phi = avg_inner_product(data, template_line, + bin_size=bin_size) + return line_model(freq, data, tref=tref, amp=amp, phi=phi)
+ + +
+[docs] +def calibration_lines(freqs, data, tref=None): + """ Extract the calibration lines from strain data. + + Parameters + ---------- + freqs: list + List containing the frequencies of the calibration lines. + data: pycbc.types.TimeSeries + Strain data to extract the calibration lines from. + tref: {None, float}, optional + Reference time for the line. If None, will use data.start_time. + + Returns + ------- + data: pycbc.types.TimeSeries + The strain data with the calibration lines removed. + """ + if tref is None: + tref = float(data.start_time) + for freq in freqs: + measured_line = matching_line(freq, data, tref, + bin_size=data.duration) + data -= measured_line.data.real + + return data
+ + +
+[docs] +def clean_data(freqs, data, chunk, avg_bin): + """ Extract time-varying (wandering) lines from strain data. + + Parameters + ---------- + freqs: list + List containing the frequencies of the wandering lines. + data: pycbc.types.TimeSeries + Strain data to extract the wandering lines from. + chunk: float + Duration of the chunks the data will be divided into to account + for the time variation of the wandering lines. Should be smaller + than data.duration, and allow for at least a few chunks. + avg_bin: float + Duration of the bins each chunk will be divided into for averaging + the inner product when measuring the parameters of the line. Should + be smaller than chunk. + + Returns + ------- + data: pycbc.types.TimeSeries + The strain data with the wandering lines removed. + """ + if avg_bin >= chunk: + raise ValueError('The bin size for averaging the inner product ' + 'must be less than the chunk size.') + if chunk >= data.duration: + raise ValueError('The chunk size must be less than the ' + 'data duration.') + steps = numpy.arange(0, int(data.duration/chunk)-0.5, 0.5) + seglen = chunk * data.sample_rate + + tref = float(data.start_time) + for freq in freqs: + for step in steps: + start, end = int(step*seglen), int((step+1)*seglen) + chunk_line = matching_line(freq, data[start:end], + tref, bin_size=avg_bin) + + # Apply hann window on sides of chunk_line to smooth boundaries + # and avoid discontinuities + hann_window = numpy.hanning(len(chunk_line)) + apply_hann = TimeSeries(numpy.ones(len(chunk_line)), + delta_t=chunk_line.delta_t, + epoch=chunk_line.start_time) + if step == 0: + apply_hann.data[len(hann_window)//2:] *= \ + hann_window[len(hann_window)//2:] + elif step == steps[-1]: + apply_hann.data[:len(hann_window)//2] *= \ + hann_window[:len(hann_window)//2] + else: + apply_hann.data *= hann_window + chunk_line.data *= apply_hann.data + data.data[start:end] -= chunk_line.data.real + + return data
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/strain/recalibrate.html b/latest/html/_modules/pycbc/strain/recalibrate.html new file mode 100644 index 00000000000..e0c75ef4e0c --- /dev/null +++ b/latest/html/_modules/pycbc/strain/recalibrate.html @@ -0,0 +1,684 @@ + + + + + + pycbc.strain.recalibrate — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.strain.recalibrate

+""" Classes and functions for adjusting strain data.
+"""
+# Copyright (C) 2015 Ben Lackey, Christopher M. Biwer,
+#                    Daniel Finstad, Colm Talbot, Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+from abc import (ABCMeta, abstractmethod)
+
+import numpy as np
+from scipy.interpolate import UnivariateSpline
+from pycbc.types import FrequencySeries
+
+
+
+[docs] +class Recalibrate(metaclass=ABCMeta): + """ Base class for modifying calibration """ + name = None + + def __init__(self, ifo_name): + self.ifo_name = ifo_name + self.params = dict() + +
+[docs] + @abstractmethod + def apply_calibration(self, strain): + """Apply calibration model + + This method should be overwritten by subclasses + + Parameters + ---------- + strain : FrequencySeries + The strain to be recalibrated. + + Return + ------ + strain_adjusted : FrequencySeries + The recalibrated strain. + """ + return
+ + +
+[docs] + def map_to_adjust(self, strain, prefix='recalib_', **params): + """Map an input dictionary of sampling parameters to the + adjust_strain function by filtering the dictionary for the + calibration parameters, then calling adjust_strain. + + Parameters + ---------- + strain : FrequencySeries + The strain to be recalibrated. + prefix: str + Prefix for calibration parameter names + params : dict + Dictionary of sampling parameters which includes + calibration parameters. + Return + ------ + strain_adjusted : FrequencySeries + The recalibrated strain. + """ + + self.params.update({ + key[len(prefix):]: params[key] + for key in params if prefix in key and self.ifo_name in key}) + + strain_adjusted = self.apply_calibration(strain) + + return strain_adjusted
+ + +
+[docs] + @classmethod + def from_config(cls, cp, ifo, section): + """Read a config file to get calibration options and transfer + functions which will be used to initialize the model. + + Parameters + ---------- + cp : WorkflowConfigParser + An open config file. + ifo : string + The detector (H1, L1) for which the calibration model will + be loaded. + section : string + The section name in the config file from which to retrieve + the calibration options. + Return + ------ + instance + An instance of the class. + """ + all_params = dict(cp.items(section)) + params = {key[len(ifo)+1:]: all_params[key] + for key in all_params if ifo.lower() in key} + params = {key: params[key] for key in params} + params.pop('model') + params['ifo_name'] = ifo.lower() + + return cls(**params)
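A hedged sketch of the option names from_config expects. Here a plain ConfigParser stands in for WorkflowConfigParser, which is sufficient because only cp.items(section) is used; the section and option names are illustrative:

import configparser
from pycbc.strain.recalibrate import CubicSpline

ini = """
[h1_calibration_model]
h1_model = cubic_spline
h1_minimum_frequency = 20
h1_maximum_frequency = 1024
h1_n_points = 5
"""
cp = configparser.ConfigParser()
cp.read_string(ini)
# from_config strips the 'h1_' prefix, pops 'model' and adds ifo_name='h1'
spline = CubicSpline.from_config(cp, 'H1', 'h1_calibration_model')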
+
+ + + +
+[docs] +class CubicSpline(Recalibrate): + """Cubic spline recalibration + + see https://dcc.ligo.org/LIGO-T1400682/public + + This assumes the spline points follow + np.logspace(np.log(minimum_frequency), np.log(maximum_frequency), + n_points) + + Parameters + ---------- + minimum_frequency: float + minimum frequency of spline points + maximum_frequency: float + maximum frequency of spline points + n_points: int + number of spline points + """ + name = 'cubic_spline' + + def __init__(self, minimum_frequency, maximum_frequency, n_points, + ifo_name): + Recalibrate.__init__(self, ifo_name=ifo_name) + minimum_frequency = float(minimum_frequency) + maximum_frequency = float(maximum_frequency) + n_points = int(n_points) + if n_points < 4: + raise ValueError( + 'Use at least 4 spline points for calibration model') + self.n_points = n_points + self.spline_points = np.logspace(np.log10(minimum_frequency), + np.log10(maximum_frequency), n_points) + +
+[docs] + def apply_calibration(self, strain): + """Apply calibration model + + This applies cubic spline calibration to the strain. + + Parameters + ---------- + strain : FrequencySeries + The strain to be recalibrated. + + Return + ------ + strain_adjusted : FrequencySeries + The recalibrated strain. + """ + amplitude_parameters =\ + [self.params['amplitude_{}_{}'.format(self.ifo_name, ii)] + for ii in range(self.n_points)] + amplitude_spline = UnivariateSpline(self.spline_points, + amplitude_parameters) + delta_amplitude = amplitude_spline(strain.sample_frequencies.numpy()) + + phase_parameters =\ + [self.params['phase_{}_{}'.format(self.ifo_name, ii)] + for ii in range(self.n_points)] + phase_spline = UnivariateSpline(self.spline_points, phase_parameters) + delta_phase = phase_spline(strain.sample_frequencies.numpy()) + + strain_adjusted = strain * (1.0 + delta_amplitude)\ + * (2.0 + 1j * delta_phase) / (2.0 - 1j * delta_phase) + + return strain_adjusted
+
+ + + +
+[docs] +class PhysicalModel(object): + """ Class for adjusting time-varying calibration parameters of given + strain data. + + Parameters + ---------- + strain : FrequencySeries + The strain to be adjusted. + freq : array + The frequencies corresponding to the values of c0, d0, a0 in Hertz. + fc0 : float + Coupled-cavity (CC) pole at time t0, when c0=c(t0) and a0=a(t0) are + measured. + c0 : array + Initial sensing function at t0 for the frequencies. + d0 : array + Digital filter for the frequencies. + a_tst0 : array + Initial actuation function for the test mass at t0 for the + frequencies. + a_pu0 : array + Initial actuation function for the penultimate mass at t0 for the + frequencies. + fs0 : float + Initial spring frequency at t0 for the signal recycling cavity. + qinv0 : float + Initial inverse quality factor at t0 for the signal recycling + cavity. + """ + + name = 'physical_model' + def __init__(self, freq=None, fc0=None, c0=None, d0=None, + a_tst0=None, a_pu0=None, fs0=None, qinv0=None): + self.freq = np.real(freq) + self.c0 = c0 + self.d0 = d0 + self.a_tst0 = a_tst0 + self.a_pu0 = a_pu0 + self.fc0 = float(fc0) + self.fs0 = float(fs0) + self.qinv0 = float(qinv0) + + # initial detuning at time t0 + init_detuning = self.freq**2 / (self.freq**2 - 1.0j * self.freq * \ + self.fs0 * self.qinv0 + self.fs0**2) + + # initial open loop gain + self.g0 = self.c0 * self.d0 * (self.a_tst0 + self.a_pu0) + + # initial response function + self.r0 = (1.0 + self.g0) / self.c0 + + # residual of c0 after factoring out the coupled cavity pole fc0 + self.c_res = self.c0 * (1 + 1.0j * self.freq / self.fc0)/init_detuning + +
+[docs] + def update_c(self, fs=None, qinv=None, fc=None, kappa_c=1.0): + """ Calculate the sensing function c(f,t) given the new parameters + kappa_c(t), kappa_a(t), f_c(t), fs, and qinv. + + Parameters + ---------- + fc : float + Coupled-cavity (CC) pole at time t. + kappa_c : float + Scalar correction factor for sensing function at time t. + fs : float + Spring frequency for signal recycling cavity. + qinv : float + Inverse quality factor for signal recycling cavity. + + Returns + ------- + c : numpy.array + The new sensing function c(f,t). + """ + detuning_term = self.freq**2 / (self.freq**2 - 1.0j *self.freq*fs * \ + qinv + fs**2) + return self.c_res * kappa_c / (1 + 1.0j * self.freq/fc)*detuning_term
+ + +
+[docs] + def update_g(self, fs=None, qinv=None, fc=None, kappa_tst_re=1.0, + kappa_tst_im=0.0, kappa_pu_re=1.0, kappa_pu_im=0.0, + kappa_c=1.0): + """ Calculate the open loop gain g(f,t) given the new parameters + kappa_c(t), kappa_a(t), f_c(t), fs, and qinv. + + Parameters + ---------- + fc : float + Coupled-cavity (CC) pole at time t. + kappa_c : float + Scalar correction factor for sensing function c at time t. + kappa_tst_re : float + Real part of scalar correction factor for actuation function + a_tst0 at time t. + kappa_pu_re : float + Real part of scalar correction factor for actuation function + a_pu0 at time t. + kappa_tst_im : float + Imaginary part of scalar correction factor for actuation function + a_tst0 at time t. + kappa_pu_im : float + Imaginary part of scalar correction factor for actuation function + a_pu0 at time t. + fs : float + Spring frequency for signal recycling cavity. + qinv : float + Inverse quality factor for signal recycling cavity. + + Returns + ------- + g : numpy.array + The new open loop gain g(f,t). + """ + c = self.update_c(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c) + a_tst = self.a_tst0 * (kappa_tst_re + 1.0j * kappa_tst_im) + a_pu = self.a_pu0 * (kappa_pu_re + 1.0j * kappa_pu_im) + return c * self.d0 * (a_tst + a_pu)
+ + +
+[docs] + def update_r(self, fs=None, qinv=None, fc=None, kappa_c=1.0, + kappa_tst_re=1.0, kappa_tst_im=0.0, kappa_pu_re=1.0, + kappa_pu_im=0.0): + """ Calculate the response function R(f,t) given the new parameters + kappa_c(t), kappa_a(t), f_c(t), fs, and qinv. + + Parameters + ---------- + fc : float + Coupled-cavity (CC) pole at time t. + kappa_c : float + Scalar correction factor for sensing function c at time t. + kappa_tst_re : float + Real part of scalar correction factor for actuation function + a_tst0 at time t. + kappa_pu_re : float + Real part of scalar correction factor for actuation function + a_pu0 at time t. + kappa_tst_im : float + Imaginary part of scalar correction factor for actuation function + a_tst0 at time t. + kappa_pu_im : float + Imaginary part of scalar correction factor for actuation function + a_pu0 at time t. + fs : float + Spring frequency for signal recycling cavity. + qinv : float + Inverse quality factor for signal recycling cavity. + + Returns + ------- + r : numpy.array + The new response function r(f,t). + """ + c = self.update_c(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c) + g = self.update_g(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c, + kappa_tst_re=kappa_tst_re, + kappa_tst_im=kappa_tst_im, + kappa_pu_re=kappa_pu_re, kappa_pu_im=kappa_pu_im) + return (1.0 + g) / c
+ + +
+[docs] + def adjust_strain(self, strain, delta_fs=None, delta_qinv=None, + delta_fc=None, kappa_c=1.0, kappa_tst_re=1.0, + kappa_tst_im=0.0, kappa_pu_re=1.0, kappa_pu_im=0.0): + """Adjust the FrequencySeries strain by changing the time-dependent + calibration parameters kappa_c(t), kappa_a(t), f_c(t), fs, and qinv. + + Parameters + ---------- + strain : FrequencySeries + The strain data to be adjusted. + delta_fc : float + Change in coupled-cavity (CC) pole at time t. + kappa_c : float + Scalar correction factor for sensing function c0 at time t. + kappa_tst_re : float + Real part of scalar correction factor for actuation function + A_{tst0} at time t. + kappa_tst_im : float + Imaginary part of scalar correction factor for actuation function + A_tst0 at time t. + kappa_pu_re : float + Real part of scalar correction factor for actuation function + A_{pu0} at time t. + kappa_pu_im : float + Imaginary part of scalar correction factor for actuation function + A_{pu0} at time t. + fs : float + Spring frequency for signal recycling cavity. + qinv : float + Inverse quality factor for signal recycling cavity. + + Returns + ------- + strain_adjusted : FrequencySeries + The adjusted strain. + """ + fc = self.fc0 + delta_fc if delta_fc else self.fc0 + fs = self.fs0 + delta_fs if delta_fs else self.fs0 + qinv = self.qinv0 + delta_qinv if delta_qinv else self.qinv0 + + # calculate adjusted response function + r_adjusted = self.update_r(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c, + kappa_tst_re=kappa_tst_re, + kappa_tst_im=kappa_tst_im, + kappa_pu_re=kappa_pu_re, + kappa_pu_im=kappa_pu_im) + + # calculate error function + k = r_adjusted / self.r0 + + # decompose into amplitude and unwrapped phase + k_amp = np.abs(k) + k_phase = np.unwrap(np.angle(k)) + + # convert to FrequencySeries by interpolating then resampling + order = 1 + k_amp_off = UnivariateSpline(self.freq, k_amp, k=order, s=0) + k_phase_off = UnivariateSpline(self.freq, k_phase, k=order, s=0) + freq_even = strain.sample_frequencies.numpy() + k_even_sample = k_amp_off(freq_even) * \ + np.exp(1.0j * k_phase_off(freq_even)) + strain_adjusted = FrequencySeries(strain.numpy() * \ + k_even_sample, + delta_f=strain.delta_f) + + return strain_adjusted
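To make the moving parts above concrete, here is a sketch with entirely synthetic transfer functions; every number is made up, and a real model would instead be built from measured transfer-function files via from_config below:

import numpy as np
from pycbc.types import FrequencySeries
from pycbc.strain.recalibrate import PhysicalModel

freq = np.linspace(10.0, 2048.0, 200)        # coarse frequency grid (Hz)
ones = np.ones(len(freq), dtype=complex)
model = PhysicalModel(freq=freq, fc0=400.0, fs0=10.0, qinv0=0.04,
                      c0=ones, d0=ones,
                      a_tst0=0.5 * ones, a_pu0=0.5 * ones)

# flat unit spectrum standing in for data; shift the CC pole by 10 Hz
# and scale the sensing function by 2 per cent
stilde = FrequencySeries(np.ones(4097, dtype=complex), delta_f=0.25)
adjusted = model.adjust_strain(stilde, delta_fc=10.0, kappa_c=1.02)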
+ + +
+[docs] + @classmethod + def tf_from_file(cls, path, delimiter=" "): + """Convert the contents of a file with the columns + [freq, real(h), imag(h)] to a numpy.array with columns + [freq, real(h)+j*imag(h)]. + + Parameters + ---------- + path : string + delimiter : {" ", string} + + Return + ------ + numpy.array + """ + data = np.loadtxt(path, delimiter=delimiter) + freq = data[:, 0] + h = data[:, 1] + 1.0j * data[:, 2] + return np.array([freq, h]).transpose()
+ + +
+[docs] + @classmethod + def from_config(cls, cp, ifo, section): + """Read a config file to get calibration options and transfer + functions which will be used to initialize the model. + + Parameters + ---------- + cp : WorkflowConfigParser + An open config file. + ifo : string + The detector (H1, L1) for which the calibration model will + be loaded. + section : string + The section name in the config file from which to retrieve + the calibration options. + + Return + ------ + instance + An instance of the Recalibrate class. + """ + # read transfer functions + tfs = [] + tf_names = ["a-tst", "a-pu", "c", "d"] + for tag in ['-'.join([ifo, "transfer-function", name]) + for name in tf_names]: + tf_path = cp.get_opt_tag(section, tag, None) + tfs.append(cls.tf_from_file(tf_path)) + a_tst0 = tfs[0][:, 1] + a_pu0 = tfs[1][:, 1] + c0 = tfs[2][:, 1] + d0 = tfs[3][:, 1] + freq = tfs[0][:, 0] + + # if upper stage actuation is included, read that in and add it + # to a_pu0 + uim_tag = '-'.join([ifo, 'transfer-function-a-uim']) + if cp.has_option(section, uim_tag): + tf_path = cp.get_opt_tag(section, uim_tag, None) + a_pu0 += cls.tf_from_file(tf_path)[:, 1] + + # read fc0, fs0, and qinv0 + fc0 = cp.get_opt_tag(section, '-'.join([ifo, "fc0"]), None) + fs0 = cp.get_opt_tag(section, '-'.join([ifo, "fs0"]), None) + qinv0 = cp.get_opt_tag(section, '-'.join([ifo, "qinv0"]), None) + + return cls(freq=freq, fc0=fc0, c0=c0, d0=d0, a_tst0=a_tst0, + a_pu0=a_pu0, fs0=fs0, qinv0=qinv0)
+ + +
+[docs] + def map_to_adjust(self, strain, **params): + """Map an input dictionary of sampling parameters to the + adjust_strain function by filtering the dictionary for the + calibration parameters, then calling adjust_strain. + + Parameters + ---------- + strain : FrequencySeries + The strain to be recalibrated. + params : dict + Dictionary of sampling parameters which includes + calibration parameters. + + Return + ------ + strain_adjusted : FrequencySeries + The recalibrated strain. + """ + # calibration param names + arg_names = ['delta_fs', 'delta_fc', 'delta_qinv', 'kappa_c', + 'kappa_tst_re', 'kappa_tst_im', 'kappa_pu_re', + 'kappa_pu_im'] + # calibration param labels as they exist in config files + arg_labels = [''.join(['calib_', name]) for name in arg_names] + # default values for calibration params + default_values = [0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0] + # make list of calibration param values + calib_args = [] + for arg, val in zip(arg_labels, default_values): + if arg in params: + calib_args.append(params[arg]) + else: + calib_args.append(val) + # adjust the strain using calibration param values + strain_adjusted = self.adjust_strain(strain, delta_fs=calib_args[0], + delta_fc=calib_args[1], delta_qinv=calib_args[2], + kappa_c=calib_args[3], + kappa_tst_re=calib_args[4], + kappa_tst_im=calib_args[5], + kappa_pu_re=calib_args[6], + kappa_pu_im=calib_args[7]) + return strain_adjusted
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/strain/strain.html b/latest/html/_modules/pycbc/strain/strain.html new file mode 100644 index 00000000000..ad84a693af4 --- /dev/null +++ b/latest/html/_modules/pycbc/strain/strain.html @@ -0,0 +1,2257 @@ + + + + + + pycbc.strain.strain — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.strain.strain

+#Copyright (C) 2013 Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module contains functions for reading, generating, and segmenting strain data
+"""
+import copy
+import logging
+import functools
+import numpy
+
+from scipy.signal import kaiserord
+
+import pycbc.types
+from pycbc.types import TimeSeries, zeros
+from pycbc.types import Array, FrequencySeries
+from pycbc.types import MultiDetOptionAppendAction, MultiDetOptionAction
+from pycbc.types import MultiDetOptionActionSpecial
+from pycbc.types import DictOptionAction, MultiDetDictOptionAction
+from pycbc.types import required_opts, required_opts_multi_ifo
+from pycbc.types import ensure_one_opt, ensure_one_opt_multi_ifo
+from pycbc.types import copy_opts_for_single_ifo, complex_same_precision_as
+from pycbc.inject import InjectionSet, SGBurstInjectionSet
+from pycbc.filter import resample_to_delta_t, lowpass, highpass, make_frequency_series
+from pycbc.filter.zpk import filter_zpk
+from pycbc.waveform.spa_tmplt import spa_distance
+import pycbc.psd
+from pycbc.fft import FFT, IFFT
+import pycbc.events
+import pycbc.frame
+import pycbc.filter
+
+logger = logging.getLogger('pycbc.strain.strain')
+
+
+[docs] +def next_power_of_2(n): + """Return the smallest integer power of 2 larger than the argument. + + Parameters + ---------- + n : int + A positive integer. + + Returns + ------- + m : int + Smallest integer power of 2 larger than n. + """ + return 1 << n.bit_length()
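For example (the results follow from the bit-length trick above; note the return value is strictly larger than the input, so an exact power of two is bumped up):

next_power_of_2(100)   # -> 128
next_power_of_2(64)    # -> 128, not 64, since the result must be larger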
+ + +
+[docs] +def detect_loud_glitches(strain, psd_duration=4., psd_stride=2., + psd_avg_method='median', low_freq_cutoff=30., + threshold=50., cluster_window=5., corrupt_time=4., + high_freq_cutoff=None, output_intermediates=False): + """Automatic identification of loud transients for gating purposes. + + This function first estimates the PSD of the input time series using the + FindChirp Welch method. Then it whitens the time series using that + estimate. Finally, it computes the magnitude of the whitened series, + thresholds it and applies the FindChirp clustering over time to the + surviving samples. + + Parameters + ---------- + strain : TimeSeries + Input strain time series to detect glitches over. + psd_duration : {float, 4} + Duration of the segments for PSD estimation in seconds. + psd_stride : {float, 2} + Separation between PSD estimation segments in seconds. + psd_avg_method : {string, 'median'} + Method for averaging PSD estimation segments. + low_freq_cutoff : {float, 30} + Minimum frequency to include in the whitened strain. + threshold : {float, 50} + Minimum magnitude of whitened strain for considering a transient to + be present. + cluster_window : {float, 5} + Length of time window to cluster surviving samples over, in seconds. + corrupt_time : {float, 4} + Amount of time to be discarded at the beginning and end of the input + time series. + high_frequency_cutoff : {float, None} + Maximum frequency to include in the whitened strain. If given, the + input series is downsampled accordingly. If omitted, the Nyquist + frequency is used. + output_intermediates : {bool, False} + Save intermediate time series for debugging. + """ + + if high_freq_cutoff: + strain = resample_to_delta_t(strain, 0.5 / high_freq_cutoff, + method='ldas') + else: + strain = strain.copy() + + # taper strain + corrupt_length = int(corrupt_time * strain.sample_rate) + w = numpy.arange(corrupt_length) / float(corrupt_length) + strain[0:corrupt_length] *= pycbc.types.Array(w, dtype=strain.dtype) + strain[(len(strain) - corrupt_length):] *= \ + pycbc.types.Array(w[::-1], dtype=strain.dtype) + + if output_intermediates: + strain.save_to_wav('strain_conditioned.wav') + + # zero-pad strain to a power-of-2 length + strain_pad_length = next_power_of_2(len(strain)) + pad_start = int(strain_pad_length / 2 - len(strain) / 2) + pad_end = pad_start + len(strain) + pad_epoch = strain.start_time - pad_start / float(strain.sample_rate) + strain_pad = pycbc.types.TimeSeries( + pycbc.types.zeros(strain_pad_length, dtype=strain.dtype), + delta_t=strain.delta_t, copy=False, epoch=pad_epoch) + strain_pad[pad_start:pad_end] = strain[:] + + # estimate the PSD + psd = pycbc.psd.welch(strain[corrupt_length:(len(strain)-corrupt_length)], + seg_len=int(psd_duration * strain.sample_rate), + seg_stride=int(psd_stride * strain.sample_rate), + avg_method=psd_avg_method, + require_exact_data_fit=False) + psd = pycbc.psd.interpolate(psd, 1. / strain_pad.duration) + psd = pycbc.psd.inverse_spectrum_truncation( + psd, int(psd_duration * strain.sample_rate), + low_frequency_cutoff=low_freq_cutoff, + trunc_method='hann') + kmin = int(low_freq_cutoff / psd.delta_f) + psd[0:kmin] = numpy.inf + if high_freq_cutoff: + kmax = int(high_freq_cutoff / psd.delta_f) + psd[kmax:] = numpy.inf + + # whiten + strain_tilde = strain_pad.to_frequencyseries() + + if high_freq_cutoff: + norm = high_freq_cutoff - low_freq_cutoff + else: + norm = strain.sample_rate / 2. 
- low_freq_cutoff + strain_tilde *= (psd * norm) ** (-0.5) + + strain_pad = strain_tilde.to_timeseries() + + if output_intermediates: + strain_pad[pad_start:pad_end].save_to_wav('strain_whitened.wav') + + mag = abs(strain_pad[pad_start:pad_end]) + + if output_intermediates: + mag.save('strain_whitened_mag.npy') + + mag = mag.numpy() + + # remove strain corrupted by filters at the ends + mag[0:corrupt_length] = 0 + mag[-1:-corrupt_length-1:-1] = 0 + + # find peaks and their times + indices = numpy.where(mag > threshold)[0] + cluster_idx = pycbc.events.findchirp_cluster_over_window( + indices, numpy.array(mag[indices]), + int(cluster_window*strain.sample_rate)) + times = [idx * strain.delta_t + strain.start_time \ + for idx in indices[cluster_idx]] + + return times
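A minimal usage sketch of the glitch finder; white Gaussian noise stands in for detector data (so no glitches are expected to be found), and the duration, sample rate and epoch are arbitrary:

import numpy
from pycbc.types import TimeSeries
from pycbc.strain.strain import detect_loud_glitches

# 256 s of unit-variance white noise sampled at 4096 Hz
strain = TimeSeries(numpy.random.normal(size=256 * 4096),
                    delta_t=1.0 / 4096, epoch=1126259200)
glitch_times = detect_loud_glitches(strain, threshold=50.,
                                    low_freq_cutoff=30.,
                                    cluster_window=5., corrupt_time=4.)
# glitch_times is a list of GPS times where the whitened strain
# magnitude exceeds the threshold, after clustering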
+ + +
+[docs] +def from_cli(opt, dyn_range_fac=1, precision='single', + inj_filter_rejector=None): + """Parses the CLI options related to strain data reading and conditioning. + + Parameters + ---------- + opt : object + Result of parsing the CLI with OptionParser, or any object with the + required attributes (gps-start-time, gps-end-time, strain-high-pass, + pad-data, sample-rate, (frame-cache or frame-files), channel-name, + fake-strain, fake-strain-seed, fake-strain-from-file, gating_file). + dyn_range_fac : {float, 1}, optional + A large constant to reduce the dynamic range of the strain. + precision : string + Precision of the returned strain ('single' or 'double'). + inj_filter_rejector : InjFilterRejector instance; optional, default=None + If given send the InjFilterRejector instance to the inject module so + that it can store a reduced representation of injections if + necessary. + + Returns + ------- + strain : TimeSeries + The time series containing the conditioned strain data. + """ + gating_info = {} + + injector = InjectionSet.from_cli(opt) + + if opt.frame_cache or opt.frame_files or opt.frame_type or opt.hdf_store: + if opt.frame_cache: + frame_source = opt.frame_cache + if opt.frame_files: + frame_source = opt.frame_files + + logger.info("Reading Frames") + + if hasattr(opt, 'frame_sieve') and opt.frame_sieve: + sieve = opt.frame_sieve + else: + sieve = None + + if opt.frame_type: + strain = pycbc.frame.query_and_read_frame( + opt.frame_type, opt.channel_name, + start_time=opt.gps_start_time-opt.pad_data, + end_time=opt.gps_end_time+opt.pad_data, + sieve=sieve) + elif opt.frame_files or opt.frame_cache: + strain = pycbc.frame.read_frame( + frame_source, opt.channel_name, + start_time=opt.gps_start_time-opt.pad_data, + end_time=opt.gps_end_time+opt.pad_data, + sieve=sieve) + elif opt.hdf_store: + strain = pycbc.frame.read_store(opt.hdf_store, opt.channel_name, + opt.gps_start_time - opt.pad_data, + opt.gps_end_time + opt.pad_data) + + elif opt.fake_strain or opt.fake_strain_from_file: + logger.info("Generating Fake Strain") + duration = opt.gps_end_time - opt.gps_start_time + duration += 2 * opt.pad_data + pdf = 1.0 / opt.fake_strain_filter_duration + fake_flow = opt.fake_strain_flow + fake_rate = opt.fake_strain_sample_rate + fake_extra_args = opt.fake_strain_extra_args + plen = round(opt.sample_rate / pdf) // 2 + 1 + if opt.fake_strain_from_file: + logger.info("Reading ASD from file") + strain_psd = pycbc.psd.from_txt(opt.fake_strain_from_file, + plen, pdf, + fake_flow, + is_asd_file=True) + elif opt.fake_strain != 'zeroNoise': + logger.info("Making PSD for strain") + strain_psd = pycbc.psd.from_string(opt.fake_strain, plen, pdf, + fake_flow, **fake_extra_args) + + if opt.fake_strain == 'zeroNoise': + logger.info("Making zero-noise time series") + strain = TimeSeries(pycbc.types.zeros(duration * fake_rate), + delta_t=1.0 / fake_rate, + epoch=opt.gps_start_time - opt.pad_data) + else: + logger.info("Making colored noise") + from pycbc.noise.reproduceable import colored_noise + strain = colored_noise(strain_psd, + opt.gps_start_time - opt.pad_data, + opt.gps_end_time + opt.pad_data, + seed=opt.fake_strain_seed, + sample_rate=fake_rate, + low_frequency_cutoff=fake_flow, + filter_duration=1.0/pdf) + + if not strain.sample_rate_close(fake_rate): + err_msg = "Actual sample rate of generated data does not match " + err_msg += "that expected. Possible causes of this:\n" + err_msg += "The desired duration is not a multiple of delta_t. " + err_msg += "e.g. 
If using LISA with delta_t = 15 the duration " + err_msg += "must be a multiple of 15 seconds." + raise ValueError(err_msg) + + if not opt.channel_name and (opt.injection_file \ + or opt.sgburst_injection_file): + raise ValueError('Please provide channel names with the format ' + 'ifo:channel (e.g. H1:CALIB-STRAIN) to inject ' + 'simulated signals into fake strain') + + if opt.zpk_z and opt.zpk_p and opt.zpk_k: + logger.info("Highpass Filtering") + strain = highpass(strain, frequency=opt.strain_high_pass) + + logger.info("Applying zpk filter") + z = numpy.array(opt.zpk_z) + p = numpy.array(opt.zpk_p) + k = float(opt.zpk_k) + strain = filter_zpk(strain.astype(numpy.float64), z, p, k) + + if opt.normalize_strain: + logger.info("Dividing strain by constant") + l = opt.normalize_strain + strain = strain / l + + if opt.strain_high_pass: + logger.info("Highpass Filtering") + strain = highpass(strain, frequency=opt.strain_high_pass) + + if opt.sample_rate: + logger.info("Resampling data") + strain = resample_to_delta_t(strain, + 1. / opt.sample_rate, + method='ldas') + + if injector is not None: + logger.info("Applying injections") + injections = \ + injector.apply(strain, opt.channel_name.split(':')[0], + distance_scale=opt.injection_scale_factor, + injection_sample_rate=opt.injection_sample_rate, + inj_filter_rejector=inj_filter_rejector) + + if opt.sgburst_injection_file: + logger.info("Applying sine-Gaussian burst injections") + injector = SGBurstInjectionSet(opt.sgburst_injection_file) + injector.apply(strain, opt.channel_name.split(':')[0], + distance_scale=opt.injection_scale_factor) + + if precision == 'single': + logger.info("Converting to float32") + strain = (strain * dyn_range_fac).astype(pycbc.types.float32) + elif precision == "double": + logger.info("Converting to float64") + strain = (strain * dyn_range_fac).astype(pycbc.types.float64) + else: + raise ValueError("Unrecognized precision {}".format(precision)) + + if opt.gating_file is not None: + logger.info("Gating times contained in gating file") + gate_params = numpy.loadtxt(opt.gating_file) + if len(gate_params.shape) == 1: + gate_params = [gate_params] + for gate_time, gate_window, gate_taper in gate_params: + strain = strain.gate(gate_time, window=gate_window, + method=opt.gating_method, + copy=False, + taper_width=gate_taper) + gating_info['file'] = \ + [gp for gp in gate_params \ + if (gp[0] + gp[1] + gp[2] >= strain.start_time) \ + and (gp[0] - gp[1] - gp[2] <= strain.end_time)] + + if opt.autogating_threshold is not None: + gating_info['auto'] = [] + for _ in range(opt.autogating_max_iterations): + glitch_times = detect_loud_glitches( + strain, threshold=opt.autogating_threshold, + cluster_window=opt.autogating_cluster, + low_freq_cutoff=opt.strain_high_pass, + corrupt_time=opt.pad_data + opt.autogating_pad) + gate_params = [[gt, opt.autogating_width, opt.autogating_taper] + for gt in glitch_times] + gating_info['auto'] += gate_params + for gate_time, gate_window, gate_taper in gate_params: + strain = strain.gate(gate_time, window=gate_window, + method=opt.gating_method, + copy=False, + taper_width=gate_taper) + if len(glitch_times) > 0: + logger.info('Autogating at %s', + ', '.join(['%.3f' % gt + for gt in glitch_times])) + else: + break + + if opt.strain_high_pass: + logger.info("Highpass Filtering") + strain = highpass(strain, frequency=opt.strain_high_pass) + + if opt.strain_low_pass: + logger.info("Lowpass Filtering") + strain = lowpass(strain, frequency=opt.strain_low_pass) + + if hasattr(opt, 'witness_frame_type') 
and opt.witness_frame_type: + stilde = strain.to_frequencyseries() + from pycbc.io.hdf import HFile + tf_file = HFile(opt.witness_tf_file) + for key in tf_file: + witness = pycbc.frame.query_and_read_frame(opt.witness_frame_type, + str(key), + start_time=strain.start_time, + end_time=strain.end_time) + witness = (witness * dyn_range_fac).astype(strain.dtype) + tf = pycbc.types.load_frequencyseries(opt.witness_tf_file, + group=key) + tf = tf.astype(stilde.dtype) + + flen = int(opt.witness_filter_length * strain.sample_rate) + tf = pycbc.psd.interpolate(tf, stilde.delta_f) + + tf_time = tf.to_timeseries() + window = Array(numpy.hanning(flen * 2), dtype=strain.dtype) + tf_time[0:flen] *= window[flen:] + tf_time[len(tf_time)-flen:] *= window[0:flen] + tf = tf_time.to_frequencyseries() + + kmax = min(len(tf), len(stilde) - 1) + stilde[:kmax] -= tf[:kmax] * witness.to_frequencyseries()[:kmax] + + strain = stilde.to_timeseries() + + if opt.pad_data: + logger.info("Remove Padding") + start = int(opt.pad_data * strain.sample_rate) + end = int(len(strain) - strain.sample_rate * opt.pad_data) + strain = strain[start:end] + + if opt.taper_data: + logger.info("Tapering data") + # Use auto-gating, a one-sided gate is a taper + pd_taper_window = opt.taper_data + gate_params = [(strain.start_time, 0., pd_taper_window)] + gate_params.append((strain.end_time, 0., pd_taper_window)) + gate_data(strain, gate_params) + + if injector is not None: + strain.injections = injections + strain.gating_info = gating_info + + return strain
+ + +
+[docs] +def from_cli_single_ifo(opt, ifo, inj_filter_rejector=None, **kwargs): + """ + Get the strain for a single ifo when using the multi-detector CLI + """ + single_det_opt = copy_opts_for_single_ifo(opt, ifo) + return from_cli(single_det_opt, + inj_filter_rejector=inj_filter_rejector, **kwargs)
+ + +
+[docs] +def from_cli_multi_ifos(opt, ifos, inj_filter_rejector_dict=None, **kwargs): + """ + Get the strain for all ifos when using the multi-detector CLI + """ + strain = {} + if inj_filter_rejector_dict is None: + inj_filter_rejector_dict = {ifo: None for ifo in ifos} + for ifo in ifos: + strain[ifo] = from_cli_single_ifo(opt, ifo, + inj_filter_rejector_dict[ifo], **kwargs) + return strain
+ + + +
+[docs] +def insert_strain_option_group(parser, gps_times=True): + """ Add strain-related options to the optparser object. + + Adds the options used to call the pycbc.strain.from_cli function to an + optparser as an OptionGroup. This should be used if you + want to use these options in your code. + + Parameters + ----------- + parser : object + OptionParser instance. + gps_times : bool, optional + Include ``--gps-start-time`` and ``--gps-end-time`` options. Default + is True. + """ + + data_reading_group = parser.add_argument_group("Options for obtaining h(t)", + "These options are used for generating h(t) either by " + "reading from a file or by generating it. This is only " + "needed if the PSD is to be estimated from the data, ie. " + " if the --psd-estimation option is given.") + + # Required options + if gps_times: + data_reading_group.add_argument("--gps-start-time", + help="The gps start time of the data " + "(integer seconds)", type=int) + data_reading_group.add_argument("--gps-end-time", + help="The gps end time of the data " + "(integer seconds)", type=int) + + data_reading_group.add_argument("--strain-high-pass", type=float, + help="High pass frequency") + data_reading_group.add_argument("--strain-low-pass", type=float, + help="Low pass frequency") + data_reading_group.add_argument("--pad-data", default=8, + help="Extra padding to remove highpass corruption " + "(integer seconds, default 8)", type=int) + data_reading_group.add_argument("--taper-data", + help="Taper ends of data to zero using the supplied length as a " + "window (integer seconds)", type=int, default=0) + data_reading_group.add_argument("--sample-rate", type=float, + help="The sample rate to use for h(t) generation (integer Hz)") + data_reading_group.add_argument("--channel-name", type=str, + help="The channel containing the gravitational strain data") + + # Read from cache file + data_reading_group.add_argument("--frame-cache", type=str, nargs="+", + help="Cache file containing the frame locations.") + # Read from frame files + data_reading_group.add_argument("--frame-files", + type=str, nargs="+", + help="list of frame files") + # Read from hdf store file + data_reading_group.add_argument("--hdf-store", + type=str, + help="Store of time series data in hdf format") + # Use datafind to get frame files + data_reading_group.add_argument("--frame-type", + type=str, + metavar="S:TYPE", + help="(optional), replaces frame-files. 
Use datafind " + "to get the needed frame file(s) of this type " + "from site S.") + # Filter frame files by URL + data_reading_group.add_argument("--frame-sieve", + type=str, + help="(optional), Only use frame files where the " + "URL matches the regular expression given.") + + # Generate gaussian noise with given psd + data_reading_group.add_argument("--fake-strain", + help="Name of model PSD for generating fake gaussian noise.", + choices=pycbc.psd.get_psd_model_list() + ['zeroNoise']) + data_reading_group.add_argument("--fake-strain-extra-args", + nargs='+', action=DictOptionAction, + metavar='PARAM:VALUE', default={}, type=float, + help="(optional) Extra arguments passed to " + "the PSD models.") + data_reading_group.add_argument("--fake-strain-seed", type=int, default=0, + help="Seed value for the generation of fake colored" + " gaussian noise") + data_reading_group.add_argument("--fake-strain-from-file", + help="File containing ASD for generating fake noise from it.") + data_reading_group.add_argument("--fake-strain-flow", + default=1.0, type=float, + help="Low frequency cutoff of the fake strain") + data_reading_group.add_argument("--fake-strain-filter-duration", + default=128.0, type=float, + help="Duration in seconds of the fake data coloring filter") + data_reading_group.add_argument("--fake-strain-sample-rate", + default=16384, type=float, + help="Sample rate of the fake data generation") + + # Injection options + data_reading_group.add_argument("--injection-file", type=str, + help="(optional) Injection file containing parameters" + " of CBC signals to be added to the strain") + data_reading_group.add_argument("--sgburst-injection-file", type=str, + help="(optional) Injection file containing parameters" + "of sine-Gaussian burst signals to add to the strain") + data_reading_group.add_argument("--injection-scale-factor", type=float, + default=1, + help="Divide injections by this factor " + "before adding to the strain data") + data_reading_group.add_argument("--injection-sample-rate", type=float, + help="Sample rate to use for injections (integer Hz). " + "Typically similar to the strain data sample rate." + "If not provided, the strain sample rate will be " + "used") + data_reading_group.add_argument("--injection-f-ref", type=float, + help="Reference frequency in Hz for creating CBC " + "injections from an XML file") + data_reading_group.add_argument("--injection-f-final", type=float, + help="Override the f_final field of a CBC XML " + "injection file (frequency in Hz)") + + # Gating options + data_reading_group.add_argument("--gating-file", type=str, + help="(optional) Text file of gating segments to apply." 
+ " Format of each line is (all values in seconds):" + " gps_time zeros_half_width pad_half_width") + data_reading_group.add_argument('--autogating-threshold', type=float, + metavar='SIGMA', + help='If given, find and gate glitches ' + 'producing a deviation larger than ' + 'SIGMA in the whitened strain time ' + 'series.') + data_reading_group.add_argument('--autogating-max-iterations', type=int, + metavar='SIGMA', default=1, + help='If given, iteratively apply ' + 'autogating') + data_reading_group.add_argument('--autogating-cluster', type=float, + metavar='SECONDS', default=5., + help='Length of clustering window for ' + 'detecting glitches for autogating.') + data_reading_group.add_argument('--autogating-width', type=float, + metavar='SECONDS', default=0.25, + help='Half-width of the gating window.') + data_reading_group.add_argument('--autogating-taper', type=float, + metavar='SECONDS', default=0.25, + help='Taper the strain before and after ' + 'each gating window over a duration ' + 'of SECONDS.') + data_reading_group.add_argument('--autogating-pad', type=float, + metavar='SECONDS', default=16, + help='Ignore the given length of whitened ' + 'strain at the ends of a segment, to ' + 'avoid filters ringing.') + data_reading_group.add_argument('--gating-method', type=str, + default='taper', + help='Choose the method for gating. ' + 'Default: `taper`', + choices=['hard', 'taper', 'paint']) + # Optional + data_reading_group.add_argument("--normalize-strain", type=float, + help="(optional) Divide frame data by constant.") + data_reading_group.add_argument("--zpk-z", type=float, nargs="+", + help="(optional) Zero-pole-gain (zpk) filter strain. " + "A list of zeros for transfer function") + data_reading_group.add_argument("--zpk-p", type=float, nargs="+", + help="(optional) Zero-pole-gain (zpk) filter strain. " + "A list of poles for transfer function") + data_reading_group.add_argument("--zpk-k", type=float, + help="(optional) Zero-pole-gain (zpk) filter strain. " + "Transfer function gain") + + # Options to apply to subtract noise from a witness channel and known + # transfer function. + data_reading_group.add_argument("--witness-frame-type", type=str, + help="(optional), frame type which will be use to query the" + " witness channel data.") + data_reading_group.add_argument("--witness-tf-file", type=str, + help="an hdf file containing the transfer" + " functions and the associated channel names") + data_reading_group.add_argument("--witness-filter-length", type=float, + help="filter length in seconds for the transfer function") + + return data_reading_group
+ + +# FIXME: This repeats almost all of the options above. Any nice way of reducing +# this? +
+[docs] +def insert_strain_option_group_multi_ifo(parser, gps_times=True): + """ + Adds the options used to call the pycbc.strain.from_cli function to an + optparser as an OptionGroup. This should be used if you + want to use these options in your code. + + Parameters + ----------- + parser : object + OptionParser instance. + gps_times : bool, optional + Include ``--gps-start-time`` and ``--gps-end-time`` options. Default + is True. + """ + + data_reading_group_multi = parser.add_argument_group("Options for obtaining" + " h(t)", + "These options are used for generating h(t) either by " + "reading from a file or by generating it. This is only " + "needed if the PSD is to be estimated from the data, ie. " + "if the --psd-estimation option is given. This group " + "supports reading from multiple ifos simultaneously.") + + # Required options + if gps_times: + data_reading_group_multi.add_argument( + "--gps-start-time", nargs='+', action=MultiDetOptionAction, + metavar='IFO:TIME', type=int, + help="The gps start time of the data (integer seconds)") + data_reading_group_multi.add_argument( + "--gps-end-time", nargs='+', action=MultiDetOptionAction, + metavar='IFO:TIME', type=int, + help="The gps end time of the data (integer seconds)") + + data_reading_group_multi.add_argument("--strain-high-pass", nargs='+', + action=MultiDetOptionAction, + type=float, metavar='IFO:FREQUENCY', + help="High pass frequency") + data_reading_group_multi.add_argument("--strain-low-pass", nargs='+', + action=MultiDetOptionAction, + type=float, metavar='IFO:FREQUENCY', + help="Low pass frequency") + data_reading_group_multi.add_argument("--pad-data", nargs='+', default=8, + action=MultiDetOptionAction, + type=int, metavar='IFO:LENGTH', + help="Extra padding to remove highpass corruption " + "(integer seconds, default 8)") + data_reading_group_multi.add_argument("--taper-data", nargs='+', + action=MultiDetOptionAction, + type=int, default=0, metavar='IFO:LENGTH', + help="Taper ends of data to zero using the " + "supplied length as a window (integer seconds)") + data_reading_group_multi.add_argument("--sample-rate", type=float, + nargs='+', + action=MultiDetOptionAction, metavar='IFO:RATE', + help="The sample rate to use for h(t) generation " + " (integer Hz).") + data_reading_group_multi.add_argument("--channel-name", type=str, nargs='+', + action=MultiDetOptionActionSpecial, + metavar='IFO:CHANNEL', + help="The channel containing the gravitational " + "strain data") + + # Read from cache file + data_reading_group_multi.add_argument("--frame-cache", type=str, nargs="+", + action=MultiDetOptionAppendAction, + metavar='IFO:FRAME_CACHE', + help="Cache file containing the frame locations.") + # Read from frame files + data_reading_group_multi.add_argument("--frame-files", type=str, nargs="+", + action=MultiDetOptionAppendAction, + metavar='IFO:FRAME_FILES', + help="list of frame files") + # Read from hdf store file + data_reading_group_multi.add_argument("--hdf-store", type=str, nargs='+', + action=MultiDetOptionAction, + metavar='IFO:HDF_STORE_FILE', + help="Store of time series data in hdf format") + # Use datafind to get frame files + data_reading_group_multi.add_argument("--frame-type", type=str, nargs="+", + action=MultiDetOptionActionSpecial, + metavar='IFO:FRAME_TYPE', + help="(optional) Replaces frame-files. 
" + "Use datafind to get the needed frame " + "file(s) of this type.") + # Filter frame files by URL + data_reading_group_multi.add_argument("--frame-sieve", type=str, nargs="+", + action=MultiDetOptionAction, + metavar='IFO:FRAME_SIEVE', + help="(optional), Only use frame files where the " + "URL matches the regular expression given.") + # Generate gaussian noise with given psd + data_reading_group_multi.add_argument("--fake-strain", type=str, nargs="+", + action=MultiDetOptionAction, metavar='IFO:CHOICE', + help="Name of model PSD for generating fake " + "gaussian noise. Choose from %s or zeroNoise" \ + %((', ').join(pycbc.psd.get_lalsim_psd_list()),) ) + data_reading_group_multi.add_argument("--fake-strain-extra-args", + nargs='+', action=MultiDetDictOptionAction, + metavar='DETECTOR:PARAM:VALUE', default={}, + type=float, help="(optional) Extra arguments " + "passed to the PSD models.") + data_reading_group_multi.add_argument("--fake-strain-seed", type=int, + default=0, nargs="+", action=MultiDetOptionAction, + metavar='IFO:SEED', + help="Seed value for the generation of fake " + "colored gaussian noise") + data_reading_group_multi.add_argument("--fake-strain-from-file", nargs="+", + action=MultiDetOptionAction, metavar='IFO:FILE', + help="File containing ASD for generating fake " + "noise from it.") + data_reading_group_multi.add_argument("--fake-strain-flow", + default=1.0, type=float, + nargs="+", action=MultiDetOptionAction, + help="Low frequency cutoff of the fake strain") + data_reading_group_multi.add_argument("--fake-strain-filter-duration", + default=128.0, type=float, + nargs="+", action=MultiDetOptionAction, + help="Duration in seconds of the fake data coloring filter") + data_reading_group_multi.add_argument("--fake-strain-sample-rate", + default=16384, type=float, + nargs="+", action=MultiDetOptionAction, + help="Sample rate of the fake data generation") + + # Injection options + data_reading_group_multi.add_argument("--injection-file", type=str, + nargs="+", action=MultiDetOptionAction, + metavar='IFO:FILE', + help="(optional) Injection file containing parameters" + "of CBC signals to be added to the strain") + data_reading_group_multi.add_argument("--sgburst-injection-file", type=str, + nargs="+", action=MultiDetOptionAction, + metavar='IFO:FILE', + help="(optional) Injection file containing parameters" + "of sine-Gaussian burst signals to add to the strain") + data_reading_group_multi.add_argument("--injection-scale-factor", + type=float, nargs="+", action=MultiDetOptionAction, + metavar="IFO:VAL", default=1., + help="Divide injections by this factor " + "before adding to the strain data") + data_reading_group_multi.add_argument("--injection-sample-rate", + type=float, nargs="+", action=MultiDetOptionAction, + metavar="IFO:VAL", + help="Sample rate to use for injections (integer Hz). " + "Typically similar to the strain data sample rate." 
+ "If not provided, the strain sample rate will be " + "used") + + data_reading_group_multi.add_argument("--injection-f-ref", type=float, + action=MultiDetOptionAction, metavar='IFO:VALUE', + help="Reference frequency in Hz for creating CBC " + "injections from an XML file") + data_reading_group_multi.add_argument('--injection-f-final', type=float, + action=MultiDetOptionAction, metavar='IFO:VALUE', + help="Override the f_final field of a CBC XML " + "injection file (frequency in Hz)") + + # Gating options + data_reading_group_multi.add_argument("--gating-file", nargs="+", + action=MultiDetOptionAction, + metavar='IFO:FILE', + help='(optional) Text file of gating segments to apply.' + ' Format of each line (units s) :' + ' gps_time zeros_half_width pad_half_width') + data_reading_group_multi.add_argument('--autogating-threshold', type=float, + nargs="+", action=MultiDetOptionAction, + metavar='IFO:SIGMA', + help='If given, find and gate glitches producing a ' + 'deviation larger than SIGMA in the whitened strain' + ' time series') + data_reading_group_multi.add_argument('--autogating-max-iterations', type=int, + metavar='SIGMA', default=1, + help='If given, iteratively apply ' + 'autogating') + data_reading_group_multi.add_argument('--autogating-cluster', type=float, + nargs="+", action=MultiDetOptionAction, + metavar='IFO:SECONDS', default=5., + help='Length of clustering window for ' + 'detecting glitches for autogating.') + data_reading_group_multi.add_argument('--autogating-width', type=float, + nargs="+", action=MultiDetOptionAction, + metavar='IFO:SECONDS', default=0.25, + help='Half-width of the gating window.') + data_reading_group_multi.add_argument('--autogating-taper', type=float, + nargs="+", action=MultiDetOptionAction, + metavar='IFO:SECONDS', default=0.25, + help='Taper the strain before and after ' + 'each gating window over a duration ' + 'of SECONDS.') + data_reading_group_multi.add_argument('--autogating-pad', type=float, + nargs="+", action=MultiDetOptionAction, + metavar='IFO:SECONDS', default=16, + help='Ignore the given length of whitened ' + 'strain at the ends of a segment, to ' + 'avoid filters ringing.') + data_reading_group_multi.add_argument('--gating-method', type=str, + nargs='+', action=MultiDetOptionAction, + default='taper', + help='Choose the method for gating. ' + 'Default: `taper`', + choices=['hard', 'taper', 'paint']) + + # Optional + data_reading_group_multi.add_argument("--normalize-strain", type=float, + nargs="+", action=MultiDetOptionAction, + metavar='IFO:VALUE', + help="(optional) Divide frame data by constant.") + data_reading_group_multi.add_argument("--zpk-z", type=float, + nargs="+", action=MultiDetOptionAppendAction, + metavar='IFO:VALUE', + help="(optional) Zero-pole-gain (zpk) filter strain. " + "A list of zeros for transfer function") + data_reading_group_multi.add_argument("--zpk-p", type=float, + nargs="+", action=MultiDetOptionAppendAction, + metavar='IFO:VALUE', + help="(optional) Zero-pole-gain (zpk) filter strain. " + "A list of poles for transfer function") + data_reading_group_multi.add_argument("--zpk-k", type=float, + nargs="+", action=MultiDetOptionAppendAction, + metavar='IFO:VALUE', + help="(optional) Zero-pole-gain (zpk) filter strain. " + "Transfer function gain") + + return data_reading_group_multi
+ + + +ensure_one_opt_groups = [] +ensure_one_opt_groups.append(['--frame-cache','--fake-strain', + '--fake-strain-from-file', + '--frame-files', '--frame-type', + '--hdf-store']) + +required_opts_list = ['--gps-start-time', '--gps-end-time', + '--pad-data', '--sample-rate', + '--channel-name'] + + +
+[docs]
+def verify_strain_options(opts, parser):
+    """Sanity check the provided strain arguments.
+
+    Parses the strain data CLI options and verifies that they are consistent
+    and reasonable.
+
+    Parameters
+    ----------
+    opts : object
+        Result of parsing the CLI with the argument parser, or any object with
+        the required attributes (gps-start-time, gps-end-time,
+        strain-high-pass, pad-data, sample-rate, frame-cache, channel-name,
+        fake-strain, fake-strain-seed).
+    parser : object
+        ArgumentParser instance.
+    """
+    for opt_group in ensure_one_opt_groups:
+        ensure_one_opt(opts, parser, opt_group)
+    required_opts(opts, parser, required_opts_list)
+ + + +
+[docs]
+def verify_strain_options_multi_ifo(opts, parser, ifos):
+    """Sanity check the provided strain arguments.
+
+    Parses the strain data CLI options and verifies that they are consistent
+    and reasonable.
+
+    Parameters
+    ----------
+    opts : object
+        Result of parsing the CLI with the argument parser, or any object with
+        the required attributes (gps-start-time, gps-end-time,
+        strain-high-pass, pad-data, sample-rate, frame-cache, channel-name,
+        fake-strain, fake-strain-seed).
+    parser : object
+        ArgumentParser instance.
+    ifos : list of strings
+        List of ifos for which to verify the options.
+    """
+    for ifo in ifos:
+        for opt_group in ensure_one_opt_groups:
+            ensure_one_opt_multi_ifo(opts, parser, ifo, opt_group)
+        required_opts_multi_ifo(opts, parser, ifo, required_opts_list)
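+# Both verifiers are intended to run immediately after parsing; a sketch,
+# assuming the matching option group was inserted into `parser` beforehand:
+#
+#     opts = parser.parse_args()
+#     verify_strain_options(opts, parser)
+#     # or, per detector, for the multi-detector option group:
+#     verify_strain_options_multi_ifo(opts, parser, ['H1', 'L1'])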
+ + + +
+[docs] +def gate_data(data, gate_params): + """Apply a set of gating windows to a time series. + + Each gating window is + defined by a central time, a given duration (centered on the given + time) to zero out, and a given duration of smooth tapering on each side of + the window. The window function used for tapering is a Tukey window. + + Parameters + ---------- + data : TimeSeries + The time series to be gated. + gate_params : list + List of parameters for the gating windows. Each element should be a + list or tuple with 3 elements: the central time of the gating window, + the half-duration of the portion to zero out, and the duration of the + Tukey tapering on each side. All times in seconds. The total duration + of the data affected by one gating window is thus twice the second + parameter plus twice the third parameter. + + Returns + ------- + data: TimeSeries + The gated time series. + """ + def inverted_tukey(M, n_pad): + midlen = M - 2*n_pad + if midlen < 0: + raise ValueError("No zeros left after applying padding.") + padarr = 0.5*(1.+numpy.cos(numpy.pi*numpy.arange(n_pad)/n_pad)) + return numpy.concatenate((padarr,numpy.zeros(midlen),padarr[::-1])) + + sample_rate = 1./data.delta_t + temp = data.data + + for glitch_time, glitch_width, pad_width in gate_params: + t_start = glitch_time - glitch_width - pad_width - data.start_time + t_end = glitch_time + glitch_width + pad_width - data.start_time + if t_start > data.duration or t_end < 0.: + continue # Skip gate segments that don't overlap + win_samples = int(2*sample_rate*(glitch_width+pad_width)) + pad_samples = int(sample_rate*pad_width) + window = inverted_tukey(win_samples, pad_samples) + offset = int(t_start * sample_rate) + idx1 = max(0, -offset) + idx2 = min(len(window), len(data)-offset) + temp[idx1+offset:idx2+offset] *= window[idx1:idx2] + + return data
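+# A small, self-contained sketch (illustrative only) of calling gate_data:
+# zero out 0.2 s of data centred on t = 5 s (half-width 0.1 s) with 0.1 s
+# Tukey tapers on each side. TimeSeries comes from pycbc.types.
+#
+#     import numpy
+#     from pycbc.types import TimeSeries
+#
+#     ts = TimeSeries(numpy.random.normal(size=4096), delta_t=1.0/256, epoch=0)
+#     gated = gate_data(ts, [(5.0, 0.1, 0.1)])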
+ + + +
+[docs] +class StrainSegments(object): + """ Class for managing manipulation of strain data for the purpose of + matched filtering. This includes methods for segmenting and + conditioning. + """ + def __init__(self, strain, segment_length=None, segment_start_pad=0, + segment_end_pad=0, trigger_start=None, trigger_end=None, + filter_inj_only=False, injection_window=None, + allow_zero_padding=False): + """ Determine how to chop up the strain data into smaller segments + for analysis. + """ + self._fourier_segments = None + self.strain = strain + + self.delta_t = strain.delta_t + self.sample_rate = strain.sample_rate + + if segment_length: + seg_len = segment_length + else: + seg_len = strain.duration + + self.delta_f = 1.0 / seg_len + self.time_len = int(seg_len * self.sample_rate) + self.freq_len = self.time_len // 2 + 1 + + seg_end_pad = segment_end_pad + seg_start_pad = segment_start_pad + + if not trigger_start: + trigger_start = int(strain.start_time) + segment_start_pad + else: + if not allow_zero_padding: + min_start_time = int(strain.start_time) + segment_start_pad + else: + min_start_time = int(strain.start_time) + if trigger_start < min_start_time: + err_msg = "Trigger start time must be within analysable " + err_msg += "window. Asked to start from %d " %(trigger_start) + err_msg += "but can only analyse from %d." %(min_start_time) + raise ValueError(err_msg) + + if not trigger_end: + trigger_end = int(strain.end_time) - segment_end_pad + else: + if not allow_zero_padding: + max_end_time = int(strain.end_time) - segment_end_pad + else: + max_end_time = int(strain.end_time) + if trigger_end > max_end_time: + err_msg = "Trigger end time must be within analysable " + err_msg += "window. Asked to end at %d " %(trigger_end) + err_msg += "but can only analyse to %d." 
%(max_end_time) + raise ValueError(err_msg) + + + throwaway_size = seg_start_pad + seg_end_pad + seg_width = seg_len - throwaway_size + + # The amount of time we can actually analyze given the + # amount of padding that is needed + analyzable = trigger_end - trigger_start + data_start = (trigger_start - segment_start_pad) - \ + int(strain.start_time) + data_end = trigger_end + segment_end_pad - int(strain.start_time) + data_dur = data_end - data_start + data_start = data_start * strain.sample_rate + data_end = data_end * strain.sample_rate + + #number of segments we need to analyze this data + num_segs = int(numpy.ceil(float(analyzable) / float(seg_width))) + + # The offset we will use between segments + seg_offset = int(numpy.ceil(analyzable / float(num_segs))) + self.segment_slices = [] + self.analyze_slices = [] + + # Determine how to chop up the strain into smaller segments + for nseg in range(num_segs-1): + # boundaries for time slices into the strain + seg_start = int(data_start + (nseg*seg_offset) * strain.sample_rate) + seg_end = int(seg_start + seg_len * strain.sample_rate) + seg_slice = slice(seg_start, seg_end) + self.segment_slices.append(seg_slice) + + # boundaries for the analyzable portion of the segment + ana_start = int(seg_start_pad * strain.sample_rate) + ana_end = int(ana_start + seg_offset * strain.sample_rate) + ana_slice = slice(ana_start, ana_end) + self.analyze_slices.append(ana_slice) + + # The last segment takes up any integer boundary slop + seg_end = int(data_end) + seg_start = int(seg_end - seg_len * strain.sample_rate) + seg_slice = slice(seg_start, seg_end) + self.segment_slices.append(seg_slice) + + remaining = (data_dur - ((num_segs - 1) * seg_offset + seg_start_pad)) + ana_start = int((seg_len - remaining) * strain.sample_rate) + ana_end = int((seg_len - seg_end_pad) * strain.sample_rate) + ana_slice = slice(ana_start, ana_end) + self.analyze_slices.append(ana_slice) + + self.full_segment_slices = copy.deepcopy(self.segment_slices) + + #Remove segments that are outside trig start and end + segment_slices_red = [] + analyze_slices_red = [] + trig_start_idx = (trigger_start - int(strain.start_time)) * strain.sample_rate + trig_end_idx = (trigger_end - int(strain.start_time)) * strain.sample_rate + + if filter_inj_only and hasattr(strain, 'injections'): + end_times = strain.injections.end_times() + end_times = [time for time in end_times if float(time) < trigger_end and float(time) > trigger_start] + inj_idx = [(float(time) - float(strain.start_time)) * strain.sample_rate for time in end_times] + + for seg, ana in zip(self.segment_slices, self.analyze_slices): + start = ana.start + stop = ana.stop + cum_start = start + seg.start + cum_end = stop + seg.start + + # adjust first segment + if trig_start_idx > cum_start: + start += (trig_start_idx - cum_start) + + # adjust last segment + if trig_end_idx < cum_end: + stop -= (cum_end - trig_end_idx) + + if filter_inj_only and hasattr(strain, 'injections'): + analyze_this = False + inj_window = strain.sample_rate * 8 + for inj_id in inj_idx: + if inj_id < (cum_end + inj_window) and \ + inj_id > (cum_start - inj_window): + analyze_this = True + + if not analyze_this: + continue + + if start < stop: + segment_slices_red.append(seg) + analyze_slices_red.append(slice(start, stop)) + + self.segment_slices = segment_slices_red + self.analyze_slices = analyze_slices_red + +
+[docs] + def fourier_segments(self): + """ Return a list of the FFT'd segments. + Return the list of FrequencySeries. Additional properties are + added that describe the strain segment. The property 'analyze' + is a slice corresponding to the portion of the time domain equivalent + of the segment to analyze for triggers. The value 'cumulative_index' + indexes from the beginning of the original strain series. + """ + if not self._fourier_segments: + self._fourier_segments = [] + for seg_slice, ana in zip(self.segment_slices, self.analyze_slices): + if seg_slice.start >= 0 and seg_slice.stop <= len(self.strain): + freq_seg = make_frequency_series(self.strain[seg_slice]) + # Assume that we cannot have a case where we both zero-pad on + # both sides + elif seg_slice.start < 0: + strain_chunk = self.strain[:seg_slice.stop] + strain_chunk.prepend_zeros(-seg_slice.start) + freq_seg = make_frequency_series(strain_chunk) + elif seg_slice.stop > len(self.strain): + strain_chunk = self.strain[seg_slice.start:] + strain_chunk.append_zeros(seg_slice.stop - len(self.strain)) + freq_seg = make_frequency_series(strain_chunk) + freq_seg.analyze = ana + freq_seg.cumulative_index = seg_slice.start + ana.start + freq_seg.seg_slice = seg_slice + self._fourier_segments.append(freq_seg) + + return self._fourier_segments
+ + +
+[docs] + @classmethod + def from_cli(cls, opt, strain): + """Calculate the segmentation of the strain data for analysis from + the command line options. + """ + return cls(strain, segment_length=opt.segment_length, + segment_start_pad=opt.segment_start_pad, + segment_end_pad=opt.segment_end_pad, + trigger_start=opt.trig_start_time, + trigger_end=opt.trig_end_time, + filter_inj_only=opt.filter_inj_only, + injection_window=opt.injection_window, + allow_zero_padding=opt.allow_zero_padding)
+ + +
+[docs] + @classmethod + def insert_segment_option_group(cls, parser): + segment_group = parser.add_argument_group( + "Options for segmenting the strain", + "These options are used to determine how to " + "segment the strain into smaller chunks, " + "and for determining the portion of each to " + "analyze for triggers. ") + segment_group.add_argument("--trig-start-time", type=int, default=0, + help="(optional) The gps time to start recording triggers") + segment_group.add_argument("--trig-end-time", type=int, default=0, + help="(optional) The gps time to stop recording triggers") + segment_group.add_argument("--segment-length", type=int, + help="The length of each strain segment in seconds.") + segment_group.add_argument("--segment-start-pad", type=int, + help="The time in seconds to ignore of the " + "beginning of each segment in seconds. ") + segment_group.add_argument("--segment-end-pad", type=int, + help="The time in seconds to ignore at the " + "end of each segment in seconds.") + segment_group.add_argument("--allow-zero-padding", action='store_true', + help="Allow for zero padding of data to " + "analyze requested times, if needed.") + # Injection optimization options + segment_group.add_argument("--filter-inj-only", action='store_true', + help="Analyze only segments that contain an injection.") + segment_group.add_argument("--injection-window", default=None, + type=float, help="""If using --filter-inj-only then + only search for injections within +/- injection + window of the injections's end time. This is useful + to speed up a coherent search or a search where we + initially filter at lower sample rate, and then + filter at full rate where needed. NOTE: Reverts to + full analysis if two injections are in the same + segment.""")
+ + + +
+[docs] + @classmethod + def from_cli_single_ifo(cls, opt, strain, ifo): + """Calculate the segmentation of the strain data for analysis from + the command line options. + """ + return cls(strain, segment_length=opt.segment_length[ifo], + segment_start_pad=opt.segment_start_pad[ifo], + segment_end_pad=opt.segment_end_pad[ifo], + trigger_start=opt.trig_start_time[ifo], + trigger_end=opt.trig_end_time[ifo], + filter_inj_only=opt.filter_inj_only, + allow_zero_padding=opt.allow_zero_padding)
+ + +
+[docs] + @classmethod + def from_cli_multi_ifos(cls, opt, strain_dict, ifos): + """Calculate the segmentation of the strain data for analysis from + the command line options. + """ + strain_segments = {} + for ifo in ifos: + strain_segments[ifo] = cls.from_cli_single_ifo( + opt, strain_dict[ifo], ifo) + return strain_segments
+ + +
+[docs] + @classmethod + def insert_segment_option_group_multi_ifo(cls, parser): + segment_group = parser.add_argument_group( + "Options for segmenting the strain", + "These options are used to determine how to " + "segment the strain into smaller chunks, " + "and for determining the portion of each to " + "analyze for triggers. ") + segment_group.add_argument("--trig-start-time", type=int, default=0, + nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME', + help="(optional) The gps time to start recording triggers") + segment_group.add_argument("--trig-end-time", type=int, default=0, + nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME', + help="(optional) The gps time to stop recording triggers") + segment_group.add_argument("--segment-length", type=int, + nargs='+', action=MultiDetOptionAction, + metavar='IFO:LENGTH', + help="The length of each strain segment in seconds.") + segment_group.add_argument("--segment-start-pad", type=int, + nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME', + help="The time in seconds to ignore of the " + "beginning of each segment in seconds. ") + segment_group.add_argument("--segment-end-pad", type=int, + nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME', + help="The time in seconds to ignore at the " + "end of each segment in seconds.") + segment_group.add_argument("--allow-zero-padding", action='store_true', + help="Allow for zero padding of data to analyze " + "requested times, if needed.") + segment_group.add_argument("--filter-inj-only", action='store_true', + help="Analyze only segments that contain " + "an injection.")
+ + + required_opts_list = ['--segment-length', + '--segment-start-pad', + '--segment-end-pad', + ] + +
+[docs] + @classmethod + def verify_segment_options(cls, opt, parser): + required_opts(opt, parser, cls.required_opts_list)
+ + +
+[docs] + @classmethod + def verify_segment_options_multi_ifo(cls, opt, parser, ifos): + for ifo in ifos: + required_opts_multi_ifo(opt, parser, ifo, cls.required_opts_list)
+
+ + + +
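+# A usage sketch (illustrative only) for StrainSegments with in-memory data;
+# the import path pycbc.strain is an assumption based on this module's
+# location:
+#
+#     import numpy
+#     from pycbc.types import TimeSeries
+#     from pycbc.strain import StrainSegments
+#
+#     # 64 s of white noise sampled at 256 Hz
+#     strain = TimeSeries(numpy.random.normal(size=64 * 256).astype(numpy.float32),
+#                         delta_t=1.0/256, epoch=0)
+#     segs = StrainSegments(strain, segment_length=16,
+#                           segment_start_pad=4, segment_end_pad=4)
+#     fseries = segs.fourier_segments()
+#     # each FrequencySeries carries .analyze (slice of analyzable samples)
+#     # and .cumulative_index (offset into the original strain)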
+[docs] +@functools.lru_cache(maxsize=500) +def create_memory_and_engine_for_class_based_fft( + npoints_time, + dtype, + delta_t=1, + ifft=False, + uid=0 +): + """ Create memory and engine for class-based FFT/IFFT + + Currently only supports R2C FFT / C2R IFFTs, but this could be expanded + if use-cases arise. + + Parameters + ---------- + npoints_time : int + Number of time samples of the real input vector (or real output vector + if doing an IFFT). + dtype : np.dtype + The dtype for the real input vector (or real output vector if doing an + IFFT). np.float32 or np.float64 I think in all cases. + delta_t : float (default: 1) + delta_t of the real vector. If not given this will be set to 1, and we + will assume it is not needed in the returned TimeSeries/FrequencySeries + ifft : boolean (default: False) + By default will use the FFT class, set to true to use IFFT. + uid : int (default: 0) + Provide a unique identifier. This is used to provide a separate set + of memory in the cache, for instance if calling this from different + codes. + """ + npoints_freq = npoints_time // 2 + 1 + delta_f_tmp = 1.0 / (npoints_time * delta_t) + vec = TimeSeries( + zeros( + npoints_time, + dtype=dtype + ), + delta_t=delta_t, + copy=False + ) + vectilde = FrequencySeries( + zeros( + npoints_freq, + dtype=complex_same_precision_as(vec) + ), + delta_f=delta_f_tmp, + copy=False + ) + if ifft: + fft_class = IFFT(vectilde, vec) + invec = vectilde + outvec = vec + else: + fft_class = FFT(vec, vectilde) + invec = vec + outvec = vectilde + + return invec, outvec, fft_class
+ + + +
+[docs] +def execute_cached_fft(invec_data, normalize_by_rate=True, ifft=False, + copy_output=True, uid=0): + """ Executes a cached FFT + + Parameters + ----------- + invec_data : Array + Array which will be used as input when fft_class is executed. + normalize_by_rate : boolean (optional, default:False) + If True, then normalize by delta_t (for an FFT) or delta_f (for an + IFFT). + ifft : boolean (optional, default:False) + If true assume this is an IFFT and multiply by delta_f not delta_t. + Will do nothing if normalize_by_rate is False. + copy_output : boolean (optional, default:True) + If True we will copy the output into a new array. This avoids the issue + that calling this function again might overwrite output. However, if + you know that the output array will not be used before this function + might be called again with the same length, then setting this to False + will provide some increase in efficiency. The uid can also be used to + help ensure that data doesn't get unintentionally overwritten! + uid : int (default: 0) + Provide a unique identifier. This is used to provide a separate set + of memory in the cache, for instance if calling this from different + codes. + """ + from pycbc.types import real_same_precision_as + if ifft: + npoints_time = (len(invec_data) - 1) * 2 + else: + npoints_time = len(invec_data) + + try: + delta_t = invec_data.delta_t + except AttributeError: + if not normalize_by_rate: + # Don't need this + delta_t = 1 + else: + raise + + dtype = real_same_precision_as(invec_data) + + invec, outvec, fft_class = create_memory_and_engine_for_class_based_fft( + npoints_time, + dtype, + delta_t=delta_t, + ifft=ifft, + uid=uid + ) + + if invec_data is not None: + invec._data[:] = invec_data._data[:] + fft_class.execute() + if normalize_by_rate: + if ifft: + outvec._data *= invec._delta_f + else: + outvec._data *= invec._delta_t + if copy_output: + outvec = outvec.copy() + try: + outvec._epoch = invec_data._epoch + except AttributeError: + pass + return outvec
+ + + +
+[docs] +def execute_cached_ifft(*args, **kwargs): + """ Executes a cached IFFT + + Parameters + ----------- + invec_data : Array + Array which will be used as input when fft_class is executed. + normalize_by_rate : boolean (optional, default:False) + If True, then normalize by delta_t (for an FFT) or delta_f (for an + IFFT). + copy_output : boolean (optional, default:True) + If True we will copy the output into a new array. This avoids the issue + that calling this function again might overwrite output. However, if + you know that the output array will not be used before this function + might be called again with the same length, then setting this to False + will provide some increase in efficiency. The uid can also be used to + help ensure that data doesn't get unintentionally overwritten! + uid : int (default: 0) + Provide a unique identifier. This is used to provide a separate set + of memory in the cache, for instance if calling this from different + codes. + """ + return execute_cached_fft(*args, **kwargs, ifft=True)
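+# A round-trip sketch (illustrative only) for the cached FFT helpers above;
+# the uid values are arbitrary labels for separate cache slots:
+#
+#     import numpy
+#     from pycbc.types import TimeSeries
+#
+#     ts = TimeSeries(numpy.random.normal(size=4096).astype(numpy.float32),
+#                     delta_t=1.0/256)
+#     ftilde = execute_cached_fft(ts, uid=1234)      # FrequencySeries
+#     ts2 = execute_cached_ifft(ftilde, uid=5678)    # back to the time domain
+#     # with the default normalize_by_rate=True the pair is delta_t/delta_f
+#     # normalized, so ts2 reproduces ts up to floating-point error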
+
+
+
+# If using caching we want output to be unique if called at different places
+# (and if called from different modules/functions); these unique IDs achieve
+# that. The numbers are not significant, only that they are unique.
+STRAINBUFFER_UNIQUE_ID_1 = 236546845
+STRAINBUFFER_UNIQUE_ID_2 = 778946541
+STRAINBUFFER_UNIQUE_ID_3 = 665849947
+
+
+[docs] +class StrainBuffer(pycbc.frame.DataBuffer): + def __init__(self, frame_src, channel_name, start_time, + max_buffer, + sample_rate, + low_frequency_cutoff=20, + highpass_frequency=15.0, + highpass_reduction=200.0, + highpass_bandwidth=5.0, + psd_samples=30, + psd_segment_length=4, + psd_inverse_length=3.5, + trim_padding=0.25, + autogating_threshold=None, + autogating_cluster=None, + autogating_pad=None, + autogating_width=None, + autogating_taper=None, + autogating_duration=None, + autogating_psd_segment_length=None, + autogating_psd_stride=None, + state_channel=None, + data_quality_channel=None, + idq_channel=None, + idq_state_channel=None, + idq_threshold=None, + dyn_range_fac=pycbc.DYN_RANGE_FAC, + psd_abort_difference=None, + psd_recalculate_difference=None, + force_update_cache=True, + increment_update_cache=None, + analyze_flags=None, + data_quality_flags=None, + dq_padding=0): + """ Class to produce overwhitened strain incrementally + + Parameters + ---------- + frame_src: str of list of strings + Strings that indicate where to read from files from. This can be a + list of frame files, a glob, etc. + channel_name: str + Name of the channel to read from the frame files + start_time: + Time to start reading from. + max_buffer: int + Length of the strain buffer in seconds. + sample_rate: int, Optional + Rate in Hz to sample the data. + low_frequency_cutoff: {float, 20}, Optional + The low frequency cutoff to use for inverse spectrum truncation + highpass_frequency: {float, 15}, Optional + The frequency to apply a highpass filter at before downsampling. + highpass_reduction: {float, 200}, Optional + The amount of reduction to apply to the low frequencies. + highpass_bandwidth: {float, 5}, Optional + The width of the transition region for the highpass filter. + psd_samples: {int, 30}, Optional + The number of sample to use for psd estimation + psd_segment_length: {float, 4}, Optional + The number of seconds in each psd sample. + psd_inverse_length: {float, 3.5}, Optional + The length in seconds for fourier transform of the inverse of the + PSD to be truncated to. + trim_padding: {float, 0.25}, Optional + Amount of padding in seconds to give for truncated the overwhitened + data stream. + autogating_threshold: float, Optional + Sigma deviation required to cause autogating of data. + If None, no autogating is performed. + autogating_cluster: float, Optional + Seconds to cluster possible gating locations. + autogating_pad: float, Optional + Seconds of corrupted whitened strain to ignore when generating a gate. + autogating_width: float, Optional + Half-duration of the zeroed-out portion of autogates. + autogating_taper: float, Optional + Duration of taper on either side of the gating window in seconds. + autogating_duration: float, Optional + Amount of data in seconds to apply autogating on. + autogating_psd_segment_length: float, Optional + The length in seconds of each segment used to estimate the PSD with Welch's method. + autogating_psd_stride: float, Optional + The overlap in seconds between each segment used to estimate the PSD with Welch's method. 
+ state_channel: {str, None}, Optional + Channel to use for state information about the strain + data_quality_channel: {str, None}, Optional + Channel to use for data quality information about the strain + idq_channel: {str, None}, Optional + Channel to use for idq timeseries + idq_state_channel : {str, None}, Optional + Channel containing information about usability of idq + idq_threshold : float, Optional + Threshold which triggers a veto if iDQ channel falls below this threshold + dyn_range_fac: {float, pycbc.DYN_RANGE_FAC}, Optional + Scale factor to apply to strain + psd_abort_difference: {float, None}, Optional + The relative change in the inspiral range from the previous PSD + estimate to trigger the data to be considered invalid. + psd_recalculate_difference: {float, None}, Optional + the relative change in the inspiral range from the previous PSD + to trigger a re-estimatoin of the PSD. + force_update_cache: {boolean, True}, Optional + Re-check the filesystem for frame files on every attempt to + read more data. + analyze_flags: list of strs + The flags that must be on to mark the current data as valid for + *any* use. + data_quality_flags: list of strs + The flags used to determine if to keep triggers. + dq_padding: {float, 0}, optional + Extra seconds to consider invalid before/after times with bad DQ. + increment_update_cache: {str, None}, Optional + Pattern to look for frame files in a GPS dependent directory. This + is an alternate to the forced updated of the frame cache, and + apptempts to predict the next frame file name without probing the + filesystem. + """ + super(StrainBuffer, self).__init__(frame_src, channel_name, start_time, + max_buffer=max_buffer, + force_update_cache=force_update_cache, + increment_update_cache=increment_update_cache) + + self.low_frequency_cutoff = low_frequency_cutoff + + # Set up status buffers + self.analyze_flags = analyze_flags + self.data_quality_flags = data_quality_flags + self.state = None + self.dq = None + self.idq = None + self.dq_padding = dq_padding + + # State channel + if state_channel is not None: + valid_mask = pycbc.frame.flag_names_to_bitmask(self.analyze_flags) + logger.info('State channel %s interpreted as bitmask %s = good', + state_channel, bin(valid_mask)) + self.state = pycbc.frame.StatusBuffer( + frame_src, + state_channel, start_time, + max_buffer=max_buffer, + valid_mask=valid_mask, + force_update_cache=force_update_cache, + increment_update_cache=increment_update_cache) + + # low latency dq channel + if data_quality_channel is not None: + sb_kwargs = dict(max_buffer=max_buffer, + force_update_cache=force_update_cache, + increment_update_cache=increment_update_cache) + if len(self.data_quality_flags) == 1 \ + and self.data_quality_flags[0] == 'veto_nonzero': + sb_kwargs['valid_on_zero'] = True + logger.info('DQ channel %s interpreted as zero = good', + data_quality_channel) + else: + sb_kwargs['valid_mask'] = pycbc.frame.flag_names_to_bitmask( + self.data_quality_flags) + logger.info( + 'DQ channel %s interpreted as bitmask %s = good', + data_quality_channel, + bin(sb_kwargs['valid_mask']) + ) + self.dq = pycbc.frame.StatusBuffer(frame_src, data_quality_channel, + start_time, **sb_kwargs) + + if idq_channel is not None: + if idq_state_channel is None: + raise ValueError( + 'Each detector with an iDQ channel requires an iDQ state channel as well') + if idq_threshold is None: + raise ValueError( + 'If an iDQ channel is provided, a veto threshold must also be provided') + self.idq = pycbc.frame.iDQBuffer(frame_src, + 
idq_channel, + idq_state_channel, + idq_threshold, + start_time, + max_buffer=max_buffer, + force_update_cache=force_update_cache, + increment_update_cache=increment_update_cache) + + self.highpass_frequency = highpass_frequency + self.highpass_reduction = highpass_reduction + self.highpass_bandwidth = highpass_bandwidth + + self.autogating_threshold = autogating_threshold + self.autogating_cluster = autogating_cluster + self.autogating_pad = autogating_pad + self.autogating_width = autogating_width + self.autogating_taper = autogating_taper + self.autogating_duration = autogating_duration + self.autogating_psd_segment_length = autogating_psd_segment_length + self.autogating_psd_stride = autogating_psd_stride + self.gate_params = [] + + self.sample_rate = sample_rate + self.dyn_range_fac = dyn_range_fac + + self.psd_abort_difference = psd_abort_difference + self.psd_recalculate_difference = psd_recalculate_difference + self.psd_segment_length = psd_segment_length + self.psd_samples = psd_samples + self.psd_inverse_length = psd_inverse_length + self.psd = None + self.psds = {} + + strain_len = int(max_buffer * self.sample_rate) + self.strain = TimeSeries(zeros(strain_len, dtype=numpy.float32), + delta_t=1.0/self.sample_rate, + epoch=start_time-max_buffer) + + # Determine the total number of corrupted samples for highpass + # and PSD over whitening + highpass_samples, self.beta = kaiserord(self.highpass_reduction, + self.highpass_bandwidth / self.raw_buffer.sample_rate * 2 * numpy.pi) + self.highpass_samples = int(highpass_samples / 2) + resample_corruption = 10 # If using the ldas method + self.factor = round(1.0 / self.raw_buffer.delta_t / self.sample_rate) + self.corruption = self.highpass_samples // self.factor + resample_corruption + + self.psd_corruption = self.psd_inverse_length * self.sample_rate + self.total_corruption = self.corruption + self.psd_corruption + + # Determine how much padding is needed after removing the parts + # associated with PSD over whitening and highpass filtering + self.trim_padding = int(trim_padding * self.sample_rate) + if self.trim_padding > self.total_corruption: + self.trim_padding = self.total_corruption + + self.psd_duration = (psd_samples - 1) // 2 * psd_segment_length + + self.reduced_pad = int(self.total_corruption - self.trim_padding) + self.segments = {} + + # time to ignore output of frame (for initial buffering) + self.add_hard_count() + self.taper_immediate_strain = True + + @property + def start_time(self): + """ Return the start time of the current valid segment of data """ + return self.end_time - self.blocksize + + @property + def end_time(self): + """ Return the end time of the current valid segment of data """ + return float(self.strain.start_time + (len(self.strain) - self.total_corruption) / self.sample_rate) + +
+[docs]
+    def add_hard_count(self):
+        """ Reset the countdown timer, so that no data is analyzed until
+        enough has been collected to generate a new PSD.
+        """
+        self.wait_duration = int(numpy.ceil(self.total_corruption / self.sample_rate + self.psd_duration))
+        self.invalidate_psd()
+ + +
+[docs] + def invalidate_psd(self): + """ Make the current PSD invalid. A new one will be generated when + it is next required """ + self.psd = None + self.psds = {}
+ + +
+[docs]
+    def recalculate_psd(self):
+        """ Recalculate the psd
+        """
+
+        seg_len = int(self.sample_rate * self.psd_segment_length)
+        e = len(self.strain)
+        s = e - (self.psd_samples + 1) * seg_len // 2
+        psd = pycbc.psd.welch(self.strain[s:e], seg_len=seg_len, seg_stride=seg_len//2)
+
+        psd.dist = spa_distance(psd, 1.4, 1.4, self.low_frequency_cutoff) * pycbc.DYN_RANGE_FAC
+
+        # If the new psd is similar to the old one, don't replace it
+        if self.psd and self.psd_recalculate_difference:
+            if abs(self.psd.dist - psd.dist) / self.psd.dist < self.psd_recalculate_difference:
+                logger.info("Skipping recalculation of %s PSD, %s-%s",
+                            self.detector, self.psd.dist, psd.dist)
+                return True
+
+        # If the new psd is *really* different from the old one, return an error
+        if self.psd and self.psd_abort_difference:
+            if abs(self.psd.dist - psd.dist) / self.psd.dist > self.psd_abort_difference:
+                logger.info("%s PSD is CRAZY, aborting!!!!, %s-%s",
+                            self.detector, self.psd.dist, psd.dist)
+                self.psd = psd
+                self.psds = {}
+                return False
+
+        # If the new estimate replaces the current one, invalidate the interpolated PSDs
+        self.psd = psd
+        self.psds = {}
+        logger.info("Recalculating %s PSD, %s", self.detector, psd.dist)
+        return True
+ + +
+[docs] + def check_psd_dist(self, min_dist, max_dist): + """Check that the horizon distance of a detector is within a required + range. If so, return True, otherwise log a warning and return False. + """ + if self.psd is None: + # ignore check + return True + # Note that the distance can in principle be inf or nan, e.g. if h(t) + # is identically zero. The check must fail in those cases. Be careful + # with how the logic works out when comparing inf's or nan's! + good = self.psd.dist >= min_dist and self.psd.dist <= max_dist + if not good: + logger.info( + "%s PSD dist %s outside acceptable range [%s, %s]", + self.detector, + self.psd.dist, + min_dist, + max_dist + ) + return good
+ + +
+[docs] + def overwhitened_data(self, delta_f): + """ Return overwhitened data + + Parameters + ---------- + delta_f: float + The sample step to generate overwhitened frequency domain data for + + Returns + ------- + htilde: FrequencySeries + Overwhited strain data + """ + # we haven't already computed htilde for this delta_f + if delta_f not in self.segments: + buffer_length = int(1.0 / delta_f) + e = len(self.strain) + s = int(e - buffer_length * self.sample_rate - self.reduced_pad * 2) + + # FFT the contents of self.strain[s:e] into fseries + fseries = execute_cached_fft(self.strain[s:e], + copy_output=False, + uid=STRAINBUFFER_UNIQUE_ID_1) + fseries._epoch = self.strain._epoch + s*self.strain.delta_t + + # we haven't calculated a resample psd for this delta_f + if delta_f not in self.psds: + psdt = pycbc.psd.interpolate(self.psd, fseries.delta_f) + psdt = pycbc.psd.inverse_spectrum_truncation(psdt, + int(self.sample_rate * self.psd_inverse_length), + low_frequency_cutoff=self.low_frequency_cutoff) + psdt._delta_f = fseries.delta_f + + psd = pycbc.psd.interpolate(self.psd, delta_f) + psd = pycbc.psd.inverse_spectrum_truncation(psd, + int(self.sample_rate * self.psd_inverse_length), + low_frequency_cutoff=self.low_frequency_cutoff) + + psd.psdt = psdt + self.psds[delta_f] = psd + + psd = self.psds[delta_f] + fseries /= psd.psdt + + # trim ends of strain + if self.reduced_pad != 0: + # IFFT the contents of fseries into overwhite + overwhite = execute_cached_ifft(fseries, + copy_output=False, + uid=STRAINBUFFER_UNIQUE_ID_2) + + overwhite2 = overwhite[self.reduced_pad:len(overwhite)-self.reduced_pad] + taper_window = self.trim_padding / 2.0 / overwhite.sample_rate + gate_params = [(overwhite2.start_time, 0., taper_window), + (overwhite2.end_time, 0., taper_window)] + gate_data(overwhite2, gate_params) + + # FFT the contents of overwhite2 into fseries_trimmed + fseries_trimmed = execute_cached_fft( + overwhite2, + copy_output=True, + uid=STRAINBUFFER_UNIQUE_ID_3 + ) + + fseries_trimmed.start_time = fseries.start_time + self.reduced_pad * self.strain.delta_t + else: + fseries_trimmed = fseries + + fseries_trimmed.psd = psd + self.segments[delta_f] = fseries_trimmed + + stilde = self.segments[delta_f] + return stilde
+ + +
+[docs]
+    def near_hwinj(self):
+        """Check whether the current set of triggers could be influenced by
+        a hardware injection.
+        """
+        if not self.state:
+            return False
+
+        if not self.state.is_extent_valid(self.start_time, self.blocksize, pycbc.frame.NO_HWINJ):
+            return True
+        return False
+ + +
+[docs] + def null_advance_strain(self, blocksize): + """ Advance and insert zeros + + Parameters + ---------- + blocksize: int + The number of seconds to attempt to read from the channel + """ + sample_step = int(blocksize * self.sample_rate) + csize = sample_step + self.corruption * 2 + self.strain.roll(-sample_step) + + # We should roll this off at some point too... + self.strain[len(self.strain) - csize + self.corruption:] = 0 + self.strain.start_time += blocksize + + # The next time we need strain will need to be tapered + self.taper_immediate_strain = True
+ + +
+[docs] + def advance(self, blocksize, timeout=10): + """Advanced buffer blocksize seconds. + + Add blocksize seconds more to the buffer, push blocksize seconds + from the beginning. + + Parameters + ---------- + blocksize: int + The number of seconds to attempt to read from the channel + + Returns + ------- + status: boolean + Returns True if this block is analyzable. + """ + ts = super(StrainBuffer, self).attempt_advance(blocksize, timeout=timeout) + self.blocksize = blocksize + + self.gate_params = [] + + # We have given up so there is no time series + if ts is None: + logger.info("%s frame is late, giving up", self.detector) + self.null_advance_strain(blocksize) + if self.state: + self.state.null_advance(blocksize) + if self.dq: + self.dq.null_advance(blocksize) + if self.idq: + self.idq.null_advance(blocksize) + return False + + # We collected some data so we are closer to being able to analyze data + self.wait_duration -= blocksize + + # If the data we got was invalid, reset the counter on how much to collect + # This behavior corresponds to how we handle CAT1 vetoes + if self.state and self.state.advance(blocksize) is False: + self.add_hard_count() + self.null_advance_strain(blocksize) + if self.dq: + self.dq.null_advance(blocksize) + if self.idq: + self.idq.null_advance(blocksize) + logger.info("%s time has invalid data, resetting buffer", + self.detector) + return False + + # Also advance the dq vector and idq timeseries in lockstep + if self.dq: + self.dq.advance(blocksize) + if self.idq: + self.idq.advance(blocksize) + + self.segments = {} + + # only condition with the needed raw data so we can continuously add + # to the existing result + + # Precondition + sample_step = int(blocksize * self.sample_rate) + csize = sample_step + self.corruption * 2 + start = len(self.raw_buffer) - csize * self.factor + strain = self.raw_buffer[start:] + + strain = pycbc.filter.highpass_fir(strain, self.highpass_frequency, + self.highpass_samples, + beta=self.beta) + strain = (strain * self.dyn_range_fac).astype(numpy.float32) + + strain = pycbc.filter.resample_to_delta_t(strain, + 1.0/self.sample_rate, method='ldas') + + # remove corruption at beginning + strain = strain[self.corruption:] + + # taper beginning if needed + if self.taper_immediate_strain: + logger.info("Tapering start of %s strain block", self.detector) + strain = gate_data( + strain, [(strain.start_time, 0., self.autogating_taper)]) + self.taper_immediate_strain = False + + # Stitch into continuous stream + self.strain.roll(-sample_step) + self.strain[len(self.strain) - csize + self.corruption:] = strain[:] + self.strain.start_time += blocksize + + # apply gating if needed + if self.autogating_threshold is not None: + autogating_duration_length = self.autogating_duration * self.sample_rate + autogating_start_sample = int(len(self.strain) - autogating_duration_length) + glitch_times = detect_loud_glitches( + self.strain[autogating_start_sample:-self.corruption], + psd_duration=self.autogating_psd_segment_length, psd_stride=self.autogating_psd_stride, + threshold=self.autogating_threshold, + cluster_window=self.autogating_cluster, + low_freq_cutoff=self.highpass_frequency, + corrupt_time=self.autogating_pad) + if len(glitch_times) > 0: + logger.info('Autogating %s at %s', self.detector, + ', '.join(['%.3f' % gt for gt in glitch_times])) + self.gate_params = \ + [(gt, self.autogating_width, self.autogating_taper) + for gt in glitch_times] + self.strain = gate_data(self.strain, self.gate_params) + + if self.psd is None and 
self.wait_duration <=0: + self.recalculate_psd() + + return self.wait_duration <= 0
+ + +
+[docs] + @classmethod + def from_cli(cls, ifo, args): + """Initialize a StrainBuffer object (data reader) for a particular + detector. + """ + state_channel = analyze_flags = None + if args.state_channel and ifo in args.state_channel \ + and args.analyze_flags and ifo in args.analyze_flags: + state_channel = ':'.join([ifo, args.state_channel[ifo]]) + analyze_flags = args.analyze_flags[ifo].split(',') + + dq_channel = dq_flags = None + if args.data_quality_channel and ifo in args.data_quality_channel \ + and args.data_quality_flags and ifo in args.data_quality_flags: + dq_channel = ':'.join([ifo, args.data_quality_channel[ifo]]) + dq_flags = args.data_quality_flags[ifo].split(',') + + idq_channel = None + if args.idq_channel and ifo in args.idq_channel: + idq_channel = ':'.join([ifo, args.idq_channel[ifo]]) + + idq_state_channel = None + if args.idq_state_channel and ifo in args.idq_state_channel: + idq_state_channel = ':'.join([ifo, args.idq_state_channel[ifo]]) + + if args.frame_type: + frame_src = pycbc.frame.frame_paths( + args.frame_type[ifo], + args.start_time, + args.end_time, + site=ifo[0] + ) + else: + frame_src = [args.frame_src[ifo]] + strain_channel = ':'.join([ifo, args.channel_name[ifo]]) + + return cls( + frame_src, + strain_channel, + args.start_time, + max_buffer=args.max_length, + state_channel=state_channel, + data_quality_channel=dq_channel, + idq_channel=idq_channel, + idq_state_channel=idq_state_channel, + idq_threshold=args.idq_threshold, + sample_rate=args.sample_rate, + low_frequency_cutoff=args.low_frequency_cutoff, + highpass_frequency=args.highpass_frequency, + highpass_reduction=args.highpass_reduction, + highpass_bandwidth=args.highpass_bandwidth, + psd_samples=args.psd_samples, + trim_padding=args.trim_padding, + psd_segment_length=args.psd_segment_length, + psd_inverse_length=args.psd_inverse_length, + autogating_threshold=args.autogating_threshold, + autogating_cluster=args.autogating_cluster, + autogating_pad=args.autogating_pad, + autogating_width=args.autogating_width, + autogating_taper=args.autogating_taper, + autogating_duration=args.autogating_duration, + autogating_psd_segment_length=args.autogating_psd_segment_length, + autogating_psd_stride=args.autogating_psd_stride, + psd_abort_difference=args.psd_abort_difference, + psd_recalculate_difference=args.psd_recalculate_difference, + force_update_cache=args.force_update_cache, + increment_update_cache=args.increment_update_cache[ifo], + analyze_flags=analyze_flags, + data_quality_flags=dq_flags, + dq_padding=args.data_quality_padding + )
+
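+# The StrainBuffer constructor above derives its edge corruption from the
+# highpass filter length. A standalone sketch of that calculation (200 dB
+# reduction and 5 Hz bandwidth are the class defaults; the raw rate of
+# 16384 Hz and target rate of 2048 Hz are example values, not defaults):
+#
+#     import numpy
+#     from scipy.signal import kaiserord
+#
+#     raw_rate, target_rate = 16384, 2048
+#     ntaps, beta = kaiserord(200.0, 5.0 / raw_rate * 2 * numpy.pi)
+#     highpass_samples = int(ntaps / 2)
+#     factor = round(raw_rate / target_rate)
+#     corruption = highpass_samples // factor + 10   # 10 = resampling corruption
+#     # `corruption` samples at the target rate are discarded at each block edge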
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/tmpltbank/bank_conversions.html b/latest/html/_modules/pycbc/tmpltbank/bank_conversions.html new file mode 100644 index 00000000000..003d9523575 --- /dev/null +++ b/latest/html/_modules/pycbc/tmpltbank/bank_conversions.html @@ -0,0 +1,285 @@ + + + + + + pycbc.tmpltbank.bank_conversions — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.tmpltbank.bank_conversions

+# Copyright (C) 2022 Gareth Cabourn Davies
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module is supplied to make a convenience function for converting into
+specific values from PyCBC template banks.
+"""
+
+import logging
+import numpy as np
+
+from pycbc import conversions as conv
+from pycbc import pnutils
+
+logger = logging.getLogger('pycbc.tmpltbank.bank_conversions')
+
+# Convert from parameter name to helper function
+# some multiple names are used for the same function
+conversion_options = ['mass1', 'mass2', 'spin1z', 'spin2z', 'duration',
+                      'template_duration', 'mtotal', 'total_mass',
+                      'q', 'invq', 'eta', 'chirp_mass', 'mchirp',
+                      'chieff', 'chi_eff', 'effective_spin', 'chi_a',
+                      'premerger_duration']
+
+
+mass_conversions = {
+    'mtotal': conv.mtotal_from_mass1_mass2,
+    'total_mass': conv.mtotal_from_mass1_mass2,
+    'q': conv.q_from_mass1_mass2,
+    'invq': conv.invq_from_mass1_mass2,
+    'eta': conv.eta_from_mass1_mass2,
+    'mchirp': conv.mchirp_from_mass1_mass2,
+    'chirp_mass': conv.mchirp_from_mass1_mass2,
+}
+
+spin_conversions = {
+    'chieff': conv.chi_eff,
+    'chi_eff': conv.chi_eff,
+    'effective_spin': conv.chi_eff,
+    'chi_a': conv.chi_a
+}
+
+
+
+[docs] +def get_bank_property(parameter, bank, template_ids): + """ Get a specific value from a hdf file object in standard PyCBC + template bank format + + Parameters + ---------- + parameter: str + the parameter to convert to, must be in conversion_options + + bank: h5py File object or dictionary of arrays + Template bank containing the parameters for use in conversions + must contain mass1, mass2, spin1z, spin2z as a minimum + + template_ids: numpy array + Array of template IDs for reading a set of templates from the bank + + Returns + ------- + values: numpy array, same size as template_ids + Array of whatever the requested parameter is calculated for + the specified templates in the bank + + """ + # These just give things already in the bank + if parameter in bank: + values = bank[parameter][:][template_ids] + # Duration may be in the bank, but if not, we need to calculate + elif parameter.endswith('duration'): + fullband_req = False + prem_required = False + if parameter != "premerger_duration" and 'template_duration' in bank: + # This statement should be the reached only if 'duration' + # is given, but 'template_duration' is in the bank + fullband_dur = bank['template_duration'][:][template_ids] + elif parameter in ['template_duration', 'duration']: + # Only calculate fullband/premerger durations if we need to + fullband_req = True + if 'f_final' in bank: + prem_required = True + elif parameter == "premerger_duration": + prem_required = True + + # Set up the arguments for get_imr_duration + imr_args = ['mass1', 'mass2', 'spin1z', 'spin2z'] + if 'approximant' in bank: + kwargs = {'approximant': bank['approximant'][:][template_ids]} + else: + kwargs = {} + + if fullband_req: + # Unpack the appropriate arguments + fullband_dur = pnutils.get_imr_duration( + *[bank[k][:][template_ids] + for k in imr_args + ['f_lower']], + **kwargs) + + if prem_required and 'f_final' in bank: + # If f_final is in the bank, then we need to calculate + # the premerger time of the end of the template + prem_dur = pnutils.get_imr_duration( + *[bank[k][:][template_ids] + for k in imr_args + ['f_final']], + **kwargs) + elif prem_required: + # Pre-merger for bank without f_final is zero + prem_dur = np.zeros_like(template_ids) + + # Now we decide what to return: + if parameter in ['template_duration', 'duration']: + values = fullband_dur + if prem_required: + values -= prem_dur + else: + values = prem_dur + + # Basic conversions + elif parameter in mass_conversions.keys(): + values = mass_conversions[parameter](bank['mass1'][:][template_ids], + bank['mass2'][:][template_ids]) + + elif parameter in spin_conversions.keys(): + values = spin_conversions[parameter](bank['mass1'][:][template_ids], + bank['mass2'][:][template_ids], + bank['spin1z'][:][template_ids], + bank['spin2z'][:][template_ids]) + + else: + # parameter not in the current conversion parameter list + raise NotImplementedError("Bank conversion function " + parameter + + " not recognised: choose from '" + + "', '".join(conversion_options) + "'.") + return values
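+# A quick sketch (illustrative only): the bank argument may be a plain
+# dictionary of arrays, so a conversion can be exercised without an HDF file.
+# The numerical values are arbitrary:
+#
+#     import numpy as np
+#     from pycbc.tmpltbank.bank_conversions import get_bank_property
+#
+#     bank = {'mass1': np.array([10.0, 30.0]), 'mass2': np.array([5.0, 25.0]),
+#             'spin1z': np.array([0.0, 0.3]), 'spin2z': np.array([0.0, -0.1])}
+#     tids = np.array([0, 1])
+#     mchirp = get_bank_property('mchirp', bank, tids)
+#     chi_eff = get_bank_property('chi_eff', bank, tids)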
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/tmpltbank/bank_output_utils.html b/latest/html/_modules/pycbc/tmpltbank/bank_output_utils.html new file mode 100644 index 00000000000..d2009aaa9bf --- /dev/null +++ b/latest/html/_modules/pycbc/tmpltbank/bank_output_utils.html @@ -0,0 +1,507 @@ + + + + + + pycbc.tmpltbank.bank_output_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.tmpltbank.bank_output_utils

+import logging
+import numpy
+
+from lal import PI, MTSUN_SI, TWOPI, GAMMA
+from ligo.lw import ligolw, lsctables, utils as ligolw_utils
+
+from pycbc import pnutils
+from pycbc.tmpltbank.lambda_mapping import ethinca_order_from_string
+from pycbc.io.ligolw import (
+    return_empty_sngl, return_search_summary, create_process_table
+)
+from pycbc.io.hdf import HFile
+
+from pycbc.waveform import get_waveform_filter_length_in_time as gwflit
+
+logger = logging.getLogger('pycbc.tmpltbank.bank_output_utils')
+
+
+[docs] +def convert_to_sngl_inspiral_table(params, proc_id): + ''' + Convert a list of m1,m2,spin1z,spin2z values into a basic sngl_inspiral + table with mass and spin parameters populated and event IDs assigned + + Parameters + ----------- + params : iterable + Each entry in the params iterable should be a sequence of + [mass1, mass2, spin1z, spin2z] in that order + proc_id : int + Process ID to add to each row of the sngl_inspiral table + + Returns + ---------- + SnglInspiralTable + Bank of templates in SnglInspiralTable format + ''' + sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable) + col_names = ['mass1','mass2','spin1z','spin2z'] + + for values in params: + tmplt = return_empty_sngl() + + tmplt.process_id = proc_id + for colname, value in zip(col_names, values): + setattr(tmplt, colname, value) + tmplt.mtotal, tmplt.eta = pnutils.mass1_mass2_to_mtotal_eta( + tmplt.mass1, tmplt.mass2) + tmplt.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta( + tmplt.mass1, tmplt.mass2) + tmplt.template_duration = 0 # FIXME + tmplt.event_id = sngl_inspiral_table.get_next_id() + sngl_inspiral_table.append(tmplt) + + return sngl_inspiral_table
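+# A two-template sketch (illustrative only); the integer process ID 0 is a
+# placeholder, normally it comes from the process table:
+#
+#     params = [(10.0, 5.0, 0.0, 0.0), (30.0, 25.0, 0.3, -0.1)]
+#     sngl_table = convert_to_sngl_inspiral_table(params, 0)
+#     # rows now carry mass1/mass2/spin1z/spin2z plus derived mtotal, eta and
+#     # mchirp, with event IDs assigned in order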
+ + +
+[docs] +def calculate_ethinca_metric_comps(metricParams, ethincaParams, mass1, mass2, + spin1z=0., spin2z=0., full_ethinca=True): + """ + Calculate the Gamma components needed to use the ethinca metric. + At present this outputs the standard TaylorF2 metric over the end time + and chirp times \tau_0 and \tau_3. + A desirable upgrade might be to use the \chi coordinates [defined WHERE?] + for metric distance instead of \tau_0 and \tau_3. + The lower frequency cutoff is currently hard-coded to be the same as the + bank layout options fLow and f0 (which must be the same as each other). + + Parameters + ----------- + metricParams : metricParameters instance + Structure holding all the options for construction of the metric + and the eigenvalues, eigenvectors and covariance matrix + needed to manipulate the space. + ethincaParams : ethincaParameters instance + Structure holding options relevant to the ethinca metric computation. + mass1 : float + Mass of the heavier body in the considered template. + mass2 : float + Mass of the lighter body in the considered template. + spin1z : float (optional, default=0) + Spin of the heavier body in the considered template. + spin2z : float (optional, default=0) + Spin of the lighter body in the considered template. + full_ethinca : boolean (optional, default=True) + If True calculate the ethinca components in all 3 directions (mass1, + mass2 and time). If False calculate only the time component (which is + stored in Gamma0). + Returns + -------- + fMax_theor : float + Value of the upper frequency cutoff given by the template parameters + and the cutoff formula requested. + + gammaVals : numpy_array + Array holding 6 independent metric components in + (end_time, tau_0, tau_3) coordinates to be stored in the Gamma0-5 + slots of a SnglInspiral object. + """ + if (float(spin1z) != 0. or float(spin2z) != 0.) and full_ethinca: + raise NotImplementedError("Ethinca cannot at present be calculated " + "for nonzero component spins!") + f0 = metricParams.f0 + if f0 != metricParams.fLow: + raise ValueError("If calculating ethinca the bank f0 value must be " + "equal to f-low!") + if ethincaParams.fLow is not None and ( + ethincaParams.fLow != metricParams.fLow): + raise NotImplementedError("An ethinca metric f-low different from the" + " bank metric f-low is not supported!") + + twicePNOrder = ethinca_order_from_string(ethincaParams.pnOrder) + + piFl = PI * f0 + totalMass, eta = pnutils.mass1_mass2_to_mtotal_eta(mass1, mass2) + totalMass = totalMass * MTSUN_SI + v0cube = totalMass*piFl + v0 = v0cube**(1./3.) + + # Get theoretical cutoff frequency and work out the closest + # frequency for which moments were calculated + fMax_theor = pnutils.frequency_cutoff_from_name( + ethincaParams.cutoff, mass1, mass2, spin1z, spin2z) + fMaxes = list(metricParams.moments['J4'].keys()) + fMaxIdx = abs(numpy.array(fMaxes,dtype=float) - fMax_theor).argmin() + fMax = fMaxes[fMaxIdx] + + # Set the appropriate moments + Js = numpy.zeros([18,3],dtype=float) + for i in range(18): + Js[i,0] = metricParams.moments['J%d'%(i)][fMax] + Js[i,1] = metricParams.moments['log%d'%(i)][fMax] + Js[i,2] = metricParams.moments['loglog%d'%(i)][fMax] + + # Compute the time-dependent metric term. 
+ two_pi_flower_sq = TWOPI * f0 * TWOPI * f0 + gammaVals = numpy.zeros([6],dtype=float) + gammaVals[0] = 0.5 * two_pi_flower_sq * \ + ( Js[(1,0)] - (Js[(4,0)]*Js[(4,0)]) ) + + # If mass terms not required stop here + if not full_ethinca: + return fMax_theor, gammaVals + + # 3pN is a mess, so split it into pieces + a0 = 11583231236531/200286535680 - 5*PI*PI - 107*GAMMA/14 + a1 = (-15737765635/130056192 + 2255*PI*PI/512)*eta + a2 = (76055/73728)*eta*eta + a3 = (-127825/55296)*eta*eta*eta + alog = numpy.log(4*v0) # Log terms are tricky - be careful + + # Get the Psi coefficients + Psi = [{},{}] #Psi = numpy.zeros([2,8,2],dtype=float) + Psi[0][0,0] = 3/5 + Psi[0][2,0] = (743/756 + 11*eta/3)*v0*v0 + Psi[0][3,0] = 0. + Psi[0][4,0] = (-3058673/508032 + 5429*eta/504 + 617*eta*eta/24)\ + *v0cube*v0 + Psi[0][5,1] = (-7729*PI/126)*v0cube*v0*v0/3 + Psi[0][6,0] = (128/15)*(-3*a0 - a1 + a2 + 3*a3 + 107*(1+3*alog)/14)\ + *v0cube*v0cube + Psi[0][6,1] = (6848/35)*v0cube*v0cube/3 + Psi[0][7,0] = (-15419335/63504 - 75703*eta/756)*PI*v0cube*v0cube*v0 + + Psi[1][0,0] = 0. + Psi[1][2,0] = (3715/12096 - 55*eta/96)/PI/v0; + Psi[1][3,0] = -3/2 + Psi[1][4,0] = (15293365/4064256 - 27145*eta/16128 - 3085*eta*eta/384)\ + *v0/PI + Psi[1][5,1] = (193225/8064)*v0*v0/3 + Psi[1][6,0] = (4/PI)*(2*a0 + a1/3 - 4*a2/3 - 3*a3 -107*(1+6*alog)/42)\ + *v0cube + Psi[1][6,1] = (-428/PI/7)*v0cube/3 + Psi[1][7,0] = (77096675/1161216 + 378515*eta/24192 + 74045*eta*eta/8064)\ + *v0cube*v0 + + # Set the appropriate moments + Js = numpy.zeros([18,3],dtype=float) + for i in range(18): + Js[i,0] = metricParams.moments['J%d'%(i)][fMax] + Js[i,1] = metricParams.moments['log%d'%(i)][fMax] + Js[i,2] = metricParams.moments['loglog%d'%(i)][fMax] + + # Calculate the g matrix + PNterms = [(0,0),(2,0),(3,0),(4,0),(5,1),(6,0),(6,1),(7,0)] + PNterms = [term for term in PNterms if term[0] <= twicePNOrder] + + # Now can compute the mass-dependent gamma values + for m in [0, 1]: + for k in PNterms: + gammaVals[1+m] += 0.5 * two_pi_flower_sq * Psi[m][k] * \ + ( Js[(9-k[0],k[1])] + - Js[(12-k[0],k[1])] * Js[(4,0)] ) + + g = numpy.zeros([2,2],dtype=float) + for (m,n) in [(0,0),(0,1),(1,1)]: + for k in PNterms: + for l in PNterms: + g[m,n] += Psi[m][k] * Psi[n][l] * \ + ( Js[(17-k[0]-l[0], k[1]+l[1])] + - Js[(12-k[0],k[1])] * Js[(12-l[0],l[1])] ) + g[m,n] = 0.5 * two_pi_flower_sq * g[m,n] + g[n,m] = g[m,n] + + gammaVals[3] = g[0,0] + gammaVals[4] = g[0,1] + gammaVals[5] = g[1,1] + + return fMax_theor, gammaVals
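+
+# Illustrative usage sketch (not part of the original module). It assumes
+# `metric_params` and `ethinca_params` are pre-built metricParameters and
+# ethincaParameters instances whose moments have already been computed with
+# f0 equal to f-low, as required by the checks above; the masses are
+# arbitrary example values.
+fmax_cut, gammas = calculate_ethinca_metric_comps(
+    metric_params, ethinca_params, mass1=1.4, mass2=1.35,
+    spin1z=0., spin2z=0., full_ethinca=False)
+# With full_ethinca=False only gammas[0] (the time component, destined for
+# the Gamma0 column) is populated.
+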
+ + +
+[docs] +def output_sngl_inspiral_table(outputFile, tempBank, programName="", + optDict = None, outdoc=None, + **kwargs): # pylint:disable=unused-argument + """ + Function that converts the information produced by the various PyCBC bank + generation codes into a valid LIGOLW XML file containing a sngl_inspiral + table and outputs to file. + + Parameters + ----------- + outputFile : string + Name of the file that the bank will be written to + tempBank : iterable + Each entry in the tempBank iterable should be a sequence of + [mass1,mass2,spin1z,spin2z] in that order. + programName (key-word-argument) : string + Name of the executable that has been run + optDict (key-word argument) : dictionary + Dictionary of the command line arguments passed to the program + outdoc (key-word argument) : ligolw xml document + If given add template bank to this representation of a xml document and + write to disk. If not given create a new document. + kwargs : optional key-word arguments + Allows unused options to be passed to this function (for modularity) + """ + if optDict is None: + optDict = {} + if outdoc is None: + outdoc = ligolw.Document() + outdoc.appendChild(ligolw.LIGO_LW()) + + # get IFO to put in search summary table + ifos = [] + if 'channel_name' in optDict.keys(): + if optDict['channel_name'] is not None: + ifos = [optDict['channel_name'][0:2]] + + proc = create_process_table( + outdoc, + program_name=programName, + detectors=ifos, + options=optDict + ) + proc_id = proc.process_id + sngl_inspiral_table = convert_to_sngl_inspiral_table(tempBank, proc_id) + + # set per-template low-frequency cutoff + if 'f_low_column' in optDict and 'f_low' in optDict and \ + optDict['f_low_column'] is not None: + for sngl in sngl_inspiral_table: + setattr(sngl, optDict['f_low_column'], optDict['f_low']) + + outdoc.childNodes[0].appendChild(sngl_inspiral_table) + + # get times to put in search summary table + start_time = 0 + end_time = 0 + if 'gps_start_time' in optDict.keys() and 'gps_end_time' in optDict.keys(): + start_time = optDict['gps_start_time'] + end_time = optDict['gps_end_time'] + + # make search summary table + search_summary_table = lsctables.New(lsctables.SearchSummaryTable) + search_summary = return_search_summary( + start_time, end_time, len(sngl_inspiral_table), ifos + ) + search_summary_table.append(search_summary) + outdoc.childNodes[0].appendChild(search_summary_table) + + # write the xml doc to disk + ligolw_utils.write_filename(outdoc, outputFile)
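+
+# Illustrative usage sketch (not part of the original module): write a small
+# bank to a LIGOLW XML file. The file name, option dictionary contents and
+# program name are placeholder values.
+output_sngl_inspiral_table(
+    'example_bank.xml.gz',
+    [(10.0, 5.0, 0.1, 0.0), (30.0, 25.0, -0.2, 0.3)],
+    programName='pycbc_example_bank',
+    optDict={'channel_name': 'H1:GDS-CALIB_STRAIN',
+             'gps_start_time': 1126051217,
+             'gps_end_time': 1126052217},
+)
+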
+ + + +
+
+[docs]
+def output_bank_to_hdf(outputFile, tempBank, optDict=None, programName='',
+                       approximant=None, output_duration=False,
+                       **kwargs):  # pylint:disable=unused-argument
+    """
+    Function that converts the information produced by the various PyCBC bank
+    generation codes into an HDF5 file.
+
+    Parameters
+    -----------
+    outputFile : string
+        Name of the file that the bank will be written to
+    tempBank : iterable
+        Each entry in the tempBank iterable should be a sequence of
+        [mass1,mass2,spin1z,spin2z] in that order.
+    programName (key-word-argument) : string
+        Name of the executable that has been run
+    optDict (key-word argument) : dictionary
+        Dictionary of the command line arguments passed to the program
+    approximant : string
+        The approximant to be written to the file. If output_duration is
+        True, this is also used for that calculation.
+    output_duration : boolean
+        Output the duration of the template, calculated using
+        get_waveform_filter_length_in_time, to the file.
+    kwargs : optional key-word arguments
+        Allows unused options to be passed to this function (for modularity)
+    """
+    bank_dict = {}
+    mass1, mass2, spin1z, spin2z = list(zip(*tempBank))
+    bank_dict['mass1'] = mass1
+    bank_dict['mass2'] = mass2
+    bank_dict['spin1z'] = spin1z
+    bank_dict['spin2z'] = spin2z
+
+    # Add other values to the bank dictionary as appropriate
+    if optDict is not None:
+        bank_dict['f_lower'] = numpy.ones_like(mass1) * \
+            optDict['f_low']
+        argument_string = [f'{k}:{v}' for k, v in optDict.items()]
+
+    if optDict is not None and optDict['output_f_final']:
+        bank_dict['f_final'] = numpy.ones_like(mass1) * \
+            optDict['f_upper']
+
+    if approximant:
+        # Store the approximant name as bytes, whether it was supplied as
+        # bytes or as a string
+        appx = approximant if isinstance(approximant, bytes) \
+            else approximant.encode()
+        bank_dict['approximant'] = numpy.repeat(appx, len(mass1))
+
+    if output_duration:
+        appx = approximant if approximant else 'SPAtmplt'
+        tmplt_durations = numpy.zeros_like(mass1)
+        for i in range(len(mass1)):
+            wfrm_length = gwflit(appx,
+                                 mass1=mass1[i],
+                                 mass2=mass2[i],
+                                 f_lower=optDict['f_low'],
+                                 phase_order=7)
+            tmplt_durations[i] = wfrm_length
+        bank_dict['template_duration'] = tmplt_durations
+
+    with HFile(outputFile, 'w') as bankf_out:
+        bankf_out.attrs['program'] = programName
+        if optDict is not None:
+            bankf_out.attrs['arguments'] = argument_string
+        for k, v in bank_dict.items():
+            bankf_out[k] = v
+ + + +
+
+[docs]
+def output_bank_to_file(outputFile, tempBank, **kwargs):
+    """
+    Write a template bank to file, choosing the output format from the file
+    extension: LIGOLW XML (via output_sngl_inspiral_table) for .xml, .xml.gz
+    or .xmlgz, and HDF5 (via output_bank_to_hdf) for .h5, .hdf or .hdf5.
+    Additional keyword arguments are passed through to the relevant writer.
+    """
+    if outputFile.endswith(('.xml', '.xml.gz', '.xmlgz')):
+        output_sngl_inspiral_table(
+            outputFile,
+            tempBank,
+            **kwargs
+        )
+    elif outputFile.endswith(('.h5', '.hdf', '.hdf5')):
+        output_bank_to_hdf(
+            outputFile,
+            tempBank,
+            **kwargs
+        )
+    else:
+        err_msg = f"Unrecognized extension for file {outputFile}."
+        raise ValueError(err_msg)
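+
+# Illustrative usage sketch (not part of the original module): the same bank
+# and keyword arguments can be written as HDF5 or XML purely by choice of
+# file extension. File names and the program name are placeholders.
+example_bank = [(10.0, 5.0, 0.1, 0.0), (30.0, 25.0, -0.2, 0.3)]
+output_bank_to_file('example_bank.hdf', example_bank,
+                    programName='pycbc_example_bank')
+output_bank_to_file('example_bank.xml.gz', example_bank,
+                    programName='pycbc_example_bank')
+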
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/tmpltbank/brute_force_methods.html b/latest/html/_modules/pycbc/tmpltbank/brute_force_methods.html new file mode 100644 index 00000000000..134890be29d --- /dev/null +++ b/latest/html/_modules/pycbc/tmpltbank/brute_force_methods.html @@ -0,0 +1,656 @@ + + + + + + pycbc.tmpltbank.brute_force_methods — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.tmpltbank.brute_force_methods

+import logging
+import numpy
+
+from pycbc.tmpltbank.coord_utils import get_cov_params
+
+logger = logging.getLogger('pycbc.tmpltbank.brute_force_methods')
+
+
+
+[docs] +def get_physical_covaried_masses(xis, bestMasses, bestXis, req_match, + massRangeParams, metricParams, fUpper, + giveUpThresh = 5000): + """ + This function takes the position of a point in the xi parameter space and + iteratively finds a close point in the physical coordinate space (masses + and spins). + + Parameters + ----------- + xis : list or array + Desired position of the point in the xi space. If only N values are + provided and the xi space's dimension is larger then it is assumed that + *any* value in the remaining xi coordinates is acceptable. + bestMasses : list + Contains [totalMass, eta, spin1z, spin2z]. Is a physical position + mapped to xi coordinates in bestXis that is close to the desired point. + This is aimed to give the code a starting point. + bestXis : list + Contains the position of bestMasses in the xi coordinate system. + req_match : float + Desired maximum mismatch between xis and the obtained point. If a point + is found with mismatch < req_match immediately stop and return that + point. A point with this mismatch will not always be found. + massRangeParams : massRangeParameters instance + Instance holding all the details of mass ranges and spin ranges. + metricParams : metricParameters instance + Structure holding all the options for construction of the metric + and the eigenvalues, eigenvectors and covariance matrix + needed to manipulate the space. + fUpper : float + The value of fUpper that was used when obtaining the xi_i + coordinates. This lets us know how to rotate potential physical points + into the correct xi_i space. This must be a key in metricParams.evals, + metricParams.evecs and metricParams.evecsCV + (ie. we must know how to do the transformation for + the given value of fUpper) + giveUpThresh : int, optional (default = 5000) + The program will try this many iterations. If no close matching point + has been found after this it will give up. + + Returns + -------- + mass1 : float + The heavier mass of the obtained point. + mass2 : float + The smaller mass of the obtained point + spin1z : float + The heavier bodies spin of the obtained point. + spin2z : float + The smaller bodies spin of the obtained point. + count : int + How many iterations it took to find the point. For debugging. + mismatch : float + The mismatch between the obtained point and the input xis. + new_xis : list + The position of the point in the xi space + """ + # TUNABLE PARAMETERS GO HERE! + # This states how far apart to scatter test points in the first proposal + origScaleFactor = 1 + + # Set up + xi_size = len(xis) + scaleFactor = origScaleFactor + bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.) 
+ count = 0 + unFixedCount = 0 + currDist = 100000000000000000 + while(1): + # If we are a long way away we use larger jumps + if count: + if currDist > 1 and scaleFactor == origScaleFactor: + scaleFactor = origScaleFactor*10 + # Get a set of test points with mass -> xi mappings + totmass, eta, spin1z, spin2z, mass1, mass2, new_xis = \ + get_mass_distribution([bestChirpmass, bestMasses[1], bestMasses[2], + bestMasses[3]], + scaleFactor, massRangeParams, metricParams, + fUpper) + cDist = (new_xis[0] - xis[0])**2 + for j in range(1,xi_size): + cDist += (new_xis[j] - xis[j])**2 + if (cDist.min() < req_match): + idx = cDist.argmin() + scaleFactor = origScaleFactor + new_xis_list = [new_xis[ldx][idx] for ldx in range(len(new_xis))] + return mass1[idx], mass2[idx], spin1z[idx], spin2z[idx], count, \ + cDist.min(), new_xis_list + if (cDist.min() < currDist): + idx = cDist.argmin() + bestMasses[0] = totmass[idx] + bestMasses[1] = eta[idx] + bestMasses[2] = spin1z[idx] + bestMasses[3] = spin2z[idx] + bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.) + currDist = cDist.min() + unFixedCount = 0 + scaleFactor = origScaleFactor + count += 1 + unFixedCount += 1 + if unFixedCount > giveUpThresh: + # Stop at this point + diff = (bestMasses[0]*bestMasses[0] * (1-4*bestMasses[1]))**0.5 + mass1 = (bestMasses[0] + diff)/2. + mass2 = (bestMasses[0] - diff)/2. + new_xis_list = [new_xis[ldx][0] for ldx in range(len(new_xis))] + return mass1, mass2, bestMasses[2], bestMasses[3], count, \ + currDist, new_xis_list + if not unFixedCount % 100: + scaleFactor *= 2 + if scaleFactor > 64: + scaleFactor = 1 + # Shouldn't be here! + raise RuntimeError
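+
+# Illustrative usage sketch (not part of the original module). It assumes
+# `mass_range_params` and `metric_params` are pre-built massRangeParameters
+# and metricParameters instances and that `f_upper` is one of the frequency
+# cutoffs stored in metric_params.evals/evecs/evecsCV. The seed point
+# ([total mass, eta, spin1z, spin2z] plus its xi position) is illustrative
+# and would normally come from a previously accepted template.
+m1, m2, s1z, s2z, n_iter, mismatch, new_xi_point = get_physical_covaried_masses(
+    xis=[5.0, 0.1], bestMasses=[20.0, 0.2, 0.0, 0.0],
+    bestXis=[5.2, 0.3], req_match=0.0001,
+    massRangeParams=mass_range_params, metricParams=metric_params,
+    fUpper=f_upper)
+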
+ + +
+
+[docs]
+def get_mass_distribution(bestMasses, scaleFactor, massRangeParams,
+                          metricParams, fUpper,
+                          numJumpPoints=100, chirpMassJumpFac=0.0001,
+                          etaJumpFac=0.01, spin1zJumpFac=0.01,
+                          spin2zJumpFac=0.01):
+    """
+    Given a set of masses, this function will create a set of points nearby
+    in the mass space and map these to the xi space.
+
+    Parameters
+    -----------
+    bestMasses : list
+        Contains [ChirpMass, eta, spin1z, spin2z]. Points will be placed
+        around this point.
+    scaleFactor : float
+        This parameter describes the radius away from bestMasses that points
+        will be placed in.
+    massRangeParams : massRangeParameters instance
+        Instance holding all the details of mass ranges and spin ranges.
+    metricParams : metricParameters instance
+        Structure holding all the options for construction of the metric
+        and the eigenvalues, eigenvectors and covariance matrix
+        needed to manipulate the space.
+    fUpper : float
+        The value of fUpper that was used when obtaining the xi_i
+        coordinates. This lets us know how to rotate potential physical points
+        into the correct xi_i space. This must be a key in metricParams.evals,
+        metricParams.evecs and metricParams.evecsCV
+        (ie. we must know how to do the transformation for
+        the given value of fUpper)
+    numJumpPoints : int, optional (default = 100)
+        The number of points that will be generated every iteration
+    chirpMassJumpFac : float, optional (default=0.0001)
+        The jump points will be chosen with fractional variation in chirpMass
+        up to this multiplied by scaleFactor.
+    etaJumpFac : float, optional (default=0.01)
+        The jump points will be chosen with fractional variation in eta
+        up to this multiplied by scaleFactor.
+    spin1zJumpFac : float, optional (default=0.01)
+        The jump points will be chosen with absolute variation in spin1z up to
+        this multiplied by scaleFactor.
+    spin2zJumpFac : float, optional (default=0.01)
+        The jump points will be chosen with absolute variation in spin2z up to
+        this multiplied by scaleFactor.
+
+    Returns
+    --------
+    Totmass : numpy.array
+        Total mass of the resulting points
+    Eta : numpy.array
+        Symmetric mass ratio of the resulting points
+    Spin1z : numpy.array
+        Spin of the heavier body of the resulting points
+    Spin2z : numpy.array
+        Spin of the smaller body of the resulting points
+    Mass1 : numpy.array
+        Mass1 (mass of heavier body) of the resulting points
+    Mass2 : numpy.array
+        Mass2 (mass of smaller body) of the resulting points
+    new_xis : list of numpy.array
+        Position of points in the xi coordinates
+    """
+    # FIXME: It would be better if rejected values could be drawn from the
+    # full possible mass/spin distribution. However speed in this function is
+    # a major factor and must be considered.
+ bestChirpmass = bestMasses[0] + bestEta = bestMasses[1] + bestSpin1z = bestMasses[2] + bestSpin2z = bestMasses[3] + + # Firstly choose a set of values for masses and spins + chirpmass = bestChirpmass * (1 - (numpy.random.random(numJumpPoints)-0.5) \ + * chirpMassJumpFac * scaleFactor ) + etaRange = massRangeParams.maxEta - massRangeParams.minEta + currJumpFac = etaJumpFac * scaleFactor + if currJumpFac > etaRange: + currJumpFac = etaRange + eta = bestEta * ( 1 - (numpy.random.random(numJumpPoints) - 0.5) \ + * currJumpFac) + + maxSpinMag = max(massRangeParams.maxNSSpinMag, massRangeParams.maxBHSpinMag) + minSpinMag = min(massRangeParams.maxNSSpinMag, massRangeParams.maxBHSpinMag) + # Note that these two are cranged by spinxzFac, *not* spinxzFac/spinxz + currJumpFac = spin1zJumpFac * scaleFactor + if currJumpFac > maxSpinMag: + currJumpFac = maxSpinMag + + # Actually set the new spin trial points + if massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag): + curr_spin_1z_jump_fac = currJumpFac + curr_spin_2z_jump_fac = currJumpFac + # Check spins aren't going to be unphysical + if currJumpFac > massRangeParams.maxBHSpinMag: + curr_spin_1z_jump_fac = massRangeParams.maxBHSpinMag + if currJumpFac > massRangeParams.maxNSSpinMag: + curr_spin_2z_jump_fac = massRangeParams.maxNSSpinMag + spin1z = bestSpin1z + ( (numpy.random.random(numJumpPoints) - 0.5) \ + * curr_spin_1z_jump_fac) + spin2z = bestSpin2z + ( (numpy.random.random(numJumpPoints) - 0.5) \ + * curr_spin_2z_jump_fac) + else: + # If maxNSSpinMag is very low (0) and maxBHSpinMag is high we can + # find it hard to place any points. So mix these when + # masses are swapping between the NS and BH. + curr_spin_bh_jump_fac = currJumpFac + curr_spin_ns_jump_fac = currJumpFac + # Check spins aren't going to be unphysical + if currJumpFac > massRangeParams.maxBHSpinMag: + curr_spin_bh_jump_fac = massRangeParams.maxBHSpinMag + if currJumpFac > massRangeParams.maxNSSpinMag: + curr_spin_ns_jump_fac = massRangeParams.maxNSSpinMag + spin1z = numpy.zeros(numJumpPoints, dtype=float) + spin2z = numpy.zeros(numJumpPoints, dtype=float) + split_point = int(numJumpPoints/2) + # So set the first half to be at least within the BH range and the + # second half to be at least within the NS range + spin1z[:split_point] = bestSpin1z + \ + ( (numpy.random.random(split_point) - 0.5)\ + * curr_spin_bh_jump_fac) + spin1z[split_point:] = bestSpin1z + \ + ( (numpy.random.random(numJumpPoints-split_point) - 0.5)\ + * curr_spin_ns_jump_fac) + spin2z[:split_point] = bestSpin2z + \ + ( (numpy.random.random(split_point) - 0.5)\ + * curr_spin_bh_jump_fac) + spin2z[split_point:] = bestSpin2z + \ + ( (numpy.random.random(numJumpPoints-split_point) - 0.5)\ + * curr_spin_ns_jump_fac) + + # Point[0] is always set to the original point + chirpmass[0] = bestChirpmass + eta[0] = bestEta + spin1z[0] = bestSpin1z + spin2z[0] = bestSpin2z + + # Remove points where eta becomes unphysical + eta[eta > massRangeParams.maxEta] = massRangeParams.maxEta + if massRangeParams.minEta: + eta[eta < massRangeParams.minEta] = massRangeParams.minEta + else: + eta[eta < 0.0001] = 0.0001 + + # Total mass, masses and mass diff + totmass = chirpmass / (eta**(3./5.)) + diff = (totmass*totmass * (1-4*eta))**0.5 + mass1 = (totmass + diff)/2. + mass2 = (totmass - diff)/2. 
+ + # Check the validity of the spin values + # Do the first spin + + if maxSpinMag == 0: + # Shortcut if non-spinning + pass + elif massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag): + # Simple case where I don't have to worry about correlation with mass + numploga = abs(spin1z) > massRangeParams.maxBHSpinMag + spin1z[numploga] = 0 + else: + # Do have to consider masses + boundary_mass = massRangeParams.ns_bh_boundary_mass + numploga1 = numpy.logical_and(mass1 >= boundary_mass, + abs(spin1z) <= massRangeParams.maxBHSpinMag) + numploga2 = numpy.logical_and(mass1 < boundary_mass, + abs(spin1z) <= massRangeParams.maxNSSpinMag) + numploga = numpy.logical_or(numploga1, numploga2) + numploga = numpy.logical_not(numploga) + spin1z[numploga] = 0 + + # Same for the second spin + + if maxSpinMag == 0: + # Shortcut if non-spinning + pass + elif massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag): + numplogb = abs(spin2z) > massRangeParams.maxNSSpinMag + spin2z[numplogb] = 0 + else: + # Do have to consider masses + boundary_mass = massRangeParams.ns_bh_boundary_mass + numplogb1 = numpy.logical_and(mass2 >= boundary_mass, + abs(spin2z) <= massRangeParams.maxBHSpinMag) + numplogb2 = numpy.logical_and(mass2 < boundary_mass, + abs(spin2z) <= massRangeParams.maxNSSpinMag) + numplogb = numpy.logical_or(numplogb1, numplogb2) + numplogb = numpy.logical_not(numplogb) + spin2z[numplogb] = 0 + + if (maxSpinMag) and (numploga[0] or numplogb[0]): + raise ValueError("Cannot remove the guide point!") + + # And remove points where the individual masses are outside of the physical + # range. Or the total masses are. + # These "removed" points will have metric distances that will be much, much + # larger than any thresholds used in the functions in brute_force_utils.py + # and will always be rejected. An unphysical value cannot be used as it + # would result in unphysical metric distances and cause failures. + totmass[mass1 < massRangeParams.minMass1*0.9999] = 0.0001 + totmass[mass1 > massRangeParams.maxMass1*1.0001] = 0.0001 + totmass[mass2 < massRangeParams.minMass2*0.9999] = 0.0001 + totmass[mass2 > massRangeParams.maxMass2*1.0001] = 0.0001 + # There is some numerical error which can push this a bit higher. We do + # *not* want to reject the initial guide point. This error comes from + # Masses -> totmass, eta -> masses conversion, we will have points pushing + # onto the boudaries of the space. + totmass[totmass > massRangeParams.maxTotMass*1.0001] = 0.0001 + totmass[totmass < massRangeParams.minTotMass*0.9999] = 0.0001 + if massRangeParams.max_chirp_mass: + totmass[chirpmass > massRangeParams.max_chirp_mass*1.0001] = 0.0001 + if massRangeParams.min_chirp_mass: + totmass[chirpmass < massRangeParams.min_chirp_mass*0.9999] = 0.0001 + + if totmass[0] < 0.00011: + raise ValueError("Cannot remove the guide point!") + + mass1[totmass < 0.00011] = 0.0001 + mass2[totmass < 0.00011] = 0.0001 + + # Then map to xis + new_xis = get_cov_params(mass1, mass2, spin1z, spin2z, + metricParams, fUpper) + return totmass, eta, spin1z, spin2z, mass1, mass2, new_xis
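+
+# Illustrative usage sketch (not part of the original module), using the same
+# assumed pre-built `mass_range_params`, `metric_params` and `f_upper` as in
+# the sketch above. Note that the first entry of the seed point here is the
+# *chirp* mass, not the total mass.
+totmass, eta, spin1z, spin2z, mass1, mass2, trial_xis = get_mass_distribution(
+    [8.7, 0.2, 0.0, 0.0],          # [chirp mass, eta, spin1z, spin2z]
+    scaleFactor=1, massRangeParams=mass_range_params,
+    metricParams=metric_params, fUpper=f_upper)
+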
+ + +
+[docs] +def stack_xi_direction_brute(xis, bestMasses, bestXis, direction_num, + req_match, massRangeParams, metricParams, fUpper, + scaleFactor=0.8, numIterations=3000): + """ + This function is used to assess the depth of the xi_space in a specified + dimension at a specified point in the higher dimensions. It does this by + iteratively throwing points at the space to find maxima and minima. + + Parameters + ----------- + + xis : list or array + Position in the xi space at which to assess the depth. This can be only + a subset of the higher dimensions than that being sampled. + bestMasses : list + Contains [totalMass, eta, spin1z, spin2z]. Is a physical position + mapped to xi coordinates in bestXis that is close to the xis point. + This is aimed to give the code a starting point. + bestXis : list + Contains the position of bestMasses in the xi coordinate system. + direction_num : int + The dimension that you want to assess the depth of (0 = 1, 1 = 2 ...) + req_match : float + When considering points to assess the depth with, only consider points + with a mismatch that is smaller than this with xis. + massRangeParams : massRangeParameters instance + Instance holding all the details of mass ranges and spin ranges. + metricParams : metricParameters instance + Structure holding all the options for construction of the metric + and the eigenvalues, eigenvectors and covariance matrix + needed to manipulate the space. + fUpper : float + The value of fUpper that was used when obtaining the xi_i + coordinates. This lets us know how to rotate potential physical points + into the correct xi_i space. This must be a key in metricParams.evals, + metricParams.evecs and metricParams.evecsCV + (ie. we must know how to do the transformation for + the given value of fUpper) + scaleFactor : float, optional (default = 0.8) + The value of the scale factor that is used when calling + pycbc.tmpltbank.get_mass_distribution. + numIterations : int, optional (default = 3000) + The number of times to make calls to get_mass_distribution when + assessing the maximum/minimum of this parameter space. Making this + smaller makes the code faster, but at the cost of accuracy. + + Returns + -------- + xi_min : float + The minimal value of the specified dimension at the specified point in + parameter space. + xi_max : float + The maximal value of the specified dimension at the specified point in + parameter space. + """ + + # Find minimum + ximin = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \ + req_match, massRangeParams, metricParams, \ + fUpper, find_minimum=True, \ + scaleFactor=scaleFactor, \ + numIterations=numIterations) + + # Find maximum + ximax = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \ + req_match, massRangeParams, metricParams, \ + fUpper, find_minimum=False, \ + scaleFactor=scaleFactor, \ + numIterations=numIterations) + + return ximin, ximax
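+
+# Illustrative usage sketch (not part of the original module): assess the
+# depth of the second xi direction (direction_num=1) at a point specified by
+# its first xi coordinate only. The seed point, match threshold and the
+# pre-built `mass_range_params`, `metric_params` and `f_upper` objects are
+# assumptions, as in the sketches above.
+xi2_min, xi2_max = stack_xi_direction_brute(
+    xis=[5.0], bestMasses=[20.0, 0.2, 0.0, 0.0], bestXis=[5.2, 0.3],
+    direction_num=1, req_match=0.0001,
+    massRangeParams=mass_range_params, metricParams=metric_params,
+    fUpper=f_upper, numIterations=1000)
+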
+ + +
+[docs] +def find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, req_match, \ + massRangeParams, metricParams, fUpper, \ + find_minimum=False, scaleFactor=0.8, \ + numIterations=3000): + """ + This function is used to find the largest or smallest value of the xi + space in a specified + dimension at a specified point in the higher dimensions. It does this by + iteratively throwing points at the space to find extrema. + + Parameters + ----------- + + xis : list or array + Position in the xi space at which to assess the depth. This can be only + a subset of the higher dimensions than that being sampled. + bestMasses : list + Contains [totalMass, eta, spin1z, spin2z]. Is a physical position + mapped to xi coordinates in bestXis that is close to the xis point. + This is aimed to give the code a starting point. + bestXis : list + Contains the position of bestMasses in the xi coordinate system. + direction_num : int + The dimension that you want to assess the depth of (0 = 1, 1 = 2 ...) + req_match : float + When considering points to assess the depth with, only consider points + with a mismatch that is smaller than this with xis. + massRangeParams : massRangeParameters instance + Instance holding all the details of mass ranges and spin ranges. + metricParams : metricParameters instance + Structure holding all the options for construction of the metric + and the eigenvalues, eigenvectors and covariance matrix + needed to manipulate the space. + fUpper : float + The value of fUpper that was used when obtaining the xi_i + coordinates. This lets us know how to rotate potential physical points + into the correct xi_i space. This must be a key in metricParams.evals, + metricParams.evecs and metricParams.evecsCV + (ie. we must know how to do the transformation for + the given value of fUpper) + find_minimum : boolean, optional (default = False) + If True, find the minimum value of the xi direction. If False find the + maximum value. + scaleFactor : float, optional (default = 0.8) + The value of the scale factor that is used when calling + pycbc.tmpltbank.get_mass_distribution. + numIterations : int, optional (default = 3000) + The number of times to make calls to get_mass_distribution when + assessing the maximum/minimum of this parameter space. Making this + smaller makes the code faster, but at the cost of accuracy. + + Returns + -------- + xi_extent : float + The extremal value of the specified dimension at the specified point in + parameter space. + """ + + # Setup + xi_size = len(xis) + bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.) 
+ if find_minimum: + xiextrema = 10000000000 + else: + xiextrema = -100000000000 + + for _ in range(numIterations): + # Evaluate extrema of the xi direction specified + totmass, eta, spin1z, spin2z, _, _, new_xis = \ + get_mass_distribution([bestChirpmass,bestMasses[1],bestMasses[2], + bestMasses[3]], + scaleFactor, massRangeParams, metricParams, + fUpper) + cDist = (new_xis[0] - xis[0])**2 + for j in range(1, xi_size): + cDist += (new_xis[j] - xis[j])**2 + redCDist = cDist[cDist < req_match] + if len(redCDist): + if not find_minimum: + new_xis[direction_num][cDist > req_match] = -10000000 + currXiExtrema = (new_xis[direction_num]).max() + idx = (new_xis[direction_num]).argmax() + else: + new_xis[direction_num][cDist > req_match] = 10000000 + currXiExtrema = (new_xis[direction_num]).min() + idx = (new_xis[direction_num]).argmin() + if ( ((not find_minimum) and (currXiExtrema > xiextrema)) or \ + (find_minimum and (currXiExtrema < xiextrema)) ): + xiextrema = currXiExtrema + bestMasses[0] = totmass[idx] + bestMasses[1] = eta[idx] + bestMasses[2] = spin1z[idx] + bestMasses[3] = spin2z[idx] + bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.) + return xiextrema
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/tmpltbank/calc_moments.html b/latest/html/_modules/pycbc/tmpltbank/calc_moments.html new file mode 100644 index 00000000000..0b975fecd69 --- /dev/null +++ b/latest/html/_modules/pycbc/tmpltbank/calc_moments.html @@ -0,0 +1,683 @@ + + + + + + pycbc.tmpltbank.calc_moments — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.tmpltbank.calc_moments

+# Copyright (C) 2013 Ian W. Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+import logging
+import numpy
+
+from pycbc.tmpltbank.lambda_mapping import generate_mapping
+
+logger = logging.getLogger('pycbc.tmpltbank.calc_moments')
+
+
+
+
+[docs]
+def determine_eigen_directions(metricParams, preserveMoments=False,
+                               vary_fmax=False, vary_density=None):
+    """
+    This function will calculate the coordinate transformations that are
+    needed to rotate from a coordinate system described by the various Lambda
+    components in the frequency expansion, to a coordinate system where the
+    metric is Cartesian.
+
+    Parameters
+    -----------
+    metricParams : metricParameters instance
+        Structure holding all the options for construction of the metric.
+    preserveMoments : boolean, optional (default False)
+        Currently only used for debugging.
+        If set, and the moments structure is already present within
+        metricParams, the moments will not be recalculated.
+    vary_fmax : boolean, optional (default False)
+        If set to False the metric and rotations are calculated once, for the
+        full range of frequency [f_low,f_upper).
+        If set to True the metric and rotations are calculated multiple times,
+        for frequency ranges [f_low,f_low + i*vary_density), where i starts at
+        1 and runs up until f_low + (i+1)*vary_density > f_upper.
+        Thus values greater than f_upper are *not* computed.
+        The calculation for the full range [f_low,f_upper) is also done.
+    vary_density : float, optional
+        If vary_fmax is True, this will be used in computing the frequency
+        ranges as described for vary_fmax.
+
+    Returns
+    --------
+    metricParams : metricParameters instance
+        Structure holding all the options for construction of the metric.
+        **THIS FUNCTION ONLY RETURNS THE CLASS**
+        The following will be **added** to this structure
+    metricParams.evals : Dictionary of numpy.array
+        Each entry in the dictionary corresponds to the different frequency
+        ranges described in vary_fmax. If vary_fmax = False, the only entry
+        will be f_upper, which corresponds to integrals in [f_low,f_upper).
+        This entry is always present. Each other entry will use floats as
+        keys to the dictionary. These floats give the upper frequency cutoff
+        when it is varying.
+        Each numpy.array contains the eigenvalues which, with the eigenvectors
+        in evecs, are needed to rotate the
+        coordinate system to one in which the metric is the identity matrix.
+    metricParams.evecs : Dictionary of numpy.matrix
+        Each entry in the dictionary is as described under evals.
+        Each numpy.matrix contains the eigenvectors which, with the
+        eigenvalues in evals, are needed to rotate the
+        coordinate system to one in which the metric is the identity matrix.
+    metricParams.metric : Dictionary of numpy.matrix
+        Each entry in the dictionary is as described under evals.
+        Each numpy.matrix contains the metric of the parameter space in the
+        Lambda_i coordinate system.
+    metricParams.moments : Moments structure
+        See the structure documentation for a description of this. This
+        contains the result of all the integrals used in computing the metrics
+        above. It can be used for the ethinca components calculation, or other
+        similar calculations.
+ """ + + evals = {} + evecs = {} + metric = {} + unmax_metric = {} + + # First step is to get the moments needed to calculate the metric + if not (metricParams.moments and preserveMoments): + get_moments(metricParams, vary_fmax=vary_fmax, + vary_density=vary_density) + + # What values are going to be in the moments + # J7 is the normalization factor so it *MUST* be present + list = metricParams.moments['J7'].keys() + + # We start looping over every item in the list of metrics + for item in list: + # Here we convert the moments into a form easier to use here + Js = {} + for i in range(-7,18): + Js[i] = metricParams.moments['J%d'%(i)][item] + + logJs = {} + for i in range(-1,18): + logJs[i] = metricParams.moments['log%d'%(i)][item] + + loglogJs = {} + for i in range(-1,18): + loglogJs[i] = metricParams.moments['loglog%d'%(i)][item] + + logloglogJs = {} + for i in range(-1,18): + logloglogJs[i] = metricParams.moments['logloglog%d'%(i)][item] + + loglogloglogJs = {} + for i in range(-1,18): + loglogloglogJs[i] = metricParams.moments['loglogloglog%d'%(i)][item] + + mapping = generate_mapping(metricParams.pnOrder) + + # Calculate the metric + gs, unmax_metric_curr = calculate_metric(Js, logJs, loglogJs, + logloglogJs, loglogloglogJs, mapping) + metric[item] = gs + unmax_metric[item] = unmax_metric_curr + + # And the eigenvalues + evals[item], evecs[item] = numpy.linalg.eig(gs) + + # Numerical error can lead to small negative eigenvalues. + for i in range(len(evals[item])): + if evals[item][i] < 0: + # Due to numerical imprecision the very small eigenvalues can + # be negative. Make these positive. + evals[item][i] = -evals[item][i] + if evecs[item][i,i] < 0: + # We demand a convention that all diagonal terms in the matrix + # of eigenvalues are positive. + # This is done to help visualization of the spaces (increasing + # mchirp always goes the same way) + evecs[item][:,i] = - evecs[item][:,i] + + metricParams.evals = evals + metricParams.evecs = evecs + metricParams.metric = metric + metricParams.time_unprojected_metric = unmax_metric + + return metricParams
+ + +
+[docs] +def get_moments(metricParams, vary_fmax=False, vary_density=None): + """ + This function will calculate the various integrals (moments) that are + needed to compute the metric used in template bank placement and + coincidence. + + Parameters + ----------- + metricParams : metricParameters instance + Structure holding all the options for construction of the metric. + vary_fmax : boolean, optional (default False) + If set to False the metric and rotations are calculated once, for the + full range of frequency [f_low,f_upper). + If set to True the metric and rotations are calculated multiple times, + for frequency ranges [f_low,f_low + i*vary_density), where i starts at + 1 and runs up until f_low + (i+1)*vary_density > f_upper. + Thus values greater than f_upper are *not* computed. + The calculation for the full range [f_low,f_upper) is also done. + vary_density : float, optional + If vary_fmax is True, this will be used in computing the frequency + ranges as described for vary_fmax. + + Returns + -------- + None : None + **THIS FUNCTION RETURNS NOTHING** + The following will be **added** to the metricParams structure + metricParams.moments : Moments structure + This contains the result of all the integrals used in computing the + metrics above. It can be used for the ethinca components calculation, + or other similar calculations. This is composed of two compound + dictionaries. The first entry indicates which moment is being + calculated and the second entry indicates the upper frequency cutoff + that was used. + + In all cases x = f/f0. + + For the first entries the options are: + + moments['J%d' %(i)][f_cutoff] + This stores the integral of + x**((-i)/3.) * delta X / PSD(x) + + moments['log%d' %(i)][f_cutoff] + This stores the integral of + (numpy.log(x**(1./3.))) x**((-i)/3.) * delta X / PSD(x) + + moments['loglog%d' %(i)][f_cutoff] + This stores the integral of + (numpy.log(x**(1./3.)))**2 x**((-i)/3.) * delta X / PSD(x) + + moments['loglog%d' %(i)][f_cutoff] + This stores the integral of + (numpy.log(x**(1./3.)))**3 x**((-i)/3.) * delta X / PSD(x) + + moments['loglog%d' %(i)][f_cutoff] + This stores the integral of + (numpy.log(x**(1./3.)))**4 x**((-i)/3.) * delta X / PSD(x) + + The second entry stores the frequency cutoff used when computing + the integral. See description of the vary_fmax option above. + + All of these values are nomralized by a factor of + + x**((-7)/3.) * delta X / PSD(x) + + The normalization factor can be obtained in + + moments['I7'][f_cutoff] + """ + # NOTE: Unless the TaylorR2F4 metric is used the log^3 and log^4 terms are + # not needed. As this calculation is not too slow compared to bank + # placement we just do this anyway. + + psd_amp = metricParams.psd.data + psd_f = numpy.arange(len(psd_amp), dtype=float) * metricParams.deltaF + new_f, new_amp = interpolate_psd(psd_f, psd_amp, metricParams.deltaF) + + # Need I7 first as this is the normalization factor + funct = lambda x,f0: 1 + I7 = calculate_moment(new_f, new_amp, metricParams.fLow, \ + metricParams.fUpper, metricParams.f0, funct,\ + vary_fmax=vary_fmax, vary_density=vary_density) + + # Do all the J moments + moments = {} + moments['I7'] = I7 + for i in range(-7,18): + funct = lambda x,f0: x**((-i+7)/3.) 
+ moments['J%d' %(i)] = calculate_moment(new_f, new_amp, \ + metricParams.fLow, metricParams.fUpper, \ + metricParams.f0, funct, norm=I7, \ + vary_fmax=vary_fmax, vary_density=vary_density) + + # Do the logx multiplied by some power terms + for i in range(-1,18): + funct = lambda x,f0: (numpy.log((x*f0)**(1./3.))) * x**((-i+7)/3.) + moments['log%d' %(i)] = calculate_moment(new_f, new_amp, \ + metricParams.fLow, metricParams.fUpper, \ + metricParams.f0, funct, norm=I7, \ + vary_fmax=vary_fmax, vary_density=vary_density) + + # Do the loglog term + for i in range(-1,18): + funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**2 * x**((-i+7)/3.) + moments['loglog%d' %(i)] = calculate_moment(new_f, new_amp, \ + metricParams.fLow, metricParams.fUpper, \ + metricParams.f0, funct, norm=I7, \ + vary_fmax=vary_fmax, vary_density=vary_density) + + # Do the logloglog term + for i in range(-1,18): + funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**3 * x**((-i+7)/3.) + moments['logloglog%d' %(i)] = calculate_moment(new_f, new_amp, \ + metricParams.fLow, metricParams.fUpper, \ + metricParams.f0, funct, norm=I7, \ + vary_fmax=vary_fmax, vary_density=vary_density) + + # Do the logloglog term + for i in range(-1,18): + funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**4 * x**((-i+7)/3.) + moments['loglogloglog%d' %(i)] = calculate_moment(new_f, new_amp, \ + metricParams.fLow, metricParams.fUpper, \ + metricParams.f0, funct, norm=I7, \ + vary_fmax=vary_fmax, vary_density=vary_density) + + metricParams.moments = moments
+ + +
+[docs] +def interpolate_psd(psd_f, psd_amp, deltaF): + """ + Function to interpolate a PSD to a different value of deltaF. Uses linear + interpolation. + + Parameters + ---------- + psd_f : numpy.array or list or similar + List of the frequencies contained within the PSD. + psd_amp : numpy.array or list or similar + List of the PSD values at the frequencies in psd_f. + deltaF : float + Value of deltaF to interpolate the PSD to. + + Returns + -------- + new_psd_f : numpy.array + Array of the frequencies contained within the interpolated PSD + new_psd_amp : numpy.array + Array of the interpolated PSD values at the frequencies in new_psd_f. + """ + # In some cases this will be a no-op. I thought about removing this, but + # this function can take unequally sampled PSDs and it is difficult to + # check for this. As this function runs quickly anyway (compared to the + # moment calculation) I decided to always interpolate. + + new_psd_f = [] + new_psd_amp = [] + fcurr = psd_f[0] + + for i in range(len(psd_f) - 1): + f_low = psd_f[i] + f_high = psd_f[i+1] + amp_low = psd_amp[i] + amp_high = psd_amp[i+1] + while(1): + if fcurr > f_high: + break + new_psd_f.append(fcurr) + gradient = (amp_high - amp_low) / (f_high - f_low) + fDiff = fcurr - f_low + new_psd_amp.append(amp_low + fDiff * gradient) + fcurr = fcurr + deltaF + return numpy.asarray(new_psd_f), numpy.asarray(new_psd_amp)
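+
+# Illustrative usage sketch (not part of the original module): re-sample a
+# coarsely sampled PSD onto a finer frequency spacing. The analytic PSD shape
+# used here is arbitrary and only serves as example input.
+coarse_f = numpy.arange(10., 1024., 4.)
+coarse_amp = 1e-46 * (1. + (50. / coarse_f)**4)
+fine_f, fine_amp = interpolate_psd(coarse_f, coarse_amp, deltaF=0.25)
+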
+ + + +
+[docs] +def calculate_moment(psd_f, psd_amp, fmin, fmax, f0, funct, + norm=None, vary_fmax=False, vary_density=None): + """ + Function for calculating one of the integrals used to construct a template + bank placement metric. The integral calculated will be + + \int funct(x) * (psd_x)**(-7./3.) * delta_x / PSD(x) + + where x = f / f0. The lower frequency cutoff is given by fmin, see + the parameters below for details on how the upper frequency cutoff is + chosen + + Parameters + ----------- + psd_f : numpy.array + numpy array holding the set of evenly spaced frequencies used in the PSD + psd_amp : numpy.array + numpy array holding the PSD values corresponding to the psd_f + frequencies + fmin : float + The lower frequency cutoff used in the calculation of the integrals + used to obtain the metric. + fmax : float + The upper frequency cutoff used in the calculation of the integrals + used to obtain the metric. This can be varied (see the vary_fmax + option below). + f0 : float + This is an arbitrary scaling factor introduced to avoid the potential + for numerical overflow when calculating this. Generally the default + value (70) is safe here. **IMPORTANT, if you want to calculate the + ethinca metric components later this MUST be set equal to f_low.** + funct : Lambda function + The function to use when computing the integral as described above. + norm : Dictionary of floats + If given then moment[f_cutoff] will be divided by norm[f_cutoff] + vary_fmax : boolean, optional (default False) + If set to False the metric and rotations are calculated once, for the + full range of frequency [f_low,f_upper). + If set to True the metric and rotations are calculated multiple times, + for frequency ranges [f_low,f_low + i*vary_density), where i starts at + 1 and runs up until f_low + (i+1)*vary_density > f_upper. + Thus values greater than f_upper are *not* computed. + The calculation for the full range [f_low,f_upper) is also done. + vary_density : float, optional + If vary_fmax is True, this will be used in computing the frequency + ranges as described for vary_fmax. + + Returns + -------- + moment : Dictionary of floats + moment[f_cutoff] will store the value of the moment at the frequency + cutoff given by f_cutoff. + """ + + # Must ensure deltaF in psd_f is constant + psd_x = psd_f / f0 + deltax = psd_x[1] - psd_x[0] + + mask = numpy.logical_and(psd_f > fmin, psd_f < fmax) + psdf_red = psd_f[mask] + comps_red = psd_x[mask] ** (-7./3.) * funct(psd_x[mask], f0) * deltax / \ + psd_amp[mask] + moment = {} + moment[fmax] = comps_red.sum() + if norm: + moment[fmax] = moment[fmax] / norm[fmax] + if vary_fmax: + for t_fmax in numpy.arange(fmin + vary_density, fmax, vary_density): + moment[t_fmax] = comps_red[psdf_red < t_fmax].sum() + if norm: + moment[t_fmax] = moment[t_fmax] / norm[t_fmax] + return moment
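+
+# Illustrative usage sketch (not part of the original module), following on
+# from the interpolated PSD above: compute the normalisation moment (the same
+# funct used for I7 in get_moments) between 15 Hz and 1000 Hz with f0 = 70.
+norm_moment = calculate_moment(fine_f, fine_amp, fmin=15., fmax=1000.,
+                               f0=70., funct=lambda x, f0: 1)
+# norm_moment is a dictionary keyed by the upper cutoff, here {1000.0: ...}.
+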
+ + +
+[docs] +def calculate_metric(Js, logJs, loglogJs, logloglogJs, loglogloglogJs, \ + mapping): + """ + This function will take the various integrals calculated by get_moments and + convert this into a metric for the appropriate parameter space. + + Parameters + ----------- + Js : Dictionary + The list of (log^0 x) * x**(-i/3) integrals computed by get_moments() + The index is Js[i] + logJs : Dictionary + The list of (log^1 x) * x**(-i/3) integrals computed by get_moments() + The index is logJs[i] + loglogJs : Dictionary + The list of (log^2 x) * x**(-i/3) integrals computed by get_moments() + The index is loglogJs[i] + logloglogJs : Dictionary + The list of (log^3 x) * x**(-i/3) integrals computed by get_moments() + The index is logloglogJs[i] + loglogloglogJs : Dictionary + The list of (log^4 x) * x**(-i/3) integrals computed by get_moments() + The index is loglogloglogJs[i] + mapping : dictionary + Used to identify which Lambda components are active in this parameter + space and map these to entries in the metric matrix. + + Returns + -------- + metric : numpy.matrix + The resulting metric. + """ + + # How many dimensions in the parameter space? + maxLen = len(mapping.keys()) + + metric = numpy.zeros(shape=(maxLen,maxLen), dtype=float) + unmax_metric = numpy.zeros(shape=(maxLen+1,maxLen+1), dtype=float) + + for i in range(16): + for j in range(16): + calculate_metric_comp(metric, unmax_metric, i, j, Js, + logJs, loglogJs, logloglogJs, + loglogloglogJs, mapping) + return metric, unmax_metric
+ + + +
+[docs] +def calculate_metric_comp(gs, unmax_metric, i, j, Js, logJs, loglogJs, + logloglogJs, loglogloglogJs, mapping): + """ + Used to compute part of the metric. Only call this from within + calculate_metric(). Please see the documentation for that function. + """ + # Time term in unmax_metric. Note that these terms are recomputed a bunch + # of time, but this cost is insignificant compared to computing the moments + unmax_metric[-1,-1] = (Js[1] - Js[4]*Js[4]) + + # Normal terms + if 'Lambda%d'%i in mapping and 'Lambda%d'%j in mapping: + gammaij = Js[17-i-j] - Js[12-i]*Js[12-j] + gamma0i = (Js[9-i] - Js[4]*Js[12-i]) + gamma0j = (Js[9-j] - Js[4] * Js[12-j]) + gs[mapping['Lambda%d'%i],mapping['Lambda%d'%j]] = \ + 0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4])) + unmax_metric[mapping['Lambda%d'%i], -1] = gamma0i + unmax_metric[-1, mapping['Lambda%d'%j]] = gamma0j + unmax_metric[mapping['Lambda%d'%i],mapping['Lambda%d'%j]] = gammaij + # Normal,log cross terms + if 'Lambda%d'%i in mapping and 'LogLambda%d'%j in mapping: + gammaij = logJs[17-i-j] - logJs[12-j] * Js[12-i] + gamma0i = (Js[9-i] - Js[4] * Js[12-i]) + gamma0j = logJs[9-j] - logJs[12-j] * Js[4] + gs[mapping['Lambda%d'%i],mapping['LogLambda%d'%j]] = \ + gs[mapping['LogLambda%d'%j],mapping['Lambda%d'%i]] = \ + 0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4])) + unmax_metric[mapping['Lambda%d'%i], -1] = gamma0i + unmax_metric[-1, mapping['Lambda%d'%i]] = gamma0i + unmax_metric[-1, mapping['LogLambda%d'%j]] = gamma0j + unmax_metric[mapping['LogLambda%d'%j], -1] = gamma0j + unmax_metric[mapping['Lambda%d'%i],mapping['LogLambda%d'%j]] = gammaij + unmax_metric[mapping['LogLambda%d'%j],mapping['Lambda%d'%i]] = gammaij + # Log,log terms + if 'LogLambda%d'%i in mapping and 'LogLambda%d'%j in mapping: + gammaij = loglogJs[17-i-j] - logJs[12-j] * logJs[12-i] + gamma0i = (logJs[9-i] - Js[4] * logJs[12-i]) + gamma0j = logJs[9-j] - logJs[12-j] * Js[4] + gs[mapping['LogLambda%d'%i],mapping['LogLambda%d'%j]] = \ + 0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4])) + unmax_metric[mapping['LogLambda%d'%i], -1] = gamma0i + unmax_metric[-1, mapping['LogLambda%d'%j]] = gamma0j + unmax_metric[mapping['LogLambda%d'%i],mapping['LogLambda%d'%j]] =\ + gammaij + + # Normal,loglog cross terms + if 'Lambda%d'%i in mapping and 'LogLogLambda%d'%j in mapping: + gammaij = loglogJs[17-i-j] - loglogJs[12-j] * Js[12-i] + gamma0i = (Js[9-i] - Js[4] * Js[12-i]) + gamma0j = loglogJs[9-j] - loglogJs[12-j] * Js[4] + gs[mapping['Lambda%d'%i],mapping['LogLogLambda%d'%j]] = \ + gs[mapping['LogLogLambda%d'%j],mapping['Lambda%d'%i]] = \ + 0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4])) + unmax_metric[mapping['Lambda%d'%i], -1] = gamma0i + unmax_metric[-1, mapping['Lambda%d'%i]] = gamma0i + unmax_metric[-1, mapping['LogLogLambda%d'%j]] = gamma0j + unmax_metric[mapping['LogLogLambda%d'%j], -1] = gamma0j + unmax_metric[mapping['Lambda%d'%i],mapping['LogLogLambda%d'%j]] = \ + gammaij + unmax_metric[mapping['LogLogLambda%d'%j],mapping['Lambda%d'%i]] = \ + gammaij + + # log,loglog cross terms + if 'LogLambda%d'%i in mapping and 'LogLogLambda%d'%j in mapping: + gammaij = logloglogJs[17-i-j] - loglogJs[12-j] * logJs[12-i] + gamma0i = (logJs[9-i] - Js[4] * logJs[12-i]) + gamma0j = loglogJs[9-j] - loglogJs[12-j] * Js[4] + gs[mapping['LogLambda%d'%i],mapping['LogLogLambda%d'%j]] = \ + gs[mapping['LogLogLambda%d'%j],mapping['LogLambda%d'%i]] = \ + 0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4])) + unmax_metric[mapping['LogLambda%d'%i], -1] = gamma0i + 
unmax_metric[-1, mapping['LogLambda%d'%i]] = gamma0i + unmax_metric[-1, mapping['LogLogLambda%d'%j]] = gamma0j + unmax_metric[mapping['LogLogLambda%d'%j], -1] = gamma0j + unmax_metric[mapping['LogLambda%d'%i],mapping['LogLogLambda%d'%j]] = \ + gammaij + unmax_metric[mapping['LogLogLambda%d'%j],mapping['LogLambda%d'%i]] = \ + gammaij + + # Loglog,loglog terms + if 'LogLogLambda%d'%i in mapping and 'LogLogLambda%d'%j in mapping: + gammaij = loglogloglogJs[17-i-j] - loglogJs[12-j] * loglogJs[12-i] + gamma0i = (loglogJs[9-i] - Js[4] * loglogJs[12-i]) + gamma0j = loglogJs[9-j] - loglogJs[12-j] * Js[4] + gs[mapping['LogLogLambda%d'%i],mapping['LogLogLambda%d'%j]] = \ + 0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4])) + unmax_metric[mapping['LogLogLambda%d'%i], -1] = gamma0i + unmax_metric[-1, mapping['LogLogLambda%d'%j]] = gamma0j + unmax_metric[mapping['LogLogLambda%d'%i],mapping['LogLogLambda%d'%j]] =\ + gammaij
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/tmpltbank/coord_utils.html b/latest/html/_modules/pycbc/tmpltbank/coord_utils.html new file mode 100644 index 00000000000..7d7312c36bf --- /dev/null +++ b/latest/html/_modules/pycbc/tmpltbank/coord_utils.html @@ -0,0 +1,1022 @@ + + + + + + pycbc.tmpltbank.coord_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.tmpltbank.coord_utils

+# Copyright (C) 2013 Ian W. Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+import logging
+import numpy
+
+from pycbc.tmpltbank.lambda_mapping import get_chirp_params
+from pycbc import conversions
+from pycbc import pnutils
+from pycbc.neutron_stars import load_ns_sequence
+
+logger = logging.getLogger('pycbc.tmpltbank.coord_utils')
+
+
+
+[docs] +def estimate_mass_range(numPoints, massRangeParams, metricParams, fUpper,\ + covary=True): + """ + This function will generate a large set of points with random masses and + spins (using pycbc.tmpltbank.get_random_mass) and translate these points + into the xi_i coordinate system for the given upper frequency cutoff. + + Parameters + ---------- + numPoints : int + Number of systems to simulate + massRangeParams : massRangeParameters instance + Instance holding all the details of mass ranges and spin ranges. + metricParams : metricParameters instance + Structure holding all the options for construction of the metric + and the eigenvalues, eigenvectors and covariance matrix + needed to manipulate the space. + fUpper : float + The value of fUpper to use when getting the mu coordinates from the + lambda coordinates. This must be a key in metricParams.evals and + metricParams.evecs (ie. we must know how to do the transformation for + the given value of fUpper). It also must be a key in + metricParams.evecsCV if covary=True. + covary : boolean, optional (default = True) + If this is given then evecsCV will be used to rotate from the Cartesian + coordinate system into the principal coordinate direction (xi_i). If + not given then points in the original Cartesian coordinates are + returned. + + + Returns + ------- + xis : numpy.array + A list of the positions of each point in the xi_i coordinate system. + """ + vals_set = get_random_mass(numPoints, massRangeParams) + mass1 = vals_set[0] + mass2 = vals_set[1] + spin1z = vals_set[2] + spin2z = vals_set[3] + if covary: + lambdas = get_cov_params(mass1, mass2, spin1z, spin2z, metricParams, + fUpper) + else: + lambdas = get_conv_params(mass1, mass2, spin1z, spin2z, metricParams, + fUpper) + + return numpy.array(lambdas)
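+
+# Illustrative usage sketch (not part of the original module). It assumes
+# `mass_range_params` and `metric_params` are pre-built massRangeParameters
+# and metricParameters instances (with evals, evecs and evecsCV computed) and
+# that `f_upper` is one of their frequency cutoff keys.
+xi_points = estimate_mass_range(10000, mass_range_params, metric_params,
+                                f_upper, covary=True)
+# xi_points has one row per xi coordinate and one column per simulated system.
+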
+ + +
+[docs] +def get_random_mass_point_particles(numPoints, massRangeParams): + """ + This function will generate a large set of points within the chosen mass + and spin space. It will also return the corresponding PN spin coefficients + for ease of use later (though these may be removed at some future point). + + Parameters + ---------- + numPoints : int + Number of systems to simulate + massRangeParams : massRangeParameters instance + Instance holding all the details of mass ranges and spin ranges. + + Returns + -------- + mass1 : float + Mass of heavier body. + mass2 : float + Mass of lighter body. + spin1z : float + Spin of body 1. + spin2z : float + Spin of body 2. + """ + + # WARNING: We expect mass1 > mass2 ALWAYS + + # First we choose the total masses from a unifrom distribution in mass + # to the -5/3. power. + mass = numpy.random.random(numPoints) * \ + (massRangeParams.minTotMass**(-5./3.) \ + - massRangeParams.maxTotMass**(-5./3.)) \ + + massRangeParams.maxTotMass**(-5./3.) + mass = mass**(-3./5.) + + # Next we choose the mass ratios, this will take different limits based on + # the value of total mass + maxmass2 = numpy.minimum(mass/2., massRangeParams.maxMass2) + minmass1 = numpy.maximum(massRangeParams.minMass1, mass/2.) + mineta = numpy.maximum(massRangeParams.minCompMass \ + * (mass-massRangeParams.minCompMass)/(mass*mass), \ + massRangeParams.maxCompMass \ + * (mass-massRangeParams.maxCompMass)/(mass*mass)) + # Note that mineta is a numpy.array because mineta depends on the total + # mass. Therefore this is not precomputed in the massRangeParams instance + if massRangeParams.minEta: + mineta = numpy.maximum(massRangeParams.minEta, mineta) + # Eta also restricted by chirp mass restrictions + if massRangeParams.min_chirp_mass: + eta_val_at_min_chirp = massRangeParams.min_chirp_mass / mass + eta_val_at_min_chirp = eta_val_at_min_chirp**(5./3.) + mineta = numpy.maximum(mineta, eta_val_at_min_chirp) + + maxeta = numpy.minimum(massRangeParams.maxEta, maxmass2 \ + * (mass - maxmass2) / (mass*mass)) + maxeta = numpy.minimum(maxeta, minmass1 \ + * (mass - minmass1) / (mass*mass)) + # max eta also affected by chirp mass restrictions + if massRangeParams.max_chirp_mass: + eta_val_at_max_chirp = massRangeParams.max_chirp_mass / mass + eta_val_at_max_chirp = eta_val_at_max_chirp**(5./3.) + maxeta = numpy.minimum(maxeta, eta_val_at_max_chirp) + + if (maxeta < mineta).any(): + errMsg = "ERROR: Maximum eta is smaller than minimum eta!!" + raise ValueError(errMsg) + eta = numpy.random.random(numPoints) * (maxeta - mineta) + mineta + + # Also calculate the component masses; mass1 > mass2 + diff = (mass*mass * (1-4*eta))**0.5 + mass1 = (mass + diff)/2. + mass2 = (mass - diff)/2. + # Check the masses are where we want them to be (allowing some floating + # point rounding error). + if (mass1 > massRangeParams.maxMass1*1.001).any() \ + or (mass1 < massRangeParams.minMass1*0.999).any(): + errMsg = "Mass1 is not within the specified mass range." + raise ValueError(errMsg) + if (mass2 > massRangeParams.maxMass2*1.001).any() \ + or (mass2 < massRangeParams.minMass2*0.999).any(): + errMsg = "Mass2 is not within the specified mass range." + raise ValueError(errMsg) + + # Next up is the spins. 
First check if we have non-zero spins + if massRangeParams.maxNSSpinMag == 0 and massRangeParams.maxBHSpinMag == 0: + spin1z = numpy.zeros(numPoints,dtype=float) + spin2z = numpy.zeros(numPoints,dtype=float) + elif massRangeParams.nsbhFlag: + # Spin 1 first + mspin = numpy.zeros(len(mass1)) + mspin += massRangeParams.maxBHSpinMag + spin1z = (2*numpy.random.random(numPoints) - 1) * mspin + # Then spin2 + mspin = numpy.zeros(len(mass2)) + mspin += massRangeParams.maxNSSpinMag + spin2z = (2*numpy.random.random(numPoints) - 1) * mspin + else: + boundary_mass = massRangeParams.ns_bh_boundary_mass + # Spin 1 first + mspin = numpy.zeros(len(mass1)) + mspin += massRangeParams.maxNSSpinMag + mspin[mass1 > boundary_mass] = massRangeParams.maxBHSpinMag + spin1z = (2*numpy.random.random(numPoints) - 1) * mspin + # Then spin 2 + mspin = numpy.zeros(len(mass2)) + mspin += massRangeParams.maxNSSpinMag + mspin[mass2 > boundary_mass] = massRangeParams.maxBHSpinMag + spin2z = (2*numpy.random.random(numPoints) - 1) * mspin + + return mass1, mass2, spin1z, spin2z
+ + +
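The sampling strategy above (total mass drawn uniformly in M**(-5/3), then eta drawn uniformly between the allowed bounds, then component masses recovered) can be exercised in isolation. The following is a minimal standalone sketch with illustrative, hard-coded bounds; the real code derives all bounds from the massRangeParams instance.

import numpy

num_points = 5
min_tot_mass, max_tot_mass = 2.0, 100.0   # illustrative values only

# Uniform in M**(-5/3), then invert back to total mass
mtot = numpy.random.random(num_points) * \
    (min_tot_mass**(-5./3.) - max_tot_mass**(-5./3.)) + max_tot_mass**(-5./3.)
mtot = mtot**(-3./5.)

# Uniform in eta within fixed illustrative bounds (the real code computes
# mineta/maxeta per system from the component-mass and chirp-mass limits)
eta = numpy.random.random(num_points) * (0.25 - 0.10) + 0.10

# Recover component masses; mass1 >= mass2 by construction
diff = (mtot * mtot * (1 - 4 * eta))**0.5
mass1 = (mtot + diff) / 2.
mass2 = (mtot - diff) / 2.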
+[docs] +def get_random_mass(numPoints, massRangeParams, eos='2H'): + """ + This function will generate a large set of points within the chosen mass + and spin space, and with the desired minimum remnant disk mass (this applies + to NS-BH systems only). It will also return the corresponding PN spin + coefficients for ease of use later (though these may be removed at some + future point). + + Parameters + ---------- + numPoints : int + Number of systems to simulate + massRangeParams : massRangeParameters instance + Instance holding all the details of mass ranges and spin ranges. + eos : string + Name of equation of state of neutron star. + + Returns + -------- + mass1 : float + Mass of heavier body. + mass2 : float + Mass of lighter body. + spin1z : float + Spin of body 1. + spin2z : float + Spin of body 2. + """ + + # WARNING: We expect mass1 > mass2 ALWAYS + + # Check if EM constraints are required, i.e. if the systems must produce + # a minimum remnant disk mass. If this is not the case, proceed treating + # the systems as point particle binaries + if massRangeParams.remnant_mass_threshold is None: + mass1, mass2, spin1z, spin2z = \ + get_random_mass_point_particles(numPoints, massRangeParams) + # otherwise, load EOS dependent data, generate the EM constraint + # (i.e. compute the minimum symmetric mass ratio needed to + # generate a given remnant disk mass as a function of the NS + # mass and the BH spin along z) and then proceed by accepting + # only systems that can yield (at least) the desired remnant + # disk mass and that pass the mass and spin range cuts. + else: + max_ns_g_mass = load_ns_sequence(massRangeParams.ns_eos)[1] + boundary_mass = massRangeParams.ns_bh_boundary_mass + if max_ns_g_mass < boundary_mass: + warn_msg = "WARNING: " + warn_msg += "Option of ns-bh-boundary-mass is %s " %(boundary_mass) + warn_msg += "which is higher than the maximum NS gravitational " + warn_msg += "mass admitted by the EOS that was prescribed " + warn_msg += "(%s). " %(max_ns_g_mass) + warn_msg += "The code will proceed using the latter value " + warn_msg += "as the boundary mass." + logger.warning(warn_msg) + boundary_mass = max_ns_g_mass + + # Empty arrays to store points that pass all cuts + mass1_out = [] + mass2_out = [] + spin1z_out = [] + spin2z_out = [] + + # As the EM cut can remove several randomly generated + # binaries, track the number of accepted points that pass + # all cuts and stop only once enough of them are generated + numPointsFound = 0 + while numPointsFound < numPoints: + # Generate the random points within the required mass + # and spin cuts + mass1, mass2, spin1z, spin2z = \ + get_random_mass_point_particles(numPoints-numPointsFound, + massRangeParams) + + # Now proceed with cutting out EM dim systems + # Use a logical mask to track points that do not correspond to + # BBHs. The remaining points will be BNSs and NSBHs. + # Further down, EM-dim NSBHs will also be removed. + mask_not_bbh = numpy.zeros(len(mass1), dtype=bool) + + # Keep a point if: + # 1) the secondary object is not a BH (mass2 < boundary mass) + # [Store masses and spins of non BBH] + mask_not_bbh[mass2 < boundary_mass] = True + mass1_not_bbh = mass1[mask_not_bbh] + mass2_not_bbh = mass2[mask_not_bbh] + spin1z_not_bbh = spin1z[mask_not_bbh] + spin2z_not_bbh = spin2z[mask_not_bbh] + # 2) and if the primary mass is a NS (i.e., it is a BNS), or... 
+ mask_nsbh = numpy.zeros(len(mass1_not_bbh), dtype=bool) + # [mask_nsbh identifies NSBH systems] + mask_nsbh[mass1_not_bbh > boundary_mass] = True + # [mask_bns identifies BNS systems] + mask_bns = ~mask_nsbh + # [Store masses and spins of BNSs] + mass1_bns = mass1_not_bbh[mask_bns] + mass2_bns = mass2_not_bbh[mask_bns] + spin1z_bns = spin1z_not_bbh[mask_bns] + spin2z_bns = spin2z_not_bbh[mask_bns] + # 3) ...it is an NS-BH with remnant mass greater than the threshold + # required to have a counterpart + # [Store masses and spins of all NSBHs] + mass1_nsbh = mass1_not_bbh[mask_nsbh] + mass2_nsbh = mass2_not_bbh[mask_nsbh] + spin1z_nsbh = spin1z_not_bbh[mask_nsbh] + spin2z_nsbh = spin2z_not_bbh[mask_nsbh] + # [Store etas of all NSBHs] + eta_nsbh = conversions.eta_from_mass1_mass2(mass1_nsbh, mass2_nsbh) + # [mask_bright_nsbh will identify NSBH systems with remnant mass + # above the threshold] + mask_bright_nsbh = numpy.zeros(len(mass1_nsbh), dtype=bool) + if eta_nsbh.size != 0: + remnant = conversions.remnant_mass_from_mass1_mass2_cartesian_spin_eos( + mass1_nsbh, + mass2_nsbh, + spin1x=0.0, + spin1y=0.0, + spin1z=spin1z_nsbh, + eos=eos + ) + mask_bright_nsbh[remnant + > + massRangeParams.remnant_mass_threshold] = True + + # Keep only points that correspond to binaries that can produce an + # EM counterpart (i.e., BNSs and EM-bright NSBHs) and add their + # properties to the pile of accepted points to output + mass1_out = numpy.concatenate((mass1_out, mass1_bns, + mass1_nsbh[mask_bright_nsbh])) + mass2_out = numpy.concatenate((mass2_out, mass2_bns, + mass2_nsbh[mask_bright_nsbh])) + spin1z_out = numpy.concatenate((spin1z_out, spin1z_bns, + spin1z_nsbh[mask_bright_nsbh])) + spin2z_out = numpy.concatenate((spin2z_out, spin2z_bns, + spin2z_nsbh[mask_bright_nsbh])) + + # Number of points that survived all cuts + numPointsFound = len(mass1_out) + + # Ready to go + mass1 = mass1_out + mass2 = mass2_out + spin1z = spin1z_out + spin2z = spin2z_out + + return mass1, mass2, spin1z, spin2z
+ + +
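The classification step inside the loop above can be illustrated with a few made-up systems; the 3 solar-mass boundary is only an example value (the real boundary comes from massRangeParams and the EOS).

import numpy

boundary_mass = 3.0                               # illustrative value only
mass1 = numpy.array([10.0, 1.5, 5.0])
mass2 = numpy.array([8.0, 1.3, 1.4])

mask_not_bbh = mass2 < boundary_mass              # [False, True, True]: drops the BBH
mask_nsbh = mass1[mask_not_bbh] > boundary_mass   # [False, True]
mask_bns = ~mask_nsbh                             # [True, False]
# (1.5, 1.3) is kept as a BNS; (5.0, 1.4) is an NSBH that must still pass
# the remnant-disk-mass threshold before being accepted.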
+[docs] +def get_cov_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper, + lambda1=None, lambda2=None, quadparam1=None, + quadparam2=None): + """ + Function to convert between masses and spins and locations in the xi + parameter space. Xi = Cartesian metric and rotated to principal components. + + Parameters + ----------- + mass1 : float + Mass of heavier body. + mass2 : float + Mass of lighter body. + spin1z : float + Spin of body 1. + spin2z : float + Spin of body 2. + metricParams : metricParameters instance + Structure holding all the options for construction of the metric + and the eigenvalues, eigenvectors and covariance matrix + needed to manipulate the space. + fUpper : float + The value of fUpper to use when getting the mu coordinates from the + lambda coordinates. This must be a key in metricParams.evals, + metricParams.evecs and metricParams.evecsCV + (ie. we must know how to do the transformation for + the given value of fUpper) + + Returns + -------- + xis : list of floats or numpy.arrays + Position of the system(s) in the xi coordinate system + """ + + # Do this by doing masses - > lambdas -> mus + mus = get_conv_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper, + lambda1=lambda1, lambda2=lambda2, + quadparam1=quadparam1, quadparam2=quadparam2) + # and then mus -> xis + xis = get_covaried_params(mus, metricParams.evecsCV[fUpper]) + return xis
+ + +
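In practice the whole chain masses -> lambdas -> mus -> xis is driven by a prepared metricParameters instance. A hypothetical call, assuming a variable named metric already holds evals, evecs and evecsCV dictionaries keyed by the chosen cutoff:

f_upper = 1000.   # must be a key of metric.evals, metric.evecs and metric.evecsCV
xis = get_cov_params(mass1=10.0, mass2=1.4, spin1z=0.3, spin2z=0.0,
                     metricParams=metric, fUpper=f_upper)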
+[docs] +def get_conv_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper, + lambda1=None, lambda2=None, quadparam1=None, + quadparam2=None): + """ + Function to convert between masses and spins and locations in the mu + parameter space. Mu = Cartesian metric, but not principal components. + + Parameters + ----------- + mass1 : float + Mass of heavier body. + mass2 : float + Mass of lighter body. + spin1z : float + Spin of body 1. + spin2z : float + Spin of body 2. + metricParams : metricParameters instance + Structure holding all the options for construction of the metric + and the eigenvalues, eigenvectors and covariance matrix + needed to manipulate the space. + fUpper : float + The value of fUpper to use when getting the mu coordinates from the + lambda coordinates. This must be a key in metricParams.evals and + metricParams.evecs (ie. we must know how to do the transformation for + the given value of fUpper) + + Returns + -------- + mus : list of floats or numpy.arrays + Position of the system(s) in the mu coordinate system + """ + + # Do this by masses -> lambdas + lambdas = get_chirp_params(mass1, mass2, spin1z, spin2z, + metricParams.f0, metricParams.pnOrder, + lambda1=lambda1, lambda2=lambda2, + quadparam1=quadparam1, quadparam2=quadparam2) + # and lambdas -> mus + mus = get_mu_params(lambdas, metricParams, fUpper) + return mus
+ + +
+[docs] +def get_mu_params(lambdas, metricParams, fUpper): + """ + Function to rotate from the lambda coefficients into position in the mu + coordinate system. Mu = Cartesian metric, but not principal components. + + Parameters + ----------- + lambdas : list of floats or numpy.arrays + Position of the system(s) in the lambda coefficients + metricParams : metricParameters instance + Structure holding all the options for construction of the metric + and the eigenvalues, eigenvectors and covariance matrix + needed to manipulate the space. + fUpper : float + The value of fUpper to use when getting the mu coordinates from the + lambda coordinates. This must be a key in metricParams.evals and + metricParams.evecs (ie. we must know how to do the transformation for + the given value of fUpper) + + Returns + -------- + mus : list of floats or numpy.arrays + Position of the system(s) in the mu coordinate system + """ + lambdas = numpy.asarray(lambdas) + # If original inputs were floats we need to make this a 2D array + if len(lambdas.shape) == 1: + resize_needed = True + lambdas = lambdas[:,None] + else: + resize_needed = False + + evecs = metricParams.evecs[fUpper] + evals = metricParams.evals[fUpper] + + evecs = numpy.asarray(evecs) + + mus = ((lambdas.T).dot(evecs)).T + mus = mus * numpy.sqrt(evals)[:,None] + + if resize_needed: + mus = numpy.ndarray.flatten(mus) + + return mus
+ + +
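The rotation performed in get_mu_params is a projection of the lambda vector onto the metric eigenvectors, scaled by the square roots of the eigenvalues. A tiny self-contained numpy check of that contraction, using made-up 2x2 eigen-data rather than real metric output:

import numpy

lambdas = numpy.array([[1.0], [2.0]])    # one system, two lambda components
evecs = numpy.identity(2)                # made-up eigenvectors
evals = numpy.array([4.0, 9.0])          # made-up eigenvalues

mus = (lambdas.T.dot(evecs)).T           # same contraction as in the function
mus = mus * numpy.sqrt(evals)[:, None]   # scale each row by sqrt(eigenvalue)
# mus is now [[2.0], [6.0]]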
+[docs] +def get_covaried_params(mus, evecsCV): + """ + Function to rotate from position(s) in the mu_i coordinate system into the + position(s) in the xi_i coordinate system + + Parameters + ----------- + mus : list of floats or numpy.arrays + Position of the system(s) in the mu coordinate system + evecsCV : numpy.matrix + This matrix is used to perform the rotation to the xi_i + coordinate system. + + Returns + -------- + xis : list of floats or numpy.arrays + Position of the system(s) in the xi coordinate system + """ + mus = numpy.asarray(mus) + # If original inputs were floats we need to make this a 2D array + if len(mus.shape) == 1: + resize_needed = True + mus = mus[:,None] + else: + resize_needed = False + + xis = ((mus.T).dot(evecsCV)).T + + if resize_needed: + xis = numpy.ndarray.flatten(xis) + + return xis
+ + +
+[docs] +def rotate_vector(evecs, old_vector, rescale_factor, index): + """ + Function to find the position of the system(s) in one of the xi_i or mu_i + directions. + + Parameters + ----------- + evecs : numpy.matrix + Matrix of the eigenvectors of the metric in lambda_i coordinates. Used + to rotate to a Cartesian coordinate system. + old_vector : list of floats or numpy.arrays + The position of the system(s) in the original coordinates + rescale_factor : float + Scaling factor to apply to resulting position(s) + index : int + The index of the final coordinate system that is being computed. Ie. + if we are going from mu_i -> xi_j, this will give j. + + Returns + -------- + positions : float or numpy.array + Position of the point(s) in the resulting coordinate. + """ + temp = 0 + for i in range(len(evecs)): + temp += (evecs[i,index] * rescale_factor) * old_vector[i] + return temp
+ + +
+[docs] +def get_point_distance(point1, point2, metricParams, fUpper): + """ + Function to calculate the mismatch between two points, supplied in terms + of the masses and spins, using the xi_i parameter space metric to + approximate the mismatch of the two points. Can also take one of the points + as an array of points and return an array of mismatches (but only one can + be an array!) + + Parameters + ----------- + point1 : List of floats or numpy.arrays + point1[0] contains the mass(es) of the heaviest body(ies). + point1[1] contains the mass(es) of the smallest body(ies). + point1[2] contains the spin(s) of the heaviest body(ies). + point1[3] contains the spin(s) of the smallest body(ies). + point2 : List of floats + point2[0] contains the mass of the heaviest body. + point2[1] contains the mass of the smallest body. + point2[2] contains the spin of the heaviest body. + point2[3] contains the spin of the smallest body. + metricParams : metricParameters instance + Structure holding all the options for construction of the metric + and the eigenvalues, eigenvectors and covariance matrix + needed to manipulate the space. + fUpper : float + The value of fUpper to use when getting the mu coordinates from the + lambda coordinates. This must be a key in metricParams.evals, + metricParams.evecs and metricParams.evecsCV + (ie. we must know how to do the transformation for + the given value of fUpper) + + Returns + -------- + dist : float or numpy.array + Distance between point2 and all points in point1 + xis1 : List of floats or numpy.arrays + Position of the input point1(s) in the xi_i parameter space + xis2 : List of floats + Position of the input point2 in the xi_i parameter space + """ + aMass1 = point1[0] + aMass2 = point1[1] + aSpin1 = point1[2] + aSpin2 = point1[3] + + bMass1 = point2[0] + bMass2 = point2[1] + bSpin1 = point2[2] + bSpin2 = point2[3] + + aXis = get_cov_params(aMass1, aMass2, aSpin1, aSpin2, metricParams, fUpper) + + bXis = get_cov_params(bMass1, bMass2, bSpin1, bSpin2, metricParams, fUpper) + + dist = (aXis[0] - bXis[0])**2 + for i in range(1,len(aXis)): + dist += (aXis[i] - bXis[i])**2 + + return dist, aXis, bXis
+ + +
+[docs] +def calc_point_dist(vsA, entryA): + """ + This function is used to determine the distance between two points. + + Parameters + ---------- + vsA : list or numpy.array or similar + An array of point 1's position in the \chi_i coordinate system + entryA : list or numpy.array or similar + An array of point 2's position in the \chi_i coordinate system + + Returns + -------- + val : float + The metric distance between the two points. + """ + chi_diffs = vsA - entryA + val = ((chi_diffs)*(chi_diffs)).sum() + return val
+ + +
+[docs] +def test_point_dist(point_1_chis, point_2_chis, distance_threshold): + """ + This function tests if the difference between two points in the chi + parameter space is less than a distance threshold. Returns True if it is + and False if it is not. + + Parameters + ---------- + point_1_chis : numpy.array + An array of point 1's position in the \chi_i coordinate system + point_2_chis : numpy.array + An array of point 2's position in the \chi_i coordinate system + distance_threshold : float + The distance threshold to use. + """ + return calc_point_dist(point_1_chis, point_2_chis) < distance_threshold
+ + + +
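A small worked example of the two helpers above, with plain numpy arrays standing in for xi positions:

import numpy

point_a = numpy.array([0.0, 0.0])
point_b = numpy.array([0.1, 0.2])

calc_point_dist(point_a, point_b)        # 0.1**2 + 0.2**2 = 0.05
test_point_dist(point_a, point_b, 0.03)  # False: 0.05 is not below 0.03
test_point_dist(point_a, point_b, 0.10)  # True: 0.05 is below 0.10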
+[docs] +def calc_point_dist_vary(mus1, fUpper1, mus2, fUpper2, fMap, norm_map, MMdistA): + """ + Function to determine if two points, with differing upper frequency cutoffs, + have a mismatch < MMdistA for *both* upper frequency cutoffs. + + Parameters + ---------- + mus1 : List of numpy arrays + mus1[i] will give the array of point 1's position in the \chi_j + coordinate system. The i element corresponds to varying values of the + upper frequency cutoff. fMap is used to map between i and actual + frequencies + fUpper1 : float + The upper frequency cutoff of point 1. + mus2 : List of numpy arrays + mus2[i] will give the array of point 2's position in the \chi_j + coordinate system. The i element corresponds to varying values of the + upper frequency cutoff. fMap is used to map between i and actual + frequencies + fUpper2 : float + The upper frequency cutoff of point 2. + fMap : dictionary + fMap[fUpper] will give the index needed to get the \chi_j coordinates + in the two sets of mus + norm_map : dictionary + norm_map[fUpper] will give the relative frequency domain template + amplitude (sigma) at the given value of fUpper. + MMdistA : float + The minimal mismatch allowed between the points + + Returns + -------- + Boolean + True if the points have a mismatch < MMdistA + False if the points have a mismatch > MMdistA + """ + f_upper = min(fUpper1, fUpper2) + f_other = max(fUpper1, fUpper2) + idx = fMap[f_upper] + vecs1 = mus1[idx] + vecs2 = mus2[idx] + val = ((vecs1 - vecs2)*(vecs1 - vecs2)).sum() + if (val > MMdistA): + return False + # Reduce match to account for normalization. + norm_fac = norm_map[f_upper] / norm_map[f_other] + val = 1 - (1 - val)*norm_fac + return (val < MMdistA)
+ + + +
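The normalization correction at the end of calc_point_dist_vary rescales the match (not the mismatch) by the sigma ratio. As a worked example with made-up numbers:

val = 0.01                       # mismatch at the lower common cutoff
norm_fac = 0.95                  # norm_map[f_upper] / norm_map[f_other]
val = 1 - (1 - val) * norm_fac   # 1 - 0.99 * 0.95 = 0.0595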
+[docs] +def find_max_and_min_frequencies(name, mass_range_params, freqs): + """ + Determine the minimum and maximum values that the named frequency cutoff + can take over the supplied mass and spin ranges, and return the closest + frequencies at which the metric was actually calculated. + + Parameters + ---------- + name : string + Name of the cutoff formula (must be a key of + pnutils.named_frequency_cutoffs). + mass_range_params : massRangeParameters instance + Instance holding all the details of mass ranges and spin ranges. + freqs : numpy.array + The frequencies at which the metric was calculated (sorted ascending). + + Returns + -------- + numpy.array + The metric frequencies closest to the lowest and highest cutoff values. + """ + + cutoff_fns = pnutils.named_frequency_cutoffs + if name not in cutoff_fns.keys(): + err_msg = "%s not recognized as a valid cutoff frequency choice. " %name + err_msg += "Recognized choices: " + " ".join(cutoff_fns.keys()) + raise ValueError(err_msg) + + # Can I do this quickly? + total_mass_approxs = { + "SchwarzISCO": pnutils.f_SchwarzISCO, + "LightRing" : pnutils.f_LightRing, + "ERD" : pnutils.f_ERD + } + + if name in total_mass_approxs.keys(): + # This can be done quickly if the cutoff only depends on total mass + # Assumes that lower total mass = higher cutoff frequency + upper_f_cutoff = total_mass_approxs[name](mass_range_params.minTotMass) + lower_f_cutoff = total_mass_approxs[name](mass_range_params.maxTotMass) + else: + # Do this numerically + # FIXME: Is 1000000 the right choice? I think so, but just highlighting + mass1, mass2, spin1z, spin2z = \ + get_random_mass(1000000, mass_range_params) + mass_dict = {} + mass_dict['mass1'] = mass1 + mass_dict['mass2'] = mass2 + mass_dict['spin1z'] = spin1z + mass_dict['spin2z'] = spin2z + tmp_freqs = cutoff_fns[name](mass_dict) + upper_f_cutoff = tmp_freqs.max() + lower_f_cutoff = tmp_freqs.min() + + cutoffs = numpy.array([lower_f_cutoff,upper_f_cutoff]) + if lower_f_cutoff < freqs.min(): + warn_msg = "WARNING: " + warn_msg += "Lowest frequency cutoff is %s Hz " %(lower_f_cutoff,) + warn_msg += "which is lower than the lowest frequency calculated " + warn_msg += "for the metric: %s Hz. " %(freqs.min()) + warn_msg += "Distances for these waveforms will be calculated at " + warn_msg += "the lowest available metric frequency." + logger.warning(warn_msg) + if upper_f_cutoff > freqs.max(): + warn_msg = "WARNING: " + warn_msg += "Highest frequency cutoff is %s Hz " %(upper_f_cutoff,) + warn_msg += "which is larger than the highest frequency calculated " + warn_msg += "for the metric: %s Hz. " %(freqs.max()) + warn_msg += "Distances for these waveforms will be calculated at " + warn_msg += "the largest available metric frequency." + logger.warning(warn_msg) + return find_closest_calculated_frequencies(cutoffs, freqs)
+ + + +
+[docs] +def return_nearest_cutoff(name, mass_dict, freqs): + """ + Given an array of total mass values and an (ascending) list of + frequencies, this will calculate the specified cutoff formula for each + mtotal and return the nearest frequency to each cutoff from the input + list. + Currently only supports cutoffs that are functions of the total mass + and no other parameters (SchwarzISCO, LightRing, ERD) + + Parameters + ---------- + name : string + Name of the cutoff formula to be approximated + mass_dict : dictionary + Dictionary whose keys are used to call the functions returned by + tmpltbank.named_frequency_cutoffs. The values can be + numpy arrays or single values. + freqs : list of floats + A list of frequencies (must be sorted ascending) + + Returns + ------- + numpy.array + The frequencies closest to the cutoff for each input system. + """ + # A bypass for the redundant case + if len(freqs) == 1: + return numpy.zeros(len(mass_dict['m1']), dtype=float) + freqs[0] + cutoff_fns = pnutils.named_frequency_cutoffs + if name not in cutoff_fns.keys(): + err_msg = "%s not recognized as a valid cutoff frequency choice. " %name + err_msg += "Recognized choices: " + " ".join(cutoff_fns.keys()) + raise ValueError(err_msg) + f_cutoff = cutoff_fns[name](mass_dict) + return find_closest_calculated_frequencies(f_cutoff, freqs)
+ + +
+[docs] +def find_closest_calculated_frequencies(input_freqs, metric_freqs): + """ + Given a value (or array) of input frequencies, find the closest values in + the list of frequencies calculated in the metric. + + Parameters + ----------- + input_freqs : numpy.array or float + The frequency(ies) for which you want to find the closest value in + metric_freqs + metric_freqs : numpy.array + The list of frequencies calculated by the metric + + Returns + -------- + output_freqs : numpy.array or float + The list of closest values to input_freqs for which the metric was + computed + """ + try: + refEv = numpy.zeros(len(input_freqs),dtype=float) + except TypeError: + refEv = numpy.zeros(1, dtype=float) + input_freqs = numpy.array([input_freqs]) + + if len(metric_freqs) == 1: + refEv[:] = metric_freqs[0] + return refEv + + # FIXME: This seems complicated for what is a simple operation. Is there + # a simpler *and* faster way of doing this? + # NOTE: This function assumes a sorted list of frequencies + # NOTE: input_freqs may be a numpy array as this function is + # designed so that the closest frequency can be found for many systems + # simultaneously + for i in range(len(metric_freqs)): + if i == 0: + # If frequency is lower than halfway between the first two entries + # use the first (lowest) value + logicArr = input_freqs < ((metric_freqs[0] + metric_freqs[1])/2.) + elif i == (len(metric_freqs)-1): + # If frequency is larger than halfway between the last two entries + # use the last (highest) value + logicArr = input_freqs > ((metric_freqs[-2] + metric_freqs[-1])/2.) + else: + # For frequencies within the range in freqs, check which points + # should use the frequency corresponding to index i. + logicArrA = input_freqs > ((metric_freqs[i-1] + metric_freqs[i])/2.) + logicArrB = input_freqs < ((metric_freqs[i] + metric_freqs[i+1])/2.) + logicArr = numpy.logical_and(logicArrA,logicArrB) + if logicArr.any(): + refEv[logicArr] = metric_freqs[i] + return refEv
+ + + +
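On the FIXME above: a shorter nearest-neighbour lookup is possible with numpy.searchsorted. This is only a sketch of an equivalent approach (it assumes metric_freqs is sorted ascending and contains at least two entries), not a tested drop-in replacement:

import numpy

def closest_calculated_frequencies_sketch(input_freqs, metric_freqs):
    input_freqs = numpy.atleast_1d(numpy.asarray(input_freqs, dtype=float))
    metric_freqs = numpy.asarray(metric_freqs, dtype=float)
    # Index of the first metric frequency >= each input frequency
    idx = numpy.searchsorted(metric_freqs, input_freqs)
    idx = numpy.clip(idx, 1, len(metric_freqs) - 1)
    left = metric_freqs[idx - 1]
    right = metric_freqs[idx]
    # Pick whichever neighbour is closer
    return numpy.where(input_freqs - left <= right - input_freqs, left, right)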
+[docs] +def outspiral_loop(N): + """ + Return a list of points that will loop outwards in a 2D lattice in terms + of distance from a central point. So if N=2 this will be [0,0], [0,1], + [0,-1],[1,0],[-1,0],[1,1] .... This is useful when you want to loop over + a number of bins, but want to start in the center and work outwards. + """ + # Create a 2D lattice of all points + X,Y = numpy.meshgrid(numpy.arange(-N,N+1), numpy.arange(-N,N+1)) + + # Flatten it + X = numpy.ndarray.flatten(X) + Y = numpy.ndarray.flatten(Y) + + # Force to an integer + X = numpy.array(X, dtype=int) + Y = numpy.array(Y, dtype=int) + + # Calculate distances + G = numpy.sqrt(X**2+Y**2) + + # Combine back into an array + out_arr = numpy.array([X,Y,G]) + + # And order correctly + sorted_out_arr = out_arr[:,out_arr[2].argsort()] + + return sorted_out_arr[:2,:].T
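A short usage note for outspiral_loop; the exact ordering of points at equal distance from the centre is not guaranteed by the sort:

points = outspiral_loop(1)
# points[0] is always [0, 0] (the centre). The next four rows are the
# unit-distance neighbours (0, 1), (0, -1), (1, 0), (-1, 0) in some order,
# followed by the four diagonal points at distance sqrt(2).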
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/tmpltbank/lambda_mapping.html b/latest/html/_modules/pycbc/tmpltbank/lambda_mapping.html new file mode 100644 index 00000000000..06dae2c3b18 --- /dev/null +++ b/latest/html/_modules/pycbc/tmpltbank/lambda_mapping.html @@ -0,0 +1,444 @@ + + + + + + pycbc.tmpltbank.lambda_mapping — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.tmpltbank.lambda_mapping

+# Copyright (C) 2013 Ian W. Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+import re
+import logging
+import numpy
+
+from lal import MTSUN_SI, PI, CreateREAL8Vector
+
+import pycbc.libutils
+
+lalsimulation = pycbc.libutils.import_optional('lalsimulation')
+
+logger = logging.getLogger('pycbc.tmpltbank.lambda_mapping')
+
+# PLEASE ENSURE THESE ARE KEPT UP TO DATE WITH THE REST OF THIS FILE
+pycbcValidTmpltbankOrders = ['zeroPN','onePN','onePointFivePN','twoPN',\
+      'twoPointFivePN','threePN','threePointFivePN']
+
+pycbcValidOrdersHelpDescriptions="""
+     * zeroPN: Will only include the dominant term (proportional to chirp mass)
+     * onePN: Will only include the leading orbit term and first correction at 1PN
+     * onePointFivePN: Will include orbit and spin terms to 1.5PN.
+     * twoPN: Will include orbit and spin terms to 2PN.
+     * twoPointFivePN: Will include orbit and spin terms to 2.5PN.
+     * threePN: Will include orbit terms to 3PN and spin terms to 2.5PN.
+     * threePointFivePN: Include orbit terms to 3.5PN and spin terms to 2.5PN
+"""
+
+
+
+[docs] +def generate_mapping(order): + """ + This function will take an order string and return a mapping between + components in the metric and the various Lambda components. This must be + used (and consistently used) when generating the metric *and* when + transforming to/from the xi_i coordinates to the lambda_i coordinates. + + NOTE: This is not a great way of doing this. It would be nice to clean + this up. Hence pulling this function out. The valid PN orders are + {} + + Parameters + ---------- + order : string + A string containing a PN order. Valid values are given above. + + Returns + -------- + mapping : dictionary + A mapping between the active Lambda terms and index in the metric + """ + mapping = {} + mapping['Lambda0'] = 0 + if order == 'zeroPN': + return mapping + mapping['Lambda2'] = 1 + if order == 'onePN': + return mapping + mapping['Lambda3'] = 2 + if order == 'onePointFivePN': + return mapping + mapping['Lambda4'] = 3 + if order == 'twoPN': + return mapping + mapping['LogLambda5'] = 4 + if order == 'twoPointFivePN': + return mapping + mapping['Lambda6'] = 5 + mapping['LogLambda6'] = 6 + if order == 'threePN': + return mapping + mapping['Lambda7'] = 7 + if order == 'threePointFivePN': + return mapping + # For some as-of-yet unknown reason, the tidal terms are not giving correct + # match estimates when enabled. So, for now, this order is commented out. + #if order == 'tidalTesting': + # mapping['Lambda10'] = 8 + # mapping['Lambda12'] = 9 + # return mapping + raise ValueError("Order %s is not understood." %(order))
+ + +# Override doc so the PN orders are added automatically to online docs +generate_mapping.__doc__ = \ + generate_mapping.__doc__.format(pycbcValidOrdersHelpDescriptions) + +
+[docs] +def generate_inverse_mapping(order): + """Generate a lambda entry -> PN order map. + + This function will generate the opposite of generate_mapping. So where + generate_mapping gives dict[key] = item, this will give + dict[item] = key. Valid PN orders are: + {} + + Parameters + ---------- + order : string + A string containing a PN order. Valid values are given above. + + Returns + -------- + mapping : dictionary + An inverse mapping between the active Lambda terms and index in the + metric + """ + mapping = generate_mapping(order) + inv_mapping = {} + for key,value in mapping.items(): + inv_mapping[value] = key + + return inv_mapping
+ + +generate_inverse_mapping.__doc__ = \ + generate_inverse_mapping.__doc__.format(pycbcValidOrdersHelpDescriptions) + +
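For reference, the mapping built up cumulatively by generate_mapping for the 'twoPN' order, and its inverse:

generate_mapping('twoPN')
# {'Lambda0': 0, 'Lambda2': 1, 'Lambda3': 2, 'Lambda4': 3}

generate_inverse_mapping('twoPN')
# {0: 'Lambda0', 1: 'Lambda2', 2: 'Lambda3', 3: 'Lambda4'}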
+[docs] +def get_ethinca_orders(): + """ + Returns the dictionary mapping TaylorF2 PN order names to twice-PN + orders (powers of v/c) + """ + ethinca_orders = {"zeroPN" : 0, + "onePN" : 2, + "onePointFivePN" : 3, + "twoPN" : 4, + "twoPointFivePN" : 5, + "threePN" : 6, + "threePointFivePN" : 7 + } + return ethinca_orders
+ + +
+[docs] +def ethinca_order_from_string(order): + """ + Returns the integer giving twice the post-Newtonian order + used by the ethinca calculation. Currently valid only for the TaylorF2 metric. + + Parameters + ---------- + order : string + + Returns + ------- + int + """ + if order in get_ethinca_orders().keys(): + return get_ethinca_orders()[order] + else: raise ValueError("Order "+str(order)+" is not valid for ethinca " + "calculation! Valid orders: "+ + str(get_ethinca_orders().keys()))
+ + +
+[docs] +def get_chirp_params(mass1, mass2, spin1z, spin2z, f0, order, + quadparam1=None, quadparam2=None, lambda1=None, + lambda2=None): + """ + Take a set of masses and spins and convert to the various lambda + coordinates that describe the orbital phase. Accepted PN orders are: + {} + + Parameters + ---------- + mass1 : float or array + Mass1 of input(s). + mass2 : float or array + Mass2 of input(s). + spin1z : float or array + Parallel spin component(s) of body 1. + spin2z : float or array + Parallel spin component(s) of body 2. + f0 : float + This is an arbitrary scaling factor introduced to avoid the potential + for numerical overflow when calculating this. Generally the default + value (70) is safe here. **IMPORTANT, if you want to calculate the + ethinca metric components later this MUST be set equal to f_low.** + This value must also be used consistently (ie. don't change its value + when calling different functions!). + order : string + The Post-Newtonian order that is used to translate from masses and + spins to the lambda_i coordinate system. Valid orders given above. + + Returns + -------- + lambdas : list of floats or numpy.arrays + The lambda coordinates for the input system(s) + """ + + # Determine whether array or single value input + sngl_inp = False + try: + num_points = len(mass1) + except TypeError: + sngl_inp = True + # If you care about speed, you aren't calling this function one entry + # at a time. + mass1 = numpy.array([mass1]) + mass2 = numpy.array([mass2]) + spin1z = numpy.array([spin1z]) + spin2z = numpy.array([spin2z]) + if quadparam1 is not None: + quadparam1 = numpy.array([quadparam1]) + if quadparam2 is not None: + quadparam2 = numpy.array([quadparam2]) + if lambda1 is not None: + lambda1 = numpy.array([lambda1]) + if lambda2 is not None: + lambda2 = numpy.array([lambda2]) + num_points = 1 + + if quadparam1 is None: + quadparam1 = numpy.ones(len(mass1), dtype=float) + if quadparam2 is None: + quadparam2 = numpy.ones(len(mass1), dtype=float) + if lambda1 is None: + lambda1 = numpy.zeros(len(mass1), dtype=float) + if lambda2 is None: + lambda2 = numpy.zeros(len(mass1), dtype=float) + + mass1_v = CreateREAL8Vector(len(mass1)) + mass1_v.data[:] = mass1[:] + mass2_v = CreateREAL8Vector(len(mass1)) + mass2_v.data[:] = mass2[:] + spin1z_v = CreateREAL8Vector(len(mass1)) + spin1z_v.data[:] = spin1z[:] + spin2z_v = CreateREAL8Vector(len(mass1)) + spin2z_v.data[:] = spin2z[:] + lambda1_v = CreateREAL8Vector(len(mass1)) + lambda1_v.data[:] = lambda1[:] + lambda2_v = CreateREAL8Vector(len(mass1)) + lambda2_v.data[:] = lambda2[:] + dquadparam1_v = CreateREAL8Vector(len(mass1)) + dquadparam1_v.data[:] = quadparam1[:] - 1. + dquadparam2_v = CreateREAL8Vector(len(mass1)) + dquadparam2_v.data[:] = quadparam2[:] - 1. + + phasing_arr = lalsimulation.SimInspiralTaylorF2AlignedPhasingArray\ + (mass1_v, mass2_v, spin1z_v, spin2z_v, lambda1_v, lambda2_v, + dquadparam1_v, dquadparam2_v) + + vec_len = lalsimulation.PN_PHASING_SERIES_MAX_ORDER + 1; + phasing_vs = numpy.zeros([num_points, vec_len]) + phasing_vlogvs = numpy.zeros([num_points, vec_len]) + phasing_vlogvsqs = numpy.zeros([num_points, vec_len]) + + lng = len(mass1) + jmp = lng * vec_len + for idx in range(vec_len): + phasing_vs[:,idx] = phasing_arr.data[lng*idx : lng*(idx+1)] + phasing_vlogvs[:,idx] = \ + phasing_arr.data[jmp + lng*idx : jmp + lng*(idx+1)] + phasing_vlogvsqs[:,idx] = \ + phasing_arr.data[2*jmp + lng*idx : 2*jmp + lng*(idx+1)] + + pim = PI * (mass1 + mass2)*MTSUN_SI + pmf = pim * f0 + pmf13 = pmf**(1./3.) 
+ logpim13 = numpy.log((pim)**(1./3.)) + + mapping = generate_inverse_mapping(order) + lambdas = [] + lambda_str = '^Lambda([0-9]+)' + loglambda_str = '^LogLambda([0-9]+)' + logloglambda_str = '^LogLogLambda([0-9]+)' + for idx in range(len(mapping.keys())): + # RE magic engage! + rematch = re.match(lambda_str, mapping[idx]) + if rematch: + pn_order = int(rematch.groups()[0]) + term = phasing_vs[:,pn_order] + term = term + logpim13 * phasing_vlogvs[:,pn_order] + lambdas.append(term * pmf13**(-5+pn_order)) + continue + rematch = re.match(loglambda_str, mapping[idx]) + if rematch: + pn_order = int(rematch.groups()[0]) + lambdas.append((phasing_vlogvs[:,pn_order]) * pmf13**(-5+pn_order)) + continue + rematch = re.match(logloglambda_str, mapping[idx]) + if rematch: + raise ValueError("LOGLOG terms are not implemented") + #pn_order = int(rematch.groups()[0]) + #lambdas.append(phasing_vlogvsqs[:,pn_order] * pmf13**(-5+pn_order)) + #continue + err_msg = "Failed to parse " + mapping[idx] + raise ValueError(err_msg) + + if sngl_inp: + return [l[0] for l in lambdas] + else: + return lambdas
+ + +get_chirp_params.__doc__ = \ + get_chirp_params.__doc__.format(pycbcValidOrdersHelpDescriptions) +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/tmpltbank/lattice_utils.html b/latest/html/_modules/pycbc/tmpltbank/lattice_utils.html new file mode 100644 index 00000000000..28c383d880c --- /dev/null +++ b/latest/html/_modules/pycbc/tmpltbank/lattice_utils.html @@ -0,0 +1,296 @@ + + + + + + pycbc.tmpltbank.lattice_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.tmpltbank.lattice_utils

+# Copyright (C) 2013 Ian W. Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+import logging
+import copy
+import numpy
+
+import lal
+
+logger = logging.getLogger('pycbc.tmpltbank.lattice_utils')
+
+
+[docs] +def generate_hexagonal_lattice(maxv1, minv1, maxv2, minv2, mindist): + """ + This function generates a 2-dimensional lattice of points using a hexagonal + lattice. + + Parameters + ----------- + maxv1 : float + Largest value in the 1st dimension to cover + minv1 : float + Smallest value in the 1st dimension to cover + maxv2 : float + Largest value in the 2nd dimension to cover + minv2 : float + Smallest value in the 2nd dimension to cover + mindist : float + Maximum allowed mismatch between a point in the parameter space and the + generated bank of points. + + Returns + -------- + v1s : numpy.array + Array of positions in the first dimension + v2s : numpy.array + Array of positions in the second dimension + """ + if minv1 > maxv1: + raise ValueError("Invalid input to function.") + if minv2 > maxv2: + raise ValueError("Invalid input to function.") + # Place first point + v1s = [minv1] + v2s = [minv2] + initPoint = [minv1,minv2] + # Place first line + initLine = [initPoint] + tmpv1 = minv1 + while (tmpv1 < maxv1): + tmpv1 = tmpv1 + (3 * mindist)**(0.5) + initLine.append([tmpv1,minv2]) + v1s.append(tmpv1) + v2s.append(minv2) + initLine = numpy.array(initLine) + initLine2 = copy.deepcopy(initLine) + initLine2[:,0] += 0.5 * (3*mindist)**0.5 + initLine2[:,1] += 1.5 * (mindist)**0.5 + for i in range(len(initLine2)): + v1s.append(initLine2[i,0]) + v2s.append(initLine2[i,1]) + tmpv2_1 = initLine[0,1] + tmpv2_2 = initLine2[0,1] + while tmpv2_1 < maxv2 and tmpv2_2 < maxv2: + tmpv2_1 = tmpv2_1 + 3.0 * (mindist)**0.5 + tmpv2_2 = tmpv2_2 + 3.0 * (mindist)**0.5 + initLine[:,1] = tmpv2_1 + initLine2[:,1] = tmpv2_2 + for i in range(len(initLine)): + v1s.append(initLine[i,0]) + v2s.append(initLine[i,1]) + for i in range(len(initLine2)): + v1s.append(initLine2[i,0]) + v2s.append(initLine2[i,1]) + v1s = numpy.array(v1s) + v2s = numpy.array(v2s) + return v1s, v2s
+ + +
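A hypothetical call to the hexagonal lattice generator, covering the unit square with a mismatch spacing of 0.03; the two returned arrays have equal length and give the coordinates of each lattice point:

v1s, v2s = generate_hexagonal_lattice(1.0, 0.0, 1.0, 0.0, 0.03)
assert len(v1s) == len(v2s)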
+[docs] +def generate_anstar_3d_lattice(maxv1, minv1, maxv2, minv2, maxv3, minv3, \ + mindist): + """ + This function calls into LAL routines to generate a 3-dimensional array + of points using the An^* lattice. + + Parameters + ----------- + maxv1 : float + Largest value in the 1st dimension to cover + minv1 : float + Smallest value in the 1st dimension to cover + maxv2 : float + Largest value in the 2nd dimension to cover + minv2 : float + Smallest value in the 2nd dimension to cover + maxv3 : float + Largest value in the 3rd dimension to cover + minv3 : float + Smallest value in the 3rd dimension to cover + mindist : float + Maximum allowed mismatch between a point in the parameter space and the + generated bank of points. + + Returns + -------- + v1s : list + Positions in the first dimension + v2s : list + Positions in the second dimension + v3s : list + Positions in the third dimension + """ + # Lalpulsar is not a requirement for the rest of pycbc, so check if we have + # it here in this function. + try: + import lalpulsar + except ImportError: + raise ImportError("A SWIG-wrapped install of lalpulsar is needed to use the anstar tiling functionality.") + + tiling = lalpulsar.CreateLatticeTiling(3) + lalpulsar.SetLatticeTilingConstantBound(tiling, 0, minv1, maxv1) + lalpulsar.SetLatticeTilingConstantBound(tiling, 1, minv2, maxv2) + lalpulsar.SetLatticeTilingConstantBound(tiling, 2, minv3, maxv3) + # Make a 3x3 Euclidean lattice + a = lal.gsl_matrix(3,3) + a.data[0,0] = 1 + a.data[1,1] = 1 + a.data[2,2] = 1 + try: + # old versions of lalpulsar used an enumeration + lattice = lalpulsar.TILING_LATTICE_ANSTAR + except AttributeError: + # newer versions of lalpulsar use a string + lattice = 'An-star' + lalpulsar.SetTilingLatticeAndMetric(tiling, lattice, a, mindist) + try: + iterator = lalpulsar.CreateLatticeTilingIterator(tiling, 3) + except TypeError: + # old versions of lalpulsar required the flags argument + # (set to 0 for defaults) + iterator = lalpulsar.CreateLatticeTilingIterator(tiling, 3, 0) + + vs1 = [] + vs2 = [] + vs3 = [] + curr_point = lal.gsl_vector(3) + while (lalpulsar.NextLatticeTilingPoint(iterator, curr_point) > 0): + vs1.append(curr_point.data[0]) + vs2.append(curr_point.data[1]) + vs3.append(curr_point.data[2]) + return vs1, vs2, vs3
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/tmpltbank/option_utils.html b/latest/html/_modules/pycbc/tmpltbank/option_utils.html new file mode 100644 index 00000000000..a89a726f2eb --- /dev/null +++ b/latest/html/_modules/pycbc/tmpltbank/option_utils.html @@ -0,0 +1,1397 @@ + + + + + + pycbc.tmpltbank.option_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.tmpltbank.option_utils

+# Copyright (C) 2013 Ian W. Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+import argparse
+import logging
+import textwrap
+import numpy
+import os
+
+from pycbc.tmpltbank.lambda_mapping import get_ethinca_orders, pycbcValidOrdersHelpDescriptions
+from pycbc import pnutils
+from pycbc.neutron_stars import load_ns_sequence
+from pycbc.types import positive_float, nonnegative_float
+
+logger = logging.getLogger('pycbc.tmpltbank.option_utils')
+
+
+
+[docs] +class IndentedHelpFormatterWithNL(argparse.ArgumentDefaultsHelpFormatter): + """ + This class is taken from + https://groups.google.com/forum/#!topic/comp.lang.python/bfbmtUGhW8I + and is used to format the argparse help messages to handle line breaking + more nicely. Specifically the pn-order help is large and looks crappy without this. + This function is (C) Tim Chase + """ +
+[docs] + def format_description(self, description): + """ + No documentation + """ + if not description: return "" + desc_width = self.width - self.current_indent + indent = " "*self.current_indent + # the above is still the same + bits = description.split('\n') + formatted_bits = [ + textwrap.fill(bit, + desc_width, + initial_indent=indent, + subsequent_indent=indent) + for bit in bits] + result = "\n".join(formatted_bits) + "\n" + return result
+ + +
+[docs] + def format_option(self, option): + """ + No documentation + """ + # The help for each option consists of two parts: + # * the opt strings and metavars + # eg. ("-x", or "-fFILENAME, --file=FILENAME") + # * the user-supplied help string + # eg. ("turn on expert mode", "read data from FILENAME") + # + # If possible, we write both of these on the same line: + # -x turn on expert mode + # + # But if the opt string list is too long, we put the help + # string on a second line, indented to the same column it would + # start in if it fit on the first line. + # -fFILENAME, --file=FILENAME + # read data from FILENAME + result = [] + opts = self.option_strings[option] + opt_width = self.help_position - self.current_indent - 2 + if len(opts) > opt_width: + opts = "%*s%s\n" % (self.current_indent, "", opts) + indent_first = self.help_position + else: # start help on same line as opts + opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) + indent_first = 0 + result.append(opts) + if option.help: + help_text = self.expand_default(option) + # Everything is the same up through here + help_lines = [] + for para in help_text.split("\n"): + help_lines.extend(textwrap.wrap(para, self.help_width)) + # Everything is the same after here + result.append("%*s%s\n" % ( + indent_first, "", help_lines[0])) + result.extend(["%*s%s\n" % (self.help_position, "", line) + for line in help_lines[1:]]) + elif opts[-1] != "\n": + result.append("\n") + return "".join(result)
+
+ + +
+[docs] +def get_options_from_group(option_group): + """ + Take an option group and return all the options that are defined in that + group. + """ + option_list = option_group._group_actions + command_lines = [] + for option in option_list: + option_strings = option.option_strings + for string in option_strings: + if string.startswith('--'): + command_lines.append(string) + return command_lines
+ + +
+[docs] +def insert_base_bank_options(parser, match_req=True): + """ + Adds essential common options for template bank generation to an + ArgumentParser instance. + """ + + def match_type(s): + err_msg = "must be a number strictly between 0 and 1, not %r" % s + try: + value = float(s) + except ValueError: + raise argparse.ArgumentTypeError(err_msg) + if value <= 0 or value >= 1: + raise argparse.ArgumentTypeError(err_msg) + return value + + parser.add_argument( + '-m', '--min-match', type=match_type, required=match_req, + help="Generate bank with specified minimum match. Required.") + parser.add_argument( + '-O', '--output-file', required=True, + help="Output file name. Required.") + parser.add_argument('--f-low-column', type=str, metavar='NAME', + help='If given, store the lower frequency cutoff into ' + 'column NAME of the single-inspiral table. ' + '(Requires an output file ending in .xml)') + parser.add_argument('--output-f-final', action='store_true', + help="Include 'f_final' in the output hdf file.")
+ + +
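A minimal illustration of how these option-insertion helpers are meant to be used with argparse (the file name and match value are hypothetical; the other helpers in this module add their option groups to the same parser in the same way):

import argparse

parser = argparse.ArgumentParser()
insert_base_bank_options(parser)
opts = parser.parse_args(['--min-match', '0.97', '--output-file', 'BANK.hdf'])
# opts.min_match == 0.97, opts.output_file == 'BANK.hdf'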
+[docs] +def insert_metric_calculation_options(parser): + """ + Adds the options used to obtain a metric in the bank generation codes to an + argparser as an OptionGroup. This should be used if you want to use these + options in your code. + """ + metricOpts = parser.add_argument_group( + "Options related to calculating the parameter space metric") + metricOpts.add_argument("--pn-order", action="store", type=str, + required=True, + help="Determines the PN order to use. For a bank of " + "non-spinning templates, spin-related terms in the " + "metric will be zero. REQUIRED. " + "Choices: %s" %(pycbcValidOrdersHelpDescriptions)) + metricOpts.add_argument("--f0", action="store", type=positive_float, + default=70.,\ + help="f0 is used as a dynamic scaling factor when " + "calculating integrals used in metric construction. " + "I.e. instead of integrating F(f) we integrate F(f/f0) " + "then rescale by powers of f0. The default value 70Hz " + "should be fine for most applications. OPTIONAL. " + "UNITS=Hz. **WARNING: If the ethinca metric is to be " + "calculated, f0 must be set equal to f-low**") + metricOpts.add_argument("--f-low", action="store", type=positive_float, + required=True, + help="Lower frequency cutoff used in computing the " + "parameter space metric. REQUIRED. UNITS=Hz") + metricOpts.add_argument("--f-upper", action="store", type=positive_float, + required=True, + help="Upper frequency cutoff used in computing the " + "parameter space metric. REQUIRED. UNITS=Hz") + metricOpts.add_argument("--delta-f", action="store", type=positive_float, + required=True, + help="Frequency spacing used in computing the parameter " + "space metric: integrals of the form \int F(f) df " + "are approximated as \sum F(f) delta_f. REQUIRED. " + "UNITS=Hz") + metricOpts.add_argument("--write-metric", action="store_true", + default=False, help="If given write the metric components " + "to disk as they are calculated.") + return metricOpts
+ + +
+[docs] +def verify_metric_calculation_options(opts, parser): + """ + Parses the metric calculation options given and verifies that they are + correct. + + Parameters + ---------- + opts : argparse.Values instance + Result of parsing the input options with OptionParser + parser : object + The OptionParser instance. + """ + if not opts.pn_order: + parser.error("Must supply --pn-order")
+ + +
+[docs] +class metricParameters(object): + """ + This class holds all of the options that are parsed in the function + insert_metric_calculation_options + and all products produced using these options. It can also be initialized + from the __init__ function, providing directly the options normally + provided on the command line. + """ + _psd = None + _metric = None + _evals = None + _evecs = None + _evecsCV = None + def __init__(self, pnOrder, fLow, fUpper, deltaF, f0=70, + write_metric=False): + """ + Initialize an instance of the metricParameters by providing all + options directly. See the help message associated with any code + that uses the metric options for more details of how to set each of + these, e.g. pycbc_aligned_stoch_bank --help + """ + self.pnOrder=pnOrder + self.fLow=fLow + self.fUpper=fUpper + self.deltaF=deltaF + self.f0=f0 + self._moments=None + self.write_metric=write_metric + +
+[docs] + @classmethod + def from_argparse(cls, opts): + """ + Initialize an instance of the metricParameters class from an + argparse.OptionParser instance. This assumes that + insert_metric_calculation_options + and + verify_metric_calculation_options + have already been called before initializing the class. + """ + return cls(opts.pn_order, opts.f_low, opts.f_upper, opts.delta_f,\ + f0=opts.f0, write_metric=opts.write_metric)
+ + + @property + def psd(self): + """ + A pyCBC FrequencySeries holding the appropriate PSD. + Return the PSD used in the metric calculation. + """ + if not self._psd: + errMsg = "The PSD has not been set in the metricParameters " + errMsg += "instance." + raise ValueError(errMsg) + return self._psd + + @psd.setter + def psd(self, inPsd): + self._psd = inPsd + + @property + def moments(self): + """ + Moments structure + This contains the result of all the integrals used in computing the + metrics above. It can be used for the ethinca components calculation, + or other similar calculations. This is composed of two compound + dictionaries. The first entry indicates which moment is being + calculated and the second entry indicates the upper frequency cutoff + that was used. + + In all cases x = f/f0. + + For the first entries the options are: + + moments['J%d' %(i)][f_cutoff] + This stores the integral of + x**((-i)/3.) * delta X / PSD(x) + + moments['log%d' %(i)][f_cutoff] + This stores the integral of + (numpy.log(x**(1./3.))) x**((-i)/3.) * delta X / PSD(x) + + moments['loglog%d' %(i)][f_cutoff] + This stores the integral of + (numpy.log(x**(1./3.)))**2 x**((-i)/3.) * delta X / PSD(x) + + moments['loglog%d' %(i)][f_cutoff] + This stores the integral of + (numpy.log(x**(1./3.)))**3 x**((-i)/3.) * delta X / PSD(x) + + moments['loglog%d' %(i)][f_cutoff] + This stores the integral of + (numpy.log(x**(1./3.)))**4 x**((-i)/3.) * delta X / PSD(x) + + The second entry stores the frequency cutoff that was used when + computing the integral. + """ + return self._moments + + @moments.setter + def moments(self, inMoments): + self._moments=inMoments + + @property + def evals(self): + """ + The eigenvalues of the parameter space. + This is a Dictionary of numpy.array + Each entry in the dictionary corresponds to the different frequency + ranges described in vary_fmax. If vary_fmax = False, the only entry + will be f_upper, this corresponds to integrals in [f_low,f_upper). This + entry is always present. Each other entry will use floats as keys to + the dictionary. These floats give the upper frequency cutoff when it is + varying. + Each numpy.array contains the eigenvalues which, with the eigenvectors + in evecs, are needed to rotate the + coordinate system to one in which the metric is the identity matrix. + """ + if self._evals is None: + errMsg = "The metric eigenvalues have not been set in the " + errMsg += "metricParameters instance." + raise ValueError(errMsg) + return self._evals + + @evals.setter + def evals(self, inEvals): + if self.write_metric: + for frequency in inEvals.keys(): + numpy.savetxt("metric_evals_%d.dat" %(frequency), + inEvals[frequency]) + self._evals = inEvals + + @property + def evecs(self): + """ + The eigenvectors of the parameter space. + This is a Dictionary of numpy.matrix + Each entry in the dictionary is as described under evals. + Each numpy.matrix contains the eigenvectors which, with the eigenvalues + in evals, are needed to rotate the + coordinate system to one in which the metric is the identity matrix. + """ + if self._evecs is None: + errMsg = "The metric eigenvectors have not been set in the " + errMsg += "metricParameters instance." + raise ValueError(errMsg) + return self._evecs + + @evecs.setter + def evecs(self, inEvecs): + if self.write_metric: + for frequency in inEvecs.keys(): + numpy.savetxt("metric_evecs_%d.dat" %(frequency), + inEvecs[frequency]) + self._evecs = inEvecs + + @property + def metric(self): + """ + The metric of the parameter space. 
+ This is a Dictionary of numpy.matrix + Each entry in the dictionary is as described under evals. + Each numpy.matrix contains the metric of the parameter space in the + Lambda_i coordinate system. + """ + if self._metric is None: + errMsg = "The metric eigenvectors have not been set in the " + errMsg += "metricParameters instance." + raise ValueError(errMsg) + return self._metric + + @metric.setter + def metric(self, inMetric): + if self.write_metric: + for frequency in inMetric.keys(): + numpy.savetxt("metric_components_%d.dat" %(frequency), + inMetric[frequency]) + self._metric = inMetric + + @property + def time_unprojected_metric(self): + """ + The metric of the parameter space with the time dimension unprojected. + This is a Dictionary of numpy.matrix + Each entry in the dictionary is as described under evals. + Each numpy.matrix contains the metric of the parameter space in the + Lambda_i, t coordinate system. The time components are always in the + last [-1] position in the matrix. + """ + if self._time_unprojected_metric is None: + err_msg = "The time unprojected metric has not been set in the " + err_msg += "metricParameters instance." + raise ValueError(err_msg) + return self._time_unprojected_metric + + @time_unprojected_metric.setter + def time_unprojected_metric(self, inMetric): + if self.write_metric: + for frequency in inMetric.keys(): + numpy.savetxt("metric_timeunprojected_%d.dat" %(frequency), + inMetric[frequency]) + self._time_unprojected_metric = inMetric + + @property + def evecsCV(self): + """ + The eigenvectors of the principal directions of the mu space. + This is a Dictionary of numpy.matrix + Each entry in the dictionary is as described under evals. + Each numpy.matrix contains the eigenvectors which, with the eigenvalues + in evals, are needed to rotate the + coordinate system to one in which the metric is the identity matrix. + """ + if self._evecsCV is None: + errMsg = "The covariance eigenvectors have not been set in the " + errMsg += "metricParameters instance." + raise ValueError(errMsg) + return self._evecsCV + + @evecsCV.setter + def evecsCV(self, inEvecs): + if self.write_metric: + for frequency in inEvecs.keys(): + numpy.savetxt("covariance_evecs_%d.dat" %(frequency), + inEvecs[frequency]) + self._evecsCV = inEvecs
+ + + +
+[docs] +def insert_mass_range_option_group(parser,nonSpin=False): + """ + Adds the options used to specify mass ranges in the bank generation codes + to an argparse parser as an argument group. This should be used if you + want to use these options in your code. + + Parameters + ----------- + parser : object + ArgumentParser instance. + nonSpin : boolean, optional (default=False) + If True, the spin-related options will not be added. + """ + massOpts = parser.add_argument_group("Options related to mass and spin " + "limits for bank generation") + massOpts.add_argument("--min-mass1", action="store", type=positive_float, + required=True, + help="Minimum mass1: must be >= min-mass2. " + "REQUIRED. UNITS=Solar mass") + massOpts.add_argument("--max-mass1", action="store", type=positive_float, + required=True, + help="Maximum mass1: must be >= max-mass2. " + "REQUIRED. UNITS=Solar mass") + massOpts.add_argument("--min-mass2", action="store", type=positive_float, + required=True, + help="Minimum mass2. REQUIRED. UNITS=Solar mass") + massOpts.add_argument("--max-mass2", action="store", type=positive_float, + required=True, + help="Maximum mass2. REQUIRED. UNITS=Solar mass") + massOpts.add_argument("--max-total-mass", action="store", + type=positive_float, default=None, + help="Maximum total mass. OPTIONAL, if not provided " + "the max total mass is determined by the component " + "masses. UNITS=Solar mass") + massOpts.add_argument("--min-total-mass", action="store", + type=positive_float, default=None, + help="Minimum total mass. OPTIONAL, if not provided the " + "min total mass is determined by the component masses." + " UNITS=Solar mass") + massOpts.add_argument("--max-chirp-mass", action="store", + type=positive_float, default=None, + help="Maximum chirp mass. OPTIONAL, if not provided the " + "max chirp mass is determined by the component masses." + " UNITS=Solar mass") + massOpts.add_argument("--min-chirp-mass", action="store", + type=positive_float, default=None, + help="Minimum chirp mass. OPTIONAL, if not provided the " + "min chirp mass is determined by the component masses." + " UNITS=Solar mass") + massOpts.add_argument("--max-eta", action="store", type=positive_float, + default=0.25, + help="Maximum symmetric mass ratio. OPTIONAL, no upper bound" + " on eta will be imposed if not provided. " + "Eta is dimensionless.") + massOpts.add_argument("--min-eta", action="store", type=nonnegative_float, + default=0., + help="Minimum symmetric mass ratio. OPTIONAL, no lower bound" + " on eta will be imposed if not provided. " + "Eta is dimensionless.") + massOpts.add_argument("--ns-eos", action="store", + default=None, + help="Select the EOS to be used for the NS when calculating " + "the remnant disk mass. Only 2H is currently supported. " + "OPTIONAL") + massOpts.add_argument("--remnant-mass-threshold", action="store", + type=nonnegative_float, default=None, + help="Setting this filters EM dim NS-BH binaries: if the " + "remnant disk mass does not exceed this value, the NS-BH " + "binary is dropped from the target parameter space. " + "When it is set to None (default value) the EM dim " + "filter is not activated. OPTIONAL") + massOpts.add_argument("--use-eos-max-ns-mass", action="store_true", default=False, + help="Cut the mass range of the smaller object to the maximum "
"mass allowed by EOS. " "OPTIONAL") + massOpts.add_argument("--delta-bh-spin", action="store", + type=positive_float, default=None, + help="Grid spacing used for the BH spin z component when " + "generating the surface of the minimum symmetric " + "mass ratio as a function of BH spin and NS mass required " + "to produce a remnant disk mass that exceeds the threshold " + "specified in --remnant-mass-threshold. " + "OPTIONAL (0.1 by default) ") + massOpts.add_argument("--delta-ns-mass", action="store", + type=positive_float, default=None, + help="Grid spacing used for the NS mass when generating the " + "surface of the minimum symmetric mass ratio as " + "a function of BH spin and NS mass required to produce " + "a remnant disk mass that exceeds the threshold specified " + "in --remnant-mass-threshold. " + "OPTIONAL (0.1 by default) ") + if nonSpin: + parser.add_argument_group(massOpts) + return massOpts + + massOpts.add_argument("--max-ns-spin-mag", action="store", + type=nonnegative_float, default=None, + help="Maximum neutron star spin magnitude. Neutron stars " + "are defined as components lighter than the NS-BH " + "boundary (3 Msun by default). REQUIRED if min-mass2 " + "< ns-bh-boundary-mass") + massOpts.add_argument("--max-bh-spin-mag", action="store", + type=nonnegative_float, default=None, + help="Maximum black hole spin magnitude. Black holes are " + "defined as components at or above the NS-BH boundary " + "(3 Msun by default). REQUIRED if max-mass1 >= " + "ns-bh-boundary-mass") + # Mutually exclusive group prevents both options being set on command line + # If --nsbh-flag is True then spinning bank generation must ignore the + # default value of ns-bh-boundary-mass. + action = massOpts.add_mutually_exclusive_group(required=False) + action.add_argument("--ns-bh-boundary-mass", action='store', + type=positive_float, + help="Mass boundary between neutron stars and black holes. " + "Components below this mass are considered neutron " + "stars and are subject to the neutron star spin limits. " + "Components at/above are subject to the black hole spin " + "limits. OPTIONAL, default=%f. UNITS=Solar mass" \ + % massRangeParameters.default_nsbh_boundary_mass) + action.add_argument("--nsbh-flag", action="store_true", default=False, + help="Set this flag if generating a bank that contains only " + "systems with 1 black hole and 1 neutron star. With " + "this flag set the heavier body will always be subject " + "to the black hole spin restriction and the lighter " + "to the neutron star spin restriction, regardless of " + "mass. OPTIONAL. If set, the value of " + "--ns-bh-boundary-mass will be ignored.") + return massOpts
+ + +
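A minimal usage sketch for this helper (assuming it and verify_mass_range_options, defined next, are exposed at the pycbc.tmpltbank package level, as is conventional for this module; the option values are illustrative only):

import argparse
from pycbc.tmpltbank import (insert_mass_range_option_group,
                             verify_mass_range_options)

parser = argparse.ArgumentParser()
insert_mass_range_option_group(parser)
opts = parser.parse_args([
    "--min-mass1", "5", "--max-mass1", "50",
    "--min-mass2", "1", "--max-mass2", "3",
    "--max-ns-spin-mag", "0.05", "--max-bh-spin-mag", "0.98",
])
# Cross-checks the options and fills in derived limits such as
# opts.min_total_mass and opts.max_total_mass.
verify_mass_range_options(opts, parser)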
+[docs] +def verify_mass_range_options(opts, parser, nonSpin=False): + """ + Parses the metric calculation options given and verifies that they are + correct. + + Parameters + ---------- + opts : argparse.Values instance + Result of parsing the input options with OptionParser + parser : object + The OptionParser instance. + nonSpin : boolean, optional (default=False) + If this is provided the spin-related options will not be checked. + """ + # Mass1 must be the heavier! + if opts.min_mass1 < opts.min_mass2: + parser.error("min-mass1 cannot be less than min-mass2!") + if opts.max_mass1 < opts.max_mass2: + parser.error("max-mass1 cannot be less than max-mass2!") + # If given are min/max total mass/chirp mass possible? + if opts.min_total_mass \ + and (opts.min_total_mass > opts.max_mass1 + opts.max_mass2): + err_msg = "Supplied minimum total mass %f " %(opts.min_total_mass,) + err_msg += "greater than the sum of the two max component masses " + err_msg += " %f and %f." %(opts.max_mass1,opts.max_mass2) + parser.error(err_msg) + if opts.max_total_mass \ + and (opts.max_total_mass < opts.min_mass1 + opts.min_mass2): + err_msg = "Supplied maximum total mass %f " %(opts.max_total_mass,) + err_msg += "smaller than the sum of the two min component masses " + err_msg += " %f and %f." %(opts.min_mass1,opts.min_mass2) + parser.error(err_msg) + if opts.max_total_mass and opts.min_total_mass \ + and (opts.max_total_mass < opts.min_total_mass): + parser.error("Min total mass must be larger than max total mass.") + # Warn the user that his/her setup is such that EM dim NS-BH binaries + # will not be targeted by the template bank that is being built. Also + # inform him/her about the caveats involved in this. + if hasattr(opts, 'remnant_mass_threshold') \ + and opts.remnant_mass_threshold is not None: + logger.info("""You have asked to exclude EM dim NS-BH systems from the + target parameter space. The script will assume that m1 + is the BH and m2 is the NS: make sure that your settings + respect this convention. The script will also treat the + NS as non-spinning: use NS spins in the template bank + at your own risk!""") + if opts.use_eos_max_ns_mass: + logger.info("""You have asked to take into account the maximum NS + mass value for the EOS in use.""") + # Find out if the EM constraint surface data already exists or not + # and inform user whether this will be read from file or generated. + # This is the minumum eta as a function of BH spin and NS mass + # required to produce an EM counterpart + if os.path.isfile('constraint_em_bright.npz'): + logger.info("""The constraint surface for EM bright binaries + will be read in from constraint_em_bright.npz.""") + + # Assign min/max total mass from mass1, mass2 if not specified + if (not opts.min_total_mass) or \ + ((opts.min_mass1 + opts.min_mass2) > opts.min_total_mass): + opts.min_total_mass = opts.min_mass1 + opts.min_mass2 + if (not opts.max_total_mass) or \ + ((opts.max_mass1 + opts.max_mass2) < opts.max_total_mass): + opts.max_total_mass = opts.max_mass1 + opts.max_mass2 + + # It is vital that min and max total mass be set correctly. + # This is becasue the heavily-used function get_random_mass will place + # points first in total mass (to some power), and then in eta. If the total + # mass limits are not well known ahead of time it will place unphysical + # points and fail. + # This test is a bit convoluted as we identify the maximum and minimum + # possible total mass from chirp mass and/or eta restrictions. 
+ if opts.min_chirp_mass is not None: + # Need to get the smallest possible min_tot_mass from this chirp mass + # There are 4 possibilities for where the min_tot_mass is found on the + # line of min_chirp_mass that interacts with the component mass limits. + # Either it is found at max_m2, or at min_m1, or it starts on the equal + # mass line within the parameter space, or it doesn't intersect + # at all. + # First let's get the masses at both of these possible points + m1_at_max_m2 = pnutils.mchirp_mass1_to_mass2(opts.min_chirp_mass, + opts.max_mass2) + if m1_at_max_m2 < opts.max_mass2: + # Unphysical, remove + m1_at_max_m2 = -1 + m2_at_min_m1 = pnutils.mchirp_mass1_to_mass2(opts.min_chirp_mass, + opts.min_mass1) + if m2_at_min_m1 > opts.min_mass1: + # Unphysical, remove + m2_at_min_m1 = -1 + # Get the values on the equal mass line + m1_at_equal_mass, m2_at_equal_mass = pnutils.mchirp_eta_to_mass1_mass2( + opts.min_chirp_mass, 0.25) + + # Are any of these possible? + if m1_at_max_m2 <= opts.max_mass1 and m1_at_max_m2 >= opts.min_mass1: + min_tot_mass = opts.max_mass2 + m1_at_max_m2 + elif m2_at_min_m1 <= opts.max_mass2 and m2_at_min_m1 >= opts.min_mass2: + min_tot_mass = opts.min_mass1 + m2_at_min_m1 + elif m1_at_equal_mass <= opts.max_mass1 and \ + m1_at_equal_mass >= opts.min_mass1 and \ + m2_at_equal_mass <= opts.max_mass2 and \ + m2_at_equal_mass >= opts.min_mass2: + min_tot_mass = m1_at_equal_mass + m2_at_equal_mass + # So either the restriction is low enough to be redundant, or is + # removing all the parameter space + elif m2_at_min_m1 < opts.min_mass2: + # This is the redundant case, ignore + min_tot_mass = opts.min_total_mass + else: + # And this is the bad case + err_msg = "The minimum chirp mass provided is not possible given " + err_msg += "restrictions on component masses." + raise ValueError(err_msg) + # Is there also an eta restriction? + if opts.max_eta: + # Get the value of m1,m2 at max_eta, min_chirp_mass + max_eta_m1, max_eta_m2 = pnutils.mchirp_eta_to_mass1_mass2( + opts.min_chirp_mass, opts.max_eta) + max_eta_min_tot_mass = max_eta_m1 + max_eta_m2 + if max_eta_min_tot_mass > min_tot_mass: + # Okay, eta does restrict this further. Still physical? + min_tot_mass = max_eta_min_tot_mass + if max_eta_m1 > opts.max_mass1: + err_msg = "The combination of component mass, chirp " + err_msg += "mass, eta and (possibly) total mass limits " + err_msg += "have precluded all systems." + raise ValueError(err_msg) + # Update min_tot_mass if needed + if min_tot_mass > opts.min_total_mass: + opts.min_total_mass = float(min_tot_mass) + + # Then need to do max_chirp_mass and min_eta + if opts.max_chirp_mass is not None: + # Need to get the largest possible maxn_tot_mass from this chirp mass + # There are 3 possibilities for where the max_tot_mass is found on the + # line of max_chirp_mass that interacts with the component mass limits. + # Either it is found at min_m2, or at max_m1, or it doesn't intersect + # at all. + # First let's get the masses at both of these possible points + m1_at_min_m2 = pnutils.mchirp_mass1_to_mass2(opts.max_chirp_mass, + opts.min_mass2) + m2_at_max_m1 = pnutils.mchirp_mass1_to_mass2(opts.max_chirp_mass, + opts.max_mass1) + # Are either of these possible? 
+ if m1_at_min_m2 <= opts.max_mass1 and m1_at_min_m2 >= opts.min_mass1: + max_tot_mass = opts.min_mass2 + m1_at_min_m2 + elif m2_at_max_m1 <= opts.max_mass2 and m2_at_max_m1 >= opts.min_mass2: + max_tot_mass = opts.max_mass1 + m2_at_max_m1 + # So either the restriction is low enough to be redundant, or is + # removing all the paramter space + elif m2_at_max_m1 > opts.max_mass2: + # This is the redundant case, ignore + max_tot_mass = opts.max_total_mass + else: + # And this is the bad case + err_msg = "The maximum chirp mass provided is not possible given " + err_msg += "restrictions on component masses." + raise ValueError(err_msg) + # Is there also an eta restriction? + if opts.min_eta: + # Get the value of m1,m2 at max_eta, min_chirp_mass + min_eta_m1, min_eta_m2 = pnutils.mchirp_eta_to_mass1_mass2( + opts.max_chirp_mass, opts.min_eta) + min_eta_max_tot_mass = min_eta_m1 + min_eta_m2 + if min_eta_max_tot_mass < max_tot_mass: + # Okay, eta does restrict this further. Still physical? + max_tot_mass = min_eta_max_tot_mass + if min_eta_m1 < opts.min_mass1: + err_msg = "The combination of component mass, chirp " + err_msg += "mass, eta and (possibly) total mass limits " + err_msg += "have precluded all systems." + raise ValueError(err_msg) + # Update min_tot_mass if needed + if max_tot_mass < opts.max_total_mass: + opts.max_total_mass = float(max_tot_mass) + + # Need to check max_eta alone for minimum and maximum mass + if opts.max_eta: + # Similar to above except this can affect both the minimum and maximum + # total mass. Need to identify where the line of max_eta intersects + # the parameter space, and if it affects mass restrictions. + m1_at_min_m2 = pnutils.eta_mass1_to_mass2(opts.max_eta, opts.min_mass2, + return_mass_heavier=True) + m2_at_min_m1 = pnutils.eta_mass1_to_mass2(opts.max_eta, opts.min_mass1, + return_mass_heavier=False) + m1_at_max_m2 = pnutils.eta_mass1_to_mass2(opts.max_eta, opts.max_mass2, + return_mass_heavier=True) + m2_at_max_m1 = pnutils.eta_mass1_to_mass2(opts.max_eta, opts.max_mass1, + return_mass_heavier=False) + # Check for restrictions on the minimum total mass + # Are either of these possible? + if m1_at_min_m2 <= opts.max_mass1 and m1_at_min_m2 >= opts.min_mass1: + min_tot_mass = opts.min_mass2 + m1_at_min_m2 + elif m2_at_min_m1 <= opts.max_mass2 and m2_at_min_m1 >= opts.min_mass2: + # This case doesn't change the minimal total mass + min_tot_mass = opts.min_total_mass + # So either the restriction is low enough to be redundant, or is + # removing all the paramter space + elif m2_at_min_m1 > opts.max_mass2: + # This is the redundant case, ignore + min_tot_mass = opts.min_total_mass + elif opts.max_eta == 0.25 and (m1_at_min_m2 < opts.min_mass2 or \ + m2_at_min_m1 > opts.min_mass1): + # This just catches potential roundoff issues in the case that + # max-eta is not used + min_tot_mass = opts.min_total_mass + else: + # And this is the bad case + err_msg = "The maximum eta provided is not possible given " + err_msg += "restrictions on component masses." + print(m1_at_min_m2, m2_at_min_m1, m1_at_max_m2, m2_at_max_m1) + print(opts.min_mass1, opts.max_mass1, opts.min_mass2, opts.max_mass2) + raise ValueError(err_msg) + # Update min_tot_mass if needed + if min_tot_mass > opts.min_total_mass: + opts.min_total_mass = float(min_tot_mass) + + # Check for restrictions on the maximum total mass + # Are either of these possible? 
+ if m2_at_max_m1 <= opts.max_mass2 and m2_at_max_m1 >= opts.min_mass2: + max_tot_mass = opts.max_mass1 + m2_at_max_m1 + elif m1_at_max_m2 <= opts.max_mass1 and m1_at_max_m2 >= opts.min_mass1: + # This case doesn't change the maximal total mass + max_tot_mass = opts.max_total_mass + # So either the restriction is low enough to be redundant, or is + # removing all the paramter space, the latter case is already tested + else: + # This is the redundant case, ignore + max_tot_mass = opts.max_total_mass + if max_tot_mass < opts.max_total_mass: + opts.max_total_mass = float(max_tot_mass) + + # Need to check min_eta alone for maximum and minimum total mass + if opts.min_eta: + # Same as max_eta. + # Need to identify where the line of max_eta intersects + # the parameter space, and if it affects mass restrictions. + m1_at_min_m2 = pnutils.eta_mass1_to_mass2(opts.min_eta, opts.min_mass2, + return_mass_heavier=True) + m2_at_min_m1 = pnutils.eta_mass1_to_mass2(opts.min_eta, opts.min_mass1, + return_mass_heavier=False) + m1_at_max_m2 = pnutils.eta_mass1_to_mass2(opts.min_eta, opts.max_mass2, + return_mass_heavier=True) + m2_at_max_m1 = pnutils.eta_mass1_to_mass2(opts.min_eta, opts.max_mass1, + return_mass_heavier=False) + + # Check for restrictions on the maximum total mass + # Are either of these possible? + if m1_at_max_m2 <= opts.max_mass1 and m1_at_max_m2 >= opts.min_mass1: + max_tot_mass = opts.max_mass2 + m1_at_max_m2 + + elif m2_at_max_m1 <= opts.max_mass2 and m2_at_max_m1 >= opts.min_mass2: + # This case doesn't affect the maximum total mass + max_tot_mass = opts.max_total_mass + # So either the restriction is low enough to be redundant, or is + # removing all the paramter space + elif m2_at_max_m1 < opts.min_mass2: + # This is the redundant case, ignore + max_tot_mass = opts.max_total_mass + else: + # And this is the bad case + err_msg = "The minimum eta provided is not possible given " + err_msg += "restrictions on component masses." + raise ValueError(err_msg) + # Update min_tot_mass if needed + if max_tot_mass < opts.max_total_mass: + opts.max_total_mass = float(max_tot_mass) + + # Check for restrictions on the minimum total mass + # Are either of these possible? + if m2_at_min_m1 <= opts.max_mass2 and m2_at_min_m1 >= opts.min_mass2: + min_tot_mass = opts.min_mass1 + m2_at_min_m1 + elif m1_at_min_m2 <= opts.max_mass1 and m1_at_min_m2 >= opts.min_mass1: + # This case doesn't change the maximal total mass + min_tot_mass = opts.min_total_mass + # So either the restriction is low enough to be redundant, or is + # removing all the paramter space, which is tested above + else: + # This is the redundant case, ignore + min_tot_mass = opts.min_total_mass + if min_tot_mass > opts.min_total_mass: + opts.min_total_mass = float(min_tot_mass) + + if opts.max_total_mass < opts.min_total_mass: + err_msg = "After including restrictions on chirp mass, component mass, " + err_msg += "eta and total mass, no physical systems are possible." 
+ raise ValueError(err_msg) + + if opts.max_eta and opts.min_eta and (opts.max_eta < opts.min_eta): + parser.error("--max-eta must be larger than --min-eta.") + if nonSpin: + return + + if opts.max_ns_spin_mag is None: + if opts.nsbh_flag: + parser.error("Must supply --max-ns-spin-mag with --nsbh-flag") + # Can ignore this if no NSs will be generated + elif opts.min_mass2 < (opts.ns_bh_boundary_mass or + massRangeParameters.default_nsbh_boundary_mass): + parser.error("Must supply --max-ns-spin-mag for the chosen" + " value of --min-mass2") + else: + opts.max_ns_spin_mag = opts.max_bh_spin_mag + if opts.max_bh_spin_mag is None: + if opts.nsbh_flag: + parser.error("Must supply --max-bh-spin-mag with --nsbh-flag") + # Can ignore this if no BHs will be generated + if opts.max_mass1 >= (opts.ns_bh_boundary_mass or + massRangeParameters.default_nsbh_boundary_mass): + parser.error("Must supply --max-bh-spin-mag for the chosen" + " value of --max-mass1") + else: + opts.max_bh_spin_mag = opts.max_ns_spin_mag
+ + +
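Much of the logic above converts chirp-mass limits into total-mass limits. On the equal-mass line (eta = 0.25) the relation reduces to M = 2^(6/5) * Mchirp, which gives a quick sanity check on the derived bounds (a worked sketch, not part of the module):

# Mchirp = M * eta**(3/5); at eta = 0.25 this is M / 2**(6/5), so:
min_chirp_mass = 1.2                                  # solar masses, illustrative
min_total_mass_equal_mass = 2 ** (6. / 5.) * min_chirp_mass
print(round(min_total_mass_equal_mass, 2))            # 2.76 solar masses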
+[docs] +class massRangeParameters(object): + """ + This class holds all of the options that are parsed in the function + insert_mass_range_option_group + and all products produced using these options. It can also be initialized + from the __init__ function providing directly the options normally + provided on the command line + """ + + default_nsbh_boundary_mass = 3. + default_ns_eos = '2H' + default_delta_bh_spin = 0.1 + default_delta_ns_mass = 0.1 + + def __init__(self, minMass1, maxMass1, minMass2, maxMass2, + maxNSSpinMag=0, maxBHSpinMag=0, maxTotMass=None, + minTotMass=None, maxEta=None, minEta=0, + max_chirp_mass=None, min_chirp_mass=None, + ns_bh_boundary_mass=None, nsbhFlag=False, + remnant_mass_threshold=None, ns_eos=None, use_eos_max_ns_mass=False, + delta_bh_spin=None, delta_ns_mass=None): + """ + Initialize an instance of the massRangeParameters by providing all + options directly. See the help message associated with any code + that uses the metric options for more details of how to set each of + these. For e.g. pycbc_aligned_stoch_bank --help + """ + self.minMass1=minMass1 + self.maxMass1=maxMass1 + self.minMass2=minMass2 + self.maxMass2=maxMass2 + self.maxNSSpinMag=maxNSSpinMag + self.maxBHSpinMag=maxBHSpinMag + self.minTotMass = minMass1 + minMass2 + if minTotMass and (minTotMass > self.minTotMass): + self.minTotMass = minTotMass + self.maxTotMass = maxMass1 + maxMass2 + if maxTotMass and (maxTotMass < self.maxTotMass): + self.maxTotMass = maxTotMass + self.maxTotMass=maxTotMass + self.minTotMass=minTotMass + if maxEta: + self.maxEta=maxEta + else: + self.maxEta=0.25 + self.max_chirp_mass = max_chirp_mass + self.min_chirp_mass = min_chirp_mass + self.minEta=minEta + self.ns_bh_boundary_mass = ( + ns_bh_boundary_mass or self.default_nsbh_boundary_mass) + self.nsbhFlag=nsbhFlag + self.remnant_mass_threshold = remnant_mass_threshold + self.ns_eos = ( + ns_eos or self.default_ns_eos) + self.delta_bh_spin = ( + delta_bh_spin or self.default_delta_bh_spin) + self.delta_ns_mass = ( + delta_ns_mass or self.default_delta_ns_mass) + self.use_eos_max_ns_mass = use_eos_max_ns_mass + if self.remnant_mass_threshold is not None: + if self.ns_eos != '2H': + errMsg = """ + By setting a value for --remnant-mass-threshold + you have asked to filter out EM dim NS-BH templates. + The EOS you chose is not supported currently: please + remove the --ns-eos option from your command line or + set it to '2H'. + """ + raise ValueError(errMsg) + if use_eos_max_ns_mass: + _, max_ns_g_mass = load_ns_sequence(self.ns_eos) + if(self.maxMass2 > max_ns_g_mass): + errMsg = """ + The maximum NS mass supported by this EOS is + {0}. Please set --max-mass2 to this value or run + without the --use-eos-max-ns-mass flag. + """.format(max_ns_g_mass-0.0000000001) + raise ValueError(errMsg) + self.delta_bh_spin = ( + delta_bh_spin or self.default_delta_bh_spin) + self.delta_ns_mass = ( + delta_ns_mass or self.default_delta_ns_mass) + + # FIXME: This may be inaccurate if Eta limits are given + # This will not cause any problems, but maybe could be fixed. + self.minCompMass = self.minMass2 + self.maxCompMass = self.maxMass1 + + # WARNING: We expect mass1 > mass2 ALWAYS + # Check input: + if (minMass2 > minMass1) or (maxMass2 > maxMass1): + errMsg = "Mass1 must be larger than mass2. Check input options." + raise ValueError(errMsg) + + if (minMass2 > maxMass2) or (minMass1 > maxMass1): + errMsg = "Minimum masses cannot be larger than maximum masses." + errMsg += "Check input options." + raise ValueError(errMsg) + + +
+[docs] + @classmethod + def from_argparse(cls, opts, nonSpin=False): + """ + Initialize an instance of the massRangeParameters class from an + argparse.OptionParser instance. This assumes that + insert_mass_range_option_group + and + verify_mass_range_options + have already been called before initializing the class. + """ + if nonSpin: + return cls(opts.min_mass1, opts.max_mass1, opts.min_mass2, + opts.max_mass2, maxTotMass=opts.max_total_mass, + minTotMass=opts.min_total_mass, maxEta=opts.max_eta, + minEta=opts.min_eta, max_chirp_mass=opts.max_chirp_mass, + min_chirp_mass=opts.min_chirp_mass, + remnant_mass_threshold=opts.remnant_mass_threshold, + ns_eos=opts.ns_eos, use_eos_max_ns_mass=opts.use_eos_max_ns_mass, + delta_bh_spin=opts.delta_bh_spin, delta_ns_mass=opts.delta_ns_mass) + else: + return cls(opts.min_mass1, opts.max_mass1, opts.min_mass2, + opts.max_mass2, maxTotMass=opts.max_total_mass, + minTotMass=opts.min_total_mass, maxEta=opts.max_eta, + minEta=opts.min_eta, maxNSSpinMag=opts.max_ns_spin_mag, + maxBHSpinMag=opts.max_bh_spin_mag, + nsbhFlag=opts.nsbh_flag, + max_chirp_mass=opts.max_chirp_mass, + min_chirp_mass=opts.min_chirp_mass, + ns_bh_boundary_mass=opts.ns_bh_boundary_mass, + remnant_mass_threshold=opts.remnant_mass_threshold, + ns_eos=opts.ns_eos, use_eos_max_ns_mass=opts.use_eos_max_ns_mass, + delta_bh_spin=opts.delta_bh_spin, delta_ns_mass=opts.delta_ns_mass)
+ + +
+[docs] + def is_outside_range(self, mass1, mass2, spin1z, spin2z): + """ + Test if a given location in mass1, mass2, spin1z, spin2z is within the + range of parameters allowed by the massParams object. + """ + # Mass1 test + if mass1 * 1.001 < self.minMass1: + return 1 + if mass1 > self.maxMass1 * 1.001: + return 1 + # Mass2 test + if mass2 * 1.001 < self.minMass2: + return 1 + if mass2 > self.maxMass2 * 1.001: + return 1 + # Spin1 test + if self.nsbhFlag: + if (abs(spin1z) > self.maxBHSpinMag * 1.001): + return 1 + else: + spin1zM = abs(spin1z) + if not( (mass1 * 1.001 > self.ns_bh_boundary_mass \ + and spin1zM <= self.maxBHSpinMag * 1.001) \ + or (mass1 < self.ns_bh_boundary_mass * 1.001 \ + and spin1zM <= self.maxNSSpinMag * 1.001)): + return 1 + # Spin2 test + if self.nsbhFlag: + if (abs(spin2z) > self.maxNSSpinMag * 1.001): + return 1 + else: + spin2zM = abs(spin2z) + if not( (mass2 * 1.001 > self.ns_bh_boundary_mass \ + and spin2zM <= self.maxBHSpinMag * 1.001) \ + or (mass2 < self.ns_bh_boundary_mass * 1.001 and \ + spin2zM <= self.maxNSSpinMag * 1.001)): + return 1 + # Total mass test + mTot = mass1 + mass2 + if mTot > self.maxTotMass * 1.001: + return 1 + if mTot * 1.001 < self.minTotMass: + return 1 + + # Eta test + eta = mass1 * mass2 / (mTot * mTot) + if eta > self.maxEta * 1.001: + return 1 + if eta * 1.001 < self.minEta: + return 1 + + # Chirp mass test + chirp_mass = mTot * eta**(3./5.) + if self.min_chirp_mass is not None \ + and chirp_mass * 1.001 < self.min_chirp_mass: + return 1 + if self.max_chirp_mass is not None \ + and chirp_mass > self.max_chirp_mass * 1.001: + return 1 + + return 0
+
+ + +
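A short sketch of constructing the class directly and testing a point against it (assuming the class is importable from pycbc.tmpltbank; masses in solar masses, spins dimensionless; the total-mass limits are passed explicitly here):

from pycbc.tmpltbank import massRangeParameters

mass_range = massRangeParameters(5., 50., 1., 3.,
                                 maxNSSpinMag=0.05, maxBHSpinMag=0.98,
                                 minTotMass=6., maxTotMass=53.)
# is_outside_range returns 1 if the point violates any limit, 0 otherwise.
print(mass_range.is_outside_range(10., 1.4, 0.3, 0.))   # 0
print(mass_range.is_outside_range(60., 1.4, 0.3, 0.))   # 1 (mass1 above max-mass1)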
+[docs] +class ethincaParameters(object): + """ + This class holds all of the options that are parsed in the function + insert_ethinca_metric_options + and all products produced using these options. It can also be initialized + from the __init__ function, providing directly the options normally + provided on the command line + """ + def __init__(self, pnOrder, cutoff, freqStep, fLow=None, full_ethinca=False, + time_ethinca=False): + """ + Initialize an instance of ethincaParameters by providing all + options directly. See the insert_ethinca_metric_options() function + for explanation or e.g. run pycbc_geom_nonspinbank --help + """ + self.full_ethinca=full_ethinca + self.time_ethinca=time_ethinca + self.doEthinca= self.full_ethinca or self.time_ethinca + self.pnOrder=pnOrder + self.cutoff=cutoff + self.freqStep=freqStep + # independent fLow for ethinca metric is currently not used + self.fLow=fLow + # check that ethinca options make sense + if self.full_ethinca and self.time_ethinca: + err_msg = "It does not make sense to ask me to do the time " + err_msg += "restricted ethinca and also the full ethinca." + raise ValueError(err_msg) + if self.doEthinca and not ( + cutoff in pnutils.named_frequency_cutoffs.keys()): + raise ValueError("Need a valid cutoff formula to calculate " + "ethinca! Possible values are "+ + str(tuple(pnutils.named_frequency_cutoffs.keys()))) + if self.doEthinca and not freqStep: + raise ValueError("Need to specify a cutoff frequency step to " + "calculate ethinca! (ethincaFreqStep)") + +
+[docs] + @classmethod + def from_argparse(cls, opts): + """ + Initialize an instance of the ethincaParameters class from an + argparse.OptionParser instance. This assumes that + insert_ethinca_metric_options + and + verify_ethinca_metric_options + have already been called before initializing the class. + """ + return cls(opts.ethinca_pn_order, opts.filter_cutoff, + opts.ethinca_frequency_step, fLow=None, + full_ethinca=opts.calculate_ethinca_metric, + time_ethinca=opts.calculate_time_metric_components)
+
+ + +
+[docs] +def insert_ethinca_metric_options(parser): + """ + Adds the options used to calculate the ethinca metric, if required. + + Parameters + ----------- + parser : object + OptionParser instance. + """ + ethincaGroup = parser.add_argument_group("Ethinca metric options", + "Options used in the calculation of Gamma metric " + "components for the ethinca coincidence test and for " + "assigning high-frequency cutoffs to templates.") + ethinca_methods = ethincaGroup.add_mutually_exclusive_group() + ethinca_methods.add_argument("--calculate-time-metric-components", + action="store_true", default=False, + help="If given, the ethinca metric will be calculated " + "for only the time component, and stored in the Gamma0 " + "entry of the sngl_inspiral table. OPTIONAL, default=False") + ethinca_methods.add_argument("--calculate-ethinca-metric", + action="store_true", default=False, + help="If given, the ethinca metric will be calculated " + "and stored in the Gamma entries of the sngl_inspiral " + "table. OPTIONAL, default=False") + ethincaGroup.add_argument("--ethinca-pn-order", + default=None, choices=get_ethinca_orders(), + help="Specify a PN order to be used in calculating the " + "ethinca metric. OPTIONAL: if not specified, the same " + "order will be used as for the bank metric.") + ethincaGroup.add_argument("--filter-cutoff", + default=None, + choices=tuple(pnutils.named_frequency_cutoffs.keys()), + help="Specify an upper frequency cutoff formula for the " + "ethinca metric calculation, and for the values of f_final" + " assigned to the templates. REQUIRED if the " + "calculate-ethinca-metric option is given.") + ethincaGroup.add_argument("--ethinca-frequency-step", action="store", + type=float, default=10., + help="Control the precision of the upper frequency cutoff." + " For speed, the metric is calculated only for discrete " + "f_max values with a spacing given by this option. Each " + "template is assigned the metric for the f_max closest to " + "its analytical cutoff formula. OPTIONAL, default=10. " + "UNITS=Hz") + + return ethincaGroup
+ + +
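A usage sketch tying the ethinca helpers together (assuming these names are importable from pycbc.tmpltbank, and that 'SchwarzISCO' is one of the cutoff formulae listed in pnutils.named_frequency_cutoffs; the values are illustrative):

import argparse
from pycbc.tmpltbank import (insert_ethinca_metric_options,
                             verify_ethinca_metric_options,
                             ethincaParameters)

parser = argparse.ArgumentParser()
insert_ethinca_metric_options(parser)
opts = parser.parse_args(["--calculate-ethinca-metric",
                          "--filter-cutoff", "SchwarzISCO"])
verify_ethinca_metric_options(opts, parser)
ethinca_params = ethincaParameters.from_argparse(opts)
# check_ethinca_against_bank_params(ethinca_params, metric_params) would then
# cross-check these settings against an existing metricParameters instance.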
+[docs] +def verify_ethinca_metric_options(opts, parser): + """ + Checks that the necessary options are given for the ethinca metric + calculation. + + Parameters + ---------- + opts : argparse.Values instance + Result of parsing the input options with OptionParser + parser : object + The OptionParser instance. + """ + if opts.filter_cutoff is not None and not (opts.filter_cutoff in + pnutils.named_frequency_cutoffs.keys()): + parser.error("Need a valid cutoff formula to calculate ethinca or " + "assign filter f_final values! Possible values are " + +str(tuple(pnutils.named_frequency_cutoffs.keys()))) + if (opts.calculate_ethinca_metric or opts.calculate_time_metric_components)\ + and not opts.ethinca_frequency_step: + parser.error("Need to specify a cutoff frequency step to calculate " + "ethinca!") + if not (opts.calculate_ethinca_metric or\ + opts.calculate_time_metric_components) and opts.ethinca_pn_order: + parser.error("Can't specify an ethinca PN order if not " + "calculating ethinca metric!")
+ + +
+[docs] +def check_ethinca_against_bank_params(ethincaParams, metricParams): + """ + Cross-check the ethinca and bank layout metric calculation parameters + and set the ethinca metric PN order equal to the bank PN order if not + previously set. + + Parameters + ---------- + ethincaParams: instance of ethincaParameters + metricParams: instance of metricParameters + """ + if ethincaParams.doEthinca: + if metricParams.f0 != metricParams.fLow: + raise ValueError("If calculating ethinca metric, f0 and f-low " + "must be equal!") + if ethincaParams.fLow is not None and ( + ethincaParams.fLow != metricParams.fLow): + raise ValueError("Ethinca metric calculation does not currently " + "support a f-low value different from the bank " + "metric!") + if ethincaParams.pnOrder is None: + ethincaParams.pnOrder = metricParams.pnOrder + else: pass
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/tmpltbank/partitioned_bank.html b/latest/html/_modules/pycbc/tmpltbank/partitioned_bank.html new file mode 100644 index 00000000000..e89ddfc5632 --- /dev/null +++ b/latest/html/_modules/pycbc/tmpltbank/partitioned_bank.html @@ -0,0 +1,812 @@ + + + + + + pycbc.tmpltbank.partitioned_bank — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.tmpltbank.partitioned_bank

+# Copyright (C) 2013 Ian W. Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+import copy
+import numpy
+import logging
+
+from pycbc.tmpltbank import coord_utils
+
+logger = logging.getLogger('pycbc.tmpltbank.partitioned_bank')
+
+
+[docs] +class PartitionedTmpltbank(object): + """ + This class is used to hold a template bank partitioned into numerous bins + based on position in the Cartesian parameter space where the axes are the + principal components. It can also be used to hold intermediary + products used while constructing (e.g.) a stochastic template bank. + """ + def __init__(self, mass_range_params, metric_params, ref_freq, + bin_spacing, bin_range_check=1): + """ + Set up the partitioned template bank class. The combination of the + reference frequency, the bin spacing and the metric dictates how the + parameter space will be partitioned. + + Parameters + ----------- + mass_range_params : massRangeParameters object + An initialized massRangeParameters object holding the details of + the mass and spin ranges being considered. + metric_params : metricParameters object + An initialized metricParameters object holding the details of the + parameter space metric that is being used. + ref_freq : float + The reference frequency to use as the upper frequency cutoff of + the metric when partitioning the bank. In general this would be + set to the *smallest* upper frequency cutoff that is possible in + the given parameter space. However, in some cases this can lead + to only a small number of partitions and the computational cost + will increase dramatically. NOTE: when using the vary-fupper + option this upper frequency cutoff is only used to determine which + points should be matched against each other, it is *not* used in + the actual metric-based calculation of the distance (which uses the + frequency cutoffs of the points being considered). + bin_spacing : float + The metric distance to space the bins by. NOTE: If you want to + place the bins to have a width corresponding to a minimal match of + 0.97 you would set this to :math:`(1 - 0.97)^{0.5}`. + Note the square root, + matches correspond to the square of parameter space distance. + bin_range_check : int + When computing matches consider points in the corresponding bin and + all bins +/- this value in both chi_1 and chi_2 directions. + DEFAULT = 1. + """ + # Flags to be used in other methods of this class. Initialized here for + # simplicity + self.spin_warning_given = False + + # These will probably be used a lot, so add to object + self.mass_range_params = mass_range_params + self.metric_params = metric_params + self.ref_freq = ref_freq + self.bin_spacing = bin_spacing + + # Get parameter space extent + vals = coord_utils.estimate_mass_range(1000000, mass_range_params, + metric_params, ref_freq, covary=True) + chi1_max = vals[0].max() + chi1_min = vals[0].min() + chi1_diff = chi1_max - chi1_min + chi2_max = vals[1].max() + chi2_min = vals[1].min() + chi2_diff = chi2_max - chi2_min + # Add a little bit extra as we may not have reached the edges. + # FIXME: Maybe better to use the numerical code to find maxima here? 
+ chi1_min = chi1_min - 0.1*chi1_diff + chi1_max = chi1_max + 0.1*chi1_diff + chi2_min = chi2_min - 0.1*chi2_diff + chi2_max = chi2_max + 0.1*chi2_diff + + massbank = {} + bank = {} + # Also add a little bit here + for i in range(-2, int((chi1_max - chi1_min) // bin_spacing + 2)): + bank[i] = {} + massbank[i] = {} + for j in range(-2, int((chi2_max - chi2_min) // bin_spacing + 2)): + bank[i][j] = [] + massbank[i][j] = {} + massbank[i][j]['mass1s'] = numpy.array([]) + + self.massbank = massbank + self.bank = bank + # Record minimum and maximum bins + self.min_chi1_bin = -2 + self.min_chi2_bin = -2 + self.max_chi1_bin = int((chi1_max - chi1_min) // bin_spacing + 1) + self.max_chi2_bin = int((chi2_max - chi2_min) // bin_spacing + 1) + self.chi1_min = chi1_min + self.chi1_max = chi1_max + self.chi2_min = chi2_min + self.chi2_max = chi2_max + + # How many adjacent bins should we check? + self.bin_range_check = bin_range_check + self.bin_loop_order = coord_utils.outspiral_loop(self.bin_range_check) +
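As the constructor docstring notes, matches correspond to the square of parameter-space distance, so the bin width targeting a given minimal match is the square root of (1 - match). A one-line worked example:

minimal_match = 0.97
bin_spacing = (1 - minimal_match) ** 0.5   # ~0.173, the value to pass to __init__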
+[docs] + def get_point_from_bins_and_idx(self, chi1_bin, chi2_bin, idx): + """Find masses and spins given bin numbers and index. + + Given the chi1 bin, chi2 bin and an index, return the masses and spins + of the point at that index. Will fail if no point exists there. + + Parameters + ----------- + chi1_bin : int + The bin number for chi1. + chi2_bin : int + The bin number for chi2. + idx : int + The index within the chi1, chi2 bin. + + Returns + -------- + mass1 : float + Mass of heavier body. + mass2 : float + Mass of lighter body. + spin1z : float + Spin of heavier body. + spin2z : float + Spin of lighter body. + """ + mass1 = self.massbank[chi1_bin][chi2_bin]['mass1s'][idx] + mass2 = self.massbank[chi1_bin][chi2_bin]['mass2s'][idx] + spin1z = self.massbank[chi1_bin][chi2_bin]['spin1s'][idx] + spin2z = self.massbank[chi1_bin][chi2_bin]['spin2s'][idx] + return mass1, mass2, spin1z, spin2z
+ + +
+[docs] + def get_freq_map_and_normalizations(self, frequency_list, + upper_freq_formula): + """ + If using the --vary-fupper capability we need to store the mapping + between index and frequencies in the list. We also precalculate the + normalization factor at every frequency, which is used when estimating + overlaps to account for abrupt changes in termination frequency. + + Parameters + ----------- + frequency_list : array of floats + The frequencies for which the metric has been computed and lie + within the parameter space being considered. + upper_freq_formula : string + """ + self.frequency_map = {} + self.normalization_map = {} + self.upper_freq_formula = upper_freq_formula + # FIXME: Must this be sorted on input + frequency_list.sort() + + for idx, frequency in enumerate(frequency_list): + self.frequency_map[frequency] = idx + self.normalization_map[frequency] = \ + (self.metric_params.moments['I7'][frequency])**0.5
+ + +
+[docs] + def find_point_bin(self, chi_coords): + """ + Given a set of coordinates in the chi parameter space, identify the + indices of the chi1 and chi2 bins that the point occurs in. Returns + these indices. + + Parameters + ----------- + chi_coords : numpy.array + The position of the point in the chi coordinates. + + Returns + -------- + chi1_bin : int + Index of the chi_1 bin. + chi2_bin : int + Index of the chi_2 bin. + """ + # Identify bin + chi1_bin = int((chi_coords[0] - self.chi1_min) // self.bin_spacing) + chi2_bin = int((chi_coords[1] - self.chi2_min) // self.bin_spacing) + self.check_bin_existence(chi1_bin, chi2_bin) + return chi1_bin, chi2_bin
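The bin index is simply the integer division of the offset from the lower edge by the bin spacing; a quick standalone illustration (values invented):

chi1_min, bin_spacing = -1.2, 0.173
chi1_coord = 0.0
chi1_bin = int((chi1_coord - chi1_min) // bin_spacing)   # -> 6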
+ + + +
+[docs] + def check_bin_existence(self, chi1_bin, chi2_bin): + """ + Given indices for bins in chi1 and chi2 space check that the bin + exists in the object. If not add it. Also check for the existence of + all bins within +/- self.bin_range_check and add if not present. + + Parameters + ----------- + chi1_bin : int + The index of the chi1_bin to check + chi2_bin : int + The index of the chi2_bin to check + """ + bin_range_check = self.bin_range_check + # Check if this bin actually exists. If not add it + if ( (chi1_bin < self.min_chi1_bin+bin_range_check) or + (chi1_bin > self.max_chi1_bin-bin_range_check) or + (chi2_bin < self.min_chi2_bin+bin_range_check) or + (chi2_bin > self.max_chi2_bin-bin_range_check) ): + for temp_chi1 in range(chi1_bin-bin_range_check, + chi1_bin+bin_range_check+1): + if temp_chi1 not in self.massbank: + self.massbank[temp_chi1] = {} + self.bank[temp_chi1] = {} + for temp_chi2 in range(chi2_bin-bin_range_check, + chi2_bin+bin_range_check+1): + if temp_chi2 not in self.massbank[temp_chi1]: + self.massbank[temp_chi1][temp_chi2] = {} + self.massbank[temp_chi1][temp_chi2]['mass1s'] =\ + numpy.array([]) + self.bank[temp_chi1][temp_chi2] = []
+ + +
+[docs] + def calc_point_distance(self, chi_coords): + """ + Calculate distance between point and the bank. Return the closest + distance. + + Parameters + ----------- + chi_coords : numpy.array + The position of the point in the chi coordinates. + + Returns + -------- + min_dist : float + The smallest **SQUARED** metric distance between the test point and + the bank. + indexes : The chi1_bin, chi2_bin and position within that bin at which + the closest matching point lies. + """ + chi1_bin, chi2_bin = self.find_point_bin(chi_coords) + min_dist = 1000000000 + indexes = None + for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order: + curr_chi1_bin = chi1_bin + chi1_bin_offset + curr_chi2_bin = chi2_bin + chi2_bin_offset + for idx, bank_chis in \ + enumerate(self.bank[curr_chi1_bin][curr_chi2_bin]): + dist = coord_utils.calc_point_dist(chi_coords, bank_chis) + if dist < min_dist: + min_dist = dist + indexes = (curr_chi1_bin, curr_chi2_bin, idx) + return min_dist, indexes
+ + +
+[docs] + def test_point_distance(self, chi_coords, distance_threshold): + """ + Test if the distance between the supplied point and the bank is less + than the supplied distance threshold. + + Parameters + ----------- + chi_coords : numpy.array + The position of the point in the chi coordinates. + distance_threshold : float + The **SQUARE ROOT** of the metric distance to test as threshold. + E.g. if you want to test to a minimal match of 0.97 you would + use 1 - 0.97 = 0.03 for this value. + + Returns + -------- + Boolean + True if point is within the distance threshold. False if not. + + """ + chi1_bin, chi2_bin = self.find_point_bin(chi_coords) + for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order: + curr_chi1_bin = chi1_bin + chi1_bin_offset + curr_chi2_bin = chi2_bin + chi2_bin_offset + for bank_chis in self.bank[curr_chi1_bin][curr_chi2_bin]: + dist = coord_utils.calc_point_dist(chi_coords, bank_chis) + if dist < distance_threshold: + return True + else: + return False
+ + +
+[docs] + def calc_point_distance_vary(self, chi_coords, point_fupper, mus): + """ + Calculate distance between point and the bank allowing the metric to + vary based on varying upper frequency cutoff. Slower than + calc_point_distance, but more reliable when upper frequency cutoff can + change a lot. + + Parameters + ----------- + chi_coords : numpy.array + The position of the point in the chi coordinates. + point_fupper : float + The upper frequency cutoff to use for this point. This value must + be one of the ones already calculated in the metric. + mus : numpy.array + A 2D array where idx 0 holds the upper frequency cutoff and idx 1 + holds the coordinates in the [not covaried] mu parameter space for + each value of the upper frequency cutoff. + + Returns + -------- + min_dist : float + The smallest **SQUARED** metric distance between the test point and + the bank. + indexes : The chi1_bin, chi2_bin and position within that bin at which + the closest matching point lies. + """ + chi1_bin, chi2_bin = self.find_point_bin(chi_coords) + min_dist = 1000000000 + indexes = None + for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order: + curr_chi1_bin = chi1_bin + chi1_bin_offset + curr_chi2_bin = chi2_bin + chi2_bin_offset + # No points = Next iteration + curr_bank = self.massbank[curr_chi1_bin][curr_chi2_bin] + if not curr_bank['mass1s'].size: + continue + + # *NOT* the same of .min and .max + f_upper = numpy.minimum(point_fupper, curr_bank['freqcuts']) + f_other = numpy.maximum(point_fupper, curr_bank['freqcuts']) + # NOTE: freq_idxes is a vector! + freq_idxes = numpy.array([self.frequency_map[f] for f in f_upper]) + # vecs1 gives a 2x2 vector: idx0 = stored index, idx1 = mu index + vecs1 = mus[freq_idxes, :] + # vecs2 gives a 2x2 vector: idx0 = stored index, idx1 = mu index + range_idxes = numpy.arange(len(freq_idxes)) + vecs2 = curr_bank['mus'][range_idxes, freq_idxes, :] + + # Now do the sums + dists = (vecs1 - vecs2)*(vecs1 - vecs2) + # This reduces to 1D: idx = stored index + dists = numpy.sum(dists, axis=1) + norm_upper = numpy.array([self.normalization_map[f] \ + for f in f_upper]) + norm_other = numpy.array([self.normalization_map[f] \ + for f in f_other]) + norm_fac = norm_upper / norm_other + renormed_dists = 1 - (1 - dists)*norm_fac + curr_min_dist = renormed_dists.min() + if curr_min_dist < min_dist: + min_dist = curr_min_dist + indexes = curr_chi1_bin, curr_chi2_bin, renormed_dists.argmin() + + return min_dist, indexes
+ + +
+[docs] + def test_point_distance_vary(self, chi_coords, point_fupper, mus, + distance_threshold): + """ + Test if distance between point and the bank is greater than distance + threshold while allowing the metric to + vary based on varying upper frequency cutoff. Slower than + test_point_distance, but more reliable when upper frequency cutoff can + change a lot. + + Parameters + ----------- + chi_coords : numpy.array + The position of the point in the chi coordinates. + point_fupper : float + The upper frequency cutoff to use for this point. This value must + be one of the ones already calculated in the metric. + mus : numpy.array + A 2D array where idx 0 holds the upper frequency cutoff and idx 1 + holds the coordinates in the [not covaried] mu parameter space for + each value of the upper frequency cutoff. + distance_threshold : float + The **SQUARE ROOT** of the metric distance to test as threshold. + E.g. if you want to test to a minimal match of 0.97 you would + use 1 - 0.97 = 0.03 for this value. + + Returns + -------- + Boolean + True if point is within the distance threshold. False if not. + """ + chi1_bin, chi2_bin = self.find_point_bin(chi_coords) + for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order: + curr_chi1_bin = chi1_bin + chi1_bin_offset + curr_chi2_bin = chi2_bin + chi2_bin_offset + # No points = Next iteration + curr_bank = self.massbank[curr_chi1_bin][curr_chi2_bin] + if not curr_bank['mass1s'].size: + continue + + # *NOT* the same of .min and .max + f_upper = numpy.minimum(point_fupper, curr_bank['freqcuts']) + f_other = numpy.maximum(point_fupper, curr_bank['freqcuts']) + # NOTE: freq_idxes is a vector! + freq_idxes = numpy.array([self.frequency_map[f] for f in f_upper]) + # vecs1 gives a 2x2 vector: idx0 = stored index, idx1 = mu index + vecs1 = mus[freq_idxes, :] + # vecs2 gives a 2x2 vector: idx0 = stored index, idx1 = mu index + range_idxes = numpy.arange(len(freq_idxes)) + vecs2 = curr_bank['mus'][range_idxes,freq_idxes,:] + + # Now do the sums + dists = (vecs1 - vecs2)*(vecs1 - vecs2) + # This reduces to 1D: idx = stored index + dists = numpy.sum(dists, axis=1) + # I wonder if this line actually speeds things up? + if (dists > distance_threshold).all(): + continue + # This is only needed for close templates, should we prune? + norm_upper = numpy.array([self.normalization_map[f] \ + for f in f_upper]) + norm_other = numpy.array([self.normalization_map[f] \ + for f in f_other]) + norm_fac = norm_upper / norm_other + renormed_dists = 1 - (1 - dists)*norm_fac + if (renormed_dists < distance_threshold).any(): + return True + else: + return False
+ + +
+[docs] + def add_point_by_chi_coords(self, chi_coords, mass1, mass2, spin1z, spin2z, + point_fupper=None, mus=None): + """ + Add a point to the partitioned template bank. The point_fupper and mus + kwargs must be provided for all templates if the vary fupper capability + is desired. This requires that the chi_coords, as well as mus and + point_fupper if needed, to be precalculated. If you just have the + masses and don't want to worry about translations see + add_point_by_masses, which will do translations and then call this. + + Parameters + ----------- + chi_coords : numpy.array + The position of the point in the chi coordinates. + mass1 : float + The heavier mass of the point to add. + mass2 : float + The lighter mass of the point to add. + spin1z: float + The [aligned] spin on the heavier body. + spin2z: float + The [aligned] spin on the lighter body. + The upper frequency cutoff to use for this point. This value must + be one of the ones already calculated in the metric. + mus : numpy.array + A 2D array where idx 0 holds the upper frequency cutoff and idx 1 + holds the coordinates in the [not covaried] mu parameter space for + each value of the upper frequency cutoff. + """ + chi1_bin, chi2_bin = self.find_point_bin(chi_coords) + self.bank[chi1_bin][chi2_bin].append(copy.deepcopy(chi_coords)) + curr_bank = self.massbank[chi1_bin][chi2_bin] + + if curr_bank['mass1s'].size: + curr_bank['mass1s'] = numpy.append(curr_bank['mass1s'], + numpy.array([mass1])) + curr_bank['mass2s'] = numpy.append(curr_bank['mass2s'], + numpy.array([mass2])) + curr_bank['spin1s'] = numpy.append(curr_bank['spin1s'], + numpy.array([spin1z])) + curr_bank['spin2s'] = numpy.append(curr_bank['spin2s'], + numpy.array([spin2z])) + if point_fupper is not None: + curr_bank['freqcuts'] = numpy.append(curr_bank['freqcuts'], + numpy.array([point_fupper])) + # Mus needs to append onto axis 0. See below for contents of + # the mus variable + if mus is not None: + curr_bank['mus'] = numpy.append(curr_bank['mus'], + numpy.array([mus[:,:]]), axis=0) + else: + curr_bank['mass1s'] = numpy.array([mass1]) + curr_bank['mass2s'] = numpy.array([mass2]) + curr_bank['spin1s'] = numpy.array([spin1z]) + curr_bank['spin2s'] = numpy.array([spin2z]) + if point_fupper is not None: + curr_bank['freqcuts'] = numpy.array([point_fupper]) + # curr_bank['mus'] is a 3D array + # NOTE: mu relates to the non-covaried Cartesian coordinate system + # Axis 0: Template index + # Axis 1: Frequency cutoff index + # Axis 2: Mu coordinate index + if mus is not None: + curr_bank['mus'] = numpy.array([mus[:,:]])
+ + +
+[docs] + def add_point_by_masses(self, mass1, mass2, spin1z, spin2z, + vary_fupper=False): + """ + Add a point to the template bank. This differs from add point to bank + as it assumes that the chi coordinates and the products needed to use + vary_fupper have not already been calculated. This function calculates + these products and then calls add_point_by_chi_coords. + This function also + carries out a number of sanity checks (eg. is the point within the + ranges given by mass_range_params) that add_point_by_chi_coords does + not do for speed concerns. + + Parameters + ----------- + mass1 : float + Mass of the heavier body + mass2 : float + Mass of the lighter body + spin1z : float + Spin of the heavier body + spin2z : float + Spin of the lighter body + """ + # Test that masses are the expected way around (ie. mass1 > mass2) + if mass2 > mass1: + if not self.spin_warning_given: + warn_msg = "Am adding a template where mass2 > mass1. The " + warn_msg += "convention is that mass1 > mass2. Swapping mass1 " + warn_msg += "and mass2 and adding point to bank. This message " + warn_msg += "will not be repeated." + logger.warning(warn_msg) + self.spin_warning_given = True + + # These that masses obey the restrictions of mass_range_params + if self.mass_range_params.is_outside_range(mass1, mass2, spin1z, + spin2z): + err_msg = "Point with masses given by " + err_msg += "%f %f %f %f " %(mass1, mass2, spin1z, spin2z) + err_msg += "(mass1, mass2, spin1z, spin2z) is not consistent " + err_msg += "with the provided command-line restrictions on masses " + err_msg += "and spins." + raise ValueError(err_msg) + + # Get chi coordinates + chi_coords = coord_utils.get_cov_params(mass1, mass2, spin1z, spin2z, + self.metric_params, + self.ref_freq) + + # Get mus and best fupper for this point, if needed + if vary_fupper: + mass_dict = {} + mass_dict['m1'] = numpy.array([mass1]) + mass_dict['m2'] = numpy.array([mass2]) + mass_dict['s1z'] = numpy.array([spin1z]) + mass_dict['s2z'] = numpy.array([spin2z]) + freqs = numpy.array(list(self.frequency_map.keys()), dtype=float) + freq_cutoff = coord_utils.return_nearest_cutoff(\ + self.upper_freq_formula, mass_dict, freqs) + freq_cutoff = freq_cutoff[0] + lambdas = coord_utils.get_chirp_params\ + (mass1, mass2, spin1z, spin2z, self.metric_params.f0, + self.metric_params.pnOrder) + mus = [] + for freq in self.frequency_map: + mus.append(coord_utils.get_mu_params(lambdas, + self.metric_params, freq) ) + mus = numpy.array(mus) + else: + freq_cutoff=None + mus=None + + self.add_point_by_chi_coords(chi_coords, mass1, mass2, spin1z, spin2z, + point_fupper=freq_cutoff, mus=mus)
+ + + +
+[docs] + def add_tmpltbank_from_xml_table(self, sngl_table, vary_fupper=False): + """ + This function will take a sngl_inspiral_table of templates and add them + into the partitioned template bank object. + + Parameters + ----------- + sngl_table : sngl_inspiral_table + List of sngl_inspiral templates. + vary_fupper : boolean, optional (default=False) + If True, also include the additional information needed to compute + distances with a varying upper frequency cutoff. + """ + for sngl in sngl_table: + self.add_point_by_masses(sngl.mass1, sngl.mass2, sngl.spin1z, + sngl.spin2z, vary_fupper=vary_fupper)
+ + +
+[docs] + def add_tmpltbank_from_hdf_file(self, hdf_fp, vary_fupper=False): + """ + This function will take a pointer to an open HDF File object containing + a list of templates and add them into the partitioned template bank + object. + + Parameters + ----------- + hdf_fp : h5py.File object + The template bank in HDF5 format. + vary_fupper : boolean, optional (default=False) + If True, also include the additional information needed to compute + distances with a varying upper frequency cutoff. + """ + mass1s = hdf_fp['mass1'][:] + mass2s = hdf_fp['mass2'][:] + spin1zs = hdf_fp['spin1z'][:] + spin2zs = hdf_fp['spin2z'][:] + for idx in range(len(mass1s)): + self.add_point_by_masses(mass1s[idx], mass2s[idx], spin1zs[idx], + spin2zs[idx], vary_fupper=vary_fupper)
+ + +
+[docs] + def output_all_points(self): + """Return all points in the bank. + + Return all points in the bank as lists of m1, m2, spin1z, spin2z. + + Returns + ------- + mass1 : list + List of mass1 values. + mass2 : list + List of mass2 values. + spin1z : list + List of spin1z values. + spin2z : list + List of spin2z values. + """ + mass1 = [] + mass2 = [] + spin1z = [] + spin2z = [] + for i in self.massbank.keys(): + for j in self.massbank[i].keys(): + for k in range(len(self.massbank[i][j]['mass1s'])): + curr_bank = self.massbank[i][j] + mass1.append(curr_bank['mass1s'][k]) + mass2.append(curr_bank['mass2s'][k]) + spin1z.append(curr_bank['spin1s'][k]) + spin2z.append(curr_bank['spin2s'][k]) + + return mass1, mass2, spin1z, spin2z
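A schematic end-to-end sketch of this class (mass_range_params and metric_params are assumed to be already-initialized massRangeParameters and metricParameters instances, produced by the helpers documented earlier; the import path is assumed to be pycbc.tmpltbank):

from pycbc.tmpltbank import PartitionedTmpltbank

bank = PartitionedTmpltbank(mass_range_params, metric_params,
                            ref_freq=1000., bin_spacing=(1 - 0.97) ** 0.5)
# Add a proposed template by its physical parameters; the chi coordinates
# are computed internally from the metric.
bank.add_point_by_masses(10., 1.4, 0.3, 0.)
# Retrieve everything accepted so far as flat lists.
m1s, m2s, s1zs, s2zs = bank.output_all_points()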
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/tmpltbank/sky_grid.html b/latest/html/_modules/pycbc/tmpltbank/sky_grid.html new file mode 100644 index 00000000000..cf31bdbd82a --- /dev/null +++ b/latest/html/_modules/pycbc/tmpltbank/sky_grid.html @@ -0,0 +1,281 @@ + + + + + + pycbc.tmpltbank.sky_grid — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.tmpltbank.sky_grid

+"""Functionality for handling grids of points in the sky for coherent SNR
+calculation via `pycbc_multi_inspiral`. The main operation to be performed on
+these points is calculating the antenna pattern functions and time delays from
+the Earth center for a network of detectors.
+"""
+
+import numpy as np
+import h5py
+
+from pycbc.detector import Detector
+
+
+
+[docs] +class SkyGrid: + def __init__(self, ra, dec, detectors, ref_gps_time): + """Initialize a sky grid from a list of RA/dec coordinates. + + Parameters + ---------- + ra: iterable of floats + Right ascensions for each point in radians, in the interval [0,2π). + dec: iterable of floats + Declination for each point in radians, where π/2 is the North pole, + -π/2 is the South pole, and 0 is the celestial equator. + detectors: iterable of str + List of detector names associated with the sky grid, typically the + detectors for which the grid has been placed or is to be placed. + The detectors will be used when calculating the antenna pattern + functions and time delays from Earth center. + ref_gps_time: float + Reference GPS time associated with the sky grid. This will be used + when calculating the antenna pattern functions and time delays from + Earth center. + """ + # We store the points in a 2D array internally, first dimension runs + # over the list of points, second dimension is RA/dec. + # Question: should we use Astropy sky positions instead? + self.positions = np.vstack([ra, dec]).T + self.detectors = sorted(detectors) + self.ref_gps_time = ref_gps_time + + def __len__(self): + """Returns the number of points in the sky grid.""" + return self.positions.shape[0] + + def __getitem__(self, index): + """Returns the coordinates of a single point in the grid.""" + return self.positions[index] + + @property + def ras(self): + """Returns all right ascensions in radians, in the interval [0,2π).""" + return self.positions[:, 0] + + @property + def decs(self): + """Returns all declinations in radians, where π/2 is the North pole, + -π/2 is the South pole, and 0 is the celestial equator.""" + return self.positions[:, 1] + +
+[docs] + @classmethod + def from_cli(cls, cli_parser, cli_args): + """Initialize a sky grid from command-line interface, via argparse + objects. + """ + if cli_args.sky_grid is not None: + if cli_args.ra is not None or cli_args.dec is not None: + cli_parser.error( + 'Please provide either a sky grid via --sky-grid or a ' + 'single sky position via --ra and --dec, not both' + ) + return cls.read_from_file(cli_args.sky_grid) + if cli_args.ra is not None and cli_args.dec is not None: + return cls( + [cli_args.ra], + [cli_args.dec], + cli_args.instruments, + cli_args.trigger_time + ) + cli_parser.error( + 'Please specify a sky grid via --sky-grid or a single sky ' + 'position via --ra and --dec' + )
+ + +
+[docs] + @classmethod + def read_from_file(cls, path): + """Initialize a sky grid from a given HDF5 file.""" + with h5py.File(path, 'r') as hf: + ra = hf['ra'][:] + dec = hf['dec'][:] + detectors = hf.attrs['detectors'] + ref_gps_time = hf.attrs['ref_gps_time'] + return cls(ra, dec, detectors, ref_gps_time)
+ + +
+[docs] + def write_to_file(self, path): + """Writes a sky grid to an HDF5 file.""" + with h5py.File(path, 'w') as hf: + hf['ra'] = self.ras + hf['dec'] = self.decs + hf.attrs['detectors'] = self.detectors + hf.attrs['ref_gps_time'] = self.ref_gps_time
+ + +
+[docs] + def calculate_antenna_patterns(self): + """Calculate the antenna pattern functions at each point in the grid + for the list of GW detectors specified at instantiation. Return a dict, + keyed by detector name, whose items are 2-dimensional Numpy arrays. + The first dimension of these arrays runs over the sky grid, and the + second dimension runs over the plus and cross polarizations. + """ + result = {} + for det_name in self.detectors: + det = Detector(det_name) + result[det_name] = np.empty((len(self), 2)) + for i, (ra, dec) in enumerate(self): + result[det_name][i] = det.antenna_pattern( + ra, dec, 0, t_gps=self.ref_gps_time + ) + return result
+ + +
+[docs] + def calculate_time_delays(self): + """Calculate the time delays from the Earth center to each GW detector + specified at instantiation, for each point in the grid. Return a dict, + keyed by detector name, whose items are 1-dimensional Numpy arrays + containing the time delays for each sky point. + """ + result = {} + for det_name in self.detectors: + det = Detector(det_name) + result[det_name] = np.empty(len(self)) + for i, (ra, dec) in enumerate(self): + result[det_name][i] = det.time_delay_from_earth_center( + ra, dec, t_gps=self.ref_gps_time + ) + return result
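The two calculation methods above are usually called together after building or reading a grid. A minimal usage sketch follows; the detector names, GPS time, file name, and sky positions are illustrative only, and ``SkyGrid`` is assumed to have been imported from this module.

import numpy as np

# Build a small grid of three sky points (angles in radians).
ra = np.array([0.0, 1.0, 2.0])
dec = np.array([0.1, -0.2, 0.3])
grid = SkyGrid(ra, dec, ['H1', 'L1'], ref_gps_time=1126259462)

# Antenna patterns: dict of (npoints, 2) arrays (plus, cross) per detector.
fp_fc = grid.calculate_antenna_patterns()
# Time delays from Earth center: dict of (npoints,) arrays per detector.
delays = grid.calculate_time_delays()
print(fp_fc['H1'].shape, delays['L1'].shape)

# Round-trip through HDF5.
grid.write_to_file('sky_grid.hdf')
grid2 = SkyGrid.read_from_file('sky_grid.hdf')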
+
+ + + +__all__ = ['SkyGrid'] +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/transforms.html b/latest/html/_modules/pycbc/transforms.html new file mode 100644 index 00000000000..3c8b1a9f087 --- /dev/null +++ b/latest/html/_modules/pycbc/transforms.html @@ -0,0 +1,3538 @@ + + + + + + pycbc.transforms — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.transforms

+# Copyright (C) 2017  Christopher M. Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module provides classes and functions for transforming parameters.
+"""
+
+import os
+import logging
+import numpy
+
+from pycbc import conversions
+from pycbc import coordinates
+from pycbc import cosmology
+from pycbc.io import record
+from pycbc.waveform import parameters
+from pycbc.boundaries import Bounds
+from pycbc import VARARGS_DELIM
+from pycbc.pnutils import jframe_to_l0frame
+
+logger = logging.getLogger('pycbc.transforms')
+
+
+
+[docs] +class BaseTransform(object): + """A base class for transforming between two sets of parameters.""" + + name = None + inverse = None + _inputs = [] + _outputs = [] + + def __init__(self): + self.inputs = set(self._inputs) + self.outputs = set(self._outputs) + + def __call__(self, maps): + return self.transform(maps) + +
+[docs] + def transform(self, maps): + """This function transforms from inputs to outputs.""" + raise NotImplementedError("Not added.")
+ + +
+[docs] + def inverse_transform(self, maps): + """The inverse conversions of transform. This function transforms from + outputs to inputs. + """ + raise NotImplementedError("Not added.")
+ + +
+[docs] + def jacobian(self, maps): + """The Jacobian for the inputs to outputs transformation.""" + raise NotImplementedError("Jacobian transform not implemented.")
+ + +
+[docs] + def inverse_jacobian(self, maps): + """The Jacobian for the outputs to inputs transformation.""" + raise NotImplementedError("Jacobian transform not implemented.")
+ + +
+[docs] + @staticmethod + def format_output(old_maps, new_maps): + """This function takes the returned dict from `transform` and converts + it to the same datatype as the input. + + Parameters + ---------- + old_maps : {FieldArray, dict} + The mapping object to add new maps to. + new_maps : dict + A dict with key as parameter name and value as numpy.array. + + Returns + ------- + {FieldArray, dict} + The old_maps object with new keys from new_maps. + """ + + # if input is FieldArray then return FieldArray + if isinstance(old_maps, record.FieldArray): + keys = new_maps.keys() + values = [new_maps[key] for key in keys] + for key, vals in zip(keys, values): + try: + old_maps = old_maps.add_fields([vals], [key]) + except ValueError: + old_maps[key] = vals + return old_maps + + # if input is dict then return dict + elif isinstance(old_maps, dict): + out = old_maps.copy() + out.update(new_maps) + return out + + # else error + else: + raise TypeError("Input type must be FieldArray or dict.")
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, outputs, + skip_opts=None, additional_opts=None): + """Initializes a transform from the given section. + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the transform options. + section : str + Name of the section in the configuration file. + outputs : str + The names of the parameters that are output by this transformation, + separated by `VARARGS_DELIM`. These must appear in the "tag" part + of the section header. + skip_opts : list, optional + Do not read options in the given list. + additional_opts : dict, optional + Any additional arguments to pass to the class. If an option is + provided that also exists in the config file, the value provided + will be used instead of being read from the file. + + Returns + ------- + cls + An instance of the class. + """ + tag = outputs + if skip_opts is None: + skip_opts = [] + if additional_opts is None: + additional_opts = {} + else: + additional_opts = additional_opts.copy() + outputs = set(outputs.split(VARARGS_DELIM)) + special_args = ["name"] + skip_opts + list(additional_opts.keys()) + # get any extra arguments to pass to init + extra_args = {} + for opt in cp.options("-".join([section, tag])): + if opt in special_args: + continue + # check if option can be cast as a float + val = cp.get_opt_tag(section, opt, tag) + try: + val = float(val) + except ValueError: + pass + # add option + extra_args.update({opt: val}) + extra_args.update(additional_opts) + out = cls(**extra_args) + # check that the outputs matches + if outputs - out.outputs != set() or out.outputs - outputs != set(): + raise ValueError( + "outputs of class do not match outputs specified " "in section" + ) + return out
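The methods above define the contract that every transform below follows: declare the ``_inputs``/``_outputs``, implement ``transform`` (and, where meaningful, its inverse and Jacobians), and hand results to ``format_output`` so the caller gets back the same container type it passed in. A minimal sketch of a hypothetical subclass (the class and parameter names are illustrative only, not part of the library):

import numpy
from pycbc import transforms

class TotalMassExample(transforms.BaseTransform):
    """Toy transform: (mass1, mass2) -> mtotal."""
    name = "total_mass_example"
    _inputs = ["mass1", "mass2"]
    _outputs = ["mtotal"]

    def transform(self, maps):
        out = {"mtotal": maps["mass1"] + maps["mass2"]}
        # format_output merges `out` into a copy of `maps`, preserving
        # whether the caller passed a dict or a FieldArray.
        return self.format_output(maps, out)

t = TotalMassExample()
print(t({"mass1": numpy.array([10.0]), "mass2": numpy.array([5.0])}))
# -> the original entries plus 'mtotal': array([15.])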
+
+ + + +
+[docs] +class CustomTransform(BaseTransform): + """Allows for any transform to be defined. + + Parameters + ---------- + input_args : (list of) str + The names of the input parameters. + output_args : (list of) str + The names of the output parameters. + transform_functions : dict + Dictionary mapping input args to a string giving a function call; + e.g., ``{'q': 'q_from_mass1_mass2(mass1, mass2)'}``. + jacobian : str, optional + String giving a jacobian function. The function must be in terms of + the input arguments. + + Examples + -------- + Create a custom transform that converts mass1, mass2 to mtotal, q: + + >>> t = transforms.CustomTransform(['mass1', 'mass2'], ['mtotal', 'q'], {'mtotal': 'mass1+mass2', 'q': 'mass1/mass2'}, '(mass1 + mass2) / mass2**2') + + Evaluate a pair of masses: + + >>> t.transform({'mass1': 10., 'mass2': 5.}) + {'mass1': 10.0, 'mass2': 5.0, 'mtotal': 15.0, 'q': 2.0} + + The Jacobian for the same pair of masses: + + >>> t.jacobian({'mass1': 10., 'mass2': 5.}) + 0.59999999999999998 + + """ + + name = "custom" + + def __init__(self, input_args, output_args, transform_functions, + jacobian=None): + if isinstance(input_args, str): + input_args = [input_args] + if isinstance(output_args, str): + output_args = [output_args] + self.inputs = set(input_args) + self.outputs = set(output_args) + self.transform_functions = transform_functions + self._jacobian = jacobian + # we'll create a scratch FieldArray space to do transforms on + # we'll default to length 1; this will be changed if a map is passed + # with more than one value in it + self._createscratch() + + def _createscratch(self, shape=1): + """Creates a scratch FieldArray to use for transforms.""" + self._scratch = record.FieldArray( + shape, dtype=[(p, float) for p in self.inputs] + ) + + def _copytoscratch(self, maps): + """Copies the data in maps to the scratch space. + + If the maps contain arrays that are not the same shape as the scratch + space, a new scratch space will be created. + """ + try: + for p in self.inputs: + self._scratch[p][:] = maps[p] + except ValueError: + # we'll get a ValueError if the scratch space isn't the same size + # as the maps; in that case, re-create the scratch space with the + # appropriate size and try again + invals = maps[list(self.inputs)[0]] + if isinstance(invals, numpy.ndarray): + shape = invals.shape + else: + shape = len(invals) + self._createscratch(shape) + for p in self.inputs: + self._scratch[p][:] = maps[p] + + def _getslice(self, maps): + """Determines how to slice the scratch for returning values.""" + invals = maps[list(self.inputs)[0]] + if not isinstance(invals, (numpy.ndarray, list)): + getslice = 0 + else: + getslice = slice(None, None) + return getslice + +
+[docs] + def transform(self, maps): + """Applies the transform functions to the given maps object. + + Parameters + ---------- + maps : dict, or FieldArray + + Returns + ------- + dict or FieldArray + A map object containing the transformed variables, along with the + original variables. The type of the output will be the same as the + input. + """ + if self.transform_functions is None: + raise NotImplementedError("no transform function(s) provided") + # copy values to scratch + self._copytoscratch(maps) + # ensure that we return the same data type in each dict + getslice = self._getslice(maps) + # evaluate the functions + out = { + p: self._scratch[func][getslice] + for p, func in self.transform_functions.items() + } + return self.format_output(maps, out)
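Because the scratch FieldArray is recreated whenever the incoming arrays have a different shape, the same ``CustomTransform`` instance can be fed single values or whole arrays of samples. A short sketch, reusing the function strings from the class docstring above (the sample values are arbitrary):

import numpy
from pycbc import transforms

t = transforms.CustomTransform(
    ['mass1', 'mass2'], ['mtotal', 'q'],
    {'mtotal': 'mass1+mass2', 'q': 'mass1/mass2'},
    jacobian='(mass1 + mass2) / mass2**2')

samples = {'mass1': numpy.array([10.0, 20.0]),
           'mass2': numpy.array([5.0, 10.0])}
print(t.transform(samples)['q'])   # -> [2. 2.]
print(t.jacobian(samples))         # -> [0.6 0.3]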
+ + +
+[docs] + def jacobian(self, maps): + if self._jacobian is None: + raise NotImplementedError("no jacobian provided") + # copy values to scratch + self._copytoscratch(maps) + out = self._scratch[self._jacobian] + if isinstance(out, numpy.ndarray): + out = out[self._getslice(maps)] + return out
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, outputs): + """Loads a CustomTransform from the given config file. + + Example section: + + .. code-block:: ini + + [{section}-outvar1+outvar2] + name = custom + inputs = inputvar1, inputvar2 + outvar1 = func1(inputs) + outvar2 = func2(inputs) + jacobian = func(inputs) + """ + tag = outputs + outputs = set(outputs.split(VARARGS_DELIM)) + inputs = map(str.strip, + cp.get_opt_tag(section, "inputs", tag).split(",")) + # get the functions for each output + transform_functions = {} + for var in outputs: + # check if option can be cast as a float + func = cp.get_opt_tag(section, var, tag) + transform_functions[var] = func + s = "-".join([section, tag]) + if cp.has_option(s, "jacobian"): + jacobian = cp.get_opt_tag(section, "jacobian", tag) + else: + jacobian = None + return cls(inputs, outputs, transform_functions, jacobian=jacobian)
+
+ + + +
+[docs] +class CustomTransformMultiOutputs(CustomTransform): + """Allows for any transform to be defined. Based on CustomTransform, + but also supports multi-returning value functions. + + Parameters + ---------- + input_args : (list of) str + The names of the input parameters. + output_args : (list of) str + The names of the output parameters. + transform_functions : dict + Dictionary mapping input args to a string giving a function call; + e.g., ``{'q': 'q_from_mass1_mass2(mass1, mass2)'}``. + jacobian : str, optional + String giving a jacobian function. The function must be in terms of + the input arguments. + """ + + name = "custom_multi" + + def __init__(self, input_args, output_args, transform_functions, + jacobian=None): + super(CustomTransformMultiOutputs, self).__init__( + input_args, output_args, transform_functions, jacobian) + +
+[docs] + def transform(self, maps): + """Applies the transform functions to the given maps object. + + Parameters + ---------- + maps : dict, or FieldArray + + Returns + ------- + dict or FieldArray + A map object containing the transformed variables, along with the + original variables. The type of the output will be the same as the + input. + """ + if self.transform_functions is None: + raise NotImplementedError("no transform function(s) provided") + # copy values to scratch + self._copytoscratch(maps) + # ensure that we return the same data type in each dict + getslice = self._getslice(maps) + # evaluate the functions + # func[0] is the function itself, func[1] is the index; + # this supports functions that return multiple values + out = { + p: self._scratch[func[0]][func[1]][getslice] if + len(self._scratch[func[0]]) > 1 else + self._scratch[func[0]][getslice] + for p, func in self.transform_functions.items() + } + return self.format_output(maps, out)
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, outputs): + """Loads a CustomTransformMultiOutputs from the given config file. + + Example section: + + .. code-block:: ini + + [{section}-outvar1+outvar2] + name = custom_multi + inputs = inputvar1, inputvar2 + outvar1, outvar2 = func1(inputs) + jacobian = func2(inputs) + """ + tag = outputs + outputs = list(outputs.split(VARARGS_DELIM)) + all_vars = ", ".join(outputs) + inputs = map(str.strip, + cp.get_opt_tag(section, "inputs", tag).split(",")) + # get the functions for each output + transform_functions = {} + output_index = slice(None, None, None) + for var in outputs: + # check if option can be cast as a float + try: + func = cp.get_opt_tag(section, var, tag) + except Exception: + func = cp.get_opt_tag(section, all_vars, tag) + output_index = slice(outputs.index(var), outputs.index(var)+1) + transform_functions[var] = [func, output_index] + s = "-".join([section, tag]) + if cp.has_option(s, "jacobian"): + jacobian = cp.get_opt_tag(section, "jacobian", tag) + else: + jacobian = None + return cls(inputs, outputs, transform_functions, jacobian=jacobian)
+
+ + + +# +# ============================================================================= +# +# Forward Transforms +# +# ============================================================================= +# + + +
+[docs] +class MchirpQToMass1Mass2(BaseTransform): + """Converts chirp mass and mass ratio to component masses.""" + + name = "mchirp_q_to_mass1_mass2" + + def __init__( + self, mass1_param=None, mass2_param=None, mchirp_param=None, q_param=None + ): + if mass1_param is None: + mass1_param = parameters.mass1 + if mass2_param is None: + mass2_param = parameters.mass2 + if mchirp_param is None: + mchirp_param = parameters.mchirp + if q_param is None: + q_param = parameters.q + self.mass1_param = mass1_param + self.mass2_param = mass2_param + self.mchirp_param = mchirp_param + self.q_param = q_param + self._inputs = [self.mchirp_param, self.q_param] + self._outputs = [self.mass1_param, self.mass2_param] + super(MchirpQToMass1Mass2, self).__init__() + +
+[docs] + def transform(self, maps): + """This function transforms from chirp mass and mass ratio to component + masses. + + Parameters + ---------- + maps : a mapping object + + Examples + -------- + Convert a dict of numpy.array: + + >>> import numpy + >>> from pycbc import transforms + >>> t = transforms.MchirpQToMass1Mass2() + >>> t.transform({'mchirp': numpy.array([10.]), 'q': numpy.array([2.])}) + {'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]), + 'mchirp': array([ 10.]), 'q': array([ 2.])} + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.mass1_param] = conversions.mass1_from_mchirp_q( + maps[self.mchirp_param], maps[self.q_param] + ) + out[self.mass2_param] = conversions.mass2_from_mchirp_q( + maps[self.mchirp_param], maps[self.q_param] + ) + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + """This function transforms from component masses to chirp mass and + mass ratio. + + Parameters + ---------- + maps : a mapping object + + Examples + -------- + Convert a dict of numpy.array: + + >>> import numpy + >>> from pycbc import transforms + >>> t = transforms.MchirpQToMass1Mass2() + >>> t.inverse_transform({'mass1': numpy.array([16.4]), 'mass2': numpy.array([8.2])}) + {'mass1': array([ 16.4]), 'mass2': array([ 8.2]), + 'mchirp': array([ 9.97717521]), 'q': 2.0} + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + m1 = maps[self.mass1_param] + m2 = maps[self.mass2_param] + out[self.mchirp_param] = conversions.mchirp_from_mass1_mass2(m1, m2) + out[self.q_param] = m1 / m2 + return self.format_output(maps, out)
+ + +
+[docs] + def jacobian(self, maps): + """Returns the Jacobian for transforming mchirp and q to mass1 and + mass2. + """ + mchirp = maps[self.mchirp_param] + q = maps[self.q_param] + return mchirp * ((1.0 + q) / q ** 3.0) ** (2.0 / 5)
+ + +
+[docs] + def inverse_jacobian(self, maps): + """Returns the Jacobian for transforming mass1 and mass2 to + mchirp and q. + """ + m1 = maps[self.mass1_param] + m2 = maps[self.mass2_param] + return conversions.mchirp_from_mass1_mass2(m1, m2) / m2 ** 2.0
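A quick numerical sanity check of the two Jacobians above: evaluated at corresponding points, ``jacobian`` and ``inverse_jacobian`` should multiply to one (the point used here is arbitrary).

import numpy
from pycbc import transforms

t = transforms.MchirpQToMass1Mass2()
pt = {'mchirp': numpy.array([10.0]), 'q': numpy.array([2.0])}
with_masses = t.transform(pt)   # adds mass1 and mass2 to the map
print(t.jacobian(pt) * t.inverse_jacobian(with_masses))   # -> ~[1.]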
+
+ + + +
+[docs] +class MchirpEtaToMass1Mass2(BaseTransform): + """Converts chirp mass and symmetric mass ratio to component masses.""" + + name = "mchirp_eta_to_mass1_mass2" + _inputs = [parameters.mchirp, parameters.eta] + _outputs = [parameters.mass1, parameters.mass2] + +
+[docs] + def transform(self, maps): + """This function transforms from chirp mass and symmetric mass ratio to + component masses. + + Parameters + ---------- + maps : a mapping object + + Examples + -------- + Convert a dict of numpy.array: + + >>> import numpy + >>> from pycbc import transforms + >>> t = transforms.MchirpEtaToMass1Mass2() + >>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])}) + {'mass1': array([ 11.48698355]), 'mass2': array([ 11.48698355]), + 'mchirp': array([ 10.]), 'eta': array([ 0.25])} + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[parameters.mass1] = conversions.mass1_from_mchirp_eta( + maps[parameters.mchirp], maps[parameters.eta] + ) + out[parameters.mass2] = conversions.mass2_from_mchirp_eta( + maps[parameters.mchirp], maps[parameters.eta] + ) + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + """This function transforms from component masses to chirp mass and + symmetric mass ratio. + + Parameters + ---------- + maps : a mapping object + + Examples + -------- + Convert a dict of numpy.array: + + >>> import numpy + >>> from pycbc import transforms + >>> t = transforms.MchirpEtaToMass1Mass2() + >>> t.inverse_transform({'mass1': numpy.array([16.4]), 'mass2': numpy.array([8.2])}) + {'mass1': array([ 16.4]), 'mass2': array([ 8.2]), + 'mchirp': array([ 9.97717521]), 'eta': array([ 0.22222222])} + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + m1 = maps[parameters.mass1] + m2 = maps[parameters.mass2] + out[parameters.mchirp] = conversions.mchirp_from_mass1_mass2(m1, m2) + out[parameters.eta] = conversions.eta_from_mass1_mass2(m1, m2) + return self.format_output(maps, out)
+ + +
+[docs] + def jacobian(self, maps): + """Returns the Jacobian for transforming mchirp and eta to mass1 and + mass2. + """ + mchirp = maps[parameters.mchirp] + eta = maps[parameters.eta] + m1 = conversions.mass1_from_mchirp_eta(mchirp, eta) + m2 = conversions.mass2_from_mchirp_eta(mchirp, eta) + return mchirp * (m1 - m2) / (m1 + m2) ** 3
+ + +
+[docs] + def inverse_jacobian(self, maps): + """Returns the Jacobian for transforming mass1 and mass2 to + mchirp and eta. + """ + m1 = maps[parameters.mass1] + m2 = maps[parameters.mass2] + mchirp = conversions.mchirp_from_mass1_mass2(m1, m2) + eta = conversions.eta_from_mass1_mass2(m1, m2) + return -1.0 * mchirp / eta ** (6.0 / 5)
+
+ + + +
+[docs] +class ChirpDistanceToDistance(BaseTransform): + """Converts chirp distance to luminosity distance, given the chirp mass.""" + + name = "chirp_distance_to_distance" + _inputs = [parameters.chirp_distance, parameters.mchirp] + _outputs = [parameters.distance] + + def __init__(self, ref_mass=1.4): + self.inputs = set(self._inputs) + self.outputs = set(self._outputs) + self.ref_mass = ref_mass + +
+[docs] + def transform(self, maps): + """This function transforms from chirp distance to luminosity distance, + given the chirp mass. + + Parameters + ---------- + maps : a mapping object + + Examples + -------- + Convert a dict of numpy.array: + + >>> import numpy as np + >>> from pycbc import transforms + >>> t = transforms.ChirpDistanceToDistance() + >>> t.transform({'chirp_distance': np.array([40.]), 'mchirp': np.array([1.2])}) + {'mchirp': array([ 1.2]), 'chirp_distance': array([ 40.]), 'distance': array([ 39.48595679])} + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[parameters.distance] = conversions.distance_from_chirp_distance_mchirp( + maps[parameters.chirp_distance], + maps[parameters.mchirp], + ref_mass=self.ref_mass, + ) + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + """This function transforms from luminosity distance to chirp distance, + given the chirp mass. + + Parameters + ---------- + maps : a mapping object + + Examples + -------- + Convert a dict of numpy.array: + + >>> import numpy as np + >>> from pycbc import transforms + >>> t = transforms.ChirpDistanceToDistance() + >>> t.inverse_transform({'distance': np.array([40.]), 'mchirp': np.array([1.2])}) + {'distance': array([ 40.]), 'chirp_distance': array([ 40.52073522]), 'mchirp': array([ 1.2])} + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[parameters.chirp_distance] = conversions.chirp_distance( + maps[parameters.distance], maps[parameters.mchirp], ref_mass=self.ref_mass + ) + return self.format_output(maps, out)
+ + +
+[docs] + def jacobian(self, maps): + """Returns the Jacobian for transforming chirp distance to + luminosity distance, given the chirp mass. + """ + mchirp = maps["mchirp"] + return (2.0 ** (-1.0 / 5) * self.ref_mass / mchirp) ** (-5.0 / 6)
+ + +
+[docs] + def inverse_jacobian(self, maps): + """Returns the Jacobian for transforming luminosity distance to + chirp distance, given the chirp mass. + """ + mchirp = maps["mchirp"] + return (2.0 ** (-1.0 / 5) * self.ref_mass / mchirp) ** (5.0 / 6)
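The Jacobians above correspond to the relation chirp_distance = distance * (2**(-1./5) * ref_mass / mchirp)**(5./6), with ref_mass = 1.4 by default. A short round-trip sketch (the numbers are arbitrary):

import numpy
from pycbc import transforms

t = transforms.ChirpDistanceToDistance()
pt = {'chirp_distance': numpy.array([40.0]), 'mchirp': numpy.array([1.2])}
with_distance = t.transform(pt)            # adds 'distance'
back = t.inverse_transform(with_distance)  # recomputes 'chirp_distance'
print(numpy.isclose(back['chirp_distance'], pt['chirp_distance']))  # -> [ True]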
+
+ + + +
+[docs] +class AlignTotalSpin(BaseTransform): + """Converts angles from total angular momentum J frame to orbital angular + momentum L (waveform) frame""" + + name = "align_total_spin" + _inputs = [parameters.thetajn, parameters.spin1x, parameters.spin1y, + parameters.spin1z, parameters.spin2x, parameters.spin2y, + parameters.spin2z, parameters.mass1, parameters.mass2, + parameters.f_ref, "phi_ref"] + _outputs = [parameters.inclination, parameters.spin1x, parameters.spin1y, + parameters.spin1z, parameters.spin2x, parameters.spin2y, + parameters.spin2z] + + def __init__(self): + self.inputs = set(self._inputs) + self.outputs = set(self._outputs) + super(AlignTotalSpin, self).__init__() + +
+[docs] + def transform(self, maps): + """ + Rigidly rotate binary so that the total angular momentum has the given + inclination (iota) instead of the orbital angular momentum. Return + the new inclination, s1, and s2. s1 and s2 are dimensionless spin. + Note: the spins are assumed to be given in the frame defined by the + orbital angular momentum. + """ + + if isinstance(maps, dict): + maps = record.FieldArray.from_kwargs(**maps) + newfields = [n for n in self._outputs if n not in maps.fieldnames] + newmaps = maps.add_fields([numpy.zeros(len(maps))]*len(newfields), + names=newfields) + for item in newmaps: + if not all(s == 0.0 for s in + [item[parameters.spin1x], item[parameters.spin1y], + item[parameters.spin2x], item[parameters.spin2y]]): + + # Calculate the quantities required by jframe_to_l0frame + s1_a, s1_az, s1_pol = coordinates.cartesian_to_spherical( + item[parameters.spin1x], item[parameters.spin1y], + item[parameters.spin1z]) + s2_a, s2_az, s2_pol = coordinates.cartesian_to_spherical( + item[parameters.spin2x], item[parameters.spin2y], + item[parameters.spin2z]) + + out = jframe_to_l0frame( + item[parameters.mass1], + item[parameters.mass2], + item[parameters.f_ref], + phiref=item["phi_ref"], + thetajn=item[parameters.thetajn], + phijl=numpy.pi, + spin1_a=s1_a, + spin2_a=s2_a, + spin1_polar=s1_pol, + spin2_polar=s2_pol, + spin12_deltaphi=s1_az-s2_az + ) + + for key in out: + item[key] = out[key] + else: + item[parameters.inclination] = item[parameters.thetajn] + + return newmaps
+
+ + + +
+[docs] +class SphericalToCartesian(BaseTransform): + """Converts spherical coordinates to cartesian. + + Parameters + ---------- + x : str + The name of the x parameter. + y : str + The name of the y parameter. + z : str + The name of the z parameter. + radial : str + The name of the radial parameter. + azimuthal : str + The name of the azimuthal angle parameter. + polar : str + The name of the polar angle parameter. + """ + + name = "spherical_to_cartesian" + + def __init__(self, x, y, z, radial, azimuthal, polar): + self.x = x + self.y = y + self.z = z + self.radial = radial + self.polar = polar + self.azimuthal = azimuthal + self._inputs = [self.radial, self.azimuthal, self.polar] + self._outputs = [self.x, self.y, self.z] + super(SphericalToCartesian, self).__init__() + +
+[docs] + def transform(self, maps): + """This function transforms from spherical to cartesian spins. + + Parameters + ---------- + maps : a mapping object + + Examples + -------- + Convert a dict of numpy.array: + + >>> import numpy + >>> from pycbc import transforms + >>> t = transforms.SphericalToCartesian('x', 'y', 'z', + 'a', 'phi', 'theta') + >>> t.transform({'a': numpy.array([0.1]), 'phi': numpy.array([0.1]), + 'theta': numpy.array([0.1])}) + {'a': array([ 0.1]), 'phi': array([ 0.1]), 'theta': array([ 0.1]), + 'x': array([ 0.00993347]), 'y': array([ 0.00099667]), + 'z': array([ 0.09950042])} + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + a = self.radial + az = self.azimuthal + po = self.polar + x, y, z = coordinates.spherical_to_cartesian(maps[a], maps[az], maps[po]) + out = {self.x: x, self.y: y, self.z: z} + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + """This function transforms from cartesian to spherical spins. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + x = self.x + y = self.y + z = self.z + a, az, po = coordinates.cartesian_to_spherical(maps[x], maps[y], maps[z]) + out = {self.radial: a, self.azimuthal: az, self.polar: po} + return self.format_output(maps, out)
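For spins, the usual pattern is to pass the component-specific parameter names explicitly, which is also what the deprecated wrapper classes below do internally. A short sketch (the spin values are arbitrary):

import numpy
from pycbc import transforms

t = transforms.SphericalToCartesian('spin1x', 'spin1y', 'spin1z',
                                    'spin1_a', 'spin1_azimuthal', 'spin1_polar')
out = t.transform({'spin1_a': numpy.array([0.5]),
                   'spin1_azimuthal': numpy.array([0.3]),
                   'spin1_polar': numpy.array([1.2])})
print(out['spin1x'], out['spin1y'], out['spin1z'])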
+
+ + + +
+[docs] +class SphericalSpin1ToCartesianSpin1(SphericalToCartesian): + """Converts spherical spin parameters (radial and two angles) to + cartesian spin parameters. This class only transforms spins for the first + component mass. + + **Deprecation Warning:** This will be removed in a future update. Use + :py:class:`SphericalToCartesian` with spin-parameter names passed in + instead. + """ + + name = "spherical_spin_1_to_cartesian_spin_1" + + def __init__(self): + logger.warning( + "Deprecation warning: the %s transform will be " + "removed in a future update. Please use %s instead, " + "passing spin1x, spin1y, spin1z, spin1_a, " + "spin1_azimuthal, spin1_polar as arguments.", + self.name, SphericalToCartesian.name + ) + super(SphericalSpin1ToCartesianSpin1, self).__init__( + "spin1x", "spin1y", "spin1z", "spin1_a", + "spin1_azimuthal", "spin1_polar" + )
+ + + +
+[docs] +class SphericalSpin2ToCartesianSpin2(SphericalToCartesian): + """Converts spherical spin parameters (radial and two angles) to + cartesian spin parameters. This class only transforms spins for the second + component mass. + + **Deprecation Warning:** This will be removed in a future update. Use + :py:class:`SphericalToCartesian` with spin-parameter names passed in + instead. + """ + + name = "spherical_spin_2_to_cartesian_spin_2" + + def __init__(self): + logger.warning( + "Deprecation warning: the %s transform will be " + "removed in a future update. Please use %s instead, " + "passing spin2x, spin2y, spin2z, spin2_a, " + "spin2_azimuthal, spin2_polar as arguments.", + self.name, SphericalToCartesian.name + ) + super(SphericalSpin2ToCartesianSpin2, self).__init__( + "spin2x", "spin2y", "spin2z", + "spin2_a", "spin2_azimuthal", "spin2_polar" + )
+ + + +
+[docs] +class DistanceToRedshift(BaseTransform): + """Converts distance to redshift.""" + + name = "distance_to_redshift" + inverse = None + _inputs = [parameters.distance] + _outputs = [parameters.redshift] + +
+[docs] + def transform(self, maps): + """This function transforms from distance to redshift. + + Parameters + ---------- + maps : a mapping object + + Examples + -------- + Convert a dict of numpy.array: + + >>> import numpy + >>> from pycbc import transforms + >>> t = transforms.DistanceToRedshift() + >>> t.transform({'distance': numpy.array([1000])}) + {'distance': array([1000]), 'redshift': 0.19650987609144363} + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {parameters.redshift: cosmology.redshift(maps[parameters.distance])} + return self.format_output(maps, out)
+
+ + + +
+[docs] +class AlignedMassSpinToCartesianSpin(BaseTransform): + """Converts mass-weighted spins to cartesian z-axis spins.""" + + name = "aligned_mass_spin_to_cartesian_spin" + _inputs = [parameters.mass1, parameters.mass2, parameters.chi_eff, "chi_a"] + _outputs = [ + parameters.mass1, + parameters.mass2, + parameters.spin1z, + parameters.spin2z, + ] + +
+[docs] + def transform(self, maps): + """This function transforms from aligned mass-weighted spins to + cartesian spins aligned along the z-axis. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + mass1 = maps[parameters.mass1] + mass2 = maps[parameters.mass2] + out = {} + out[parameters.spin1z] = conversions.spin1z_from_mass1_mass2_chi_eff_chi_a( + mass1, mass2, maps[parameters.chi_eff], maps["chi_a"] + ) + out[parameters.spin2z] = conversions.spin2z_from_mass1_mass2_chi_eff_chi_a( + mass1, mass2, maps[parameters.chi_eff], maps["chi_a"] + ) + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + """This function transforms from component masses and cartesian spins + to mass-weighted spin parameters aligned with the angular momentum. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + mass1 = maps[parameters.mass1] + spin1z = maps[parameters.spin1z] + mass2 = maps[parameters.mass2] + spin2z = maps[parameters.spin2z] + out = { + parameters.chi_eff: + conversions.chi_eff(mass1, mass2, spin1z, spin2z), + "chi_a": conversions.chi_a(mass1, mass2, spin1z, spin2z), + } + return self.format_output(maps, out)
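A round-trip sketch for this pair of methods: start from cartesian z-spins, map to the mass-weighted parameters, and map back; the recovered spins should match the inputs. The values are arbitrary.

import numpy
from pycbc import transforms

t = transforms.AlignedMassSpinToCartesianSpin()
samp = {'mass1': numpy.array([30.0]), 'mass2': numpy.array([20.0]),
        'spin1z': numpy.array([0.4]), 'spin2z': numpy.array([-0.1])}
weighted = t.inverse_transform(samp)   # adds chi_eff and chi_a
back = t.transform(weighted)           # recomputes spin1z and spin2z
print(back['spin1z'], back['spin2z'])  # -> ~[0.4] ~[-0.1]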
+
+ + + +
+[docs] +class PrecessionMassSpinToCartesianSpin(BaseTransform): + """Converts mass-weighted spins to cartesian x-y plane spins.""" + + name = "precession_mass_spin_to_cartesian_spin" + _inputs = [parameters.mass1, parameters.mass2, + "xi1", "xi2", "phi_a", "phi_s"] + _outputs = [ + parameters.mass1, + parameters.mass2, + parameters.spin1x, + parameters.spin1y, + parameters.spin2x, + parameters.spin2y, + ] + +
+[docs] + def transform(self, maps): + """This function transforms from mass-weighted spins to cartesian spins + in the x-y plane. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + + # find primary and secondary masses + # since functions in conversions.py map to primary/secondary masses + m_p = conversions.primary_mass(maps["mass1"], maps["mass2"]) + m_s = conversions.secondary_mass(maps["mass1"], maps["mass2"]) + + # find primary and secondary xi + # can re-purpose spin functions for just a generic variable + xi_p = conversions.primary_spin( + maps["mass1"], maps["mass2"], maps["xi1"], maps["xi2"] + ) + xi_s = conversions.secondary_spin( + maps["mass1"], maps["mass2"], maps["xi1"], maps["xi2"] + ) + + # convert using convention of conversions.py that is mass1 > mass2 + spinx_p = conversions.spin1x_from_xi1_phi_a_phi_s( + xi_p, maps["phi_a"], maps["phi_s"] + ) + spiny_p = conversions.spin1y_from_xi1_phi_a_phi_s( + xi_p, maps["phi_a"], maps["phi_s"] + ) + spinx_s = conversions.spin2x_from_mass1_mass2_xi2_phi_a_phi_s( + m_p, m_s, xi_s, maps["phi_a"], maps["phi_s"] + ) + spiny_s = conversions.spin2y_from_mass1_mass2_xi2_phi_a_phi_s( + m_p, m_s, xi_s, maps["phi_a"], maps["phi_s"] + ) + + # map parameters from primary/secondary to indices + out = {} + if isinstance(m_p, numpy.ndarray): + mass1, mass2 = map(numpy.array, [maps["mass1"], maps["mass2"]]) + mask_mass1_gte_mass2 = mass1 >= mass2 + mask_mass1_lt_mass2 = mass1 < mass2 + out[parameters.spin1x] = numpy.concatenate( + (spinx_p[mask_mass1_gte_mass2], spinx_s[mask_mass1_lt_mass2]) + ) + out[parameters.spin1y] = numpy.concatenate( + (spiny_p[mask_mass1_gte_mass2], spiny_s[mask_mass1_lt_mass2]) + ) + out[parameters.spin2x] = numpy.concatenate( + (spinx_p[mask_mass1_lt_mass2], spinx_s[mask_mass1_gte_mass2]) + ) + out[parameters.spin2y] = numpy.concatenate( + (spiny_p[mask_mass1_lt_mass2], spiny_s[mask_mass1_gte_mass2]) + ) + elif maps["mass1"] > maps["mass2"]: + out[parameters.spin1x] = spinx_p + out[parameters.spin1y] = spiny_p + out[parameters.spin2x] = spinx_s + out[parameters.spin2y] = spiny_s + else: + out[parameters.spin1x] = spinx_s + out[parameters.spin1y] = spiny_s + out[parameters.spin2x] = spinx_p + out[parameters.spin2y] = spiny_p + + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + """This function transforms from component masses and cartesian spins to + mass-weighted spin parameters perpendicular to the angular momentum. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + + # convert + out = {} + xi1 = conversions.primary_xi( + maps[parameters.mass1], + maps[parameters.mass2], + maps[parameters.spin1x], + maps[parameters.spin1y], + maps[parameters.spin2x], + maps[parameters.spin2y], + ) + xi2 = conversions.secondary_xi( + maps[parameters.mass1], + maps[parameters.mass2], + maps[parameters.spin1x], + maps[parameters.spin1y], + maps[parameters.spin2x], + maps[parameters.spin2y], + ) + out["phi_a"] = conversions.phi_a( + maps[parameters.mass1], + maps[parameters.mass2], + maps[parameters.spin1x], + maps[parameters.spin1y], + maps[parameters.spin2x], + maps[parameters.spin2y], + ) + out["phi_s"] = conversions.phi_s( + maps[parameters.spin1x], + maps[parameters.spin1y], + maps[parameters.spin2x], + maps[parameters.spin2y], + ) + + # map parameters from primary/secondary to indices + if isinstance(xi1, numpy.ndarray): + mass1, mass2 = map( + numpy.array, [maps[parameters.mass1], maps[parameters.mass2]] + ) + mask_mass1_gte_mass2 = mass1 >= mass2 + mask_mass1_lt_mass2 = mass1 < mass2 + out["xi1"] = numpy.concatenate( + (xi1[mask_mass1_gte_mass2], xi2[mask_mass1_lt_mass2]) + ) + out["xi2"] = numpy.concatenate( + (xi2[mask_mass1_gte_mass2], xi1[mask_mass1_lt_mass2]) + ) + elif maps["mass1"] > maps["mass2"]: + out["xi1"] = xi1 + out["xi2"] = xi2 + else: + out["xi1"] = xi2 + out["xi2"] = xi1 + + return self.format_output(maps, out)
+
+ + + +
+[docs] +class CartesianSpinToChiP(BaseTransform): + """Converts cartesian spins to `chi_p`.""" + + name = "cartesian_spin_to_chi_p" + _inputs = [ + parameters.mass1, + parameters.mass2, + parameters.spin1x, + parameters.spin1y, + parameters.spin2x, + parameters.spin2y, + ] + _outputs = ["chi_p"] + +
+[docs] + def transform(self, maps): + """This function transforms from component masses and cartesian spins + to chi_p. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out["chi_p"] = conversions.chi_p( + maps[parameters.mass1], + maps[parameters.mass2], + maps[parameters.spin1x], + maps[parameters.spin1y], + maps[parameters.spin2x], + maps[parameters.spin2y], + ) + return self.format_output(maps, out)
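A minimal sketch evaluating ``chi_p`` for a single set of samples (the masses and spin components are arbitrary):

import numpy
from pycbc import transforms

t = transforms.CartesianSpinToChiP()
out = t.transform({'mass1': numpy.array([30.0]), 'mass2': numpy.array([20.0]),
                   'spin1x': numpy.array([0.3]), 'spin1y': numpy.array([0.1]),
                   'spin2x': numpy.array([0.0]), 'spin2y': numpy.array([0.2])})
print(out['chi_p'])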
+
+ + + +
+[docs] +class LambdaFromTOVFile(BaseTransform): + """Transforms mass values to the corresponding Lambda values for a given + EOS, interpolating from the mass-Lambda data for that EOS read in from an + external ASCII file. + + The interpolation of the mass-Lambda data is a one-dimensional piecewise + linear interpolation. If the ``redshift_mass`` keyword argument is ``True`` + (the default), the mass values to be transformed are assumed to be detector + frame masses. In that case, a distance should be provided along with the + mass for transformation to the source frame mass before the Lambda values + are extracted from the interpolation. If the transform is read in from a + config file, an example code block would be: + + .. code-block:: ini + + [{section}-lambda1] + name = lambda_from_tov_file + mass_param = mass1 + lambda_param = lambda1 + distance = 40 + mass_lambda_file = {filepath} + + If this transform is used in a parameter estimation analysis where + distance is a variable parameter, the distance to be used will vary + with each draw. In that case, the example code block will be: + + .. code-block:: ini + + [{section}-lambda1] + name = lambda_from_tov_file + mass_param = mass1 + lambda_param = lambda1 + mass_lambda_file = filepath + + If your prior is in terms of the source-frame masses (``srcmass``), then + you can shut off the redshifting by adding ``do-not-redshift-mass`` to the + config file. In this case you do not need to worry about a distance. + Example: + + .. code-block:: ini + + [{section}-lambda1] + name = lambda_from_tov_file + mass_param = srcmass1 + lambda_param = lambda1 + mass_lambda_file = filepath + do-not-redshift-mass = + + Parameters + ---------- + mass_param : str + The name of the mass parameter to transform. + lambda_param : str + The name of the tidal deformability parameter that mass_param is to + be converted to interpolating from the data in the mass-Lambda file. + mass_lambda_file : str + Path of the mass-Lambda data file. The first column in the data file + should contain mass values, and the second column Lambda values. + distance : float, optional + The distance (in Mpc) of the source. Used to redshift the mass. Needed + if ``redshift_mass`` is True and no distance parameter exists. If + None, then a distance must be provided to the transform. + redshift_mass : bool, optional + Redshift the mass parameters when computing the lambdas. Default is + True. + file_columns : list of str, optional + The names and order of columns in the ``mass_lambda_file``. Must + contain at least 'mass' and 'lambda'. If not provided, will assume the + order is ('mass', 'lambda'). 
+ """ + + name = "lambda_from_tov_file" + + def __init__( + self, + mass_param, + lambda_param, + mass_lambda_file, + distance=None, + redshift_mass=True, + file_columns=None, + ): + self._mass_lambda_file = mass_lambda_file + self._mass_param = mass_param + self._lambda_param = lambda_param + self.redshift_mass = redshift_mass + self._distance = distance + self._inputs = [mass_param, "distance"] + self._outputs = [lambda_param] + if file_columns is None: + file_columns = ["mass", "lambda"] + dtype = [(fname, float) for fname in file_columns] + data = numpy.loadtxt(self._mass_lambda_file, dtype=dtype) + self._data = data + super(LambdaFromTOVFile, self).__init__() + + @property + def mass_param(self): + """Returns the input mass parameter.""" + return self._mass_param + + @property + def lambda_param(self): + """Returns the output lambda parameter.""" + return self._lambda_param + + @property + def data(self): + return self._data + + @property + def mass_data(self): + """Returns the mass data read from the mass-Lambda data file for + an EOS. + """ + return self._data["mass"] + + @property + def lambda_data(self): + """Returns the Lambda data read from the mass-Lambda data file for + an EOS. + """ + return self._data["lambda"] + + @property + def distance(self): + """Returns the fixed distance to transform mass samples from detector + to source frame if one is specified. + """ + return self._distance + +
+[docs] + @staticmethod + def lambda_from_tov_data(m_src, mass_data, lambda_data): + """Returns Lambda corresponding to a given mass interpolating from the + TOV data. + + Parameters + ---------- + m_src : float + Value of the source-frame mass. + mass_data : array + Mass array from the Lambda-M curve of an EOS. + lambda_data : array + Lambda array from the Lambda-M curve of an EOS. + + Returns + ------- + lambdav : float + The Lambda corresponding to the mass `m_src` for the EOS considered. + """ + if m_src > mass_data.max(): + # assume black hole + lambdav = 0.0 + else: + lambdav = numpy.interp(m_src, mass_data, lambda_data) + return lambdav
+ + +
+[docs] + def transform(self, maps): + """Computes the transformation of mass to Lambda. + + Parameters + ---------- + maps : dict or FieldArray + A dictionary or FieldArray which provides a map between the + parameter name of the variable to transform and its value(s). + + Returns + ------- + out : dict or FieldArray + A map between the transformed variable name and value(s), along + with the original variable name and value(s). + """ + m = maps[self._mass_param] + if self.redshift_mass: + if self._distance is not None: + d = self._distance + else: + try: + d = maps["distance"] + except KeyError as e: + logger.warning( + "Either provide distance samples in the " + "list of samples to be transformed, or " + "provide a fixed distance value as input " + "when initializing LambdaFromTOVFile." + ) + raise e + shift = 1.0 / (1.0 + cosmology.redshift(abs(d))) + else: + shift = 1.0 + out = { + self._lambda_param: self.lambda_from_tov_data( + m * shift, self._data["mass"], self._data["lambda"] + ) + } + return self.format_output(maps, out)
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, outputs): + # see if we're redshifting masses + if cp.has_option("-".join([section, outputs]), "do-not-redshift-mass"): + additional_opts = {"redshift_mass": False} + skip_opts = ["do-not-redshift-mass"] + else: + additional_opts = None + skip_opts = None + return super(LambdaFromTOVFile, cls).from_config( + cp, section, outputs, skip_opts=skip_opts, additional_opts=additional_opts + )
+
+ + + +
+[docs] +class LambdaFromMultipleTOVFiles(BaseTransform): + """Uses multiple equations of state. + + Parameters + ---------- + mass_param : str + The name of the mass parameter to transform. + lambda_param : str + The name of the tidal deformability parameter that mass_param is to + be converted to interpolating from the data in the mass-Lambda file. + map_file : str + Path of a file containing the paths of the mass-Lambda data files for + each EOS, one per line. The integer EOS index of each file is taken + from its base name. + distance : float, optional + The distance (in Mpc) of the source. Used to redshift the mass. If + None, then a distance must be provided to the transform. + redshift_mass : bool, optional + Redshift the mass parameters when computing the lambdas. Default is + True. + file_columns : list of str, optional + The names and order of columns in the mass-Lambda data files. Must + contain at least 'mass' and 'lambda'. If not provided, will assume the + order is ('radius', 'mass', 'lambda'). + """ + + name = "lambda_from_multiple_tov_files" + + def __init__( + self, + mass_param, + lambda_param, + map_file, + distance=None, + redshift_mass=True, + file_columns=None, + ): + self._map_file = map_file + self._mass_param = mass_param + self._lambda_param = lambda_param + self._distance = distance + self.redshift_mass = redshift_mass + self._inputs = [mass_param, "eos", "distance"] + self._outputs = [lambda_param] + # create a dictionary of the EOS files from the map_file + self._eos_files = {} + with open(self._map_file, "r") as fp: + for line in fp: + fname = line.rstrip("\n") + eosidx = int(os.path.basename(fname).split(".")[0]) + self._eos_files[eosidx] = os.path.abspath(fname) + # create an eos cache for fast load later + self._eos_cache = {} + if file_columns is None: + file_columns = ("radius", "mass", "lambda") + self._file_columns = file_columns + super(LambdaFromMultipleTOVFiles, self).__init__() + + @property + def mass_param(self): + """Returns the input mass parameter.""" + return self._mass_param + + @property + def lambda_param(self): + """Returns the output lambda parameter.""" + return self._lambda_param + + @property + def map_file(self): + """Returns the path of the file mapping EOS indices to mass-Lambda + data files. + """ + return self._map_file + + @property + def distance(self): + """Returns the fixed distance to transform mass samples from detector + to source frame if one is specified. + """ + return self._distance +
+[docs] + def get_eos(self, eos_index): + """Gets the EOS for the given index. + + If the index is not in range returns None. + """ + try: + eos = self._eos_cache[eos_index] + except KeyError: + try: + fname = self._eos_files[eos_index] + eos = LambdaFromTOVFile( + mass_param=self._mass_param, + lambda_param=self._lambda_param, + mass_lambda_file=fname, + distance=self._distance, + redshift_mass=self.redshift_mass, + file_columns=self._file_columns, + ) + self._eos_cache[eos_index] = eos + except KeyError: + eos = None + return eos
+ + +
+[docs] + def transform(self, maps): + """Transforms mass value and eos index into a lambda value""" + m = maps[self._mass_param] + # floor + eos_index = int(maps["eos"]) + eos = self.get_eos(eos_index) + if eos is not None: + return eos.transform(maps) + else: + # no eos, just return nan + out = {self._lambda_param: numpy.nan} + return self.format_output(maps, out)
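The map file itself is a plain-text list of per-EOS table paths, one per line; the integer EOS index used by ``get_eos`` and ``transform`` is parsed from each file's base name. For example (hypothetical paths):

    /path/to/eos_tables/0.dat
    /path/to/eos_tables/1.dat
    /path/to/eos_tables/2.dat

With this layout, a sample with ``eos = 1.7`` is truncated to index 1 and its Lambda is interpolated from ``1.dat``.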
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, outputs): + # see if we're redshifting masses + if cp.has_option("-".join([section, outputs]), "do-not-redshift-mass"): + additional_opts = {"redshift_mass": False} + skip_opts = ["do-not-redshift-mass"] + else: + additional_opts = None + skip_opts = None + return super(LambdaFromMultipleTOVFiles, cls).from_config( + cp, section, outputs, skip_opts=skip_opts, additional_opts=additional_opts + )
+
+ + + +
+[docs] +class GEOToSSB(BaseTransform): + """Converts arrival time, sky localization, and polarization angle in the + geocentric frame to the corresponding values in the SSB frame.""" + + name = "geo_to_ssb" + + default_params_name = { + 'default_tc_geo': parameters.tc, + 'default_longitude_geo': parameters.ra, + 'default_latitude_geo': parameters.dec, + 'default_polarization_geo': parameters.polarization, + 'default_tc_ssb': parameters.tc, + 'default_longitude_ssb': parameters.eclipticlongitude, + 'default_latitude_ssb': parameters.eclipticlatitude, + 'default_polarization_ssb': parameters.polarization + } + + def __init__( + self, tc_geo_param=None, longitude_geo_param=None, + latitude_geo_param=None, polarization_geo_param=None, + tc_ssb_param=None, longitude_ssb_param=None, + latitude_ssb_param=None, polarization_ssb_param=None + ): + params = [tc_geo_param, longitude_geo_param, + latitude_geo_param, polarization_geo_param, + tc_ssb_param, longitude_ssb_param, + latitude_ssb_param, polarization_ssb_param] + + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_geo_param = params[0] + self.longitude_geo_param = params[1] + self.latitude_geo_param = params[2] + self.polarization_geo_param = params[3] + self.tc_ssb_param = params[4] + self.longitude_ssb_param = params[5] + self.latitude_ssb_param = params[6] + self.polarization_ssb_param = params[7] + self._inputs = [self.tc_geo_param, self.longitude_geo_param, + self.latitude_geo_param, self.polarization_geo_param] + self._outputs = [self.tc_ssb_param, self.longitude_ssb_param, + self.latitude_ssb_param, self.polarization_ssb_param] + + super(GEOToSSB, self).__init__() + +
+[docs] + def transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the geocentric frame to the corresponding + values in the SSB frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_ssb_param], out[self.longitude_ssb_param], \ + out[self.latitude_ssb_param], out[self.polarization_ssb_param] = \ + coordinates.geo_to_ssb( + maps[self.tc_geo_param], maps[self.longitude_geo_param], + maps[self.latitude_geo_param], maps[self.polarization_geo_param] + ) + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the SSB frame to the corresponding + values in the geocentric frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_geo_param], out[self.longitude_geo_param], \ + out[self.latitude_geo_param], out[self.polarization_geo_param] = \ + coordinates.ssb_to_geo( + maps[self.tc_ssb_param], maps[self.longitude_ssb_param], + maps[self.latitude_ssb_param], maps[self.polarization_ssb_param] + ) + return self.format_output(maps, out)
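A round-trip sketch with the default parameter names; the GPS time and angles are arbitrary but within the expected ranges, and the inverse should recover the geocentric sky position:

import numpy
from pycbc import transforms

t = transforms.GEOToSSB()
geo = {'tc': numpy.array([1126259462.4]),
       'ra': numpy.array([1.5]),
       'dec': numpy.array([-0.3]),
       'polarization': numpy.array([0.7])}
ssb = t.transform(geo)          # adds eclipticlongitude/eclipticlatitude and
                                # overwrites tc/polarization with SSB values
back = t.inverse_transform(ssb)
print(numpy.isclose(back['ra'], geo['ra']),
      numpy.isclose(back['dec'], geo['dec']))   # -> [ True] [ True]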
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, outputs): + tag = outputs + skip_opts = [] + additional_opts = {} + + # get custom variable names + variables = { + 'tc-geo': cls.default_params_name['default_tc_geo'], + 'longitude-geo': cls.default_params_name['default_longitude_geo'], + 'latitude-geo': cls.default_params_name['default_latitude_geo'], + 'polarization-geo': cls.default_params_name[ + 'default_polarization_geo'], + 'tc-ssb': cls.default_params_name['default_tc_ssb'], + 'longitude-ssb': cls.default_params_name['default_longitude_ssb'], + 'latitude-ssb': cls.default_params_name['default_latitude_ssb'], + 'polarization-ssb': cls.default_params_name[ + 'default_polarization_ssb'] + } + for param_name in variables.keys(): + name_underline = param_name.replace('-', '_') + if cp.has_option("-".join([section, outputs]), param_name): + skip_opts.append(param_name) + additional_opts.update( + {name_underline+'_param': cp.get_opt_tag( + section, param_name, tag)}) + else: + additional_opts.update( + {name_underline+'_param': variables[param_name]}) + + return super(GEOToSSB, cls).from_config( + cp, section, outputs, skip_opts=skip_opts, + additional_opts=additional_opts + )
+
+ + + +
+[docs] +class LISAToSSB(BaseTransform): + """Converts arrival time, sky localization, and polarization angle in the + LISA frame to the corresponding values in the SSB frame.""" + + name = "lisa_to_ssb" + + default_params_name = { + 'default_tc_lisa': parameters.tc, + 'default_longitude_lisa': parameters.eclipticlongitude, + 'default_latitude_lisa': parameters.eclipticlatitude, + 'default_polarization_lisa': parameters.polarization, + 'default_tc_ssb': parameters.tc, + 'default_longitude_ssb': parameters.eclipticlongitude, + 'default_latitude_ssb': parameters.eclipticlatitude, + 'default_polarization_ssb': parameters.polarization + } + + def __init__( + self, tc_lisa_param=None, longitude_lisa_param=None, + latitude_lisa_param=None, polarization_lisa_param=None, + tc_ssb_param=None, longitude_ssb_param=None, + latitude_ssb_param=None, polarization_ssb_param=None + ): + params = [tc_lisa_param, longitude_lisa_param, + latitude_lisa_param, polarization_lisa_param, + tc_ssb_param, longitude_ssb_param, + latitude_ssb_param, polarization_ssb_param] + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_lisa_param = params[0] + self.longitude_lisa_param = params[1] + self.latitude_lisa_param = params[2] + self.polarization_lisa_param = params[3] + self.tc_ssb_param = params[4] + self.longitude_ssb_param = params[5] + self.latitude_ssb_param = params[6] + self.polarization_ssb_param = params[7] + self._inputs = [self.tc_lisa_param, self.longitude_lisa_param, + self.latitude_lisa_param, self.polarization_lisa_param] + self._outputs = [self.tc_ssb_param, self.longitude_ssb_param, + self.latitude_ssb_param, self.polarization_ssb_param] + super(LISAToSSB, self).__init__() + +
+[docs] + def transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the LISA frame to the corresponding + values in the SSB frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_ssb_param], out[self.longitude_ssb_param], \ + out[self.latitude_ssb_param], out[self.polarization_ssb_param] = \ + coordinates.lisa_to_ssb( + maps[self.tc_lisa_param], maps[self.longitude_lisa_param], + maps[self.latitude_lisa_param], maps[self.polarization_lisa_param] + ) + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the SSB frame to the corresponding + values in the LISA frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_lisa_param], out[self.longitude_lisa_param], \ + out[self.latitude_lisa_param], \ + out[self.polarization_lisa_param] = \ + coordinates.ssb_to_lisa( + maps[self.tc_ssb_param], maps[self.longitude_ssb_param], + maps[self.latitude_ssb_param], maps[self.polarization_ssb_param] + ) + return self.format_output(maps, out)
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, outputs): + tag = outputs + skip_opts = [] + additional_opts = {} + + # get custom variable names + variables = { + 'tc-lisa': cls.default_params_name['default_tc_lisa'], + 'longitude-lisa': cls.default_params_name[ + 'default_longitude_lisa'], + 'latitude-lisa': cls.default_params_name['default_latitude_lisa'], + 'polarization-lisa': cls.default_params_name[ + 'default_polarization_lisa'], + 'tc-ssb': cls.default_params_name['default_tc_ssb'], + 'longitude-ssb': cls.default_params_name['default_longitude_ssb'], + 'latitude-ssb': cls.default_params_name['default_latitude_ssb'], + 'polarization-ssb': cls.default_params_name[ + 'default_polarization_ssb'] + } + for param_name in variables.keys(): + name_underline = param_name.replace('-', '_') + if cp.has_option("-".join([section, outputs]), param_name): + skip_opts.append(param_name) + additional_opts.update( + {name_underline+'_param': cp.get_opt_tag( + section, param_name, tag)}) + else: + additional_opts.update( + {name_underline+'_param': variables[param_name]}) + + return super(LISAToSSB, cls).from_config( + cp, section, outputs, skip_opts=skip_opts, + additional_opts=additional_opts + )
+
+ + + +
+[docs] +class LISAToGEO(BaseTransform): + """Converts arrival time, sky localization, and polarization angle in the + LISA frame to the corresponding values in the geocentric frame.""" + + name = "lisa_to_geo" + + default_params_name = { + 'default_tc_lisa': parameters.tc, + 'default_longitude_lisa': parameters.eclipticlongitude, + 'default_latitude_lisa': parameters.eclipticlatitude, + 'default_polarization_lisa': parameters.polarization, + 'default_tc_geo': parameters.tc, + 'default_longitude_geo': parameters.ra, + 'default_latitude_geo': parameters.dec, + 'default_polarization_geo': parameters.polarization + } + + def __init__( + self, tc_lisa_param=None, longitude_lisa_param=None, + latitude_lisa_param=None, polarization_lisa_param=None, + tc_geo_param=None, longitude_geo_param=None, + latitude_geo_param=None, polarization_geo_param=None + ): + params = [tc_lisa_param, longitude_lisa_param, + latitude_lisa_param, polarization_lisa_param, + tc_geo_param, longitude_geo_param, + latitude_geo_param, polarization_geo_param] + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_lisa_param = params[0] + self.longitude_lisa_param = params[1] + self.latitude_lisa_param = params[2] + self.polarization_lisa_param = params[3] + self.tc_geo_param = params[4] + self.longitude_geo_param = params[5] + self.latitude_geo_param = params[6] + self.polarization_geo_param = params[7] + self._inputs = [self.tc_lisa_param, self.longitude_lisa_param, + self.latitude_lisa_param, self.polarization_lisa_param] + self._outputs = [self.tc_geo_param, self.longitude_geo_param, + self.latitude_geo_param, self.polarization_geo_param] + super(LISAToGEO, self).__init__() + +
+[docs] + def transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the LISA frame to the corresponding + values in the geocentric frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_geo_param], out[self.longitude_geo_param], \ + out[self.latitude_geo_param], out[self.polarization_geo_param] = \ + coordinates.lisa_to_geo( + maps[self.tc_lisa_param], maps[self.longitude_lisa_param], + maps[self.latitude_lisa_param], maps[self.polarization_lisa_param] + ) + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the geocentric frame to the corresponding + values in the LISA frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_lisa_param], out[self.longitude_lisa_param], \ + out[self.latitude_lisa_param], \ + out[self.polarization_lisa_param] = \ + coordinates.geo_to_lisa( + maps[self.tc_geo_param], maps[self.longitude_geo_param], + maps[self.latitude_geo_param], maps[self.polarization_geo_param] + ) + return self.format_output(maps, out)
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, outputs): + tag = outputs + skip_opts = [] + additional_opts = {} + + # get custom variable names + variables = { + 'tc-lisa': cls.default_params_name['default_tc_lisa'], + 'longitude-lisa': cls.default_params_name[ + 'default_longitude_lisa'], + 'latitude-lisa': cls.default_params_name['default_latitude_lisa'], + 'polarization-lisa': cls.default_params_name[ + 'default_polarization_lisa'], + 'tc-geo': cls.default_params_name['default_tc_geo'], + 'longitude-geo': cls.default_params_name['default_longitude_geo'], + 'latitude-geo': cls.default_params_name['default_latitude_geo'], + 'polarization-geo': cls.default_params_name[ + 'default_polarization_geo'] + } + for param_name in variables.keys(): + name_underline = param_name.replace('-', '_') + if cp.has_option("-".join([section, outputs]), param_name): + skip_opts.append(param_name) + additional_opts.update( + {name_underline+'_param': cp.get_opt_tag( + section, param_name, tag)}) + else: + additional_opts.update( + {name_underline+'_param': variables[param_name]}) + + return super(LISAToGEO, cls).from_config( + cp, section, outputs, skip_opts=skip_opts, + additional_opts=additional_opts + )
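+# A usage sketch (hedged; the numbers are placeholders and scalar inputs are
+# assumed to be accepted by coordinates.lisa_to_geo). With the default
+# parameter names, the transform reads the LISA-frame values and adds or
+# updates the geocentric ones:
+#
+#     >>> t = LISAToGEO()
+#     >>> params = {'tc': 1126259462.4, 'eclipticlongitude': 1.0,
+#     ...           'eclipticlatitude': 0.3, 'polarization': 0.5}
+#     >>> out = t.transform(params)   # adds 'ra' and 'dec', updates 'tc' and 'polarization'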
+
+ + + +
+[docs] +class Log(BaseTransform): + """Applies a log transform from an `inputvar` parameter to an `outputvar` + parameter. This is the inverse of the exponent transform. + + Parameters + ---------- + inputvar : str + The name of the parameter to transform. + outputvar : str + The name of the transformed parameter. + """ + + name = "log" + + def __init__(self, inputvar, outputvar): + self._inputvar = inputvar + self._outputvar = outputvar + self._inputs = [inputvar] + self._outputs = [outputvar] + super(Log, self).__init__() + + @property + def inputvar(self): + """Returns the input parameter.""" + return self._inputvar + + @property + def outputvar(self): + """Returns the output parameter.""" + return self._outputvar + +
+[docs] + def transform(self, maps): + r"""Computes :math:`\log(x)`. + + Parameters + ---------- + maps : dict or FieldArray + A dictionary or FieldArray which provides a map between the + parameter name of the variable to transform and its value(s). + + Returns + ------- + out : dict or FieldArray + A map between the transformed variable name and value(s), along + with the original variable name and value(s). + """ + x = maps[self._inputvar] + out = {self._outputvar: numpy.log(x)} + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + r"""Computes :math:`y = e^{x}`. + + Parameters + ---------- + maps : dict or FieldArray + A dictionary or FieldArray which provides a map between the + parameter name of the variable to transform and its value(s). + + Returns + ------- + out : dict or FieldArray + A map between the transformed variable name and value(s), along + with the original variable name and value(s). + """ + y = maps[self._outputvar] + out = {self._inputvar: numpy.exp(y)} + return self.format_output(maps, out)
+ + +
+[docs] + def jacobian(self, maps): + r"""Computes the Jacobian of :math:`y = \log(x)`. + + This is: + + .. math:: + + \frac{\mathrm{d}y}{\mathrm{d}x} = \frac{1}{x}. + + Parameters + ---------- + maps : dict or FieldArray + A dictionary or FieldArray which provides a map between the + parameter name of the variable to transform and its value(s). + + Returns + ------- + float + The value of the jacobian at the given point(s). + """ + x = maps[self._inputvar] + return 1.0 / x
+ + +
+[docs] + def inverse_jacobian(self, maps): + r"""Computes the Jacobian of :math:`y = e^{x}`. + + This is: + + .. math:: + + \frac{\mathrm{d}y}{\mathrm{d}x} = e^{x}. + + Parameters + ---------- + maps : dict or FieldArray + A dictionary or FieldArray which provides a map between the + parameter name of the variable to transform and its value(s). + + Returns + ------- + float + The value of the jacobian at the given point(s). + """ + x = maps[self._outputvar] + return numpy.exp(x)
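+# A usage sketch (hedged; parameter names are illustrative). The forward
+# transform adds the log of the input parameter to the returned mapping, and
+# jacobian() evaluates dy/dx = 1/x at the same point:
+#
+#     >>> import numpy
+#     >>> t = Log('q', 'logq')
+#     >>> out = t.transform({'q': numpy.array([1., 2., 4.])})   # adds 'logq'
+#     >>> t.jacobian({'q': numpy.array([1., 2., 4.])})          # array([1., 0.5, 0.25])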
+
+ + + +
+[docs] +class Logit(BaseTransform): + """Applies a logit transform from an `inputvar` parameter to an `outputvar` + parameter. This is the inverse of the logistic transform. + + Typically, the input of the logit function is assumed to have domain + :math:`\in (0, 1)`. However, the `domain` argument can be used to expand + this to any finite real interval. + + Parameters + ---------- + inputvar : str + The name of the parameter to transform. + outputvar : str + The name of the transformed parameter. + domain : tuple or distributions.bounds.Bounds, optional + The domain of the input parameter. Can be any finite + interval. Default is (0., 1.). + """ + + name = "logit" + + def __init__(self, inputvar, outputvar, domain=(0.0, 1.0)): + self._inputvar = inputvar + self._outputvar = outputvar + self._inputs = [inputvar] + self._outputs = [outputvar] + self._bounds = Bounds(domain[0], domain[1], + btype_min="open", btype_max="open") + # shortcuts for quick access later + self._a = domain[0] + self._b = domain[1] + super(Logit, self).__init__() + + @property + def inputvar(self): + """Returns the input parameter.""" + return self._inputvar + + @property + def outputvar(self): + """Returns the output parameter.""" + return self._outputvar + + @property + def bounds(self): + """Returns the domain of the input parameter.""" + return self._bounds + +
+[docs] + @staticmethod + def logit(x, a=0.0, b=1.0): + r"""Computes the logit function with domain :math:`x \in (a, b)`. + + This is given by: + + .. math:: + + \mathrm{logit}(x; a, b) = \log\left(\frac{x-a}{b-x}\right). + + Note that this is also the inverse of the logistic function with range + :math:`(a, b)`. + + Parameters + ---------- + x : float + The value to evaluate. + a : float, optional + The minimum bound of the domain of x. Default is 0. + b : float, optional + The maximum bound of the domain of x. Default is 1. + + Returns + ------- + float + The logit of x. + """ + return numpy.log(x - a) - numpy.log(b - x)
+ + +
+[docs] + @staticmethod + def logistic(x, a=0.0, b=1.0): + r"""Computes the logistic function with range :math:`\in (a, b)`. + + This is given by: + + .. math:: + + \mathrm{logistic}(x; a, b) = \frac{a + b e^x}{1 + e^x}. + + Note that this is also the inverse of the logit function with domain + :math:`(a, b)`. + + Parameters + ---------- + x : float + The value to evaluate. + a : float, optional + The minimum bound of the range of the logistic function. Default + is 0. + b : float, optional + The maximum bound of the range of the logistic function. Default + is 1. + + Returns + ------- + float + The logistic of x. + """ + expx = numpy.exp(x) + return (a + b * expx) / (1.0 + expx)
+ + +
+[docs] + def transform(self, maps): + r"""Computes :math:`\mathrm{logit}(x; a, b)`. + + The domain :math:`a, b` of :math:`x` are given by the class's bounds. + + Parameters + ---------- + maps : dict or FieldArray + A dictionary or FieldArray which provides a map between the + parameter name of the variable to transform and its value(s). + + Returns + ------- + out : dict or FieldArray + A map between the transformed variable name and value(s), along + with the original variable name and value(s). + """ + x = maps[self._inputvar] + # check that x is in bounds + isin = self._bounds.__contains__(x) + if isinstance(isin, numpy.ndarray): + isin = isin.all() + if not isin: + raise ValueError("one or more values are not in bounds") + out = {self._outputvar: self.logit(x, self._a, self._b)} + return self.format_output(maps, out)
+ + +
+[docs] + def inverse_transform(self, maps): + r"""Computes :math:`y = \mathrm{logistic}(x; a,b)`. + + The codomain :math:`a, b` of :math:`y` are given by the class's bounds. + + Parameters + ---------- + maps : dict or FieldArray + A dictionary or FieldArray which provides a map between the + parameter name of the variable to transform and its value(s). + + Returns + ------- + out : dict or FieldArray + A map between the transformed variable name and value(s), along + with the original variable name and value(s). + """ + y = maps[self._outputvar] + out = {self._inputvar: self.logistic(y, self._a, self._b)} + return self.format_output(maps, out)
+ + +
+[docs] + def jacobian(self, maps): + r"""Computes the Jacobian of :math:`y = \mathrm{logit}(x; a,b)`. + + This is: + + .. math:: + + \frac{\mathrm{d}y}{\mathrm{d}x} = \frac{b -a}{(x-a)(b-x)}, + + where :math:`x \in (a, b)`. + + Parameters + ---------- + maps : dict or FieldArray + A dictionary or FieldArray which provides a map between the + parameter name of the variable to transform and its value(s). + + Returns + ------- + float + The value of the jacobian at the given point(s). + """ + x = maps[self._inputvar] + # check that x is in bounds + isin = self._bounds.__contains__(x) + if isinstance(isin, numpy.ndarray) and not isin.all(): + raise ValueError("one or more values are not in bounds") + elif not isin: + raise ValueError("{} is not in bounds".format(x)) + return (self._b - self._a) / ((x - self._a) * (self._b - x))
+ + +
+[docs] + def inverse_jacobian(self, maps): + r"""Computes the Jacobian of :math:`y = \mathrm{logistic}(x; a,b)`. + + This is: + + .. math:: + + \frac{\mathrm{d}y}{\mathrm{d}x} = \frac{e^x (b-a)}{(1+e^x)^2}, + + where :math:`y \in (a, b)`. + + Parameters + ---------- + maps : dict or FieldArray + A dictionary or FieldArray which provides a map between the + parameter name of the variable to transform and its value(s). + + Returns + ------- + float + The value of the jacobian at the given point(s). + """ + x = maps[self._outputvar] + expx = numpy.exp(x) + return expx * (self._b - self._a) / (1.0 + expx) ** 2.0
+ + +
+[docs] + @classmethod + def from_config(cls, cp, section, outputs, + skip_opts=None, additional_opts=None): + """Initializes a Logit transform from the given section. + + The section must specify an input and output variable name. The domain + of the input may be specified using `min-{input}`, `max-{input}`. + Example: + + .. code-block:: ini + + [{section}-logitq] + name = logit + inputvar = q + outputvar = logitq + min-q = 1 + max-q = 8 + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the transform options. + section : str + Name of the section in the configuration file. + outputs : str + The names of the parameters that are output by this transformation, + separated by `VARARGS_DELIM`. These must appear in the "tag" part + of the section header. + skip_opts : list, optional + Do not read options in the given list. + additional_opts : dict, optional + Any additional arguments to pass to the class. If an option is + provided that also exists in the config file, the value provided + will be used instead of being read from the file. + + Returns + ------- + cls + An instance of the class. + """ + # pull out the minimum, maximum values of the input variable + inputvar = cp.get_opt_tag(section, "inputvar", outputs) + s = "-".join([section, outputs]) + opt = "min-{}".format(inputvar) + if skip_opts is None: + skip_opts = [] + if additional_opts is None: + additional_opts = {} + else: + additional_opts = additional_opts.copy() + if cp.has_option(s, opt): + a = cp.get_opt_tag(section, opt, outputs) + skip_opts.append(opt) + else: + a = None + opt = "max-{}".format(inputvar) + if cp.has_option(s, opt): + b = cp.get_opt_tag(section, opt, outputs) + skip_opts.append(opt) + else: + b = None + if a is None and b is not None or b is None and a is not None: + raise ValueError( + "if providing a min(max)-{}, must also provide " + "a max(min)-{}".format(inputvar, inputvar) + ) + elif a is not None: + additional_opts.update({"domain": (float(a), float(b))}) + return super(Logit, cls).from_config( + cp, section, outputs, skip_opts, additional_opts + )
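+# A usage sketch (hedged; parameter names and the (1, 8) domain are
+# illustrative). Logit maps a bounded parameter onto the whole real line and
+# back, raising ValueError if a value falls outside the domain:
+#
+#     >>> t = Logit('q', 'logitq', domain=(1., 8.))
+#     >>> out = t.transform({'q': 2.5})        # adds 'logitq' = log((q-1)/(8-q))
+#     >>> back = t.inverse_transform(out)      # recovers 'q' from 'logitq'
+#     >>> t.jacobian({'q': 2.5})               # (8-1)/((2.5-1)*(8-2.5))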
+
+ + + +# +# ============================================================================= +# +# Inverse Transforms +# +# ============================================================================= +# +
+[docs] +class Mass1Mass2ToMchirpQ(MchirpQToMass1Mass2): + """The inverse of MchirpQToMass1Mass2.""" + + name = "mass1_mass2_to_mchirp_q" + inverse = MchirpQToMass1Mass2 + transform = inverse.inverse_transform + inverse_transform = inverse.transform + jacobian = inverse.inverse_jacobian + inverse_jacobian = inverse.jacobian + + def __init__( + self, mass1_param=None, mass2_param=None, mchirp_param=None, q_param=None + ): + if mass1_param is None: + mass1_param = parameters.mass1 + if mass2_param is None: + mass2_param = parameters.mass2 + if mchirp_param is None: + mchirp_param = parameters.mchirp + if q_param is None: + q_param = parameters.q + self.mass1_param = mass1_param + self.mass2_param = mass2_param + self.mchirp_param = mchirp_param + self.q_param = q_param + self._inputs = [self.mass1_param, self.mass2_param] + self._outputs = [self.mchirp_param, self.q_param] + BaseTransform.__init__(self)
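+# A usage sketch (hedged; mass values are placeholders). The class reuses the
+# parent's inverse_transform, so a single call adds chirp mass and mass ratio
+# to the mapping:
+#
+#     >>> t = Mass1Mass2ToMchirpQ()
+#     >>> out = t.transform({'mass1': 30., 'mass2': 20.})   # adds 'mchirp' and 'q'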
+ + + +
+[docs] +class Mass1Mass2ToMchirpEta(MchirpEtaToMass1Mass2): + """The inverse of MchirpEtaToMass1Mass2.""" + + name = "mass1_mass2_to_mchirp_eta" + inverse = MchirpEtaToMass1Mass2 + _inputs = inverse._outputs + _outputs = inverse._inputs + transform = inverse.inverse_transform + inverse_transform = inverse.transform + jacobian = inverse.inverse_jacobian + inverse_jacobian = inverse.jacobian
+ + + +
+[docs] +class DistanceToChirpDistance(ChirpDistanceToDistance): + """The inverse of ChirpDistanceToDistance.""" + + name = "distance_to_chirp_distance" + inverse = ChirpDistanceToDistance + _inputs = [parameters.distance, parameters.mchirp] + _outputs = [parameters.chirp_distance] + transform = inverse.inverse_transform + inverse_transform = inverse.transform + jacobian = inverse.inverse_jacobian + inverse_jacobian = inverse.jacobian
+ + + +
+[docs] +class CartesianToSpherical(SphericalToCartesian): + """Converts cartesian coordinates to spherical. + + Parameters + ---------- + x : str + The name of the x parameter. + y : str + The name of the y parameter. + z : str + The name of the z parameter. + radial : str + The name of the radial parameter. + azimuthal : str + The name of the azimuthal angle parameter. + polar : str + The name of the polar angle parameter. + """ + + name = "cartesian_to_spherical" + inverse = SphericalToCartesian + transform = inverse.inverse_transform + inverse_transform = inverse.transform + jacobian = inverse.inverse_jacobian + inverse_jacobian = inverse.jacobian + + def __init__(self, *args): + super(CartesianToSpherical, self).__init__(*args) + # swap inputs and outputs + outputs = self._inputs + inputs = self._outputs + self._inputs = inputs + self._outputs = outputs + self.inputs = set(self._inputs) + self.outputs = set(self._outputs)
+ + + +
+[docs] +class CartesianSpin1ToSphericalSpin1(CartesianToSpherical): + """The inverse of SphericalSpin1ToCartesianSpin1. + + **Deprecation Warning:** This will be removed in a future update. Use + :py:class:`CartesianToSpherical` with spin-parameter names passed in + instead. + """ + + name = "cartesian_spin_1_to_spherical_spin_1" + + def __init__(self): + logger.warning( + "Deprecation warning: the %s transform will be " + "removed in a future update. Please use %s instead, " + "passing spin1x, spin1y, spin1z, spin1_a, " + "spin1_azimuthal, spin1_polar as arguments.", + self.name, CartesianToSpherical.name + ) + super(CartesianSpin1ToSphericalSpin1, self).__init__( + "spin1x", "spin1y", "spin1z", + "spin1_a", "spin1_azimuthal", "spin1_polar" + )
+ + + +
+[docs] +class CartesianSpin2ToSphericalSpin2(CartesianToSpherical): + """The inverse of SphericalSpin2ToCartesianSpin2. + + **Deprecation Warning:** This will be removed in a future update. Use + :py:class:`CartesianToSpherical` with spin-parameter names passed in + instead. + """ + + name = "cartesian_spin_2_to_spherical_spin_2" + + def __init__(self): + logger.warning( + "Deprecation warning: the %s transform will be " + "removed in a future update. Please use %s instead, " + "passing spin2x, spin2y, spin2z, spin2_a, " + "spin2_azimuthal, spin2_polar as arguments.", + self.name, CartesianToSpherical.name + ) + super(CartesianSpin2ToSphericalSpin2, self).__init__( + "spin2x", "spin2y", "spin2z", + "spin2_a", "spin2_azimuthal", "spin2_polar" + )
+ + + +
+[docs] +class CartesianSpinToAlignedMassSpin(AlignedMassSpinToCartesianSpin): + """The inverse of AlignedMassSpinToCartesianSpin.""" + + name = "cartesian_spin_to_aligned_mass_spin" + inverse = AlignedMassSpinToCartesianSpin + _inputs = inverse._outputs + _outputs = inverse._inputs + transform = inverse.inverse_transform + inverse_transform = inverse.transform + jacobian = inverse.inverse_jacobian + inverse_jacobian = inverse.jacobian
+ + + +
+[docs] +class CartesianSpinToPrecessionMassSpin(PrecessionMassSpinToCartesianSpin): + """The inverse of PrecessionMassSpinToCartesianSpin.""" + + name = "cartesian_spin_to_precession_mass_spin" + inverse = PrecessionMassSpinToCartesianSpin + _inputs = inverse._outputs + _outputs = inverse._inputs + transform = inverse.inverse_transform + inverse_transform = inverse.transform + jacobian = inverse.inverse_jacobian + inverse_jacobian = inverse.jacobian
+ + + +
+[docs] +class ChiPToCartesianSpin(CartesianSpinToChiP): + """The inverse of `CartesianSpinToChiP`.""" + + name = "chi_p_to_cartesian_spin" + inverse = CartesianSpinToChiP + _inputs = inverse._outputs + _outputs = inverse._inputs + transform = inverse.inverse_transform + inverse_transform = inverse.transform + jacobian = inverse.inverse_jacobian + inverse_jacobian = inverse.jacobian
+ + + +
+[docs] +class SSBToGEO(GEOToSSB): + """The inverse of GEOToSSB.""" + + name = "ssb_to_geo" + inverse = GEOToSSB + transform = inverse.inverse_transform + inverse_transform = inverse.transform + + def __init__( + self, tc_geo_param=None, longitude_geo_param=None, + latitude_geo_param=None, polarization_geo_param=None, + tc_ssb_param=None, longitude_ssb_param=None, + latitude_ssb_param=None, polarization_ssb_param=None + ): + params = [tc_geo_param, longitude_geo_param, + latitude_geo_param, polarization_geo_param, + tc_ssb_param, longitude_ssb_param, + latitude_ssb_param, polarization_ssb_param] + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_geo_param = params[0] + self.longitude_geo_param = params[1] + self.latitude_geo_param = params[2] + self.polarization_geo_param = params[3] + self.tc_ssb_param = params[4] + self.longitude_ssb_param = params[5] + self.latitude_ssb_param = params[6] + self.polarization_ssb_param = params[7] + self._inputs = [self.tc_ssb_param, self.longitude_ssb_param, + self.latitude_ssb_param, self.polarization_ssb_param] + self._outputs = [self.tc_geo_param, self.longitude_geo_param, + self.latitude_geo_param, self.polarization_geo_param]
+ + + +
+[docs] +class SSBToLISA(LISAToSSB): + """The inverse of LISAToSSB.""" + + name = "ssb_to_lisa" + inverse = LISAToSSB + transform = inverse.inverse_transform + inverse_transform = inverse.transform + + def __init__( + self, tc_lisa_param=None, longitude_lisa_param=None, + latitude_lisa_param=None, polarization_lisa_param=None, + tc_ssb_param=None, longitude_ssb_param=None, + latitude_ssb_param=None, polarization_ssb_param=None + ): + params = [tc_lisa_param, longitude_lisa_param, + latitude_lisa_param, polarization_lisa_param, + tc_ssb_param, longitude_ssb_param, + latitude_ssb_param, polarization_ssb_param] + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_lisa_param = params[0] + self.longitude_lisa_param = params[1] + self.latitude_lisa_param = params[2] + self.polarization_lisa_param = params[3] + self.tc_ssb_param = params[4] + self.longitude_ssb_param = params[5] + self.latitude_ssb_param = params[6] + self.polarization_ssb_param = params[7] + self._inputs = [self.tc_ssb_param, self.longitude_ssb_param, + self.latitude_ssb_param, self.polarization_ssb_param] + self._outputs = [self.tc_lisa_param, self.longitude_lisa_param, + self.latitude_lisa_param, self.polarization_lisa_param]
+ + + +
+[docs] +class GEOToLISA(LISAToGEO): + """The inverse of LISAToGEO.""" + + name = "geo_to_lisa" + inverse = LISAToGEO + transform = inverse.inverse_transform + inverse_transform = inverse.transform + + def __init__( + self, tc_lisa_param=None, longitude_lisa_param=None, + latitude_lisa_param=None, polarization_lisa_param=None, + tc_geo_param=None, longitude_geo_param=None, + latitude_geo_param=None, polarization_geo_param=None + ): + params = [tc_lisa_param, longitude_lisa_param, + latitude_lisa_param, polarization_lisa_param, + tc_geo_param, longitude_geo_param, + latitude_geo_param, polarization_geo_param] + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_lisa_param = params[0] + self.longitude_lisa_param = params[1] + self.latitude_lisa_param = params[2] + self.polarization_lisa_param = params[3] + self.tc_geo_param = params[4] + self.longitude_geo_param = params[5] + self.latitude_geo_param = params[6] + self.polarization_geo_param = params[7] + self._inputs = [self.tc_geo_param, self.longitude_geo_param, + self.latitude_geo_param, self.polarization_geo_param] + self._outputs = [self.tc_lisa_param, self.longitude_lisa_param, + self.latitude_lisa_param, self.polarization_lisa_param]
+ + + +
+[docs] +class Exponent(Log): + """Applies an exponent transform to an `inputvar` parameter. + + This is the inverse of the log transform. + + Parameters + ---------- + inputvar : str + The name of the parameter to transform. + outputvar : str + The name of the transformed parameter. + """ + + name = "exponent" + inverse = Log + transform = inverse.inverse_transform + inverse_transform = inverse.transform + jacobian = inverse.inverse_jacobian + inverse_jacobian = inverse.jacobian + + def __init__(self, inputvar, outputvar): + super(Exponent, self).__init__(outputvar, inputvar)
+ + + +
+[docs] +class Logistic(Logit): + r"""Applies a logistic transform from an `inputvar` parameter to an + `outputvar` parameter. This is the inverse of the logit transform. + + Typically, the output of the logistic function has range :math:`\in (0, 1)`. + However, the `codomain` argument can be used to expand this to any + finite real interval. + + Parameters + ---------- + inputvar : str + The name of the parameter to transform. + outputvar : str + The name of the transformed parameter. + codomain : tuple or distributions.bounds.Bounds, optional + The range of the output parameter. Can be any finite + interval. Default is (0., 1.). + """ + + name = "logistic" + inverse = Logit + transform = inverse.inverse_transform + inverse_transform = inverse.transform + jacobian = inverse.inverse_jacobian + inverse_jacobian = inverse.jacobian + + def __init__(self, inputvar, outputvar, codomain=(0.0, 1.0)): + super(Logistic, self).__init__(outputvar, inputvar, domain=codomain) + + @property + def bounds(self): + """Returns the range of the output parameter.""" + return self._bounds + +
+[docs] + @classmethod + def from_config(cls, cp, section, outputs, + skip_opts=None, additional_opts=None): + """Initializes a Logistic transform from the given section. + + The section must specify an input and output variable name. The + codomain of the output may be specified using `min-{output}`, + `max-{output}`. Example: + + .. code-block:: ini + + [{section}-q] + name = logistic + inputvar = logitq + outputvar = q + min-q = 1 + max-q = 8 + + Parameters + ---------- + cp : pycbc.workflow.WorkflowConfigParser + A parsed configuration file that contains the transform options. + section : str + Name of the section in the configuration file. + outputs : str + The names of the parameters that are output by this transformation, + separated by `VARARGS_DELIM`. These must appear in the "tag" part + of the section header. + skip_opts : list, optional + Do not read options in the given list. + additional_opts : dict, optional + Any additional arguments to pass to the class. If an option is + provided that also exists in the config file, the value provided + will be used instead of being read from the file. + + Returns + ------- + cls + An instance of the class. + """ + # pull out the minimum, maximum values of the output variable + outputvar = cp.get_opt_tag(section, "outputvar", outputs) + if skip_opts is None: + skip_opts = [] + if additional_opts is None: + additional_opts = {} + else: + additional_opts = additional_opts.copy() + s = "-".join([section, outputs]) + opt = "min-{}".format(outputvar) + if cp.has_option(s, opt): + a = cp.get_opt_tag(section, opt, outputs) + skip_opts.append(opt) + else: + a = None + opt = "max-{}".format(outputvar) + if cp.has_option(s, opt): + b = cp.get_opt_tag(section, opt, outputs) + skip_opts.append(opt) + else: + b = None + if a is None and b is not None or b is None and a is not None: + raise ValueError( + "if providing a min(max)-{}, must also provide " + "a max(min)-{}".format(outputvar, outputvar) + ) + elif a is not None: + additional_opts.update({"codomain": (float(a), float(b))}) + return super(Logistic, cls).from_config( + cp, section, outputs, skip_opts, additional_opts + )
+
+ + + +# set the inverse of the forward transforms to the inverse transforms +MchirpQToMass1Mass2.inverse = Mass1Mass2ToMchirpQ +ChirpDistanceToDistance.inverse = DistanceToChirpDistance +SphericalToCartesian.inverse = CartesianToSpherical +SphericalSpin1ToCartesianSpin1.inverse = CartesianSpin1ToSphericalSpin1 +SphericalSpin2ToCartesianSpin2.inverse = CartesianSpin2ToSphericalSpin2 +AlignedMassSpinToCartesianSpin.inverse = CartesianSpinToAlignedMassSpin +PrecessionMassSpinToCartesianSpin.inverse = CartesianSpinToPrecessionMassSpin +ChiPToCartesianSpin.inverse = CartesianSpinToChiP +Log.inverse = Exponent +Logit.inverse = Logistic +GEOToSSB.inverse = SSBToGEO +LISAToSSB.inverse = SSBToLISA +LISAToGEO.inverse = GEOToLISA + + +# +# ============================================================================= +# +# Collections of transforms +# +# ============================================================================= +# + +# dictionary of all transforms +transforms = { + CustomTransform.name: CustomTransform, + CustomTransformMultiOutputs.name: CustomTransformMultiOutputs, + MchirpQToMass1Mass2.name: MchirpQToMass1Mass2, + Mass1Mass2ToMchirpQ.name: Mass1Mass2ToMchirpQ, + MchirpEtaToMass1Mass2.name: MchirpEtaToMass1Mass2, + Mass1Mass2ToMchirpEta.name: Mass1Mass2ToMchirpEta, + ChirpDistanceToDistance.name: ChirpDistanceToDistance, + DistanceToChirpDistance.name: DistanceToChirpDistance, + SphericalToCartesian.name: SphericalToCartesian, + CartesianToSpherical.name: CartesianToSpherical, + SphericalSpin1ToCartesianSpin1.name: SphericalSpin1ToCartesianSpin1, + CartesianSpin1ToSphericalSpin1.name: CartesianSpin1ToSphericalSpin1, + SphericalSpin2ToCartesianSpin2.name: SphericalSpin2ToCartesianSpin2, + CartesianSpin2ToSphericalSpin2.name: CartesianSpin2ToSphericalSpin2, + DistanceToRedshift.name: DistanceToRedshift, + AlignedMassSpinToCartesianSpin.name: AlignedMassSpinToCartesianSpin, + CartesianSpinToAlignedMassSpin.name: CartesianSpinToAlignedMassSpin, + PrecessionMassSpinToCartesianSpin.name: PrecessionMassSpinToCartesianSpin, + CartesianSpinToPrecessionMassSpin.name: CartesianSpinToPrecessionMassSpin, + ChiPToCartesianSpin.name: ChiPToCartesianSpin, + CartesianSpinToChiP.name: CartesianSpinToChiP, + Log.name: Log, + Exponent.name: Exponent, + Logit.name: Logit, + Logistic.name: Logistic, + LambdaFromTOVFile.name: LambdaFromTOVFile, + LambdaFromMultipleTOVFiles.name: LambdaFromMultipleTOVFiles, + AlignTotalSpin.name: AlignTotalSpin, + GEOToSSB.name: GEOToSSB, + SSBToGEO.name: SSBToGEO, + LISAToSSB.name: LISAToSSB, + SSBToLISA.name: SSBToLISA, + LISAToGEO.name: LISAToGEO, + GEOToLISA.name: GEOToLISA, +} + +# standard CBC transforms: these are transforms that do not require input +# arguments; they are typically used in CBC parameter estimation to transform +# to coordinates understood by the waveform generator +common_cbc_forward_transforms = [ + MchirpQToMass1Mass2(), + DistanceToRedshift(), + SphericalToCartesian( + parameters.spin1x, + parameters.spin1y, + parameters.spin1z, + parameters.spin1_a, + parameters.spin1_azimuthal, + parameters.spin1_polar, + ), + SphericalToCartesian( + parameters.spin2x, + parameters.spin2y, + parameters.spin2z, + parameters.spin2_a, + parameters.spin2_azimuthal, + parameters.spin2_polar, + ), + AlignedMassSpinToCartesianSpin(), + PrecessionMassSpinToCartesianSpin(), + ChiPToCartesianSpin(), + ChirpDistanceToDistance(), + GEOToSSB(), + LISAToSSB(), + LISAToGEO(), +] +common_cbc_inverse_transforms = [ + _t.inverse() + for _t in common_cbc_forward_transforms + 
if not (_t.inverse is None or _t.name == "spherical_to_cartesian") +] +common_cbc_inverse_transforms.extend( + [ + CartesianToSpherical( + parameters.spin1x, + parameters.spin1y, + parameters.spin1z, + parameters.spin1_a, + parameters.spin1_azimuthal, + parameters.spin1_polar, + ), + CartesianToSpherical( + parameters.spin2x, + parameters.spin2y, + parameters.spin2z, + parameters.spin2_a, + parameters.spin2_azimuthal, + parameters.spin2_polar, + ), + ] +) + +common_cbc_transforms = common_cbc_forward_transforms \ + + common_cbc_inverse_transforms + + +
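+# A lookup sketch (hedged): the dictionary is keyed on each class's `name`
+# attribute, which is also how read_transforms_from_config resolves the
+# `name` option of a section:
+#
+#     >>> cls = transforms['log']
+#     >>> t = cls('q', 'logq')      # equivalent to Log('q', 'logq')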
+[docs] +def get_common_cbc_transforms(requested_params, variable_args, valid_params=None): + """Determines if any additional parameters from the InferenceFile are + needed to get derived parameters that user has asked for. + + First it will try to add any base parameters that are required to calculate + the derived parameters. Then it will add any sampling parameters that are + required to calculate the base parameters needed. + + Parameters + ---------- + requested_params : list + List of parameters that user wants. + variable_args : list + List of parameters that InferenceFile has. + valid_params : list + List of parameters that can be accepted. + + Returns + ------- + requested_params : list + Updated list of parameters that user wants. + all_c : list + List of BaseTransforms to apply. + """ + variable_args = ( + set(variable_args) if not isinstance(variable_args, set) else variable_args + ) + + # try to parse any equations by putting all strings together + # this will get some garbage but ensures all alphanumeric/underscored + # parameter names are added + new_params = [] + for opt in requested_params: + s = "" + for ch in opt: + s += ch if ch.isalnum() or ch == "_" else " " + new_params += s.split(" ") + requested_params = set(list(requested_params) + list(new_params)) + + # can pass a list of valid parameters to remove garbage from parsing above + if valid_params: + valid_params = set(valid_params) + requested_params = requested_params.intersection(valid_params) + + # find all the transforms for the requested derived parameters + # calculated from base parameters + from_base_c = [] + for converter in common_cbc_inverse_transforms: + if converter.outputs.issubset(variable_args) or \ + converter.outputs.isdisjoint(requested_params): + continue + intersect = converter.outputs.intersection(requested_params) + if ( + not intersect + or intersect.issubset(converter.inputs) + or intersect.issubset(variable_args) + ): + continue + requested_params.update(converter.inputs) + from_base_c.append(converter) + + # find all the tranforms for the required base parameters + # calculated from sampling parameters + to_base_c = [] + for converter in common_cbc_forward_transforms: + if ( + converter.inputs.issubset(variable_args) + and len(converter.outputs.intersection(requested_params)) > 0 + ): + requested_params.update(converter.inputs) + to_base_c.append(converter) + variable_args.update(converter.outputs) + + # get list of transforms that converts sampling parameters to the base + # parameters and then converts base parameters to the derived parameters + all_c = to_base_c + from_base_c + + return list(requested_params), all_c
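+# A usage sketch (hedged; parameter names are illustrative): given samples in
+# (mchirp, q), asking for the component masses returns the expanded parameter
+# list plus the transforms needed to compute them:
+#
+#     >>> params, trans = get_common_cbc_transforms(
+#     ...     requested_params=['mass1', 'mass2'],
+#     ...     variable_args=['mchirp', 'q'])
+#     >>> # apply_transforms(samples, trans) would then add mass1 and mass2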
+ + + +
+[docs] +def apply_transforms(samples, transforms, inverse=False): + """Applies a list of BaseTransform instances on a mapping object. + + Parameters + ---------- + samples : {FieldArray, dict} + Mapping object to apply transforms to. + transforms : list + List of BaseTransform instances to apply. Nested transforms are assumed + to be in order for forward transforms. + inverse : bool, optional + Apply inverse transforms. In this case transforms will be applied in + the opposite order. Default is False. + + Returns + ------- + samples : {FieldArray, dict} + Mapping object with transforms applied. Same type as input. + """ + if inverse: + transforms = transforms[::-1] + for t in transforms: + try: + if inverse: + samples = t.inverse_transform(samples) + else: + samples = t.transform(samples) + except NotImplementedError: + continue + return samples
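+# A usage sketch (hedged; values are placeholders): transforms that raise
+# NotImplementedError are silently skipped, so a mixed list can be applied in
+# one call:
+#
+#     >>> import numpy
+#     >>> samples = {'mchirp': numpy.array([21.5]), 'q': numpy.array([1.5])}
+#     >>> samples = apply_transforms(samples, [MchirpQToMass1Mass2()])
+#     >>> # samples now also contains 'mass1' and 'mass2'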
+ + + +
+[docs] +def compute_jacobian(samples, transforms, inverse=False): + """Computes the jacobian of the list of transforms at the given sample + points. + + Parameters + ---------- + samples : {FieldArray, dict} + Mapping object specifying points at which to compute jacobians. + transforms : list + List of BaseTransform instances to apply. Nested transforms are assumed + to be in order for forward transforms. + inverse : bool, optional + Compute inverse jacobians. Default is False. + + Returns + ------- + float : + The product of the jacobians of all of the transforms. + """ + j = 1.0 + if inverse: + for t in transforms: + j *= t.inverse_jacobian(samples) + else: + for t in transforms: + j *= t.jacobian(samples) + return j
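+# A usage sketch (hedged): the return value is the product of the individual
+# jacobians evaluated at the given point, so a single log transform gives 1/x:
+#
+#     >>> compute_jacobian({'q': 2.0}, [Log('q', 'logq')])   # -> 0.5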
+ + + +
+[docs] +def order_transforms(transforms): + """Orders transforms to ensure proper chaining. + + For example, if `transforms = [B, A, C]`, and `A` produces outputs needed + by `B`, the transforms will be reordered to `[A, B, C]`. + + Parameters + ---------- + transforms : list + List of transform instances to order. + + Returns + ------- + list : + List of transforms ordered such that forward transforms can be carried + out without error. + """ + # get a set of all inputs and all outputs + outputs = set().union(*[set(t.outputs)-set(t.inputs) for t in transforms]) + out = [] + remaining = [t for t in transforms] + while remaining: + # pull out transforms that have no inputs in the set of outputs + leftover = [] + for t in remaining: + if t.inputs.isdisjoint(outputs): + out.append(t) + outputs -= t.outputs + else: + leftover.append(t) + remaining = leftover + return out
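+# A behaviour sketch (hedged; parameter names are illustrative): transforms
+# whose inputs are not produced by any other transform in the list are emitted
+# first, so a chain supplied out of order is sorted before use:
+#
+#     >>> first = Log('x', 'y')     # produces y from x
+#     >>> second = Log('y', 'z')    # consumes y, produces z
+#     >>> ordered = order_transforms([second, first])
+#     >>> [t.outputs for t in ordered]   # [{'y'}, {'z'}]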
+ + + +
+[docs] +def read_transforms_from_config(cp, section="transforms"): + """Returns a list of PyCBC transform instances for a section in the + given configuration file. + + If the transforms are nested (i.e., the output of one transform is the + input of another), the returned list will be sorted by the order of the + nests. + + Parameters + ---------- + cp : WorkflowConfigParser + An open config file to read. + section : {"transforms", string} + Prefix on section names from which to retrieve the transforms. + + Returns + ------- + list + A list of the parsed transforms. + """ + trans = [] + for subsection in cp.get_subsections(section): + name = cp.get_opt_tag(section, "name", subsection) + t = transforms[name].from_config(cp, section, subsection) + trans.append(t) + return order_transforms(trans)
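+# A configuration sketch (hedged; the tag and values are illustrative, and cp
+# is assumed to be an already-parsed WorkflowConfigParser). Each
+# [transforms-<tag>] subsection names one transform, and the returned list is
+# already ordered for chaining:
+#
+#     [transforms-logitq]
+#     name = logit
+#     inputvar = q
+#     outputvar = logitq
+#     min-q = 1
+#     max-q = 8
+#
+#     >>> trans = read_transforms_from_config(cp)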
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/types/aligned.html b/latest/html/_modules/pycbc/types/aligned.html new file mode 100644 index 00000000000..205aa26d949 --- /dev/null +++ b/latest/html/_modules/pycbc/types/aligned.html @@ -0,0 +1,190 @@ + + + + + + pycbc.types.aligned — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.types.aligned

+# Copyright (C) 2014  Josh Willis, Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides a class derived from numpy.ndarray that also indicates
+whether or not its memory is aligned.  It further provides functions for
+creating zeros and empty (uninitialized) arrays with this class.
+"""
+import numpy as _np
+from pycbc import PYCBC_ALIGNMENT
+
+
+[docs] +def check_aligned(ndarr): + """Return True if the data of the given numpy array starts on a + PYCBC_ALIGNMENT-byte boundary. + """ + return ((ndarr.ctypes.data % PYCBC_ALIGNMENT) == 0)
+ + +
+[docs] +def zeros(n, dtype): + """Return a zero-initialized array of length n whose data starts on a + PYCBC_ALIGNMENT-byte boundary. + """ + d = _np.dtype(dtype) + nbytes = (d.itemsize)*int(n) + # over-allocate by PYCBC_ALIGNMENT bytes, then view an offset slice so the + # returned array starts on an aligned address + tmp = _np.zeros(nbytes+PYCBC_ALIGNMENT, dtype=_np.uint8) + address = tmp.__array_interface__['data'][0] + offset = (PYCBC_ALIGNMENT - address%PYCBC_ALIGNMENT)%PYCBC_ALIGNMENT + ret_ary = tmp[offset:offset+nbytes].view(dtype=d) + del tmp + return ret_ary
+ + +
+[docs] +def empty(n, dtype): + """Return an uninitialized array of length n whose data starts on a + PYCBC_ALIGNMENT-byte boundary. + """ + d = _np.dtype(dtype) + nbytes = (d.itemsize)*int(n) + # over-allocate by PYCBC_ALIGNMENT bytes, then view an offset slice so the + # returned array starts on an aligned address + tmp = _np.empty(nbytes+PYCBC_ALIGNMENT, dtype=_np.uint8) + address = tmp.__array_interface__['data'][0] + offset = (PYCBC_ALIGNMENT - address%PYCBC_ALIGNMENT)%PYCBC_ALIGNMENT + ret_ary = tmp[offset:offset+nbytes].view(dtype=d) + del tmp + return ret_ary
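+# A usage sketch (hedged): both helpers over-allocate a byte buffer and view an
+# offset slice of it, so the returned array always starts on an aligned
+# address:
+#
+#     >>> a = zeros(1024, _np.float32)
+#     >>> check_aligned(a)
+#     True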
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/types/array.html b/latest/html/_modules/pycbc/types/array.html new file mode 100644 index 00000000000..101f2023f48 --- /dev/null +++ b/latest/html/_modules/pycbc/types/array.html @@ -0,0 +1,1395 @@ + + + + + + pycbc.types.array — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.types.array

+# Copyright (C) 2012  Alex Nitz, Josh Willis, Andrew Miller, Tito Dal Canton
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides a device-independent Array class based on PyCUDA and Numpy.
+"""
+
+BACKEND_PREFIX="pycbc.types.array_"
+
+import os as _os
+
+from functools import wraps
+
+import h5py
+import lal as _lal
+import numpy as _numpy
+from numpy import float32, float64, complex64, complex128, ones
+from numpy.linalg import norm
+
+import pycbc.scheme as _scheme
+from pycbc.scheme import schemed, cpuonly
+from pycbc.opt import LimitedSizeDict
+
+#! FIXME: the uint32 datatype has not been fully tested,
+# we should restrict any functions that do not allow an
+# array of uint32 integers
+_ALLOWED_DTYPES = [_numpy.float32, _numpy.float64, _numpy.complex64,
+                   _numpy.complex128, _numpy.uint32, _numpy.int32, int]
+try:
+    _ALLOWED_SCALARS = [int, long, float, complex] + _ALLOWED_DTYPES
+except NameError:
+    _ALLOWED_SCALARS = [int, float, complex] + _ALLOWED_DTYPES
+
+def _convert_to_scheme(ary):
+    if not isinstance(ary._scheme, _scheme.mgr.state.__class__):
+        converted_array = Array(ary, dtype=ary._data.dtype)
+        ary._data = converted_array._data
+        ary._scheme = _scheme.mgr.state
+      
+def _convert(func):
+    @wraps(func)
+    def convert(self, *args, **kwargs):
+        _convert_to_scheme(self)
+        return func(self, *args, **kwargs)
+    return convert
+    
+def _nocomplex(func):
+    @wraps(func)
+    def nocomplex(self, *args, **kwargs):
+        if self.kind == 'real':
+            return func(self, *args, **kwargs)
+        else:
+            raise TypeError( func.__name__ + " does not support complex types")
+    return nocomplex
+
+def _noreal(func):
+    @wraps(func)
+    def noreal(self, *args, **kwargs):
+        if self.kind == 'complex':
+            return func(self, *args, **kwargs)
+        else:
+            raise TypeError( func.__name__ + " does not support real types")
+    return noreal
+
+
+[docs] +def force_precision_to_match(scalar, precision): + """Cast a scalar to the given precision ('single' or 'double'), + preserving whether it is real or complex. + """ + if _numpy.iscomplexobj(scalar): + if precision == 'single': + return _numpy.complex64(scalar) + else: + return _numpy.complex128(scalar) + else: + if precision == 'single': + return _numpy.float32(scalar) + else: + return _numpy.float64(scalar)
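+# A usage sketch (hedged): the helper only switches between single and double
+# precision, keeping real scalars real and complex scalars complex:
+#
+#     >>> force_precision_to_match(3.0, 'single')      # numpy.float32(3.0)
+#     >>> force_precision_to_match(1 + 2j, 'double')   # numpy.complex128(1+2j)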
+ + +
+[docs] +def common_kind(*dtypes): + """Return the first complex dtype among the inputs, or the first dtype + given if none are complex. + """ + for dtype in dtypes: + if dtype.kind == 'c': + return dtype + return dtypes[0]
+ + +@schemed(BACKEND_PREFIX) +def _to_device(array): + """ Move input to device """ + err_msg = "This function is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg) + +@schemed(BACKEND_PREFIX) +def _copy_base_array(array): + """ Copy a backend array""" + err_msg = "This function is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg) + +@schemed(BACKEND_PREFIX) +def _scheme_matches_base_array(array): + """ Check that input matches array type for scheme """ + err_msg = "This function is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg) + +
+[docs] +def check_same_len_precision(a, b): + """Check that the two arguments have the same length and precision. + Raises ValueError if the lengths do not match and TypeError if the + precisions do not match. + """ + if len(a) != len(b): + msg = 'lengths do not match ({} vs {})'.format( + len(a), len(b)) + raise ValueError(msg) + if a.precision != b.precision: + msg = 'precisions do not match ({} vs {})'.format( + a.precision, b.precision) + raise TypeError(msg)
+ + +
+[docs] +class Array(object): + """Array used to do numeric calculations on a various compute + devices. It is a convience wrapper around numpy, and + pycuda. + """ + + def __init__(self, initial_array, dtype=None, copy=True): + """ initial_array: An array-like object as specified by NumPy, this + also includes instances of an underlying data type as described in + section 3 or an instance of the PYCBC Array class itself. This + object is used to populate the data of the array. + + dtype: A NumPy style dtype that describes the type of + encapsulated data (float32,compex64, etc) + + copy: This defines whether the initial_array is copied to instantiate + the array or is simply referenced. If copy is false, new data is not + created, and so all arguments that would force a copy are ignored. + The default is to copy the given object. + """ + self._scheme=_scheme.mgr.state + self._saved = LimitedSizeDict(size_limit=2**5) + + #Unwrap initial_array + if isinstance(initial_array, Array): + initial_array = initial_array._data + + if not copy: + if not _scheme_matches_base_array(initial_array): + raise TypeError("Cannot avoid a copy of this array") + else: + self._data = initial_array + + # Check that the dtype is supported. + if self._data.dtype not in _ALLOWED_DTYPES: + raise TypeError(str(self._data.dtype) + ' is not supported') + + if dtype and dtype != self._data.dtype: + raise TypeError("Can only set dtype when allowed to copy data") + + + if copy: + # First we will check the dtype that we are given + if not hasattr(initial_array, 'dtype'): + initial_array = _numpy.array(initial_array) + + # Determine the dtype to use + if dtype is not None: + dtype = _numpy.dtype(dtype) + if dtype not in _ALLOWED_DTYPES: + raise TypeError(str(dtype) + ' is not supported') + if dtype.kind != 'c' and initial_array.dtype.kind == 'c': + raise TypeError(str(initial_array.dtype) + ' cannot be cast as ' + str(dtype)) + elif initial_array.dtype in _ALLOWED_DTYPES: + dtype = initial_array.dtype + else: + if initial_array.dtype.kind == 'c': + dtype = complex128 + else: + dtype = float64 + + # Cast to the final dtype if needed + if initial_array.dtype != dtype: + initial_array = initial_array.astype(dtype) + + #Create new instance with initial_array as initialization. 
+ if issubclass(type(self._scheme), _scheme.CPUScheme): + if hasattr(initial_array, 'get'): + self._data = _numpy.array(initial_array.get()) + else: + self._data = _numpy.array(initial_array, dtype=dtype, ndmin=1) + elif _scheme_matches_base_array(initial_array): + self._data = _copy_base_array(initial_array) # pylint:disable=assignment-from-no-return + else: + initial_array = _numpy.array(initial_array, dtype=dtype, ndmin=1) + self._data = _to_device(initial_array) # pylint:disable=assignment-from-no-return + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + inputs = [i.numpy() if isinstance(i, Array) else i for i in inputs] + ret = getattr(ufunc, method)(*inputs, **kwargs) + if hasattr(ret, 'shape') and ret.shape == self.shape: + ret = self._return(ret) + return ret + + def __array__(self, dtype=None): + arr = self.numpy() + if dtype is not None: + arr = arr.astype(dtype) + return arr + + @property + def shape(self): + return self._data.shape + + def _memoize_single(func): + @wraps(func) + def memoize_single(self, arg): + badh = str(arg) + + if badh in self._saved: + return self._saved[badh] + + res = func(self, arg) # pylint:disable=not-callable + self._saved[badh] = res + return res + return memoize_single + + def _returnarray(func): + @wraps(func) + def returnarray(self, *args, **kwargs): + return Array(func(self, *args, **kwargs), copy=False) # pylint:disable=not-callable + return returnarray + + def _returntype(func): + @wraps(func) + def returntype(self, *args, **kwargs): + ary = func(self, *args, **kwargs) # pylint:disable=not-callable + if ary is NotImplemented: + return NotImplemented + return self._return(ary) + return returntype + + def _return(self, ary): + """Wrap the ary to return an Array type """ + if isinstance(ary, Array): + return ary + return Array(ary, copy=False) + + def _checkother(func): + @wraps(func) + def checkother(self, *args): + nargs = () + for other in args: + self._typecheck(other) + if type(other) in _ALLOWED_SCALARS: + other = force_precision_to_match(other, self.precision) + nargs +=(other,) + elif isinstance(other, type(self)) or type(other) is Array: + check_same_len_precision(self, other) + _convert_to_scheme(other) + nargs += (other._data,) + else: + return NotImplemented + + return func(self, *nargs) # pylint:disable=not-callable + return checkother + + def _vcheckother(func): + @wraps(func) + def vcheckother(self, *args): + nargs = () + for other in args: + self._typecheck(other) + if isinstance(other, type(self)) or type(other) is Array: + check_same_len_precision(self, other) + _convert_to_scheme(other) + nargs += (other._data,) + else: + raise TypeError('array argument required') + + return func(self, *nargs) # pylint:disable=not-callable + return vcheckother + + def _vrcheckother(func): + @wraps(func) + def vrcheckother(self, *args): + nargs = () + for other in args: + if isinstance(other, type(self)) or type(other) is Array: + check_same_len_precision(self, other) + _convert_to_scheme(other) + nargs += (other._data,) + else: + raise TypeError('array argument required') + + return func(self, *nargs) # pylint:disable=not-callable + return vrcheckother + + def _icheckother(func): + @wraps(func) + def icheckother(self, other): + """ Checks the input to in-place operations """ + self._typecheck(other) + if type(other) in _ALLOWED_SCALARS: + if self.kind == 'real' and type(other) == complex: + raise TypeError('dtypes are incompatible') + other = force_precision_to_match(other, self.precision) + elif isinstance(other, type(self)) or 
type(other) is Array: + check_same_len_precision(self, other) + if self.kind == 'real' and other.kind == 'complex': + raise TypeError('dtypes are incompatible') + _convert_to_scheme(other) + other = other._data + else: + return NotImplemented + + return func(self, other) # pylint:disable=not-callable + return icheckother + + def _typecheck(self, other): + """ Additional typechecking for other. Placeholder for use by derived + types. + """ + pass + + @_returntype + @_convert + @_checkother + def __mul__(self,other): + """ Multiply by an Array or a scalar and return an Array. """ + return self._data * other + + __rmul__ = __mul__ + + @_convert + @_icheckother + def __imul__(self,other): + """ Multiply by an Array or a scalar and return an Array. """ + self._data *= other + return self + + @_returntype + @_convert + @_checkother + def __add__(self,other): + """ Add Array to Array or scalar and return an Array. """ + return self._data + other + + __radd__ = __add__ + +
+[docs] + def fill(self, value): + self._data.fill(value)
+ + + @_convert + @_icheckother + def __iadd__(self,other): + """ Add Array to Array or scalar and return an Array. """ + self._data += other + return self + + @_convert + @_checkother + @_returntype + def __truediv__(self,other): + """ Divide Array by Array or scalar and return an Array. """ + return self._data / other + + @_returntype + @_convert + @_checkother + def __rtruediv__(self,other): + """ Divide Array by Array or scalar and return an Array. """ + return self._data.__rtruediv__(other) + + @_convert + @_icheckother + def __itruediv__(self,other): + """ Divide Array by Array or scalar and return an Array. """ + self._data /= other + return self + + __div__ = __truediv__ + __idiv__ = __itruediv__ + __rdiv__ = __rtruediv__ + + @_returntype + @_convert + def __neg__(self): + """ Return negation of self """ + return - self._data + + @_returntype + @_convert + @_checkother + def __sub__(self,other): + """ Subtract Array or scalar from Array and return an Array. """ + return self._data - other + + @_returntype + @_convert + @_checkother + def __rsub__(self,other): + """ Subtract Array or scalar from Array and return an Array. """ + return self._data.__rsub__(other) + + @_convert + @_icheckother + def __isub__(self,other): + """ Subtract Array or scalar from Array and return an Array. """ + self._data -= other + return self + + @_returntype + @_convert + @_checkother + def __pow__(self,other): + """ Exponentiate Array by scalar """ + return self._data ** other + + @_returntype + @_convert + def __abs__(self): + """ Return absolute value of Array """ + return abs(self._data) + + def __len__(self): + """ Return length of Array """ + return len(self._data) + + def __str__(self): + return str(self._data) + + @property + def ndim(self): + return self._data.ndim + + def __eq__(self,other): + """ + This is the Python special method invoked whenever the '==' + comparison is used. It will return true if the data of two + PyCBC arrays are identical, and all of the numeric meta-data + are identical, irrespective of whether or not the two + instances live in the same memory (for that comparison, the + Python statement 'a is b' should be used instead). + + Thus, this method returns 'True' if the types of both 'self' + and 'other' are identical, as well as their lengths, dtypes + and the data in the arrays, element by element. It will always + do the comparison on the CPU, but will *not* move either object + to the CPU if it is not already there, nor change the scheme of + either object. It is possible to compare a CPU object to a GPU + object, and the comparison should be true if the data and + meta-data of the two objects are the same. + + Note in particular that this function returns a single boolean, + and not an array of booleans as Numpy does. If the numpy + behavior is instead desired it can be obtained using the numpy() + method of the PyCBC type to get a numpy instance from each + object, and invoking '==' on those two instances. + + Parameters + ---------- + other: another Python object, that should be tested for equality + with 'self'. + + Returns + ------- + boolean: 'True' if the types, dtypes, lengths, and data of the + two objects are each identical. + """ + + # Writing the first test as below allows this method to be safely + # called from subclasses. 
+ if type(self) != type(other): + return False + if self.dtype != other.dtype: + return False + if len(self) != len(other): + return False + + # Now we've checked meta-data, so look at the actual data itself: + # The numpy() method call will put a copy of GPU data onto a CPU + # array, and could therefore be slow. As noted in the help for + # this function we don't worry about that. + + sary = self.numpy() + oary = other.numpy() + + # Now we know that both sary and oary are numpy arrays. The + # '==' statement returns an array of booleans, and the all() + # method of that array returns 'True' only if every element + # of that array of booleans is True. + return (sary == oary).all() + +
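+    # A behaviour sketch (hedged; values are placeholders): unlike numpy,
+    # '==' on two PyCBC Arrays returns a single boolean; elementwise
+    # comparison requires dropping to numpy first:
+    #
+    #     >>> a = Array([1., 2., 3.])
+    #     >>> b = Array([1., 2., 3.])
+    #     >>> a == b                        # True (a single boolean)
+    #     >>> a.numpy() == b.numpy()        # array([ True,  True,  True])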
+[docs] + def almost_equal_elem(self,other,tol,relative=True): + """ + Compare whether two array types are almost equal, element + by element. + + If the 'relative' parameter is 'True' (the default) then the + 'tol' parameter (which must be positive) is interpreted as a + relative tolerance, and the comparison returns 'True' only if + abs(self[i]-other[i]) <= tol*abs(self[i]) + for all elements of the array. + + If 'relative' is 'False', then 'tol' is an absolute tolerance, + and the comparison is true only if + abs(self[i]-other[i]) <= tol + for all elements of the array. + + Other meta-data (type, dtype, and length) must be exactly equal. + If either object's memory lives on the GPU it will be copied to + the CPU for the comparison, which may be slow. But the original + object itself will not have its memory relocated nor scheme + changed. + + Parameters + ---------- + other + Another Python object, that should be tested for + almost-equality with 'self', element-by-element. + tol + A non-negative number, the tolerance, which is interpreted + as either a relative tolerance (the default) or an absolute + tolerance. + relative + A boolean, indicating whether 'tol' should be interpreted + as a relative tolerance (if True, the default if this argument + is omitted) or as an absolute tolerance (if tol is False). + + Returns + ------- + boolean + 'True' if the data agree within the tolerance, as + interpreted by the 'relative' keyword, and if the types, + lengths, and dtypes are exactly the same. + """ + # Check that the tolerance is non-negative and raise an + # exception otherwise. + if (tol<0): + raise ValueError("Tolerance cannot be negative") + # Check that the meta-data agree; the type check is written in + # this way so that this method may be safely called from + # subclasses as well. + if type(other) != type(self): + return False + if self.dtype != other.dtype: + return False + if len(self) != len(other): + return False + + # The numpy() method will move any GPU memory onto the CPU. + # Slow, but the user was warned. + + diff = abs(self.numpy()-other.numpy()) + if relative: + cmpary = tol*abs(self.numpy()) + else: + cmpary = tol*ones(len(self),dtype=self.dtype) + + return (diff<=cmpary).all()
+ + +
+[docs] + def almost_equal_norm(self,other,tol,relative=True): + """ + Compare whether two array types are almost equal, normwise. + + If the 'relative' parameter is 'True' (the default) then the + 'tol' parameter (which must be positive) is interpreted as a + relative tolerance, and the comparison returns 'True' only if + abs(norm(self-other)) <= tol*abs(norm(self)). + + If 'relative' is 'False', then 'tol' is an absolute tolerance, + and the comparison is true only if + abs(norm(self-other)) <= tol + + Other meta-data (type, dtype, and length) must be exactly equal. + If either object's memory lives on the GPU it will be copied to + the CPU for the comparison, which may be slow. But the original + object itself will not have its memory relocated nor scheme + changed. + + Parameters + ---------- + other + another Python object, that should be tested for + almost-equality with 'self', based on their norms. + tol + a non-negative number, the tolerance, which is interpreted + as either a relative tolerance (the default) or an absolute + tolerance. + relative + A boolean, indicating whether 'tol' should be interpreted + as a relative tolerance (if True, the default if this argument + is omitted) or as an absolute tolerance (if tol is False). + + Returns + ------- + boolean + 'True' if the data agree within the tolerance, as + interpreted by the 'relative' keyword, and if the types, + lengths, and dtypes are exactly the same. + """ + # Check that the tolerance is non-negative and raise an + # exception otherwise. + if (tol<0): + raise ValueError("Tolerance cannot be negative") + # Check that the meta-data agree; the type check is written in + # this way so that this method may be safely called from + # subclasses as well. + if type(other) != type(self): + return False + if self.dtype != other.dtype: + return False + if len(self) != len(other): + return False + + # The numpy() method will move any GPU memory onto the CPU. + # Slow, but the user was warned. + + diff = self.numpy()-other.numpy() + dnorm = norm(diff) + if relative: + return (dnorm <= tol*norm(self)) + else: + return (dnorm <= tol)
+ + +
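A minimal usage sketch of the two comparison helpers above (illustrative values only; this snippet is not part of the pycbc source):
from pycbc.types import Array

a = Array([1.0, 2.0, 3.0])
b = Array([1.0, 2.0000001, 3.0])

a == b                                             # False: '==' requires exact equality
a.almost_equal_elem(b, tol=1e-6)                   # True: per-element relative tolerance
a.almost_equal_elem(b, tol=1e-9, relative=False)   # False: absolute tolerance is tighter here
a.almost_equal_norm(b, tol=1e-6)                   # True: tolerance applied to the norm of the difference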
+[docs] + @_returntype + @_convert + def real(self): + """ Return real part of Array """ + return Array(self._data.real, copy=True)
+ + +
+[docs] + @_returntype + @_convert + def imag(self): + """ Return imaginary part of Array """ + return Array(self._data.imag, copy=True)
+ + +
+[docs] + @_returntype + @_convert + def conj(self): + """ Return complex conjugate of Array. """ + return self._data.conj()
+ + +
+[docs] + @_returntype + @_convert + @schemed(BACKEND_PREFIX) + def squared_norm(self): + """ Return the elementwise squared norm of the array """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_returntype + @_checkother + @_convert + @schemed(BACKEND_PREFIX) + def multiply_and_add(self, other, mult_fac): + """ Return other multiplied by mult_fac and with self added. + Self is modified in place and returned as output. + Precisions of inputs must match. + """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_vrcheckother + @_convert + @schemed(BACKEND_PREFIX) + def inner(self, other): + """ Return the inner product of the array with complex conjugation. + """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_vrcheckother + @_convert + @schemed(BACKEND_PREFIX) + def vdot(self, other): + """ Return the inner product of the array with complex conjugation. + """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_convert + @schemed(BACKEND_PREFIX) + def clear(self): + """ Clear out the values of the array. """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_vrcheckother + @_convert + @schemed(BACKEND_PREFIX) + def weighted_inner(self, other, weight): + """ Return the weighted inner product of the array with complex conjugation. + """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
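For concreteness, a short sketch of how the scheme-dispatched inner products above are typically called; the values and the PSD-style weight are illustrative and the exact weighting convention is the one implemented by the active scheme backend:
from pycbc.types import Array

a = Array([1.0 + 1.0j, 2.0 + 0.0j])
b = Array([1.0 + 0.0j, 1.0 + 1.0j])
w = Array([1.0, 2.0])

ip = a.inner(b)               # complex-conjugated inner product of a with b
wip = a.weighted_inner(b, w)  # the same inner product, weighted by w (e.g. a PSD)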
+[docs] + @_convert + @schemed(BACKEND_PREFIX) + def sum(self): + """ Return the sum of the array. """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_returntype + @_convert + @schemed(BACKEND_PREFIX) + def cumsum(self): + """ Return the cumulative sum of the array. """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_convert + @_nocomplex + @schemed(BACKEND_PREFIX) + def max(self): + """ Return the maximum value in the array. """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_convert + @_nocomplex + @schemed(BACKEND_PREFIX) + def max_loc(self): + """Return the maximum value in the array along with the index location """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_convert + @schemed(BACKEND_PREFIX) + def abs_arg_max(self): + """ Return the index of the element with the largest absolute value. """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_convert + @schemed(BACKEND_PREFIX) + def abs_max_loc(self): + """Return the maximum elementwise norm in the array along with the index location""" + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_convert + @_nocomplex + @schemed(BACKEND_PREFIX) + def min(self): + """ Return the minimum value in the array. """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
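A quick illustration of the reduction methods above (illustrative array; the return conventions are as documented in the docstrings, with location methods returning a value together with its index):
from pycbc.types import Array

a = Array([-3.0, 1.0, 2.0])

a.max()          # 2.0
a.min()          # -3.0
a.max_loc()      # maximum value and its index: (2.0, 2)
a.abs_max_loc()  # magnitude of the largest-|value| element and its index, here element 0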
+[docs] + @_returnarray + @_convert + @schemed(BACKEND_PREFIX) + def take(self, indices): + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_convert + @_vcheckother + @schemed(BACKEND_PREFIX) + def dot(self, other): + """ Return the dot product""" + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + + @schemed(BACKEND_PREFIX) + def _getvalue(self, index): + """Helper function to return a single value from an array. May be very + slow if the memory is on a GPU. + """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg) + + @_memoize_single + @_returntype + def _getslice(self, index): + return self._return(self._data[index]) + + @_convert + def __getitem__(self, index): + """ Return items from the Array. This is not guaranteed to be fast for + returning single values. + """ + if isinstance(index, slice): + return self._getslice(index) + else: + return self._getvalue(index) + +
+[docs] + @_convert + def resize(self, new_size): + """Resize self to new_size + """ + if new_size == len(self): + return + else: + self._saved = LimitedSizeDict(size_limit=2**5) + new_arr = zeros(new_size, dtype=self.dtype) + if len(self) <= new_size: + new_arr[0:len(self)] = self + else: + new_arr[:] = self[0:new_size] + + self._data = new_arr._data
+ + +
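A small sketch of resize in practice (illustrative lengths; not part of the module source):
from pycbc.types import Array

a = Array([1.0, 2.0, 3.0, 4.0])
a.resize(6)    # zero-padded in place: [1, 2, 3, 4, 0, 0]
a.resize(3)    # truncated in place to the first three samples: [1, 2, 3]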
+[docs] + @_convert + def roll(self, shift): + """Cyclically shift the vector by 'shift' samples, in place. + """ + new_arr = zeros(len(self), dtype=self.dtype) + + if shift < 0: + shift = shift - len(self) * (shift // len(self)) + + if shift == 0: + return + + new_arr[0:shift] = self[len(self)-shift: len(self)] + new_arr[shift:len(self)] = self[0:len(self)-shift] + + self._saved = LimitedSizeDict(size_limit=2**5) + + self._data = new_arr._data
+ + +
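For example, the cyclic shift wraps samples around the end of the vector (illustrative values):
from pycbc.types import Array

a = Array([1.0, 2.0, 3.0, 4.0])
a.roll(1)      # a is now [4, 1, 2, 3]
a.roll(-1)     # negative shifts wrap the other way: back to [1, 2, 3, 4]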
+[docs] + @_returntype + @_convert + def astype(self, dtype): + if _numpy.dtype(self.dtype) == _numpy.dtype(dtype): + return self + else: + return self._data.astype(dtype)
+ + + @schemed(BACKEND_PREFIX) + def _copy(self, self_ref, other_ref): + """Helper function to copy between two arrays. The arrays references + should be bare array types and not `Array` class instances. + """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg) + + @_convert + def __setitem__(self, index, other): + if isinstance(other,Array): + _convert_to_scheme(other) + + if self.kind == 'real' and other.kind == 'complex': + raise ValueError('Cannot set real value with complex') + + if isinstance(index,slice): + self_ref = self._data[index] + other_ref = other._data + else: + self_ref = self._data[index:index+1] + other_ref = other._data + + self._copy(self_ref, other_ref) + + elif type(other) in _ALLOWED_SCALARS: + if isinstance(index, slice): + self[index].fill(other) + else: + self[index:index+1].fill(other) + else: + raise TypeError('Can only copy data from another Array') + + @property + def precision(self): + if self.dtype == float32 or self.dtype == complex64: + return 'single' + else: + return 'double' + + @property + def kind(self): + if self.dtype == float32 or self.dtype == float64: + return 'real' + elif self.dtype == complex64 or self.dtype == complex128: + return 'complex' + else: + return 'unknown' + + @property + @_convert + def data(self): + """Returns the internal python array """ + return self._data + + @data.setter + def data(self,other): + dtype = None + if hasattr(other,'dtype'): + dtype = other.dtype + temp = Array(other, dtype=dtype) + self._data = temp._data + + @property + @_convert + @schemed(BACKEND_PREFIX) + def ptr(self): + """ Returns a pointer to the memory of this array """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg) + + @property + def itemsize(self): + return self.dtype.itemsize + + @property + def nbytes(self): + return len(self.data) * self.itemsize + + @property + @cpuonly + @_convert + def _swighelper(self): + """ Used internally by SWIG typemaps to ensure @_convert + is called and scheme is correct + """ + return self; + +
+[docs] + @_convert + @schemed(BACKEND_PREFIX) + def numpy(self): + """ Returns a Numpy Array that contains this data """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] + @_convert + def lal(self): + """ Returns a LAL Object that contains this data """ + + lal_data = None + if self._data.dtype == float32: + lal_data = _lal.CreateREAL4Vector(len(self)) + elif self._data.dtype == float64: + lal_data = _lal.CreateREAL8Vector(len(self)) + elif self._data.dtype == complex64: + lal_data = _lal.CreateCOMPLEX8Vector(len(self)) + elif self._data.dtype == complex128: + lal_data = _lal.CreateCOMPLEX16Vector(len(self)) + + lal_data.data[:] = self.numpy() + + return lal_data
+ + + @property + def dtype(self): + return self._data.dtype + +
+[docs] + def save(self, path, group=None): + """ + Save array to a Numpy .npy, hdf, or text file. When saving a complex array as + text, the real and imaginary parts are saved as the first and second + column respectively. When using hdf format, the data is stored + as a single vector, along with relevant attributes. + + Parameters + ---------- + path: string + Destination file path. Must end with either .hdf, .npy or .txt. + + group: string + Additional name for internal storage use. Ex. hdf storage uses + this as the key value. + + Raises + ------ + ValueError + If path does not end in .npy, .txt, or .hdf. + """ + + ext = _os.path.splitext(path)[1] + if ext == '.npy': + _numpy.save(path, self.numpy()) + elif ext == '.txt': + if self.kind == 'real': + _numpy.savetxt(path, self.numpy()) + elif self.kind == 'complex': + output = _numpy.vstack((self.numpy().real, + self.numpy().imag)).T + _numpy.savetxt(path, output) + elif ext == '.hdf': + key = 'data' if group is None else group + with h5py.File(path, 'a') as f: + f.create_dataset(key, data=self.numpy(), compression='gzip', + compression_opts=9, shuffle=True) + else: + raise ValueError('Path must end with .npy, .txt, or .hdf')
+ + +
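A round-trip sketch pairing save() with the module-level load_array() defined further below; the file name and group key are illustrative:
from pycbc.types import Array
from pycbc.types.array import load_array

a = Array([1.0 + 2.0j, 3.0 - 4.0j])
a.save('example.hdf', group='strain')          # stored as a single complex dataset
b = load_array('example.hdf', group='strain')  # read back as an Array
a == b                                         # True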
+[docs] + @_convert + def trim_zeros(self): + """Remove the leading and trailing zeros. + """ + tmp = self.numpy() + f = len(self)-len(_numpy.trim_zeros(tmp, trim='f')) + b = len(self)-len(_numpy.trim_zeros(tmp, trim='b')) + return self[f:len(self)-b]
+ + +
+[docs] + @_returntype + @_convert + def view(self, dtype): + """ + Return a 'view' of the array with its bytes now interpreted according + to 'dtype'. The location in memory is unchanged and changing elements + in a view of an array will also change the original array. + + Parameters + ---------- + dtype : numpy dtype (one of float32, float64, complex64 or complex128) + The new dtype that should be used to interpret the bytes of self + """ + return self._data.view(dtype)
+ + +
+[docs] + def copy(self): + """ Return copy of this array """ + return self._return(self.data.copy())
+ + + def __lt__(self, other): + return self.numpy().__lt__(other) + + def __le__(self, other): + return self.numpy().__le__(other) + + def __ne__(self, other): + return self.numpy().__ne__(other) + + def __gt__(self, other): + return self.numpy().__gt__(other) + + def __ge__(self, other): + return self.numpy().__ge__(other)
+ + +# Convenience functions for determining dtypes +
+[docs] +def real_same_precision_as(data): + if data.precision == 'single': + return float32 + elif data.precision == 'double': + return float64
+ + +
+[docs] +def complex_same_precision_as(data): + if data.precision == 'single': + return complex64 + elif data.precision == 'double': + return complex128
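The two helpers above are typically used to pick matching dtypes when allocating companion arrays; a brief sketch:
import numpy
from pycbc.types import Array
from pycbc.types.array import real_same_precision_as, complex_same_precision_as

a = Array([1.0, 2.0], dtype=numpy.float32)
real_same_precision_as(a)      # numpy.float32
complex_same_precision_as(a)   # numpy.complex64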
+ + +def _return_array(func): + @wraps(func) + def return_array(*args, **kwds): + return Array(func(*args, **kwds), copy=False) + return return_array + +
+[docs] +@_return_array +@schemed(BACKEND_PREFIX) +def zeros(length, dtype=float64): + """ Return an Array filled with zeros. + """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] +@_return_array +@schemed(BACKEND_PREFIX) +def empty(length, dtype=float64): + """ Return an empty Array (no initialization) + """ + err_msg = "This function is a stub that should be overridden using " + err_msg += "the scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
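A minimal sketch of the two module-level constructors above; the lengths and dtypes are illustrative, and the returned Array lives under whatever processing scheme is currently active:
import numpy
from pycbc.types.array import zeros, empty

z = zeros(4)                            # float64 zeros by default
c = zeros(4, dtype=numpy.complex64)     # complex zeros
buf = empty(1024, dtype=numpy.float32)  # uninitialised storage, to be filled later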
+[docs] +def load_array(path, group=None): + """Load an Array from an HDF5, ASCII or Numpy file. The file type is + inferred from the file extension, which must be `.hdf`, `.txt` or `.npy`. + + For ASCII and Numpy files with a single column, a real array is returned. + For files with two columns, the columns are assumed to contain the real + and imaginary parts of a complex array respectively. + + The default data types will be double precision floating point. + + Parameters + ---------- + path : string + Input file path. Must end with either `.npy`, `.txt` or `.hdf`. + + group: string + Additional name for internal storage use. When reading HDF files, this + is the path to the HDF dataset to read. + + Raises + ------ + ValueError + If path does not end with a supported extension. For Numpy and ASCII + input files, this is also raised if the array does not have 1 or 2 + dimensions. + """ + ext = _os.path.splitext(path)[1] + if ext == '.npy': + data = _numpy.load(path) + elif ext == '.txt': + data = _numpy.loadtxt(path) + elif ext == '.hdf': + key = 'data' if group is None else group + with h5py.File(path, 'r') as f: + array = Array(f[key]) + return array + else: + raise ValueError('Path must end with .npy, .hdf, or .txt') + + if data.ndim == 1: + return Array(data) + elif data.ndim == 2: + return Array(data[:,0] + 1j*data[:,1]) + + raise ValueError('File has %s dimensions, cannot convert to Array, \ + must be 1 (real) or 2 (complex)' % data.ndim)
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/types/config.html b/latest/html/_modules/pycbc/types/config.html new file mode 100644 index 00000000000..cec63f33d49 --- /dev/null +++ b/latest/html/_modules/pycbc/types/config.html @@ -0,0 +1,910 @@ + + + + + + pycbc.types.config — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.types.config

+# Copyright (C) 2013,2017,2021 Ian Harry, Duncan Brown, Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides a wrapper around the standard ConfigParser utilities,
+adding the interpolation and workflow-oriented helpers used throughout pycbc.
+"""
+import re
+import os
+import itertools
+import logging
+from io import StringIO
+import configparser as ConfigParser
+
+logger = logging.getLogger('pycbc.types.config')
+
+
+
+[docs] +class DeepCopyableConfigParser(ConfigParser.ConfigParser): + """ + The standard SafeConfigParser no longer supports deepcopy() as of python + 2.7 (see http://bugs.python.org/issue16058). This subclass restores that + functionality. + """ + + def __deepcopy__(self, memo): + # http://stackoverflow.com/questions/23416370 + # /manually-building-a-deep-copy-of-a-configparser-in-python-2-7 + config_string = StringIO() + self.write(config_string) + config_string.seek(0) + new_config = self.__class__() + new_config.read_file(config_string) + return new_config
+ + + +
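A minimal sketch of the restored deepcopy behaviour (the section and option names are illustrative, not part of the module):
import copy
from pycbc.types.config import DeepCopyableConfigParser

cp = DeepCopyableConfigParser()
cp.add_section('engine')
cp.set('engine', 'approximant', 'IMRPhenomD')

cp2 = copy.deepcopy(cp)                 # independent copy made by writing and re-reading the ini text
cp2.set('engine', 'approximant', 'SEOBNRv4')
cp.get('engine', 'approximant')         # still 'IMRPhenomD'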
+[docs] +class InterpolatingConfigParser(DeepCopyableConfigParser): + """ + This is a sub-class of DeepCopyableConfigParser, which lets + us add a few additional helper features that are useful in workflows. + """ + + def __init__( + self, + configFiles=None, + overrideTuples=None, + parsedFilePath=None, + deleteTuples=None, + skip_extended=False, + sanitize_newline=True, + ): + """ + Initialize an InterpolatingConfigParser. This reads the input configuration + files, overrides values if necessary and performs the interpolation. + + Parameters + ----------- + configFiles : Path to .ini file, or list of paths + The file(s) to be read in and parsed. + overrideTuples : List of (section, option, value) tuples + Add the (section, option, value) triplets provided + in this list to the provided .ini file(s). If the section, option + pair is already present, it will be overwritten. + parsedFilePath : Path, optional (default=None) + If given, write the parsed .ini file back to disk at this location. + deleteTuples : List of (section, option) tuples + Delete the (section, option) pairs provided + in this list from provided .ini file(s). If the section only + is provided, the entire section will be deleted. + + Returns + -------- + InterpolatingConfigParser + Initialized InterpolatingConfigParser instance. + """ + if configFiles is None: + configFiles = [] + if overrideTuples is None: + overrideTuples = [] + if deleteTuples is None: + deleteTuples = [] + + super().__init__() + + # Enable case sensitive options + self.optionxform = str + + # Add in environment + # We allow access to environment variables by loading them into a + # special configparser section ([environment]) which can then + # be referenced by other sections. + # We cannot include environment variables containing characters + # that are special to ConfigParser. So any variable containing a % or a + # $ is ignored. + env_vals = { + key: value for key, value in os.environ.items() + if '%' not in value and '$' not in value + } + self.read_dict({'environment': env_vals}) + + self.read_ini_file(configFiles) + + # Split sections like [inspiral&tmplt] into [inspiral] and [tmplt] + self.split_multi_sections() + + # Populate shared options from the [sharedoptions] section + self.populate_shared_sections() + + # Do deletes from command line + for delete in deleteTuples: + if len(delete) == 1: + if self.remove_section(delete[0]) is False: + raise ValueError( + "Cannot delete section %s, " + "no such section in configuration." % delete + ) + + logger.info( + "Deleting section %s from configuration", delete[0] + ) + elif len(delete) == 2: + if self.remove_option(delete[0], delete[1]) is False: + raise ValueError( + "Cannot delete option %s from section %s," + " no such option in configuration." % delete + ) + + logger.info( + "Deleting option %s from section %s in " "configuration", + delete[1], + delete[0], + ) + else: + raise ValueError( + "Deletes must be tuples of length 1 or 2. " + "Got %s." % str(delete) + ) + + # Do overrides from command line + for override in overrideTuples: + if len(override) not in [2, 3]: + errmsg = "Overrides must be tuples of length 2 or 3." + errmsg = "Got %s." 
% (str(override)) + raise ValueError(errmsg) + section = override[0] + option = override[1] + value = "" + if len(override) == 3: + value = override[2] + # Check for section existence, create if needed + if not self.has_section(section): + self.add_section(section) + self.set(section, option, value) + logger.info( + "Overriding section %s option %s with value %s " + "in configuration.", + section, + option, + value, + ) + + # Check for any substitutions that can be made + if not skip_extended: + self.perform_extended_interpolation() + + # replace newlines in input with spaces + # this enables command line conversion compatibility + if sanitize_newline: + self.sanitize_newline() + + # Check for duplicate options in sub-sections + self.sanity_check_subsections() + + # Dump parsed .ini file if needed + if parsedFilePath: + fp = open(parsedFilePath, "w") + self.write(fp) + fp.close() + +
+[docs] + @classmethod + def from_cli(cls, opts): + """Initialize the config parser using options parsed from the command + line. + + The parsed options ``opts`` must include options provided by + :py:func:`add_workflow_command_line_group`. + + Parameters + ----------- + opts : argparse.ArgumentParser + The command line arguments parsed by argparse + """ + # read configuration file + logger.info("Reading configuration file") + if opts.config_overrides is not None: + overrides = [ + tuple(override.split(":", 2)) + for override in opts.config_overrides + ] + else: + overrides = None + if opts.config_delete is not None: + deletes = [ + tuple(delete.split(":")) for delete in opts.config_delete + ] + else: + deletes = None + return cls(opts.config_files, overrides, deleteTuples=deletes)
+ + +
+[docs] + def read_ini_file(self, fpath): + """ + Read one or more .ini files into this ConfigParser instance. + This function does none of the parsing/combining of sections. It simply + reads the file(s) in unedited. + + Stub awaiting more functionality - see configparser_test.py + + Parameters + ---------- + fpath : Path to .ini file, or list of paths + The path(s) to a .ini file to be read in + """ + # Read the file + self.read(fpath)
+ + +
+[docs] + def get_subsections(self, section_name): + """Return a list of subsections for the given section name""" + # Keep only subsection names + subsections = [ + sec[len(section_name) + 1:] + for sec in self.sections() + if sec.startswith(section_name + "-") + and not sec.endswith('defaultvalues') + ] + + for sec in subsections: + sp = sec.split("-") + # The format [section-subsection-tag] is okay. Just + # check that [section-subsection] section exists. If not it is possible + # the user is trying to use an subsection name with '-' in it + if (len(sp) > 1) and not self.has_section( + "%s-%s" % (section_name, sp[0]) + ): + raise ValueError( + "Workflow uses the '-' as a delimiter so " + "this is interpreted as section-subsection-tag. " + "While checking section %s, no section with " + "name %s-%s was found. " + "If you did not intend to use tags in an " + "'advanced user' manner, or do not understand what " + "this means, don't use dashes in section " + "names. So [injection-nsbhinj] is good. " + "[injection-nsbh-inj] is not." % (sec, sp[0], sp[1]) + ) + + if len(subsections) > 0: + return [sec.split("-")[0] for sec in subsections] + elif self.has_section(section_name): + return [""] + else: + return []
+ + +
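For example, with the [section-subsection] naming convention described above (the detector-style names are illustrative):
from pycbc.types.config import InterpolatingConfigParser

cp = InterpolatingConfigParser()
cp.add_section('inspiral')
cp.add_section('inspiral-h1')
cp.add_section('inspiral-l1')
cp.get_subsections('inspiral')   # ['h1', 'l1']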
+[docs] + def perform_extended_interpolation(self): + """ + Filter through an ini file and replace all examples of + ExtendedInterpolation formatting with the exact value. For values like + ${example} this is replaced with the value that corresponds to the + option called example ***in the same section*** + + For values like ${common|example} this is replaced with the value that + corresponds to the option example in the section [common]. Note that + in the python3 config parser this is ${common:example} but python2.7 + interprets the : the same as a = and this breaks things + + Nested interpolation is not supported here. + """ + + # Do not allow any interpolation of the section names + for section in self.sections(): + for option, value in self.items(section): + # Check the option name + new_str = self.interpolate_string(option, section) + if new_str != option: + self.set(section, new_str, value) + self.remove_option(section, option) + # Check the value + new_str = self.interpolate_string(value, section) + if new_str != value: + self.set(section, option, new_str)
+ + +
+[docs] + def sanitize_newline(self): + """ + Filter through an ini file and replace all occurrences of + newlines with spaces. This is useful for command line conversion + and allows multiline configparser inputs without added backslashes + """ + + # Replace newlines in every option of every section + for section in self.sections(): + for option, value in self.items(section): + new_value = value.replace('\n', ' ').replace('\r', ' ') + self.set(section, option, new_value)
+ + +
+[docs] + def interpolate_string(self, test_string, section): + """ + Take a string and replace all example of ExtendedInterpolation + formatting within the string with the exact value. + + For values like ${example} this is replaced with the value that + corresponds to the option called example ***in the same section*** + + For values like ${common|example} this is replaced with the value that + corresponds to the option example in the section [common]. Note that + in the python3 config parser this is ${common:example} but python2.7 + interprets the : the same as a = and this breaks things + + Nested interpolation is not supported here. + + Parameters + ---------- + test_string : String + The string to parse and interpolate + section : String + The current section of the ConfigParser object + + Returns + ---------- + test_string : String + Interpolated string + """ + + # First check if any interpolation is needed and abort if not + re_obj = re.search(r"\$\{.*?\}", test_string) + while re_obj: + # Not really sure how this works, but this will obtain the first + # instance of a string contained within ${....} + rep_string = (re_obj).group(0)[2:-1] + # Need to test which of the two formats we have + split_string = rep_string.split("|") + if len(split_string) == 1: + try: + test_string = test_string.replace( + "${" + rep_string + "}", + self.get(section, split_string[0]), + ) + except ConfigParser.NoOptionError: + print("Substitution failed") + raise + if len(split_string) == 2: + try: + test_string = test_string.replace( + "${" + rep_string + "}", + self.get(split_string[0], split_string[1]), + ) + except ConfigParser.NoOptionError: + print("Substitution failed") + raise + re_obj = re.search(r"\$\{.*?\}", test_string) + + return test_string
+ + +
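A sketch of both interpolation forms handled above; the section and option names are made up for illustration:
from pycbc.types.config import InterpolatingConfigParser

cp = InterpolatingConfigParser()
cp.add_section('paths')
cp.set('paths', 'base-dir', '/data/run1')
cp.add_section('output')
cp.set('output', 'plot-dir', '${paths|base-dir}/plots')   # cross-section reference
cp.set('output', 'html-dir', '${plot-dir}/html')          # same-section reference

cp.perform_extended_interpolation()
cp.get('output', 'plot-dir')   # '/data/run1/plots'
cp.get('output', 'html-dir')   # '/data/run1/plots/html'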
+[docs] + def split_multi_sections(self): + """ + Parse through the WorkflowConfigParser instance and splits any sections + labelled with an "&" sign (for e.g. [inspiral&tmpltbank]) into + [inspiral] and [tmpltbank] sections. If these individual sections + already exist they will be appended to. If an option exists in both the + [inspiral] and [inspiral&tmpltbank] sections an error will be thrown + """ + # Begin by looping over all sections + for section in self.sections(): + # Only continue if section needs splitting + if "&" not in section: + continue + # Get list of section names to add these options to + split_sections = section.split("&") + for new_sec in split_sections: + # Add sections if they don't already exist + if not self.has_section(new_sec): + self.add_section(new_sec) + self.add_options_to_section(new_sec, self.items(section)) + self.remove_section(section)
+ + +
+[docs] + def populate_shared_sections(self): + """Parse the [sharedoptions] section of the ini file. + + That section should contain entries according to: + + * massparams = inspiral, tmpltbank + * dataparams = tmpltbank + + This will result in all options in [sharedoptions-massparams] being + copied into the [inspiral] and [tmpltbank] sections and the options + in [sharedoptions-dataparams] being copied into [tmpltbank]. + In the case of duplicates an error will be raised. + """ + if not self.has_section("sharedoptions"): + # No sharedoptions, exit + return + for key, value in self.items("sharedoptions"): + assert self.has_section("sharedoptions-%s" % (key)) + # Comma separated + values = value.split(",") + common_options = self.items("sharedoptions-%s" % (key)) + for section in values: + if not self.has_section(section): + self.add_section(section) + for arg, val in common_options: + if arg in self.options(section): + raise ValueError( + "Option exists in both original " + + "ConfigParser section [%s] and " % (section,) + + "sharedoptions section: %s %s" + % (arg, "sharedoptions-%s" % (key)) + ) + self.set(section, arg, val) + self.remove_section("sharedoptions-%s" % (key)) + self.remove_section("sharedoptions")
+ + +
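A compact sketch of the [sharedoptions] layout being expanded; the section and option names are illustrative:
from pycbc.types.config import InterpolatingConfigParser

cp = InterpolatingConfigParser()
cp.read_dict({
    'sharedoptions': {'massparams': 'inspiral,tmpltbank'},
    'sharedoptions-massparams': {'min-mass1': '1.0'},
})
cp.populate_shared_sections()
cp.get('inspiral', 'min-mass1')    # '1.0', also copied into [tmpltbank]
cp.has_section('sharedoptions')    # False: the helper sections are removed afterwards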
+[docs] + def add_options_to_section(self, section, items, overwrite_options=False): + """ + Add a set of options and values to a section of a ConfigParser object. + Will throw an error if any of the options being added already exist, + this behaviour can be overridden if desired + + Parameters + ---------- + section : string + The name of the section to add options+values to + items : list of tuples + Each tuple contains (at [0]) the option and (at [1]) the value to + add to the section of the ini file + overwrite_options : Boolean, optional + By default this function will throw a ValueError if an option exists + in both the original section in the ConfigParser *and* in the + provided items. + This will override so that the options+values given in items + will replace the original values if the value is set to True. + Default = False + """ + # Sanity checking + if not self.has_section(section): + raise ValueError( + "Section %s not present in ConfigParser." % (section,) + ) + + # Check for duplicate options first + for option, value in items: + if not overwrite_options: + if option in self.options(section): + raise ValueError( + "Option exists in both original " + + "ConfigParser section [%s] and " % (section,) + + "input list: %s" % (option,) + ) + self.set(section, option, value)
+ + +
+[docs] + def sanity_check_subsections(self): + """ + This function goes through the ConfigParser and checks that any options + given in the [SECTION_NAME] section are not also given in any + [SECTION_NAME-SUBSECTION] sections. + + """ + # Loop over the sections in the ini file + for section in self.sections(): + # [pegasus_profile] is specially allowed to be overriden by + # sub-sections + if section == "pegasus_profile": + continue + + if section.endswith('-defaultvalues') and \ + not len(section.split('-')) == 2: + # Only allow defaultvalues for top-level sections + raise NotImplementedError( + "-defaultvalues subsections are only allowed for " + "top-level sections; given %s" % section + ) + + # Loop over the sections again + for section2 in self.sections(): + # Check if any are subsections of section + if section2.startswith(section + "-"): + if section2.endswith("defaultvalues"): + # defaultvalues is storage for defaults, and will + # be over-written by anything in the sections-proper + continue + # Check for duplicate options whenever this exists + self.check_duplicate_options( + section, section2, raise_error=True + )
+ + +
+[docs] + def check_duplicate_options(self, section1, section2, raise_error=False): + """ + Check for duplicate options in two sections, section1 and section2. + Will return a list of the duplicate options. + + Parameters + ---------- + section1 : string + The name of the first section to compare + section2 : string + The name of the second section to compare + raise_error : Boolean, optional (default=False) + If True, raise an error if duplicates are present. + + Returns + ---------- + duplicates : List + List of duplicate options + """ + # Sanity checking + if not self.has_section(section1): + raise ValueError( + "Section %s not present in ConfigParser." % (section1,) + ) + if not self.has_section(section2): + raise ValueError( + "Section %s not present in ConfigParser." % (section2,) + ) + + # Are section1 and section2 a section-and-defaultvalues pair? + section_and_default = (section1 == f"{section2}-defaultvalues" or + section2 == f"{section1}-defaultvalues") + + # Is one the sections defaultvalues, but the other is not the + # top-level section? This is to catch the case where we are + # comparing section-defaultvalues with section-subsection + if section1.endswith("-defaultvalues") or \ + section2.endswith("-defaultvalues"): + if not section_and_default: + # Override the raise_error variable not to error when + # defaultvalues are given and the sections are not + # otherwise the same + raise_error = False + + items1 = self.options(section1) + items2 = self.options(section2) + + # The list comprehension here creates a list of all duplicate items + duplicates = [x for x in items1 if x in items2] + + if duplicates and raise_error: + err_msg = ("The following options appear in both section " + f"{section1} and {section2}: " + ", ".join(duplicates)) + if section_and_default: + err_msg += ". Default values are unused in this case." + raise ValueError(err_msg) + + return duplicates
+ + +
+[docs] + def get_opt_tag(self, section, option, tag): + """ + Convenience function accessing get_opt_tags() for a single tag: see + documentation for that function. + NB calling get_opt_tags() directly is preferred for simplicity. + + Parameters + ----------- + self : ConfigParser object + The ConfigParser object (automatically passed when this is appended + to the ConfigParser class) + section : string + The section of the ConfigParser object to read + option : string + The ConfigParser option to look for + tag : string + The name of the subsection to look in, if not found in [section] + + Returns + -------- + string + The value of the options being searched for + """ + return self.get_opt_tags(section, option, [tag])
+ + +
+[docs] + def get_opt_tags(self, section, option, tags): + """ + Supplement to ConfigParser.ConfigParser.get(). This will search for an + option in [section] and if it doesn't find it will also try in + [section-defaultvalues], and [section-tag] for every value of tag + in tags. [section-tag] will be preferred to [section-defaultvalues] + values. Will raise a ConfigParser.Error if it cannot find a value. + + Parameters + ----------- + self : ConfigParser object + The ConfigParser object (automatically passed when this is appended + to the ConfigParser class) + section : string + The section of the ConfigParser object to read + option : string + The ConfigParser option to look for + tags : list of strings + The name of subsections to look in, if not found in [section] + + Returns + -------- + string + The value of the options being searched for + """ + # Need lower case tag name; also exclude cases with tag=None + if tags: + tags = [tag.lower() for tag in tags if tag is not None] + + try: + return self.get(section, option) + except ConfigParser.Error: + err_string = "No option '%s' in section [%s] " % (option, section) + if not tags: + raise ConfigParser.Error(err_string + ".") + return_vals = [] + # First, check if there are any default values set: + has_defaultvalue = False + if self.has_section(f"{section}-defaultvalues"): + return_vals.append( + self.get(f"{section}-defaultvalues", option) + ) + has_defaultvalue = True + + sub_section_list = [] + for sec_len in range(1, len(tags) + 1): + for tag_permutation in itertools.permutations(tags, sec_len): + joined_name = "-".join(tag_permutation) + sub_section_list.append(joined_name) + section_list = ["%s-%s" % (section, sb) for sb in sub_section_list] + err_section_list = [] + for sub in sub_section_list: + if self.has_section("%s-%s" % (section, sub)): + if self.has_option("%s-%s" % (section, sub), option): + err_section_list.append("%s-%s" % (section, sub)) + return_vals.append( + self.get("%s-%s" % (section, sub), option) + ) + + if has_defaultvalue and len(return_vals) > 1: + # option supplied which should overwrite the default; + # default will be first in the list, so remove it + return_vals = return_vals[1:] + + # We also want to recursively go into sections + if not return_vals: + err_string += "or in sections [%s]." % ( + "] [".join(section_list) + ) + raise ConfigParser.Error(err_string) + if len(return_vals) > 1: + err_string += ( + "and multiple entries found in sections [%s]." + % ("] [".join(err_section_list)) + ) + raise ConfigParser.Error(err_string) + return return_vals[0]
+ + +
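A sketch of the lookup order described above, where a tagged subsection beats [section-defaultvalues], which in turn is only used when neither [section] nor a tagged subsection supplies the option (all names are illustrative):
from pycbc.types.config import InterpolatingConfigParser

cp = InterpolatingConfigParser()
cp.read_dict({
    'inspiral': {'segment-length': '256'},
    'inspiral-defaultvalues': {'low-frequency-cutoff': '15'},
    'inspiral-h1': {'low-frequency-cutoff': '20'},
})
cp.get_opt_tags('inspiral', 'segment-length', ['h1'])        # '256', from [inspiral]
cp.get_opt_tags('inspiral', 'low-frequency-cutoff', ['h1'])  # '20', the tag beats the default
cp.get_opt_tags('inspiral', 'low-frequency-cutoff', ['l1'])  # '15', falls back to defaultvalues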
+[docs] + def has_option_tag(self, section, option, tag): + """ + Convenience function accessing has_option_tags() for a single tag: see + documentation for that function. + NB calling has_option_tags() directly is preferred for simplicity. + + Parameters + ----------- + self : ConfigParser object + The ConfigParser object (automatically passed when this is appended + to the ConfigParser class) + section : string + The section of the ConfigParser object to read + option : string + The ConfigParser option to look for + tag : string + The name of the subsection to look in, if not found in [section] + + Returns + -------- + Boolean + Is the option in the section or [section-tag] + """ + return self.has_option_tags(section, option, [tag])
+ + +
+[docs] + def has_option_tags(self, section, option, tags): + """ + Supplement to ConfigParser.ConfigParser.has_option(). + This will search for an option in [section] and if it doesn't find it + will also try in [section-tag] for each value in tags. + Returns True if the option is found and false if not. + + Parameters + ----------- + self : ConfigParser object + The ConfigParser object (automatically passed when this is appended + to the ConfigParser class) + section : string + The section of the ConfigParser object to read + option : string + The ConfigParser option to look for + tags : list of strings + The names of the subsection to look in, if not found in [section] + + Returns + -------- + Boolean + Is the option in the section or [section-tag] (for tag in tags) + """ + try: + self.get_opt_tags(section, option, tags) + return True + except ConfigParser.Error: + return False
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/types/frequencyseries.html b/latest/html/_modules/pycbc/types/frequencyseries.html new file mode 100644 index 00000000000..1751bb168a0 --- /dev/null +++ b/latest/html/_modules/pycbc/types/frequencyseries.html @@ -0,0 +1,803 @@ + + + + + + pycbc.types.frequencyseries — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.types.frequencyseries

+# Copyright (C) 2012  Tito Dal Canton, Josh Willis
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""
+Provides a class representing a frequency series.
+"""
+import os as _os
+import h5py
+from pycbc.types.array import Array, _convert, zeros, _noreal
+import lal as _lal
+import numpy as _numpy
+
+
+[docs] +class FrequencySeries(Array): + """Models a frequency series consisting of uniformly sampled scalar values. + + Parameters + ---------- + initial_array : array-like + Array containing sampled data. + delta_f : float + Frequency between consecutive samples in Hertz. + epoch : {None, lal.LIGOTimeGPS}, optional + Start time of the associated time domain data in seconds. + dtype : {None, data-type}, optional + Sample data type. + copy : boolean, optional + If True, samples are copied to a new array. + """ + + def __init__(self, initial_array, delta_f=None, epoch="", dtype=None, copy=True): + if len(initial_array) < 1: + raise ValueError('initial_array must contain at least one sample.') + if delta_f is None: + try: + delta_f = initial_array.delta_f + except AttributeError: + raise TypeError('must provide either an initial_array with a delta_f attribute, or a value for delta_f') + if not delta_f > 0: + raise ValueError('delta_f must be a positive number') + # We gave a nonsensical default value to epoch so we can test if it's been set. + # If the user passes in an initial_array that has an 'epoch' attribute and doesn't + # pass in a value of epoch, then our new object's epoch comes from initial_array. + # But if the user passed in a value---even 'None'---that will take precedence over + # anything set in initial_array. Finally, if the user passes in something without + # an epoch attribute *and* doesn't pass in a value of epoch, it becomes 'None' + if not isinstance(epoch,_lal.LIGOTimeGPS): + if epoch == "": + if isinstance(initial_array,FrequencySeries): + epoch = initial_array._epoch + else: + epoch = _lal.LIGOTimeGPS(0) + elif epoch is not None: + try: + if isinstance(epoch, _numpy.generic): + # In python3 lal LIGOTimeGPS will not work on numpy + # types as input. A quick google on how to generically + # convert numpy floats/ints to the python equivalent + # https://stackoverflow.com/questions/9452775/ + epoch = _lal.LIGOTimeGPS(epoch.item()) + else: + epoch = _lal.LIGOTimeGPS(epoch) + except: + raise TypeError('epoch must be either None or a lal.LIGOTimeGPS') + Array.__init__(self, initial_array, dtype=dtype, copy=copy) + self._delta_f = delta_f + self._epoch = epoch + + def _return(self, ary): + return FrequencySeries(ary, self._delta_f, epoch=self._epoch, copy=False) + + def _typecheck(self, other): + if isinstance(other, FrequencySeries): + try: + _numpy.testing.assert_almost_equal(other._delta_f, + self._delta_f) + except: + raise ValueError('different delta_f') + # consistency of _epoch is not required because we may want + # to combine frequency series estimated at different times + # (e.g. PSD estimation) + +
+[docs] + def get_delta_f(self): + """Return frequency between consecutive samples in Hertz. + """ + return self._delta_f
+ + delta_f = property(get_delta_f, + doc="Frequency between consecutive samples in Hertz.") + +
+[docs] + def get_epoch(self): + """Return frequency series epoch as a LIGOTimeGPS. + """ + return self._epoch
+ + epoch = property(get_epoch, + doc="Frequency series epoch as a LIGOTimeGPS.") + +
+[docs] + def get_sample_frequencies(self): + """Return an Array containing the sample frequencies. + """ + return Array(range(len(self))) * self._delta_f
+ + sample_frequencies = property(get_sample_frequencies, + doc="Array of the sample frequencies.") + + def _getslice(self, index): + if index.step is not None: + new_delta_f = self._delta_f * index.step + else: + new_delta_f = self._delta_f + return FrequencySeries(Array._getslice(self, index), + delta_f=new_delta_f, + epoch=self._epoch, + copy=False) + +
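A brief construction sketch tying together the properties defined above (the sample values are illustrative):
from pycbc.types import FrequencySeries

fs = FrequencySeries([0.0, 1.0, 4.0, 9.0], delta_f=0.25)
fs.delta_f              # 0.25 Hz
fs.epoch                # LIGOTimeGPS(0) by default
fs.sample_frequencies   # [0.0, 0.25, 0.5, 0.75] Hz
fs[1:3].delta_f         # still 0.25; a slice with a step would rescale it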
+[docs] + def at_frequency(self, freq): + """ Return the value at the specified frequency + """ + return self[int(freq / self.delta_f)]
+ + + @property + def start_time(self): + """Return the start time of this vector + """ + return self.epoch + + @start_time.setter + def start_time(self, time): + """ Set the start time + """ + self._epoch = _lal.LIGOTimeGPS(time) + + @property + def end_time(self): + """Return the end time of this vector + """ + return self.start_time + self.duration + + @property + def duration(self): + """Return the time duration of this vector + """ + return 1.0 / self.delta_f + + @property + def delta_t(self): + """Return the time between samples if this were a time series. + This assume the time series is even in length! + """ + return 1.0 / self.sample_rate + + @property + def sample_rate(self): + """Return the sample rate this would have in the time domain. This + assumes even length time series! + """ + return (len(self) - 1) * self.delta_f * 2.0 + + def __eq__(self,other): + """ + This is the Python special method invoked whenever the '==' + comparison is used. It will return true if the data of two + frequency series are identical, and all of the numeric meta-data + are identical, irrespective of whether or not the two + instances live in the same memory (for that comparison, the + Python statement 'a is b' should be used instead). + + Thus, this method returns 'True' if the types of both 'self' + and 'other' are identical, as well as their lengths, dtypes, + epochs, delta_fs and the data in the arrays, element by element. + It will always do the comparison on the CPU, but will *not* move + either object to the CPU if it is not already there, nor change + the scheme of either object. It is possible to compare a CPU + object to a GPU object, and the comparison should be true if the + data and meta-data of the two objects are the same. + + Note in particular that this function returns a single boolean, + and not an array of booleans as Numpy does. If the numpy + behavior is instead desired it can be obtained using the numpy() + method of the PyCBC type to get a numpy instance from each + object, and invoking '==' on those two instances. + + Parameters + ---------- + other: another Python object, that should be tested for equality + with 'self'. + + Returns + ------- + boolean: 'True' if the types, dtypes, lengths, epochs, delta_fs + and data of the two objects are each identical. + """ + if super(FrequencySeries,self).__eq__(other): + return (self._epoch == other._epoch and self._delta_f == other._delta_f) + else: + return False + +
+[docs] + def almost_equal_elem(self,other,tol,relative=True,dtol=0.0): + """ + Compare whether two frequency series are almost equal, element + by element. + + If the 'relative' parameter is 'True' (the default) then the + 'tol' parameter (which must be positive) is interpreted as a + relative tolerance, and the comparison returns 'True' only if + abs(self[i]-other[i]) <= tol*abs(self[i]) + for all elements of the series. + + If 'relative' is 'False', then 'tol' is an absolute tolerance, + and the comparison is true only if + abs(self[i]-other[i]) <= tol + for all elements of the series. + + The method also checks that self.delta_f is within 'dtol' of + other.delta_f; if 'dtol' has its default value of 0 then exact + equality between the two is required. + + Other meta-data (type, dtype, length, and epoch) must be exactly + equal. If either object's memory lives on the GPU it will be + copied to the CPU for the comparison, which may be slow. But the + original object itself will not have its memory relocated nor + scheme changed. + + Parameters + ---------- + other: another Python object, that should be tested for + almost-equality with 'self', element-by-element. + tol: a non-negative number, the tolerance, which is interpreted + as either a relative tolerance (the default) or an absolute + tolerance. + relative: A boolean, indicating whether 'tol' should be interpreted + as a relative tolerance (if True, the default if this argument + is omitted) or as an absolute tolerance (if tol is False). + dtol: a non-negative number, the tolerance for delta_f. Like 'tol', + it is interpreted as relative or absolute based on the value of + 'relative'. This parameter defaults to zero, enforcing exact + equality between the delta_f values of the two FrequencySeries. + + Returns + ------- + boolean: 'True' if the data and delta_fs agree within the tolerance, + as interpreted by the 'relative' keyword, and if the types, + lengths, dtypes, and epochs are exactly the same. + """ + # Check that the delta_f tolerance is non-negative; raise an exception + # if needed. + if (dtol < 0.0): + raise ValueError("Tolerance in delta_f cannot be negative") + if super(FrequencySeries,self).almost_equal_elem(other,tol=tol,relative=relative): + if relative: + return (self._epoch == other._epoch and + abs(self._delta_f-other._delta_f) <= dtol*self._delta_f) + else: + return (self._epoch == other._epoch and + abs(self._delta_f-other._delta_f) <= dtol) + else: + return False
+ + +
+[docs] + def almost_equal_norm(self,other,tol,relative=True,dtol=0.0): + """ + Compare whether two frequency series are almost equal, normwise. + + If the 'relative' parameter is 'True' (the default) then the + 'tol' parameter (which must be positive) is interpreted as a + relative tolerance, and the comparison returns 'True' only if + abs(norm(self-other)) <= tol*abs(norm(self)). + + If 'relative' is 'False', then 'tol' is an absolute tolerance, + and the comparison is true only if + abs(norm(self-other)) <= tol + + The method also checks that self.delta_f is within 'dtol' of + other.delta_f; if 'dtol' has its default value of 0 then exact + equality between the two is required. + + Other meta-data (type, dtype, length, and epoch) must be exactly + equal. If either object's memory lives on the GPU it will be + copied to the CPU for the comparison, which may be slow. But the + original object itself will not have its memory relocated nor + scheme changed. + + Parameters + ---------- + other: another Python object, that should be tested for + almost-equality with 'self', based on their norms. + tol: a non-negative number, the tolerance, which is interpreted + as either a relative tolerance (the default) or an absolute + tolerance. + relative: A boolean, indicating whether 'tol' should be interpreted + as a relative tolerance (if True, the default if this argument + is omitted) or as an absolute tolerance (if tol is False). + dtol: a non-negative number, the tolerance for delta_f. Like 'tol', + it is interpreted as relative or absolute based on the value of + 'relative'. This parameter defaults to zero, enforcing exact + equality between the delta_f values of the two FrequencySeries. + + Returns + ------- + boolean: 'True' if the data and delta_fs agree within the tolerance, + as interpreted by the 'relative' keyword, and if the types, + lengths, dtypes, and epochs are exactly the same. + """ + # Check that the delta_f tolerance is non-negative; raise an exception + # if needed. + if (dtol < 0.0): + raise ValueError("Tolerance in delta_f cannot be negative") + if super(FrequencySeries,self).almost_equal_norm(other,tol=tol,relative=relative): + if relative: + return (self._epoch == other._epoch and + abs(self._delta_f-other._delta_f) <= dtol*self._delta_f) + else: + return (self._epoch == other._epoch and + abs(self._delta_f-other._delta_f) <= dtol) + else: + return False
+ + +
+[docs] + @_convert + def lal(self): + """Produces a LAL frequency series object equivalent to self. + + Returns + ------- + lal_data : {lal.*FrequencySeries} + LAL frequency series object containing the same data as self. + The actual type depends on the sample's dtype. If the epoch of + self was 'None', the epoch of the returned LAL object will be + LIGOTimeGPS(0,0); otherwise, the same as that of self. + + Raises + ------ + TypeError + If frequency series is stored in GPU memory. + """ + + lal_data = None + if self._epoch is None: + ep = _lal.LIGOTimeGPS(0,0) + else: + ep = self._epoch + + if self._data.dtype == _numpy.float32: + lal_data = _lal.CreateREAL4FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) + elif self._data.dtype == _numpy.float64: + lal_data = _lal.CreateREAL8FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) + elif self._data.dtype == _numpy.complex64: + lal_data = _lal.CreateCOMPLEX8FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) + elif self._data.dtype == _numpy.complex128: + lal_data = _lal.CreateCOMPLEX16FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) + + lal_data.data.data[:] = self.numpy() + + return lal_data
+ + +
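A short sketch of the LAL conversion above, assuming lalsuite is installed (values are illustrative):
import numpy
from pycbc.types import FrequencySeries

fs = FrequencySeries(numpy.zeros(5, dtype=numpy.complex128), delta_f=1.0)
lal_fs = fs.lal()     # a lal.COMPLEX16FrequencySeries holding the same values
lal_fs.deltaF         # 1.0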
+[docs] + def save(self, path, group=None, ifo='P1'): + """ + Save frequency series to a Numpy .npy, hdf, or text file. The first column + contains the sample frequencies, the second contains the values. + In the case of a complex frequency series saved as text, the imaginary + part is written as a third column. When using hdf format, the data is stored + as a single vector, along with relevant attributes. + + Parameters + ---------- + path: string + Destination file path. Must end with either .hdf, .npy or .txt. + + group: string + Additional name for internal storage use. Ex. hdf storage uses + this as the key value. + + Raises + ------ + ValueError + If path does not end in .npy or .txt. + """ + + ext = _os.path.splitext(path)[1] + if ext == '.npy': + output = _numpy.vstack((self.sample_frequencies.numpy(), + self.numpy())).T + _numpy.save(path, output) + elif ext == '.txt': + if self.kind == 'real': + output = _numpy.vstack((self.sample_frequencies.numpy(), + self.numpy())).T + elif self.kind == 'complex': + output = _numpy.vstack((self.sample_frequencies.numpy(), + self.numpy().real, + self.numpy().imag)).T + _numpy.savetxt(path, output) + elif ext == '.xml' or path.endswith('.xml.gz'): + from pycbc.io.ligolw import make_psd_xmldoc + from ligo.lw import utils + + if self.kind != 'real': + raise ValueError('XML only supports real frequency series') + output = self.lal() + output.name = 'psd' + # When writing in this format we must *not* have the 0 values at + # frequencies less than flow. To resolve this we set the first + # non-zero value < flow. + data_lal = output.data.data + first_idx = _numpy.argmax(data_lal>0) + if not first_idx == 0: + data_lal[:first_idx] = data_lal[first_idx] + psddict = {ifo: output} + utils.write_filename( + make_psd_xmldoc(psddict), + path, + compress='auto' + ) + elif ext == '.hdf': + key = 'data' if group is None else group + with h5py.File(path, 'a') as f: + ds = f.create_dataset(key, data=self.numpy(), + compression='gzip', + compression_opts=9, shuffle=True) + if self.epoch is not None: + ds.attrs['epoch'] = float(self.epoch) + ds.attrs['delta_f'] = float(self.delta_f) + else: + raise ValueError('Path must end with .npy, .txt, .xml, .xml.gz ' + 'or .hdf')
+ + +
+[docs] + def to_frequencyseries(self): + """ Return frequency series """ + return self
+ + +
+[docs]
+    @_noreal
+    def to_timeseries(self, delta_t=None):
+        """ Return the inverse Fourier transform of this frequency series.
+
+        Note that this assumes an even-length time series!
+
+        Parameters
+        ----------
+        delta_t : {None, float}, optional
+            The time resolution of the returned series. By default the
+            resolution is determined by the length and delta_f of this
+            frequency series.
+
+        Returns
+        -------
+        TimeSeries:
+            The inverse Fourier transform of this frequency series.
+        """
+        from pycbc.fft import ifft
+        from pycbc.types import TimeSeries, real_same_precision_as
+        nat_delta_t = 1.0 / ((len(self)-1)*2) / self.delta_f
+        if not delta_t:
+            delta_t = nat_delta_t
+
+        # add 0.5 to round integer
+        tlen = int(1.0 / self.delta_f / delta_t + 0.5)
+        flen = int(tlen / 2 + 1)
+
+        if flen < len(self):
+            raise ValueError("The value of delta_t (%s) would be "
+                             "undersampled. Maximum delta_t "
+                             "is %s." % (delta_t, nat_delta_t))
+        if not delta_t:
+            tmp = self
+        else:
+            tmp = FrequencySeries(zeros(flen, dtype=self.dtype),
+                                  delta_f=self.delta_f, epoch=self.epoch)
+            tmp[:len(self)] = self[:]
+
+        f = TimeSeries(zeros(tlen,
+                             dtype=real_same_precision_as(self)),
+                       delta_t=delta_t)
+        ifft(tmp, f)
+        f._delta_t = delta_t
+        return f
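A small sketch of the inverse transform above; `fs` is assumed to be a complex FrequencySeries that came from an even-length time series:

    ts = fs.to_timeseries()                             # delta_t implied by len(fs) and delta_f
    ts_fine = fs.to_timeseries(delta_t=ts.delta_t / 2)  # zero-pad in frequency for a finer time grid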
+ + +
+[docs]
+    @_noreal
+    def cyclic_time_shift(self, dt):
+        """Shift the data and timestamps by a given number of seconds
+
+        Shift the data and timestamps in the time domain a given number of
+        seconds. To just change the time stamps, do ts.start_time += dt.
+        The time shift may be smaller than the intrinsic sample rate of the data.
+        Note that data will be cyclically rotated, so if you shift by 2
+        seconds, the final 2 seconds of your data will now be at the
+        beginning of the data set.
+
+        Parameters
+        ----------
+        dt : float
+            Amount of time to shift the vector.
+
+        Returns
+        -------
+        data : pycbc.types.FrequencySeries
+            The time shifted frequency series.
+        """
+        from pycbc.waveform import apply_fseries_time_shift
+        data = apply_fseries_time_shift(self, dt)
+        data.start_time = self.start_time - dt
+        return data
+ + +
+[docs]
+    def match(self, other, psd=None,
+              low_frequency_cutoff=None, high_frequency_cutoff=None):
+        """ Return the match between the two TimeSeries or FrequencySeries.
+
+        Return the match between two waveforms. This is equivalent to the overlap
+        maximized over time and phase. By default, the other vector will be
+        resized to match self. Beware, this may remove high frequency content or the
+        end of the vector.
+
+        Parameters
+        ----------
+        other : TimeSeries or FrequencySeries
+            The input vector containing a waveform.
+        psd : FrequencySeries
+            A power spectral density to weight the overlap.
+        low_frequency_cutoff : {None, float}, optional
+            The frequency to begin the match.
+        high_frequency_cutoff : {None, float}, optional
+            The frequency to stop the match.
+
+        Returns
+        -------
+        match: float
+            The match between the two waveforms.
+        index: int
+            The number of samples to shift to get the match.
+        """
+        from pycbc.types import TimeSeries
+        from pycbc.filter import match
+
+        if isinstance(other, TimeSeries):
+            if other.duration != self.duration:
+                other = other.copy()
+                other.resize(int(other.sample_rate * self.duration))
+
+            other = other.to_frequencyseries()
+
+        if len(other) != len(self):
+            other = other.copy()
+            other.resize(len(self))
+
+        if psd is not None and len(psd) > len(self):
+            psd = psd.copy()
+            psd.resize(len(self))
+
+        return match(self, other, psd=psd,
+                     low_frequency_cutoff=low_frequency_cutoff,
+                     high_frequency_cutoff=high_frequency_cutoff)
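Roughly how this convenience wrapper might be called; `template`, `signal` and `psd` are hypothetical series prepared elsewhere:

    m, idx = template.match(signal, psd=psd, low_frequency_cutoff=20.0)
    print('match = %.3f at a shift of %d samples' % (m, idx))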
+ + +
+[docs] + def plot(self, **kwds): + """ Basic plot of this frequency series + """ + from matplotlib import pyplot + + if self.kind == 'real': + plot = pyplot.plot(self.sample_frequencies, self, **kwds) + return plot + elif self.kind == 'complex': + plot1 = pyplot.plot(self.sample_frequencies, self.real(), **kwds) + plot2 = pyplot.plot(self.sample_frequencies, self.imag(), **kwds) + return plot1, plot2
+
+ + +
+[docs] +def load_frequencyseries(path, group=None): + """Load a FrequencySeries from an HDF5, ASCII or Numpy file. The file type + is inferred from the file extension, which must be `.hdf`, `.txt` or + `.npy`. + + For ASCII and Numpy files, the first column of the array is assumed to + contain the frequency. If the array has two columns, a real frequency + series is returned. If the array has three columns, the second and third + ones are assumed to contain the real and imaginary parts of a complex + frequency series. + + For HDF files, the dataset is assumed to contain the attribute `delta_f` + giving the frequency resolution in Hz. The attribute `epoch`, if present, + is taken as the start GPS time (epoch) of the data in the series. + + The default data types will be double precision floating point. + + Parameters + ---------- + path: string + Input file path. Must end with either `.npy`, `.txt` or `.hdf`. + + group: string + Additional name for internal storage use. When reading HDF files, this + is the path to the HDF dataset to read. + + Raises + ------ + ValueError + If the path does not end in a supported extension. + For Numpy and ASCII input files, this is also raised if the array + does not have 2 or 3 dimensions. + """ + ext = _os.path.splitext(path)[1] + if ext == '.npy': + data = _numpy.load(path) + elif ext == '.txt': + data = _numpy.loadtxt(path) + elif ext == '.hdf': + key = 'data' if group is None else group + with h5py.File(path, 'r') as f: + data = f[key][:] + delta_f = f[key].attrs['delta_f'] + epoch = f[key].attrs['epoch'] if 'epoch' in f[key].attrs else None + series = FrequencySeries(data, delta_f=delta_f, epoch=epoch) + return series + else: + raise ValueError('Path must end with .npy, .hdf, or .txt') + + delta_f = (data[-1][0] - data[0][0]) / (len(data) - 1) + if data.ndim == 2: + return FrequencySeries(data[:,1], delta_f=delta_f, epoch=None) + elif data.ndim == 3: + return FrequencySeries(data[:,1] + 1j*data[:,2], delta_f=delta_f, + epoch=None) + + raise ValueError('File has %s dimensions, cannot convert to FrequencySeries, \ + must be 2 (real) or 3 (complex)' % data.ndim)
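A read-back sketch matching the loader above; the paths mirror the hypothetical ones used in the save example earlier:

    from pycbc.types.frequencyseries import load_frequencyseries
    psd = load_frequencyseries('psd.hdf', group='psd')   # delta_f and epoch restored from attributes
    asc = load_frequencyseries('psd.txt')                # delta_f inferred from the frequency column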
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/types/optparse.html b/latest/html/_modules/pycbc/types/optparse.html new file mode 100644 index 00000000000..28ea6aabf7b --- /dev/null +++ b/latest/html/_modules/pycbc/types/optparse.html @@ -0,0 +1,711 @@ + + + + + + pycbc.types.optparse — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.types.optparse

+# Copyright (C) 2015 Ian Harry, Tito Dal Canton
+#               2022 Shichao Wu
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+This module contains extensions for use with argparse
+"""
+import copy
+import argparse
+from collections import defaultdict
+
+
+[docs]
+class DictWithDefaultReturn(defaultdict):
+    default_set = False
+    ifo_set = False
+    def __bool__(self):
+        if self.items() and not all(entry is None for entry in self.values()):
+            # True if any values are explicitly set.
+            return True
+        elif self['RANDOM_STRING_314324'] is not None:
+            # Or true if the default value was set
+            # NOTE: This stores the string RANDOM_STRING_314324 in the dict
+            # so subsequent calls will be caught in the first test here.
+            return True
+        else:
+            # Else false
+            return False
+    # Python 2 and 3 have different conventions for boolean method
+    __nonzero__ = __bool__
+ + +
+[docs] +class MultiDetOptionAction(argparse.Action): + # Initialise the same as the standard 'append' action + def __init__(self, + option_strings, + dest, + nargs='+', + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + if type is not None: + self.internal_type = type + else: + self.internal_type = str + new_default = DictWithDefaultReturn(lambda: default) + #new_default.default_value=default + if nargs == 0: + raise ValueError('nargs for append actions must be > 0; if arg ' + 'strings are not supplying the value to append, ' + 'the append const action may be more appropriate') + if const is not None and nargs != argparse.OPTIONAL: + raise ValueError('nargs must be %r to supply const' + % argparse.OPTIONAL) + super(MultiDetOptionAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=nargs, + const=const, + default=new_default, + type=str, + choices=choices, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + # Again this is modified from the standard argparse 'append' action + err_msg = "Issue with option: %s \n" %(self.dest,) + err_msg += "Received value: %s \n" %(' '.join(values),) + if getattr(namespace, self.dest, None) is None: + setattr(namespace, self.dest, DictWithDefaultReturn()) + items = getattr(namespace, self.dest) + items = copy.copy(items) + for value in values: + value = value.split(':') + if len(value) == 2: + # "Normal" case, all ifos supplied independently as "H1:VALUE" + if items.default_set: + err_msg += "If you are supplying a value for all ifos, you " + err_msg += "cannot also supply values for specific ifos." + raise ValueError(err_msg) + items[value[0]] = self.internal_type(value[1]) + items.ifo_set = True + elif len(value) == 1: + # OR supply only one value and use this for all ifos + if items.default_set: + err_msg += "If you are supplying a value for all ifos, you " + err_msg += "must only supply one value." + raise ValueError(err_msg) + # Can't use a global and ifo specific options + if items.ifo_set: + err_msg += "If you are supplying a value for all ifos, you " + err_msg += "cannot also supply values for specific ifos." + raise ValueError(err_msg) + #items.default_value = self.internal_type(value[0]) + new_default = self.internal_type(value[0]) + items.default_factory = lambda: new_default + items.default_set = True + else: + err_msg += "The character ':' is used to deliminate the " + err_msg += "ifo and the value. Please do not use it more than " + err_msg += "once." + raise ValueError(err_msg) + setattr(namespace, self.dest, items)
+ + +
+[docs]
+class MultiDetOptionActionSpecial(MultiDetOptionAction):
+    """
+    This class is an extension of the MultiDetOptionAction class to handle
+    cases where the : is already a special character. For example the channel
+    name is something like H1:CHANNEL_NAME. Here the channel name *must*
+    be provided uniquely for each ifo. The dictionary key is set to H1 and the
+    value to H1:CHANNEL_NAME for this example.
+    """
+    def __call__(self, parser, namespace, values, option_string=None):
+        # Again this is modified from the standard argparse 'append' action
+        err_msg = "Issue with option: %s \n" %(self.dest,)
+        err_msg += "Received value: %s \n" %(' '.join(values),)
+        if getattr(namespace, self.dest, None) is None:
+            setattr(namespace, self.dest, {})
+        items = getattr(namespace, self.dest)
+        items = copy.copy(items)
+        for value in values:
+            value_split = value.split(':')
+            if len(value_split) == 2:
+                # "Normal" case, all ifos supplied independently as "H1:VALUE"
+                if value_split[0] in items:
+                    err_msg += "Multiple values supplied for ifo %s.\n" \
+                               %(value_split[0],)
+                    err_msg += "Already have %s." %(items[value_split[0]])
+                    raise ValueError(err_msg)
+                else:
+                    items[value_split[0]] = value
+            elif len(value_split) == 3:
+                # This is an unadvertised feature. It is used for cases where I
+                # want to pretend H1 data is actually L1 (or similar). So if I
+                # supply --channel-name H1:L1:LDAS-STRAIN I can use L1 data and
+                # pretend it is H1 internally.
+                if value_split[0] in items:
+                    err_msg += "Multiple values supplied for ifo %s.\n" \
+                               %(value_split[0],)
+                    err_msg += "Already have %s." %(items[value_split[0]])
+                    raise ValueError(err_msg)
+                else:
+                    items[value_split[0]] = ':'.join(value_split[1:3])
+            else:
+                err_msg += "The character ':' is used to delimit the "
+                err_msg += "ifo and the value. It must appear exactly "
+                err_msg += "once."
+                raise ValueError(err_msg)
+        setattr(namespace, self.dest, items)
+ + +
+[docs] +class MultiDetMultiColonOptionAction(MultiDetOptionAction): + """A special case of `MultiDetOptionAction` which allows one to use + arguments containing colons, such as `V1:FOOBAR:1`. The first colon is + assumed to be the separator between the detector and the argument. + All subsequent colons are kept as part of the argument. Unlike + `MultiDetOptionAction`, all arguments must be prefixed by the + corresponding detector. + """ + def __call__(self, parser, namespace, values, option_string=None): + err_msg = ('Issue with option: {}\n' + 'Received value: {}\n').format(self.dest, ' '.join(values)) + if getattr(namespace, self.dest, None) is None: + setattr(namespace, self.dest, {}) + items = copy.copy(getattr(namespace, self.dest)) + for value in values: + if ':' not in value: + err_msg += ("Each argument must contain at least one ':' " + "character") + raise ValueError(err_msg) + detector, argument = value.split(':', 1) + if detector in items: + err_msg += ('Multiple values supplied for detector {},\n' + 'already have {}.') + err_msg = err_msg.format(detector, items[detector]) + raise ValueError(err_msg) + items[detector] = self.internal_type(argument) + setattr(namespace, self.dest, items)
+ + +
+[docs]
+class MultiDetOptionAppendAction(MultiDetOptionAction):
+    def __call__(self, parser, namespace, values, option_string=None):
+        # Again this is modified from the standard argparse 'append' action
+        if getattr(namespace, self.dest, None) is None:
+            setattr(namespace, self.dest, {})
+        items = getattr(namespace, self.dest)
+        items = copy.copy(items)
+        for value in values:
+            value = value.split(':')
+            if len(value) == 2:
+                # "Normal" case, all ifos supplied independently as "H1:VALUE"
+                if value[0] in items:
+                    items[value[0]].append(self.internal_type(value[1]))
+                else:
+                    items[value[0]] = [self.internal_type(value[1])]
+            else:
+                err_msg = "Issue with option: %s \n" %(self.dest,)
+                err_msg += "Received value: %s \n" %(' '.join(values),)
+                err_msg += "The character ':' is used to distinguish the "
+                err_msg += "ifo and the value. It must be given exactly once "
+                err_msg += "for all entries"
+                raise ValueError(err_msg)
+        setattr(namespace, self.dest, items)
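A hedged sketch of how the multi-detector actions above are usually attached to a parser; the option names, detectors and values are invented for illustration:

    import argparse
    from pycbc.types.optparse import MultiDetOptionAction, MultiDetOptionActionSpecial
    parser = argparse.ArgumentParser()
    parser.add_argument('--gate-threshold', action=MultiDetOptionAction,
                        type=float, nargs='+', metavar='IFO:VALUE')
    parser.add_argument('--channel-name', action=MultiDetOptionActionSpecial,
                        nargs='+', metavar='IFO:CHANNEL')
    opts = parser.parse_args(['--gate-threshold', 'H1:50', 'L1:45',
                              '--channel-name', 'H1:GDS-CALIB_STRAIN'])
    # opts.gate_threshold acts like {'H1': 50.0, 'L1': 45.0};
    # opts.channel_name maps 'H1' to the full 'H1:GDS-CALIB_STRAIN' string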
+ + +
+[docs] +class DictOptionAction(argparse.Action): + # Initialise the same as the standard 'append' action + def __init__(self, + option_strings, + dest, + nargs='+', + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + if type is not None: + self.internal_type = type + else: + self.internal_type = str + new_default = DictWithDefaultReturn(lambda: default) + if nargs == 0: + raise ValueError('nargs for append actions must be > 0; if arg ' + 'strings are not supplying the value to append, ' + 'the append const action may be more appropriate') + if const is not None and nargs != argparse.OPTIONAL: + raise ValueError('nargs must be %r to supply const' + % argparse.OPTIONAL) + super(DictOptionAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=nargs, + const=const, + default=new_default, + type=str, + choices=choices, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + # Again this is modified from the standard argparse 'append' action + err_msg = "Issue with option: %s \n" %(self.dest,) + err_msg += "Received value: %s \n" %(' '.join(values),) + if getattr(namespace, self.dest, None) is None: + setattr(namespace, self.dest, {}) + items = getattr(namespace, self.dest) + items = copy.copy(items) + for value in values: + if values == ['{}']: + break + value = value.split(':') + if len(value) == 2: + # "Normal" case, all extra arguments supplied independently + # as "param:VALUE" + items[value[0]] = self.internal_type(value[1]) + else: + err_msg += "The character ':' is used to distinguish the " + err_msg += "parameter name and the value. Please do not " + err_msg += "use it more than or less than once." + raise ValueError(err_msg) + setattr(namespace, self.dest, items)
+ + +
+[docs]
+class MultiDetDictOptionAction(DictOptionAction):
+    """A special case of `DictOptionAction` which allows arguments that carry
+    the detector (channel) name, such as `DETECTOR:PARAM:VALUE`. The text
+    before the first colon is the detector name, the text between the first
+    and second colons is the parameter name, and the text after the second
+    colon is the value. Alternatively, as with `DictOptionAction`, an
+    argument may omit the detector name (`PARAM:VALUE`), in which case the
+    same value is assumed for every detector.
+    """
+    def __call__(self, parser, namespace, values, option_string=None):
+        # Again this is modified from the standard argparse 'append' action
+        err_msg = ('Issue with option: {}\n'
+                   'Received value: {}\n').format(self.dest, ' '.join(values))
+        if getattr(namespace, self.dest, None) is None:
+            setattr(namespace, self.dest, {})
+        items = copy.copy(getattr(namespace, self.dest))
+        detector_args = {}
+        for value in values:
+            if values == ['{}']:
+                break
+            if value.count(':') == 2:
+                detector, param_value = value.split(':', 1)
+                param, val = param_value.split(':')
+                if detector not in detector_args:
+                    detector_args[detector] = {param: self.internal_type(val)}
+                if param in detector_args[detector]:
+                    err_msg += ("Multiple values supplied for the same "
+                                "parameter {} under detector {},\n"
+                                "already have {}.")
+                    err_msg = err_msg.format(param, detector,
+                                             detector_args[detector][param])
+                else:
+                    detector_args[detector][param] = self.internal_type(val)
+            elif value.count(':') == 1:
+                param, val = value.split(':')
+                for detector in getattr(namespace, 'instruments'):
+                    if detector not in detector_args:
+                        detector_args[detector] = \
+                            {param: self.internal_type(val)}
+                    if param in detector_args[detector]:
+                        err_msg += ("Multiple values supplied for the same "
+                                    "parameter {} under detector {},\n"
+                                    "already have {}.")
+                        err_msg = err_msg.format(
+                            param, detector,
+                            detector_args[detector][param])
+                    else:
+                        detector_args[detector][param] = \
+                            self.internal_type(val)
+            else:
+                err_msg += ("Use format `DETECTOR:PARAM:VALUE` for each "
+                            "detector, or use `PARAM:VALUE` for all.")
+                raise ValueError(err_msg)
+        items = detector_args
+        setattr(namespace, self.dest, items)
+ + +
+[docs] +def required_opts(opt, parser, opt_list, required_by=None): + """Check that all the opts are defined + + Parameters + ---------- + opt : object + Result of option parsing + parser : object + OptionParser instance. + opt_list : list of strings + required_by : string, optional + the option that requires these options (if applicable) + """ + for name in opt_list: + attr = name[2:].replace('-', '_') + if not hasattr(opt, attr) or (getattr(opt, attr) is None): + err_str = "%s is missing " % name + if required_by is not None: + err_str += ", required by %s" % required_by + parser.error(err_str)
+ + +
+[docs] +def required_opts_multi_ifo(opt, parser, ifo, opt_list, required_by=None): + """Check that all the opts are defined + + Parameters + ---------- + opt : object + Result of option parsing + parser : object + OptionParser instance. + ifo : string + opt_list : list of strings + required_by : string, optional + the option that requires these options (if applicable) + """ + for name in opt_list: + attr = name[2:].replace('-', '_') + try: + if getattr(opt, attr)[ifo] is None: + raise KeyError + except KeyError: + err_str = "%s is missing " % name + if required_by is not None: + err_str += ", required by %s" % required_by + parser.error(err_str)
+ + +
+[docs]
+def ensure_one_opt(opt, parser, opt_list):
+    """ Check that one and only one in the opt_list is defined in opt
+
+    Parameters
+    ----------
+    opt : object
+        Result of option parsing
+    parser : object
+        OptionParser instance.
+    opt_list : list of strings
+    """
+
+    the_one = None
+    for name in opt_list:
+        attr = name[2:].replace('-', '_')
+        if hasattr(opt, attr) and (getattr(opt, attr) is not None):
+            if the_one is None:
+                the_one = name
+            else:
+                parser.error("%s and %s are mutually exclusive" \
+                             % (the_one, name))
+
+    if the_one is None:
+        parser.error("you must supply one of the following %s" \
+                     % (', '.join(opt_list)))
+ + +
+[docs]
+def ensure_one_opt_multi_ifo(opt, parser, ifo, opt_list):
+    """ Check that one and only one in the opt_list is defined in opt
+
+    Parameters
+    ----------
+    opt : object
+        Result of option parsing
+    parser : object
+        OptionParser instance.
+    opt_list : list of strings
+    """
+
+    the_one = None
+    for name in opt_list:
+        attr = name[2:].replace('-', '_')
+        try:
+            if getattr(opt, attr)[ifo] is None:
+                raise KeyError
+        except KeyError:
+            pass
+        else:
+            if the_one is None:
+                the_one = name
+            else:
+                parser.error("%s and %s are mutually exclusive" \
+                             % (the_one, name))
+
+    if the_one is None:
+        parser.error("you must supply one of the following %s" \
+                     % (', '.join(opt_list)))
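These validators are intended to be called right after parsing; the option names below are hypothetical:

    opt = parser.parse_args()
    required_opts(opt, parser, ['--frame-type', '--channel-name'])
    ensure_one_opt(opt, parser, ['--psd-file', '--psd-estimation'])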
+ + +
+[docs] +def copy_opts_for_single_ifo(opt, ifo): + """ + Takes the namespace object (opt) from the multi-detector interface and + returns a namespace object for a single ifo that can be used with + functions expecting output from the single-detector interface. + """ + opt = copy.deepcopy(opt) + for arg, val in vars(opt).items(): + if isinstance(val, DictWithDefaultReturn) or \ + (isinstance(val, dict) and ifo in val): + setattr(opt, arg, getattr(opt, arg)[ifo]) + return opt
+ + +
+[docs] +def convert_to_process_params_dict(opt): + """ + Takes the namespace object (opt) from the multi-detector interface and + returns a dictionary of command line options that will be handled correctly + by the register_to_process_params ligolw function. + """ + opt = copy.deepcopy(opt) + for arg, val in vars(opt).items(): + if isinstance(val, DictWithDefaultReturn): + new_val = [] + for key in val.keys(): + if isinstance(val[key], list): + for item in val[key]: + if item is not None: + new_val.append(':'.join([key, str(item)])) + else: + if val[key] is not None: + new_val.append(':'.join([key, str(val[key])])) + setattr(opt, arg, new_val) + return vars(opt)
+ + +def _positive_type(s, dtype=None): + """ + Ensure argument is positive and convert type to dtype + + This is for the functions below to wrap to avoid code duplication. + """ + assert dtype is not None + err_msg = f"Input must be a positive {dtype}, not {s}" + try: + value = dtype(s) + except ValueError: + raise argparse.ArgumentTypeError(err_msg) + if value <= 0: + raise argparse.ArgumentTypeError(err_msg) + return value + +def _nonnegative_type(s, dtype=None): + """ + Ensure argument is positive or zero and convert type to dtype + + This is for the functions below to wrap to avoid code duplication. + """ + assert dtype is not None + err_msg = f"Input must be either a positive or zero {dtype}, not {s}" + try: + value = dtype(s) + except ValueError: + raise argparse.ArgumentTypeError(err_msg) + if value < 0: + raise argparse.ArgumentTypeError(err_msg) + return value + +
+[docs] +def positive_float(s): + """ + Ensure argument is a positive real number and return it as float. + + To be used as type in argparse arguments. + """ + return _positive_type(s, dtype=float)
+ + +
+[docs] +def nonnegative_float(s): + """ + Ensure argument is a positive real number or zero and return it as float. + + To be used as type in argparse arguments. + """ + return _nonnegative_type(s, dtype=float)
+ + +
+[docs] +def positive_int(s): + """ + Ensure argument is a positive integer and return it as int. + + To be used as type in argparse arguments. + """ + return _positive_type(s, dtype=int)
+ + +
+[docs] +def nonnegative_int(s): + """ + Ensure argument is a positive integer or zero and return it as int. + + To be used as type in argparse arguments. + """ + return _nonnegative_type(s, dtype=int)
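The four converters above are meant to be passed as the `type=` argument of argparse options, for example (option names are illustrative):

    parser.add_argument('--sample-rate', type=positive_int)
    parser.add_argument('--pad-data', type=nonnegative_float, default=0.0)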
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/types/timeseries.html b/latest/html/_modules/pycbc/types/timeseries.html new file mode 100644 index 00000000000..511940c4a94 --- /dev/null +++ b/latest/html/_modules/pycbc/types/timeseries.html @@ -0,0 +1,1431 @@ + + + + + + pycbc.types.timeseries — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.types.timeseries

+# Copyright (C) 2014  Tito Dal Canton, Josh Willis, Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""
+Provides a class representing a time series.
+"""
+import os as _os
+import h5py
+from pycbc.types.array import Array, _convert, complex_same_precision_as, zeros
+from pycbc.types.array import _nocomplex
+from pycbc.types.frequencyseries import FrequencySeries
+import lal as _lal
+import numpy as _numpy
+from scipy.io.wavfile import write as write_wav
+
+
+
+[docs] +class TimeSeries(Array): + """Models a time series consisting of uniformly sampled scalar values. + + Parameters + ---------- + initial_array : array-like + Array containing sampled data. + delta_t : float + Time between consecutive samples in seconds. + epoch : {None, lal.LIGOTimeGPS}, optional + Time of the first sample in seconds. + dtype : {None, data-type}, optional + Sample data type. + copy : boolean, optional + If True, samples are copied to a new array. + """ + + def __init__(self, initial_array, delta_t=None, + epoch=None, dtype=None, copy=True): + if len(initial_array) < 1: + raise ValueError('initial_array must contain at least one sample.') + if delta_t is None: + try: + delta_t = initial_array.delta_t + except AttributeError: + raise TypeError('must provide either an initial_array with a delta_t attribute, or a value for delta_t') + if not delta_t > 0: + raise ValueError('delta_t must be a positive number') + + # Get epoch from initial_array if epoch not given (or is None) + # If initialy array has no epoch, set epoch to 0. + # If epoch is provided, use that. + if not isinstance(epoch, _lal.LIGOTimeGPS): + if epoch is None: + if isinstance(initial_array, TimeSeries): + epoch = initial_array._epoch + else: + epoch = _lal.LIGOTimeGPS(0) + elif epoch is not None: + try: + epoch = _lal.LIGOTimeGPS(epoch) + except: + raise TypeError('epoch must be either None or a lal.LIGOTimeGPS') + Array.__init__(self, initial_array, dtype=dtype, copy=copy) + self._delta_t = delta_t + self._epoch = epoch + +
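A minimal construction sketch for the class above; the sample values, rate and GPS epoch are arbitrary:

    import numpy
    from pycbc.types.timeseries import TimeSeries
    ts = TimeSeries(numpy.zeros(4096), delta_t=1.0/4096, epoch=1126259462)
    print(ts.duration, ts.sample_rate, float(ts.start_time))   # 1.0 s, 4096 Hz, 1126259462.0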
+[docs] + def to_astropy(self, name='pycbc'): + """ Return an astropy.timeseries.TimeSeries instance + """ + from astropy.timeseries import TimeSeries as ATimeSeries + from astropy.time import Time + from astropy.units import s + + start = Time(float(self.start_time), format='gps', scale='utc') + delta = self.delta_t * s + return ATimeSeries({name: self.numpy()}, + time_start=start, + time_delta=delta, + n_samples=len(self))
+ + +
+[docs] + def epoch_close(self, other): + """ Check if the epoch is close enough to allow operations """ + dt = abs(float(self.start_time - other.start_time)) + return dt <= 1e-7
+ + +
+[docs] + def sample_rate_close(self, other): + """ Check if the sample rate is close enough to allow operations """ + + # compare our delta_t either to a another time series' or + # to a given sample rate (float) + if isinstance(other, TimeSeries): + odelta_t = other.delta_t + else: + odelta_t = 1.0/other + + if (odelta_t - self.delta_t) / self.delta_t > 1e-4: + return False + + if abs(1 - odelta_t / self.delta_t) * len(self) > 0.5: + return False + + return True
+ + + def _return(self, ary): + return TimeSeries(ary, self._delta_t, epoch=self._epoch, copy=False) + + def _typecheck(self, other): + if isinstance(other, TimeSeries): + if not self.sample_rate_close(other): + raise ValueError('different delta_t, {} vs {}'.format( + self.delta_t, other.delta_t)) + if not self.epoch_close(other): + raise ValueError('different epoch, {} vs {}'.format( + self.start_time, other.start_time)) + + def _getslice(self, index): + # Set the new epoch---note that index.start may also be None + if index.start is None: + new_epoch = self._epoch + else: + if index.start < 0: + raise ValueError(('Negative start index ({})' + ' not supported').format(index.start)) + new_epoch = self._epoch + index.start * self._delta_t + + if index.step is not None: + new_delta_t = self._delta_t * index.step + else: + new_delta_t = self._delta_t + + return TimeSeries(Array._getslice(self, index), new_delta_t, + new_epoch, copy=False) + + +
+[docs] + def prepend_zeros(self, num): + """Prepend num zeros onto the beginning of this TimeSeries. Update also + epoch to include this prepending. + """ + self.resize(len(self) + num) + self.roll(num) + self._epoch = self._epoch - num * self._delta_t
+ + +
+[docs] + def append_zeros(self, num): + """Append num zeros onto the end of this TimeSeries. + """ + self.resize(len(self) + num)
+ + +
+[docs] + def get_delta_t(self): + """Return time between consecutive samples in seconds. + """ + return self._delta_t
+ + delta_t = property(get_delta_t, + doc="Time between consecutive samples in seconds.") + +
+[docs] + def get_duration(self): + """Return duration of time series in seconds. + """ + return len(self) * self._delta_t
+ + duration = property(get_duration, + doc="Duration of time series in seconds.") + +
+[docs] + def get_sample_rate(self): + """Return the sample rate of the time series. + """ + return 1.0/self.delta_t
+ + sample_rate = property(get_sample_rate, + doc="The sample rate of the time series.") + +
+[docs] + def time_slice(self, start, end, mode='floor'): + """Return the slice of the time series that contains the time range + in GPS seconds. + """ + if start < self.start_time: + raise ValueError('Time series does not contain a time as early as %s' % start) + + if end > self.end_time: + raise ValueError('Time series does not contain a time as late as %s' % end) + + start_idx = float(start - self.start_time) * self.sample_rate + end_idx = float(end - self.start_time) * self.sample_rate + + if _numpy.isclose(start_idx, round(start_idx), rtol=0, atol=1E-3): + start_idx = round(start_idx) + + if _numpy.isclose(end_idx, round(end_idx), rtol=0, atol=1E-3): + end_idx = round(end_idx) + + if mode == 'floor': + start_idx = int(start_idx) + end_idx = int(end_idx) + elif mode == 'nearest': + start_idx = int(round(start_idx)) + end_idx = int(round(end_idx)) + else: + raise ValueError("Invalid mode: {}".format(mode)) + + return self[start_idx:end_idx]
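An illustrative call; `strain` is any TimeSeries long enough to contain the requested GPS range:

    # the two seconds of data starting one second after the series begins
    chunk = strain.time_slice(strain.start_time + 1, strain.start_time + 3)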
+ + + @property + def delta_f(self): + """Return the delta_f this ts would have in the frequency domain + """ + return 1.0 / self.duration + + @property + def start_time(self): + """Return time series start time as a LIGOTimeGPS. + """ + return self._epoch + + @start_time.setter + def start_time(self, time): + """ Set the start time + """ + self._epoch = _lal.LIGOTimeGPS(time) + +
+[docs] + def get_end_time(self): + """Return time series end time as a LIGOTimeGPS. + """ + return self._epoch + self.get_duration()
+ + end_time = property(get_end_time, + doc="Time series end time as a LIGOTimeGPS.") + +
+[docs] + def get_sample_times(self): + """Return an Array containing the sample times. + """ + if self._epoch is None: + return Array(range(len(self))) * self._delta_t + else: + return Array(range(len(self))) * self._delta_t + float(self._epoch)
+ + sample_times = property(get_sample_times, + doc="Array containing the sample times.") + +
+[docs] + def at_time(self, time, nearest_sample=False, + interpolate=None, extrapolate=None): + """Return the value of the TimeSeries at the specified GPS time. + + Parameters + ---------- + time: scalar or array-like + GPS time at which the value is wanted. Note that LIGOTimeGPS + objects count as scalar. + nearest_sample: bool + Return the sample at the time nearest to the chosen time rather + than rounded down. + interpolate: str, None + Return the interpolated value of the time series. Choices + are simple linear or quadratic interpolation. + extrapolate: str or float, None + Value to return if time is outside the range of the vector or + method of extrapolating the value. + """ + if nearest_sample: + time = time + self.delta_t / 2.0 + vtime = _numpy.array(time, ndmin=1) + + fill_value = None + keep_idx = None + size = len(vtime) + if extrapolate is not None: + if _numpy.isscalar(extrapolate) and _numpy.isreal(extrapolate): + fill_value = extrapolate + facl = facr = 0 + if interpolate == 'quadratic': + facl = facr = 1.1 + elif interpolate == 'linear': + facl, facr = 0.1, 1.1 + + left = (vtime >= self.start_time + self.delta_t * facl) + right = (vtime < self.end_time - self.delta_t * facr) + keep_idx = _numpy.where(left & right)[0] + vtime = vtime[keep_idx] + else: + raise ValueError(f"Unsupported extrapolate: {extrapolate}") + + fi = (vtime - float(self.start_time)) * self.sample_rate + i = _numpy.asarray(_numpy.floor(fi)).astype(int) + di = fi - i + + if interpolate == 'linear': + a = self[i] + b = self[i+1] + ans = a + (b - a) * di + elif interpolate == 'quadratic': + c = self.data[i] + xr = self.data[i + 1] - c + xl = self.data[i - 1] - c + a = 0.5 * (xr + xl) + b = 0.5 * (xr - xl) + ans = a * di**2.0 + b * di + c + else: + ans = self[i] + + ans = _numpy.array(ans, ndmin=1) + if fill_value is not None: + old = ans + ans = _numpy.zeros(size) + fill_value + ans[keep_idx] = old + ans = _numpy.array(ans, ndmin=1) + + if _numpy.ndim(time) == 0: + return ans[0] + return ans
+ + + at_times = at_time + + def __eq__(self,other): + """ + This is the Python special method invoked whenever the '==' + comparison is used. It will return true if the data of two + time series are identical, and all of the numeric meta-data + are identical, irrespective of whether or not the two + instances live in the same memory (for that comparison, the + Python statement 'a is b' should be used instead). + + Thus, this method returns 'True' if the types of both 'self' + and 'other' are identical, as well as their lengths, dtypes, + epochs, delta_ts and the data in the arrays, element by element. + It will always do the comparison on the CPU, but will *not* move + either object to the CPU if it is not already there, nor change + the scheme of either object. It is possible to compare a CPU + object to a GPU object, and the comparison should be true if the + data and meta-data of the two objects are the same. + + Note in particular that this function returns a single boolean, + and not an array of booleans as Numpy does. If the numpy + behavior is instead desired it can be obtained using the numpy() + method of the PyCBC type to get a numpy instance from each + object, and invoking '==' on those two instances. + + Parameters + ---------- + other: another Python object, that should be tested for equality + with 'self'. + + Returns + ------- + boolean: 'True' if the types, dtypes, lengths, epochs, delta_ts + and data of the two objects are each identical. + """ + if super(TimeSeries,self).__eq__(other): + return (self._epoch == other._epoch and self._delta_t == other._delta_t) + else: + return False + +
+[docs] + def almost_equal_elem(self,other,tol,relative=True,dtol=0.0): + """ + Compare whether two time series are almost equal, element + by element. + + If the 'relative' parameter is 'True' (the default) then the + 'tol' parameter (which must be positive) is interpreted as a + relative tolerance, and the comparison returns 'True' only if + abs(self[i]-other[i]) <= tol*abs(self[i]) + for all elements of the series. + + If 'relative' is 'False', then 'tol' is an absolute tolerance, + and the comparison is true only if + abs(self[i]-other[i]) <= tol + for all elements of the series. + + The method also checks that self.delta_t is within 'dtol' of + other.delta_t; if 'dtol' has its default value of 0 then exact + equality between the two is required. + + Other meta-data (type, dtype, length, and epoch) must be exactly + equal. If either object's memory lives on the GPU it will be + copied to the CPU for the comparison, which may be slow. But the + original object itself will not have its memory relocated nor + scheme changed. + + Parameters + ---------- + other: another Python object, that should be tested for + almost-equality with 'self', element-by-element. + tol: a non-negative number, the tolerance, which is interpreted + as either a relative tolerance (the default) or an absolute + tolerance. + relative: A boolean, indicating whether 'tol' should be interpreted + as a relative tolerance (if True, the default if this argument + is omitted) or as an absolute tolerance (if tol is False). + dtol: a non-negative number, the tolerance for delta_t. Like 'tol', + it is interpreted as relative or absolute based on the value of + 'relative'. This parameter defaults to zero, enforcing exact + equality between the delta_t values of the two TimeSeries. + + Returns + ------- + boolean: 'True' if the data and delta_ts agree within the tolerance, + as interpreted by the 'relative' keyword, and if the types, + lengths, dtypes, and epochs are exactly the same. + """ + # Check that the delta_t tolerance is non-negative; raise an exception + # if needed. + if (dtol < 0.0): + raise ValueError("Tolerance in delta_t cannot be negative") + if super(TimeSeries,self).almost_equal_elem(other,tol=tol,relative=relative): + if relative: + return (self._epoch == other._epoch and + abs(self._delta_t-other._delta_t) <= dtol*self._delta_t) + else: + return (self._epoch == other._epoch and + abs(self._delta_t-other._delta_t) <= dtol) + else: + return False
+ + +
+[docs] + def almost_equal_norm(self,other,tol,relative=True,dtol=0.0): + """ + Compare whether two time series are almost equal, normwise. + + If the 'relative' parameter is 'True' (the default) then the + 'tol' parameter (which must be positive) is interpreted as a + relative tolerance, and the comparison returns 'True' only if + abs(norm(self-other)) <= tol*abs(norm(self)). + + If 'relative' is 'False', then 'tol' is an absolute tolerance, + and the comparison is true only if + abs(norm(self-other)) <= tol + + The method also checks that self.delta_t is within 'dtol' of + other.delta_t; if 'dtol' has its default value of 0 then exact + equality between the two is required. + + Other meta-data (type, dtype, length, and epoch) must be exactly + equal. If either object's memory lives on the GPU it will be + copied to the CPU for the comparison, which may be slow. But the + original object itself will not have its memory relocated nor + scheme changed. + + Parameters + ---------- + other: another Python object, that should be tested for + almost-equality with 'self', based on their norms. + tol: a non-negative number, the tolerance, which is interpreted + as either a relative tolerance (the default) or an absolute + tolerance. + relative: A boolean, indicating whether 'tol' should be interpreted + as a relative tolerance (if True, the default if this argument + is omitted) or as an absolute tolerance (if tol is False). + dtol: a non-negative number, the tolerance for delta_t. Like 'tol', + it is interpreted as relative or absolute based on the value of + 'relative'. This parameter defaults to zero, enforcing exact + equality between the delta_t values of the two TimeSeries. + + Returns + ------- + boolean: 'True' if the data and delta_ts agree within the tolerance, + as interpreted by the 'relative' keyword, and if the types, + lengths, dtypes, and epochs are exactly the same. + """ + # Check that the delta_t tolerance is non-negative; raise an exception + # if needed. + if (dtol < 0.0): + raise ValueError("Tolerance in delta_t cannot be negative") + if super(TimeSeries,self).almost_equal_norm(other,tol=tol,relative=relative): + if relative: + return (self._epoch == other._epoch and + abs(self._delta_t-other._delta_t) <= dtol*self._delta_t) + else: + return (self._epoch == other._epoch and + abs(self._delta_t-other._delta_t) <= dtol) + else: + return False
+ + +
+[docs] + @_convert + def lal(self): + """Produces a LAL time series object equivalent to self. + + Returns + ------- + lal_data : {lal.*TimeSeries} + LAL time series object containing the same data as self. + The actual type depends on the sample's dtype. If the epoch of + self is 'None', the epoch of the returned LAL object will be + LIGOTimeGPS(0,0); otherwise, the same as that of self. + + Raises + ------ + TypeError + If time series is stored in GPU memory. + """ + lal_data = None + ep = self._epoch + + if self._data.dtype == _numpy.float32: + lal_data = _lal.CreateREAL4TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self)) + elif self._data.dtype == _numpy.float64: + lal_data = _lal.CreateREAL8TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self)) + elif self._data.dtype == _numpy.complex64: + lal_data = _lal.CreateCOMPLEX8TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self)) + elif self._data.dtype == _numpy.complex128: + lal_data = _lal.CreateCOMPLEX16TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self)) + + lal_data.data.data[:] = self.numpy() + + return lal_data
+ + +
+[docs] + def crop(self, left, right): + """ Remove given seconds from either end of time series + + Parameters + ---------- + left : float + Number of seconds of data to remove from the left of the time series. + right : float + Number of seconds of data to remove from the right of the time series. + + Returns + ------- + cropped : pycbc.types.TimeSeries + The reduced time series + """ + if left + right > self.duration: + raise ValueError('Cannot crop more data than we have') + + s = int(left * self.sample_rate) + e = len(self) - int(right * self.sample_rate) + return self[s:e]
+ + +
+[docs] + def save_to_wav(self, file_name): + """ Save this time series to a wav format audio file. + + Parameters + ---------- + file_name : string + The output file name + """ + scaled = _numpy.int16(self.numpy()/max(abs(self)) * 32767) + write_wav(file_name, int(self.sample_rate), scaled)
+ + +
+[docs] + def psd(self, segment_duration, **kwds): + """ Calculate the power spectral density of this time series. + + Use the `pycbc.psd.welch` method to estimate the psd of this time segment. + For more complete options, please see that function. + + Parameters + ---------- + segment_duration: float + Duration in seconds to use for each sample of the spectrum. + kwds : keywords + Additional keyword arguments are passed on to the `pycbc.psd.welch` method. + + Returns + ------- + psd : FrequencySeries + Frequency series containing the estimated PSD. + """ + from pycbc.psd import welch + seg_len = int(round(segment_duration * self.sample_rate)) + seg_stride = int(seg_len / 2) + return welch(self, seg_len=seg_len, + seg_stride=seg_stride, + **kwds)
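A short example of the estimator above; the 4 s segment duration is arbitrary and `strain` is a hypothetical data segment of sufficient length:

    p = strain.psd(4)
    print(p.delta_f)   # 0.25 Hz for 4 s Welch segments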
+ + +
+[docs]
+    def gate(self, time, window=0.25, method='taper', copy=True,
+             taper_width=0.25, invpsd=None):
+        """ Gate out portion of time series
+
+        Parameters
+        ----------
+        time: float
+            Central time of the gate in seconds
+        window: float
+            Half-length in seconds to remove data around gate time.
+        method: str
+            Method to apply gate, options are 'hard', 'taper', and 'paint'.
+        copy: bool
+            If False, do operations inplace to this time series, else return
+            new time series.
+        taper_width: float
+            Length of tapering region on either side of excised data. Only
+            applies to the taper gating method.
+        invpsd: pycbc.types.FrequencySeries
+            The inverse PSD to use for painting method. If not given,
+            a PSD is generated using default settings.
+
+        Returns
+        -------
+        data: pycbc.types.TimeSeries
+            Gated time series
+        """
+        data = self.copy() if copy else self
+        if method == 'taper':
+            from pycbc.strain import gate_data
+            return gate_data(data, [(time, window, taper_width)])
+        elif method == 'paint':
+            # Uses the hole-filling method of
+            # https://arxiv.org/pdf/1908.05644.pdf
+            from pycbc.strain.gate import gate_and_paint
+            from pycbc.waveform.utils import apply_fd_time_shift
+            if invpsd is None:
+                # These are some bare minimum settings, normally you
+                # should probably provide a psd
+                invpsd = 1. / self.filter_psd(self.duration/32, self.delta_f, 0)
+            lindex = int((time - window - self.start_time) / self.delta_t)
+            rindex = int((time + window - self.start_time) / self.delta_t)
+            lindex = lindex if lindex >= 0 else 0
+            rindex = rindex if rindex <= len(self) else len(self)
+            rindex_time = float(self.start_time + rindex * self.delta_t)
+            offset = rindex_time - (time + window)
+            if offset == 0:
+                return gate_and_paint(data, lindex, rindex, invpsd, copy=False)
+            else:
+                # time shift such that gate end time lands on a specific data sample
+                fdata = data.to_frequencyseries()
+                fdata = apply_fd_time_shift(fdata, offset + fdata.epoch, copy=False)
+                # gate and paint in time domain
+                data = fdata.to_timeseries()
+                data = gate_and_paint(data, lindex, rindex, invpsd, copy=False)
+                # shift back to the original time
+                fdata = data.to_frequencyseries()
+                fdata = apply_fd_time_shift(fdata, -offset + fdata.epoch, copy=False)
+                tdata = fdata.to_timeseries()
+                return tdata
+        elif method == 'hard':
+            tslice = data.time_slice(time - window, time + window)
+            tslice[:] = 0
+            return data
+        else:
+            raise ValueError('Invalid method name: {}'.format(method))
+ + +
+[docs] + def filter_psd(self, segment_duration, delta_f, flow): + """ Calculate the power spectral density of this time series. + + Use the `pycbc.psd.welch` method to estimate the psd of this time segment. + The psd is then truncated in the time domain to the segment duration + and interpolated to the requested sample frequency. + + Parameters + ---------- + segment_duration: float + Duration in seconds to use for each sample of the spectrum. + delta_f : float + Frequency spacing to return psd at. + flow : float + The low frequency cutoff to apply when truncating the inverse + spectrum. + + Returns + ------- + psd : FrequencySeries + Frequency series containing the estimated PSD. + """ + from pycbc.psd import interpolate, inverse_spectrum_truncation + p = self.psd(segment_duration) + samples = int(round(p.sample_rate * segment_duration)) + p = interpolate(p, delta_f) + return inverse_spectrum_truncation(p, samples, + low_frequency_cutoff=flow, + trunc_method='hann')
+ + +
+[docs] + def whiten(self, segment_duration, max_filter_duration, trunc_method='hann', + remove_corrupted=True, low_frequency_cutoff=None, + return_psd=False, **kwds): + """ Return a whitened time series + + Parameters + ---------- + segment_duration: float + Duration in seconds to use for each sample of the spectrum. + max_filter_duration : int + Maximum length of the time-domain filter in seconds. + trunc_method : {None, 'hann'} + Function used for truncating the time-domain filter. + None produces a hard truncation at `max_filter_len`. + remove_corrupted : {True, boolean} + If True, the region of the time series corrupted by the whitening + is excised before returning. If false, the corrupted regions + are not excised and the full time series is returned. + low_frequency_cutoff : {None, float} + Low frequency cutoff to pass to the inverse spectrum truncation. + This should be matched to a known low frequency cutoff of the + data if there is one. + return_psd : {False, Boolean} + Return the estimated and conditioned PSD that was used to whiten + the data. + kwds : keywords + Additional keyword arguments are passed on to the `pycbc.psd.welch` method. + + Returns + ------- + whitened_data : TimeSeries + The whitened time series + """ + from pycbc.psd import inverse_spectrum_truncation, interpolate + # Estimate the noise spectrum + psd = self.psd(segment_duration, **kwds) + psd = interpolate(psd, self.delta_f) + max_filter_len = int(round(max_filter_duration * self.sample_rate)) + + # Interpolate and smooth to the desired corruption length + psd = inverse_spectrum_truncation(psd, + max_filter_len=max_filter_len, + low_frequency_cutoff=low_frequency_cutoff, + trunc_method=trunc_method) + + # Whiten the data by the asd + white = (self.to_frequencyseries() / psd**0.5).to_timeseries() + + if remove_corrupted: + white = white[int(max_filter_len/2):int(len(self)-max_filter_len/2)] + + if return_psd: + return white, psd + + return white
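Typical-looking calls for the whitener above; the 4 s spectrum segments and 4 s maximum filter length are illustrative values, not recommendations:

    white = strain.whiten(4, 4, low_frequency_cutoff=20.0)
    white, cond_psd = strain.whiten(4, 4, return_psd=True)   # also return the conditioned PSD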
+ + +
+[docs] + def qtransform(self, delta_t=None, delta_f=None, logfsteps=None, + frange=None, qrange=(4,64), mismatch=0.2, return_complex=False): + """ Return the interpolated 2d qtransform of this data + + Parameters + ---------- + delta_t : {self.delta_t, float} + The time resolution to interpolate to + delta_f : float, Optional + The frequency resolution to interpolate to + logfsteps : int + Do a log interpolation (incompatible with delta_f option) and set + the number of steps to take. + frange : {(30, nyquist*0.8), tuple of ints} + frequency range + qrange : {(4, 64), tuple} + q range + mismatch : float + Mismatch between frequency tiles + return_complex: {False, bool} + return the raw complex series instead of the normalized power. + + Returns + ------- + times : numpy.ndarray + The time that the qtransform is sampled. + freqs : numpy.ndarray + The frequencies that the qtransform is sampled. + qplane : numpy.ndarray (2d) + The two dimensional interpolated qtransform of this time series. + """ + from pycbc.filter.qtransform import qtiling, qplane + from scipy.interpolate import RectBivariateSpline as interp2d + + if frange is None: + frange = (30, int(self.sample_rate / 2 * 8)) + + q_base = qtiling(self, qrange, frange, mismatch) + _, times, freqs, q_plane = qplane(q_base, self.to_frequencyseries(), + return_complex=return_complex) + if logfsteps and delta_f: + raise ValueError("Provide only one (or none) of delta_f and logfsteps") + + # Interpolate if requested + if delta_f or delta_t or logfsteps: + if return_complex: + interp_amp = interp2d(freqs, times, abs(q_plane), kx=1, ky=1) + interp_phase = interp2d(freqs, times, _numpy.angle(q_plane), + kx=1, ky=1) + else: + interp = interp2d(freqs, times, q_plane, kx=1, ky=1) + + if delta_t: + times = _numpy.arange(float(self.start_time), + float(self.end_time), delta_t) + if delta_f: + freqs = _numpy.arange(int(frange[0]), int(frange[1]), delta_f) + if logfsteps: + freqs = _numpy.logspace(_numpy.log10(frange[0]), + _numpy.log10(frange[1]), + logfsteps) + + if delta_f or delta_t or logfsteps: + if return_complex: + q_plane = _numpy.exp(1.0j * interp_phase(freqs, times)) + q_plane *= interp_amp(freqs, times) + else: + q_plane = interp(freqs, times) + + return times, freqs, q_plane
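One plausible call of the q-transform above, assuming `strain` is sampled well above 1 kHz; the tile settings are illustrative:

    times, freqs, power = strain.qtransform(delta_t=0.001, logfsteps=200,
                                            qrange=(8, 8), frange=(20, 512))
    # rows of `power` correspond to frequencies, columns to times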
+ + +
+[docs]
+    def notch_fir(self, f1, f2, order, beta=5.0, remove_corrupted=True):
+        """ Notch filter the time series using an FIR filter generated from
+        the ideal response passed through a time-domain kaiser
+        window (beta = 5.0)
+
+        The suppression of the notch filter is related to the bandwidth and
+        the number of samples in the filter length. For a few Hz bandwidth,
+        a length corresponding to a few seconds is typically
+        required to create significant suppression in the notched band.
+
+        Parameters
+        ----------
+        Time Series: TimeSeries
+            The time series to be notched.
+        f1: float
+            The start of the frequency suppression.
+        f2: float
+            The end of the frequency suppression.
+        order: int
+            Number of corrupted samples on each side of the time series
+        beta: float
+            Beta parameter of the kaiser window that sets the side lobe attenuation.
+        remove_corrupted : {True, boolean}
+            If True, the region of the time series corrupted by the filtering
+            is excised before returning. If false, the corrupted regions
+            are not excised and the full time series is returned.
+        """
+        from pycbc.filter import notch_fir
+        ts = notch_fir(self, f1, f2, order, beta=beta)
+        if remove_corrupted:
+            ts = ts[order:len(ts)-order]
+        return ts
+ + +
+[docs]
+    def lowpass_fir(self, frequency, order, beta=5.0, remove_corrupted=True):
+        """ Lowpass filter the time series using an FIR filter generated from
+        the ideal response passed through a kaiser window (beta = 5.0)
+
+        Parameters
+        ----------
+        Time Series: TimeSeries
+            The time series to be low-passed.
+        frequency: float
+            The frequency above which the signal is suppressed.
+        order: int
+            Number of corrupted samples on each side of the time series
+        beta: float
+            Beta parameter of the kaiser window that sets the side lobe attenuation.
+        remove_corrupted : {True, boolean}
+            If True, the region of the time series corrupted by the filtering
+            is excised before returning. If false, the corrupted regions
+            are not excised and the full time series is returned.
+        """
+        from pycbc.filter import lowpass_fir
+        ts = lowpass_fir(self, frequency, order, beta=beta)
+        if remove_corrupted:
+            ts = ts[order:len(ts)-order]
+        return ts
+ + +
+[docs]
+    def highpass_fir(self, frequency, order, beta=5.0, remove_corrupted=True):
+        """ Highpass filter the time series using an FIR filter generated from
+        the ideal response passed through a kaiser window (beta = 5.0)
+
+        Parameters
+        ----------
+        Time Series: TimeSeries
+            The time series to be high-passed.
+        frequency: float
+            The frequency below which the signal is suppressed.
+        order: int
+            Number of corrupted samples on each side of the time series
+        beta: float
+            Beta parameter of the kaiser window that sets the side lobe attenuation.
+        remove_corrupted : {True, boolean}
+            If True, the region of the time series corrupted by the filtering
+            is excised before returning. If false, the corrupted regions
+            are not excised and the full time series is returned.
+        """
+        from pycbc.filter import highpass_fir
+        ts = highpass_fir(self, frequency, order, beta=beta)
+        if remove_corrupted:
+            ts = ts[order:len(ts)-order]
+        return ts
+ + +
+[docs]
+    def fir_zero_filter(self, coeff):
+        """Filter the timeseries with a set of FIR coefficients
+
+        Parameters
+        ----------
+        coeff: numpy.ndarray
+            FIR coefficients. Should be an odd length and symmetric.
+
+        Returns
+        -------
+        filtered_series: pycbc.types.TimeSeries
+            Return the filtered timeseries, which has been properly shifted to account
+            for the FIR filter delay and the corrupted regions zeroed out.
+        """
+        from pycbc.filter import fir_zero_filter
+        return self._return(fir_zero_filter(coeff, self))
+ + +
+[docs]
+    def resample(self, delta_t):
+        """ Resample this time series to the new delta_t
+
+        Parameters
+        ----------
+        delta_t: float
+            The time step to resample the time series to.
+
+        Returns
+        -------
+        resampled_ts: pycbc.types.TimeSeries
+            The resampled time series at the new time interval delta_t.
+        """
+        from pycbc.filter import resample_to_delta_t
+        return resample_to_delta_t(self, delta_t)
+ + +
+[docs] + def save(self, path, group = None): + """ + Save time series to a Numpy .npy, hdf, or text file. The first column + contains the sample times, the second contains the values. + In the case of a complex time series saved as text, the imaginary + part is written as a third column. When using hdf format, the data is stored + as a single vector, along with relevant attributes. + + Parameters + ---------- + path: string + Destination file path. Must end with either .hdf, .npy or .txt. + + group: string + Additional name for internal storage use. Ex. hdf storage uses + this as the key value. + + Raises + ------ + ValueError + If path does not end in .npy or .txt. + """ + + ext = _os.path.splitext(path)[1] + if ext == '.npy': + output = _numpy.vstack((self.sample_times.numpy(), self.numpy())).T + _numpy.save(path, output) + elif ext == '.txt': + if self.kind == 'real': + output = _numpy.vstack((self.sample_times.numpy(), + self.numpy())).T + elif self.kind == 'complex': + output = _numpy.vstack((self.sample_times.numpy(), + self.numpy().real, + self.numpy().imag)).T + _numpy.savetxt(path, output) + elif ext =='.hdf': + key = 'data' if group is None else group + with h5py.File(path, 'a') as f: + ds = f.create_dataset(key, data=self.numpy(), + compression='gzip', + compression_opts=9, shuffle=True) + ds.attrs['start_time'] = float(self.start_time) + ds.attrs['delta_t'] = float(self.delta_t) + else: + raise ValueError('Path must end with .npy, .txt or .hdf')
+ + +
+[docs] + def to_timeseries(self): + """ Return time series""" + return self
+ + +
+[docs] + @_nocomplex + def to_frequencyseries(self, delta_f=None): + """ Return the Fourier transform of this time series + + Parameters + ---------- + delta_f : {None, float}, optional + The frequency resolution of the returned frequency series. By + default the resolution is determined by the duration of the timeseries. + + Returns + ------- + FrequencySeries: + The fourier transform of this time series. + """ + from pycbc.fft import fft + if not delta_f: + delta_f = 1.0 / self.duration + + # add 0.5 to round integer + tlen = int(1.0 / delta_f / self.delta_t + 0.5) + flen = int(tlen / 2 + 1) + + if tlen < len(self): + raise ValueError("The value of delta_f (%s) would be " + "undersampled. Maximum delta_f " + "is %s." % (delta_f, 1.0 / self.duration)) + if not delta_f: + tmp = self + else: + tmp = TimeSeries(zeros(tlen, dtype=self.dtype), + delta_t=self.delta_t, epoch=self.start_time) + tmp[:len(self)] = self[:] + + f = FrequencySeries(zeros(flen, + dtype=complex_same_precision_as(self)), + delta_f=delta_f) + fft(tmp, f) + f._delta_f = delta_f + return f
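A quick round-trip check (the array length and sample rate are arbitrary): with the default delta_f = 1/duration the forward and inverse transforms recover the original samples to numerical precision.

import numpy
from pycbc.types.timeseries import TimeSeries

ts = TimeSeries(numpy.random.normal(size=4 * 2048), delta_t=1.0 / 2048)

fs = ts.to_frequencyseries()      # delta_f defaults to 1 / duration = 0.25 Hz here
back = fs.to_timeseries()

print(fs.delta_f, len(fs))
print(numpy.allclose(ts.numpy(), back.numpy()))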
+ + +
+[docs] + def inject(self, other, copy=True): + """Return copy of self with other injected into it. + + The other vector will be resized and time shifted with sub-sample + precision before adding. This assumes that one can assume zeros + outside of the original vector range. + """ + # only handle equal sample rate for now. + if not self.sample_rate_close(other): + raise ValueError('Sample rate must be the same') + # determine if we want to inject in place or not + if copy: + ts = self.copy() + else: + ts = self + # Other is disjoint + if ((other.start_time >= ts.end_time) or + (ts.start_time > other.end_time)): + return ts + + other = other.copy() + dt = float((other.start_time - ts.start_time) * ts.sample_rate) + + # This coaligns other to the time stepping of self + if not dt.is_integer(): + diff = (dt - _numpy.floor(dt)) * ts.delta_t + + # insert zeros at end + other.resize(len(other) + (len(other) + 1) % 2 + 1) + + # fd shift to the right + other = other.cyclic_time_shift(diff) + + # get indices of other with respect to self + # this is already an integer to floating point precission + left = float(other.start_time - ts.start_time) * ts.sample_rate + left = int(round(left)) + right = left + len(other) + + oleft = 0 + oright = len(other) + + # other overhangs on left so truncate + if left < 0: + oleft = -left + left = 0 + + # other overhangs on right so truncate + if right > len(ts): + oright = len(other) - (right - len(ts)) + right = len(ts) + + ts[left:right] += other[oleft:oright] + return ts
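A sketch of adding a short, later-starting signal into a longer stretch of data; all numbers (sample rate, epochs, burst shape) are invented for the example:

import numpy
from pycbc.types.timeseries import TimeSeries

delta_t = 1.0 / 1024
data = TimeSeries(numpy.random.normal(size=16 * 1024), delta_t=delta_t, epoch=0)

# A 1 s sine-Gaussian burst whose start time is 5 s after the data starts
t = numpy.arange(0, 1, delta_t)
burst = numpy.sin(2 * numpy.pi * 100 * t) * numpy.exp(-((t - 0.5) / 0.1) ** 2)
signal = TimeSeries(burst, delta_t=delta_t, epoch=5)

with_signal = data.inject(signal)   # returns a copy; pass copy=False to add in place
print(len(with_signal) == len(data))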
+ + + add_into = inject # maintain backwards compatibility for now + +
+[docs] + @_nocomplex + def cyclic_time_shift(self, dt): + """Shift the data and timestamps by a given number of seconds + + Shift the data and timestamps in the time domain a given number of + seconds. To just change the time stamps, do ts.start_time += dt. + The time shift may be smaller than the intrinsic sample spacing of the data. + Note that data will be cyclically rotated, so if you shift by 2 + seconds, the final 2 seconds of your data will now be at the + beginning of the data set. + + Parameters + ---------- + dt : float + Amount of time to shift the vector. + + Returns + ------- + data : pycbc.types.TimeSeries + The time shifted time series. + """ + # We do this in the frequency domain to allow us to do sub-sample + # time shifts. This also results in the shift being circular. It + # is left to a future update to do a faster implementation in the case + # where the time shift can be done with an exact number of samples. + return self.to_frequencyseries().cyclic_time_shift(dt).to_timeseries()
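For instance (the duration and sample rate are arbitrary), shifting by 2 s wraps the final 2 s of data around to the start, as described in the docstring:

import numpy
from pycbc.types.timeseries import TimeSeries

ts = TimeSeries(numpy.random.normal(size=8 * 256), delta_t=1.0 / 256)
shifted = ts.cyclic_time_shift(2.0)

# The data are rotated, not truncated, so the length is unchanged
print(len(shifted) == len(ts))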
+ + +
+[docs] + def match(self, other, psd=None, + low_frequency_cutoff=None, high_frequency_cutoff=None): + """ Return the match between the two TimeSeries or FrequencySeries. + + Return the match between two waveforms. This is equivalent to the overlap + maximized over time and phase. By default, the other vector will be + resized to match self. This may remove high frequency content or the + end of the vector. + + Parameters + ---------- + other : TimeSeries or FrequencySeries + The input vector containing a waveform. + psd : Frequency Series + A power spectral density to weight the overlap. + low_frequency_cutoff : {None, float}, optional + The frequency to begin the match. + high_frequency_cutoff : {None, float}, optional + The frequency to stop the match. + + Returns + ------- + match: float + index: int + The number of samples to shift to get the match. + """ + return self.to_frequencyseries().match(other, psd=psd, + low_frequency_cutoff=low_frequency_cutoff, + high_frequency_cutoff=high_frequency_cutoff)
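A minimal sketch of comparing two waveforms; the approximant name and masses are purely illustrative inputs (waveform generation here assumes lalsimulation is available):

from pycbc.waveform import get_td_waveform

hp1, _ = get_td_waveform(approximant='IMRPhenomD', mass1=30, mass2=30,
                         delta_t=1.0 / 4096, f_lower=20)
hp2, _ = get_td_waveform(approximant='IMRPhenomD', mass1=31, mass2=29,
                         delta_t=1.0 / 4096, f_lower=20)

# Overlap maximized over time and phase; no PSD weighting in this sketch
m, idx = hp1.match(hp2, low_frequency_cutoff=20)
print(m)   # close to, but below, 1 for nearby signals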
+ + +
+[docs] + def detrend(self, type='linear'): + """ Remove linear trend from the data + + Remove a linear trend from the data to improve the approximation that + the data is circularly convolved; this helps reduce the size of filter + transients from a circular convolution / filter. + + Parameters + ---------- + type: str + The choice of detrending. The default ('linear') removes a linear + least squares fit. 'constant' removes only the mean of the data. + """ + from scipy.signal import detrend + return self._return(detrend(self.numpy(), type=type))
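A tiny example (the ramp slope and noise level are made up): removing a linear drift before filtering.

import numpy
from pycbc.types.timeseries import TimeSeries

delta_t = 1.0 / 256
t = numpy.arange(0, 4, delta_t)
drifting = TimeSeries(0.5 * t + numpy.random.normal(scale=0.1, size=len(t)),
                      delta_t=delta_t)

flat = drifting.detrend('linear')   # remove the best-fit linear ramp
print(drifting.numpy().mean(), flat.numpy().mean())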
+ + +
+[docs] + def plot(self, **kwds): + """ Basic plot of this time series + """ + from matplotlib import pyplot + + if self.kind == 'real': + plot = pyplot.plot(self.sample_times, self, **kwds) + return plot + elif self.kind == 'complex': + plot1 = pyplot.plot(self.sample_times, self.real(), **kwds) + plot2 = pyplot.plot(self.sample_times, self.imag(), **kwds) + return plot1, plot2
+
+ + +
+[docs] +def load_timeseries(path, group=None): + """Load a TimeSeries from an HDF5, ASCII or Numpy file. The file type is + inferred from the file extension, which must be `.hdf`, `.txt` or `.npy`. + + For ASCII and Numpy files, the first column of the array is assumed to + contain the sample times. If the array has two columns, a real-valued time + series is returned. If the array has three columns, the second and third + ones are assumed to contain the real and imaginary parts of a complex time + series. + + For HDF files, the dataset is assumed to contain the attributes `delta_t` + and `start_time`, which should contain respectively the sampling period in + seconds and the start GPS time of the data. + + The default data types will be double precision floating point. + + Parameters + ---------- + path: string + Input file path. Must end with either `.npy`, `.txt` or `.hdf`. + + group: string + Additional name for internal storage use. When reading HDF files, this + is the path to the HDF dataset to read. + + Raises + ------ + ValueError + If path does not end in a supported extension. + For Numpy and ASCII input files, this is also raised if the array + does not have 2 or 3 dimensions. + """ + ext = _os.path.splitext(path)[1] + if ext == '.npy': + data = _numpy.load(path) + elif ext == '.txt': + data = _numpy.loadtxt(path) + elif ext == '.hdf': + key = 'data' if group is None else group + with h5py.File(path, 'r') as f: + data = f[key][:] + series = TimeSeries(data, delta_t=f[key].attrs['delta_t'], + epoch=f[key].attrs['start_time']) + return series + else: + raise ValueError('Path must end with .npy, .hdf, or .txt') + + delta_t = (data[-1][0] - data[0][0]) / (len(data) - 1) + epoch = _lal.LIGOTimeGPS(data[0][0]) + if data.ndim == 2: + return TimeSeries(data[:,1], delta_t=delta_t, epoch=epoch) + elif data.ndim == 3: + return TimeSeries(data[:,1] + 1j*data[:,2], + delta_t=delta_t, epoch=epoch) + + raise ValueError('File has %s dimensions, cannot convert to TimeSeries, \ + must be 2 (real) or 3 (complex)' % data.ndim)
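Continuing the save() sketch above (the file names and group are the same placeholders), the series can be read back with its metadata:

from pycbc.types.timeseries import load_timeseries

ts_hdf = load_timeseries('example.hdf', group='strain')
ts_txt = load_timeseries('example.txt')

print(ts_hdf.delta_t, float(ts_hdf.start_time))
print(ts_txt.delta_t)   # inferred from the first column of sample times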
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/vetoes/autochisq.html b/latest/html/_modules/pycbc/vetoes/autochisq.html new file mode 100644 index 00000000000..d9b79aca26e --- /dev/null +++ b/latest/html/_modules/pycbc/vetoes/autochisq.html @@ -0,0 +1,445 @@ + + + + + + pycbc.vetoes.autochisq — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.vetoes.autochisq

+# Copyright (C) 2013  Stas Babak
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+from pycbc.filter import make_frequency_series
+from pycbc.filter import  matched_filter_core
+from pycbc.types import Array
+import numpy as np
+import logging
+
+BACKEND_PREFIX="pycbc.vetoes.autochisq_"
+
+
+
+[docs] +def autochisq_from_precomputed(sn, corr_sn, hautocorr, indices, + stride=1, num_points=None, oneside=None, + twophase=True, maxvalued=False): + """ + Compute correlation (two sided) between template and data + and compares with autocorrelation of the template: C(t) = IFFT(A*A/S(f)) + + Parameters + ---------- + sn: Array[complex] + normalized (!) array of complex snr for the template that produced the + trigger(s) being tested + corr_sn : Array[complex] + normalized (!) array of complex snr for the template that you want to + produce a correlation chisq test for. In the [common] case that sn and + corr_sn are the same, you are computing auto-correlation chisq. + hautocorr: Array[complex] + time domain autocorrelation for the template + indices: Array[int] + compute correlation chisquare at the points specified in this array, + num_points: [int, optional; default=None] + Number of points used for autochisq on each side, if None all points + are used. + stride: [int, optional; default = 1] + stride for points selection for autochisq + total length <= 2*num_points*stride + oneside: [str, optional; default=None] + whether to use one or two sided autochisquare. If None (or not + provided) twosided chi-squared will be used. If given, options are + 'left' or 'right', to do one-sided chi-squared on the left or right. + twophase: Boolean, optional; default=True + If True calculate the auto-chisq using both phases of the filter. + If False only use the phase of the obtained trigger(s). + maxvalued: Boolean, optional; default=False + Return the largest auto-chisq at any of the points tested if True. + If False, return the sum of auto-chisq at all points tested. + + Returns + ------- + dof: int + number of degrees of freedom + autochisq: Array[float] + autochisq values corresponding to the time instances defined by indices + """ + Nsnr = len(sn) + + achisq = np.zeros(len(indices)) + num_points_all = int(Nsnr/stride) + if num_points is None: + num_points = num_points_all + if (num_points > num_points_all): + num_points = num_points_all + + snrabs = np.abs(sn[indices]) + cphi_array = (sn[indices]).real / snrabs + sphi_array = (sn[indices]).imag / snrabs + + start_point = - stride*num_points + end_point = stride*num_points+1 + if oneside == 'left': + achisq_idx_list = np.arange(start_point, 0, stride) + elif oneside == 'right': + achisq_idx_list = np.arange(stride, end_point, stride) + else: + achisq_idx_list_pt1 = np.arange(start_point, 0, stride) + achisq_idx_list_pt2 = np.arange(stride, end_point, stride) + achisq_idx_list = np.append(achisq_idx_list_pt1, + achisq_idx_list_pt2) + + hauto_corr_vec = hautocorr[achisq_idx_list] + hauto_norm = hauto_corr_vec.real*hauto_corr_vec.real + # REMOVE THIS LINE TO REPRODUCE OLD RESULTS + hauto_norm += hauto_corr_vec.imag*hauto_corr_vec.imag + chisq_norm = 1.0 - hauto_norm + + for ip,ind in enumerate(indices): + curr_achisq_idx_list = achisq_idx_list + ind + + cphi = cphi_array[ip] + sphi = sphi_array[ip] + # By construction, the other "phase" of the SNR is 0 + snr_ind = sn[ind].real*cphi + sn[ind].imag*sphi + + # Wrap index if needed (maybe should fail in this case?) 
+ if curr_achisq_idx_list[0] < 0: + curr_achisq_idx_list[curr_achisq_idx_list < 0] += Nsnr + if curr_achisq_idx_list[-1] > (Nsnr - 1): + curr_achisq_idx_list[curr_achisq_idx_list > (Nsnr-1)] -= Nsnr + + z = corr_sn[curr_achisq_idx_list].real*cphi + \ + corr_sn[curr_achisq_idx_list].imag*sphi + dz = z - hauto_corr_vec.real*snr_ind + curr_achisq_list = dz*dz/chisq_norm + + if twophase: + chisq_norm = 1.0 - hauto_norm + z = -corr_sn[curr_achisq_idx_list].real*sphi + \ + corr_sn[curr_achisq_idx_list].imag*cphi + dz = z - hauto_corr_vec.imag*snr_ind + curr_achisq_list += dz*dz/chisq_norm + + if maxvalued: + achisq[ip] = curr_achisq_list.max() + else: + achisq[ip] = curr_achisq_list.sum() + + dof = num_points + if oneside is None: + dof = dof * 2 + if twophase: + dof = dof * 2 + + return dof, achisq
+ + +
+[docs] +class SingleDetAutoChisq(object): + """Class that handles precomputation and memory management for efficiently + running the auto chisq in a single detector inspiral analysis. + """ + def __init__(self, stride, num_points, onesided=None, twophase=False, + reverse_template=False, take_maximum_value=False, + maximal_value_dof=None): + """ + Initialize autochisq calculation instance + + Parameters + ----------- + stride : int + Number of sample points between points at which auto-chisq is + calculated. + num_points : int + Number of sample points at which to calculate auto-chisq in each + direction from the trigger + onesided : optional, default=None, choices=['left','right'] + If None (default), calculate auto-chisq in both directions from the + trigger. If left (backwards in time) or right (forwards in time) + calculate auto-chisq only in that direction. + twophase : optional, default=False + If False calculate auto-chisq using only the phase of the trigger. + If True, compare also against the orthogonal phase. + reverse_template : optional, default=False + If true, time-reverse the template before calculating auto-chisq. + In this case this is more of a cross-correlation chisq than auto. + take_maximum_value : optional, default=False + If provided, instead of adding the auto-chisq value at each sample + point tested, return only the maximum value. + maximal_value_dof : int, required if using take_maximum_value + If using take_maximum_value the expected value is not known. This + value specifies what to store in the cont_chisq_dof output. + """ + if stride > 0: + self.do = True + self.column_name = "cont_chisq" + self.table_dof_name = "cont_chisq_dof" + self.dof = num_points + self.num_points = num_points + self.stride = stride + self.one_sided = onesided + if (onesided is not None): + self.dof = self.dof * 2 + self.two_phase = twophase + if self.two_phase: + self.dof = self.dof * 2 + self.reverse_template = reverse_template + self.take_maximum_value=take_maximum_value + if self.take_maximum_value: + if maximal_value_dof is None: + err_msg = "Must provide the maximal_value_dof keyword " + err_msg += "argument if using the take_maximum_value " + err_msg += "option." + raise ValueError(err_msg) + self.dof = maximal_value_dof + + self._autocor = None + self._autocor_id = None + else: + self.do = False + +
+[docs] + def values(self, sn, indices, template, psd, norm, stilde=None, + low_frequency_cutoff=None, high_frequency_cutoff=None): + """ + Calculate the auto-chisq at the specified indices. + + Parameters + ----------- + sn : Array[complex] + SNR time series of the template for which auto-chisq is being + computed. Provided unnormalized. + indices : Array[int] + List of points at which to calculate auto-chisq + template : Pycbc template object + The template for which we are calculating auto-chisq + psd : Pycbc psd object + The PSD of the data being analysed + norm : float + The normalization factor to apply to sn + stilde : Pycbc data object, needed if using reverse-template + The data being analysed. Only needed if using reverse-template, + otherwise ignored + low_frequency_cutoff : float + The lower frequency to consider in matched-filters + high_frequency_cutoff : float + The upper frequency to consider in matched-filters + + Returns + ------- + achi_list : TimeSeries of auto veto values - if indices + is None then evaluated at all time samples, if not then only at + requested sample indices + + dof: int, approx number of statistical degrees of freedom + """ + if self.do and (len(indices) > 0): + htilde = make_frequency_series(template) + + # Check if we need to recompute the autocorrelation + key = (id(template), id(psd)) + if key != self._autocor_id: + logging.info("Calculating autocorrelation") + + if not self.reverse_template: + Pt, _, P_norm = matched_filter_core(htilde, + htilde, psd=psd, + low_frequency_cutoff=low_frequency_cutoff, + high_frequency_cutoff=high_frequency_cutoff) + Pt = Pt * (1./ Pt[0]) + self._autocor = Array(Pt, copy=True) + else: + Pt, _, P_norm = matched_filter_core(htilde.conj(), + htilde, psd=psd, + low_frequency_cutoff=low_frequency_cutoff, + high_frequency_cutoff=high_frequency_cutoff) + + # T-reversed template has same norm as forward template + # so we can normalize using that + # FIXME: Here sigmasq has to be cast to a float or the + # code is really slow ... why?? + norm_fac = P_norm / float(((template.sigmasq(psd))**0.5)) + Pt *= norm_fac + self._autocor = Array(Pt, copy=True) + self._autocor_id = key + + logging.info("...Calculating autochisquare") + sn = sn*norm + if self.reverse_template: + assert(stilde is not None) + asn, _, ahnrm = matched_filter_core(htilde.conj(), stilde, + low_frequency_cutoff=low_frequency_cutoff, + high_frequency_cutoff=high_frequency_cutoff, + h_norm=template.sigmasq(psd)) + correlation_snr = asn * ahnrm + else: + correlation_snr = sn + + achi_list = np.array([]) + index_list = np.array(indices) + dof, achi_list = autochisq_from_precomputed(sn, correlation_snr, + self._autocor, index_list, stride=self.stride, + num_points=self.num_points, + oneside=self.one_sided, twophase=self.two_phase, + maxvalued=self.take_maximum_value) + self.dof = dof + return achi_list, dof + else: + return None, None
+
+ + +
+[docs] +class SingleDetSkyMaxAutoChisq(SingleDetAutoChisq): + """Stub for precessing auto chisq if anyone ever wants to code it up. + """ + def __init__(self, *args, **kwds): + super(SingleDetSkyMaxAutoChisq, self).__init__(*args, **kwds) + +
+[docs] + def values(self, *args, **kwargs): + if self.do: + err_msg = "Precessing single detector sky-max auto chisq has not " + err_msg += "been written. If you want to use it, why not help " + err_msg += "write it?" + raise NotImplementedError(err_msg) + else: + return None
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/vetoes/bank_chisq.html b/latest/html/_modules/pycbc/vetoes/bank_chisq.html new file mode 100644 index 00000000000..ff58e13a00f --- /dev/null +++ b/latest/html/_modules/pycbc/vetoes/bank_chisq.html @@ -0,0 +1,402 @@ + + + + + + pycbc.vetoes.bank_chisq — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.vetoes.bank_chisq

+# Copyright (C) 2013  Ian Harry, Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+import logging, numpy
+from pycbc.types import Array, zeros, real_same_precision_as, TimeSeries
+from pycbc.filter import overlap_cplx, matched_filter_core
+from pycbc.waveform import FilterBank
+from math import sqrt
+
+
+[docs] +def segment_snrs(filters, stilde, psd, low_frequency_cutoff): + """ This function calculates the SNR of each bank veto template against + the segment + + Parameters + ---------- + filters: list of FrequencySeries + The list of bank veto template filters. + stilde: FrequencySeries + The current segment of data. + psd: FrequencySeries + low_frequency_cutoff: float + + Returns + ------- + snr (list): List of SNR time series. + norm (list): List of normalization factors for the snr time series. + """ + snrs = [] + norms = [] + + for bank_template in filters: + # For every template compute the snr against the stilde segment + snr, _, norm = matched_filter_core( + bank_template, stilde, h_norm=bank_template.sigmasq(psd), + psd=None, low_frequency_cutoff=low_frequency_cutoff) + # SNR time series stored here + snrs.append(snr) + # Template normalization factor stored here + norms.append(norm) + + return snrs, norms
+ + +
+[docs] +def template_overlaps(bank_filters, template, psd, low_frequency_cutoff): + """ This functions calculates the overlaps between the template and the + bank veto templates. + + Parameters + ---------- + bank_filters: List of FrequencySeries + template: FrequencySeries + psd: FrequencySeries + low_frequency_cutoff: float + + Returns + ------- + overlaps: List of complex overlap values. + """ + overlaps = [] + template_ow = template / psd + for bank_template in bank_filters: + overlap = overlap_cplx(template_ow, bank_template, + low_frequency_cutoff=low_frequency_cutoff, normalized=False) + norm = sqrt(1 / template.sigmasq(psd) / bank_template.sigmasq(psd)) + overlaps.append(overlap * norm) + if (abs(overlaps[-1]) > 0.99): + errMsg = "Overlap > 0.99 between bank template and filter. " + errMsg += "This bank template will not be used to calculate " + errMsg += "bank chisq for this filter template. The expected " + errMsg += "value will be added to the chisq to account for " + errMsg += "the removal of this template.\n" + errMsg += "Masses of filter template: %e %e\n" \ + %(template.params.mass1, template.params.mass2) + errMsg += "Masses of bank filter template: %e %e\n" \ + %(bank_template.params.mass1, bank_template.params.mass2) + errMsg += "Overlap: %e" %(abs(overlaps[-1])) + logging.info(errMsg) + return overlaps
+ + +
+[docs] +def bank_chisq_from_filters(tmplt_snr, tmplt_norm, bank_snrs, bank_norms, + tmplt_bank_matches, indices=None): + """ This function calculates and returns a TimeSeries object containing the + bank veto calculated over a segment. + + Parameters + ---------- + tmplt_snr: TimeSeries + The SNR time series from filtering the segment against the current + search template + tmplt_norm: float + The normalization factor for the search template + bank_snrs: list of TimeSeries + The precomputed list of SNR time series between each of the bank veto + templates and the segment + bank_norms: list of floats + The normalization factors for the list of bank veto templates + (usually this will be the same for all bank veto templates) + tmplt_bank_matches: list of floats + The complex overlap between the search template and each + of the bank templates + indices: {None, Array}, optional + Array of indices into the snr time series. If given, the bank chisq + will only be calculated at these values. + + Returns + ------- + bank_chisq: TimeSeries of the bank vetos + """ + if indices is not None: + tmplt_snr = Array(tmplt_snr, copy=False) + bank_snrs_tmp = [] + for bank_snr in bank_snrs: + bank_snrs_tmp.append(bank_snr.take(indices)) + bank_snrs=bank_snrs_tmp + + # Initialise bank_chisq as 0s everywhere + bank_chisq = zeros(len(tmplt_snr), dtype=real_same_precision_as(tmplt_snr)) + + # Loop over all the bank templates + for i in range(len(bank_snrs)): + bank_match = tmplt_bank_matches[i] + if (abs(bank_match) > 0.99): + # Not much point calculating bank_chisquared if the bank template + # is very close to the filter template. Can also hit numerical + # error due to approximations made in this calculation. + # The value of 2 is the expected addition to the chisq for this + # template + bank_chisq += 2. + continue + bank_norm = sqrt((1 - bank_match*bank_match.conj()).real) + + bank_SNR = bank_snrs[i] * (bank_norms[i] / bank_norm) + tmplt_SNR = tmplt_snr * (bank_match.conj() * tmplt_norm / bank_norm) + + bank_SNR = Array(bank_SNR, copy=False) + tmplt_SNR = Array(tmplt_SNR, copy=False) + + bank_chisq += (bank_SNR - tmplt_SNR).squared_norm() + + if indices is not None: + return bank_chisq + else: + return TimeSeries(bank_chisq, delta_t=tmplt_snr.delta_t, + epoch=tmplt_snr.start_time, copy=False)
+ + +
+[docs] +class SingleDetBankVeto(object): + """This class reads in a template bank file for a bank veto, handles the + memory management of its filters internally, and calculates the bank + veto TimeSeries. + """ + def __init__(self, bank_file, flen, delta_f, f_low, cdtype, approximant=None, **kwds): + if bank_file is not None: + self.do = True + + self.column_name = "bank_chisq" + self.table_dof_name = "bank_chisq_dof" + + self.cdtype = cdtype + self.delta_f = delta_f + self.f_low = f_low + self.seg_len_freq = flen + self.seg_len_time = (self.seg_len_freq-1)*2 + + logging.info("Read in bank veto template bank") + bank_veto_bank = FilterBank(bank_file, + self.seg_len_freq, + self.delta_f, self.cdtype, + low_frequency_cutoff=f_low, + approximant=approximant, **kwds) + + self.filters = list(bank_veto_bank) + self.dof = len(bank_veto_bank) * 2 + + self._overlaps_cache = {} + self._segment_snrs_cache = {} + else: + self.do = False + +
+[docs] + def cache_segment_snrs(self, stilde, psd): + key = (id(stilde), id(psd)) + if key not in self._segment_snrs_cache: + logging.info("Precalculate the bank veto template snrs") + data = segment_snrs(self.filters, stilde, psd, self.f_low) + self._segment_snrs_cache[key] = data + return self._segment_snrs_cache[key]
+ + +
+[docs] + def cache_overlaps(self, template, psd): + key = (id(template.params), id(psd)) + if key not in self._overlaps_cache: + logging.info("...Calculate bank veto overlaps") + o = template_overlaps(self.filters, template, psd, self.f_low) + self._overlaps_cache[key] = o + return self._overlaps_cache[key]
+ + +
+[docs] + def values(self, template, psd, stilde, snrv, norm, indices): + """ + Returns + ------- + bank_chisq_from_filters: TimeSeries of bank veto values - if indices + is None then evaluated at all time samples, if not then only at + requested sample indices + + bank_chisq_dof: int, approx number of statistical degrees of freedom + """ + if self.do: + logging.info("...Doing bank veto") + overlaps = self.cache_overlaps(template, psd) + bank_veto_snrs, bank_veto_norms = self.cache_segment_snrs(stilde, psd) + chisq = bank_chisq_from_filters(snrv, norm, bank_veto_snrs, + bank_veto_norms, overlaps, indices) + dof = numpy.repeat(self.dof, len(chisq)) + return chisq, dof + else: + return None, None
+
+ + +
+[docs] +class SingleDetSkyMaxBankVeto(SingleDetBankVeto): + """Stub for precessing bank veto if anyone ever wants to code it up. + """ + def __init__(self, *args, **kwds): + super(SingleDetSkyMaxBankVeto, self).__init__(*args, **kwds) + +
+[docs] + def values(self, *args, **kwargs): + if self.do: + err_msg = "Precessing single detector sky-max bank veto has not " + err_msg += "been written. If you want to use it, why not help " + err_msg += "write it?" + raise NotImplementedError(err_msg) + else: + return None, None
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/vetoes/chisq.html b/latest/html/_modules/pycbc/vetoes/chisq.html new file mode 100644 index 00000000000..c9961dcf31d --- /dev/null +++ b/latest/html/_modules/pycbc/vetoes/chisq.html @@ -0,0 +1,689 @@ + + + + + + pycbc.vetoes.chisq — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.vetoes.chisq

+# Copyright (C) 2012  Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+import numpy, logging, math, pycbc.fft
+
+from pycbc.types import zeros, real_same_precision_as, TimeSeries, complex_same_precision_as
+from pycbc.filter import sigmasq_series, make_frequency_series, matched_filter_core, get_cutoff_indices
+from pycbc.scheme import schemed
+import pycbc.pnutils
+
+BACKEND_PREFIX="pycbc.vetoes.chisq_"
+
+
+[docs] +def power_chisq_bins_from_sigmasq_series(sigmasq_series, num_bins, kmin, kmax): + """Returns bins of equal power for use with the chisq functions + + Parameters + ---------- + + sigmasq_series: FrequencySeries + A frequency series containing the cumulative power of a filter template + preweighted by a psd. + num_bins: int + The number of chisq bins to calculate. + kmin: int + DOCUMENTME + kmax: int + DOCUMENTME + + Returns + ------- + + bins: List of ints + A list of the edges of the chisq bins is returned. + """ + sigmasq = sigmasq_series[kmax - 1] + edge_vec = numpy.arange(0, num_bins) * sigmasq / num_bins + bins = numpy.searchsorted(sigmasq_series[kmin:kmax], edge_vec, side='right') + bins += kmin + return numpy.append(bins, kmax)
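As a toy illustration (the cumulative power curve here is random numbers standing in for a real template's sigmasq series, and a plain numpy array is passed rather than a FrequencySeries), the returned edges split the [kmin, kmax) band into bins of roughly equal cumulative power:

import numpy
from pycbc.vetoes.chisq import power_chisq_bins_from_sigmasq_series

# Fake monotonically increasing cumulative power curve
sigmasq_series = numpy.cumsum(numpy.random.uniform(size=256))

bins = power_chisq_bins_from_sigmasq_series(sigmasq_series, num_bins=4,
                                            kmin=16, kmax=200)
print(bins)   # 5 edges bounding 4 bins of roughly equal power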
+ + + +
+[docs] +def power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff=None, + high_frequency_cutoff=None): + """Returns bins of equal power for use with the chisq functions + + Parameters + ---------- + + htilde: FrequencySeries + A frequency series containing the template waveform + num_bins: int + The number of chisq bins to calculate. + psd: FrequencySeries + A frequency series containing the psd. Its length must be commensurate + with the template waveform. + low_frequency_cutoff: {None, float}, optional + The low frequency cutoff to apply + high_frequency_cutoff: {None, float}, optional + The high frequency cutoff to apply + + Returns + ------- + + bins: List of ints + A list of the edges of the chisq bins is returned. + """ + sigma_vec = sigmasq_series(htilde, psd, low_frequency_cutoff, + high_frequency_cutoff).numpy() + kmin, kmax = get_cutoff_indices(low_frequency_cutoff, + high_frequency_cutoff, + htilde.delta_f, + (len(htilde)-1)*2) + return power_chisq_bins_from_sigmasq_series(sigma_vec, num_bins, kmin, kmax)
+ + + +
+[docs] +@schemed(BACKEND_PREFIX) +def chisq_accum_bin(chisq, q): + err_msg = "This function is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + +
+[docs] +@schemed(BACKEND_PREFIX) +def shift_sum(v1, shifts, bins): + """ Calculate the time shifted sum of the FrequencySeries + """ + err_msg = "This function is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + + +
+[docs] +def power_chisq_at_points_from_precomputed(corr, snr, snr_norm, bins, indices): + """Calculate the chisq timeseries from precomputed values for only select points. + + This function calculates the chisq at each point by explicitly time shifting + and summing each bin. No FFT is involved. + + Parameters + ---------- + corr: FrequencySeries + The product of the template and data in the frequency domain. + snr: numpy.ndarray + The unnormalized array of snr values at only the selected points in `indices`. + snr_norm: float + The normalization of the snr (EXPLAINME : refer to Findchirp paper?) + bins: List of integers + The edges of the equal power bins + indices: Array + The indices where we will calculate the chisq. These must be relative + to the given `corr` series. + + Returns + ------- + chisq: Array + An array containing only the chisq at the selected points. + """ + num_bins = len(bins) - 1 + chisq = shift_sum(corr, indices, bins) # pylint:disable=assignment-from-no-return + return (chisq * num_bins - (snr.conj() * snr).real) * (snr_norm ** 2.0)
+ + +_q_l = None +_qtilde_l = None +_chisq_l = None +
+[docs] +def power_chisq_from_precomputed(corr, snr, snr_norm, bins, indices=None, return_bins=False): + """Calculate the chisq timeseries from precomputed values. + + This function calculates the chisq at all times by performing an + inverse FFT of each bin. + + Parameters + ---------- + + corr: FrequencySeries + The produce of the template and data in the frequency domain. + snr: TimeSeries + The unnormalized snr time series. + snr_norm: + The snr normalization factor (true snr = snr * snr_norm) EXPLAINME - define 'true snr'? + bins: List of integers + The edges of the chisq bins. + indices: {Array, None}, optional + Index values into snr that indicate where to calculate + chisq values. If none, calculate chisq for all possible indices. + return_bins: {boolean, False}, optional + Return a list of the SNRs for each chisq bin. + + Returns + ------- + chisq: TimeSeries + """ + # Get workspace memory + global _q_l, _qtilde_l, _chisq_l + + bin_snrs = [] + + if _q_l is None or len(_q_l) != len(snr): + q = zeros(len(snr), dtype=complex_same_precision_as(snr)) + qtilde = zeros(len(snr), dtype=complex_same_precision_as(snr)) + _q_l = q + _qtilde_l = qtilde + else: + q = _q_l + qtilde = _qtilde_l + + if indices is not None: + snr = snr.take(indices) + + if _chisq_l is None or len(_chisq_l) < len(snr): + chisq = zeros(len(snr), dtype=real_same_precision_as(snr)) + _chisq_l = chisq + else: + chisq = _chisq_l[0:len(snr)] + chisq.clear() + + num_bins = len(bins) - 1 + + for j in range(num_bins): + k_min = int(bins[j]) + k_max = int(bins[j+1]) + + qtilde[k_min:k_max] = corr[k_min:k_max] + pycbc.fft.ifft(qtilde, q) + qtilde[k_min:k_max].clear() + + if return_bins: + bin_snrs.append(TimeSeries(q * snr_norm * num_bins ** 0.5, + delta_t=snr.delta_t, + epoch=snr.start_time)) + + if indices is not None: + chisq_accum_bin(chisq, q.take(indices)) + else: + chisq_accum_bin(chisq, q) + + chisq = (chisq * num_bins - snr.squared_norm()) * (snr_norm ** 2.0) + + if indices is None: + chisq = TimeSeries(chisq, delta_t=snr.delta_t, epoch=snr.start_time, copy=False) + + if return_bins: + return chisq, bin_snrs + else: + return chisq
+ + + +
+[docs] +def fastest_power_chisq_at_points(corr, snr, snrv, snr_norm, bins, indices): + """Calculate the chisq values for only selected points. + + This function looks at the number of points to be evaluated and selects + the fastest method (FFT, or direct time shift and sum). In either case, + only the selected points are returned. + + Parameters + ---------- + corr: FrequencySeries + The product of the template and data in the frequency domain. + snr: Array + The unnormalized snr + snr_norm: float + The snr normalization factor --- EXPLAINME + bins: List of integers + The edges of the equal power bins + indices: Array + The indices where we will calculate the chisq. These must be relative + to the given `snr` series. + + Returns + ------- + chisq: Array + An array containing only the chisq at the selected points. + """ + import pycbc.scheme + if isinstance(pycbc.scheme.mgr.state, pycbc.scheme.CPUScheme): + # We don't have that many points so do the direct time shift. + return power_chisq_at_points_from_precomputed(corr, snrv, + snr_norm, bins, indices) + else: + # We have a lot of points so it is faster to use the fourier transform + return power_chisq_from_precomputed(corr, snr, snr_norm, bins, + indices=indices)
+ + + +
+[docs] +def power_chisq(template, data, num_bins, psd, + low_frequency_cutoff=None, + high_frequency_cutoff=None, + return_bins=False): + """Calculate the chisq timeseries + + Parameters + ---------- + template: FrequencySeries or TimeSeries + A time or frequency series that contains the filter template. + data: FrequencySeries or TimeSeries + A time or frequency series that contains the data to filter. The length + must be commensurate with the template. + (EXPLAINME - does this mean 'the same as' or something else?) + num_bins: int + The number of frequency bins used for chisq. The number of statistical + degrees of freedom ('dof') is 2*num_bins-2. + psd: FrequencySeries + The psd of the data. + low_frequency_cutoff: {None, float}, optional + The low frequency cutoff for the filter + high_frequency_cutoff: {None, float}, optional + The high frequency cutoff for the filter + return_bins: {boolean, False}, optional + Return a list of the individual chisq bins + + Returns + ------- + chisq: TimeSeries + TimeSeries containing the chisq values for all times. + """ + htilde = make_frequency_series(template) + stilde = make_frequency_series(data) + + bins = power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff, + high_frequency_cutoff) + corra = zeros((len(htilde) - 1) * 2, dtype=htilde.dtype) + total_snr, corr, tnorm = matched_filter_core(htilde, stilde, psd, + low_frequency_cutoff, high_frequency_cutoff, + corr_out=corra) + + return power_chisq_from_precomputed(corr, total_snr, tnorm, bins, return_bins=return_bins)
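A minimal end-to-end sketch, assuming PyCBC's bundled analytic PSD, simulated Gaussian noise, and the IMRPhenomD approximant are available; the masses, duration, and number of bins are arbitrary, and since the data below contain no signal the reduced chisq should simply scatter around 1:

from pycbc.psd import aLIGOZeroDetHighPower
from pycbc.noise import noise_from_psd
from pycbc.waveform import get_fd_waveform
from pycbc.vetoes.chisq import power_chisq

sample_rate, duration, flow, nbins = 4096, 16, 20.0, 16
tlen = sample_rate * duration
delta_f = 1.0 / duration
flen = tlen // 2 + 1

psd = aLIGOZeroDetHighPower(flen, delta_f, flow)
data = noise_from_psd(tlen, 1.0 / sample_rate, psd, seed=0)

hp, _ = get_fd_waveform(approximant='IMRPhenomD', mass1=25, mass2=25,
                        delta_f=delta_f, f_lower=flow)
hp.resize(flen)   # zero-pad the template to the analysis length

chisq = power_chisq(hp, data, nbins, psd, low_frequency_cutoff=flow)
reduced = chisq / (2 * nbins - 2)   # dof = 2 * num_bins - 2, per the docstring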
+ + + +
+[docs] +class SingleDetPowerChisq(object): + """Class that handles precomputation and memory management for efficiently + running the power chisq in a single detector inspiral analysis. + """ + def __init__(self, num_bins=0, snr_threshold=None): + if not (num_bins == "0" or num_bins == 0): + self.do = True + self.column_name = "chisq" + self.table_dof_name = "chisq_dof" + self.num_bins = num_bins + else: + self.do = False + self.snr_threshold = snr_threshold + +
+[docs] + @staticmethod + def parse_option(row, arg): + safe_dict = {'max': max, 'min': min} + safe_dict.update(row.__dict__) + safe_dict.update(math.__dict__) + safe_dict.update(pycbc.pnutils.__dict__) + return eval(arg, {"__builtins__":None}, safe_dict)
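A small sketch of how a num_bins string is evaluated against a template's parameters; the expression and the stand-in "row" object are invented for the example, not values used by the pipeline:

from types import SimpleNamespace
from pycbc.vetoes.chisq import SingleDetPowerChisq

# Any object whose __dict__ exposes template parameters will do for illustration
row = SimpleNamespace(mtotal=60.0, mchirp=25.0)
print(SingleDetPowerChisq.parse_option(row, '0.4 * (mtotal / 20.0) ** (2. / 3.)'))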
+ + +
+[docs] + def cached_chisq_bins(self, template, psd): + from pycbc.opt import LimitedSizeDict + + key = id(psd) + if not hasattr(psd, '_chisq_cached_key'): + psd._chisq_cached_key = {} + + if not hasattr(template, '_bin_cache'): + template._bin_cache = LimitedSizeDict(size_limit=2**2) + + if key not in template._bin_cache or id(template.params) not in psd._chisq_cached_key: + psd._chisq_cached_key[id(template.params)] = True + num_bins = int(self.parse_option(template, self.num_bins)) + + if hasattr(psd, 'sigmasq_vec') and \ + template.approximant in psd.sigmasq_vec: + kmin = int(template.f_lower / psd.delta_f) + kmax = template.end_idx + bins = power_chisq_bins_from_sigmasq_series( + psd.sigmasq_vec[template.approximant], + num_bins, + kmin, + kmax + ) + else: + bins = power_chisq_bins(template, num_bins, psd, template.f_lower) + template._bin_cache[key] = bins + + return template._bin_cache[key]
+ + +
+[docs] + def values(self, corr, snrv, snr_norm, psd, indices, template): + """ Calculate the chisq at points given by indices. + + Returns + ------- + chisq: Array + Chisq values, one for each sample index, or zero for points below + the specified SNR threshold + + chisq_dof: Array + Number of statistical degrees of freedom for the chisq test + in the given template, equal to 2 * num_bins - 2 + """ + if self.do: + num_above = len(indices) + if self.snr_threshold: + above = abs(snrv * snr_norm) > self.snr_threshold + num_above = above.sum() + logging.info('%s above chisq activation threshold' % num_above) + above_indices = indices[above] + above_snrv = snrv[above] + chisq_out = numpy.zeros(len(indices), dtype=numpy.float32) + dof = -100 + else: + above_indices = indices + above_snrv = snrv + + if num_above > 0: + bins = self.cached_chisq_bins(template, psd) + # len(bins) is number of bin edges, num_bins = len(bins) - 1 + dof = (len(bins) - 1) * 2 - 2 + _chisq = power_chisq_at_points_from_precomputed(corr, + above_snrv, snr_norm, bins, above_indices) + + if self.snr_threshold: + if num_above > 0: + chisq_out[above] = _chisq + else: + chisq_out = _chisq + + return chisq_out, numpy.repeat(dof, len(indices))# dof * numpy.ones_like(indices) + else: + return None, None
+
+ + + +
+[docs] +class SingleDetSkyMaxPowerChisq(SingleDetPowerChisq): + """Class that handles precomputation and memory management for efficiently + running the power chisq in a single detector inspiral analysis when + maximizing analytically over sky location. + """ + def __init__(self, **kwds): + super(SingleDetSkyMaxPowerChisq, self).__init__(**kwds) + self.template_mem = None + self.corr_mem = None + +
+[docs] + def calculate_chisq_bins(self, template, psd): + """ Obtain the chisq bins for this template and PSD. + """ + num_bins = int(self.parse_option(template, self.num_bins)) + if hasattr(psd, 'sigmasq_vec') and \ + template.approximant in psd.sigmasq_vec: + kmin = int(template.f_lower / psd.delta_f) + kmax = template.end_idx + bins = power_chisq_bins_from_sigmasq_series( + psd.sigmasq_vec[template.approximant], num_bins, kmin, kmax) + else: + bins = power_chisq_bins(template, num_bins, psd, template.f_lower) + return bins
+ + +
+[docs] + def values(self, corr_plus, corr_cross, snrv, psd, + indices, template_plus, template_cross, u_vals, + hplus_cross_corr, hpnorm, hcnorm): + """ Calculate the chisq at points given by indices. + + Returns + ------- + chisq: Array + Chisq values, one for each sample index + + chisq_dof: Array + Number of statistical degrees of freedom for the chisq test + in the given template + """ + if self.do: + num_above = len(indices) + if self.snr_threshold: + above = abs(snrv) > self.snr_threshold + num_above = above.sum() + logging.info('%s above chisq activation threshold' % num_above) + above_indices = indices[above] + above_snrv = snrv[above] + u_vals = u_vals[above] + rchisq = numpy.zeros(len(indices), dtype=numpy.float32) + dof = -100 + else: + above_indices = indices + above_snrv = snrv + + if num_above > 0: + chisq = [] + curr_tmplt_mult_fac = 0. + curr_corr_mult_fac = 0. + if self.template_mem is None or \ + (not len(self.template_mem) == len(template_plus)): + self.template_mem = zeros(len(template_plus), + dtype=complex_same_precision_as(corr_plus)) + if self.corr_mem is None or \ + (not len(self.corr_mem) == len(corr_plus)): + self.corr_mem = zeros(len(corr_plus), + dtype=complex_same_precision_as(corr_plus)) + + tmplt_data = template_cross.data + corr_data = corr_cross.data + numpy.copyto(self.template_mem.data, template_cross.data) + numpy.copyto(self.corr_mem.data, corr_cross.data) + template_cross._data = self.template_mem.data + corr_cross._data = self.corr_mem.data + + for lidx, index in enumerate(above_indices): + above_local_indices = numpy.array([index]) + above_local_snr = numpy.array([above_snrv[lidx]]) + local_u_val = u_vals[lidx] + # Construct template from _plus and _cross + # Note that this modifies in place, so we store that and + # revert on the next pass. + template = template_cross.multiply_and_add(template_plus, + local_u_val-curr_tmplt_mult_fac) + curr_tmplt_mult_fac = local_u_val + + template.f_lower = template_plus.f_lower + template.params = template_plus.params + # Construct the corr vector + norm_fac = local_u_val*local_u_val + 1 + norm_fac += 2 * local_u_val * hplus_cross_corr + norm_fac = hcnorm / (norm_fac**0.5) + hp_fac = local_u_val * hpnorm / hcnorm + corr = corr_cross.multiply_and_add(corr_plus, + hp_fac - curr_corr_mult_fac) + curr_corr_mult_fac = hp_fac + + bins = self.calculate_chisq_bins(template, psd) + dof = (len(bins) - 1) * 2 - 2 + curr_chisq = power_chisq_at_points_from_precomputed(corr, + above_local_snr/ norm_fac, norm_fac, + bins, above_local_indices) + chisq.append(curr_chisq[0]) + chisq = numpy.array(chisq) + # Must reset corr and template to original values! + template_cross._data = tmplt_data + corr_cross._data = corr_data + + if self.snr_threshold: + if num_above > 0: + rchisq[above] = chisq + else: + rchisq = chisq + + return rchisq, numpy.repeat(dof, len(indices))# dof * numpy.ones_like(indices) + else: + return None, None
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/vetoes/sgchisq.html b/latest/html/_modules/pycbc/vetoes/sgchisq.html new file mode 100644 index 00000000000..bed6d05e0e6 --- /dev/null +++ b/latest/html/_modules/pycbc/vetoes/sgchisq.html @@ -0,0 +1,317 @@ + + + + + + pycbc.vetoes.sgchisq — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.vetoes.sgchisq

+"""Chisq based on sine-gaussian tiles.
+See https://arxiv.org/abs/1709.08974 for a discussion.
+"""
+
+import numpy
+
+from pycbc.waveform.utils import apply_fseries_time_shift
+from pycbc.filter import sigma
+from pycbc.waveform import sinegauss
+from pycbc.vetoes.chisq import SingleDetPowerChisq
+from pycbc.events import ranking
+
+
+[docs] +class SingleDetSGChisq(SingleDetPowerChisq): + """Class that handles precomputation and memory management for efficiently + running the sine-Gaussian chisq + """ + returns = {'sg_chisq': numpy.float32} + + def __init__(self, bank, num_bins=0, + snr_threshold=None, + chisq_locations=None): + """ Create sine-Gaussian Chisq Calculator + + Parameters + ---------- + bank: pycbc.waveform.TemplateBank + The template bank that will be processed. + num_bins: str + The string determining the number of power chisq bins + snr_threshold: float + The threshold to calculate the sine-Gaussian chisq + chisq_locations: list of strs + List of strings which detail where to place a sine-Gaussian. + The format is 'region-boolean:q1-offset1,q2-offset2'. + The offset is relative to the end frequency of the approximant. + The region is a boolean expression such as 'mtotal>40' indicating + which templates to apply this set of sine-Gaussians to. + """ + if snr_threshold is not None: + self.do = True + self.num_bins = num_bins + self.snr_threshold = snr_threshold + self.params = {} + for descr in chisq_locations: + region, values = descr.split(":") + mask = bank.table.parse_boolargs([(1, region), (0, 'else')])[0] + hashes = bank.table['template_hash'][mask.astype(bool)] + for h in hashes: + self.params[h] = values + else: + self.do = False + +
+[docs] + @staticmethod + def insert_option_group(parser): + group = parser.add_argument_group("Sine-Gaussian Chisq") + group.add_argument("--sgchisq-snr-threshold", type=float, + help="Minimum SNR threshold to use SG chisq") + group.add_argument("--sgchisq-locations", type=str, nargs='+', + help="Frequency offsets and quality factors of the sine-Gaussians" + " to use, format 'region-boolean:q1-offset1,q2-offset2'. " + "Offset is relative to the end frequency of the approximant." + " Region is a boolean expression selecting templates to " + "apply the sine-Gaussians to, ex. 'mtotal>40'")
+ + +
+[docs] + @classmethod + def from_cli(cls, args, bank, chisq_bins): + return cls(bank, chisq_bins, + args.sgchisq_snr_threshold, + args.sgchisq_locations)
+ + +
+[docs] + def values(self, stilde, template, psd, snrv, snr_norm, + bchisq, bchisq_dof, indices): + """ Calculate sine-Gaussian chisq + + Parameters + ---------- + stilde: pycbc.types.Frequencyseries + The overwhitened strain + template: pycbc.types.Frequencyseries + The waveform template being analyzed + psd: pycbc.types.Frequencyseries + The power spectral density of the data + snrv: numpy.ndarray + The peak unnormalized complex SNR values + snr_norm: float + The normalization factor for the snr + bchisq: numpy.ndarray + The Bruce Allen power chisq values for these triggers + bchisq_dof: numpy.ndarray + The degrees of freedom of the Bruce chisq + indics: numpy.ndarray + The indices of the snr peaks. + + Returns + ------- + chisq: Array + Chisq values, one for each sample index + """ + if not self.do: + return None + + if template.params.template_hash not in self.params: + return numpy.ones(len(snrv)) + values = self.params[template.params.template_hash].split(',') + + # Get the chisq bins to use as the frequency reference point + bins = self.cached_chisq_bins(template, psd) + + # This is implemented slowly, so let's not call it often, OK? + chisq = numpy.ones(len(snrv)) + for i, snrvi in enumerate(snrv): + #Skip if newsnr too low + snr = abs(snrvi * snr_norm) + nsnr = ranking.newsnr(snr, bchisq[i] / bchisq_dof[i]) + if nsnr < self.snr_threshold: + continue + + N = (len(template) - 1) * 2 + dt = 1.0 / (N * template.delta_f) + kmin = int(template.f_lower / psd.delta_f) + time = float(template.epoch) + dt * indices[i] + # Shift the time of interest to be centered on 0 + stilde_shift = apply_fseries_time_shift(stilde, -time) + + # Only apply the sine-Gaussian in a +-50 Hz range around the + # central frequency + qwindow = 50 + chisq[i] = 0 + + # Estimate the maximum frequency up to which the waveform has + # power by approximating power per frequency + # as constant over the last 2 chisq bins. We cannot use the final + # chisq bin edge as it does not have to be where the waveform + # terminates. + fstep = (bins[-2] - bins[-3]) + fpeak = (bins[-2] + fstep) * template.delta_f + + # This is 90% of the Nyquist frequency of the data + # This allows us to avoid issues near Nyquist due to resample + # Filtering + fstop = len(stilde) * stilde.delta_f * 0.9 + + dof = 0 + # Calculate the sum of SNR^2 for the sine-Gaussians specified + for descr in values: + # Get the q and frequency offset from the descriptor + q, offset = descr.split('-') + q, offset = float(q), float(offset) + fcen = fpeak + offset + flow = max(kmin * template.delta_f, fcen - qwindow) + fhigh = fcen + qwindow + + # If any sine-gaussian tile has an upper frequency near + # nyquist return 1 instead. + if fhigh > fstop: + return numpy.ones(len(snrv)) + + kmin = int(flow / template.delta_f) + kmax = int(fhigh / template.delta_f) + + #Calculate sine-gaussian tile + gtem = sinegauss.fd_sine_gaussian(1.0, q, fcen, flow, + len(template) * template.delta_f, + template.delta_f).astype(numpy.complex64) + gsigma = sigma(gtem, psd=psd, + low_frequency_cutoff=flow, + high_frequency_cutoff=fhigh) + #Calculate the SNR of the tile + gsnr = (gtem[kmin:kmax] * stilde_shift[kmin:kmax]).sum() + gsnr *= 4.0 * gtem.delta_f / gsigma + chisq[i] += abs(gsnr)**2.0 + dof += 2 + if dof == 0: + chisq[i] = 1 + else: + chisq[i] /= dof + return chisq
+
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/bank.html b/latest/html/_modules/pycbc/waveform/bank.html new file mode 100644 index 00000000000..6be5cff995f --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/bank.html @@ -0,0 +1,1162 @@ + + + + + + pycbc.waveform.bank — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.waveform.bank

+# Copyright (C) 2012  Alex Nitz, Josh Willis, Andrew Miller
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides classes that describe banks of waveforms
+"""
+import types
+import logging
+import os.path
+import h5py
+from copy import copy
+import numpy as np
+from ligo.lw import lsctables, utils as ligolw_utils
+import pycbc.waveform
+import pycbc.pnutils
+import pycbc.waveform.compress
+from pycbc import DYN_RANGE_FAC
+from pycbc.types import FrequencySeries, zeros
+import pycbc.io
+from pycbc.io.ligolw import LIGOLWContentHandler
+import hashlib
+
+
+
+[docs] +def sigma_cached(self, psd): + """ Cache sigma calculate for use in tandem with the FilterBank class + """ + if not hasattr(self, '_sigmasq'): + from pycbc.opt import LimitedSizeDict + self._sigmasq = LimitedSizeDict(size_limit=2**5) + + key = id(psd) + if not hasattr(psd, '_sigma_cached_key'): + psd._sigma_cached_key = {} + + if key not in self._sigmasq or id(self) not in psd._sigma_cached_key: + psd._sigma_cached_key[id(self)] = True + # If possible, we precalculate the sigmasq vector for all possible waveforms + if pycbc.waveform.waveform_norm_exists(self.approximant): + if not hasattr(psd, 'sigmasq_vec'): + psd.sigmasq_vec = {} + + if self.approximant not in psd.sigmasq_vec: + psd.sigmasq_vec[self.approximant] = \ + pycbc.waveform.get_waveform_filter_norm( + self.approximant, + psd, + len(psd), + psd.delta_f, + self.min_f_lower + ) + + if not hasattr(self, 'sigma_scale'): + # Get an amplitude normalization (mass dependant constant norm) + amp_norm = pycbc.waveform.get_template_amplitude_norm( + self.params, approximant=self.approximant) + amp_norm = 1 if amp_norm is None else amp_norm + self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0 + + curr_sigmasq = psd.sigmasq_vec[self.approximant] + + kmin = int(self.f_lower / psd.delta_f) + self._sigmasq[key] = self.sigma_scale * \ + (curr_sigmasq[self.end_idx-1] - curr_sigmasq[kmin]) + + else: + if not hasattr(self, 'sigma_view'): + from pycbc.filter.matchedfilter import get_cutoff_indices + N = (len(self) -1) * 2 + kmin, kmax = get_cutoff_indices( + self.min_f_lower or self.f_lower, self.end_frequency, + self.delta_f, N) + self.sslice = slice(kmin, kmax) + self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f + + if not hasattr(psd, 'invsqrt'): + psd.invsqrt = 1.0 / psd + + self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt[self.sslice]) + return self._sigmasq[key]
+ + + +# helper function for parsing approximant strings +
+[docs] +def boolargs_from_apprxstr(approximant_strs): + """Parses a list of strings specifying an approximant and where that + approximant should be used into a list that can be understood by + FieldArray.parse_boolargs. + + Parameters + ---------- + apprxstr : (list of) string(s) + The strings to parse. Each string should be formatted `APPRX:COND`, + where `APPRX` is the approximant and `COND` is a string specifying + where it should be applied (see `FieldArgs.parse_boolargs` for examples + of conditional strings). The last string in the list may exclude a + conditional argument, which is the same as specifying ':else'. + + Returns + ------- + boolargs : list + A list of tuples giving the approximant and where to apply them. This + can be passed directly to `FieldArray.parse_boolargs`. + """ + if not isinstance(approximant_strs, list): + approximant_strs = [approximant_strs] + return [tuple(arg.split(':')) for arg in approximant_strs]
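For example (the approximant names and the condition are arbitrary):

from pycbc.waveform.bank import boolargs_from_apprxstr

print(boolargs_from_apprxstr(['TaylorF2:mtotal < 4', 'IMRPhenomD:else']))
# -> [('TaylorF2', 'mtotal < 4'), ('IMRPhenomD', 'else')]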
+ + + +
+[docs] +def add_approximant_arg(parser, default=None, help=None): + """Adds an approximant argument to the given parser. + + Parameters + ---------- + parser : ArgumentParser + The argument parser to add the argument to. + default : {None, str} + Specify a default for the approximant argument. Defaults to None. + help : {None, str} + Provide a custom help message. If None, will use a descriptive message + on how to specify the approximant. + """ + if help is None: + help=str("The approximant(s) to use. Multiple approximants to use " + "in different regions may be provided. If multiple " + "approximants are provided, every one but the last must be " + "be followed by a conditional statement defining where that " + "approximant should be used. Conditionals can be any boolean " + "test understood by numpy. For example, 'Apprx:(mtotal > 4) & " + "(mchirp <= 5)' would use approximant 'Apprx' where total mass " + "is > 4 and chirp mass is <= 5. " + "Conditionals are applied in order, with each successive one " + "only applied to regions not covered by previous arguments. " + "For example, `'TaylorF2:mtotal < 4' 'IMRPhenomD:mchirp < 3'` " + "would result in IMRPhenomD being used where chirp mass is < 3 " + "and total mass is >= 4. The last approximant given may use " + "'else' as the conditional or include no conditional. In either " + "case, this will cause the last approximant to be used in any " + "remaning regions after all the previous conditionals have been " + "applied. For the full list of possible parameters to apply " + "conditionals to, see WaveformArray.default_fields(). Math " + "operations may also be used on parameters; syntax is python, " + "with any operation recognized by numpy.") + parser.add_argument("--approximant", nargs='+', type=str, default=default, + metavar='APPRX[:COND]', + help=help)
+ + + +
+[docs] +def parse_approximant_arg(approximant_arg, warray): + """Given an approximant arg (see add_approximant_arg) and a field + array, figures out what approximant to use for each template in the array. + + Parameters + ---------- + approximant_arg : list + The approximant argument to parse. Should be the list of strings + returned by ArgumentParser when parsing the argument added by + add_approximant_arg. + warray : FieldArray + The array to parse. Must be an instance of a FieldArray, or a class + that inherits from FieldArray. + + Returns + ------- + array + A numpy array listing the approximants to use for each element in + the warray. + """ + return warray.parse_boolargs(boolargs_from_apprxstr(approximant_arg))[0]
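Continuing the sketch above, the parsed option can then be applied to a bank's parameter table; here `bank` is assumed to be a `TemplateBank` instance (defined below):

from pycbc.waveform.bank import parse_approximant_arg

apprxs = parse_approximant_arg(opts.approximant, bank.table)
# apprxs[i] is the approximant name selected for the i-th template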
+ + +
+[docs] +def tuple_to_hash(tuple_to_be_hashed): + """ + Return a hash for a tuple of values (via a numpy array); avoids the + native (unsafe) python3 hash function + + Parameters + ---------- + tuple_to_be_hashed: tuple + The tuple which is being hashed + Must be convertible to a numpy array + + Returns + ------- + int + an integer representation of the hashed array + """ + h = hashlib.blake2b(np.array(tuple_to_be_hashed).tobytes('C'), + digest_size=8) + return np.fromstring(h.digest(), dtype=int)[0]
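A small usage sketch, hashing the kind of mass/aligned-spin tuple that `TemplateBank.ensure_hash` (below) builds for each template:

from pycbc.waveform.bank import tuple_to_hash

h = tuple_to_hash((1.4, 1.35, 0.0, 0.02))
# h is a deterministic 64-bit integer, reproducible across processes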
+ + + +
+[docs] +class TemplateBank(object): + """Class to provide some basic helper functions and information + about elements of a template bank. + + Parameters + ---------- + filename : string + The name of the file to load. Must end in '.xml[.gz]' or '.hdf'. If an + hdf file, it should have a 'parameters' entry in its `attrs` which gives a + list of the names of fields to load from the file. If no 'parameters' + are found, all of the top-level groups in the file will be assumed to be + parameters (a warning will be printed to stdout in this case). If an + xml file, it must have a `SnglInspiral` table. + approximant : {None, (list of) string(s)} + Specify the approximant(s) for each template in the bank. If None + provided, will try to load the approximant from the file. The + approximant may either be a single string (in which case the same + approximant will be used for all templates) or a list of strings and + conditionals specifying where to use the approximant. See + `boolargs_from_apprxstr` for syntax. + parameters : {None, (list of) string(s)} + Specify what parameters to load from the file. If None, all of the + parameters in the file (if an xml file, this is all of the columns in + the SnglInspiral table; if an hdf file, this is given by the + parameters attribute in the file) will be loaded. The list may include + parameters that + are derived from the file's parameters, or functions thereof. For a + full list of possible parameters, see `WaveformArray.default_fields`. + If a derived parameter is specified, only the parameters needed to + compute that parameter will be loaded from the file. For example, if + `parameters='mchirp'`, then only `mass1, mass2` will be loaded from + the file. Note that derived parameters can only be used if the + needed parameters are in the file; e.g., you cannot use `chi_eff` if + `spin1z`, `spin2z`, `mass1`, and `mass2` are not in the input file. + \**kwds : + Any additional keyword arguments are stored to the `extra_args` + attribute. + + Attributes + ---------- + table : WaveformArray + An instance of a WaveformArray containing all of the information about + the parameters of the bank. + has_compressed_waveforms : {False, bool} + True if compressed waveforms are present in the (hdf) file; False + otherwise. + indoc : {None, xmldoc} + If an xml file was provided, an in-memory representation of the xml. + Otherwise, None. + filehandler : {None, pycbc.io.HFile} + If an hdf file was provided, the file handler pointing to the hdf file + (left open after initialization). Otherwise, None. + extra_args : {None, dict} + Any extra keyword arguments that were provided on initialization. 
+ """ + def __init__(self, filename, approximant=None, parameters=None, + **kwds): + self.has_compressed_waveforms = False + ext = os.path.basename(filename) + if ext.endswith(('.xml', '.xml.gz', '.xmlgz')): + self.filehandler = None + self.indoc = ligolw_utils.load_filename( + filename, False, contenthandler=LIGOLWContentHandler) + self.table = lsctables.SnglInspiralTable.get_table(self.indoc) + self.table = pycbc.io.WaveformArray.from_ligolw_table(self.table, + columns=parameters) + + # inclination stored in xml alpha3 column + names = list(self.table.dtype.names) + names = tuple([n if n != 'alpha3' else 'inclination' for n in names]) + + # low frequency cutoff in xml alpha6 column + names = tuple([n if n!= 'alpha6' else 'f_lower' for n in names]) + self.table.dtype.names = names + + elif ext.endswith(('hdf', '.h5', '.hdf5')): + self.indoc = None + f = pycbc.io.HFile(filename, 'r') + self.filehandler = f + try: + fileparams = list(f.attrs['parameters']) + except KeyError: + # just assume all of the top-level groups are the parameters + fileparams = list(f.keys()) + logging.info("WARNING: no parameters attribute found. " + "Assuming that %s " %(', '.join(fileparams)) + + "are the parameters.") + tmp_params = [] + # At this point fileparams might be bytes. Fix if it is + for param in fileparams: + try: + param = param.decode() + tmp_params.append(param) + except AttributeError: + tmp_params.append(param) + fileparams = tmp_params + + # use WaveformArray's syntax parser to figure out what fields + # need to be loaded + if parameters is None: + parameters = fileparams + common_fields = list(pycbc.io.WaveformArray(1, + names=parameters).fieldnames) + add_fields = list(set(parameters) & + (set(fileparams) - set(common_fields))) + # load + dtype = [] + data = {} + for key in common_fields+add_fields: + data[key] = f[key][:] + dtype.append((key, data[key].dtype)) + num = f[fileparams[0]].size + self.table = pycbc.io.WaveformArray(num, dtype=dtype) + for key in data: + self.table[key] = data[key] + # add the compressed waveforms, if they exist + self.has_compressed_waveforms = 'compressed_waveforms' in f + else: + raise ValueError("Unsupported template bank file extension %s" %( + ext)) + + # if approximant is specified, override whatever was in the file + # (if anything was in the file) + if approximant is not None: + # get the approximant for each template + dtype = h5py.string_dtype(encoding='utf-8') + apprxs = np.array(self.parse_approximant(approximant), + dtype=dtype) + if 'approximant' not in self.table.fieldnames: + self.table = self.table.add_fields(apprxs, 'approximant') + else: + self.table['approximant'] = apprxs + self.extra_args = kwds + self.ensure_hash() + + @property + def parameters(self): + """tuple: The parameters loaded from the input file. + Same as `table.fieldnames`. + """ + return self.table.fieldnames + +
+[docs] + def ensure_hash(self): + """Ensure that there is a correctly populated template_hash. + + Check for a correctly populated template_hash and create if it doesn't + already exist. + """ + fields = self.table.fieldnames + if 'template_hash' in fields: + return + + # The fields to use in making a template hash + hash_fields = ['mass1', 'mass2', 'inclination', + 'spin1x', 'spin1y', 'spin1z', + 'spin2x', 'spin2y', 'spin2z',] + + fields = [f for f in hash_fields if f in fields] + template_hash = np.array([tuple_to_hash(v) for v in zip(*[self.table[p] + for p in fields])]) + if not np.unique(template_hash).size == template_hash.size: + raise RuntimeError("Some template hashes clash. This should not " + "happen.") + self.table = self.table.add_fields(template_hash, 'template_hash')
+ + +
+[docs] + def write_to_hdf(self, filename, start_index=None, stop_index=None, + force=False, skip_fields=None, + write_compressed_waveforms=True): + """Writes self to the given hdf file. + + Parameters + ---------- + filename : str + The name of the file to write to. Must end in a recognised HDF5 + file extension. + start_index : {None, int} + If a specific slice of the template bank is to be + written to the hdf file, this specifies the index of the + first template in the slice. + stop_index : {None, int} + If a specific slice of the template bank is to be + written to the hdf file, this specifies the index at which + the slice ends (exclusive). + force : {False, bool} + If the file already exists, it will be overwritten if True. + Otherwise, an OSError is raised if the file exists. + skip_fields : {None, (list of) strings} + Do not write the given fields to the hdf file. Default is None, + in which case all fields in self.table.fieldnames are written. + write_compressed_waveforms : {True, bool} + Write compressed waveforms to the output (hdf) file if this is + True, which is the default setting. If False, do not write the + compressed waveforms group, but only the template parameters to + the output file. + + Returns + ------- + pycbc.io.HFile + The file handler to the output hdf file (left open). + """ + if not filename.endswith(('.hdf', '.h5', '.hdf5')): + raise ValueError("Unrecognized file extension") + if os.path.exists(filename) and not force: + raise IOError("File %s already exists" %(filename)) + f = pycbc.io.HFile(filename, 'w') + parameters = self.parameters + if skip_fields is not None: + if not isinstance(skip_fields, list): + skip_fields = [skip_fields] + parameters = [p for p in parameters if p not in skip_fields] + # save the parameters + f.attrs['parameters'] = parameters + write_tbl = self.table[start_index:stop_index] + for p in parameters: + f[p] = write_tbl[p] + if write_compressed_waveforms and self.has_compressed_waveforms: + for tmplt_hash in write_tbl.template_hash: + compressed_waveform = pycbc.waveform.compress.CompressedWaveform.from_hdf( + self.filehandler, tmplt_hash, + load_now=True) + compressed_waveform.write_to_hdf(f, tmplt_hash) + return f
+ + +
+[docs] + def end_frequency(self, index): + """ Return the end frequency of the waveform at the given index value + """ + if hasattr(self.table[index], 'f_final'): + return self.table[index].f_final + + return pycbc.waveform.get_waveform_end_frequency( + self.table[index], + approximant=self.approximant(index), + **self.extra_args)
+ + +
+[docs] + def parse_approximant(self, approximant): + """Parses the given approximant argument, returning the approximant to + use for each template in self. This is done by calling + `parse_approximant_arg` using self's table as the array; see that + function for more details.""" + return parse_approximant_arg(approximant, self.table)
+ + +
+[docs] + def approximant(self, index): + """ Return the name of the approximant to use at the given index + """ + if 'approximant' not in self.table.fieldnames: + raise ValueError("approximant not found in input file and no " + "approximant was specified on initialization") + apx = self.table["approximant"][index] + if hasattr(apx, 'decode'): + apx = apx.decode() + return apx
+ + + def __len__(self): + return len(self.table) + +
+[docs] + def template_thinning(self, inj_filter_rejector): + """Remove templates from bank that are far from all injections.""" + if not inj_filter_rejector.enabled or \ + inj_filter_rejector.chirp_time_window is None: + # Do nothing! + return + + injection_parameters = inj_filter_rejector.injection_params.table + fref = inj_filter_rejector.f_lower + threshold = inj_filter_rejector.chirp_time_window + m1= self.table['mass1'] + m2= self.table['mass2'] + tau0_temp, _ = pycbc.pnutils.mass1_mass2_to_tau0_tau3(m1, m2, fref) + indices = [] + + sort = tau0_temp.argsort() + tau0_temp = tau0_temp[sort] + + for inj in injection_parameters: + tau0_inj, _ = \ + pycbc.pnutils.mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2, + fref) + lid = np.searchsorted(tau0_temp, tau0_inj - threshold) + rid = np.searchsorted(tau0_temp, tau0_inj + threshold) + inj_indices = sort[lid:rid] + indices.append(inj_indices) + + indices_combined = np.concatenate(indices) + indices_unique= np.unique(indices_combined) + self.table = self.table[indices_unique]
+ + +
+[docs] + def ensure_standard_filter_columns(self, low_frequency_cutoff=None): + """ Initialize FilterBank common fields + + Parameters + ---------- + low_frequency_cutoff: {float, None}, Optional + A low frequency cutoff which overrides any given within the + template bank file. + """ + + # Make sure we have a template duration field + if not hasattr(self.table, 'template_duration'): + self.table = self.table.add_fields(np.zeros(len(self.table), + dtype=np.float32), 'template_duration') + + # Make sure we have a f_lower field + if low_frequency_cutoff is not None: + if not hasattr(self.table, 'f_lower'): + vec = np.zeros(len(self.table), dtype=np.float32) + self.table = self.table.add_fields(vec, 'f_lower') + self.table['f_lower'][:] = low_frequency_cutoff + + self.min_f_lower = min(self.table['f_lower']) + if self.f_lower is None and self.min_f_lower == 0.: + raise ValueError('Invalid low-frequency cutoff settings')
+
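A brief usage sketch for `TemplateBank`; the bank file name is hypothetical and is assumed to contain the usual mass and spin fields:

from pycbc.waveform.bank import TemplateBank

bank = TemplateBank('H1L1-BANK.hdf', approximant='IMRPhenomD')
print(len(bank), bank.parameters)
print(bank.table['mass1'][:3], bank.approximant(0))

# write the first 100 templates to a new file, overwriting if it exists
fp = bank.write_to_hdf('BANK-SUBSET.hdf', start_index=0, stop_index=100, force=True)
fp.close()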
+ + + +
+[docs] +class LiveFilterBank(TemplateBank): + def __init__(self, filename, sample_rate, minimum_buffer, + approximant=None, increment=8, parameters=None, + low_frequency_cutoff=None, + **kwds): + + self.increment = increment + self.filename = filename + self.sample_rate = sample_rate + self.minimum_buffer = minimum_buffer + self.f_lower = low_frequency_cutoff + + super(LiveFilterBank, self).__init__(filename, approximant=approximant, + parameters=parameters, **kwds) + self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff) + self.param_lookup = {} + for i, p in enumerate(self.table): + key = (p.mass1, p.mass2, p.spin1z, p.spin2z) + assert(key not in self.param_lookup) # Uh, oh, template confusion! + self.param_lookup[key] = i + +
+[docs] + def round_up(self, num): + """Determine the length to use for this waveform by rounding. + + Parameters + ---------- + num : int + Proposed size of waveform in samples. + + Returns + ------- + size: int + The rounded size to use for the waveform buffer in samples. + This is calculated using an internal `increment` attribute, which + determines the discreteness of the rounding. + """ + inc = self.increment + size = np.ceil(num / self.sample_rate / inc) * self.sample_rate * inc + return size
+ + +
+[docs] + def getslice(self, sindex): + instance = copy(self) + instance.table = self.table[sindex] + return instance
+ + +
+[docs] + def id_from_param(self, param_tuple): + """Get the index of this template based on its param tuple + + Parameters + ---------- + param_tuple : tuple + Tuple of the parameters which uniquely identify this template + + Returns + -------- + index : int + The ordered index that this template has in the template bank. + """ + return self.param_lookup[param_tuple]
+ + + def __getitem__(self, index): + if isinstance(index, slice): + return self.getslice(index) + + return self.get_template(index) + +
+[docs] + def freq_resolution_for_template(self, index): + """Compute the correct resolution for a frequency series that contains + a given template in the bank. + """ + from pycbc.waveform.waveform import props + + time_duration = self.minimum_buffer + time_duration += 0.5 + params = props(self.table[index]) + params.pop('approximant') + approximant = self.approximant(index) + waveform_duration = pycbc.waveform.get_waveform_filter_length_in_time( + approximant, **params + ) + if waveform_duration is None: + raise RuntimeError( + f'Template waveform {approximant} not recognized!' + ) + time_duration += waveform_duration + td_samples = self.round_up(time_duration * self.sample_rate) + return self.sample_rate / float(td_samples)
+ + +
+[docs] + def get_template(self, index, delta_f=None): + """Calculate and return the frequency-domain waveform for the template + with the given index. The frequency resolution can optionally be given. + + Parameters + ---------- + index: int + Index of the template in the bank. + delta_f: float, optional + Resolution of the resulting frequency series. If not given, it is + calculated from the time duration of the template. + + Returns + ------- + htilde: FrequencySeries + Template waveform in the frequency domain. + """ + approximant = self.approximant(index) + f_end = self.end_frequency(index) + flow = self.table[index].f_lower + + if delta_f is None: + delta_f = self.freq_resolution_for_template(index) + + flen = int(self.sample_rate / (2 * delta_f) + 1) + + if f_end is None or f_end >= (flen * delta_f): + f_end = (flen - 1) * delta_f + + logging.info( + "Generating %s, duration %s s, index %i, starting from %s Hz", + approximant, + 1.0 / delta_f, + index, + flow + ) + + # Get the waveform filter + distance = 1.0 / DYN_RANGE_FAC + htilde = pycbc.waveform.get_waveform_filter( + zeros(flen, dtype=np.complex64), self.table[index], + approximant=approximant, f_lower=flow, f_final=f_end, + delta_f=delta_f, delta_t=1.0 / self.sample_rate, distance=distance, + **self.extra_args) + + # If available, record the total duration (which may + # include ringdown) and the duration up to merger since they will be + # erased by the type conversion below. + ttotal = template_duration = -1 + time_offset = None + if hasattr(htilde, 'length_in_time'): + ttotal = htilde.length_in_time + if hasattr(htilde, 'chirp_length'): + template_duration = htilde.chirp_length + if hasattr(htilde, 'time_offset'): + time_offset = htilde.time_offset + + self.table[index].template_duration = template_duration + + htilde = htilde.astype(np.complex64) + htilde.f_lower = flow + htilde.min_f_lower = self.min_f_lower + htilde.end_idx = int(f_end / htilde.delta_f) + htilde.params = self.table[index] + htilde.chirp_length = template_duration + htilde.length_in_time = ttotal + htilde.approximant = approximant + htilde.end_frequency = f_end + + if time_offset: + htilde.time_offset = time_offset + + # Add sigmasq as a method of this instance + htilde.sigmasq = types.MethodType(sigma_cached, htilde) + + htilde.id = self.id_from_param((htilde.params.mass1, + htilde.params.mass2, + htilde.params.spin1z, + htilde.params.spin2z)) + return htilde
+
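A hedged construction sketch for `LiveFilterBank`; the file name is hypothetical, and the sample rate and buffer values are only representative of a low-latency configuration:

from pycbc.waveform.bank import LiveFilterBank

bank = LiveFilterBank('H1L1-BANK.hdf', sample_rate=2048, minimum_buffer=16,
                      low_frequency_cutoff=20.0, approximant='SPAtmplt')
htilde = bank[0]               # frequency-domain template; delta_f set via round_up
print(1.0 / htilde.delta_f)    # duration of the rounded waveform buffer in seconds
print(htilde.id == bank.id_from_param((htilde.params.mass1, htilde.params.mass2,
                                       htilde.params.spin1z, htilde.params.spin2z)))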
+ + + +
+[docs] +class FilterBank(TemplateBank): + def __init__(self, filename, filter_length, delta_f, dtype, + out=None, max_template_length=None, + approximant=None, parameters=None, + enable_compressed_waveforms=True, + low_frequency_cutoff=None, + waveform_decompression_method=None, + **kwds): + self.out = out + self.dtype = dtype + self.f_lower = low_frequency_cutoff + self.filename = filename + self.delta_f = delta_f + self.N = (filter_length - 1 ) * 2 + self.delta_t = 1.0 / (self.N * self.delta_f) + self.filter_length = filter_length + self.max_template_length = max_template_length + self.enable_compressed_waveforms = enable_compressed_waveforms + self.waveform_decompression_method = waveform_decompression_method + + super(FilterBank, self).__init__(filename, approximant=approximant, + parameters=parameters, **kwds) + self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff) + +
+[docs] + def get_decompressed_waveform(self, tempout, index, f_lower=None, + approximant=None, df=None): + """Returns a frequency domain decompressed waveform for the template + in the bank corresponding to the index taken in as an argument. The + decompressed waveform is obtained by interpolating in frequency space, + the amplitude and phase points for the compressed template that are + read in from the bank.""" + + from pycbc.waveform.waveform import props + from pycbc.waveform import get_waveform_filter_length_in_time + + # Get the template hash corresponding to the template index taken in as argument + tmplt_hash = self.table.template_hash[index] + + # Read the compressed waveform from the bank file + compressed_waveform = pycbc.waveform.compress.CompressedWaveform.from_hdf( + self.filehandler, tmplt_hash, + load_now=True) + + # Get the interpolation method to be used to decompress the waveform + if self.waveform_decompression_method is not None : + decompression_method = self.waveform_decompression_method + else : + decompression_method = compressed_waveform.interpolation + logging.info("Decompressing waveform using %s", decompression_method) + + if df is not None : + delta_f = df + else : + delta_f = self.delta_f + + # Create memory space for writing the decompressed waveform + decomp_scratch = FrequencySeries(tempout[0:self.filter_length], delta_f=delta_f, copy=False) + + # Get the decompressed waveform + hdecomp = compressed_waveform.decompress(out=decomp_scratch, f_lower=f_lower, interpolation=decompression_method) + p = props(self.table[index]) + p.pop('approximant') + try: + tmpltdur = self.table[index].template_duration + except AttributeError: + tmpltdur = None + if tmpltdur is None or tmpltdur==0.0 : + tmpltdur = get_waveform_filter_length_in_time(approximant, **p) + hdecomp.chirp_length = tmpltdur + hdecomp.length_in_time = hdecomp.chirp_length + return hdecomp
+ + +
+[docs] + def generate_with_delta_f_and_max_freq(self, t_num, max_freq, delta_f, + low_frequency_cutoff=None, + cached_mem=None): + """Generate the template with index t_num using custom length.""" + approximant = self.approximant(t_num) + # Don't want to use INTERP waveforms in here + if approximant.endswith('_INTERP'): + approximant = approximant.replace('_INTERP', '') + # Using SPAtmplt here is bad as the stored cbrt and logv get + # recalculated as we change delta_f values. Fall back to TaylorF2 + # in lalsimulation. + if approximant == 'SPAtmplt': + approximant = 'TaylorF2' + if cached_mem is None: + wav_len = int(max_freq / delta_f) + 1 + cached_mem = zeros(wav_len, dtype=np.complex64) + if self.has_compressed_waveforms and self.enable_compressed_waveforms: + htilde = self.get_decompressed_waveform(cached_mem, t_num, + f_lower=low_frequency_cutoff, + approximant=approximant, + df=delta_f) + else : + htilde = pycbc.waveform.get_waveform_filter( + cached_mem, self.table[t_num], approximant=approximant, + f_lower=low_frequency_cutoff, f_final=max_freq, delta_f=delta_f, + distance=1./DYN_RANGE_FAC, delta_t=1./(2.*max_freq)) + return htilde
+ + + def __getitem__(self, index): + # Make new memory for templates if we aren't given output memory + if self.out is None: + tempout = zeros(self.filter_length, dtype=self.dtype) + else: + tempout = self.out + + approximant = self.approximant(index) + f_end = self.end_frequency(index) + if f_end is None or f_end >= (self.filter_length * self.delta_f): + f_end = (self.filter_length-1) * self.delta_f + + # Find the start frequency, if variable + f_low = find_variable_start_frequency(approximant, + self.table[index], + self.f_lower, + self.max_template_length) + logging.info('%s: generating %s from %s Hz' % (index, approximant, f_low)) + + # Clear the storage memory + poke = tempout.data # pylint:disable=unused-variable + tempout.clear() + + # Get the waveform filter + distance = 1.0 / DYN_RANGE_FAC + if self.has_compressed_waveforms and self.enable_compressed_waveforms: + htilde = self.get_decompressed_waveform(tempout, index, f_lower=f_low, + approximant=approximant, df=None) + else : + htilde = pycbc.waveform.get_waveform_filter( + tempout[0:self.filter_length], self.table[index], + approximant=approximant, f_lower=f_low, f_final=f_end, + delta_f=self.delta_f, delta_t=self.delta_t, distance=distance, + **self.extra_args) + + # If available, record the total duration (which may + # include ringdown) and the duration up to merger since they will be + # erased by the type conversion below. + ttotal = template_duration = None + if hasattr(htilde, 'length_in_time'): + ttotal = htilde.length_in_time + if hasattr(htilde, 'chirp_length'): + template_duration = htilde.chirp_length + + self.table[index].template_duration = template_duration + + htilde = htilde.astype(self.dtype) + htilde.f_lower = f_low + htilde.min_f_lower = self.min_f_lower + htilde.end_idx = int(f_end / htilde.delta_f) + htilde.params = self.table[index] + htilde.chirp_length = template_duration + htilde.length_in_time = ttotal + htilde.approximant = approximant + htilde.end_frequency = f_end + + # Add sigmasq as a method of this instance + htilde.sigmasq = types.MethodType(sigma_cached, htilde) + htilde._sigmasq = {} + return htilde
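And a corresponding sketch for the offline `FilterBank`; the bank file is hypothetical, and the PSD is one of the analytic models provided by `pycbc.psd`:

import numpy as np
import pycbc.psd
from pycbc.waveform.bank import FilterBank

delta_f = 1.0 / 256
flen = int(1024 / delta_f) + 1        # frequency bins up to 1024 Hz
bank = FilterBank('H1L1-BANK.hdf', flen, delta_f, np.complex64,
                  low_frequency_cutoff=30.0, approximant='SPAtmplt')
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 20.0)

htilde = bank[0]                      # generated into freshly allocated memory
sigma = np.sqrt(htilde.sigmasq(psd))  # normalization used in matched filtering
print(htilde.approximant, htilde.length_in_time, sigma)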
+ + + +
+[docs] +def find_variable_start_frequency(approximant, parameters, f_start, max_length, + delta_f = 1): + """ Find a frequency value above the starting frequency that results in a + waveform shorter than max_length. + """ + if (f_start is None): + f = parameters.f_lower + elif (max_length is not None): + l = max_length + 1 + f = f_start - delta_f + while l > max_length: + f += delta_f + l = pycbc.waveform.get_waveform_filter_length_in_time(approximant, + parameters, f_lower=f) + else : + f = f_start + return f
+ + + +
+[docs] +class FilterBankSkyMax(TemplateBank): + def __init__(self, filename, filter_length, delta_f, + dtype, out_plus=None, out_cross=None, + max_template_length=None, parameters=None, + low_frequency_cutoff=None, **kwds): + self.out_plus = out_plus + self.out_cross = out_cross + self.dtype = dtype + self.f_lower = low_frequency_cutoff + self.filename = filename + self.delta_f = delta_f + self.N = (filter_length - 1 ) * 2 + self.delta_t = 1.0 / (self.N * self.delta_f) + self.filter_length = filter_length + self.max_template_length = max_template_length + + super(FilterBankSkyMax, self).__init__(filename, parameters=parameters, + **kwds) + + self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff) + + def __getitem__(self, index): + # Make new memory for templates if we aren't given output memory + if self.out_plus is None: + tempoutplus = zeros(self.filter_length, dtype=self.dtype) + else: + tempoutplus = self.out_plus + if self.out_cross is None: + tempoutcross = zeros(self.filter_length, dtype=self.dtype) + else: + tempoutcross = self.out_cross + + approximant = self.approximant(index) + + # Get the end of the waveform if applicable (only for SPAtmplt atm) + f_end = self.end_frequency(index) + if f_end is None or f_end >= (self.filter_length * self.delta_f): + f_end = (self.filter_length-1) * self.delta_f + + # Find the start frequency, if variable + f_low = find_variable_start_frequency(approximant, + self.table[index], + self.f_lower, + self.max_template_length) + logging.info('%s: generating %s from %s Hz', index, approximant, f_low) + + # What does this do??? + poke1 = tempoutplus.data # pylint:disable=unused-variable + poke2 = tempoutcross.data # pylint:disable=unused-variable + + # Clear the storage memory + tempoutplus.clear() + tempoutcross.clear() + + # Get the waveform filter + distance = 1.0 / DYN_RANGE_FAC + hplus, hcross = pycbc.waveform.get_two_pol_waveform_filter( + tempoutplus[0:self.filter_length], + tempoutcross[0:self.filter_length], self.table[index], + approximant=approximant, f_lower=f_low, + f_final=f_end, delta_f=self.delta_f, delta_t=self.delta_t, + distance=distance, **self.extra_args) + + if hasattr(hplus, 'chirp_length') and hplus.chirp_length is not None: + self.table[index].template_duration = hplus.chirp_length + + hplus = hplus.astype(self.dtype) + hcross = hcross.astype(self.dtype) + hplus.f_lower = f_low + hcross.f_lower = f_low + hplus.min_f_lower = self.min_f_lower + hcross.min_f_lower = self.min_f_lower + hplus.end_frequency = f_end + hcross.end_frequency = f_end + hplus.end_idx = int(hplus.end_frequency / hplus.delta_f) + hcross.end_idx = int(hplus.end_frequency / hplus.delta_f) + hplus.params = self.table[index] + hcross.params = self.table[index] + hplus.approximant = approximant + hcross.approximant = approximant + + # Add sigmasq as a method of this instance + hplus.sigmasq = types.MethodType(sigma_cached, hplus) + hplus._sigmasq = {} + hcross.sigmasq = types.MethodType(sigma_cached, hcross) + hcross._sigmasq = {} + + return hplus, hcross
+ + + +__all__ = ('sigma_cached', 'boolargs_from_apprxstr', 'add_approximant_arg', + 'parse_approximant_arg', 'tuple_to_hash', 'TemplateBank', + 'LiveFilterBank', 'FilterBank', 'find_variable_start_frequency', + 'FilterBankSkyMax') +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/compress.html b/latest/html/_modules/pycbc/waveform/compress.html new file mode 100644 index 00000000000..9ceb8e5943e --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/compress.html @@ -0,0 +1,897 @@ + + + + + + pycbc.waveform.compress — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.waveform.compress

+# Copyright (C) 2016  Alex Nitz, Collin Capano
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" Utilities for handling frequency compressed an unequally spaced frequency
+domain waveforms.
+"""
+import lal, numpy, logging, h5py
+from pycbc import filter
+from scipy import interpolate
+from pycbc.types import FrequencySeries, real_same_precision_as
+from pycbc.waveform import utils
+from pycbc.scheme import schemed
+from pycbc.io.hdf import HFile
+
+
+[docs] +def rough_time_estimate(m1, m2, flow, fudge_length=1.1, fudge_min=0.02): + """ A very rough estimate of the duration of the waveform. + + An estimate of the waveform duration starting from flow. This is intended + to be fast but not necessarily accurate. It should be an overestimate of + the length. It is derived from a simplification of the leading-order (0PN) + post-Newtonian terms and includes a fudge factor for possible ringdown, etc. + + Parameters + ---------- + m1: float + mass of first component object in solar masses + m2: float + mass of second component object in solar masses + flow: float + starting frequency of the waveform + fudge_length: optional, {1.1, float} + Factor to multiply length estimate by to ensure it is a conservative + value + fudge_min: optional, {0.02, float} + Minimum signal duration that can be returned. This should be long + enough to encompass the ringdown and errors in the precise end time. + + Returns + ------- + time: float + Time from flow until the end of the waveform + """ + m = m1 + m2 + msun = m * lal.MTSUN_SI + t = 5.0 / 256.0 * m * m * msun / (m1 * m2) / \ + (numpy.pi * msun * flow) ** (8.0 / 3.0) + + # fudge factoriness + return .022 if t < 0 else (t + fudge_min) * fudge_length
+ + +
+[docs] +def mchirp_compression(m1, m2, fmin, fmax, min_seglen=0.02, df_multiple=None): + """Return the frequencies needed to compress a waveform with the given + chirp mass. This is based on the estimate in rough_time_estimate. + + Parameters + ---------- + m1: float + mass of first component object in solar masses + m2: float + mass of second component object in solar masses + fmin : float + The starting frequency of the compressed waveform. + fmax : float + The ending frequency of the compressed waveform. + min_seglen : float + The inverse of this gives the maximum frequency step that is used. + df_multiple : {None, float} + Make the compressed sampling frequencies a multiple of the given value. + If None provided, the returned sample points can have any floating + point value. + + Returns + ------- + array + The frequencies at which to evaluate the compressed waveform. + """ + sample_points = [] + f = fmin + while f < fmax: + if df_multiple is not None: + f = int(f/df_multiple)*df_multiple + sample_points.append(f) + f += 1.0 / rough_time_estimate(m1, m2, f, fudge_min=min_seglen) + # add the last point + if sample_points[-1] < fmax: + sample_points.append(fmax) + return numpy.array(sample_points)
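For example (values are illustrative only), the rough duration estimate and the resulting compression frequencies for a binary-neutron-star-like system:

from pycbc.waveform.compress import rough_time_estimate, mchirp_compression

print(rough_time_estimate(1.4, 1.4, 30.0))   # conservative duration in seconds from 30 Hz
freqs = mchirp_compression(1.4, 1.4, 30.0, 1024.0, df_multiple=1.0/256)
print(len(freqs), freqs[:5])                 # sparse frequencies, spaced more widely at high f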
+ + +
+[docs] +def spa_compression(htilde, fmin, fmax, min_seglen=0.02, + sample_frequencies=None): + """Returns the frequencies needed to compress the given frequency domain + waveform. This is done by estimating t(f) of the waveform using the + stationary phase approximation. + + Parameters + ---------- + htilde : FrequencySeries + The waveform to compress. + fmin : float + The starting frequency of the compressed waveform. + fmax : float + The ending frequency of the compressed waveform. + min_seglen : float + The inverse of this gives the maximum frequency step that is used. + sample_frequencies : {None, array} + The frequencies that the waveform is evaluated at. If None, will + retrieve the frequencies from the waveform's sample_frequencies + attribute. + + Returns + ------- + array + The frequencies at which to evaluate the compressed waveform. + """ + if sample_frequencies is None: + sample_frequencies = htilde.sample_frequencies.numpy() + kmin = int(fmin/htilde.delta_f) + kmax = int(fmax/htilde.delta_f) + tf = abs(utils.time_from_frequencyseries(htilde, + sample_frequencies=sample_frequencies).data[kmin:kmax]) + sample_frequencies = sample_frequencies[kmin:kmax] + sample_points = [] + f = fmin + while f < fmax: + f = int(f/htilde.delta_f)*htilde.delta_f + sample_points.append(f) + jj = numpy.searchsorted(sample_frequencies, f) + f += 1./(tf[jj:].max()+min_seglen) + # add the last point + if sample_points[-1] < fmax: + sample_points.append(fmax) + return numpy.array(sample_points)
+ + +compression_algorithms = { + 'mchirp': mchirp_compression, + 'spa': spa_compression + } + +def _vecdiff(htilde, hinterp, fmin, fmax, psd=None): + return abs(filter.overlap_cplx(htilde, htilde, + low_frequency_cutoff=fmin, + high_frequency_cutoff=fmax, + normalized=False, psd=psd) + - filter.overlap_cplx(htilde, hinterp, + low_frequency_cutoff=fmin, + high_frequency_cutoff=fmax, + normalized=False, psd=psd)) + +
+[docs] +def vecdiff(htilde, hinterp, sample_points, psd=None): + """Computes a statistic indicating between which sample points a waveform + and the interpolated waveform differ the most. + """ + vecdiffs = numpy.zeros(sample_points.size-1, dtype=float) + for kk,thisf in enumerate(sample_points[:-1]): + nextf = sample_points[kk+1] + vecdiffs[kk] = abs(_vecdiff(htilde, hinterp, thisf, nextf, psd=psd)) + return vecdiffs
+ + +
+[docs] +def compress_waveform(htilde, sample_points, tolerance, interpolation, + precision, decomp_scratch=None, psd=None): + """Retrieves the amplitude and phase at the desired sample points, and adds + frequency points in order to ensure that the interpolated waveform + has a mismatch with the full waveform that is <= the desired tolerance. The + mismatch is computed by finding 1-overlap between `htilde` and the + decompressed waveform; no maximimization over phase/time is done, a + PSD may be used. + + .. note:: + The decompressed waveform is only garaunteed to have a true mismatch + <= the tolerance for the given `interpolation` and for no PSD. + However, since no maximization over time/phase is performed when + adding points, the actual mismatch between the decompressed waveform + and `htilde` is better than the tolerance, using no PSD. Using a PSD + does increase the mismatch, and can lead to mismatches > than the + desired tolerance, but typically by only a factor of a few worse. + + Parameters + ---------- + htilde : FrequencySeries + The waveform to compress. + sample_points : array + The frequencies at which to store the amplitude and phase. More points + may be added to this, depending on the desired tolerance. + tolerance : float + The maximum mismatch to allow between a decompressed waveform and + `htilde`. + interpolation : str + The interpolation to use for decompressing the waveform when computing + overlaps. + precision : str + The precision being used to generate and store the compressed waveform + points. + decomp_scratch : {None, FrequencySeries} + Optionally provide scratch space for decompressing the waveform. The + provided frequency series must have the same `delta_f` and length + as `htilde`. + psd : {None, FrequencySeries} + The psd to use for calculating the overlap between the decompressed + waveform and the original full waveform. + + Returns + ------- + CompressedWaveform + The compressed waveform data; see `CompressedWaveform` for details. + """ + fmin = sample_points.min() + df = htilde.delta_f + sample_index = (sample_points / df).astype(int) + amp = utils.amplitude_from_frequencyseries(htilde) + phase = utils.phase_from_frequencyseries(htilde) + + comp_amp = amp.take(sample_index) + comp_phase = phase.take(sample_index) + if decomp_scratch is None: + outdf = df + else: + outdf = None + hdecomp = fd_decompress(comp_amp, comp_phase, sample_points, + out=decomp_scratch, df=outdf, f_lower=fmin, + interpolation=interpolation) + kmax = min(len(htilde), len(hdecomp)) + htilde = htilde[:kmax] + hdecomp = hdecomp[:kmax] + mismatch = 1. - filter.overlap(hdecomp, htilde, psd=psd, + low_frequency_cutoff=fmin) + if mismatch > tolerance: + # we'll need the difference in the waveforms as a function of frequency + vecdiffs = vecdiff(htilde, hdecomp, sample_points, psd=psd) + + # We will find where in the frequency series the interpolated waveform + # has the smallest overlap with the full waveform, add a sample point + # there, and re-interpolate. 
We repeat this until the overall mismatch + # is <= the desired tolerance + added_points = [] + while mismatch > tolerance: + minpt = vecdiffs.argmax() + # add a point at the frequency halfway between minpt and minpt+1 + add_freq = sample_points[[minpt, minpt+1]].mean() + addidx = int(round(add_freq/df)) + # ensure that only new points are added + if addidx in sample_index: + diffidx = vecdiffs.argsort() + addpt = -1 + while addidx in sample_index: + addpt -= 1 + try: + minpt = diffidx[addpt] + except IndexError: + raise ValueError("unable to compress to desired tolerance") + add_freq = sample_points[[minpt, minpt+1]].mean() + addidx = int(round(add_freq/df)) + new_index = numpy.zeros(sample_index.size+1, dtype=int) + new_index[:minpt+1] = sample_index[:minpt+1] + new_index[minpt+1] = addidx + new_index[minpt+2:] = sample_index[minpt+1:] + sample_index = new_index + sample_points = (sample_index * df).astype( + real_same_precision_as(htilde)) + # get the new compressed points + comp_amp = amp.take(sample_index) + comp_phase = phase.take(sample_index) + # update the vecdiffs and mismatch + hdecomp = fd_decompress(comp_amp, comp_phase, sample_points, + out=decomp_scratch, df=outdf, + f_lower=fmin, interpolation=interpolation) + hdecomp = hdecomp[:kmax] + new_vecdiffs = numpy.zeros(vecdiffs.size+1) + new_vecdiffs[:minpt] = vecdiffs[:minpt] + new_vecdiffs[minpt+2:] = vecdiffs[minpt+1:] + new_vecdiffs[minpt:minpt+2] = vecdiff(htilde, hdecomp, + sample_points[minpt:minpt+2], + psd=psd) + vecdiffs = new_vecdiffs + mismatch = 1. - filter.overlap(hdecomp, htilde, psd=psd, + low_frequency_cutoff=fmin) + added_points.append(addidx) + logging.info("mismatch: %f, N points: %i (%i added)" %(mismatch, + len(comp_amp), len(added_points))) + + return CompressedWaveform(sample_points, comp_amp, comp_phase, + interpolation=interpolation, + tolerance=tolerance, mismatch=mismatch, + precision=precision)
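A hedged end-to-end sketch: generate a frequency-domain waveform, choose sample points with `mchirp_compression`, and compress it. The waveform parameters and tolerance are example values only:

from pycbc.waveform import get_fd_waveform
from pycbc.waveform.compress import mchirp_compression, compress_waveform

m1 = m2 = 1.4
htilde, _ = get_fd_waveform(approximant='TaylorF2', mass1=m1, mass2=m2,
                            f_lower=30.0, delta_f=1.0/256, f_final=1024.0)
points = mchirp_compression(m1, m2, 30.0, 1024.0, df_multiple=htilde.delta_f)
comp = compress_waveform(htilde, points, tolerance=0.001,
                         interpolation='inline_linear', precision='double')
print(comp.mismatch, len(comp.sample_points))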
+ + +_precision_map = { + 'float32': 'single', + 'float64': 'double', + 'complex64': 'single', + 'complex128': 'double' +} + +_complex_dtypes = { + 'single': numpy.complex64, + 'double': numpy.complex128 +} + +_real_dtypes = { + 'single': numpy.float32, + 'double': numpy.float64 +} + +
+[docs] +@schemed("pycbc.waveform.decompress_") +def inline_linear_interp(amp, phase, sample_frequencies, output, + df, f_lower, imin, start_index): + """Generate a frequency-domain waveform via linear interpolation + from sampled amplitude and phase. The sample frequency locations + for the amplitude and phase must be the same. This function may + be less accurate than scipy's linear interpolation, but should be + much faster. Additionally, it is 'schemed' and so may run under + either CPU or GPU schemes. + + This function is not ordinarily called directly, but rather by + giving the argument 'interpolation' the value 'inline_linear' + when calling the function 'fd_decompress' below. + + Parameters + ---------- + amp : array + The amplitude of the waveform at the sample frequencies. + phase : array + The phase of the waveform at the sample frequencies. + sample_frequencies : array + The frequency (in Hz) of the waveform at the sample frequencies. + output : {None, FrequencySeries} + The output array to save the decompressed waveform to. If this contains + slots for frequencies > the maximum frequency in sample_frequencies, + the rest of the values are zeroed. If not provided, must provide a df. + df : {None, float} + The frequency step to use for the decompressed waveform. Must be + provided if out is None. + f_lower : float + The frequency to start the decompression at. All values at + frequencies less than this will be 0 in the decompressed waveform. + imin : int + The index at which to start in the sampled frequency series. Must + therefore be 0 <= imin < len(sample_frequencies) + start_index : int + The index at which to start in the output frequency; + i.e., ceil(f_lower/df). + + Returns + ------- + output : FrequencySeries + If out was provided, writes to that array. Otherwise, a new + FrequencySeries with the decompressed waveform. + + """ + return
+ + +
+[docs] +def fd_decompress(amp, phase, sample_frequencies, out=None, df=None, + f_lower=None, interpolation='inline_linear'): + """Decompresses an FD waveform using the given amplitude, phase, and the + frequencies at which they are sampled at. + + Parameters + ---------- + amp : array + The amplitude of the waveform at the sample frequencies. + phase : array + The phase of the waveform at the sample frequencies. + sample_frequencies : array + The frequency (in Hz) of the waveform at the sample frequencies. + out : {None, FrequencySeries} + The output array to save the decompressed waveform to. If this contains + slots for frequencies > the maximum frequency in sample_frequencies, + the rest of the values are zeroed. If not provided, must provide a df. + df : {None, float} + The frequency step to use for the decompressed waveform. Must be + provided if out is None. + f_lower : {None, float} + The frequency to start the decompression at. If None, will use whatever + the lowest frequency is in sample_frequencies. All values at + frequencies less than this will be 0 in the decompressed waveform. + interpolation : {'inline_linear', str} + The interpolation to use for the amplitude and phase. Default is + 'inline_linear'. If 'inline_linear' a custom interpolater is used. + Otherwise, ``scipy.interpolate.interp1d`` is used; for other options, + see possible values for that function's ``kind`` argument. + + Returns + ------- + out : FrequencySeries + If out was provided, writes to that array. Otherwise, a new + FrequencySeries with the decompressed waveform. + """ + precision = _precision_map[sample_frequencies.dtype.name] + if _precision_map[amp.dtype.name] != precision or \ + _precision_map[phase.dtype.name] != precision: + raise ValueError("amp, phase, and sample_points must all have the " + "same precision") + + if out is None: + if df is None: + raise ValueError("Either provide output memory or a df") + hlen = int(numpy.ceil(sample_frequencies.max()/df+1)) + out = FrequencySeries(numpy.zeros(hlen, + dtype=_complex_dtypes[precision]), copy=False, + delta_f=df) + else: + # check for precision compatibility + if out.precision == 'double' and precision == 'single': + raise ValueError("cannot cast single precision to double") + df = out.delta_f + hlen = len(out) + if f_lower is None: + imin = 0 # pylint:disable=unused-variable + f_lower = sample_frequencies[0] + start_index = 0 + else: + if f_lower >= sample_frequencies.max(): + raise ValueError("f_lower is > than the maximum sample frequency") + if f_lower < sample_frequencies.min(): + raise ValueError("f_lower is < than the minimum sample frequency") + imin = int(numpy.searchsorted(sample_frequencies, f_lower, + side='right')) - 1 # pylint:disable=unused-variable + start_index = int(numpy.ceil(f_lower/df)) + if start_index >= hlen: + raise ValueError('requested f_lower >= largest frequency in out') + # interpolate the amplitude and the phase + if interpolation == "inline_linear": + # Call the scheme-dependent function + inline_linear_interp(amp, phase, sample_frequencies, out, + df, f_lower, imin, start_index) + else: + # use scipy for fancier interpolation + sample_frequencies = numpy.array(sample_frequencies) + amp = numpy.array(amp) + phase = numpy.array(phase) + outfreq = out.sample_frequencies.numpy() + amp_interp = interpolate.interp1d(sample_frequencies, amp, + kind=interpolation, + bounds_error=False, + fill_value=0., + assume_sorted=True) + phase_interp = interpolate.interp1d(sample_frequencies, phase, + kind=interpolation, + 
bounds_error=False, + fill_value=0., + assume_sorted=True) + A = amp_interp(outfreq) + phi = phase_interp(outfreq) + out.data[:] = A*numpy.cos(phi) + (1j)*A*numpy.sin(phi) + return out
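A minimal direct-call sketch for `fd_decompress`; the amplitude and phase arrays here are synthetic placeholders standing in for values read from a compressed bank:

import numpy
from pycbc.waveform.compress import fd_decompress

sample_points = numpy.array([30., 40., 60., 100., 200., 400., 1024.])
amp = 1e-21 * (sample_points / 30.) ** (-7. / 6.)   # toy amplitude profile
phase = 2. * numpy.pi * sample_points               # toy phase profile
htilde = fd_decompress(amp, phase, sample_points, df=1.0/64,
                       interpolation='inline_linear')
print(len(htilde), htilde.delta_f)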
+ + + +
+[docs] +class CompressedWaveform(object): + """Class that stores information about a compressed waveform. + + Parameters + ---------- + sample_points : {array, h5py.Dataset} + The frequency points at which the compressed waveform is sampled. + amplitude : {array, h5py.Dataset} + The amplitude of the waveform at the given `sample_points`. + phase : {array, h5py.Dataset} + The phase of the waveform at the given `sample_points`. + interpolation : {None, str} + The interpolation that was used when compressing the waveform for + computing tolerance. This is also the default interpolation used when + decompressing; see `decompress` for details. + tolerance : {None, float} + The tolerance that was used when compressing the waveform. + mismatch : {None, float} + The actual mismatch between the decompressed waveform (using the given + `interpolation`) and the full waveform. + precision : {'double', str} + The precision used to generate the compressed waveform's amplitude and + phase points. Default is 'double'. + load_to_memory : {True, bool} + If `sample_points`, `amplitude`, and/or `phase` is an hdf dataset, they + will be cached in memory the first time they are accessed. Default is + True. + + Attributes + ---------- + load_to_memory : bool + Whether or not to load `sample_points`/`amplitude`/`phase` into memory + the first time they are accessed, if they are hdf datasets. Can be + set directly to toggle this behavior. + interpolation : str + The interpolation that was used when compressing the waveform, for + checking the mismatch. Also the default interpolation used when + decompressing. + tolerance : {None, float} + The tolerance that was used when compressing the waveform. + mismatch : {None, float} + The mismatch between the decompressed waveform and the original + waveform. + precision : {'double', str} + The precision used to generate and store the compressed waveform + points. Options are 'double' or 'single'; default is 'double + """ + + def __init__(self, sample_points, amplitude, phase, + interpolation=None, tolerance=None, mismatch=None, + precision='double', load_to_memory=True): + self._sample_points = sample_points + self._amplitude = amplitude + self._phase = phase + self._cache = {} + self.load_to_memory = load_to_memory + # if sample points, amplitude, and/or phase are hdf datasets, + # save their filenames + self._filenames = {} + self._groupnames = {} + for arrname in ['sample_points', 'amplitude', 'phase']: + try: + fname = getattr(self, '_{}'.format(arrname)).file.filename + gname = getattr(self, '_{}'.format(arrname)).name + except AttributeError: + fname = None + gname = None + self._filenames[arrname] = fname + self._groupnames[arrname] = gname + # metadata + self.interpolation = interpolation + self.tolerance = tolerance + self.mismatch = mismatch + self.precision = precision + + def _get(self, param): + val = getattr(self, '_%s' %param) + if isinstance(val, h5py.Dataset): + try: + val = self._cache[param] + except KeyError: + try: + val = val[:] + except ValueError: + # this can happen if the file is closed; if so, open it + # and get the data + fp = HFile(self._filenames[param], 'r') + val = fp[self._groupnames[param]][:] + fp.close() + if self.load_to_memory: + self._cache[param] = val + return val + + @property + def amplitude(self): + """The amplitude of the waveform at the `sample_points`. + + This is always returned as an array; the same logic as for + `sample_points` is used to determine whether or not to cache in + memory. 
+ + Returns + ------- + amplitude : Array + """ + return self._get('amplitude') + + @property + def phase(self): + """The phase of the waveform at the `sample_points`. + + This is always returned as an array; the same logic as for + `sample_points` is used to determine whether or not to cache in + memory. + + Returns + ------- + phase : Array + """ + return self._get('phase') + + @property + def sample_points(self): + """The frequencies at which the compressed waveform is sampled. + + This is + always returned as an array, even if the stored `sample_points` is an + hdf dataset. If `load_to_memory` is True and the stored points are + an hdf dataset, the `sample_points` will be cached in memory the first + time this attribute is accessed. + + Returns + ------- + sample_points : Array + """ + return self._get('sample_points') + +
+[docs] + def clear_cache(self): + """Clear self's cache of amplitude, phase, and sample_points.""" + self._cache.clear()
+ + +
+[docs] + def decompress(self, out=None, df=None, f_lower=None, interpolation=None): + """Decompress self. + + Parameters + ---------- + out : {None, FrequencySeries} + Write the decompressed waveform to the given frequency series. The + decompressed waveform will have the same `delta_f` as `out`. + Either this or `df` must be provided. + df : {None, float} + Decompress the waveform such that its `delta_f` has the given + value. Either this or `out` must be provided. + f_lower : {None, float} + The starting frequency at which to decompress the waveform. Cannot + be less than the minimum frequency in `sample_points`. If `None` + provided, will default to the minimum frequency in `sample_points`. + interpolation : {None, str} + The interpolation to use for decompressing the waveform. If `None` + provided, will default to `self.interpolation`. + + Returns + ------- + FrequencySeries + The decompressed waveform. + """ + if f_lower is None: + # use the minimum of the sample points + f_lower = self.sample_points.min() + if interpolation is None: + interpolation = self.interpolation + return fd_decompress(self.amplitude, self.phase, self.sample_points, + out=out, df=df, f_lower=f_lower, + interpolation=interpolation)
+ + +
+[docs] + def write_to_hdf(self, fp, template_hash, root=None, precision=None): + """Write the compressed waveform to the given hdf file handler. + + The waveform is written to: + `fp['[{root}/]compressed_waveforms/{template_hash}/{param}']`, + where `param` is the `sample_points`, `amplitude`, and `phase`. The + `interpolation`, `tolerance`, `mismatch` and `precision` are saved + to the group's attributes. + + Parameters + ---------- + fp : h5py.File + An open hdf file to write the compressed waveform to. + template_hash : {hash, int, str} + A hash, int, or string to map the template to the waveform. + root : {None, str} + Put the `compressed_waveforms` group in the given directory in the + hdf file. If `None`, `compressed_waveforms` will be the root + directory. + precision : {None, str} + Cast the saved parameters to the given precision before saving. If + None provided, will use whatever their current precision is. This + will raise an error if the parameters have single precision but the + requested precision is double. + """ + if root is None: + root = '' + else: + root = '%s/'%(root) + if precision is None: + precision = self.precision + elif precision == 'double' and self.precision == 'single': + raise ValueError("cannot cast single precision to double") + outdtype = _real_dtypes[precision] + group = '%scompressed_waveforms/%s' %(root, str(template_hash)) + for param in ['amplitude', 'phase', 'sample_points']: + fp['%s/%s' %(group, param)] = self._get(param).astype(outdtype) + fp_group = fp[group] + fp_group.attrs['mismatch'] = self.mismatch + fp_group.attrs['interpolation'] = self.interpolation + fp_group.attrs['tolerance'] = self.tolerance + fp_group.attrs['precision'] = precision
+ + +
+[docs] + @classmethod + def from_hdf(cls, fp, template_hash, root=None, load_to_memory=True, + load_now=False): + """Load a compressed waveform from the given hdf file handler. + + The waveform is retrieved from: + `fp['[{root}/]compressed_waveforms/{template_hash}/{param}']`, + where `param` is the `sample_points`, `amplitude`, and `phase`. + + Parameters + ---------- + fp : h5py.File + An open hdf file to read the compressed waveform from. + template_hash : {hash, int, str} + The id of the waveform. + root : {None, str} + Retrieve the `compressed_waveforms` group from the given group in + the hdf file. + If `None`, `compressed_waveforms` will be assumed to be in the + top level. + load_to_memory : {True, bool} + Set the `load_to_memory` attribute to the given value in the + returned instance. + load_now : {False, bool} + Immediately load the `sample_points`/`amplitude`/`phase` to memory. + + + Returns + ------- + CompressedWaveform + An instance of this class with parameters loaded from the hdf file. + """ + if root is None: + root = '' + else: + root = '%s/'%(root) + group = '%scompressed_waveforms/%s' %(root, str(template_hash)) + fp_group = fp[group] + sample_points = fp_group['sample_points'] + amp = fp_group['amplitude'] + phase = fp_group['phase'] + if load_now: + sample_points = sample_points[:] + amp = amp[:] + phase = phase[:] + return cls(sample_points, amp, phase, + interpolation=fp_group.attrs['interpolation'], + tolerance=fp_group.attrs['tolerance'], + mismatch=fp_group.attrs['mismatch'], + precision=fp_group.attrs['precision'], + load_to_memory=load_to_memory)
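A hedged round-trip sketch, re-using the `comp` object from the `compress_waveform` example above; the file name and template hash are placeholders:

from pycbc.io.hdf import HFile
from pycbc.waveform.compress import CompressedWaveform

with HFile('compressed_bank.hdf', 'w') as fp:
    comp.write_to_hdf(fp, 1234)

with HFile('compressed_bank.hdf', 'r') as fp:
    comp2 = CompressedWaveform.from_hdf(fp, 1234, load_now=True)

htilde2 = comp2.decompress(df=1.0/256)
print(comp2.mismatch, len(htilde2))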
+
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/decompress_cpu.html b/latest/html/_modules/pycbc/waveform/decompress_cpu.html new file mode 100644 index 00000000000..e73aa937b8b --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/decompress_cpu.html @@ -0,0 +1,181 @@ + + + + + + pycbc.waveform.decompress_cpu — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.waveform.decompress_cpu

+# Copyright (C) 2016  Alex Nitz, Collin Capano
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" Utilities for handling frequency compressed an unequally spaced frequency
+domain waveforms.
+"""
+import numpy
+from ..types import real_same_precision_as
+from ..types import complex_same_precision_as
+from .decompress_cpu_cython import decomp_ccode_double, decomp_ccode_float
+
+
+[docs] +def inline_linear_interp(amp, phase, sample_frequencies, output, + df, f_lower, imin, start_index): + + rprec = real_same_precision_as(output) + cprec = complex_same_precision_as(output) + sample_frequencies = numpy.array(sample_frequencies, copy=False, + dtype=rprec) + amp = numpy.array(amp, copy=False, dtype=rprec) + phase = numpy.array(phase, copy=False, dtype=rprec) + sflen = len(sample_frequencies) + h = numpy.array(output.data, copy=False, dtype=cprec) + hlen = len(output) + delta_f = float(df) + if output.precision == 'single': + decomp_ccode_float(h, delta_f, hlen, start_index, sample_frequencies, + amp, phase, sflen, imin) + else: + decomp_ccode_double(h, delta_f, hlen, start_index, sample_frequencies, + amp, phase, sflen, imin) + + return output
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/generator.html b/latest/html/_modules/pycbc/waveform/generator.html new file mode 100644 index 00000000000..bcfb2fc3b96 --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/generator.html @@ -0,0 +1,1504 @@ + + + + + + pycbc.waveform.generator — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.waveform.generator

+# Copyright (C) 2016  Collin Capano, Alex Nitz, Christopher Biwer
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides classes for generating waveforms.
+"""
+import os
+import logging
+
+from abc import (ABCMeta, abstractmethod)
+
+from . import waveform
+from .waveform import (FailedWaveformError)
+from . import ringdown
+from . import supernovae
+from . import waveform_modes
+from pycbc import transforms
+from pycbc.types import TimeSeries
+from pycbc.waveform import parameters
+from pycbc.waveform.utils import apply_fseries_time_shift, taper_timeseries, \
+                                 ceilpow2, apply_fd_time_shift
+from pycbc.detector import Detector
+from pycbc.pool import use_mpi
+import lal as _lal
+from pycbc import strain
+
+
+# utility functions/class
+failed_counter = 0
+
+
+[docs] +class BaseGenerator(object): + """A wrapper class to call a waveform generator with a set of frozen + parameters and a set of variable parameters. The frozen parameters and + values, along with a list of variable parameter names, are set at + initialization. This way, repeated calls can be made to the underlying + generator by simply passing a list of values for the variable parameters + to this class's generate function. + + Parameters + ---------- + generator : function + The function that is called for waveform generation. + variable_args : {(), list} + A tuple or list of strings giving the names and order of variable + parameters that will be passed to the waveform generator when the + generate function is called. + record_failures : boolean + Store output files containing the parameters of failed waveform + generation. Default is False. + \**frozen_params : + These keyword arguments are the ones that will be frozen in the + waveform generator. For a list of possible parameters, see + pycbc.waveform.cbc_parameters. + + Attributes + ---------- + generator : function + The function that is called for waveform generation. + variable_args : tuple + The list of names of variable arguments. + frozen_params : dict + A dictionary of the frozen keyword arguments that are always passed + to the waveform generator function. + current_params : dict + A dictionary of the frozen keyword arguments and variable arguments + that were last passed to the waveform generator. + """ + def __init__(self, generator, variable_args=(), record_failures=False, + **frozen_params): + self.generator = generator + self.variable_args = tuple(variable_args) + self.frozen_params = frozen_params + # we'll keep a dictionary of the current parameters for fast + # generation + self.current_params = frozen_params.copy() + # keep a list of functions to call before waveform generation + self._pregenerate_functions = [] + + # If we are under mpi, then failed waveform will be stored by + # mpi rank to avoid file writing conflicts. We'll check for this + # upfront + self.record_failures = (record_failures or + ('PYCBC_RECORD_FAILED_WAVEFORMS' in os.environ)) + self.mpi_enabled, _, self.mpi_rank = use_mpi() + + @property + def static_args(self): + """Returns a dictionary of the static arguments.""" + return self.frozen_params + +
+[docs] + def generate(self, **kwargs): + """Generates a waveform from the keyword args. The current params + are updated with the given kwargs, then the generator is called. + """ + self.current_params.update(kwargs) + return self._generate_from_current()
+ + + def _add_pregenerate(self, func): + """ Adds a function that will be called by the generator function + before waveform generation. + """ + self._pregenerate_functions.append(func) + + def _postgenerate(self, res): + """Allows the waveform returned by the generator function to be + manipulated before returning. + """ + return res + + def _gdecorator(generate_func): + """A decorator that allows for seemless pre/post manipulation of + the waveform generator function. + """ + def dostuff(self): + for func in self._pregenerate_functions: + self.current_params = func(self.current_params) + res = generate_func(self) # pylint:disable=not-callable + return self._postgenerate(res) + return dostuff + + @_gdecorator + def _generate_from_current(self): + """Generates a waveform from the current parameters. + """ + try: + new_waveform = self.generator(**self.current_params) + return new_waveform + except RuntimeError as e: + if self.record_failures: + from pycbc.io.hdf import dump_state, HFile + + global failed_counter + + if self.mpi_enabled: + outname = 'failed/params_%s.hdf' % self.mpi_rank + else: + outname = 'failed/params.hdf' + + if not os.path.exists('failed'): + os.makedirs('failed') + + with HFile(outname) as f: + dump_state(self.current_params, f, + dsetname=str(failed_counter)) + failed_counter += 1 + + # we'll get a RuntimeError if lalsimulation failed to generate + # the waveform for whatever reason + strparams = ' | '.join(['{}: {}'.format( + p, str(val)) for p, val in self.current_params.items()]) + raise FailedWaveformError("Failed to generate waveform with " + "parameters:\n{}\nError was: {}" + .format(strparams, e))
+ + + +
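To illustrate the frozen/variable split, one could wrap ``get_fd_waveform`` directly (a sketch; in practice the CBC subclasses below are the intended entry points):

>>> from pycbc.waveform import get_fd_waveform
>>> from pycbc.waveform.generator import BaseGenerator
>>> gen = BaseGenerator(get_fd_waveform, variable_args=['mass1', 'mass2'],
...                     approximant='TaylorF2', delta_f=1./32, f_lower=30.)
>>> hp, hc = gen.generate(mass1=1.4, mass2=1.4)    # only the variable args change
>>> hp2, hc2 = gen.generate(mass1=1.6, mass2=1.2)  # frozen params are reused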
+[docs] +class BaseCBCGenerator(BaseGenerator): + """Adds ability to convert from various derived parameters to parameters + needed by the waveform generators. + """ + + possible_args = set(parameters.td_waveform_params + + parameters.fd_waveform_params + + ['taper']) + """set: The set of names of arguments that may be used in the + `variable_args` or `frozen_params`. + """ + + def __init__(self, generator, variable_args=(), **frozen_params): + super(BaseCBCGenerator, self).__init__(generator, + variable_args=variable_args, **frozen_params) + # decorate the generator function with a list of functions that convert + # parameters to those used by the waveform generation interface + all_args = set(list(self.frozen_params.keys()) + + list(self.variable_args)) + # check that there are no unused (non-calibration) parameters + calib_args = set([a for a in self.variable_args if + a.startswith('calib_')]) + all_args = all_args - calib_args + unused_args = all_args - self.possible_args + if len(unused_args): + logging.warning("WARNING: The following parameters are generally " + "not used by CBC waveform generators: %s. If you " + "have provided a transform that converted these " + "into known parameters (e.g., mchirp, q to " + "mass1, mass2) or you are using a custom model " + "that uses these parameters, you can safely " + "ignore this message.", ', '.join(unused_args))
+ + + +
+[docs] +class FDomainCBCGenerator(BaseCBCGenerator): + """Generates frequency-domain CBC waveforms in the radiation frame. + + Uses `waveform.get_fd_waveform` as a generator function to create + frequency- domain CBC waveforms in the radiation frame; i.e., with no + detector response function applied. For more details, see `BaseGenerator`. + + Examples + -------- + Initialize a generator: + + >>> from pycbc.waveform.generator import FDomainCBCGenerator + >>> generator = FDomainCBCGenerator(variable_args=['mass1', 'mass2'], delta_f=1./32, f_lower=30., approximant='TaylorF2') + + Create a waveform with the variable arguments (in this case, mass1, mass2): + + >>> generator.generate(mass1=1.4, mass2=1.4) + (<pycbc.types.frequencyseries.FrequencySeries at 0x1110c1450>, + <pycbc.types.frequencyseries.FrequencySeries at 0x1110c1510>) + + """ + def __init__(self, variable_args=(), **frozen_params): + super(FDomainCBCGenerator, self).__init__(waveform.get_fd_waveform, + variable_args=variable_args, **frozen_params)
+ + + +
+[docs] +class FDomainCBCModesGenerator(BaseCBCGenerator): + """Generates frequency-domain CBC waveform modes. + + Uses :py:func:`waveform_modes.get_fd_waveform_modes` as a generator + function to create frequency-domain CBC waveforms mode-by-mode, without + applying spherical harmonics. + + For details, on methods and arguments, see :py:class:`BaseGenerator`. + """ + def __init__(self, variable_args=(), **frozen_params): + super(FDomainCBCModesGenerator, self).__init__( + waveform_modes.get_fd_waveform_modes, + variable_args=variable_args, **frozen_params)
+ + + +
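A hedged sketch of mode-by-mode generation; the approximant name is only a placeholder for one that ``waveform_modes.get_fd_waveform_modes`` actually supports, and the returned dictionary is keyed by mode as described for the time-domain modes generator below:

>>> from pycbc.waveform.generator import FDomainCBCModesGenerator
>>> # 'IMRPhenomXHM' stands in for any mode-capable approximant
>>> gen = FDomainCBCModesGenerator(variable_args=['mass1', 'mass2'],
...     approximant='IMRPhenomXHM', delta_f=1./32, f_lower=20.)
>>> modes = gen.generate(mass1=38., mass2=29.)
>>> for mode, (ulm, vlm) in modes.items():
...     print(mode, len(ulm))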
+[docs] +class TDomainCBCGenerator(BaseCBCGenerator): + """Create time domain CBC waveforms in the radiation frame. + + Uses waveform.get_td_waveform as a generator function to create time- + domain CBC waveforms in the radiation frame; i.e., with no detector + response function applied. For more details, see `BaseGenerator`. + + Examples + -------- + Initialize a generator: + + >>> from pycbc.waveform.generator import TDomainCBCGenerator + >>> generator = TDomainCBCGenerator(variable_args=['mass1', 'mass2'], delta_t=1./4096, f_lower=30., approximant='TaylorT4') + + Create a waveform with the variable arguments (in this case, mass1, mass2): + + >>> generator.generate(mass1=2., mass2=1.3) + (<pycbc.types.timeseries.TimeSeries at 0x10e546710>, + <pycbc.types.timeseries.TimeSeries at 0x115f37690>) + + """ + def __init__(self, variable_args=(), **frozen_params): + super(TDomainCBCGenerator, self).__init__(waveform.get_td_waveform, + variable_args=variable_args, **frozen_params) + + def _postgenerate(self, res): + """Applies a taper if it is in current params. + """ + hp, hc = res + try: + hp = taper_timeseries(hp, tapermethod=self.current_params['taper']) + hc = taper_timeseries(hc, tapermethod=self.current_params['taper']) + except KeyError: + pass + return hp, hc
+ + + +
+[docs] +class TDomainCBCModesGenerator(BaseCBCGenerator): + """Generates time domain CBC waveform modes. + + Uses :py:func:`waveform_modes.get_td_waveform_modes` as a generator + function to create time-domain CBC waveforms mode-by-mode, without applying + spherical harmonics. The ``generate`` function returns a dictionary of + modes -> (real, imag) part of the complex time series. + + For details, on methods and arguments, see :py:class:`BaseGenerator`. + """ + def __init__(self, variable_args=(), **frozen_params): + super(TDomainCBCModesGenerator, self).__init__( + waveform_modes.get_td_waveform_modes, + variable_args=variable_args, **frozen_params) + + def _postgenerate(self, res): + """Applies a taper if it is in current params. + """ + if 'taper' in self.current_params: + tapermethod = self.current_params['taper'] + for mode in res: + ulm, vlm = res[mode] + ulm = taper_timeseries(ulm, tapermethod=tapermethod) + vlm = taper_timeseries(vlm, tapermethod=tapermethod) + res[mode] = (ulm, vlm) + return res
+ + + +
+[docs] +class FDomainMassSpinRingdownGenerator(BaseGenerator): + """Uses ringdown.get_fd_from_final_mass_spin as a generator function to + create frequency-domain ringdown waveforms with higher modes in the + radiation frame; i.e., with no detector response function applied. + For more details, see BaseGenerator. + + Examples + -------- + Initialize a generator: + + >>> from pycbc.waveform.generator import FDomainMassSpinRingdownGenerator + >>> generator = FDomainMassSpinRingdownGenerator(variable_args=['final_mass', + 'final_spin','amp220','amp210','phi220','phi210'], lmns=['221','211'], + delta_f=1./32, f_lower=30., f_final=500) + + Create a ringdown with the variable arguments: + + >>> generator.generate(final_mass=65., final_spin=0.7, + amp220=1e-21, amp210=1./10, phi220=0., phi210=0.) + (<pycbc.types.frequencyseries.FrequencySeries at 0x51614d0>, + <pycbc.types.frequencyseries.FrequencySeries at 0x5161550>) + + """ + def __init__(self, variable_args=(), **frozen_params): + super(FDomainMassSpinRingdownGenerator, self).__init__(ringdown.get_fd_from_final_mass_spin, + variable_args=variable_args, **frozen_params)
+ + + +
+[docs] +class FDomainFreqTauRingdownGenerator(BaseGenerator): + """Uses ringdown.get_fd_from_freqtau as a generator function to + create frequency-domain ringdown waveforms with higher modes in the + radiation frame; i.e., with no detector response function applied. + For more details, see BaseGenerator. + + Examples + -------- + Initialize a generator: + + >>> from pycbc.waveform.generator import FDomainFreqTauRingdownGenerator + >>> generator = FDomainFreqTauRingdownGenerator(variable_args=['f_220', + 'tau_220','f_210','tau_210','amp220','amp210','phi220','phi210'], + lmns=['221','211'], delta_f=1./32, f_lower=30., f_final=500) + + Create a ringdown with the variable arguments: + + >>> generator.generate(f_220=317., tau_220=0.003, f_210=274., tau_210=0.003, + amp220=1e-21, amp210=1./10, phi220=0., phi210=0.) + (<pycbc.types.frequencyseries.FrequencySeries at 0x51614d0>, + <pycbc.types.frequencyseries.FrequencySeries at 0x5161550>) + + """ + def __init__(self, variable_args=(), **frozen_params): + super(FDomainFreqTauRingdownGenerator, self).__init__(ringdown.get_fd_from_freqtau, + variable_args=variable_args, **frozen_params)
+ + + +
+[docs] +class TDomainMassSpinRingdownGenerator(BaseGenerator): + """Uses ringdown.get_td_from_final_mass_spin as a generator function to + create time-domain ringdown waveforms with higher modes in the + radiation frame; i.e., with no detector response function applied. + For more details, see BaseGenerator. + + Examples + -------- + Initialize a generator: + + >>> from pycbc.waveform.generator import TDomainMassSpinRingdownGenerator + >>> generator = TDomainMassSpinRingdownGenerator(variable_args=['final_mass', + 'final_spin','amp220','amp210','phi220','phi210'], lmns=['221','211'], + delta_t=1./2048) + + Create a ringdown with the variable arguments: + + >>> generator.generate(final_mass=65., final_spin=0.7, + amp220=1e-21, amp210=1./10, phi220=0., phi210=0.) + (<pycbc.types.frequencyseries.FrequencySeries at 0x51614d0>, + <pycbc.types.frequencyseries.FrequencySeries at 0x5161550>) + + """ + def __init__(self, variable_args=(), **frozen_params): + super(TDomainMassSpinRingdownGenerator, self).__init__(ringdown.get_td_from_final_mass_spin, + variable_args=variable_args, **frozen_params)
+ + + +
+[docs] +class TDomainFreqTauRingdownGenerator(BaseGenerator): + """Uses ringdown.get_td_from_freqtau as a generator function to + create time-domain ringdown waveforms with higher modes in the + radiation frame; i.e., with no detector response function applied. + For more details, see BaseGenerator. + + Examples + -------- + Initialize a generator: + + >>> from pycbc.waveform.generator import FDomainFreqTauRingdownGenerator + >>> generator = TDomainFreqTauRingdownGenerator(variable_args=['f_220', + 'tau_220','f_210','tau_210','amp220','amp210','phi220','phi210'], + lmns=['221','211'], delta_t=1./2048) + + Create a ringdown with the variable arguments: + + >>> generator.generate(f_220=317., tau_220=0.003, f_210=274., tau_210=0.003, + amp220=1e-21, amp210=1./10, phi220=0., phi210=0.) + (<pycbc.types.frequencyseries.FrequencySeries at 0x51614d0>, + <pycbc.types.frequencyseries.FrequencySeries at 0x5161550>) + + """ + def __init__(self, variable_args=(), **frozen_params): + super(TDomainFreqTauRingdownGenerator, self).__init__(ringdown.get_td_from_freqtau, + variable_args=variable_args, **frozen_params)
+ + + +
+[docs] +class TDomainSupernovaeGenerator(BaseGenerator): + """Uses supernovae.py to create time domain core-collapse supernovae waveforms + using a set of Principal Components provided in a .hdf file. + """ + def __init__(self, variable_args=(), **frozen_params): + super(TDomainSupernovaeGenerator, + self).__init__(supernovae.get_corecollapse_bounce, + variable_args=variable_args, **frozen_params)
+ + + +# +# ============================================================================= +# +# Detector-frame generators +# +# ============================================================================= +# + + +
+[docs] +class BaseFDomainDetFrameGenerator(metaclass=ABCMeta): + """Base generator for frquency-domain waveforms in a detector frame. + + Parameters + ---------- + rFrameGeneratorClass : class + The class to use for generating the waveform in the radiation frame, + e.g., FDomainCBCGenerator. This should be the class, not an + instance of the class (the class will be initialized with the + appropriate arguments internally). + detectors : {None, list of strings} + The names of the detectors to use. If provided, all location parameters + must be included in either the variable args or the frozen params. If + None, the generate function will just return the plus polarization + returned by the rFrameGeneratorClass shifted by any desired time shift. + epoch : {float, lal.LIGOTimeGPS + The epoch start time to set the waveform to. A time shift = tc - epoch is + applied to waveforms before returning. + variable_args : {(), list or tuple} + A list or tuple of strings giving the names and order of parameters + that will be passed to the generate function. + \**frozen_params + Keyword arguments setting the parameters that will not be changed from + call-to-call of the generate function. + + Attributes + ---------- + detectors : dict + The dictionary of detectors that antenna patterns are calculated for + on each call of generate. If no detectors were provided, will be + ``{'RF': None}``, where "RF" means "radiation frame". + detector_names : list + The list of detector names. If no detectors were provided, then this + will be ['RF'] for "radiation frame". + current_params : dict + A dictionary of name, value pairs of the arguments that were last + used by the generate function. + rframe_generator : instance of rFrameGeneratorClass + The instance of the radiation-frame generator that is used for waveform + generation. All parameters in current_params except for the + location params are passed to this class's generate function. + frozen_location_args : dict + Any location parameters that were included in the frozen_params. + variable_args : tuple + The list of names of arguments that are passed to the generate + function. + + """ + + location_args = set([]) + """Set: Should be overriden by children classes with a set of parameters + that set the binary's location. 
+ """ + + def __init__(self, rFrameGeneratorClass, epoch, detectors=None, + variable_args=(), recalib=None, gates=None, **frozen_params): + # initialize frozen & current parameters: + self.current_params = frozen_params.copy() + self._static_args = frozen_params.copy() + # we'll separate out frozen location parameters from the frozen + # parameters that are sent to the rframe generator + self.frozen_location_args = {} + loc_params = set(frozen_params.keys()) & self.location_args + for param in loc_params: + self.frozen_location_args[param] = frozen_params.pop(param) + # set the order of the variable parameters + self.variable_args = tuple(variable_args) + # variables that are sent to the rFrame generator + rframe_variables = list(set(self.variable_args) - self.location_args) + # initialize the radiation frame generator + self.rframe_generator = rFrameGeneratorClass( + variable_args=rframe_variables, **frozen_params) + self.set_epoch(epoch) + # set calibration model + self.recalib = recalib + # if detectors are provided, convert to detector type; also ensure that + # location variables are specified + if detectors is not None: + self.detectors = {det: Detector(det) for det in detectors} + missing_args = [arg for arg in self.location_args if not + (arg in self.current_params or arg in self.variable_args)] + if any(missing_args): + raise ValueError("detectors provided, but missing location " + "parameters %s. " %(', '.join(missing_args)) + + "These must be either in the frozen params or the " + "variable args.") + else: + self.detectors = {'RF': None} + self.detector_names = sorted(self.detectors.keys()) + self.gates = gates + +
+[docs] + def set_epoch(self, epoch): + """Sets the epoch; epoch should be a float or a LIGOTimeGPS.""" + self._epoch = float(epoch)
+ + + @property + def static_args(self): + """Returns a dictionary of the static arguments.""" + return self._static_args + + @property + def epoch(self): + """The GPS start time of the frequency series returned by the generate + function. A time shift is applied to the waveform equal to tc-epoch. + Update by using ``set_epoch`` + """ + return _lal.LIGOTimeGPS(self._epoch) + +
+[docs] + @abstractmethod + def generate(self, **kwargs): + """The function that generates the waveforms. + """ + pass
+ + +
+[docs] + @abstractmethod + def select_rframe_generator(self, approximant): + """Method to select waveform generator based on an approximant.""" + pass
+
+ + + + +
+[docs] +class FDomainDetFrameGenerator(BaseFDomainDetFrameGenerator): + """Generates frequency-domain waveform in a specific frame. + + Generates a waveform using the given radiation frame generator class, + and applies the detector response function and appropriate time offset. + + Parameters + ---------- + rFrameGeneratorClass : class + The class to use for generating the waveform in the radiation frame, + e.g., FDomainCBCGenerator. This should be the class, not an + instance of the class (the class will be initialized with the + appropriate arguments internally). + detectors : {None, list of strings} + The names of the detectors to use. If provided, all location parameters + must be included in either the variable args or the frozen params. If + None, the generate function will just return the plus polarization + returned by the rFrameGeneratorClass shifted by any desired time shift. + epoch : {float, lal.LIGOTimeGPS + The epoch start time to set the waveform to. A time shift = tc - epoch is + applied to waveforms before returning. + variable_args : {(), list or tuple} + A list or tuple of strings giving the names and order of parameters + that will be passed to the generate function. + \**frozen_params + Keyword arguments setting the parameters that will not be changed from + call-to-call of the generate function. + + Attributes + ---------- + detectors : dict + The dictionary of detectors that antenna patterns are calculated for + on each call of generate. If no detectors were provided, will be + ``{'RF': None}``, where "RF" means "radiation frame". + detector_names : list + The list of detector names. If no detectors were provided, then this + will be ['RF'] for "radiation frame". + epoch : lal.LIGOTimeGPS + The GPS start time of the frequency series returned by the generate function. + A time shift is applied to the waveform equal to tc-epoch. Update by using + ``set_epoch``. + current_params : dict + A dictionary of name, value pairs of the arguments that were last + used by the generate function. + rframe_generator : instance of rFrameGeneratorClass + The instance of the radiation-frame generator that is used for waveform + generation. All parameters in current_params except for the + location params are passed to this class's generate function. + frozen_location_args : dict + Any location parameters that were included in the frozen_params. + variable_args : tuple + The list of names of arguments that are passed to the generate + function. + + Examples + -------- + Initialize a generator: + + >>> from pycbc.waveform.generator import FDomainDetFrameGenerator + >>> generator = FDomainDetFrameGenerator(waveform.FDomainCBCGenerator, 0., variable_args=['mass1', 'mass2', 'spin1z', 'spin2z', 'tc', 'ra', 'dec', 'polarization'], detectors=['H1', 'L1'], delta_f=1./64, f_lower=20., approximant='SEOBNRv2_ROM_DoubleSpin') + + Generate a waveform: + + >>> generator.generate(mass1=38.6, mass2=29.3, spin1z=0.33, spin2z=-0.94, tc=2.43, ra=1.37, dec=-1.26, polarization=2.76) + {'H1': <pycbc.types.frequencyseries.FrequencySeries at 0x116637350>, + 'L1': <pycbc.types.frequencyseries.FrequencySeries at 0x116637a50>} + + """ + + location_args = set(['tc', 'ra', 'dec', 'polarization']) + """set(['tc', 'ra', 'dec', 'polarization']): + The set of location parameters. These are not passed to the rFrame + generator class; instead, they are used to apply the detector response + function and/or shift the waveform in time. The parameters are: + + * tc: The GPS time of coalescence (should be geocentric time). 
+ * ra: Right ascension. + * dec: declination + * polarization: polarization. + + All of these must be provided in either the variable args or the + frozen params if detectors is not None. If detectors + is None, tc may optionally be provided. + """ + +
+[docs] + def generate(self, **kwargs): + """Generates a waveform, applies a time shift and the detector response + function from the given kwargs. + """ + self.current_params.update(kwargs) + rfparams = {param: self.current_params[param] + for param in kwargs if param not in self.location_args} + hp, hc = self.rframe_generator.generate(**rfparams) + if isinstance(hp, TimeSeries): + df = self.current_params['delta_f'] + hp = hp.to_frequencyseries(delta_f=df) + hc = hc.to_frequencyseries(delta_f=df) + # time-domain waveforms will not be shifted so that the peak amp + # happens at the end of the time series (as they are for f-domain), + # so we add an additional shift to account for it + tshift = 1./df - abs(hp._epoch) + else: + tshift = 0. + hp._epoch = hc._epoch = self._epoch + h = {} + if self.detector_names != ['RF']: + for detname, det in self.detectors.items(): + # apply detector response function + fp, fc = det.antenna_pattern(self.current_params['ra'], + self.current_params['dec'], + self.current_params['polarization'], + self.current_params['tc']) + thish = fp*hp + fc*hc + # apply the time shift + tc = self.current_params['tc'] + \ + det.time_delay_from_earth_center(self.current_params['ra'], + self.current_params['dec'], self.current_params['tc']) + h[detname] = apply_fd_time_shift(thish, tc+tshift, copy=False) + if self.recalib: + # recalibrate with given calibration model + h[detname] = \ + self.recalib[detname].map_to_adjust(h[detname], + **self.current_params) + else: + # no detector response, just use the + polarization + if 'tc' in self.current_params: + hp = apply_fd_time_shift(hp, self.current_params['tc']+tshift, + copy=False) + h['RF'] = hp + if self.gates is not None: + # resize all to nearest power of 2 + for d in h.values(): + d.resize(ceilpow2(len(d)-1) + 1) + h = strain.apply_gates_to_fd(h, self.gates) + return h
+ + +
+[docs] + @staticmethod + def select_rframe_generator(approximant): + """Returns a radiation frame generator class based on the approximant + string. + """ + return select_waveform_generator(approximant)
+
+ + + +
+[docs] +class FDomainDetFrameTwoPolGenerator(BaseFDomainDetFrameGenerator): + """Generates frequency-domain waveform in a specific frame. + + Generates both polarizations of a waveform using the given radiation frame + generator class, and applies the time shift. Detector response functions + are not applied. + + Parameters + ---------- + rFrameGeneratorClass : class + The class to use for generating the waveform in the radiation frame, + e.g., FDomainCBCGenerator. This should be the class, not an + instance of the class (the class will be initialized with the + appropriate arguments internally). + detectors : {None, list of strings} + The names of the detectors to use. If provided, all location parameters + must be included in either the variable args or the frozen params. If + None, the generate function will just return the plus polarization + returned by the rFrameGeneratorClass shifted by any desired time shift. + epoch : {float, lal.LIGOTimeGPS + The epoch start time to set the waveform to. A time shift = tc - epoch is + applied to waveforms before returning. + variable_args : {(), list or tuple} + A list or tuple of strings giving the names and order of parameters + that will be passed to the generate function. + \**frozen_params + Keyword arguments setting the parameters that will not be changed from + call-to-call of the generate function. + + Attributes + ---------- + detectors : dict + The dictionary of detectors that antenna patterns are calculated for + on each call of generate. If no detectors were provided, will be + ``{'RF': None}``, where "RF" means "radiation frame". + detector_names : list + The list of detector names. If no detectors were provided, then this + will be ['RF'] for "radiation frame". + epoch : lal.LIGOTimeGPS + The GPS start time of the frequency series returned by the generate function. + A time shift is applied to the waveform equal to tc-epoch. Update by using + ``set_epoch``. + current_params : dict + A dictionary of name, value pairs of the arguments that were last + used by the generate function. + rframe_generator : instance of rFrameGeneratorClass + The instance of the radiation-frame generator that is used for waveform + generation. All parameters in current_params except for the + location params are passed to this class's generate function. + frozen_location_args : dict + Any location parameters that were included in the frozen_params. + variable_args : tuple + The list of names of arguments that are passed to the generate + function. + + """ + location_args = set(['tc', 'ra', 'dec']) + """ set(['tc', 'ra', 'dec']): + The set of location parameters. These are not passed to the rFrame + generator class; instead, they are used to apply the detector response + function and/or shift the waveform in time. The parameters are: + + * tc: The GPS time of coalescence (should be geocentric time). + * ra: Right ascension. + * dec: declination + + All of these must be provided in either the variable args or the + frozen params if detectors is not None. If detectors + is None, tc may optionally be provided. + """ + +
+[docs] + def generate(self, **kwargs): + """Generates a waveform polarizations and applies a time shift. + + Returns + ------- + dict : + Dictionary of ``detector names -> (hp, hc)``, where ``hp, hc`` are + the plus and cross polarization, respectively. + """ + self.current_params.update(kwargs) + rfparams = {param: self.current_params[param] + for param in kwargs if param not in self.location_args} + hp, hc = self.rframe_generator.generate(**rfparams) + if isinstance(hp, TimeSeries): + df = self.current_params['delta_f'] + hp = hp.to_frequencyseries(delta_f=df) + hc = hc.to_frequencyseries(delta_f=df) + # time-domain waveforms will not be shifted so that the peak amp + # happens at the end of the time series (as they are for f-domain), + # so we add an additional shift to account for it + tshift = 1./df - abs(hp._epoch) + else: + tshift = 0. + hp._epoch = hc._epoch = self._epoch + h = {} + if self.detector_names != ['RF']: + for detname, det in self.detectors.items(): + # apply the time shift + tc = self.current_params['tc'] + \ + det.time_delay_from_earth_center(self.current_params['ra'], + self.current_params['dec'], self.current_params['tc']) + dethp = apply_fd_time_shift(hp, tc+tshift, copy=True) + dethc = apply_fd_time_shift(hc, tc+tshift, copy=True) + if self.recalib: + # recalibrate with given calibration model + dethp = self.recalib[detname].map_to_adjust( + dethp, **self.current_params) + dethc = self.recalib[detname].map_to_adjust( + dethc, **self.current_params) + h[detname] = (dethp, dethc) + else: + # no detector response, just use the + polarization + if 'tc' in self.current_params: + hp = apply_fd_time_shift(hp, self.current_params['tc']+tshift, + copy=False) + hc = apply_fd_time_shift(hc, self.current_params['tc']+tshift, + copy=False) + h['RF'] = (hp, hc) + if self.gates is not None: + # resize all to nearest power of 2 + hps = {} + hcs = {} + for det in h: + hp = h[det] + hc = h[det] + hp.resize(ceilpow2(len(hp)-1) + 1) + hc.resize(ceilpow2(len(hc)-1) + 1) + hps[det] = hp + hcs[det] = hc + hps = strain.apply_gates_to_fd(hps, self.gates) + hcs = strain.apply_gates_to_fd(hps, self.gates) + h = {det: (hps[det], hcs[det]) for det in h} + return h
+ + +
+[docs] + @staticmethod + def select_rframe_generator(approximant): + """Returns a radiation frame generator class based on the approximant + string. + """ + return select_waveform_generator(approximant)
+
+ + +
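A sketch analogous to the ``FDomainDetFrameGenerator`` example above, but here each detector maps to the time-shifted ``(hp, hc)`` pair with no antenna response applied; parameter values are illustrative:

>>> from pycbc.waveform.generator import (FDomainDetFrameTwoPolGenerator,
...                                       FDomainCBCGenerator)
>>> gen = FDomainDetFrameTwoPolGenerator(FDomainCBCGenerator, 0.,
...     variable_args=['mass1', 'mass2', 'tc', 'ra', 'dec'],
...     detectors=['H1', 'L1'], delta_f=1./64, f_lower=20.,
...     approximant='IMRPhenomD')
>>> pols = gen.generate(mass1=38.6, mass2=29.3, tc=2.43, ra=1.37, dec=-1.26)
>>> hp_h1, hc_h1 = pols['H1']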
+[docs] +class FDomainDetFrameTwoPolNoRespGenerator(BaseFDomainDetFrameGenerator): + """Generates frequency-domain waveform in a specific frame. + + Generates both polarizations of a waveform using the given radiation frame + generator class, and applies the time shift. Detector response functions + are not applied. + + Parameters + ---------- + rFrameGeneratorClass : class + The class to use for generating the waveform in the radiation frame, + e.g., FDomainCBCGenerator. This should be the class, not an + instance of the class (the class will be initialized with the + appropriate arguments internally). + detectors : {None, list of strings} + The names of the detectors to use. If provided, all location parameters + must be included in either the variable args or the frozen params. If + None, the generate function will just return the plus polarization + returned by the rFrameGeneratorClass shifted by any desired time shift. + epoch : {float, lal.LIGOTimeGPS + The epoch start time to set the waveform to. A time shift = tc - epoch is + applied to waveforms before returning. + variable_args : {(), list or tuple} + A list or tuple of strings giving the names and order of parameters + that will be passed to the generate function. + \**frozen_params + Keyword arguments setting the parameters that will not be changed from + call-to-call of the generate function. + + Attributes + ---------- + detectors : dict + The dictionary of detectors that antenna patterns are calculated for + on each call of generate. If no detectors were provided, will be + ``{'RF': None}``, where "RF" means "radiation frame". + detector_names : list + The list of detector names. If no detectors were provided, then this + will be ['RF'] for "radiation frame". + epoch : lal.LIGOTimeGPS + The GPS start time of the frequency series returned by the generate function. + A time shift is applied to the waveform equal to tc-epoch. Update by using + ``set_epoch``. + current_params : dict + A dictionary of name, value pairs of the arguments that were last + used by the generate function. + rframe_generator : instance of rFrameGeneratorClass + The instance of the radiation-frame generator that is used for waveform + generation. All parameters in current_params except for the + location params are passed to this class's generate function. + frozen_location_args : dict + Any location parameters that were included in the frozen_params. + variable_args : tuple + The list of names of arguments that are passed to the generate + function. + + """ + +
+[docs] + def generate(self, **kwargs): + """Generates a waveform polarizations + + Returns + ------- + dict : + Dictionary of ``detector names -> (hp, hc)``, where ``hp, hc`` are + the plus and cross polarization, respectively. + """ + self.current_params.update(kwargs) + hp, hc = self.rframe_generator.generate(**self.current_params) + if isinstance(hp, TimeSeries): + df = self.current_params['delta_f'] + hp = hp.to_frequencyseries(delta_f=df) + hc = hc.to_frequencyseries(delta_f=df) + # time-domain waveforms will not be shifted so that the peak amp + # happens at the end of the time series (as they are for f-domain), + # so we add an additional shift to account for it + tshift = 1./df - abs(hp._epoch) + hp = apply_fseries_time_shift(hp, tshift, copy=True) + hc = apply_fseries_time_shift(hc, tshift, copy=True) + + hp._epoch = hc._epoch = self._epoch + h = {} + + for detname in self.detectors: + if self.recalib: + # recalibrate with given calibration model + hp = self.recalib[detname].map_to_adjust( + hp, **self.current_params) + hc = self.recalib[detname].map_to_adjust( + hc, **self.current_params) + h[detname] = (hp.copy(), hc.copy()) + return h
+ + +
+[docs] + @staticmethod + def select_rframe_generator(approximant): + """Returns a radiation frame generator class based on the approximant + string. + """ + return select_waveform_generator(approximant)
+
+ + +
+[docs] +class FDomainDetFrameModesGenerator(BaseFDomainDetFrameGenerator): + """Generates frequency-domain waveform modes in a specific frame. + + Generates both polarizations of every waveform mode using the given + radiation frame generator class, and applies the time shift. Detector + response functions are not applied. + + Parameters + ---------- + rFrameGeneratorClass : class + The class to use for generating the waveform modes in the radiation + frame, e.g., :py:class:`FDomainCBCModesGenerator`. This should be the + class, not an instance of the class (the class will be initialized with + the appropriate arguments internally). The class should have a generate + function that returns a dictionary of waveforms keyed by the modes. + detectors : {None, list of strings} + The names of the detectors to use. If provided, all location parameters + must be included in either the variable args or the frozen params. If + None, the generate function will just return the plus polarization + returned by the rFrameGeneratorClass shifted by any desired time shift. + epoch : {float, lal.LIGOTimeGPS + The epoch start time to set the waveform to. A time shift = tc - epoch is + applied to waveforms before returning. + variable_args : {(), list or tuple} + A list or tuple of strings giving the names and order of parameters + that will be passed to the generate function. + \**frozen_params + Keyword arguments setting the parameters that will not be changed from + call-to-call of the generate function. + + Attributes + ---------- + detectors : dict + The dictionary of detectors that antenna patterns are calculated for + on each call of generate. If no detectors were provided, will be + ``{'RF': None}``, where "RF" means "radiation frame". + detector_names : list + The list of detector names. If no detectors were provided, then this + will be ['RF'] for "radiation frame". + epoch : lal.LIGOTimeGPS + The GPS start time of the frequency series returned by the generate + function. A time shift is applied to the waveform equal to tc-epoch. + Update by using ``set_epoch``. + current_params : dict + A dictionary of name, value pairs of the arguments that were last + used by the generate function. + rframe_generator : instance of rFrameGeneratorClass + The instance of the radiation-frame generator that is used for waveform + generation. All parameters in current_params except for the + location params are passed to this class's generate function. + frozen_location_args : dict + Any location parameters that were included in the frozen_params. + variable_args : tuple + The list of names of arguments that are passed to the generate + function. + + """ + location_args = set(['tc', 'ra', 'dec']) + """ set(['tc', 'ra', 'dec']): + The set of location parameters. These are not passed to the rFrame + generator class; instead, they are used to apply the detector response + function and/or shift the waveform in time. The parameters are: + + * tc: The GPS time of coalescence (should be geocentric time). + * ra: Right ascension. + * dec: declination + + All of these must be provided in either the variable args or the + frozen params if detectors is not None. If detectors + is None, tc may optionally be provided. + """ + +
+[docs] + def generate(self, **kwargs): + """Generates and returns a waveform decompsed into separate modes. + + Returns + ------- + dict : + Dictionary of ``detector names -> modes -> (ulm, vlm)``, where + ``ulm, vlm`` are the frequency-domain representations of the real + and imaginary parts, respectively, of the complex time series + representation of the ``hlm``. + """ + self.current_params.update(kwargs) + rfparams = {param: self.current_params[param] + for param in kwargs if param not in self.location_args} + hlms = self.rframe_generator.generate(**rfparams) + h = {det: {} for det in self.detectors} + for mode in hlms: + ulm, vlm = hlms[mode] + if isinstance(ulm, TimeSeries): + df = self.current_params['delta_f'] + ulm = ulm.to_frequencyseries(delta_f=df) + vlm = vlm.to_frequencyseries(delta_f=df) + # time-domain waveforms will not be shifted so that the peak + # amplitude happens at the end of the time series (as they are + # for f-domain), so we add an additional shift to account for + # it + tshift = 1./df - abs(ulm._epoch) + else: + tshift = 0. + ulm._epoch = vlm._epoch = self._epoch + if self.detector_names != ['RF']: + for detname, det in self.detectors.items(): + # apply the time shift + tc = self.current_params['tc'] + \ + det.time_delay_from_earth_center( + self.current_params['ra'], + self.current_params['dec'], + self.current_params['tc']) + detulm = apply_fd_time_shift(ulm, tc+tshift, copy=True) + detvlm = apply_fd_time_shift(vlm, tc+tshift, copy=True) + if self.recalib: + # recalibrate with given calibration model + detulm = self.recalib[detname].map_to_adjust( + detulm, **self.current_params) + detvlm = self.recalib[detname].map_to_adjust( + detvlm, **self.current_params) + h[detname][mode] = (detulm, detvlm) + else: + # no detector response, just apply time shift + if 'tc' in self.current_params: + ulm = apply_fd_time_shift(ulm, + self.current_params['tc']+tshift, + copy=False) + vlm = apply_fd_time_shift(vlm, + self.current_params['tc']+tshift, + copy=False) + h['RF'][mode] = (ulm, vlm) + if self.gates is not None: + # resize all to nearest power of 2 + ulms = {} + vlms = {} + for det in h: + ulm, vlm = h[det][mode] + ulm.resize(ceilpow2(len(ulm)-1) + 1) + vlm.resize(ceilpow2(len(vlm)-1) + 1) + ulms[det] = ulm + vlms[det] = vlm + ulms = strain.apply_gates_to_fd(ulms, self.gates) + vlms = strain.apply_gates_to_fd(ulms, self.gates) + for det in ulms: + h[det][mode] = (ulms[det], vlms[det]) + return h
+ + +
+[docs] + @staticmethod + def select_rframe_generator(approximant): + """Returns a radiation frame generator class based on the approximant + string. + """ + return select_waveform_modes_generator(approximant)
+
+ + + +
+[docs] +class FDomainDirectDetFrameGenerator(BaseCBCGenerator): + """Generates frequency-domain waveforms directly in the detector frame. + + Uses :py:func:`waveform.get_fd_det_waveform` as a generator + function to create frequency-domain CBC waveforms that include the detector + response. + + For details on methods and arguments, see :py:class:`BaseCBCGenerator`. + """ + def __init__( + self, + rFrameGeneratorClass=None, + epoch=None, + detectors=None, + variable_args=(), + gates=None, + recalib=None, + **frozen_params + ): + + if rFrameGeneratorClass is not None: + raise ValueError( + f"{self.__class__.__name__} does not support using a radiation " + "frame generator class." + ) + + self.set_epoch(epoch) + self.detectors = detectors + + if gates is not None: + raise RuntimeError( + f"{self.__class__.__name__} does not support `gates`" + ) + if recalib is not None: + raise RuntimeError( + f"{self.__class__.__name__} does not support `recalib`" + ) + + if detectors is None: + raise ValueError( + f"Must specify detectors to use {self.__class__.__name__}." + ) + + super().__init__( + waveform.get_fd_det_waveform, + variable_args=variable_args, + **frozen_params + ) + 
+[docs] + def set_epoch(self, epoch): + """Sets the epoch; epoch should be a float or a LIGOTimeGPS.""" + self._epoch = float(epoch)
+ + + @property + def epoch(self): + """The GPS start time of the frequency series returned by the generate + function. A time shift is applied to the waveform equal to tc-epoch. + Update by using ``set_epoch`` + """ + return _lal.LIGOTimeGPS(self._epoch) + +
+[docs] + @staticmethod + def select_rframe_generator(approximant): + """Returns the radiation frame generator. + + Returns ``None`` since this class does not support generating waveforms + in the radiation frame. + """ + return None
+ + +
+[docs] + def generate(self, **kwargs): + """Generates and returns a waveform in the detector frame. + + Returns + ------- + dict : + Dictionary of ``detector names -> h``, where ``h`` is the waveform in + the specified detector. + """ + wfs = super().generate(ifos=self.detectors, **kwargs) + for det in self.detectors: + wfs[det]._epoch = self._epoch + wfs[det] = apply_fd_time_shift(wfs[det], kwargs["tc"], copy=False) + return wfs
+
+ + + +# +# ============================================================================= +# +# Helper functions +# +# ============================================================================= +# + + +
+[docs] +def select_waveform_generator(approximant): + """Returns the single-IFO generator for the approximant. + + Parameters + ---------- + approximant : str + Name of waveform approximant. Valid names can be found using + ``pycbc.waveform`` methods. + + Returns + ------- + generator : (PyCBC generator instance) + A waveform generator object. + + Examples + -------- + Get a list of available approximants: + >>> from pycbc import waveform + >>> waveform.fd_approximants() + >>> waveform.td_approximants() + >>> from pycbc.waveform import ringdown + >>> ringdown.ringdown_fd_approximants.keys() + + Get generator object: + >>> from pycbc.waveform.generator import select_waveform_generator + >>> select_waveform_generator(waveform.fd_approximants()[0]) + """ + # check if frequency-domain CBC waveform + if approximant in waveform.fd_approximants(): + return FDomainCBCGenerator + # check if time-domain CBC waveform + elif approximant in waveform.td_approximants(): + return TDomainCBCGenerator + # check if frequency-domain ringdown waveform + elif approximant in ringdown.ringdown_fd_approximants: + if approximant == 'FdQNMfromFinalMassSpin': + return FDomainMassSpinRingdownGenerator + elif approximant == 'FdQNMfromFreqTau': + return FDomainFreqTauRingdownGenerator + elif approximant in ringdown.ringdown_td_approximants: + if approximant == 'TdQNMfromFinalMassSpin': + return TDomainMassSpinRingdownGenerator + elif approximant == 'TdQNMfromFreqTau': + return TDomainFreqTauRingdownGenerator + # check if supernovae waveform: + elif approximant in supernovae.supernovae_td_approximants: + if approximant == 'CoreCollapseBounce': + return TDomainSupernovaeGenerator + # otherwise waveform approximant is not supported + else: + raise ValueError("%s is not a valid approximant." % approximant)
+ + + +
+[docs] +def select_waveform_modes_generator(approximant): + """Returns the single-IFO modes generator for the approximant. + + Parameters + ---------- + approximant : str + Name of waveform approximant. Valid names can be found using + ``pycbc.waveform`` methods. + + Returns + ------- + generator : (PyCBC generator instance) + A waveform generator object. + """ + # check if frequency-domain CBC waveform + if approximant in waveform.fd_approximants(): + return FDomainCBCModesGenerator + # check if time-domain CBC waveform + elif approximant in waveform.td_approximants(): + return TDomainCBCModesGenerator + # otherwise waveform approximant is not supported + raise ValueError("%s is not a valid approximant." % approximant)
+ +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/multiband.html b/latest/html/_modules/pycbc/waveform/multiband.html new file mode 100644 index 00000000000..7bf071ff7d8 --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/multiband.html @@ -0,0 +1,242 @@ + + + + + + pycbc.waveform.multiband — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.waveform.multiband

+""" Tools and functions to calculate interpolate waveforms using multi-banding
+"""
+import numpy
+
+from pycbc.types import TimeSeries, zeros
+
+
+
+[docs] +def multiband_fd_waveform(bands=None, lengths=None, overlap=0, **p): + """ Generate a fourier domain waveform using multibanding + + Speed up generation of a fouerier domain waveform using multibanding. This + allows for multi-rate sampling of the frequeny space. Each band is + smoothed and stitched together to produce the final waveform. The base + approximant must support 'f_ref' and 'f_final'. The other parameters + must be chosen carefully by the user. + + Parameters + ---------- + bands: list or str + The frequencies to split the waveform by. These should be chosen + so that the corresponding length include all the waveform's frequencies + within this band. + lengths: list or str + The corresponding length for each frequency band. This sets the + resolution of the subband and should be chosen carefully so that it is + sufficiently long to include all of the bands frequency content. + overlap: float + The frequency width to apply tapering between bands. + params: dict + The remaining keyworkd arguments passed to the base approximant + waveform generation. + + Returns + ------- + hp: pycbc.types.FrequencySeries + Plus polarization + hc: pycbc.type.FrequencySeries + Cross polarization + """ + from pycbc.waveform import get_fd_waveform + + if isinstance(bands, str): + bands = [float(s) for s in bands.split(' ')] + + if isinstance(lengths, str): + lengths = [float(s) for s in lengths.split(' ')] + + p['approximant'] = p['base_approximant'] + df = p['delta_f'] + fmax = p['f_final'] + flow = p['f_lower'] + + bands = [flow] + bands + [fmax] + dfs = [df] + [1.0 / l for l in lengths] + + dt = 1.0 / (2.0 * fmax) + tlen = int(1.0 / dt / df) + flen = tlen / 2 + 1 + wf_plus = TimeSeries(zeros(tlen, dtype=numpy.float32), + copy=False, delta_t=dt, epoch=-1.0/df) + wf_cross = TimeSeries(zeros(tlen, dtype=numpy.float32), + copy=False, delta_t=dt, epoch=-1.0/df) + + # Iterate over the sub-bands + for i in range(len(lengths)+1): + taper_start = taper_end = False + if i != 0: + taper_start = True + if i != len(lengths): + taper_end = True + + # Generate waveform for sub-band of full waveform + start = bands[i] + stop = bands[i+1] + p2 = p.copy() + p2['delta_f'] = dfs[i] + p2['f_lower'] = start + p2['f_final'] = stop + + if taper_start: + p2['f_lower'] -= overlap / 2.0 + + if taper_end: + p2['f_final'] += overlap / 2.0 + + tlen = int(1.0 / dt / dfs[i]) + flen = tlen / 2 + 1 + + hp, hc = get_fd_waveform(**p2) + + # apply window function to smooth over transition regions + kmin = int(p2['f_lower'] / dfs[i]) + kmax = int(p2['f_final'] / dfs[i]) + taper = numpy.hanning(int(overlap * 2 / dfs[i])) + + for wf, h in zip([wf_plus, wf_cross], [hp, hc]): + h = h.astype(numpy.complex64) + + if taper_start: + h[kmin:kmin + len(taper) // 2] *= taper[:len(taper)//2] + + if taper_end: + l, r = kmax - (len(taper) - len(taper) // 2), kmax + h[l:r] *= taper[len(taper)//2:] + + # add frequency band to total and use fft to interpolate + h.resize(flen) + h = h.to_timeseries() + wf[len(wf)-len(h):] += h + + return (wf_plus.to_frequencyseries().astype(hp.dtype), + wf_cross.to_frequencyseries().astype(hp.dtype))
+ +
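A usage sketch under assumed settings; the band edges, sub-band lengths and overlap below are illustrative and must be tuned so that each sub-band is long enough for the frequency content it carries, as the docstring warns:

>>> from pycbc.waveform.multiband import multiband_fd_waveform
>>> hp, hc = multiband_fd_waveform(base_approximant='IMRPhenomD',
...     mass1=38., mass2=29., f_ref=20., f_lower=20., f_final=1024.,
...     delta_f=1.0/256, bands=[40., 80.], lengths=[32., 8.], overlap=4.)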
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/nltides.html b/latest/html/_modules/pycbc/waveform/nltides.html new file mode 100644 index 00000000000..717b777025b --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/nltides.html @@ -0,0 +1,216 @@ + + + + + + pycbc.waveform.nltides — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.waveform.nltides

+""" Utilities for introducing nonlinear tidal effects into waveform approximants
+"""
+import pycbc.conversions
+import numpy
+import lal
+
+
+[docs] +def nltides_fourier_phase_difference(f, delta_f, f0, amplitude, n, m1, m2): + """Calculate the change to the Fourier phase change due + to non-linear tides. Note that the Fourier phase Psi(f) + is not the same as the gravitational-wave phase phi(f) and + is computed by + Delta Psi(f) = 2 \pi f Delta t(f) - Delta phi(f) + + Parameters + ---------- + f: numpy.array + Array of frequency values to calculate the fourier phase difference + delta_f: float + Frequency resolution of f array + f0: float + Frequency that NL effects switch on + amplitude: float + Amplitude of effect + n: float + Growth dependence of effect + m1: float + Mass of component 1 + m2: float + Mass of component 2 + + Returns + ------- + delta_psi: numpy.array + Fourier phase as a function of frequency + """ + + kmin = int(f0/delta_f) + kmax = len(f) + + f_ref, t_of_f_factor, phi_of_f_factor = \ + pycbc.conversions.nltides_coefs(amplitude, n, m1, m2) + + # Fourier phase shift below f0 from \Delta \phi(f) + delta_psi_f_le_f0 = numpy.ones(kmin) + delta_psi_f_le_f0 *= - phi_of_f_factor * (f0/f_ref)**(n-3.) + + # Fourier phase shift above f0 from \Delta \phi(f) + delta_psi_f_gt_f0 = - phi_of_f_factor * (f[kmin:kmax]/f_ref)**(n-3.) + + # Fourier phase shift below f0 from 2 pi f \Delta t(f) + delta_psi_f_le_f0 += 2.0 * lal.lal.PI * f[0:kmin] * t_of_f_factor * \ + (f0/f_ref)**(n-4.) + + # Fourier phase shift above f0 from 2 pi f \Delta t(f) + delta_psi_f_gt_f0 += 2.0 * lal.lal.PI * f[kmin:kmax] * t_of_f_factor * \ + (f[kmin:kmax]/f_ref)**(n-4.) + + # Return the shift to the Fourier phase + return numpy.concatenate((delta_psi_f_le_f0, delta_psi_f_gt_f0), axis=0)
+ + + +
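A sketch of evaluating the phase shift on a regular frequency grid; the ``f0``, ``amplitude`` and ``n`` values are placeholders rather than physically motivated choices:

>>> import numpy
>>> from pycbc.waveform.nltides import nltides_fourier_phase_difference
>>> delta_f = 0.25
>>> f = numpy.arange(4096) * delta_f           # 0 Hz up to ~1024 Hz
>>> dpsi = nltides_fourier_phase_difference(f, delta_f, f0=100.,
...     amplitude=1e-7, n=2., m1=1.4, m2=1.4)
>>> dpsi.shape
(4096,)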
+[docs] +def nonlinear_tidal_spa(**kwds): + """Generates a frequency-domain waveform that implements the + TaylorF2+NL tide model described in https://arxiv.org/abs/1808.07013 + """ + + from pycbc import waveform + from pycbc.types import Array + + # We start with the standard TaylorF2 based waveform + kwds.pop('approximant') + hp, hc = waveform.get_fd_waveform(approximant="TaylorF2", **kwds) + + # Add the phasing difference from the nonlinear tides + f = numpy.arange(len(hp)) * hp.delta_f + pd = Array(numpy.exp(-1.0j * nltides_fourier_phase_difference(f, + hp.delta_f, + kwds['f0'], kwds['amplitude'], kwds['n'], + kwds['mass1'], kwds['mass2'])), + dtype=hp.dtype) + hp *= pd + hc *= pd + return hp, hc
+ +
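Calling the model directly (a sketch: the ``approximant`` keyword is required only because it is popped inside before ``TaylorF2`` is substituted, and the tidal parameter values are placeholders):

>>> from pycbc.waveform.nltides import nonlinear_tidal_spa
>>> hp, hc = nonlinear_tidal_spa(approximant='TaylorF2NL',
...     mass1=1.4, mass2=1.4, delta_f=0.25, f_lower=30., f_final=1024.,
...     f0=100., amplitude=1e-7, n=2.)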
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/parameters.html b/latest/html/_modules/pycbc/waveform/parameters.html new file mode 100644 index 00000000000..4fb0f4dbb1e --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/parameters.html @@ -0,0 +1,786 @@ + + + + + + pycbc.waveform.parameters — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.waveform.parameters

+# Copyright (C) 2016 Collin Capano
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Classes to define common parameters used for waveform generation.
+"""
+
+from collections import OrderedDict
+try:
+    from collections import UserList
+except ImportError:
+    from UserList import UserList
+
+#
+# =============================================================================
+#
+#                                  Base definitions
+#
+# =============================================================================
+#
+
+
+[docs] +class Parameter(str): + """A class that stores information about a parameter. This is done by + sub-classing string, adding additional attributes. + """ + + def __new__(cls, name, dtype=None, default=None, label=None, + description="No description."): + obj = str.__new__(cls, name) + obj.name = name + obj.dtype = dtype + obj.default = default + obj.label = label + obj.description = description + return obj + +
+[docs] + def docstr(self, prefix='', include_label=True): + """Returns a string summarizing the parameter. Format is: + <prefix>``name`` : {``default``, ``dtype``} + <prefix> ``description`` Label: ``label``. + """ + dtype_str = str(self.dtype).replace("<type '", '').replace("'>", '') + dtype_str = dtype_str.replace("<class '", '') + outstr = "%s%s : {%s, %s}\n%s %s" % ( + prefix, self.name, str(self.default), dtype_str, prefix, + self.description) + if include_label: + outstr += " Label: %s" % (self.label) + return outstr
+
+ + + +
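A minimal sketch of how Parameter behaves: it compares and hashes like its own name string while carrying waveform metadata (the parameter defined here is illustrative, not one of the module's definitions):

    # Sketch: Parameter is a str subclass with attached metadata.
    foo = Parameter("foo", dtype=float, default=1.0, label=r"$f_{\rm oo}$",
                    description="An illustrative parameter.")
    assert foo == "foo"             # equal to its own name
    assert foo.default == 1.0       # metadata travels with the object
    settings = {foo: 3.5}           # usable directly as a dictionary key
    print(foo.docstr(prefix="    "))  # formatted summary used to build docstrings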
+[docs] +class ParameterList(UserList): + """A list of parameters. Each element in the list is expected to be a + Parameter instance. + """ + + @property + def names(self): + """Returns a list of the names of each parameter.""" + return [x.name for x in self] + + @property + def aslist(self): + """Cast to basic list.""" + return list(self) + + @property + def asdict(self): + """Returns a dictionary of the parameters keyed by the parameters.""" + return dict([[x, x] for x in self]) + +
+[docs] + def defaults(self): + """Returns a list of the name and default value of each parameter, + as tuples. + """ + return [(x, x.default) for x in self]
+ + +
+[docs] + def default_dict(self): + """Returns a dictionary of the name and default value of each + parameter. + """ + return OrderedDict(self.defaults())
+ + + @property + def nodefaults(self): + """Returns a ParameterList of the parameters that have None for + defaults. + """ + return ParameterList([x for x in self if x.default is None]) + + @property + def dtypes(self): + """Returns a list of the name and dtype of each parameter, + as tuples. + """ + return [(x, x.dtype) for x in self] + + @property + def dtype_dict(self): + """Returns a dictionary of the name and dtype of each parameter.""" + return OrderedDict(self.dtypes) + + @property + def descriptions(self): + """Returns a list of the name and description of each parameter, + as tuples. + """ + return [(x, x.description) for x in self] + + @property + def description_dict(self): + """Return a dictionary of the name and description of each parameter. + """ + return OrderedDict(self.descriptions) + + @property + def labels(self): + """Returns a list of each parameter and its label, as tuples.""" + return [(x, x.label) for x in self] + + @property + def label_dict(self): + """Return a dictionary of the name and label of each parameter. + """ + return OrderedDict(self.labels) + +
+[docs] + def docstr(self, prefix='', include_label=True): + """Returns the ``docstr`` of each parameter joined together.""" + return '\n'.join([x.docstr(prefix, include_label) for x in self])
+
+
+
+#
+# =============================================================================
+#
+#                            Parameter definitions
+#
+# =============================================================================
+#
+
+#
+# CBC intrinsic parameters
+#
+mass1 = Parameter("mass1",
+                  dtype=float, default=None, label=r"$m_1~(\mathrm{M}_\odot)$",
+                  description="The mass of the first component object in the "
+                              "binary (in solar masses).")
+mass2 = Parameter("mass2",
+                  dtype=float, default=None, label=r"$m_2~(\mathrm{M}_\odot)$",
+                  description="The mass of the second component object in the "
+                              "binary (in solar masses).")
+spin1x = Parameter("spin1x",
+                   dtype=float, default=0., label=r"$\chi_{1x}$",
+                   description="The x component of the first binary component's "
+                               "dimensionless spin.")
+spin1y = Parameter("spin1y",
+                   dtype=float, default=0., label=r"$\chi_{1y}$",
+                   description="The y component of the first binary component's "
+                               "dimensionless spin.")
+spin1z = Parameter("spin1z",
+                   dtype=float, default=0., label=r"$\chi_{1z}$",
+                   description="The z component of the first binary component's "
+                               "dimensionless spin.")
+spin2x = Parameter("spin2x",
+                   dtype=float, default=0., label=r"$\chi_{2x}$",
+                   description="The x component of the second binary component's "
+                               "dimensionless spin.")
+spin2y = Parameter("spin2y",
+                   dtype=float, default=0., label=r"$\chi_{2y}$",
+                   description="The y component of the second binary component's "
+                               "dimensionless spin.")
+spin2z = Parameter("spin2z",
+                   dtype=float, default=0., label=r"$\chi_{2z}$",
+                   description="The z component of the second binary component's "
+                               "dimensionless spin.")
+eccentricity = Parameter("eccentricity",
+                         dtype=float, default=0., label=r"$e$",
+                         description="Eccentricity.")
+
+# derived parameters (these are not used for waveform generation)
+mchirp = Parameter("mchirp",
+                   dtype=float, label=r"$\mathcal{M}~(\mathrm{M}_\odot)$",
+                   description="The chirp mass of the binary (in solar masses).")
+eta = Parameter("eta",
+                dtype=float, label=r"$\eta$",
+                description="The symmetric mass ratio of the binary.")
+mtotal = Parameter("mtotal",
+                   dtype=float, label=r"$M~(\mathrm{M}_\odot)$",
+                   description="The total mass of the binary (in solar masses).")
+q = Parameter("q",
+              dtype=float, label=r"$q$",
+              description="The mass ratio, m1/m2, where m1 >= m2.")
+srcmass1 = Parameter("srcmass1", dtype=float,
+                     label=r"$m_1^{\rm{src}}~(\mathrm{M}_\odot)$",
+                     description="The mass of the first component object in "
+                                 "the source frame (in solar masses).")
+srcmass2 = Parameter("srcmass2", dtype=float,
+                     label=r"$m_2^{\rm{src}}~(\mathrm{M}_\odot)$",
+                     description="The mass of the second component object in "
+                                 "the source frame (in solar masses).")
+srcmchirp = Parameter("srcmchirp", dtype=float,
+                      label=r"$\mathcal{M}^{\rm{src}}~(\mathrm{M}_\odot)$",
+                      description="The chirp mass of the binary in the "
+                                  "source frame (in solar masses).")
+srcmtotal = Parameter("srcmtotal", dtype=float,
+                      label=r"$M^{\rm{src}}~(\mathrm{M}_\odot)$",
+                      description="The total mass of the binary in the "
+                                  "source frame (in solar masses).")
+primary_mass = Parameter("primary_mass",
+                         dtype=float, label=r"$m_{1}$",
+                         description="Mass of the primary object (in solar masses).")
+secondary_mass = Parameter("secondary_mass",
+                           dtype=float, label=r"$m_{2}$",
+                           description="Mass of the secondary object (in solar masses).")
+
+# derived parameters for component spins
+chi_eff = Parameter("chi_eff",
+                    dtype=float, label=r"$\chi_\mathrm{eff}$",
+                    description="Effective spin of the binary.")
+chi_p = 
Parameter("chi_p", + dtype=float, label=r"$\chi_p$", + description="Effective precessing spin of the binary.") +spin_px = Parameter("spin_px", + dtype=float, label=r"$\chi_{1x}$", + description="The x component of the dimensionless spin of the " + "primary object.") +spin_py = Parameter("spin_py", + dtype=float, label=r"$\chi_{1y}$", + description="The y component of the dimensionless spin of the " + "primary object.") +spin_pz = Parameter("spin_pz", + dtype=float, label=r"$\chi_{1z}$", + description="The z component of the dimensionless spin of the " + "primary object.") +spin_sx = Parameter("spin_sx", + dtype=float, label=r"$\chi_{2x}$", + description="The x component of the dimensionless spin of the " + "secondary object.") +spin_sy = Parameter("spin_sy", + dtype=float, label=r"$\chi_{2y}$", + description="The y component of the dimensionless spin of the " + "secondary object.") +spin_sz = Parameter("spin_sz", + dtype=float, label=r"$\chi_{2z}$", + description="The z component of the dimensionless spin of the " + "secondary object.") +lambda1 = Parameter("lambda1", + dtype=float, default=None, label=r"$\Lambda_1$", + description="The dimensionless tidal deformability parameter of object 1.") +lambda2 = Parameter("lambda2", + dtype=float, default=None, label=r"$\Lambda_2$", + description="The dimensionless tidal deformability parameter of object 2.") +dquad_mon1 = Parameter("dquad_mon1", + dtype=float, default=None, label=r"$qm_1$", + description="Quadrupole-monopole parameter / m_1^5 -1.") +dquad_mon2 = Parameter("dquad_mon2", + dtype=float, default=None, label=r"$qm_2$", + description="Quadrupole-monopole parameter / m_2^5 -1.") +lambda_octu1 = Parameter("lambda_octu1", + dtype=float, default=None, label=r"$\Lambda_3^{(1)}$", + description="The octupolar tidal deformability parameter of " + "object 1.") +lambda_octu2 = Parameter("lambda_octu2", + dtype=float, default=None, label=r"$\Lambda_3^{(2)}$", + description="The octupolar tidal deformability parameter of " + "object 2.") +quadfmode1 = Parameter("quadfmode1", + dtype=float, default=None, label=r"$m_1 \omega_{02}^{(1)}$", + description="The quadrupolar f-mode angular frequency of " + "object 1.") +quadfmode2 = Parameter("quadfmode2", + dtype=float, default=None, label=r"$m_ \omega_{02}^{(2)}$", + description="The quadrupolar f-mode angular frequency of " + "object 2.") +octufmode1 = Parameter("octufmode1", + dtype=float, default=None, label=r"$m_1 \omega_{03}^{(1)}$", + description="The octupolar f-mode angular frequency of " + "object 1.") +octufmode2 = Parameter("octufmode2", + dtype=float, default=None, label=r"$m_ \omega_{03}^{(2)}$", + description="The octupolar f-mode angular frequency of " + "object 2.") + +# derived parameters for component spin magnitude and angles +spin1_a = Parameter("spin1_a", + dtype=float, label=r"$a_{1}$", + description="The dimensionless spin magnitude " + r"$|\vec{s}/m_{1}^2|$.") +spin2_a = Parameter("spin2_a", + dtype=float, label=r"$a_{2}$", + description="The dimensionless spin magnitude " + r"$|\vec{s}/m_{2}^2|$.") +spin1_azimuthal = Parameter( + "spin1_azimuthal", + dtype=float, label=r"$\theta_1^\mathrm{azimuthal}$", + description="The azimuthal spin angle for mass 1.") +spin2_azimuthal = Parameter( + "spin2_azimuthal", + dtype=float, label=r"$\theta_2^\mathrm{azimuthal}$", + description="The azimuthal spin angle for mass 2.") +spin1_polar = Parameter("spin1_polar", + dtype=float, label=r"$\theta_1^\mathrm{polar}$", + description="The polar spin angle for mass 1.") +spin2_polar = 
Parameter("spin2_polar", + dtype=float, label=r"$\theta_2^\mathrm{polar}$", + description="The polar spin angle for mass 2.") + + +# +# Parameters needed for CBC waveform generation +# +f_lower = Parameter("f_lower", + dtype=float, default=None, label=r"$f_0$ (Hz)", + description="The starting frequency of the waveform (in Hz).") +f_final = Parameter("f_final", + dtype=float, default=0, label=r"$f_{\mathrm{final}}$ (Hz)", + description="The ending frequency of the waveform. The " + "default (0) indicates that the choice is made by " + "the respective approximant.") +f_final_func = Parameter("f_final_func", + dtype=str, default="", label=None, + description="Use the given frequency function to compute f_final " + "based on the parameters of the waveform.") +f_ref = Parameter("f_ref", + dtype=float, default=0, label=r"$f_{\mathrm{ref}}$ (Hz)", + description="The reference frequency.") +delta_f = Parameter("delta_f", + dtype=float, default=None, label=r"$\Delta f$ (Hz)", + description="The frequency step used to generate the waveform " + "(in Hz).") +delta_t = Parameter("delta_t", + dtype=float, default=None, label=r"$\Delta t$ (s)", + description="The time step used to generate the waveform " + "(in s).") +sample_points = Parameter("sample_points", + dtype="Array", default=None, label=None, + description="An array of the frequencies (in Hz) at which to " + "generate the waveform.") +approximant = Parameter("approximant", + dtype=str, default=None, label=None, + description="A string that indicates the chosen approximant.") +phase_order = Parameter("phase_order", + dtype=int, default=-1, label=None, + description="The pN order of the orbital phase. The default " + "of -1 indicates that all implemented orders are " + "used.") +spin_order = Parameter("spin_order", + dtype=int, default=-1, label=None, + description="The pN order of the spin corrections. The " + "default of -1 indicates that all implemented " + "orders are used.") +tidal_order = Parameter("tidal_order", + dtype=int, default=-1, label=None, + description="The pN order of the tidal corrections. The " + "default of -1 indicates that all implemented " + "orders are used.") +amplitude_order = Parameter("amplitude_order", + dtype=int, default=-1, label=None, + description="The pN order of the amplitude. The default of -1 " + "indicates that all implemented orders are used.") +eccentricity_order = Parameter("eccentricity_order", + dtype=int, default=-1, label=None, + description="The pN order of the eccentricity corrections." + "The default of -1 indicates that all implemented orders are used.") +numrel_data = Parameter("numrel_data", + dtype=str, default="", label=None, + description="Sets the NR flags; only needed for NR waveforms.") +remnant_mass = Parameter("remnant_mass", + dtype=float, label=r"$m_{\mathrm{rem}}$", + description="Remnant mass of NS-BH merger. 
See " + "conversions.remnant_mass_" + "from_mass1_mass2_spin1x_spin1y_spin1z_eos") + +# +# General location parameters +# +distance = Parameter("distance", + dtype=float, default=1., label=r"$d_L$ (Mpc)", + description="Luminosity distance to the binary (in Mpc).") +chirp_distance = Parameter("chirp_distance", + dtype=float, default=1., label=r"$d_c$ (Mpc)", + description="Chirp distance to the binary (in Mpc).") +coa_phase = Parameter("coa_phase", + dtype=float, default=0., label=r"$\phi_c$", + description="Coalesence phase of the binary (in rad).") +inclination = Parameter("inclination", + dtype=float, default=0., label=r"$\iota$", + description="Inclination (rad), defined as the angle between " + "the orbital angular momentum L and the " + "line-of-sight at the reference frequency.") +thetajn = Parameter("thetajn", + dtype=float, default=0., label=r"$\theta_{JN}$", + description="The angle between the total angular momentum " + "J and the line-of-sight.") +long_asc_nodes = Parameter("long_asc_nodes", + dtype=float, default=0., label=r"$\Omega$", + description="Longitude of ascending nodes axis (rad).") +mean_per_ano = Parameter("mean_per_ano", + dtype=float, default=0., label=r"$\delta$", + description="Mean anomaly of the periastron (rad).") +tc = Parameter("tc", + dtype=float, default=None, label=r"$t_c$ (s)", + description="Coalescence time (s) is the time when a GW " + "reaches the origin of a certain coordinate system.") +delta_tc = Parameter("delta_tc", dtype=float, + label=r"$\Delta t_c~(\rm{s})$", + description="Coalesence time offset.") +ra = Parameter("ra", + dtype=float, default=0., label=r"$\alpha$", + description="Right ascension (rad).") +dec = Parameter("dec", + dtype=float, default=0., label=r"$\delta$", + description="Declination (rad).") +polarization = Parameter("polarization", + dtype=float, default=0., label=r"$\psi$", + description="Polarization angle (rad) in " + "a certain coordinate system.") +redshift = Parameter("redshift", + dtype=float, default=None, label=r"$z$", + description="Redshift.") +comoving_volume = Parameter("comoving_volume", dtype=float, + label=r"$V_C~(\rm{Mpc}^3)$", + description="Comoving volume (in cubic Mpc).") +eclipticlatitude = Parameter("eclipticlatitude", + dtype=float, default=0., label=r"$\beta$", + description="eclipticlatitude in SSB/LISA coords.") +eclipticlongitude = Parameter("eclipticlongitude", + dtype=float, default=0., label=r"$\lambda$", + description="eclipticlongitude in SSB/LISA coords.") + +# +# Calibration parameters +# +delta_fs = Parameter("calib_delta_fs", + dtype=float, + description="Change in optical spring freq (Hz).") +delta_fc = Parameter("calib_delta_fc", + dtype=float, + description="Change in cavity pole freq (Hz).") +delta_qinv = Parameter("calib_delta_qinv", + dtype=float, + description="Change in inverse quality factor.") +kappa_c = Parameter("calib_kappa_c", + dtype=float) +kappa_tst_re = Parameter("calib_kappa_tst_re", + dtype=float) +kappa_tst_im = Parameter("calib_kappa_tst_im", + dtype=float) +kappa_pu_re = Parameter("calib_kappa_pu_re", + dtype=float) +kappa_pu_im = Parameter("calib_kappa_pu_im", + dtype=float) + +# +# Non mandatory flags with default values +# +frame_axis = Parameter("frame_axis", + dtype=int, default=0, + description="Allow to choose among orbital_l, view and total_j") +modes_choice = Parameter("modes_choice", + dtype=int, default=0, + description="Allow to turn on among orbital_l, view and total_j") +side_bands = Parameter("side_bands", + dtype=int, default=0, + 
description="Flag for generating sidebands") +mode_array = Parameter("mode_array", + dtype=list, default=None, + description="Choose which (l,m) modes to include when " + "generating a waveform. " + "Only if approximant supports this feature." + "By default pass None and let lalsimulation " + "use it's default behaviour." + "Example: mode_array = [ [2,2], [2,-2] ]") + +# +# Parametrized testing general relativity parameters +# +dchi0 = Parameter("dchi0", + dtype=float, default=0., label=r"$d\chi_0$", + description="0PN testingGR parameter.") +dchi1 = Parameter("dchi1", + dtype=float, default=0., label=r"$d\chi_1$", + description="0.5PN testingGR parameter.") +dchi2 = Parameter("dchi2", + dtype=float, default=0., label=r"$d\chi_2$", + description="1PN testingGR parameter.") +dchi3 = Parameter("dchi3", + dtype=float, default=0., label=r"$d\chi_3$", + description="1.5PN testingGR parameter.") +dchi4 = Parameter("dchi4", + dtype=float, default=0., label=r"$d\chi_4$", + description="2PN testingGR parameter.") +dchi5 = Parameter("dchi5", + dtype=float, default=0., label=r"$d\chi_5$", + description="2.5PN testingGR parameter.") +dchi5l = Parameter("dchi5l", + dtype=float, default=0., label=r"$d\chi_5{l}$", + description="2.5PN logrithm testingGR parameter.") +dchi6 = Parameter("dchi6", + dtype=float, default=0., label=r"$d\chi_6$", + description="3PN testingGR parameter.") +dchi6l = Parameter("dchi6l", + dtype=float, default=0., label=r"$d\chi_{6l}$", + description="3PN logrithm testingGR parameter.") +dchi7 = Parameter("dchi7", + dtype=float, default=0., label=r"$d\chi_7$", + description="3.5PN testingGR parameter.") +dalpha1 = Parameter("dalpha1", + dtype=float, default=0., label=r"$d\alpha_1$", + description="Merger-ringdown testingGR parameter.") +dalpha2 = Parameter("dalpha2", + dtype=float, default=0., label=r"$d\alpha_2$", + description="Merger-ringdown testingGR parameter.") +dalpha3 = Parameter("dalpha3", + dtype=float, default=0., label=r"$d\alpha_3$", + description="Merger-ringdown testingGR parameter.") +dalpha4 = Parameter("dalpha4", + dtype=float, default=0., label=r"$d\alpha_4$", + description="Merger-ringdown testingGR parameter.") +dalpha5 = Parameter("dalpha5", + dtype=float, default=0., label=r"$d\alpha_5$", + description="Merger-ringdown testingGR parameter.") +dbeta1 = Parameter("dbeta1", + dtype=float, default=0., label=r"$d\beta_1$", + description="Intermediate testingGR parameter.") +dbeta2 = Parameter("dbeta2", + dtype=float, default=0., label=r"$d\beta_2$", + description="Intermediate testingGR parameter.") +dbeta3 = Parameter("dbeta3", + dtype=float, default=0., label=r"$d\beta_3$", + description="Intermediate testingGR parameter.") +# +# ============================================================================= +# +# Parameter list definitions +# +# ============================================================================= +# + +# parameters describing the location of a binary w.r.t. +# the geocentric/LISA/SSB frame. +# Note: we do not include distance here. This is because these are not +# passed to the waveform generators in lalsimulation, but are instead applied +# after a waveform is generated. Distance, however, is a parameter used by +# the waveform generators. +location_params = ParameterList([tc, ra, dec, polarization, + eclipticlatitude, eclipticlongitude]) + +# parameters describing the orientation of a binary w.r.t. the radiation +# frame. Note: we include distance here, as it is typically used for generating +# waveforms. 
+orientation_params = ParameterList\ + ([distance, coa_phase, inclination, long_asc_nodes, mean_per_ano]) + +# the extrinsic parameters of a waveform +extrinsic_params = orientation_params + location_params + + +# testing GR parameters +testingGR_params = ParameterList\ + ([dchi0, dchi1, dchi2, dchi3, dchi4, dchi5, dchi5l, dchi6, dchi6l, + dchi7, dalpha1, dalpha2, dalpha3, dalpha4, dalpha5, + dbeta1, dbeta2, dbeta3]) + +# intrinsic parameters of a CBC waveform. Some of these are not recognized +# by every waveform model +cbc_intrinsic_params = ParameterList\ + ([mass1, mass2, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z, + eccentricity, lambda1, lambda2, dquad_mon1, dquad_mon2, lambda_octu1, + lambda_octu2, quadfmode1, quadfmode2, octufmode1, octufmode2]) + \ + testingGR_params + +# the parameters of a cbc in the radiation frame +cbc_rframe_params = cbc_intrinsic_params + orientation_params + +# calibration parameters +calibration_params = ParameterList([ + delta_fc, delta_fs, delta_qinv, kappa_c, kappa_tst_re, kappa_tst_im, + kappa_pu_re, kappa_pu_im]) + +# common generation parameters are parameters needed to generate either +# a TD, FD, or frequency sequence waveform +common_generation_params = ParameterList([ + approximant, f_ref, phase_order, spin_order, tidal_order, amplitude_order, eccentricity_order]) + +# Flags having discrete values, optional to generate either +# a TD, FD, or frequency sequence waveform +flags_generation_params = ParameterList([frame_axis, modes_choice, side_bands, mode_array]) + +# the following are parameters needed to generate an FD or TD waveform that +# is equally sampled +common_gen_equal_sampled_params = ParameterList([f_lower]) + \ + common_generation_params + flags_generation_params + +# the following are parameters that can be used to generate an FD waveform +fd_waveform_params = cbc_rframe_params + ParameterList([delta_f]) + \ + common_gen_equal_sampled_params + ParameterList([f_final, f_final_func]) + +# the following are parameters that can be used to generate a TD waveform +td_waveform_params = cbc_rframe_params + ParameterList([delta_t]) + \ + common_gen_equal_sampled_params + ParameterList([numrel_data]) + \ + flags_generation_params + +# The following are the minimum set of parameters that are required to +# generate a FD or TD waveform. All other parameters have some default value as +# defined above. Defaults of None simply mean that the value is not passed into +# the lal_dict structure and the waveform generator will take whatever default +# behaviour +td_required = ParameterList([f_lower, delta_t, approximant]) +fd_required = ParameterList([f_lower, delta_f, approximant]) +# The following is required for the FD sequence waveforms with detector +# response already applied +fd_det_sequence_required = ParameterList([f_lower, approximant]) + +#### +cbc_td_required = ParameterList([mass1, mass2, f_lower, delta_t, approximant]) +cbc_fd_required = ParameterList([mass1, mass2, f_lower, delta_f, approximant]) + +# the following are parameters that can be used to generate a +# frequency series waveform +fd_waveform_sequence_params = cbc_rframe_params + \ + ParameterList([sample_points]) + common_generation_params + \ + flags_generation_params +
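As a sketch of how these collections are typically consumed downstream, the required sets and defaults defined above can be inspected directly (the printed values follow from the definitions in this module):

    print(fd_required.names)    # ['f_lower', 'delta_f', 'approximant']
    print(td_required.names)    # ['f_lower', 'delta_t', 'approximant']

    defaults = fd_waveform_params.default_dict()
    print(defaults['spin1z'])   # 0.0 -- aligned spin components default to zero
    print(fd_waveform_params.nodefaults.names)  # parameters the caller must supply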
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/plugin.html b/latest/html/_modules/pycbc/waveform/plugin.html new file mode 100644 index 00000000000..babeb076409 --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/plugin.html @@ -0,0 +1,244 @@ + + + + + + pycbc.waveform.plugin — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.waveform.plugin

+""" Utilities for handling waveform plugins
+"""
+
+
+
+[docs]
+def add_custom_waveform(approximant, function, domain,
+                        sequence=False, has_det_response=False,
+                        force=False):
+    """ Make a custom waveform available to pycbc
+
+    Parameters
+    ----------
+    approximant : str
+        The name of the waveform
+    function : function
+        The function to generate the waveform
+    domain : str
+        Either 'frequency' or 'time' to indicate the domain of the waveform.
+    sequence : bool, False
+        The function evaluates the waveform only at chosen points
+        (instead of on an equally-spaced grid).
+    has_det_response : bool, False
+        Whether the waveform generator has a built-in detector response.
+    force : bool, False
+        If True, overwrite an existing waveform registered under the same
+        approximant name instead of raising an error.
+    """
+    from pycbc.waveform.waveform import (cpu_fd, cpu_td, fd_sequence,
+                                         fd_det, fd_det_sequence)
+
+    used = RuntimeError("Can't load plugin waveform {}, the name is"
+                        " already in use.".format(approximant))
+
+    if domain == 'time':
+        if not force and (approximant in cpu_td):
+            raise used
+        cpu_td[approximant] = function
+    elif domain == 'frequency':
+        if sequence:
+            if not has_det_response:
+                if not force and (approximant in fd_sequence):
+                    raise used
+                fd_sequence[approximant] = function
+            else:
+                if not force and (approximant in fd_det_sequence):
+                    raise used
+                fd_det_sequence[approximant] = function
+        else:
+            if not has_det_response:
+                if not force and (approximant in cpu_fd):
+                    raise used
+                cpu_fd[approximant] = function
+            else:
+                if not force and (approximant in fd_det):
+                    raise used
+                fd_det[approximant] = function
+    else:
+        raise ValueError("Invalid domain ({}), should be "
+                         "'time' or 'frequency'".format(domain))
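A hedged sketch of registering a toy frequency-domain model through the function above and then generating it with the normal interface. The approximant name 'ToyFD' and the waveform itself are purely illustrative and not physical; whether additional metadata (such as a length estimator) is needed depends on how the waveform is used downstream:

    import numpy
    from pycbc.types import FrequencySeries
    from pycbc.waveform import get_fd_waveform
    from pycbc.waveform.plugin import add_custom_waveform

    def toy_fd_waveform(**params):
        # Featureless, decaying amplitude with zero phase; illustration only.
        df = params['delta_f']
        freqs = numpy.arange(int(params['f_final'] / df) + 1) * df
        hp = FrequencySeries((1e-22 * numpy.exp(-freqs / 100.0)).astype(complex),
                             delta_f=df)
        return hp, 1j * hp

    add_custom_waveform('ToyFD', toy_fd_waveform, 'frequency', force=True)
    hp, hc = get_fd_waveform(approximant='ToyFD', mass1=10.0, mass2=10.0,
                             f_lower=20.0, delta_f=0.25, f_final=512.0)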
+ + + +
+[docs] +def add_length_estimator(approximant, function): + """ Add length estimator for an approximant + + Parameters + ---------- + approximant : str + Name of approximant + function : function + A function which takes kwargs and returns the waveform length + """ + from pycbc.waveform.waveform import _filter_time_lengths + if approximant in _filter_time_lengths: + raise RuntimeError("Can't load length estimator {}, the name is" + " already in use.".format(approximant)) + _filter_time_lengths[approximant] = function + + from pycbc.waveform.waveform import td_fd_waveform_transform + td_fd_waveform_transform(approximant)
+ + + +
+[docs] +def retrieve_waveform_plugins(): + """ Process external waveform plugins + """ + import pkg_resources + + # Check for fd waveforms (no detector response) + for plugin in pkg_resources.iter_entry_points('pycbc.waveform.fd'): + add_custom_waveform(plugin.name, plugin.resolve(), 'frequency') + + # Check for fd waveforms (has detector response) + for plugin in pkg_resources.iter_entry_points('pycbc.waveform.fd_det'): + add_custom_waveform(plugin.name, plugin.resolve(), 'frequency', + has_det_response=True) + + # Check for fd sequence waveforms (no detector response) + for plugin in pkg_resources.iter_entry_points('pycbc.waveform.fd_sequence'): + add_custom_waveform(plugin.name, plugin.resolve(), 'frequency', + sequence=True) + + # Check for fd sequence waveforms (has detector response) + for plugin in pkg_resources.iter_entry_points('pycbc.waveform.fd_det_sequence'): + add_custom_waveform(plugin.name, plugin.resolve(), 'frequency', + sequence=True, has_det_response=True) + + # Check for td waveforms + for plugin in pkg_resources.iter_entry_points('pycbc.waveform.td'): + add_custom_waveform(plugin.name, plugin.resolve(), 'time') + + # Check for waveform length estimates + for plugin in pkg_resources.iter_entry_points('pycbc.waveform.length'): + add_length_estimator(plugin.name, plugin.resolve())
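For context on how these entry points get populated: a separate plugin package advertises its generator functions under the group names scanned above. A hypothetical setup.py might look roughly like this (package, module, and function names are placeholders):

    from setuptools import setup

    setup(
        name='example-pycbc-waveform-plugin',
        version='0.1',
        py_modules=['example_waveforms'],
        entry_points={
            # group names match those iterated in retrieve_waveform_plugins()
            'pycbc.waveform.fd': ['ToyFD = example_waveforms:toy_fd_waveform'],
            'pycbc.waveform.length': ['ToyFD = example_waveforms:toy_length'],
        },
    )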
+ +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/premerger.html b/latest/html/_modules/pycbc/waveform/premerger.html new file mode 100644 index 00000000000..6154e09d73d --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/premerger.html @@ -0,0 +1,170 @@ + + + + + + pycbc.waveform.premerger — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.waveform.premerger

+""" Waveform approximants for the pre-merger detection of gravitational waves
+"""
+import logging
+
+
+
+[docs] +def premerger_taylorf2(**p): + """ Generate time-shifted TaylorF2""" + from pycbc.waveform import get_fd_waveform + from pycbc.waveform.spa_tmplt import spa_length_in_time + from pycbc.waveform.utils import fd_taper + + p.pop('approximant') + hp, hc = get_fd_waveform(approximant="TaylorF2", **p) + + removed = spa_length_in_time(mass1=p['mass1'], + mass2=p['mass2'], + f_lower=p['f_final'], + phase_order=-1) + + hp = hp.cyclic_time_shift(removed) + hp.start_time += removed + + hc = hc.cyclic_time_shift(removed) + hc.start_time += removed + + logging.info("PreTaylorF2, m1=%.1f, m2=%.1f, fmax=%.1f, timeshift=%.1f", + p['mass1'], p['mass2'], p['f_final'], removed) + kmin = int(p['f_lower'] / p['delta_f']) + hp[0:kmin] = 0 + hc[0:kmin] = 0 + + if 'final_taper' in p: + taper_size = p['final_taper'] + hp = fd_taper(hp, p['f_final'] - taper_size, p['f_final'], side='right') + hc = fd_taper(hc, p['f_final'] - taper_size, p['f_final'], side='right') + + hp.time_offset = removed + hc.time_offset = removed + + return hp, hc
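A minimal sketch of calling the pre-merger model directly; all values are illustrative, and the 'approximant' keyword is included only because the function pops it before delegating to TaylorF2:

    from pycbc.waveform.premerger import premerger_taylorf2

    hp, hc = premerger_taylorf2(approximant='PreTaylorF2',   # popped inside
                                mass1=1.4, mass2=1.4,        # solar masses
                                f_lower=20.0,                # start of the band (Hz)
                                f_final=40.0,                # pre-merger cutoff (Hz)
                                delta_f=1.0/256.0,
                                final_taper=2.0)             # optional taper width (Hz)
    print(hp.time_offset)  # seconds removed between f_final and the merger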
+ +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/ringdown.html b/latest/html/_modules/pycbc/waveform/ringdown.html new file mode 100644 index 00000000000..0e13a2bc7d2 --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/ringdown.html @@ -0,0 +1,1430 @@ + + + + + + pycbc.waveform.ringdown — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.waveform.ringdown

+# Copyright (C) 2016 Miriam Cabero Mueller
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Generate ringdown templates in the time and frequency domain.
+"""
+
+import numpy, lal
+try:
+    import pykerr
+except ImportError:
+    pykerr = None
+from pycbc.types import (TimeSeries, FrequencySeries, float64, complex128,
+                         zeros)
+from pycbc.waveform.waveform import get_obj_attrs
+from pycbc.conversions import get_lm_f0tau_allmodes
+
+qnm_required_args = ['f_0', 'tau', 'amp', 'phi']
+mass_spin_required_args = ['final_mass','final_spin', 'lmns', 'inclination']
+freqtau_required_args = ['lmns']
+td_args = {'delta_t': None, 't_final': None, 'taper': False}
+fd_args = {'t_0': 0, 'delta_f': None, 'f_lower': 0, 'f_final': None}
+
+max_freq = 16384/2.
+min_dt = 1. / (2 * max_freq)
+pi = numpy.pi
+two_pi = 2 * numpy.pi
+pi_sq = numpy.pi * numpy.pi
+
+
+# Input parameters ############################################################
+
+
+[docs] +def props(obj, required, domain_args, **kwargs): + """ Return a dictionary built from the combination of defaults, kwargs, + and the attributes of the given object. + """ + # Get the attributes of the template object + pr = get_obj_attrs(obj) + + # Get the parameters to generate the waveform + # Note that keyword arguments override values in the template object + input_params = domain_args.copy() + input_params.update(pr) + input_params.update(kwargs) + # Check if the required arguments are given + for arg in required: + if arg not in input_params: + raise ValueError('Please provide ' + str(arg)) + + return input_params
+ + +
+[docs] +def format_lmns(lmns): + """Checks if the format of the parameter lmns is correct, returning the + appropriate format if not, and raise an error if nmodes=0. + + The required format for the ringdown approximants is a list of lmn modes + as a single whitespace-separated string, with n the number + of overtones desired. Alternatively, a list object may be used, + containing individual strings as elements for the lmn modes. + For instance, lmns = '223 331' are the modes 220, 221, 222, and 330. + Giving lmns = ['223', '331'] is equivalent. + The ConfigParser of a workflow might convert that to a single string + (case 1 below) or a list with a single string (case 2), and this function + will return the appropriate list of strings. If a different format is + given, raise an error. + """ + + # Catch case of lmns given as float (as int injection values are cast + # to float by pycbc_create_injections), cast to int, then string + if isinstance(lmns, float): + lmns = str(int(lmns)) + # Case 1: the lmns are given as a string, e.g. '221 331' + if isinstance(lmns, str): + lmns = lmns.split(' ') + # Case 2: the lmns are given as strings in a list, e.g. ['221', '331'] + elif isinstance(lmns, list): + pass + else: + raise ValueError('Format of parameter lmns not recognized. See ' + 'approximant documentation for more info.') + + out = [] + # Cycle over the lmns to ensure that we get back a list of strings that + # are three digits long, and that nmodes!=0 + for lmn in lmns: + # The following line is to be used with Python3 if the lmns are stored + # as a list of strings in the HDF files and the workflow converts that + # to a string + # lmn = lmn.strip(" b'") + # Try to convert to int and then str, to ensure the right format + lmn = str(int(lmn)) + if len(lmn) != 3: + raise ValueError('Format of parameter lmns not recognized. See ' + 'approximant documentation for more info.') + elif int(lmn[2]) == 0: + raise ValueError('Number of overtones (nmodes) must be greater ' + 'than zero in lmn={}.'.format(lmn)) + out.append(lmn) + + return out
+ + +
+[docs] +def parse_mode(lmn): + """Extracts overtones from an lmn. + """ + lm, nmodes = lmn[0:2], int(lmn[2]) + overtones = [] + for n in range(nmodes): + mode = lm + '{}'.format(n) + overtones.append(mode) + return overtones
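A small sketch of the mode bookkeeping above: the third digit of each lmn string is the number of overtones, not an overtone index.

    print(format_lmns('223 331'))   # ['223', '331']
    print(format_lmns(['223']))     # ['223']
    print(parse_mode('223'))        # ['220', '221', '222']
    print(parse_mode('331'))        # ['330']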
+ + + +
+[docs] +def lm_amps_phases(**kwargs): + r"""Takes input_params and return dictionaries with amplitudes and phases + of each overtone of a specific lm mode, checking that all of them are + given. Will also look for dbetas and dphis. If ``(dphi|dbeta)`` (i.e., + without a mode suffix) are provided, they will be used for all modes that + don't explicitly set a ``(dphi|dbeta){lmn}``. + """ + lmns = format_lmns(kwargs['lmns']) + amps = {} + phis = {} + dbetas = {} + dphis = {} + # reference mode + ref_amp = kwargs.pop('ref_amp', None) + if ref_amp is None: + # default to the 220 mode + ref_amp = 'amp220' + # check for reference dphi and dbeta + ref_dbeta = kwargs.pop('dbeta', 0.) + ref_dphi = kwargs.pop('dphi', 0.) + if isinstance(ref_amp, str) and ref_amp.startswith('amp'): + # assume a mode was provided; check if the mode exists + ref_mode = ref_amp.replace('amp', '') + try: + ref_amp = kwargs.pop(ref_amp) + amps[ref_mode] = ref_amp + except KeyError: + raise ValueError("Must provide an amplitude for the reference " + "mode {}".format(ref_amp)) + else: + ref_mode = None + # Get amplitudes and phases of the modes + for lmn in lmns: + overtones = parse_mode(lmn) + for mode in overtones: + # skip the reference mode + if mode != ref_mode: + try: + amps[mode] = kwargs['amp' + mode] * ref_amp + except KeyError: + raise ValueError('amp{} is required'.format(mode)) + try: + phis[mode] = kwargs['phi' + mode] + except KeyError: + raise ValueError('phi{} is required'.format(mode)) + dphis[mode] = kwargs.pop('dphi'+mode, ref_dphi) + dbetas[mode] = kwargs.pop('dbeta'+mode, ref_dbeta) + return amps, phis, dbetas, dphis
+ + + +
+[docs] +def lm_freqs_taus(**kwargs): + """Take input_params and return dictionaries with frequencies and damping + times of each overtone of a specific lm mode, checking that all of them + are given. + """ + lmns = format_lmns(kwargs['lmns']) + freqs, taus = {}, {} + for lmn in lmns: + overtones = parse_mode(lmn) + for mode in overtones: + try: + freqs[mode] = kwargs['f_' + mode] + except KeyError: + raise ValueError('f_{} is required'.format(mode)) + try: + taus[mode] = kwargs['tau_' + mode] + except KeyError: + raise ValueError('tau_{} is required'.format(mode)) + return freqs, taus
+ + + +
+[docs] +def lm_arbitrary_harmonics(**kwargs): + """Take input_params and return dictionaries with arbitrary harmonics + for each mode. + """ + lmns = format_lmns(kwargs['lmns']) + pols = {} + polnms = {} + for lmn in lmns: + overtones = parse_mode(lmn) + for mode in overtones: + pols[mode] = kwargs.pop('pol{}'.format(mode), None) + polnms[mode] = kwargs.pop('polnm{}'.format(mode), None) + return pols, polnms
+ + + +# Functions to obtain t_final, f_final and output vector ###################### + +
+[docs]
+def qnm_time_decay(tau, decay):
+    """Return the time at which the amplitude of the
+    ringdown falls to the given fraction ``decay`` of the peak amplitude.
+
+    Parameters
+    ----------
+    tau : float
+        The damping time of the sinusoid.
+    decay : float
+        The fraction of the peak amplitude.
+
+    Returns
+    -------
+    t_decay : float
+        The time at which the amplitude of the time-domain
+        ringdown falls to ``decay`` times the peak amplitude.
+    """
+    return -tau * numpy.log(decay)
+ + + +
+[docs]
+def qnm_freq_decay(f_0, tau, decay):
+    """Return the frequency at which the amplitude of the
+    ringdown falls to the given fraction ``decay`` of the peak amplitude.
+
+    Parameters
+    ----------
+    f_0 : float
+        The ringdown frequency, which gives the peak amplitude.
+    tau : float
+        The damping time of the sinusoid.
+    decay : float
+        The fraction of the peak amplitude.
+
+    Returns
+    -------
+    f_decay : float
+        The frequency at which the amplitude of the frequency-domain
+        ringdown falls to ``decay`` times the peak amplitude.
+    """
+    q_0 = pi * f_0 * tau
+    alpha = 1. / decay
+    alpha_sq = 1. / decay / decay
+    # Expression obtained analytically under the assumption
+    # that 1./alpha_sq, q_0^2 >> 1
+    q_sq = (alpha_sq + 4*q_0*q_0 + alpha*numpy.sqrt(alpha_sq + 16*q_0*q_0))/4.
+    return numpy.sqrt(q_sq) / pi / tau
+ + + +
+[docs] +def lm_tfinal(damping_times): + """Return the maximum t_final of the modes given, with t_final the time + at which the amplitude falls to 1/1000 of the peak amplitude + """ + if isinstance(damping_times, dict): + t_max = {} + for lmn in damping_times.keys(): + t_max[lmn] = qnm_time_decay(damping_times[lmn], 1./1000) + t_final = max(t_max.values()) + else: + t_final = qnm_time_decay(damping_times, 1./1000) + return t_final
+ + + +
+[docs] +def lm_deltat(freqs, damping_times): + """Return the minimum delta_t of all the modes given, with delta_t given by + the inverse of the frequency at which the amplitude of the ringdown falls + to 1/1000 of the peak amplitude. + """ + if isinstance(freqs, dict) and isinstance(damping_times, dict): + dt = {} + for lmn in freqs.keys(): + dt[lmn] = 1. / qnm_freq_decay(freqs[lmn], + damping_times[lmn], 1./1000) + delta_t = min(dt.values()) + elif isinstance(freqs, dict) and not isinstance(damping_times, dict): + raise ValueError('Missing damping times.') + elif isinstance(damping_times, dict) and not isinstance(freqs, dict): + raise ValueError('Missing frequencies.') + else: + delta_t = 1. / qnm_freq_decay(freqs, damping_times, 1./1000) + + if delta_t < min_dt: + delta_t = min_dt + + return delta_t
+ + + +
+[docs] +def lm_ffinal(freqs, damping_times): + """Return the maximum f_final of the modes given, with f_final the + frequency at which the amplitude falls to 1/1000 of the peak amplitude + """ + if isinstance(freqs, dict) and isinstance(damping_times, dict): + f_max = {} + for lmn in freqs.keys(): + f_max[lmn] = qnm_freq_decay(freqs[lmn], + damping_times[lmn], 1./1000) + f_final = max(f_max.values()) + elif isinstance(freqs, dict) and not isinstance(damping_times, dict): + raise ValueError('Missing damping times.') + elif isinstance(damping_times, dict) and not isinstance(freqs, dict): + raise ValueError('Missing frequencies.') + else: + f_final = qnm_freq_decay(freqs, damping_times, 1./1000) + if f_final > max_freq: + f_final = max_freq + return f_final
+ + + +
+[docs] +def lm_deltaf(damping_times): + """Return the minimum delta_f of all the modes given, with delta_f given by + the inverse of the time at which the amplitude of the ringdown falls to + 1/1000 of the peak amplitude. + """ + if isinstance(damping_times, dict): + df = {} + for lmn in damping_times.keys(): + df[lmn] = 1. / qnm_time_decay(damping_times[lmn], 1./1000) + delta_f = min(df.values()) + else: + delta_f = 1. / qnm_time_decay(damping_times, 1./1000) + return delta_f
+ + + +
+[docs] +def td_output_vector(freqs, damping_times, taper=False, + delta_t=None, t_final=None): + """Return an empty TimeSeries with the appropriate size to fit all + the quasi-normal modes present in freqs, damping_times + """ + if not delta_t: + delta_t = lm_deltat(freqs, damping_times) + if not t_final: + t_final = lm_tfinal(damping_times) + kmax = int(t_final / delta_t) + 1 + # Different modes will have different tapering window-size + # Find maximum window size to create long enough output vector + if taper: + max_tau = max(damping_times.values()) if \ + isinstance(damping_times, dict) else damping_times + kmax += int(max_tau/delta_t) + outplus = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t) + outcross = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t) + if taper: + # Change epoch of output vector if tapering will be applied + start = - max_tau + # To ensure that t=0 is still in output vector + start -= start % delta_t + outplus._epoch, outcross._epoch = start, start + return outplus, outcross
+ + + +
+[docs] +def fd_output_vector(freqs, damping_times, delta_f=None, f_final=None): + """Return an empty FrequencySeries with the appropriate size to fit all + the quasi-normal modes present in freqs, damping_times + """ + if not delta_f: + delta_f = lm_deltaf(damping_times) + if not f_final: + f_final = lm_ffinal(freqs, damping_times) + kmax = int(f_final / delta_f) + 1 + outplus = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) + outcross = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) + return outplus, outcross
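A sketch tying the helpers above together for a single mode with f_0 = 250 Hz and tau = 4 ms (illustrative values, of the order of a GW150914-like ringdown):

    f_0, tau = 250.0, 0.004

    t_final = lm_tfinal(tau)       # time for the amplitude to decay to 1/1000
    delta_t = lm_deltat(f_0, tau)  # set by where the FD amplitude decays to 1/1000
    f_final = lm_ffinal(f_0, tau)  # highest frequency retained
    delta_f = lm_deltaf(tau)       # inverse of t_final

    # For several modes, pass dicts keyed by mode, e.g. {'220': 250.0, ...}
    hp, hc = td_output_vector({'220': f_0}, {'220': tau}, taper=True,
                              delta_t=delta_t, t_final=t_final)
    hptilde, hctilde = fd_output_vector({'220': f_0}, {'220': tau},
                                        delta_f=delta_f, f_final=f_final)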
+ + + +# Spherical harmonics and Kerr factor ######################################### + +
+[docs] +def spher_harms(harmonics='spherical', l=None, m=None, n=0, + inclination=0., azimuthal=0., + spin=None, pol=None, polnm=None): + r"""Return the +/-m harmonic polarizations. + + This will return either spherical, spheroidal, or an arbitrary complex + number depending on what ``harmonics`` is set to. If harmonics is set to + arbitrary, then the "(+/-)m" harmonic will be :math:`e^{i \psi_(+/-)}`, + where :math:`\psi_{+/-}` are arbitrary angles provided by the user (using + the ``pol(nm)`` arguments). + + Parameters + ---------- + harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional + The type of harmonic to generate. Default is spherical. + l : int, optional + The l index. Must be provided if harmonics is 'spherical' or + 'spheroidal'. + m : int, optional + The m index. Must be provided if harmonics is 'spherical' or + 'spheroidal'. + n : int, optional + The overtone number. Only used if harmonics is 'spheroidal'. Default + is 0. + inclination : float, optional + The inclination angle. Only used if harmonics is 'spherical' or + 'spheroidal'. Default is 0. + azimuthal : float, optional + The azimuthal angle. Only used if harmonics is 'spherical' or + 'spheroidal'. Default is 0. + spin : float, optional + The dimensionless spin of the black hole. Must be provided if + harmonics is 'spheroidal'. Ignored otherwise. + pol : float, optional + Angle (in radians) to use for arbitrary "+m" harmonic. Must be provided + if harmonics is 'arbitrary'. Ignored otherwise. + polnm : float, optional + Angle (in radians) to use for arbitrary "-m" harmonic. Must be provided + if harmonics is 'arbitrary'. Ignored otherwise. + + Returns + ------- + xlm : complex + The harmonic of the +m mode. + xlnm : complex + The harmonic of the -m mode. + """ + if harmonics == 'spherical': + xlm = lal.SpinWeightedSphericalHarmonic(inclination, azimuthal, -2, + l, m) + xlnm = lal.SpinWeightedSphericalHarmonic(inclination, azimuthal, -2, + l, -m) + elif harmonics == 'spheroidal': + if spin is None: + raise ValueError("must provide a spin for spheroidal harmonics") + if pykerr is None: + raise ImportError("pykerr must be installed for spheroidal " + "harmonics") + xlm = pykerr.spheroidal(inclination, spin, l, m, n, phi=azimuthal) + xlnm = pykerr.spheroidal(inclination, spin, l, -m, n, phi=azimuthal) + elif harmonics == 'arbitrary': + if pol is None or polnm is None: + raise ValueError('must provide a pol and a polnm for arbitrary ' + 'harmonics') + xlm = numpy.exp(1j*pol) + xlnm = numpy.exp(1j*polnm) + else: + raise ValueError("harmonics must be either spherical, spheroidal, " + "or arbitrary") + return xlm, xlnm
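A short sketch of the three harmonic options (angles and spin are illustrative; the spheroidal branch additionally requires pykerr):

    # -2 spin-weighted spherical harmonics for the (2, +/-2) pair
    xlm, xlnm = spher_harms('spherical', l=2, m=2, inclination=0.4, azimuthal=0.0)

    # arbitrary harmonics: user-supplied phases for the +m and -m parts
    xlm, xlnm = spher_harms('arbitrary', pol=0.1, polnm=-0.1)

    # spheroidal harmonics also need the remnant spin (and pykerr installed)
    # xlm, xlnm = spher_harms('spheroidal', l=2, m=2, n=0, inclination=0.4, spin=0.69)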
+ + + +
+[docs] +def Kerr_factor(final_mass, distance): + """Return the factor final_mass/distance (in dimensionless units) for Kerr + ringdowns + """ + # Convert solar masses to meters + mass = final_mass * lal.MSUN_SI * lal.G_SI / lal.C_SI**2 + # Convert Mpc to meters + dist = distance * 1e6 * lal.PC_SI + return mass / dist
+ + + +###################################################### +#### Basic functions to generate damped sinusoid +###################################################### + +
+[docs] +def td_damped_sinusoid(f_0, tau, amp, phi, times, + l=2, m=2, n=0, inclination=0., azimuthal=0., + dphi=0., dbeta=0., + harmonics='spherical', final_spin=None, + pol=None, polnm=None): + r"""Return a time domain damped sinusoid (plus and cross polarizations) + with central frequency f_0, damping time tau, amplitude amp and phase phi. + + This returns the plus and cross polarization of the QNM, defined as + :math:`h^{+,\times}_{l|m|n} = (\Re, \Im) \{ h_{l|m|n}(t)\right}`, where + + .. math:: + + h_{l|m|n}(t) &:= A_{lmn} X_{lmn}(\theta, \varphi) + e^{-t/\tau_{lmn} + i(2\pi f_{lmn}t + \phi_{lmn})} \\ + & + A_{l-mn} X_{l-mn}(\theta, \varphi) + e^{-t/\tau_{lmn} - i(2\pi f_{lmn}t + \phi_{l-mn})} + + Here, the :math:`X_{lmn}(\theta, \varphi)` are either the spherical or + spheroidal harmonics, or an arbitrary complex number, depending on the + input arguments. This uses the convention that the +/-m modes are + related to each other via :math:`f_{l-mn} = -f_{lmn}` and + :math:`\tau_{l-mn} = \tau_{lmn}`. The amplitudes :math:`A_{l(-)mn}` and + phases :math:`\phi_{l(-)mn}` of the +/-m modes are related to each other + by: + + .. math:: + + \phi_{l-mn} = l\pi + \Delta \phi_{lmn} - \phi_{lmn} + + and + + .. math:: + + A_{lmn} &= A^0_{lmn} \sqrt{2} \cos(\pi/4 + \Delta \beta_{lmn})\\ + A_{lmn} &= A^0_{lmn} \sqrt{2} \sin(\pi/4 + \Delta \beta_{lmn}). + + Here, :math:`A^0_{lmn}` is an overall fiducial amplitude (set by the + ``amp``) parameter, and + + .. math:: + + \Delta \beta_{lmn} &\in [-pi/4, pi/4], \\ + \Delta \phi_{lmn} &\in (-pi, pi) + + are parameters that define the deviation from circular polarization. + Circular polarization occurs when both :math:`\Delta \beta_{lmn}` and + :math:`\Delta \phi_{lmn}` are zero (this is equivalent to assuming that + :math:`h_{l-mn} = (-1)^l h_{lmn}^*`). + + Parameters + ---------- + f_0 : float + The central frequency of the damped sinusoid, in Hz. + tau : float + The damping time, in seconds. + amp : float + The intrinsic amplitude of the QNM (:math:`A^0_{lmn}` in the above). + phi : float + The reference phase of the QNM (:math:`\phi_{lmn}`) in the above. + times : array + Array of times to use, where t=0 is considered the start of the + ringdown. Times are assumed to be monotonically increasing. A taper of + 10x the damping time will be used for any negative times. + l : int, optional + The l index; default is 2. + m : int, optional + The m index; default is 2. + n : int, optional + The overtone index; default is 0. + inclination : float, optional + The inclination angle (:math:`\theta` in the above). Ignored if + ``harmonics='arbitrary'``. Default is 0. + azimuthal : float, optional + The azimuthal angle (:math:`\varphi` in the above). Ignored if + ``harmonics='arbitrary'``. Default is 0. + dphi : float, optional + The difference in phase between the +m and -m mode + (:math:`\Delta \phi_{lmn}` in the above). Default is 0. + dbeta : float, optional + The angular difference in the amplitudes of the +m and -m mode + (:math:`\Delta \beta_{lmn}` in the above). Default is 0. If this and + dphi are both 0, will have circularly polarized waves. + harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional + Which harmonics to use. See :py:func:`spher_harms` for details. + Default is spherical. + final_spin : float, optional + The dimensionless spin of the black hole. Only needed if + ``harmonics='spheroidal'``. + pol : float, optional + Angle to use for +m arbitrary harmonics. Only needed if + ``harmonics='arbitrary'``. 
See :py:func:`spher_harms` for details. + polnm : float, optional + Angle to use for -m arbitrary harmonics. Only needed if + ``harmonics='arbitrary'``. See :py:func:`spher_harms` for details. + + Returns + ------- + hplus : numpy.ndarray + The plus polarization. + hcross : numpy.ndarray + The cross polarization. + """ + # evaluate the harmonics + xlm, xlnm = spher_harms(harmonics=harmonics, l=l, m=m, n=n, + inclination=inclination, azimuthal=azimuthal, + spin=final_spin, pol=pol, polnm=polnm) + # generate the +/-m modes + # we measure things as deviations from circular polarization, which occurs + # when h_{l-m} = (-1)^l h_{lm}^*; that implies that + # phi_{l-m} = - phi_{lm} and A_{l-m} = (-1)^l A_{lm} + omegalm = two_pi * f_0 * times + damping = -times/tau + # check for negative times + mask = times < 0 + if mask.any(): + damping[mask] = 10*times[mask]/tau + if m == 0: + # no -m, just calculate + hlm = xlm * amp * numpy.exp(damping + 1j*(omegalm + phi)) + else: + # amplitude + if dbeta == 0: + alm = alnm = amp + else: + beta = pi/4 + dbeta + alm = 2**0.5 * amp * numpy.cos(beta) + alnm = 2**0.5 * amp * numpy.sin(beta) + # phase + phinm = l*pi + dphi - phi + hlm = xlm * alm * numpy.exp(damping + 1j*(omegalm + phi)) \ + + xlnm * alnm * numpy.exp(damping - 1j*(omegalm - phinm)) + return hlm.real, hlm.imag
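A sketch of generating a single (2,2,0) damped sinusoid in the time domain with the function above (sampling rate and physical values are illustrative):

    import numpy

    delta_t = 1.0 / 4096
    times = numpy.arange(0.0, 0.05, delta_t)      # 50 ms of ringdown from t = 0
    hp, hc = td_damped_sinusoid(f_0=250.0, tau=0.004, amp=1e-21, phi=0.0,
                                times=times, l=2, m=2, n=0, inclination=0.4)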
+ + + +
+[docs] +def fd_damped_sinusoid(f_0, tau, amp, phi, freqs, t_0=0., + l=2, m=2, n=0, inclination=0., azimuthal=0., + harmonics='spherical', final_spin=None, + pol=None, polnm=None): + r"""Return the frequency domain version of a damped sinusoid. + + This is the frequency domain version :py:func:`td_damped_sinusoid` without + a taper if an infinite sample rate were used to resolve the step function + that turns on the damped sinusoid. See :py:func:`td_damped_sinusoid` for + details. + + .. note:: + + This function currently does not support using a different amplitude + and phase for the -m modes (equivalent to setting ``dphi = dbeta = 0`` + in :py:func:`td_damped_sinusoid`. + + Parameters + ---------- + f_0 : float + The central frequency of the damped sinusoid, in Hz. + tau : float + The damping time, in seconds. + amp : float + The intrinsic amplitude of the QNM (:math:`A^0_{lmn}` in the above). + phi : float + The reference phase of the QNM (:math:`\phi_{lmn}`) in the above. + freqs : array + Array of frequencies to evaluate the damped sinusoid over. + t_0 : float, optional + The start time of ringdown. Default (0.) corresponds to the ringdown + starting at the beginning of the equivalent segment in the time + domain. A non-zero value for ``t_0`` will shift the ringdown in time + to the corresponding number of seconds from the start of the segment. + l : int, optional + The l index; default is 2. + m : int, optional + The m index; default is 2. + n : int, optional + The overtone index; default is 0. + inclination : float, optional + The inclination angle (:math:`\theta` in the above). Ignored if + ``harmonics='arbitrary'``. Default is 0. + azimuthal : float, optional + The azimuthal angle (:math:`\varphi` in the above). Ignored if + ``harmonics='arbitrary'``. Default is 0. + harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional + Which harmonics to use. See :py:func:`spher_harms` for details. + Default is spherical. + final_spin : float, optional + The dimensionless spin of the black hole. Only needed if + ``harmonics='spheroidal'``. + pol : float, optional + Angle to use for +m arbitrary harmonics. Only needed if + ``harmonics='arbitrary'``. See :py:func:`spher_harms` for details. + polnm : float, optional + Angle to use for -m arbitrary harmonics. Only needed if + ``harmonics='arbitrary'``. See :py:func:`spher_harms` for details. + + Returns + ------- + hptilde : numpy.ndarray + The plus polarization. + hctilde : numpy.ndarray + The cross polarization. + """ + # evaluate the harmonics + if inclination is None: + inclination = 0. + if azimuthal is None: + azimuthal = 0. + xlm, xlnm = spher_harms(harmonics=harmonics, l=l, m=m, n=n, + inclination=inclination, azimuthal=azimuthal, + spin=final_spin, pol=pol, polnm=polnm) + # we'll assume circular polarization + xp = xlm + (-1)**l * xlnm + xc = xlm - (-1)**l * xlnm + denominator = 1 + (4j * pi * freqs * tau) - \ + (4 * pi_sq * (freqs*freqs - f_0*f_0) * tau*tau) + norm = amp * tau / denominator + if t_0 != 0: + time_shift = numpy.exp(-1j * two_pi * freqs * t_0) + norm *= time_shift + A1 = (1 + 2j * pi * freqs * tau) + A2 = two_pi * f_0 * tau + # Analytical expression for the Fourier transform of the ringdown + hptilde = norm * xp * (A1 * numpy.cos(phi) - A2 * numpy.sin(phi)) + hctilde = norm * xc * (A1 * numpy.sin(phi) + A2 * numpy.cos(phi)) + return hptilde, hctilde
+ + + +###################################################### +#### Base multi-mode for all approximants +###################################################### + +
+[docs] +def multimode_base(input_params, domain, freq_tau_approximant=False): + """Return a superposition of damped sinusoids in either time or frequency + domains with parameters set by input_params. + + Parameters + ---------- + input_params : dict + Dictionary of parameters to generate the ringdowns with. See + :py:func:`td_damped_sinusoid` and :py:func:`fd_damped_sinusoid` for + supported parameters. + domain : string + Choose domain of the waveform, either 'td' for time domain + or 'fd' for frequency domain. If 'td' ('fd'), the damped sinusoids will + be generated with :py:func:`td_damped_sinusoid` + (:py:func:`fd_damped_sinusoid`). + freq_tau_approximant : {False, bool}, optional + Choose choose the waveform approximant to use. Either based on + mass/spin (set to False, default), or on frequencies/damping times + of the modes (set to True). + + Returns + ------- + hplus : TimeSeries + The plus phase of a ringdown with the lm modes specified and + n overtones in the chosen domain (time or frequency). + hcross : TimeSeries + The cross phase of a ringdown with the lm modes specified and + n overtones in the chosen domain (time or frequency). + """ + input_params['lmns'] = format_lmns(input_params['lmns']) + amps, phis, dbetas, dphis = lm_amps_phases(**input_params) + pols, polnms = lm_arbitrary_harmonics(**input_params) + # get harmonics argument + try: + harmonics = input_params['harmonics'] + except KeyError: + harmonics = 'spherical' + # we'll need the final spin for spheroidal harmonics + if harmonics == 'spheroidal': + final_spin = input_params['final_spin'] + else: + final_spin = None + # add inclination and azimuthal if they aren't provided + if 'inclination' not in input_params: + input_params['inclination'] = 0. + if 'azimuthal' not in input_params: + input_params['azimuthal'] = 0. + # figure out the frequencies and damping times + if freq_tau_approximant: + freqs, taus = lm_freqs_taus(**input_params) + norm = 1. + else: + freqs, taus = get_lm_f0tau_allmodes(input_params['final_mass'], + input_params['final_spin'], input_params['lmns']) + norm = Kerr_factor(input_params['final_mass'], + input_params['distance']) if 'distance' in input_params.keys() \ + else 1. 
+ for mode, freq in freqs.items(): + if 'delta_f{}'.format(mode) in input_params: + freqs[mode] += input_params['delta_f{}'.format(mode)]*freq + for mode, tau in taus.items(): + if 'delta_tau{}'.format(mode) in input_params: + taus[mode] += input_params['delta_tau{}'.format(mode)]*tau + # setup the output + if domain == 'td': + outplus, outcross = td_output_vector(freqs, taus, + input_params['taper'], input_params['delta_t'], + input_params['t_final']) + sample_times = outplus.sample_times.numpy() + elif domain == 'fd': + outplus, outcross = fd_output_vector(freqs, taus, + input_params['delta_f'], input_params['f_final']) + kmin = int(input_params['f_lower'] / input_params['delta_f']) + sample_freqs = outplus.sample_frequencies.numpy()[kmin:] + else: + raise ValueError('unrecognised domain argument {}; ' + 'must be either fd or td'.format(domain)) + # cyclce over the modes, generating the waveforms + for lmn in freqs: + if amps[lmn] == 0.: + # skip + continue + if domain == 'td': + hplus, hcross = td_damped_sinusoid( + freqs[lmn], taus[lmn], amps[lmn], phis[lmn], sample_times, + l=int(lmn[0]), m=int(lmn[1]), n=int(lmn[2]), + inclination=input_params['inclination'], + azimuthal=input_params['azimuthal'], + dphi=dphis[lmn], dbeta=dbetas[lmn], + harmonics=harmonics, final_spin=final_spin, + pol=pols[lmn], polnm=polnms[lmn]) + outplus += hplus + outcross += hcross + elif domain == 'fd': + hplus, hcross = fd_damped_sinusoid( + freqs[lmn], taus[lmn], amps[lmn], phis[lmn], sample_freqs, + l=int(lmn[0]), m=int(lmn[1]), n=int(lmn[2]), + inclination=input_params['inclination'], + azimuthal=input_params['azimuthal'], + harmonics=harmonics, final_spin=final_spin, + pol=pols[lmn], polnm=polnms[lmn]) + outplus[kmin:] += hplus + outcross[kmin:] += hcross + return norm * outplus, norm * outcross
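A hedged sketch of driving multimode_base directly with a mass/spin parameterization. The registered approximants normally fill in defaults via props(); when calling the function like this, the keys consumed by td_output_vector (taper, delta_t, t_final) must be supplied explicitly, and the mode frequencies and damping times come from get_lm_f0tau_allmodes, which relies on the QNM data used by pycbc.conversions. All values are illustrative:

    params = {
        'final_mass': 65.0,     # solar masses
        'final_spin': 0.69,
        'distance': 400.0,      # Mpc; enables the Kerr amplitude factor
        'lmns': ['222'],        # the 220 and 221 overtones of the l=m=2 mode
        'amp220': 1e-21,        # reference-mode amplitude
        'amp221': 0.3,          # relative to the 220 mode
        'phi220': 0.0, 'phi221': 1.0,
        'inclination': 0.4,
        'taper': True,
        'delta_t': None,        # let lm_deltat / lm_tfinal choose them
        't_final': None,
    }
    hp, hc = multimode_base(params, domain='td')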
+ + + +###################################################### +#### Approximants +###################################################### + +
+[docs] +def get_td_from_final_mass_spin(template=None, **kwargs): + """Return time domain ringdown with all the modes specified. + + Parameters + ---------- + template : object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + final_mass : float + Mass of the final black hole in solar masses. + final_spin : float + Dimensionless spin of the final black hole. + distance : {None, float}, optional + Luminosity distance of the system. If specified, the returned ringdown + will include the Kerr factor (final_mass/distance). + lmns : list + Desired lmn modes as strings. All modes up to l = m = 7 are available. + The n specifies the number of overtones desired for the corresponding + lm pair, not the overtone number; maximum n=8. Example: + lmns = ['223','331'] are the modes 220, 221, 222, and 330 + ref_amp : str, optional + Which mode to use as the reference for computing amplitudes. Must be + 'amp220' if distance is given. Default is 'amp220'. The amplitude of + the reference mode should be specified directly, while all other + amplitudes are specified as ratios with respect to that mode. For + example, if ``ref_amp = 'amp220'``, ``lmns = ['221', '331']``, and no + distance is provided, then ``amp220 = 1e-22, amp330 = 0.1`` would + result in the 220 mode having a strain amplitude of 1e-22 and the 330 + mode having a strain amplitude of 1e-23. If distance is given, the + amplitude of the reference mode will have a completely different order + of magnitude. See table II in https://arxiv.org/abs/1107.0854 for an + estimate. An amplitude for the reference mode must always be provided, + even if that mode is not being generated. For example, if + ``ref_amp = 'amp220'`` and ``lmns = ['331']`` then both ``amp220`` + and ``amp330`` must be provided even though only the 330 mode will + be created. + amplmn : float + The amplitude of each mode, required for all modes specified plus the + reference mode. As described above, amplitudes should be specified + relative to the reference mode. + philmn : float + Phase of the lmn overtone, as many as the number of modes. + inclination : float + Inclination of the system in radians. Ignored if + ``harmonics='arbitrary'``. Default is 0. + azimuthal : float, optional + The azimuthal angle in radians. Ignored if ``harmonics='arbitrary'``. + Usually this is not necessary to specify since it is degenerate with + initial phase ``philmn``; i.e., this is only useful if you have an + expectation for what the phase of each mode is. Default is 0. + dphi[lmn] : float, optional + The difference in phase between the +m and -m mode. See the + documentation for ``dphi`` in :py:func:`td_damped_sinusoid` for + details. You may specify a + ``dphi{lmn}`` (ex. ``dphi220``) separately for each mode, and/or a + single ``dphi`` (without any lmn) for all modes that do not have + ``dphi`` specified. Default is to use 0 for all modes. + dbeta[lmn] : float, optional + The angular difference in the amplitudes of the +m and -m mode. See the + documentation for ``dbeta`` in :py:func:`td_damped_sinusoid` for + details. You may specify a ``dbeta{lmn}`` (ex. ``dbeta220``) + separately for each mode, and/or a + single ``dbeta`` (without any lmn) for all modes that do not have + ``dbeta`` specified. Default is to use 0 for all modes. + pollmn : float, optional + Angle to use for +m arbitrary harmonics of the lmn mode in radians + (example: ``pol220 = 0.1``).
Only needed if ``harmonics='arbitrary'``, + ignored otherwise. See :py:func:`spher_harms` for details. + polnmlmn : float, optional + Angle to use for -m arbitrary harmonics of the lmn mode in radians + (example: ``polnm220 = 0.1``). Only needed if + ``harmonics='arbitrary'``, ignored otherwise. See :py:func:`spher_harms` + for details. + harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional + Which harmonics to use. See :py:func:`spher_harms` for details. + Default is spherical. + delta_flmn: {None, float}, optional + GR deviation for the frequency of the lmn mode. If given, the lmn + frequency will be converted to new_flmn = flmn + delta_flmn * flmn, + with flmn the GR predicted value for the corresponding mass and spin. + delta_taulmn: {None, float}, optional + GR deviation for the damping time of the lmn mode. If given, the lmn + tau will be converted to new_taulmn = taulmn + delta_taulmn * taulmn, + with taulmn the GR predicted value for the corresponding mass and spin. + delta_t : {None, float}, optional + The time step used to generate the ringdown. + If None, it will be set to the inverse of the frequency at which the + amplitude is 1/1000 of the peak amplitude (the minimum of all modes). + t_final : {None, float}, optional + The ending time of the output frequency series. + If None, it will be set to the time at which the amplitude + is 1/1000 of the peak amplitude (the maximum of all modes). + taper : bool, optional + Add a rapid ringup with timescale tau/10 at the beginning of the + waveform to avoid the abrupt turn on of the ringdown. Each mode and + overtone will have a different taper depending on its tau, + the final taper being the superposition of all the tapers. Default is + False. + + Returns + ------- + hplus : TimeSeries + The plus phase of a ringdown with the lm modes specified and + n overtones in time domain. + hcross : TimeSeries + The cross phase of a ringdown with the lm modes specified and + n overtones in time domain. + """ + input_params = props(template, mass_spin_required_args, td_args, **kwargs) + return multimode_base(input_params, domain='td')
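+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# A minimal time-domain ringdown built from the remnant mass and spin, using
+# the parameters documented above. The keyword names follow the amplmn/philmn
+# pattern described in the docstring; all numerical values are placeholders
+# chosen only for illustration.
+if __name__ == '__main__':
+    hp, hc = get_td_from_final_mass_spin(
+        final_mass=65., final_spin=0.68,   # remnant properties
+        lmns=['222'],                      # '222' requests the 220 and 221 modes
+        amp220=1e-21, amp221=0.3,          # 221 amplitude relative to the 220 mode
+        phi220=0., phi221=0.,
+        delta_t=1.0/4096, taper=True)
+    print(hp.duration, hc.duration)
+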
+ + + +
+[docs] +def get_fd_from_final_mass_spin(template=None, **kwargs): + """Return frequency domain ringdown with all the modes specified. + + Parameters + ---------- + template : object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + final_mass : float + Mass of the final black hole in solar masses. + final_spin : float + Dimensionless spin of the final black hole. + distance : {None, float}, optional + Luminosity distance of the system. If specified, the returned ringdown + will include the Kerr factor (final_mass/distance). + lmns : list + Desired lmn modes as strings. All modes up to l = m = 7 are available. + The n specifies the number of overtones desired for the corresponding + lm pair, not the overtone number; maximum n=8. Example: + lmns = ['223','331'] are the modes 220, 221, 222, and 330 + ref_amp : str, optional + Which mode to use as the reference for computing amplitudes. Must be + 'amp220' if distance is given. Default is 'amp220'. The amplitude of + the reference mode should be specified directly, while all other + amplitudes are specified as ratios with respect to that mode. For + example, if ``ref_amp = 'amp220'``, ``lmns = ['221', '331']``, and no + distance is provided, then ``amp220 = 1e-22, amp330 = 0.1`` would + result in the 220 mode having a strain amplitude of 1e-22 and the 330 + mode having a strain amplitude of 1e-23. If distance is given, the + amplitude of the reference mode will have a completely different order + of magnitude. See table II in https://arxiv.org/abs/1107.0854 for an + estimate. An amplitude for the reference mode must always be provided, + even if that mode is not being generated. For example, if + ``ref_amp = 'amp220'`` and ``lmns = ['331']`` then both ``amp220`` + and ``amp330`` must be provided even though only the 330 mode will + be created. + amplmn : float + The amplitude of each mode, required for all modes specified plus the + reference mode. As described above, amplitudes should be specified + relative to the reference mode. + philmn : float + Phase of the lmn overtone, as many as the number of modes. + inclination : float + Inclination of the system in radians. Ignored if + ``harmonics='arbitrary'``. Default is 0. + azimuthal : float, optional + The azimuthal angle in radians. Ignored if ``harmonics='arbitrary'``. + Usually this is not necessary to specify since it is degenerate with + initial phase ``philmn``; i.e., this is only useful if you have an + expectation for what the phase of each mode is. Default is 0. + pollmn : float, optional + Angle to use for +m arbitrary harmonics of the lmn mode in radians + (example: ``pol220 = 0.1``). Only needed if ``harmonics='arbitrary'``, + ignored otherwise. See :py:func:`spher_harms` for details. + polnmlmn : float, optional + Angle to use for -m arbitrary harmonics of the lmn mode in radians + (example: ``polnm220 = 0.1``). Only needed if + ``harmonics='arbitrary'``, ignored otherwise. See :py:func:`spher_harms` + for details. + harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional + Which harmonics to use. See :py:func:`spher_harms` for details. + Default is spherical. + delta_flmn: {None, float}, optional + GR deviation for the frequency of the lmn mode. If given, the lmn + frequency will be converted to new_flmn = flmn + delta_flmn * flmn, + with flmn the GR predicted value for the corresponding mass and spin.
+ delta_taulmn: {None, float}, optional + GR deviation for the damping time of the lmn mode. If given, the lmn + tau will be converted to new_taulmn = taulmn + delta_taulmn * taulmn, + with taulmn the GR predicted value for the corresponding mass and spin. + delta_f : {None, float}, optional + The frequency step used to generate the ringdown (not to be confused + with the delta_flmn parameter that simulates GR violations). + If None, it will be set to the inverse of the time at which the + amplitude is 1/1000 of the peak amplitude (the minimum of all modes). + f_lower : {None, float}, optional + The starting frequency of the output frequency series. + If None, it will be set to delta_f. + f_final : {None, float}, optional + The ending frequency of the output frequency series. + If None, it will be set to the frequency at which the amplitude + is 1/1000 of the peak amplitude (the maximum of all modes). + + Returns + ------- + hplustilde : FrequencySeries + The plus phase of a ringdown with the lm modes specified and + n overtones in frequency domain. + hcrosstilde : FrequencySeries + The cross phase of a ringdown with the lm modes specified and + n overtones in frequency domain. + """ + input_params = props(template, mass_spin_required_args, fd_args, **kwargs) + return multimode_base(input_params, domain='fd')
+ + + +
+[docs] +def get_td_from_freqtau(template=None, **kwargs): + """Return time domain ringdown with all the modes specified. + + Parameters + ---------- + template : object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + lmns : list + Desired lmn modes as strings. All modes up to l = m = 7 are available. + The n specifies the number of overtones desired for the corresponding + lm pair, not the overtone number; maximum n=8. Example: + lmns = ['223','331'] are the modes 220, 221, 222, and 330 + f_lmn : float + Central frequency of the lmn overtone, as many as number of modes. + tau_lmn : float + Damping time of the lmn overtone, as many as number of modes. + ref_amp : str, optional + Which mode to use as the reference for computing amplitudes. Must be + 'amp220' if distance is given. Default is 'amp220'. The amplitude of + the reference mode should be specified directly, while all other + amplitudes are specified as ratios with respect to that mode. For + example, if ``ref_amp = 'amp220'``, ``lmns = ['221', '331']``, and no + distance is provided, then ``amp220 = 1e-22, amp330 = 0.1`` would + result in the 220 mode having a strain amplitude of 1e-22 and the 330 + mode having a strain amplitude of 1e-23. If distance is given, the + amplitude of the reference mode will have a completely different order + of magnitude. See table II in https://arxiv.org/abs/1107.0854 for an + estimate. An amplitude for the reference mode must always be provided, + even if that mode is not being generated. For example, if + ``ref_amp = 'amp220'`` and ``lmns = ['331']`` then both ``amp220`` + and ``amp330`` must be provided even though only the 330 mode will + be created. + amplmn : float + The amplitude of each mode, required for all modes specified plus the + reference mode. As described above, amplitudes should be specified + relative to the reference mode. + philmn : float + Phase of the lmn overtone, as many as the number of modes. + inclination : float + Inclination of the system in radians. Ignored if + ``harmonics='arbitrary'``. Default is 0. + azimuthal : float, optional + The azimuthal angle in radians. Ignored if ``harmonics='arbitrary'``. + Usually this is not necessary to specify since it is degenerate with + initial phase ``philmn``; i.e., this is only useful if you have an + expectation for what the phase of each mode is. Default is 0. + dphi[lmn] : float, optional + The difference in phase between the +m and -m mode. See the + documentation for ``dphi`` in :py:func:`td_damped_sinusoid` for + details. You may specify a + ``dphi{lmn}`` (ex. ``dphi220``) separately for each mode, and/or a + single ``dphi`` (without any lmn) for all modes that do not have + ``dphi`` specified. Default is to use 0 for all modes. + dbeta[lmn] : float, optional + The angular difference in the amplitudes of the +m and -m mode. See the + documentation for ``dbeta`` in :py:func:`td_damped_sinusoid` for + details. You may specify a ``dbeta{lmn}`` (ex. ``dbeta220``) + separately for each mode, and/or a + single ``dbeta`` (without any lmn) for all modes that do not have + ``dbeta`` specified. Default is to use 0 for all modes. + pollmn : float, optional + Angle to use for +m arbitrary harmonics of the lmn mode in radians + (example: ``pol220 = 0.1``). Only needed if ``harmonics='arbitrary'``, + ignored otherwise. See :py:func:`spher_harms` for details.
+ polnmlmn : float, optional + Angle to use for -m arbitrary harmonics of the lmn mode in radians + (example: ``polnm220 = 0.1``). Only needed if + ``harmonics='arbitrary'``, ignored otherwise. See :py:func:`spher_harms` + for details. + harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional + Which harmonics to use. See :py:func:`spher_harms` for details. + Default is spherical. + final_spin : float, optional + Dimensionless spin of the final black hole. This is required if + ``harmonics='spheroidal'``. Ignored otherwise. + delta_t : {None, float}, optional + The time step used to generate the ringdown. + If None, it will be set to the inverse of the frequency at which the + amplitude is 1/1000 of the peak amplitude (the minimum of all modes). + t_final : {None, float}, optional + The ending time of the output frequency series. + If None, it will be set to the time at which the amplitude + is 1/1000 of the peak amplitude (the maximum of all modes). + taper : bool, optional + Add a rapid ringup with timescale tau/10 at the beginning of the + waveform to avoid the abrupt turn on of the ringdown. Each mode and + overtone will have a different taper depending on its tau, + the final taper being the superposition of all the tapers. Default is + False. + + Returns + ------- + hplus : TimeSeries + The plus phase of a ringdown with the lm modes specified and + n overtones in time domain. + hcross : TimeSeries + The cross phase of a ringdown with the lm modes specified and + n overtones in time domain. + """ + input_params = props(template, freqtau_required_args, td_args, **kwargs) + return multimode_base(input_params, domain='td', freq_tau_approximant=True)
+ + + +
+[docs] +def get_fd_from_freqtau(template=None, **kwargs): + """Return frequency domain ringdown with all the modes specified. + + Parameters + ---------- + template : object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + lmns : list + Desired lmn modes as strings. All modes up to l = m = 7 are available. + The n specifies the number of overtones desired for the corresponding + lm pair, not the overtone number; maximum n=8. Example: + lmns = ['223','331'] are the modes 220, 221, 222, and 330 + f_lmn : float + Central frequency of the lmn overtone, as many as number of modes. + tau_lmn : float + Damping time of the lmn overtone, as many as number of modes. + ref_amp : str, optional + Which mode to use as the reference for computing amplitudes. Must be + 'amp220' if distance is given. Default is 'amp220'. The amplitude of + the reference mode should be specified directly, while all other + amplitudes are specified as ratios with respect to that mode. For + example, if ``ref_amp = 'amp220'``, ``lmns = ['221', '331']``, and no + distance is provided, then ``amp220 = 1e-22, amp330 = 0.1`` would + result in the 220 mode having a strain amplitude of 1e-22 and the 330 + mode having a strain amplitude of 1e-23. If distance is given, the + amplitude of the reference mode will have a completely different order + of magnitude. See table II in https://arxiv.org/abs/1107.0854 for an + estimate. An amplitude for the reference mode must always be provided, + even if that mode is not being generated. For example, if + ``ref_amp = 'amp220'`` and ``lmns = ['331']`` then both ``amp220`` + and ``amp330`` must be provided even though only the 330 mode will + be created. + amplmn : float + The amplitude of each mode, required for all modes specified plus the + reference mode. As described above, amplitudes should be specified + relative to the reference mode. + philmn : float + Phase of the lmn overtone, as many as the number of modes. + inclination : float + Inclination of the system in radians. Ignored if + ``harmonics='arbitrary'``. Default is 0. + azimuthal : float, optional + The azimuthal angle in radians. Ignored if ``harmonics='arbitrary'``. + Usually this is not necessary to specify since it is degenerate with + initial phase ``philmn``; i.e., this is only useful if you have an + expectation for what the phase of each mode is. Default is 0. + dphi[lmn] : float, optional + The difference in phase between the +m and -m mode. See the + documentation for ``dphi`` in :py:func:`td_damped_sinusoid` for + details. You may specify a + ``dphi{lmn}`` (ex. ``dphi220``) separately for each mode, and/or a + single ``dphi`` (without any lmn) for all modes that do not have + ``dphi`` specified. Default is to use 0 for all modes. + dbeta[lmn] : float, optional + The angular difference in the amplitudes of the +m and -m mode. See the + documentation for ``dbeta`` in :py:func:`td_damped_sinusoid` for + details. You may specify a ``dbeta{lmn}`` (ex. ``dbeta220``) + separately for each mode, and/or a + single ``dbeta`` (without any lmn) for all modes that do not have + ``dbeta`` specified. Default is to use 0 for all modes. + pollmn : float, optional + Angle to use for +m arbitrary harmonics of the lmn mode in radians + (example: ``pol220 = 0.1``). Only needed if ``harmonics='arbitrary'``, + ignored otherwise. See :py:func:`spher_harms` for details.
+ polnmlmn : float, optional + Angle to use for -m arbitrary harmonics of the lmn mode in radians + (example: ``polnm220 = 0.1``). Only needed if + ``harmonics='arbitrary'``, ignored otherwise. See :py:func:`spher_harms` + for details. + harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional + Which harmonics to use. See :py:func:`spher_harms` for details. + Default is spherical. + final_spin : float, optional + Dimensionless spin of the final black hole. This is required if + ``harmonics='spheroidal'``. Ignored otherwise. + delta_f : {None, float}, optional + The frequency step used to generate the ringdown. + If None, it will be set to the inverse of the time at which the + amplitude is 1/1000 of the peak amplitude (the minimum of all modes). + f_lower : {None, float}, optional + The starting frequency of the output frequency series. + If None, it will be set to delta_f. + f_final : {None, float}, optional + The ending frequency of the output frequency series. + If None, it will be set to the frequency at which the amplitude + is 1/1000 of the peak amplitude (the maximum of all modes). + + Returns + ------- + hplustilde : FrequencySeries + The plus phase of a ringdown with the lm modes specified and + n overtones in frequency domain. + hcrosstilde : FrequencySeries + The cross phase of a ringdown with the lm modes specified and + n overtones in frequency domain. + """ + input_params = props(template, freqtau_required_args, fd_args, **kwargs) + return multimode_base(input_params, domain='fd', freq_tau_approximant=True)
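+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# A single-mode (220) frequency-domain ringdown specified directly by its
+# central frequency and damping time. The keyword names follow the
+# f_lmn / tau_lmn pattern documented above; all numerical values are
+# placeholders chosen only for illustration.
+if __name__ == '__main__':
+    hptilde, hctilde = get_fd_from_freqtau(
+        lmns=['221'],                  # '221' requests the 220 mode only
+        f_220=250., tau_220=0.004,     # frequency (Hz) and damping time (s)
+        amp220=1e-21, phi220=0.,
+        delta_f=0.25, f_lower=10., f_final=2048.)
+    print(len(hptilde))
+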
+ + + +# Approximant names ########################################################### +ringdown_fd_approximants = { + 'FdQNMfromFinalMassSpin': get_fd_from_final_mass_spin, + 'FdQNMfromFreqTau': get_fd_from_freqtau} + + +ringdown_td_approximants = { + 'TdQNMfromFinalMassSpin': get_td_from_final_mass_spin, + 'TdQNMfromFreqTau': get_td_from_freqtau} +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/sinegauss.html b/latest/html/_modules/pycbc/waveform/sinegauss.html new file mode 100644 index 00000000000..76d5578d436 --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/sinegauss.html @@ -0,0 +1,170 @@ + + + + + + pycbc.waveform.sinegauss — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.waveform.sinegauss

+""" Generation of sine-Gaussian bursty type things
+"""
+
+import pycbc.types
+import numpy
+
+
+[docs] +def fd_sine_gaussian(amp, quality, central_frequency, fmin, fmax, delta_f): + """ Generate a Fourier domain sine-Gaussian + + Parameters + ---------- + amp: float + Amplitude of the sine-Gaussian + quality: float + The quality factor + central_frequency: float + The central frequency of the sine-Gaussian + fmin: float + The minimum frequency to generate the sine-Gaussian. + fmax: float + The maximum frequency to generate the sine-Gaussian. This determines + the length of the output vector. + delta_f: float + The size of the frequency step + + Returns + ------- + sg: pycbc.types.FrequencySeries + A Fourier domain sine-Gaussian + """ + kmin = int(round(fmin / delta_f)) + kmax = int(round(fmax / delta_f)) + f = numpy.arange(kmin, kmax) * delta_f + tau = quality / 2 / numpy.pi / central_frequency + A = amp * numpy.pi ** 0.5 / 2 * tau + d = A * numpy.exp(-(numpy.pi * tau * (f - central_frequency))**2.0) + d *= (1 + numpy.exp(-quality ** 2.0 * f / central_frequency)) + v = numpy.zeros(kmax, dtype=numpy.complex128) + v[kmin:kmax] = d[:] + return pycbc.types.FrequencySeries(v, delta_f=delta_f)
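+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# Generate a frequency-domain sine-Gaussian centred at 200 Hz with quality
+# factor 10; the amplitude and band limits below are placeholder values.
+if __name__ == '__main__':
+    sg = fd_sine_gaussian(1e-21, 10., 200., 20., 1024., 0.25)
+    print(len(sg), sg.delta_f)
+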
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/spa_tmplt.html b/latest/html/_modules/pycbc/waveform/spa_tmplt.html new file mode 100644 index 00000000000..24e0a1ece99 --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/spa_tmplt.html @@ -0,0 +1,420 @@ + + + + + + pycbc.waveform.spa_tmplt — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.waveform.spa_tmplt

+#  Adapted from code in LALSimInspiralTaylorF2.c
+#
+#  Copyright (C) 2007 Jolien Creighton, B.S. Sathyaprakash, Thomas Cokelaer
+#  Copyright (C) 2012 Leo Singer, Alex Nitz
+#
+#  This program is free software; you can redistribute it and/or modify
+#  it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; either version 2 of the License, or
+#  (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; see the file COPYING. If not, write to the
+#  Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+#  MA  02111-1307  USA
+
+"""This module contains functions for generating common SPA template precalculated
+   vectors.
+"""
+from math import sqrt, log
+import warnings
+import numpy, lal, pycbc.pnutils, pycbc.libutils
+from pycbc.scheme import schemed
+from pycbc.types import FrequencySeries, Array, complex64, float32, zeros
+from pycbc.waveform.utils import ceilpow2
+
+lalsimulation = pycbc.libutils.import_optional('lalsimulation')
+
+
+[docs] +def findchirp_chirptime(m1, m2, fLower, porder): + # variables used to compute chirp time + m1 = float(m1) + m2 = float(m2) + m = m1 + m2 + eta = m1 * m2 / m / m + c0T = c2T = c3T = c4T = c5T = c6T = c6LogT = c7T = 0. + + # All implemented option + if porder == -1: + porder = 7 + + if porder >= 7: + c7T = lal.PI * (14809.0 * eta * eta / 378.0 - 75703.0 * eta / 756.0 - 15419335.0 / 127008.0) + + if porder >= 6: + c6T = lal.GAMMA * 6848.0 / 105.0 - 10052469856691.0 / 23471078400.0 +\ + lal.PI * lal.PI * 128.0 / 3.0 + \ + eta * (3147553127.0 / 3048192.0 - lal.PI * lal.PI * 451.0 / 12.0) -\ + eta * eta * 15211.0 / 1728.0 + eta * eta * eta * 25565.0 / 1296.0 +\ + eta * eta * eta * 25565.0 / 1296.0 + numpy.log(4.0) * 6848.0 / 105.0 + c6LogT = 6848.0 / 105.0 + + if porder >= 5: + c5T = 13.0 * lal.PI * eta / 3.0 - 7729.0 * lal.PI / 252.0 + + if porder >= 4: + c4T = 3058673.0 / 508032.0 + eta * (5429.0 / 504.0 + eta * 617.0 / 72.0) + c3T = -32.0 * lal.PI / 5.0 + c2T = 743.0 / 252.0 + eta * 11.0 / 3.0 + c0T = 5.0 * m * lal.MTSUN_SI / (256.0 * eta) + + # This is the PN parameter v evaluated at the lower freq. cutoff + xT = pow (lal.PI * m * lal.MTSUN_SI * fLower, 1.0 / 3.0) + x2T = xT * xT + x3T = xT * x2T + x4T = x2T * x2T + x5T = x2T * x3T + x6T = x3T * x3T + x7T = x3T * x4T + x8T = x4T * x4T + + # Computes the chirp time as tC = t(v_low); + # tC = t(v_low) - t(v_upper) would be more + # correct, but the difference is negligible. + + # This formula works for any PN order, because + # higher order coeffs will be set to zero. + return c0T * (1 + c2T * x2T + c3T * x3T + c4T * x4T + c5T * x5T + + (c6T + c6LogT * numpy.log(xT)) * x6T + c7T * x7T) / x8T
+ + + +
+[docs] +def spa_length_in_time(**kwds): + """ + Returns the length in time of the template, + based on the masses, PN order, and low-frequency + cut-off. + """ + m1 = kwds['mass1'] + m2 = kwds['mass2'] + flow = kwds['f_lower'] + porder = int(kwds['phase_order']) + + # For now, we call the swig-wrapped function below in + # lalinspiral. Eventually would be nice to replace this + # with a function using PN coeffs from lalsimulation. + return findchirp_chirptime(m1, m2, flow, porder)
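+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# Rough duration (in seconds) of a 1.4+1.4 solar-mass inspiral from 30 Hz,
+# using the highest implemented phase order (phase_order=-1). The masses and
+# cutoff are placeholder values.
+if __name__ == '__main__':
+    print(spa_length_in_time(mass1=1.4, mass2=1.4, f_lower=30.,
+                             phase_order=-1))
+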
+ + + +
+[docs] +def spa_amplitude_factor(**kwds): + m1 = kwds['mass1'] + m2 = kwds['mass2'] + + _, eta = pycbc.pnutils.mass1_mass2_to_mchirp_eta(m1, m2) + + FTaN = 32. * eta * eta / 5. + dETaN = 2. * -eta / 2. + + M = m1 + m2 + + m_sec = M * lal.MTSUN_SI + piM = lal.PI * m_sec + + amp0 = 4. * m1 * m2 / (1e6 * lal.PC_SI) * lal.MRSUN_SI * lal.MTSUN_SI * sqrt(lal.PI / 12.) + + fac = numpy.sqrt(-dETaN / FTaN) * amp0 * (piM ** (-7./6.)) + return -fac
+ + + +_prec = None +
+[docs] +def spa_tmplt_precondition(length, delta_f, kmin=0): + """Return the amplitude portion of the TaylorF2 approximant, used to precondition + the strain data. The result is cached, and so should not be modified, only read. + """ + global _prec + if _prec is None or _prec.delta_f != delta_f or len(_prec) < length: + v = numpy.arange(0, (kmin + length*2), 1.) * delta_f + v = numpy.power(v[1:len(v)], -7./6.) + _prec = FrequencySeries(v, delta_f=delta_f, dtype=float32) + return _prec[kmin:kmin + length]
+ + + +
+[docs] +def spa_tmplt_norm(psd, length, delta_f, f_lower): + amp = spa_tmplt_precondition(length, delta_f) + k_min = int(f_lower / delta_f) + sigma = (amp[k_min:length].numpy() ** 2. / psd[k_min:length].numpy()) + norm_vec = numpy.zeros(length) + norm_vec[k_min:length] = sigma.cumsum() * 4. * delta_f + return norm_vec
+ + + +
+[docs] +def spa_tmplt_end(**kwds): + return pycbc.pnutils.f_SchwarzISCO(kwds['mass1'] + kwds['mass2'])
+ + + +
+[docs] +def spa_distance(psd, mass1, mass2, lower_frequency_cutoff, snr=8): + """ Return the distance at a given snr (default=8) of the SPA TaylorF2 + template. + """ + kend = int(spa_tmplt_end(mass1=mass1, mass2=mass2) / psd.delta_f) + norm1 = spa_tmplt_norm(psd, len(psd), psd.delta_f, lower_frequency_cutoff) + norm2 = spa_amplitude_factor(mass1=mass1, mass2=mass2) ** 2.0 + + if kend >= len(psd): + kend = len(psd) - 2 + return sqrt(norm1[kend] * norm2) / snr
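+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# Estimate the distance (in Mpc) at which a 10+10 solar-mass TaylorF2 template
+# reaches SNR 8 in the aLIGO zero-detuned high-power design PSD. The PSD
+# length, resolution and cutoff frequencies are placeholder choices.
+if __name__ == '__main__':
+    from pycbc.psd import aLIGOZeroDetHighPower
+    delta_f = 0.25
+    flen = int(2048 / delta_f) + 1
+    psd = aLIGOZeroDetHighPower(flen, delta_f, 10.)
+    print(spa_distance(psd, 10., 10., 20., snr=8))
+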
+ + + +
+[docs] +@schemed("pycbc.waveform.spa_tmplt_") +def spa_tmplt_engine(htilde, kmin, phase_order, delta_f, piM, pfaN, + pfa2, pfa3, pfa4, pfa5, pfl5, + pfa6, pfl6, pfa7, amp_factor): + """ Calculate the spa tmplt phase + """ + err_msg = "This function is a stub that should be overridden using the " + err_msg += "scheme. You shouldn't be seeing this error!" + raise ValueError(err_msg)
+ + + +
+[docs] +def spa_tmplt(**kwds): + """ Generate a minimal TaylorF2 approximant with optimizations for the sin/cos + """ + distance = kwds['distance'] + mass1 = kwds['mass1'] + mass2 = kwds['mass2'] + s1z = kwds['spin1z'] + s2z = kwds['spin2z'] + phase_order = int(kwds['phase_order']) + #amplitude_order = int(kwds['amplitude_order']) + spin_order = int(kwds['spin_order']) + + if 'out' in kwds: + out = kwds['out'] + else: + out = None + + amp_factor = spa_amplitude_factor(mass1=mass1, mass2=mass2) / distance + + lal_pars = lal.CreateDict() + if phase_order != -1: + lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder( + lal_pars, phase_order) + + if spin_order != -1: + lalsimulation.SimInspiralWaveformParamsInsertPNSpinOrder( + lal_pars, spin_order) + + # Calculate the PN terms + phasing = lalsimulation.SimInspiralTaylorF2AlignedPhasing( + float(mass1), float(mass2), + float(s1z), float(s2z), + lal_pars) + + pfaN = phasing.v[0] + pfa2 = phasing.v[2] / pfaN + pfa3 = phasing.v[3] / pfaN + pfa4 = phasing.v[4] / pfaN + pfa5 = phasing.v[5] / pfaN + pfa6 = (phasing.v[6] - phasing.vlogv[6] * log(4)) / pfaN + pfa7 = phasing.v[7] / pfaN + + pfl5 = phasing.vlogv[5] / pfaN + pfl6 = phasing.vlogv[6] / pfaN + + piM = lal.PI * (mass1 + mass2) * lal.MTSUN_SI + + if 'sample_points' not in kwds: + f_lower = kwds['f_lower'] + delta_f = kwds['delta_f'] + kmin = int(f_lower / float(delta_f)) + + # Get max frequency one way or another + # f_final is assigned default value 0 in parameters.py + if 'f_final' in kwds and kwds['f_final'] > 0.: + fstop = kwds['f_final'] + elif 'f_upper' in kwds: + fstop = kwds['f_upper'] + warnings.warn('f_upper is deprecated in favour of f_final!', + DeprecationWarning) + else: + # Schwarzschild ISCO frequency + vISCO = 1. / sqrt(6.) + fstop = vISCO * vISCO * vISCO / piM + if fstop <= f_lower: + raise ValueError("cannot generate waveform! f_lower >= f_final" + f" ({f_lower}, {fstop})") + + kmax = int(fstop / delta_f) + f_max = ceilpow2(fstop) + n = int(f_max / delta_f) + 1 + + if not out: + htilde = FrequencySeries(zeros(n, dtype=numpy.complex64), delta_f=delta_f, copy=False) + else: + if type(out) is not Array: + raise TypeError("Output must be an instance of Array") + if len(out) < kmax: + kmax = len(out) + if out.dtype != complex64: + raise TypeError("Output array is the wrong dtype") + htilde = FrequencySeries(out, delta_f=delta_f, copy=False) + + spa_tmplt_engine(htilde[kmin:kmax], kmin, phase_order, + delta_f, piM, pfaN, + pfa2, pfa3, pfa4, pfa5, pfl5, + pfa6, pfl6, pfa7, amp_factor) + else: + from .spa_tmplt_cpu import spa_tmplt_inline_sequence + htilde = numpy.empty(len(kwds['sample_points']), dtype=numpy.complex64) + spa_tmplt_inline_sequence( + piM, pfaN, pfa2, pfa3, pfa4, pfa5, pfl5, pfa6, pfl6, pfa7, + amp_factor, kwds['sample_points'], htilde) + + return htilde
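+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# Generate a TaylorF2 frequency-domain template with this optimized engine.
+# Requires lalsimulation; all parameter values below are placeholders.
+if __name__ == '__main__':
+    htilde = spa_tmplt(mass1=10., mass2=10., spin1z=0., spin2z=0.,
+                       distance=100., phase_order=-1, spin_order=-1,
+                       f_lower=30., delta_f=0.25, f_final=0.)
+    print(len(htilde), htilde.delta_f)
+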
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/supernovae.html b/latest/html/_modules/pycbc/waveform/supernovae.html new file mode 100644 index 00000000000..88450868210 --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/supernovae.html @@ -0,0 +1,182 @@ + + + + + + pycbc.waveform.supernovae — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.waveform.supernovae

+"""Generate core-collapse supernovae waveform for core bounce and
+subsequent postbounce oscillations.
+"""
+
+import numpy
+from pycbc.types import TimeSeries
+from pycbc.io.hdf import HFile
+
+_pc_dict = {}
+
+
+
+[docs] +def get_corecollapse_bounce(**kwargs): + """ Generates core bounce and postbounce waveform by using principal + component basis vectors from a .hdf file. The waveform parameters are the + coefficients of the principal components and the distance. The number of + principal components used can also be varied. + """ + + try: + principal_components = _pc_dict['principal_components'] + except KeyError: + with HFile(kwargs['principal_components_file'], 'r') as pc_file: + principal_components = numpy.array(pc_file['principal_components']) + _pc_dict['principal_components'] = principal_components + + if 'coefficients_array' in kwargs: + coefficients_array = kwargs['coefficients_array'] + else: + coeffs_keys = [x for x in kwargs if x.startswith('coeff_')] + coeffs_keys = numpy.sort(numpy.array(coeffs_keys)) + coefficients_array = numpy.array([kwargs[x] for x in coeffs_keys]) + + no_of_pcs = int(kwargs['no_of_pcs']) + coefficients_array = coefficients_array[:no_of_pcs] + principal_components = principal_components[:no_of_pcs] + + pc_len = len(principal_components) + assert len(coefficients_array) == pc_len + + distance = kwargs['distance'] + mpc_conversion = 3.08567758128e+22 + distance *= mpc_conversion + + strain = numpy.dot(coefficients_array, principal_components) / distance + delta_t = kwargs['delta_t'] + outhp = TimeSeries(strain, delta_t=delta_t) + outhc = TimeSeries(numpy.zeros(len(strain)), delta_t=delta_t) + return outhp, outhc
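+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# Build a small toy principal-components file and generate a waveform from it.
+# A real analysis would use basis vectors derived from core-collapse
+# simulation catalogues; the file name and all values here are placeholders.
+if __name__ == '__main__':
+    import h5py
+    with h5py.File('toy_pcs.hdf', 'w') as pc_file:
+        pc_file['principal_components'] = 1e-22 * numpy.random.normal(
+            size=(3, 4096))
+    hp, hc = get_corecollapse_bounce(
+        principal_components_file='toy_pcs.hdf',
+        coeff_0=1.0, coeff_1=0.5, coeff_2=0.1, no_of_pcs=3,
+        distance=10.,            # distance in Mpc (converted to metres above)
+        delta_t=1.0/4096)
+    print(hp.duration)
+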
+ + + +# Approximant names ########################################################### +supernovae_td_approximants = {'CoreCollapseBounce': get_corecollapse_bounce} +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/utils.html b/latest/html/_modules/pycbc/waveform/utils.html new file mode 100644 index 00000000000..01aaec968d0 --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/utils.html @@ -0,0 +1,704 @@ + + + + + + pycbc.waveform.utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.waveform.utils

+# Copyright (C) 2013  Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""This module contains convenience utilities for manipulating waveforms
+"""
+from pycbc.types import TimeSeries, FrequencySeries, Array, float32, float64, complex_same_precision_as, real_same_precision_as
+import lal
+from math import frexp
+import numpy
+from pycbc.scheme import schemed
+from scipy import signal
+
+
+[docs] +def ceilpow2(n): + """convenience function to determine a power-of-2 upper frequency limit""" + signif,exponent = frexp(n) + if (signif < 0): + return 1; + if (signif == 0.5): + exponent -= 1; + return (1) << exponent;
+ + + +
+[docs] +def coalign_waveforms(h1, h2, psd=None, + low_frequency_cutoff=None, + high_frequency_cutoff=None, + resize=True): + """ Return two time series which are aligned in time and phase. + + The alignment is only to the nearest sample point and all changes to the + phase are made to the first input waveform. Waveforms should not be split + across the vector boundary. If they are, please use roll or cyclic time shift + to ensure that the entire signal is contiguous in the time series. + + Parameters + ---------- + h1: pycbc.types.TimeSeries + The first waveform to align. + h2: pycbc.types.TimeSeries + The second waveform to align. + psd: {None, pycbc.types.FrequencySeries} + A psd to weight the alignment. + low_frequency_cutoff: {None, float} + The low frequency cutoff to weight the matching in Hz. + high_frequency_cutoff: {None, float} + The high frequency cutoff to weight the matching in Hz. + resize: Optional, {True, boolean} + If true, the vectors will be resized to match each other. If false, + they must be the same length, and that length must be even. + + Returns + ------- + h1: pycbc.types.TimeSeries + The shifted waveform to align with h2 + h2: pycbc.types.TimeSeries + The resized (if necessary) waveform to align with h1. + """ + from pycbc.filter import matched_filter + mlen = ceilpow2(max(len(h1), len(h2))) + + h1 = h1.copy() + h2 = h2.copy() + + if resize: + h1.resize(mlen) + h2.resize(mlen) + elif len(h1) != len(h2) or len(h2) % 2 != 0: + raise ValueError("Time series must be the same size and even if you do " + "not allow resizing") + + snr = matched_filter(h1, h2, psd=psd, + low_frequency_cutoff=low_frequency_cutoff, + high_frequency_cutoff=high_frequency_cutoff) + + _, l = snr.abs_max_loc() + rotation = snr[l] / abs(snr[l]) + h1 = (h1.to_frequencyseries() * rotation).to_timeseries() + h1.roll(l) + + h1 = TimeSeries(h1, delta_t=h2.delta_t, epoch=h2.start_time) + return h1, h2
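+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# Align the two polarizations of a waveform (which differ by a phase shift)
+# so that they agree in time and phase. The approximant and parameters are
+# placeholders.
+if __name__ == '__main__':
+    from pycbc.waveform import get_td_waveform
+    hp, hc = get_td_waveform(approximant='TaylorT4', mass1=10, mass2=10,
+                             f_lower=30, delta_t=1.0/4096)
+    h1, h2 = coalign_waveforms(hc, hp)
+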
+ + +
+[docs] +def phase_from_frequencyseries(htilde, remove_start_phase=True): + """Returns the phase from the given frequency-domain waveform. This assumes + that the waveform has been sampled finely enough that the phase cannot + change by more than pi radians between each step. + + Parameters + ---------- + htilde : FrequencySeries + The waveform to get the phase for; must be a complex frequency series. + remove_start_phase : {True, bool} + Subtract the initial phase before returning. + + Returns + ------- + FrequencySeries + The phase of the waveform as a function of frequency. + """ + p = numpy.unwrap(numpy.angle(htilde.data)).astype( + real_same_precision_as(htilde)) + if remove_start_phase: + p += -p[0] + return FrequencySeries(p, delta_f=htilde.delta_f, epoch=htilde.epoch, + copy=False)
+ + +
+[docs] +def amplitude_from_frequencyseries(htilde): + """Returns the amplitude of the given frequency-domain waveform as a + FrequencySeries. + + Parameters + ---------- + htilde : FrequencySeries + The waveform to get the amplitude of. + + Returns + ------- + FrequencySeries + The amplitude of the waveform as a function of frequency. + """ + amp = abs(htilde.data).astype(real_same_precision_as(htilde)) + return FrequencySeries(amp, delta_f=htilde.delta_f, epoch=htilde.epoch, + copy=False)
+ + +
+[docs] +def time_from_frequencyseries(htilde, sample_frequencies=None, + discont_threshold=0.99*numpy.pi): + """Computes time as a function of frequency from the given + frequency-domain waveform. This assumes the stationary phase + approximation. Any frequencies lower than the first non-zero value in + htilde are assigned the time at the first non-zero value. Times for any + frequencies above the next-to-last non-zero value in htilde will be + assigned the time of the next-to-last non-zero value. + + .. note:: + Some waveform models (e.g., `SEOBNRv2_ROM_DoubleSpin`) can have + discontinuities in the phase towards the end of the waveform due to + numerical error. We therefore exclude any points that occur after a + discontinuity in the phase, as the time estimate becomes untrustworthy + beyond that point. What determines a discontinuity in the phase is set + by the `discont_threshold`. To turn this feature off, just set + `discont_threshold` to a value larger than pi (due to the unwrapping + of the phase, no two points can have a difference > pi). + + Parameters + ---------- + htilde : FrequencySeries + The waveform to get the time evolution of; must be complex. + sample_frequencies : {None, array} + The frequencies at which the waveform is sampled. If None, will + retrieve from ``htilde.sample_frequencies``. + discont_threshold : {0.99*pi, float} + If the difference in the phase changes by more than this threshold, + it is considered to be a discontinuity. Default is 0.99*pi. + + Returns + ------- + FrequencySeries + The time evolution of the waveform as a function of frequency. + """ + if sample_frequencies is None: + sample_frequencies = htilde.sample_frequencies.numpy() + phase = phase_from_frequencyseries(htilde).data + dphi = numpy.diff(phase) + time = -dphi / (2.*numpy.pi*numpy.diff(sample_frequencies)) + nzidx = numpy.nonzero(abs(htilde.data))[0] + kmin, kmax = nzidx[0], nzidx[-2] + # exclude everything after a discontinuity + discont_idx = numpy.where(abs(dphi[kmin:]) >= discont_threshold)[0] + if discont_idx.size != 0: + kmax = min(kmax, kmin + discont_idx[0]-1) + time[:kmin] = time[kmin] + time[kmax:] = time[kmax] + return FrequencySeries(time.astype(real_same_precision_as(htilde)), + delta_f=htilde.delta_f, epoch=htilde.epoch, + copy=False)
+ + +
+[docs] +def phase_from_polarizations(h_plus, h_cross, remove_start_phase=True): + """Return gravitational wave phase + + Return the gravitational-wave phase from the h_plus and h_cross + polarizations of the waveform. The returned phase is always + positive and increasing with an initial phase of 0. + + Parameters + ---------- + h_plus : TimeSeries + A PyCBC TimeSeries vector that contains the plus polarization of the + gravitational waveform. + h_cross : TimeSeries + A PyCBC TimeSeries vector that contains the cross polarization of the + gravitational waveform. + + Returns + ------- + GWPhase : TimeSeries + A TimeSeries containing the gravitational wave phase. + + Examples + -------- + >>> from pycbc.waveform import get_td_waveform, phase_from_polarizations + >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10, + f_lower=30, delta_t=1.0/4096) + >>> phase = phase_from_polarizations(hp, hc) + + """ + p = numpy.unwrap(numpy.arctan2(h_cross.data, h_plus.data)).astype( + real_same_precision_as(h_plus)) + if remove_start_phase: + p += -p[0] + return TimeSeries(p, delta_t=h_plus.delta_t, epoch=h_plus.start_time, + copy=False)
+ + +
+[docs] +def amplitude_from_polarizations(h_plus, h_cross): + """Return gravitational wave amplitude + + Return the gravitational-wave amplitude from the h_plus and h_cross + polarizations of the waveform. + + Parameters + ---------- + h_plus : TimeSeries + A PyCBC TimeSeries vector that contains the plus polarization of the + gravitational waveform. + h_cross : TimeSeries + A PyCBC TimeSeries vector that contains the cross polarization of the + gravitational waveform. + + Returns + ------- + GWAmplitude : TimeSeries + A TimeSeries containing the gravitational wave amplitude. + + Examples + -------- + >>> from pycbc.waveform import get_td_waveform, phase_from_polarizations + >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10, + f_lower=30, delta_t=1.0/4096) + >>> amp = amplitude_from_polarizations(hp, hc) + + """ + amp = (h_plus.squared_norm() + h_cross.squared_norm()) ** (0.5) + return TimeSeries(amp, delta_t=h_plus.delta_t, epoch=h_plus.start_time)
+ + +
+[docs] +def frequency_from_polarizations(h_plus, h_cross): + """Return gravitational wave frequency + + Return the gravitational-wave frequency as a function of time + from the h_plus and h_cross polarizations of the waveform. + It is 1 bin shorter than the input vectors and the sample times + are advanced half a bin. + + Parameters + ---------- + h_plus : TimeSeries + A PyCBC TimeSeries vector that contains the plus polarization of the + gravitational waveform. + h_cross : TimeSeries + A PyCBC TimeSeries vector that contains the cross polarization of the + gravitational waveform. + + Returns + ------- + GWFrequency : TimeSeries + A TimeSeries containing the gravitational wave frequency as a function + of time. + + Examples + -------- + >>> from pycbc.waveform import get_td_waveform, phase_from_polarizations + >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10, + f_lower=30, delta_t=1.0/4096) + >>> freq = frequency_from_polarizations(hp, hc) + + """ + phase = phase_from_polarizations(h_plus, h_cross) + freq = numpy.diff(phase) / ( 2 * lal.PI * phase.delta_t ) + start_time = phase.start_time + phase.delta_t / 2 + return TimeSeries(freq.astype(real_same_precision_as(h_plus)), + delta_t=phase.delta_t, epoch=start_time)
+ + +# map between tapering string in sim_inspiral table or inspiral +# code option and lalsimulation constants +try: + import lalsimulation as sim + + taper_map = { + 'TAPER_NONE' : None, + 'TAPER_START' : sim.SIM_INSPIRAL_TAPER_START, + 'start' : sim.SIM_INSPIRAL_TAPER_START, + 'TAPER_END' : sim.SIM_INSPIRAL_TAPER_END, + 'end' : sim.SIM_INSPIRAL_TAPER_END, + 'TAPER_STARTEND': sim.SIM_INSPIRAL_TAPER_STARTEND, + 'startend' : sim.SIM_INSPIRAL_TAPER_STARTEND + } + + taper_func_map = { + numpy.dtype(float32): sim.SimInspiralREAL4WaveTaper, + numpy.dtype(float64): sim.SimInspiralREAL8WaveTaper + } +except ImportError: + taper_map = {} + taper_func_map = {} + +
+[docs] +def taper_timeseries(tsdata, tapermethod=None, return_lal=False): + """ + Taper either or both ends of a time series using wrapped + LALSimulation functions + + Parameters + ---------- + tsdata : TimeSeries + Series to be tapered, dtype must be either float32 or float64 + tapermethod : string + Should be one of ('TAPER_NONE', 'TAPER_START', 'TAPER_END', + 'TAPER_STARTEND', 'start', 'end', 'startend') - NB 'TAPER_NONE' will + not change the series! + return_lal : Boolean + If True, return a wrapped LAL time series object, else return a + PyCBC time series. + """ + if tapermethod is None: + raise ValueError("Must specify a tapering method (function was called " + "with tapermethod=None)") + if tapermethod not in taper_map.keys(): + raise ValueError("Unknown tapering method %s, valid methods are %s" % \ + (tapermethod, ", ".join(taper_map.keys()))) + if tsdata.dtype not in (float32, float64): + raise TypeError("Strain dtype must be float32 or float64, not " + + str(tsdata.dtype)) + taper_func = taper_func_map[tsdata.dtype] + # make a LAL TimeSeries to pass to the LALSim function + ts_lal = tsdata.astype(tsdata.dtype).lal() + if taper_map[tapermethod] is not None: + taper_func(ts_lal.data, taper_map[tapermethod]) + if return_lal: + return ts_lal + else: + return TimeSeries(ts_lal.data.data[:], delta_t=ts_lal.deltaT, + epoch=ts_lal.epoch)
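+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# Taper both ends of a time-domain waveform with the wrapped LALSimulation
+# routine. Requires lalsimulation; the waveform parameters are placeholders.
+if __name__ == '__main__':
+    from pycbc.waveform import get_td_waveform
+    hp, _ = get_td_waveform(approximant='TaylorT4', mass1=10, mass2=10,
+                            f_lower=30, delta_t=1.0/4096)
+    hp_tapered = taper_timeseries(hp, tapermethod='TAPER_STARTEND')
+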
+ + +
+[docs] +@schemed("pycbc.waveform.utils_") +def apply_fseries_time_shift(htilde, dt, kmin=0, copy=True): + """Shifts a frequency domain waveform in time. The waveform is assumed to + be sampled at equal frequency intervals. + """
+ + +
+[docs] +def apply_fd_time_shift(htilde, shifttime, kmin=0, fseries=None, copy=True): + """Shifts a frequency domain waveform in time. The shift applied is + shifttime - htilde.epoch. + + Parameters + ---------- + htilde : FrequencySeries + The waveform frequency series. + shifttime : float + The time to shift the frequency series to. + kmin : {0, int} + The starting index of htilde to apply the time shift. Default is 0. + fseries : {None, numpy array} + The frequencies of each element in htilde. This is only needed if htilde is not + sampled at equal frequency steps. + copy : {True, bool} + Make a copy of htilde before applying the time shift. If False, the time + shift will be applied to htilde's data. + + Returns + ------- + FrequencySeries + A frequency series with the waveform shifted to the new time. If copy + is True, will be a new frequency series; if copy is False, will be + the same as htilde. + """ + dt = float(shifttime - htilde.epoch) + if dt == 0.: + # no shift to apply, just copy if desired + if copy: + htilde = 1. * htilde + elif isinstance(htilde, FrequencySeries): + # FrequencySeries means equally sampled in frequency, use faster shifting + htilde = apply_fseries_time_shift(htilde, dt, kmin=kmin, copy=copy) + else: + if fseries is None: + fseries = htilde.sample_frequencies.numpy() + shift = Array(numpy.exp(-2j*numpy.pi*dt*fseries), + dtype=complex_same_precision_as(htilde)) + if copy: + htilde = 1. * htilde + htilde *= shift + return htilde
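+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# Shift a frequency-domain waveform so that its reference epoch lands at
+# t = 2 s. The approximant and parameters are placeholders.
+if __name__ == '__main__':
+    from pycbc.waveform import get_fd_waveform
+    hptilde, _ = get_fd_waveform(approximant='IMRPhenomD', mass1=30, mass2=30,
+                                 f_lower=20, delta_f=0.25)
+    shifted = apply_fd_time_shift(hptilde, 2.0)
+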
+ + +
+[docs] +def td_taper(out, start, end, beta=8, side='left'): + """Applies a taper to the given TimeSeries. + + A half-kaiser window is used for the roll-off. + + Parameters + ---------- + out : TimeSeries + The ``TimeSeries`` to taper. + start : float + The time (in s) to start the taper window. + + end : float + The time (in s) to end the taper window. + beta : int, optional + The beta parameter to use for the Kaiser window. See + ``scipy.signal.kaiser`` for details. Default is 8. + side : {'left', 'right'} + The side to apply the taper to. If ``'left'`` (``'right'``), the taper + will roll up (down) between ``start`` and ``end``, with all values + before ``start`` (after ``end``) set to zero. Default is ``'left'``. + + Returns + ------- + TimeSeries + The tapered time series. + """ + out = out.copy() + width = end - start + winlen = 2 * int(width / out.delta_t) + window = Array(signal.get_window(('kaiser', beta), winlen)) + xmin = int((start - out.start_time) / out.delta_t) + xmax = xmin + winlen//2 + if side == 'left': + out[xmin:xmax] *= window[:winlen//2] + if xmin > 0: + out[:xmin].clear() + elif side == 'right': + out[xmin:xmax] *= window[winlen//2:] + if xmax < len(out): + out[xmax:].clear() + else: + raise ValueError("unrecognized side argument {}".format(side)) + return out
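+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# Roll the start of a time-domain waveform up over its first 0.1 s with a
+# half-Kaiser window. The waveform parameters and taper width are
+# placeholders.
+if __name__ == '__main__':
+    from pycbc.waveform import get_td_waveform
+    hp, _ = get_td_waveform(approximant='TaylorT4', mass1=10, mass2=10,
+                            f_lower=30, delta_t=1.0/4096)
+    start = float(hp.start_time)
+    hp_tapered = td_taper(hp, start, start + 0.1, side='left')
+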
+ + +
+[docs] +def fd_taper(out, start, end, beta=8, side='left'): + """Applies a taper to the given FrequencySeries. + + A half-kaiser window is used for the roll-off. + + Parameters + ---------- + out : FrequencySeries + The ``FrequencySeries`` to taper. + start : float + The frequency (in Hz) to start the taper window. + end : float + The frequency (in Hz) to end the taper window. + beta : int, optional + The beta parameter to use for the Kaiser window. See + ``scipy.signal.kaiser`` for details. Default is 8. + side : {'left', 'right'} + The side to apply the taper to. If ``'left'`` (``'right'``), the taper + will roll up (down) between ``start`` and ``end``, with all values + before ``start`` (after ``end``) set to zero. Default is ``'left'``. + + Returns + ------- + FrequencySeries + The tapered frequency series. + """ + out = out.copy() + width = end - start + winlen = 2 * int(width / out.delta_f) + window = Array(signal.get_window(('kaiser', beta), winlen)) + kmin = int(start / out.delta_f) + kmax = kmin + winlen//2 + if side == 'left': + out[kmin:kmax] *= window[:winlen//2] + out[:kmin] *= 0. + elif side == 'right': + out[kmin:kmax] *= window[winlen//2:] + out[kmax:] *= 0. + else: + raise ValueError("unrecognized side argument {}".format(side)) + return out
+ + + +
+[docs] +def fd_to_td(htilde, delta_t=None, left_window=None, right_window=None, + left_beta=8, right_beta=8): + """Converts a FD waveform to TD. + + A window can optionally be applied using ``fd_taper`` to the left or right + side of the waveform before being converted to the time domain. + + Parameters + ---------- + htilde : FrequencySeries + The waveform to convert. + delta_t : float, optional + Make the returned time series have the given ``delta_t``. + left_window : tuple of float, optional + A tuple giving the start and end frequency of the FD taper to apply + on the left side. If None, no taper will be applied on the left. + right_window : tuple of float, optional + A tuple giving the start and end frequency of the FD taper to apply + on the right side. If None, no taper will be applied on the right. + left_beta : int, optional + The beta parameter to use for the left taper. See ``fd_taper`` for + details. Default is 8. + right_beta : int, optional + The beta parameter to use for the right taper. Default is 8. + + Returns + ------- + TimeSeries + The time-series representation of ``htilde``. + """ + if left_window is not None: + start, end = left_window + htilde = fd_taper(htilde, start, end, side='left', beta=left_beta) + if right_window is not None: + start, end = right_window + htilde = fd_taper(htilde, start, end, side='right', beta=right_beta) + return htilde.to_timeseries(delta_t=delta_t)
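+
+# --- Usage sketch (illustrative example, not part of the PyCBC source) ------
+# Convert a frequency-domain waveform to the time domain, first tapering its
+# low-frequency end between 20 and 25 Hz to suppress ringing from the abrupt
+# turn-on. The approximant and parameters are placeholders.
+if __name__ == '__main__':
+    from pycbc.waveform import get_fd_waveform
+    hptilde, _ = get_fd_waveform(approximant='IMRPhenomD', mass1=30, mass2=30,
+                                 f_lower=20, delta_f=0.25)
+    hp_td = fd_to_td(hptilde, delta_t=1.0/4096, left_window=(20., 25.))
+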
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/waveform.html b/latest/html/_modules/pycbc/waveform/waveform.html new file mode 100644 index 00000000000..b7925946c2a --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/waveform.html @@ -0,0 +1,1615 @@ + + + + + + pycbc.waveform.waveform — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.waveform.waveform

+# Copyright (C) 2012  Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""Convenience functions to genenerate gravitational wave templates and
+waveforms.
+"""
+
+import os
+import lal, numpy, copy
+from pycbc.types import TimeSeries, FrequencySeries, zeros, Array
+from pycbc.types import real_same_precision_as, complex_same_precision_as
+import pycbc.scheme as _scheme
+import inspect
+from pycbc.fft import fft
+from pycbc import pnutils, libutils
+from pycbc.waveform import utils as wfutils
+from pycbc.waveform import parameters
+from pycbc.conversions import get_final_from_initial, tau_from_final_mass_spin
+from pycbc.filter import interpolate_complex_frequency, resample_to_delta_t
+import pycbc
+from .spa_tmplt import spa_tmplt, spa_tmplt_norm, spa_tmplt_end, \
+                      spa_tmplt_precondition, spa_amplitude_factor, \
+                      spa_length_in_time
+
+
+[docs] +class NoWaveformError(Exception): + """This should be raised if generating a waveform would just result in all + zeros being returned, e.g., if a requested `f_final` is <= `f_lower`. + """ + pass
+ + + +
+[docs] +class FailedWaveformError(Exception): + """This should be raised if a waveform fails to generate. + """ + pass
+ + +# If this is set to True, waveform generation codes will try to regenerate +# waveforms with known failure conditions to try to avoid the failure. For +# example SEOBNRv3 waveforms would be regenerated with double the sample rate. +# If this is set to False waveform failures will always raise exceptions +fail_tolerant_waveform_generation = True + +default_args = \ + (parameters.fd_waveform_params.default_dict() + + parameters.td_waveform_params).default_dict() + +default_sgburst_args = {'eccentricity':0, 'polarization':0} +sgburst_required_args = ['q','frequency','hrss'] + +# td, fd, filter waveforms generated on the CPU +_lalsim_td_approximants = {} +_lalsim_fd_approximants = {} +_lalsim_enum = {} +_lalsim_sgburst_approximants = {} + +def _check_lal_pars(p): + """ Create a laldict object from the dictionary of waveform parameters + + Parameters + ---------- + p: dictionary + The dictionary of lalsimulation paramaters + + Returns + ------- + laldict: LalDict + The lal type dictionary to pass to the lalsimulation waveform functions. + """ + lal_pars = lal.CreateDict() + #nonGRparams can be straightforwardly added if needed, however they have to + # be invoked one by one + if p['phase_order']!=-1: + lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder(lal_pars,int(p['phase_order'])) + if p['amplitude_order']!=-1: + lalsimulation.SimInspiralWaveformParamsInsertPNAmplitudeOrder(lal_pars,int(p['amplitude_order'])) + if p['spin_order']!=-1: + lalsimulation.SimInspiralWaveformParamsInsertPNSpinOrder(lal_pars,int(p['spin_order'])) + if p['tidal_order']!=-1: + lalsimulation.SimInspiralWaveformParamsInsertPNTidalOrder(lal_pars, p['tidal_order']) + if p['eccentricity_order']!=-1: + lalsimulation.SimInspiralWaveformParamsInsertPNEccentricityOrder(lal_pars, p['eccentricity_order']) + if p['lambda1'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(lal_pars, p['lambda1']) + if p['lambda2'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(lal_pars, p['lambda2']) + if p['lambda_octu1'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarLambda1(lal_pars, p['lambda_octu1']) + if p['lambda_octu2'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarLambda2(lal_pars, p['lambda_octu2']) + if p['quadfmode1'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertTidalQuadrupolarFMode1(lal_pars, p['quadfmode1']) + if p['quadfmode2'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertTidalQuadrupolarFMode2(lal_pars, p['quadfmode2']) + if p['octufmode1'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarFMode1(lal_pars, p['octufmode1']) + if p['octufmode2'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarFMode2(lal_pars, p['octufmode2']) + if p['dquad_mon1'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertdQuadMon1(lal_pars, p['dquad_mon1']) + if p['dquad_mon2'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertdQuadMon2(lal_pars, p['dquad_mon2']) + if p['numrel_data']: + lalsimulation.SimInspiralWaveformParamsInsertNumRelData(lal_pars, str(p['numrel_data'])) + if p['modes_choice']: + lalsimulation.SimInspiralWaveformParamsInsertModesChoice(lal_pars, p['modes_choice']) + if p['frame_axis']: + lalsimulation.SimInspiralWaveformParamsInsertFrameAxis(lal_pars, p['frame_axis']) + if p['side_bands']: + lalsimulation.SimInspiralWaveformParamsInsertSideband(lal_pars, p['side_bands']) + if p['mode_array'] is not None: + ma = 
lalsimulation.SimInspiralCreateModeArray() + for l,m in p['mode_array']: + lalsimulation.SimInspiralModeArrayActivateMode(ma, l, m) + lalsimulation.SimInspiralWaveformParamsInsertModeArray(lal_pars, ma) + #TestingGR parameters: + if p['dchi0'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi0(lal_pars,p['dchi0']) + if p['dchi1'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi1(lal_pars,p['dchi1']) + if p['dchi2'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi2(lal_pars,p['dchi2']) + if p['dchi3'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi3(lal_pars,p['dchi3']) + if p['dchi4'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi4(lal_pars,p['dchi4']) + if p['dchi5'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi5(lal_pars,p['dchi5']) + if p['dchi5l'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi5L(lal_pars,p['dchi5l']) + if p['dchi6'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi6(lal_pars,p['dchi6']) + if p['dchi6l'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi6L(lal_pars,p['dchi6l']) + if p['dchi7'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi7(lal_pars,p['dchi7']) + if p['dalpha1'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDAlpha1(lal_pars,p['dalpha1']) + if p['dalpha2'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDAlpha2(lal_pars,p['dalpha2']) + if p['dalpha3'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDAlpha3(lal_pars,p['dalpha3']) + if p['dalpha4'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDAlpha4(lal_pars,p['dalpha4']) + if p['dalpha5'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDAlpha5(lal_pars,p['dalpha5']) + if p['dbeta1'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDBeta1(lal_pars,p['dbeta1']) + if p['dbeta2'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDBeta2(lal_pars,p['dbeta2']) + if p['dbeta3'] is not None: + lalsimulation.SimInspiralWaveformParamsInsertNonGRDBeta3(lal_pars,p['dbeta3']) + return lal_pars + +def _lalsim_td_waveform(**p): + lal_pars = _check_lal_pars(p) + #nonGRparams can be straightforwardly added if needed, however they have to + # be invoked one by one + try: + hp1, hc1 = lalsimulation.SimInspiralChooseTDWaveform( + float(pnutils.solar_mass_to_kg(p['mass1'])), + float(pnutils.solar_mass_to_kg(p['mass2'])), + float(p['spin1x']), float(p['spin1y']), float(p['spin1z']), + float(p['spin2x']), float(p['spin2y']), float(p['spin2z']), + pnutils.megaparsecs_to_meters(float(p['distance'])), + float(p['inclination']), float(p['coa_phase']), + float(p['long_asc_nodes']), float(p['eccentricity']), float(p['mean_per_ano']), + float(p['delta_t']), float(p['f_lower']), float(p['f_ref']), + lal_pars, + _lalsim_enum[p['approximant']]) + except RuntimeError: + if not fail_tolerant_waveform_generation: + raise + # For some cases failure modes can occur. Here we add waveform-specific + # instructions to try to work with waveforms that are known to fail. + if 'SEOBNRv3' in p['approximant']: + # Try doubling the sample time and redoing. + # Don't want to get stuck in a loop though! + if 'delta_t_orig' not in p: + p['delta_t_orig'] = p['delta_t'] + p['delta_t'] = p['delta_t'] / 2. 
+ if p['delta_t_orig'] / p['delta_t'] > 9: + raise + hp, hc = _lalsim_td_waveform(**p) + p['delta_t'] = p['delta_t_orig'] + hp = resample_to_delta_t(hp, hp.delta_t*2) + hc = resample_to_delta_t(hc, hc.delta_t*2) + return hp, hc + raise + + #lal.DestroyDict(lal_pars) + + hp = TimeSeries(hp1.data.data[:], delta_t=hp1.deltaT, epoch=hp1.epoch) + hc = TimeSeries(hc1.data.data[:], delta_t=hc1.deltaT, epoch=hc1.epoch) + + return hp, hc + +_lalsim_td_waveform.required = parameters.cbc_td_required + +def _spintaylor_aligned_prec_swapper(**p): + """ + SpinTaylorF2 is only single spin, it also struggles with anti-aligned spin + waveforms. This construct chooses between the aligned-twospin TaylorF2 model + and the precessing singlespin SpinTaylorF2 models. If aligned spins are + given, use TaylorF2, if nonaligned spins are given use SpinTaylorF2. In + the case of nonaligned doublespin systems the code will fail at the + waveform generator level. + """ + orig_approximant = p['approximant'] + if p['spin2x'] == 0 and p['spin2y'] == 0 and p['spin1x'] == 0 and \ + p['spin1y'] == 0: + p['approximant'] = 'TaylorF2' + else: + p['approximant'] = 'SpinTaylorF2' + hp, hc = _lalsim_fd_waveform(**p) + p['approximant'] = orig_approximant + return hp, hc + +def _lalsim_fd_waveform(**p): + lal_pars = _check_lal_pars(p) + hp1, hc1 = lalsimulation.SimInspiralChooseFDWaveform( + float(pnutils.solar_mass_to_kg(p['mass1'])), + float(pnutils.solar_mass_to_kg(p['mass2'])), + float(p['spin1x']), float(p['spin1y']), float(p['spin1z']), + float(p['spin2x']), float(p['spin2y']), float(p['spin2z']), + pnutils.megaparsecs_to_meters(float(p['distance'])), + float(p['inclination']), float(p['coa_phase']), + float(p['long_asc_nodes']), float(p['eccentricity']), float(p['mean_per_ano']), + p['delta_f'], float(p['f_lower']), float(p['f_final']), float(p['f_ref']), + lal_pars, + _lalsim_enum[p['approximant']]) + + hp = FrequencySeries(hp1.data.data[:], delta_f=hp1.deltaF, + epoch=hp1.epoch) + + hc = FrequencySeries(hc1.data.data[:], delta_f=hc1.deltaF, + epoch=hc1.epoch) + #lal.DestroyDict(lal_pars) + return hp, hc + +_lalsim_fd_waveform.required = parameters.cbc_fd_required + +def _lalsim_sgburst_waveform(**p): + hp, hc = lalsimulation.SimBurstSineGaussian(float(p['q']), + float(p['frequency']), + float(p['hrss']), + float(p['eccentricity']), + float(p['polarization']), + float(p['delta_t'])) + + hp = TimeSeries(hp.data.data[:], delta_t=hp.deltaT, epoch=hp.epoch) + hc = TimeSeries(hc.data.data[:], delta_t=hc.deltaT, epoch=hc.epoch) + + return hp, hc + +# Populate waveform approximants from lalsimulation if the library is +# available +try: + import lalsimulation + for approx_enum in range(0, lalsimulation.NumApproximants): + if lalsimulation.SimInspiralImplementedTDApproximants(approx_enum): + approx_name = lalsimulation.GetStringFromApproximant(approx_enum) + _lalsim_enum[approx_name] = approx_enum + _lalsim_td_approximants[approx_name] = _lalsim_td_waveform + + for approx_enum in range(0, lalsimulation.NumApproximants): + if lalsimulation.SimInspiralImplementedFDApproximants(approx_enum): + approx_name = lalsimulation.GetStringFromApproximant(approx_enum) + _lalsim_enum[approx_name] = approx_enum + _lalsim_fd_approximants[approx_name] = _lalsim_fd_waveform + + # sine-Gaussian burst + for approx_enum in range(0, lalsimulation.NumApproximants): + if lalsimulation.SimInspiralImplementedFDApproximants(approx_enum): + approx_name = lalsimulation.GetStringFromApproximant(approx_enum) + _lalsim_enum[approx_name] = approx_enum + 
_lalsim_sgburst_approximants[approx_name] = _lalsim_sgburst_waveform +except ImportError: + lalsimulation = libutils.import_optional('lalsimulation') + +cpu_sgburst = _lalsim_sgburst_approximants +cpu_td = dict(_lalsim_td_approximants.items()) +cpu_fd = _lalsim_fd_approximants + +# Waveforms written in CUDA +_cuda_td_approximants = {} +_cuda_fd_approximants = {} + +if pycbc.HAVE_CUDA: + from pycbc.waveform.pycbc_phenomC_tmplt import imrphenomc_tmplt + from pycbc.waveform.SpinTaylorF2 import spintaylorf2 as cuda_spintaylorf2 + _cuda_fd_approximants["IMRPhenomC"] = imrphenomc_tmplt + _cuda_fd_approximants["SpinTaylorF2"] = cuda_spintaylorf2 + +cuda_td = dict(list(_lalsim_td_approximants.items()) + list(_cuda_td_approximants.items())) +cuda_fd = dict(list(_lalsim_fd_approximants.items()) + list(_cuda_fd_approximants.items())) + +# List the various available approximants #################################### + + + + + + + + + + +
+[docs] +def td_approximants(scheme=_scheme.mgr.state): + """Return a list containing the available time domain approximants for + the given processing scheme. + """ + return list(td_wav[type(scheme)].keys())
+ + +
+[docs] +def fd_approximants(scheme=_scheme.mgr.state): + """Return a list containing the available fourier domain approximants for + the given processing scheme. + """ + return list(fd_wav[type(scheme)].keys())
+ + +
+[docs] +def sgburst_approximants(scheme=_scheme.mgr.state): + """Return a list containing the available time domain sgbursts for + the given processing scheme. + """ + return list(sgburst_wav[type(scheme)].keys())
+ + +
+[docs] +def filter_approximants(scheme=_scheme.mgr.state): + """Return a list of fourier domain approximants including those + written specifically as templates. + """ + return list(filter_wav[type(scheme)].keys())
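As a quick illustration of the listing helpers above, the following minimal sketch (assuming PyCBC and lalsimulation are installed; the names returned depend on the installed lalsimulation build) prints the registered approximants for the current processing scheme:

# Minimal usage sketch for the approximant listing helpers documented above.
from pycbc.waveform import td_approximants, fd_approximants

print(len(td_approximants()), "time domain approximants")
print(len(fd_approximants()), "frequency domain approximants")
print("IMRPhenomD" in fd_approximants())   # typically True when lalsimulation is present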
+ + +# Input parameter handling ################################################### + +def get_obj_attrs(obj): + """ Return a dictionary built from the attributes of the given object. + """ + pr = {} + if obj is not None: + if isinstance(obj, numpy.core.records.record): + for name in obj.dtype.names: + pr[name] = getattr(obj, name) + elif hasattr(obj, '__dict__') and obj.__dict__: + pr = obj.__dict__ + elif hasattr(obj, '__slots__'): + for slot in obj.__slots__: + if hasattr(obj, slot): + pr[slot] = getattr(obj, slot) + elif isinstance(obj, dict): + pr = obj.copy() + else: + for name in dir(obj): + try: + value = getattr(obj, name) + if not name.startswith('__') and not inspect.ismethod(value): + pr[name] = value + except: + continue + + return pr + + +def parse_mode_array(input_params): + """Ensures mode_array argument in a dictionary of input parameters is + a list of tuples of (l, m), where l and m are ints. + + Accepted formats for the ``mode_array`` argument is a list of tuples of + ints (e.g., ``[(2, 2), (3, 3), (4, 4)]``), a space-separated string giving + the modes (e.g., ``22 33 44``), or an array of ints or floats (e.g., + ``[22., 33., 44.]``. + """ + if 'mode_array' in input_params and input_params['mode_array'] is not None: + mode_array = input_params['mode_array'] + if isinstance(mode_array, str): + mode_array = mode_array.split() + if not isinstance(mode_array, (numpy.ndarray, list)): + mode_array = [mode_array] + for ii, ma in enumerate(mode_array): + # if ma is a float or int, convert to str (e.g., 22. -> '22'), so + # that... + if isinstance(ma, (float, int)): + ma = str(int(ma)) + # if ma is a str convert to (int, int) (e.g., '22' -> (2, 2)) + if isinstance(ma, str): + l, m = ma + ma = (int(l), int(m)) + mode_array[ii] = ma + input_params['mode_array'] = mode_array + return input_params + + +def props(obj, **kwargs): + """ Return a dictionary built from the combination of defaults, kwargs, + and the attributes of the given object. 
+ """ + pr = get_obj_attrs(obj) + pr.update(kwargs) + # Get the parameters to generate the waveform + # Note that keyword arguments override values in the template object + input_params = default_args.copy() + input_params.update(pr) + # if mode array present and is a string, convert to a list of tuples + input_params = parse_mode_array(input_params) + return input_params + + +def check_args(args, required_args): + """ check that required args are given """ + missing = [] + for arg in required_args: + if (arg not in args) or (args[arg] is None): + missing.append(arg) + + if len(missing) != 0: + raise ValueError("Please provide {}".format(', '.join(missing))) + +# Input parameter handling for bursts ######################################## + +def props_sgburst(obj, **kwargs): + pr = {} + if obj is not None: + for name in dir(obj): + try: + value = getattr(obj, name) + if not name.startswith('__') and not inspect.ismethod(value): + pr[name] = value + except: + continue + + # Get the parameters to generate the waveform + # Note that keyword arguments override values in the template object + input_params = default_sgburst_args.copy() + input_params.update(pr) + input_params.update(kwargs) + + return input_params + +# Waveform generation ######################################################## +fd_sequence = {} +fd_det_sequence = {} +fd_det = {} + +def _lalsim_fd_sequence(**p): + """ Shim to interface to lalsimulation SimInspiralChooseFDWaveformSequence + """ + lal_pars = _check_lal_pars(p) + hp, hc = lalsimulation.SimInspiralChooseFDWaveformSequence( + float(p['coa_phase']), + float(pnutils.solar_mass_to_kg(p['mass1'])), + float(pnutils.solar_mass_to_kg(p['mass2'])), + float(p['spin1x']), float(p['spin1y']), float(p['spin1z']), + float(p['spin2x']), float(p['spin2y']), float(p['spin2z']), + float(p['f_ref']), + pnutils.megaparsecs_to_meters(float(p['distance'])), + float(p['inclination']), + lal_pars, + _lalsim_enum[p['approximant']], + p['sample_points'].lal()) + return Array(hp.data.data), Array(hc.data.data) +_lalsim_fd_sequence.required = parameters.cbc_fd_required + +for apx in _lalsim_enum: + fd_sequence[apx] = _lalsim_fd_sequence + + +
+[docs] +def get_fd_waveform_sequence(template=None, **kwds): + """Return values of the waveform evaluated at the sequence of frequency + points. The waveform generator doesn't include detector response. + + Parameters + ---------- + template: object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + {params} + + Returns + ------- + hplustilde: Array + The plus phase of the waveform in frequency domain evaluated at the + frequency points. + hcrosstilde: Array + The cross phase of the waveform in frequency domain evaluated at the + frequency points. + """ + input_params = props(template, **kwds) + input_params['delta_f'] = -1 + input_params['f_lower'] = -1 + if input_params['approximant'] not in fd_sequence: + raise ValueError("Approximant %s not available" % + (input_params['approximant'])) + wav_gen = fd_sequence[input_params['approximant']] + if hasattr(wav_gen, 'required'): + required = wav_gen.required + else: + required = parameters.fd_required + if not isinstance(input_params['sample_points'], Array): + input_params['sample_points'] = Array(input_params['sample_points']) + check_args(input_params, required) + return wav_gen(**input_params)
+ + +
+[docs] +def get_fd_det_waveform_sequence(template=None, **kwds): + """Return values of the waveform evaluated at the sequence of frequency + points. The waveform generator includes detector response. + + Parameters + ---------- + template: object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + {params} + + Returns + ------- + dict + The detector-frame waveform (with detector response) in frequency + domain evaluated at the frequency points. Keys are requested data + channels, values are FrequencySeries. + """ + input_params = props(template, **kwds) + if input_params['approximant'] not in fd_det_sequence: + raise ValueError("Approximant %s not available" % + (input_params['approximant'])) + wav_gen = fd_det_sequence[input_params['approximant']] + if hasattr(wav_gen, 'required'): + required = wav_gen.required + else: + required = parameters.fd_det_sequence_required + check_args(input_params, required) + return wav_gen(**input_params)
+ + +get_fd_waveform_sequence.__doc__ = get_fd_waveform_sequence.__doc__.format( + params=parameters.fd_waveform_sequence_params.docstr(prefix=" ", + include_label=False).lstrip(' ')) +get_fd_det_waveform_sequence.__doc__ = get_fd_det_waveform_sequence.__doc__.format( + params=parameters.fd_waveform_sequence_params.docstr(prefix=" ", + include_label=False).lstrip(' ')) + +
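A minimal usage sketch for get_fd_waveform_sequence; the parameter values are illustrative only, and unspecified parameters such as spins, distance, and f_ref fall back to the module defaults:

# Evaluate a frequency-domain approximant at an arbitrary set of frequencies.
from pycbc.types import Array
from pycbc.waveform import get_fd_waveform_sequence

freqs = Array([25.0, 40.0, 60.0, 100.0])      # illustrative sample points (Hz)
hp, hc = get_fd_waveform_sequence(approximant='IMRPhenomD',
                                  mass1=30.0, mass2=30.0,
                                  sample_points=freqs)
print(len(hp))  # one complex value per requested frequency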
+[docs] +def get_td_waveform(template=None, **kwargs): + """Return the plus and cross polarizations of a time domain waveform. + + Parameters + ---------- + template: object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + {params} + + Returns + ------- + hplus: TimeSeries + The plus polarization of the waveform. + hcross: TimeSeries + The cross polarization of the waveform. + """ + input_params = props(template, **kwargs) + wav_gen = td_wav[type(_scheme.mgr.state)] + if input_params['approximant'] not in wav_gen: + raise ValueError("Approximant %s not available" % + (input_params['approximant'])) + wav_gen = wav_gen[input_params['approximant']] + if hasattr(wav_gen, 'required'): + required = wav_gen.required + else: + required = parameters.td_required + check_args(input_params, required) + return wav_gen(**input_params)
+ + +get_td_waveform.__doc__ = get_td_waveform.__doc__.format( + params=parameters.td_waveform_params.docstr(prefix=" ", + include_label=False).lstrip(' ')) + +
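A minimal sketch of calling get_td_waveform directly; the parameter values are illustrative, and any approximant reported by td_approximants() can be substituted:

# Generate plus/cross polarizations of a time-domain waveform.
from pycbc.waveform import get_td_waveform

hp, hc = get_td_waveform(approximant='SEOBNRv4',
                         mass1=30.0, mass2=30.0,
                         delta_t=1.0 / 4096, f_lower=20.0)
print(hp.duration, float(hp.start_time))  # coalescence is at t = 0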
+[docs] +def get_fd_waveform(template=None, **kwargs): + """Return a frequency domain gravitational waveform. + + Parameters + ---------- + template: object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + {params} + + Returns + ------- + hplustilde: FrequencySeries + The plus phase of the waveform in frequency domain. + hcrosstilde: FrequencySeries + The cross phase of the waveform in frequency domain. + """ + input_params = props(template, **kwargs) + wav_gen = fd_wav[type(_scheme.mgr.state)] + if input_params['approximant'] not in wav_gen: + raise ValueError("Approximant %s not available" % + (input_params['approximant'])) + try: + ffunc = input_params.pop('f_final_func') + if ffunc != '': + # convert the frequency function to a value + input_params['f_final'] = pnutils.named_frequency_cutoffs[ffunc]( + input_params) + # if the f_final is < f_lower, raise a NoWaveformError + if 'f_final' in input_params and \ + (input_params['f_lower']+input_params['delta_f'] >= + input_params['f_final']): + raise NoWaveformError("cannot generate waveform: f_lower >= " + "f_final") + except KeyError: + pass + wav_gen = wav_gen[input_params['approximant']] + if hasattr(wav_gen, 'required'): + required = wav_gen.required + else: + required = parameters.fd_required + check_args(input_params, required) + return wav_gen(**input_params)
+ + + +get_fd_waveform.__doc__ = get_fd_waveform.__doc__.format( + params=parameters.fd_waveform_params.docstr(prefix=" ", + include_label=False).lstrip(' ')) + +
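A matching sketch for get_fd_waveform with illustrative parameter values; availability of the approximant depends on the installed lalsimulation:

# Generate a frequency-domain waveform directly.
from pycbc.waveform import get_fd_waveform

hptilde, hctilde = get_fd_waveform(approximant='IMRPhenomD',
                                   mass1=30.0, mass2=30.0,
                                   delta_f=1.0 / 64, f_lower=20.0)
print(hptilde.delta_f, len(hptilde))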
+[docs] +def get_fd_waveform_from_td(**params): + """ Return frequency domain version of a time domain approximant. + + This returns a frequency domain version of a time domain approximant, + with padding and tapering at the start of the waveform. + + Parameters + ---------- + params: dict + The parameters defining the waveform to generate. + See `get_td_waveform`. + + Returns + ------- + hp: pycbc.types.FrequencySeries + Plus polarization frequency series + hc: pycbc.types.FrequencySeries + Cross polarization frequency series + """ + + # determine the duration to use + full_duration = duration = get_waveform_filter_length_in_time(**params) + nparams = params.copy() + + while full_duration < duration * 1.5: + full_duration = get_waveform_filter_length_in_time(**nparams) + nparams['f_lower'] -= 1 + + if 'f_fref' not in nparams: + nparams['f_ref'] = params['f_lower'] + + # We'll try to do the right thing and figure out what the frequency + # end is. Otherwise, we'll just assume 2048 Hz. + # (consider removing as we hopefully have better estimates for more + # approximants) + try: + f_end = get_waveform_end_frequency(**params) + delta_t = (0.5 / pnutils.nearest_larger_binary_number(f_end)) + except: + delta_t = 1.0 / 2048 + + nparams['delta_t'] = delta_t + hp, hc = get_td_waveform(**nparams) + + # Resize to the right duration + tsamples = int(1.0 / params['delta_f'] / delta_t) + + if tsamples < len(hp): + raise ValueError("The frequency spacing (df = {}) is too low to " + "generate the {} approximant from the time " + "domain".format(params['delta_f'], params['approximant'])) + + hp.resize(tsamples) + hc.resize(tsamples) + + # apply the tapering, we will use a safety factor here to allow for + # somewhat inaccurate duration difference estimation. + window = (full_duration - duration) * 0.8 + hp = wfutils.td_taper(hp, hp.start_time, hp.start_time + window) + hc = wfutils.td_taper(hc, hc.start_time, hc.start_time + window) + + # avoid wraparound + hp = hp.to_frequencyseries().cyclic_time_shift(hp.start_time) + hc = hc.to_frequencyseries().cyclic_time_shift(hc.start_time) + return hp, hc
+ + +
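A sketch of using get_fd_waveform_from_td to obtain a tapered frequency-domain version of a time-domain approximant. Parameter values are illustrative; the approximant must have a duration estimator registered in _filter_time_lengths further below:

# FD version of a TD approximant, with start-of-waveform tapering applied.
from pycbc.waveform import get_fd_waveform_from_td

hptilde, hctilde = get_fd_waveform_from_td(approximant='SEOBNRv4',
                                           mass1=30.0, mass2=30.0,
                                           delta_f=1.0 / 64, f_lower=25.0)
print(len(hptilde))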
+[docs] +def get_fd_det_waveform(template=None, **kwargs): + """Return a frequency domain gravitational waveform. + The waveform generator includes detector response. + + Parameters + ---------- + template: object + An object that has attached properties. This can be used to substitute + for keyword arguments. An example would be a row in an xml table. + {params} + + Returns + ------- + dict + The detector-frame waveform (with detector response) in frequency + domain. Keys are requested data channels, values are FrequencySeries. + """ + input_params = props(template, **kwargs) + if input_params['approximant'] not in fd_det: + raise ValueError("Approximant %s not available" % + (input_params['approximant'])) + wav_gen = fd_det[input_params['approximant']] + if hasattr(wav_gen, 'required'): + required = wav_gen.required + else: + required = parameters.fd_required + check_args(input_params, required) + return wav_gen(**input_params)
+ + +get_fd_det_waveform.__doc__ = get_fd_det_waveform.__doc__.format( + params=parameters.fd_waveform_params.docstr(prefix=" ", + include_label=False).lstrip(' ')) + +def _base_get_td_waveform_from_fd(template=None, rwrap=None, **params): + """ The base function to calculate time domain version of fourier + domain approximant which not include or includes detector response. + Called by `get_td_waveform_from_fd` and `get_td_det_waveform_from_fd_det`. + """ + kwds = props(template, **params) + nparams = kwds.copy() + + if rwrap is None: + # In the `pycbc.waveform.parameters` module, spin1z and + # spin2z have the default value 0. Users must have input + # masses, so no else is needed. + mass_spin_params = set(['mass1', 'mass2', 'spin1z', 'spin2z']) + if mass_spin_params.issubset(set(nparams.keys())): + m_final, spin_final = get_final_from_initial( + mass1=nparams['mass1'], mass2=nparams['mass2'], + spin1z=nparams['spin1z'], spin2z=nparams['spin2z']) + rwrap = tau_from_final_mass_spin(m_final, spin_final) * 10 + if rwrap < 5: + # Long enough for very massive BBHs in XG detectors, + # up to (3000, 3000) solar mass, while still not a + # computational burden for 2G cases. + rwrap = 5 + + if nparams['approximant'] not in _filter_time_lengths: + raise ValueError("Approximant %s _filter_time_lengths function \ + not available" % (nparams['approximant'])) + # determine the duration to use + full_duration = duration = get_waveform_filter_length_in_time(**nparams) + + while full_duration < duration * 1.5: + full_duration = get_waveform_filter_length_in_time(**nparams) + nparams['f_lower'] *= 0.99 + if 't_obs_start' in nparams and \ + full_duration >= nparams['t_obs_start']: + break + + if 'f_ref' not in nparams: + nparams['f_ref'] = params['f_lower'] + + # factor to ensure the vectors are all large enough. We don't need to + # completely trust our duration estimator in this case, at a small + # increase in computational cost + fudge_duration = (max(0, full_duration) + .1 + rwrap) * 1.5 + fsamples = int(fudge_duration / nparams['delta_t']) + N = pnutils.nearest_larger_binary_number(fsamples) + fudge_duration = N * nparams['delta_t'] + + nparams['delta_f'] = 1.0 / fudge_duration + tsize = int(1.0 / nparams['delta_t'] / nparams['delta_f']) + fsize = tsize // 2 + 1 + + if nparams['approximant'] not in fd_det: + hp, hc = get_fd_waveform(**nparams) + # Resize to the right sample rate + hp.resize(fsize) + hc.resize(fsize) + + # avoid wraparound + hp = hp.cyclic_time_shift(-rwrap) + hc = hc.cyclic_time_shift(-rwrap) + + hp = wfutils.fd_to_td(hp, delta_t=params['delta_t'], + left_window=(nparams['f_lower'], + params['f_lower'])) + hc = wfutils.fd_to_td(hc, delta_t=params['delta_t'], + left_window=(nparams['f_lower'], + params['f_lower'])) + return hp, hc + else: + wfs = get_fd_det_waveform(**nparams) + for ifo in wfs.keys(): + wfs[ifo].resize(fsize) + # avoid wraparound + wfs[ifo] = wfs[ifo].cyclic_time_shift(-rwrap) + wfs[ifo] = wfutils.fd_to_td(wfs[ifo], delta_t=kwds['delta_t'], + left_window=(nparams['f_lower'], + kwds['f_lower'])) + return wfs + +
+[docs] +def get_td_waveform_from_fd(rwrap=None, **params): + """ Return time domain version of fourier domain approximant. + + This returns a time domain version of a fourier domain approximant, with + padding and tapering at the start of the waveform. + + Parameters + ---------- + rwrap: float + Cyclic time shift parameter in seconds. A fudge factor to ensure + that the entire time series is contiguous in the array and not + wrapped around the end. + params: dict + The parameters defining the waveform to generate. + See `get_fd_waveform`. + + Returns + ------- + hp: pycbc.types.TimeSeries + Plus polarization time series + hc: pycbc.types.TimeSeries + Cross polarization time series + """ + return _base_get_td_waveform_from_fd(None, rwrap, **params)
+ + +
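A hedged sketch of get_td_waveform_from_fd with illustrative values; when rwrap is None the shift is estimated internally from the remnant ringdown time:

# TD version of an FD approximant; rwrap is chosen automatically when None.
from pycbc.waveform import get_td_waveform_from_fd

hp, hc = get_td_waveform_from_fd(approximant='IMRPhenomD',
                                 mass1=30.0, mass2=30.0,
                                 delta_t=1.0 / 4096, f_lower=25.0)
print(hp.duration)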
+[docs] +def get_td_det_waveform_from_fd_det(template=None, rwrap=None, **params): + """ Return time domain version of fourier domain approximant which + includes detector response, with padding and tapering at the start + of the waveform. + + Parameters + ---------- + rwrap: float + Cyclic time shift parameter in seconds. A fudge factor to ensure + that the entire time series is contiguous in the array and not + wrapped around the end. + params: dict + The parameters defining the waveform to generate. + See `get_fd_det_waveform`. + + Returns + ------- + dict + The detector-frame waveform (with detector response) in time + domain. Keys are requested data channels. + """ + return _base_get_td_waveform_from_fd(template, rwrap, **params)
+ + +get_td_det_waveform_from_fd_det.__doc__ = \ + get_td_det_waveform_from_fd_det.__doc__.format( + params=parameters.td_waveform_params.docstr(prefix=" ", + include_label=False).lstrip(' ')) + +def get_interpolated_fd_waveform(dtype=numpy.complex64, return_hc=True, + **params): + """ Return a fourier domain waveform approximant, using interpolation + """ + + def rulog2(val): + return 2.0 ** numpy.ceil(numpy.log2(float(val))) + + orig_approx = params['approximant'] + params['approximant'] = params['approximant'].replace('_INTERP', '') + df = params['delta_f'] + + if 'duration' not in params: + duration = get_waveform_filter_length_in_time(**params) + elif params['duration'] > 0: + duration = params['duration'] + else: + err_msg = "Waveform duration must be greater than 0." + raise ValueError(err_msg) + + #FIXME We should try to get this length directly somehow + # I think this number should be conservative + ringdown_padding = 0.5 + + df_min = 1.0 / rulog2(duration + ringdown_padding) + # FIXME: I don't understand this, but waveforms with df_min < 0.5 will chop + # off the inspiral when using ringdown_padding - 0.5. + # Also, if ringdown_padding is set to a very small + # value we can see cases where the ringdown is chopped. + if df_min > 0.5: + df_min = 0.5 + params['delta_f'] = df_min + hp, hc = get_fd_waveform(**params) + hp = hp.astype(dtype) + if return_hc: + hc = hc.astype(dtype) + else: + hc = None + + f_end = get_waveform_end_frequency(**params) + if f_end is None: + f_end = (len(hp) - 1) * hp.delta_f + if 'f_final' in params and params['f_final'] > 0: + f_end_params = params['f_final'] + if f_end is not None: + f_end = min(f_end_params, f_end) + + n_min = int(rulog2(f_end / df_min)) + 1 + if n_min < len(hp): + hp = hp[:n_min] + if hc is not None: + hc = hc[:n_min] + + offset = int(ringdown_padding * (len(hp)-1)*2 * hp.delta_f) + + hp = interpolate_complex_frequency(hp, df, zeros_offset=offset, side='left') + if hc is not None: + hc = interpolate_complex_frequency(hc, df, zeros_offset=offset, + side='left') + params['approximant'] = orig_approx + return hp, hc + +
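The '_INTERP' variants registered later in this module (see td_fd_waveform_transform) route get_fd_waveform calls through get_interpolated_fd_waveform above. A hedged sketch, assuming an lalsimulation build that provides IMRPhenomD:

# The waveform is generated at an internally chosen coarse delta_f and then
# interpolated onto the requested frequency spacing.
from pycbc.waveform import get_fd_waveform

hptilde, _ = get_fd_waveform(approximant='IMRPhenomD_INTERP',
                             mass1=30.0, mass2=30.0,
                             delta_f=1.0 / 256, f_lower=20.0)
print(hptilde.delta_f)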
+[docs] +def get_sgburst_waveform(template=None, **kwargs): + """Return the plus and cross polarizations of a time domain + sine-Gaussian burst waveform. + + Parameters + ---------- + template: object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + approximant : string + A string that indicates the chosen approximant. See `td_approximants` + for available options. + q : float + The quality factor of a sine-Gaussian burst + frequency : float + The centre-frequency of a sine-Gaussian burst + delta_t : float + The time step used to generate the waveform + hrss : float + The root-sum-square strain (hrss) + amplitude: float + The strain amplitude + + Returns + ------- + hplus: TimeSeries + The plus polarization of the waveform. + hcross: TimeSeries + The cross polarization of the waveform. + """ + input_params = props_sgburst(template,**kwargs) + + for arg in sgburst_required_args: + if arg not in input_params: + raise ValueError("Please provide " + str(arg)) + + return _lalsim_sgburst_waveform(**input_params)
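A short sketch of get_sgburst_waveform with illustrative burst parameters; eccentricity and polarization fall back to the defaults of 0 defined near the top of this module:

# Sine-Gaussian burst polarizations.
from pycbc.waveform import get_sgburst_waveform

hp, hc = get_sgburst_waveform(q=20.0, frequency=150.0,
                              hrss=1e-22, delta_t=1.0 / 4096)
print(hp.duration)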
+ + +# Waveform filter routines ################################################### + +# Organize Filter Generators +_inspiral_fd_filters = {} +_cuda_fd_filters = {} + +_cuda_fd_filters['SPAtmplt'] = spa_tmplt +_inspiral_fd_filters['SPAtmplt'] = spa_tmplt + +filter_wav = _scheme.ChooseBySchemeDict() +filter_wav.update( {_scheme.CPUScheme:_inspiral_fd_filters, + _scheme.CUDAScheme:_cuda_fd_filters, + } ) + +# Organize functions for function conditioning/precalculated values +_filter_norms = {} +_filter_ends = {} +_filter_preconditions = {} +_template_amplitude_norms = {} +_filter_time_lengths = {} + +def seobnrv2_final_frequency(**kwds): + return pnutils.get_final_freq("SEOBNRv2", kwds['mass1'], kwds['mass2'], + kwds['spin1z'], kwds['spin2z']) + +def get_imr_length(approx, **kwds): + """Call through to pnutils to obtain IMR waveform durations + """ + m1 = float(kwds['mass1']) + m2 = float(kwds['mass2']) + s1z = float(kwds['spin1z']) + s2z = float(kwds['spin2z']) + f_low = float(kwds['f_lower']) + # 10% margin of error is incorporated in the pnutils function + return pnutils.get_imr_duration(m1, m2, s1z, s2z, f_low, approximant=approx) + +def seobnrv2_length_in_time(**kwds): + """Stub for holding the calculation of SEOBNRv2* waveform duration. + """ + return get_imr_length("SEOBNRv2", **kwds) + +def seobnrv4_length_in_time(**kwds): + """Stub for holding the calculation of SEOBNRv4* waveform duration. + """ + return get_imr_length("SEOBNRv4", **kwds) + +def seobnrv5_length_in_time(**kwds): + """Stub for holding the calculation of SEOBNRv5_ROM waveform duration. + """ + return get_imr_length("SEOBNRv5_ROM", **kwds) + +def imrphenomd_length_in_time(**kwds): + """Stub for holding the calculation of IMRPhenomD waveform duration. + """ + return get_imr_length("IMRPhenomD", **kwds) + +def imrphenomhm_length_in_time(**kwargs): + """Estimates the duration of IMRPhenom waveforms that include higher modes. + """ + # Default maximum node number for IMRPhenomHM is 4 + # The relevant lower order approximant here is IMRPhenomD + return get_hm_length_in_time("IMRPhenomD", 4, **kwargs) + +def seobnrv4hm_length_in_time(**kwargs): + """ Estimates the duration of SEOBNRv4HM waveforms that include higher modes. 
+ """ + # Default maximum node number for SEOBNRv4HM is 5 + # The relevant lower order approximant here is SEOBNRv4 + return get_hm_length_in_time('SEOBNRv4', 5, **kwargs) + +def get_hm_length_in_time(lor_approx, maxm_default, **kwargs): + kwargs = parse_mode_array(kwargs) + if 'mode_array' in kwargs and kwargs['mode_array'] is not None: + maxm = max(m for _, m in kwargs['mode_array']) + else: + maxm = maxm_default + try: + flow = kwargs['f_lower'] + except KeyError: + raise ValueError("must provide a f_lower") + kwargs['f_lower'] = flow * 2./maxm + return get_imr_length(lor_approx, **kwargs) + +_filter_norms["SPAtmplt"] = spa_tmplt_norm +_filter_preconditions["SPAtmplt"] = spa_tmplt_precondition + +_filter_ends["SPAtmplt"] = spa_tmplt_end +_filter_ends["TaylorF2"] = spa_tmplt_end +#_filter_ends["SEOBNRv1_ROM_EffectiveSpin"] = seobnrv2_final_frequency +#_filter_ends["SEOBNRv1_ROM_DoubleSpin"] = seobnrv2_final_frequency +#_filter_ends["SEOBNRv2_ROM_EffectiveSpin"] = seobnrv2_final_frequency +#_filter_ends["SEOBNRv2_ROM_DoubleSpin"] = seobnrv2_final_frequency +#_filter_ends["SEOBNRv2_ROM_DoubleSpin_HI"] = seobnrv2_final_frequency +# PhenomD returns higher frequencies than this, so commenting this out for now +#_filter_ends["IMRPhenomC"] = seobnrv2_final_frequency +#_filter_ends["IMRPhenomD"] = seobnrv2_final_frequency + +_template_amplitude_norms["SPAtmplt"] = spa_amplitude_factor +_filter_time_lengths["SPAtmplt"] = spa_length_in_time +_filter_time_lengths["TaylorF2"] = spa_length_in_time +_filter_time_lengths["SpinTaylorT5"] = spa_length_in_time +_filter_time_lengths["SEOBNRv1_ROM_EffectiveSpin"] = seobnrv2_length_in_time +_filter_time_lengths["SEOBNRv1_ROM_DoubleSpin"] = seobnrv2_length_in_time +_filter_time_lengths["SEOBNRv2_ROM_EffectiveSpin"] = seobnrv2_length_in_time +_filter_time_lengths["SEOBNRv2_ROM_DoubleSpin"] = seobnrv2_length_in_time +_filter_time_lengths["EOBNRv2_ROM"] = seobnrv2_length_in_time +_filter_time_lengths["EOBNRv2HM_ROM"] = seobnrv2_length_in_time +_filter_time_lengths["SEOBNRv2_ROM_DoubleSpin_HI"] = seobnrv2_length_in_time +_filter_time_lengths["SEOBNRv4_ROM"] = seobnrv4_length_in_time +_filter_time_lengths["SEOBNRv4HM_ROM"] = seobnrv4hm_length_in_time +_filter_time_lengths["SEOBNRv4"] = seobnrv4_length_in_time +_filter_time_lengths["SEOBNRv4P"] = seobnrv4_length_in_time +_filter_time_lengths["SEOBNRv5_ROM"] = seobnrv5_length_in_time +_filter_time_lengths["IMRPhenomC"] = imrphenomd_length_in_time +_filter_time_lengths["IMRPhenomD"] = imrphenomd_length_in_time +_filter_time_lengths["IMRPhenomPv2"] = imrphenomd_length_in_time +_filter_time_lengths["IMRPhenomD_NRTidal"] = imrphenomd_length_in_time +_filter_time_lengths["IMRPhenomPv2_NRTidal"] = imrphenomd_length_in_time +_filter_time_lengths["IMRPhenomHM"] = imrphenomhm_length_in_time +_filter_time_lengths["IMRPhenomPv3HM"] = imrphenomhm_length_in_time +_filter_time_lengths["IMRPhenomXHM"] = imrphenomhm_length_in_time +_filter_time_lengths["IMRPhenomXPHM"] = imrphenomhm_length_in_time +_filter_time_lengths["SpinTaylorF2"] = spa_length_in_time +_filter_time_lengths["TaylorF2NL"] = spa_length_in_time +_filter_time_lengths["PreTaylorF2"] = spa_length_in_time + +# Also add generators for switching between approximants +apx_name = "SpinTaylorF2_SWAPPER" +cpu_fd[apx_name] = _spintaylor_aligned_prec_swapper +_filter_time_lengths[apx_name] = _filter_time_lengths["SpinTaylorF2"] + +from . 
nltides import nonlinear_tidal_spa +cpu_fd["TaylorF2NL"] = nonlinear_tidal_spa + +from .premerger import premerger_taylorf2 +cpu_fd['PreTaylorF2'] = premerger_taylorf2 + +from .multiband import multiband_fd_waveform +cpu_fd['multiband'] = multiband_fd_waveform + +# Load external waveforms ##################################################### +if 'PYCBC_WAVEFORM' in os.environ: + mods = os.environ['PYCBC_WAVEFORM'].split(':') + for mod in mods: + mhandle = __import__(mod, fromlist=['']) + mhandle.add_me(cpu_fd=cpu_fd, + cpu_td=cpu_td, + filter_time_lengths=_filter_time_lengths) + +def td_fd_waveform_transform(approximant): + '''If the waveform approximant is in time domain, make a frequency domain + version using 'get_fd_waveform_from_td'; If the waveform approximant is in + frequency domain, do interpolation for waveforms with a time length estimator, + and make a time domain version using 'get_td_waveform_from_fd' + + Parameters + ---------- + approximant: string + The name of a waveform approximant. + ''' + fd_apx = list(cpu_fd.keys()) + td_apx = list(cpu_td.keys()) + + if (approximant in td_apx) and (approximant not in fd_apx): + # We can make a fd version of td approximants + cpu_fd[approximant] = get_fd_waveform_from_td + + if approximant in fd_apx: + # We can do interpolation for waveforms that have a time length + apx_int = approximant + '_INTERP' + cpu_fd[apx_int] = get_interpolated_fd_waveform + _filter_time_lengths[apx_int] = _filter_time_lengths[approximant] + + # We can also make a td version of this + # This will override any existing approximants with the same name + # (ex. IMRPhenomXX) + cpu_td[approximant] = get_td_waveform_from_fd + +for apx in copy.copy(_filter_time_lengths): + td_fd_waveform_transform(apx) + + +td_wav = _scheme.ChooseBySchemeDict() +fd_wav = _scheme.ChooseBySchemeDict() +td_wav.update({_scheme.CPUScheme:cpu_td,_scheme.CUDAScheme:cuda_td}) +fd_wav.update({_scheme.CPUScheme:cpu_fd,_scheme.CUDAScheme:cuda_fd}) +sgburst_wav = {_scheme.CPUScheme:cpu_sgburst} + +
+[docs] +def get_waveform_filter(out, template=None, **kwargs): + """Return a frequency domain waveform filter for the specified approximant + """ + n = len(out) + + input_params = props(template, **kwargs) + + if input_params['approximant'] in filter_approximants(_scheme.mgr.state): + wav_gen = filter_wav[type(_scheme.mgr.state)] + htilde = wav_gen[input_params['approximant']](out=out, **input_params) + htilde.resize(n) + htilde.chirp_length = get_waveform_filter_length_in_time(**input_params) + htilde.length_in_time = htilde.chirp_length + return htilde + + if input_params['approximant'] in fd_approximants(_scheme.mgr.state): + wav_gen = fd_wav[type(_scheme.mgr.state)] + + duration = get_waveform_filter_length_in_time(**input_params) + hp, _ = wav_gen[input_params['approximant']](duration=duration, + return_hc=False, **input_params) + + hp.resize(n) + out[0:len(hp)] = hp[:] + hp.data = out + + hp.length_in_time = hp.chirp_length = duration + return hp + + elif input_params['approximant'] in td_approximants(_scheme.mgr.state): + wav_gen = td_wav[type(_scheme.mgr.state)] + hp, _ = wav_gen[input_params['approximant']](**input_params) + # taper the time series hp if required + if 'taper' in input_params.keys() and \ + input_params['taper'] is not None: + hp = wfutils.taper_timeseries(hp, input_params['taper'], + return_lal=False) + return td_waveform_to_fd_waveform(hp, out=out) + + else: + raise ValueError("Approximant %s not available" % + (input_params['approximant']))
+ + +
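A sketch of calling get_waveform_filter with a pre-allocated output vector; the sizes and parameter values are illustrative, and out must be long enough to hold the filter up to the intended Nyquist frequency:

# Place a frequency-domain filter into a pre-allocated complex vector.
from pycbc.types import zeros, complex64
from pycbc.waveform import get_waveform_filter

delta_f = 0.25
flen = int(2048 / delta_f) + 1          # bins up to a 2048 Hz Nyquist frequency
out = zeros(flen, dtype=complex64)
htilde = get_waveform_filter(out, approximant='IMRPhenomD',
                             mass1=30.0, mass2=30.0,
                             delta_f=delta_f, f_lower=25.0)
print(htilde.length_in_time, htilde.chirp_length)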
+[docs] +def td_waveform_to_fd_waveform(waveform, out=None, length=None, + buffer_length=100): + """ Convert a time domain into a frequency domain waveform by FFT. + As a waveform is assumed to "wrap" in the time domain one must be + careful to ensure the waveform goes to 0 at both "boundaries". To + ensure this is done correctly the waveform must have the epoch set such + the merger time is at t=0 and the length of the waveform should be + shorter than the desired length of the FrequencySeries (times 2 - 1) + so that zeroes can be suitably pre- and post-pended before FFTing. + If given, out is a memory array to be used as the output of the FFT. + If not given memory is allocated internally. + If present the length of the returned FrequencySeries is determined + from the length out. If out is not given the length can be provided + expicitly, or it will be chosen as the nearest power of 2. If choosing + length explicitly the waveform length + buffer_length is used when + choosing the nearest binary number so that some zero padding is always + added. + """ + # Figure out lengths and set out if needed + if out is None: + if length is None: + N = pnutils.nearest_larger_binary_number(len(waveform) + \ + buffer_length) + n = int(N//2) + 1 + else: + n = length + N = (n-1)*2 + out = zeros(n, dtype=complex_same_precision_as(waveform)) + else: + n = len(out) + N = (n-1)*2 + delta_f = 1. / (N * waveform.delta_t) + + # total duration of the waveform + tmplt_length = len(waveform) * waveform.delta_t + if len(waveform) > N: + err_msg = "The time domain template is longer than the intended " + err_msg += "duration in the frequency domain. This situation is " + err_msg += "not supported in this function. Please shorten the " + err_msg += "waveform appropriately before calling this function or " + err_msg += "increase the allowed waveform length. " + err_msg += "Waveform length (in samples): {}".format(len(waveform)) + err_msg += ". Intended length: {}.".format(N) + raise ValueError(err_msg) + # for IMR templates the zero of time is at max amplitude (merger) + # thus the start time is minus the duration of the template from + # lower frequency cutoff to merger, i.e. minus the 'chirp time' + tChirp = - float( waveform.start_time ) # conversion from LIGOTimeGPS + waveform.resize(N) + k_zero = int(waveform.start_time / waveform.delta_t) + waveform.roll(k_zero) + htilde = FrequencySeries(out, delta_f=delta_f, copy=False) + fft(waveform.astype(real_same_precision_as(htilde)), htilde) + htilde.length_in_time = tmplt_length + htilde.chirp_length = tChirp + return htilde
+ + +
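A sketch showing td_waveform_to_fd_waveform applied to a time-domain waveform whose merger sits at t = 0, as returned by get_td_waveform; the output length is chosen automatically here:

# FFT a merger-at-t=0 time-domain waveform into a frequency series.
from pycbc.waveform import get_td_waveform, td_waveform_to_fd_waveform

hp, _ = get_td_waveform(approximant='SEOBNRv4', mass1=30.0, mass2=30.0,
                        delta_t=1.0 / 4096, f_lower=25.0)
htilde = td_waveform_to_fd_waveform(hp)
print(htilde.delta_f, htilde.length_in_time)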
+[docs] +def get_two_pol_waveform_filter(outplus, outcross, template, **kwargs): + """Return a frequency domain waveform filter for the specified approximant. + Unlike get_waveform_filter this function returns both h_plus and h_cross + components of the waveform, which are needed for searches where h_plus + and h_cross are not related by a simple phase shift. + """ + n = len(outplus) + + # If we don't have an inclination column alpha3 might be used + if not hasattr(template, 'inclination') and 'inclination' not in kwargs: + if hasattr(template, 'alpha3'): + kwargs['inclination'] = template.alpha3 + + input_params = props(template, **kwargs) + + if input_params['approximant'] in fd_approximants(_scheme.mgr.state): + wav_gen = fd_wav[type(_scheme.mgr.state)] + hp, hc = wav_gen[input_params['approximant']](**input_params) + hp.resize(n) + hc.resize(n) + outplus[0:len(hp)] = hp[:] + hp = FrequencySeries(outplus, delta_f=hp.delta_f, copy=False) + outcross[0:len(hc)] = hc[:] + hc = FrequencySeries(outcross, delta_f=hc.delta_f, copy=False) + hp.chirp_length = get_waveform_filter_length_in_time(**input_params) + hp.length_in_time = hp.chirp_length + hc.chirp_length = hp.chirp_length + hc.length_in_time = hp.length_in_time + return hp, hc + elif input_params['approximant'] in td_approximants(_scheme.mgr.state): + # N: number of time samples required + N = (n-1)*2 + delta_f = 1.0 / (N * input_params['delta_t']) + wav_gen = td_wav[type(_scheme.mgr.state)] + hp, hc = wav_gen[input_params['approximant']](**input_params) + # taper the time series hp if required + if 'taper' in input_params.keys() and \ + input_params['taper'] is not None: + hp = wfutils.taper_timeseries(hp, input_params['taper'], + return_lal=False) + hc = wfutils.taper_timeseries(hc, input_params['taper'], + return_lal=False) + # total duration of the waveform + tmplt_length = len(hp) * hp.delta_t + # for IMR templates the zero of time is at max amplitude (merger) + # thus the start time is minus the duration of the template from + # lower frequency cutoff to merger, i.e. minus the 'chirp time' + tChirp = - float( hp.start_time ) # conversion from LIGOTimeGPS + hp.resize(N) + hc.resize(N) + k_zero = int(hp.start_time / hp.delta_t) + hp.roll(k_zero) + hc.roll(k_zero) + hp_tilde = FrequencySeries(outplus, delta_f=delta_f, copy=False) + hc_tilde = FrequencySeries(outcross, delta_f=delta_f, copy=False) + fft(hp.astype(real_same_precision_as(hp_tilde)), hp_tilde) + fft(hc.astype(real_same_precision_as(hc_tilde)), hc_tilde) + hp_tilde.length_in_time = tmplt_length + hp_tilde.chirp_length = tChirp + hc_tilde.length_in_time = tmplt_length + hc_tilde.chirp_length = tChirp + return hp_tilde, hc_tilde + else: + raise ValueError("Approximant %s not available" % + (input_params['approximant']))
+ + +
+[docs] +def waveform_norm_exists(approximant): + if approximant in _filter_norms: + return True + else: + return False
+ + +
+[docs] +def get_template_amplitude_norm(template=None, **kwargs): + """ Return additional constant template normalization. This only affects + the effective distance calculation. Returns None for all templates with a + physically meaningful amplitude. + """ + input_params = props(template,**kwargs) + approximant = kwargs['approximant'] + + if approximant in _template_amplitude_norms: + return _template_amplitude_norms[approximant](**input_params) + else: + return None
+ + +def get_waveform_filter_precondition(approximant, length, delta_f): + """Return the data preconditioning factor for this approximant. + """ + if approximant in _filter_preconditions: + return _filter_preconditions[approximant](length, delta_f) + else: + return None + +
+[docs] +def get_waveform_filter_norm(approximant, psd, length, delta_f, f_lower): + """ Return the normalization vector for the approximant + """ + if approximant in _filter_norms: + return _filter_norms[approximant](psd, length, delta_f, f_lower) + else: + return None
+ + +
+[docs] +def get_waveform_end_frequency(template=None, **kwargs): + """Return the stop frequency of a template + """ + input_params = props(template,**kwargs) + approximant = kwargs['approximant'] + + if approximant in _filter_ends: + return _filter_ends[approximant](**input_params) + else: + return None
+ + +
+[docs] +def get_waveform_filter_length_in_time(approximant, template=None, **kwargs): + """For filter templates, return the length in time of the template. + """ + kwargs = props(template, **kwargs) + + if approximant in _filter_time_lengths: + return _filter_time_lengths[approximant](**kwargs) + else: + return None
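A small sketch of the duration estimator; it returns None for approximants without a registered length function, and the values below are illustrative:

# Estimate the template duration (seconds) for a registered approximant.
from pycbc.waveform import get_waveform_filter_length_in_time

dur = get_waveform_filter_length_in_time(approximant='IMRPhenomD',
                                         mass1=1.4, mass2=1.4, f_lower=20.0)
print(dur)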
+ + +__all__ = ["get_td_waveform", "get_td_det_waveform_from_fd_det", + "get_fd_waveform", "get_fd_waveform_sequence", + "get_fd_det_waveform", "get_fd_det_waveform_sequence", + "get_fd_waveform_from_td", + "print_td_approximants", "print_fd_approximants", + "td_approximants", "fd_approximants", + "get_waveform_filter", "filter_approximants", + "get_waveform_filter_norm", "get_waveform_end_frequency", + "waveform_norm_exists", "get_template_amplitude_norm", + "get_waveform_filter_length_in_time", "get_sgburst_waveform", + "print_sgburst_approximants", "sgburst_approximants", + "td_waveform_to_fd_waveform", "get_two_pol_waveform_filter", + "NoWaveformError", "FailedWaveformError", "get_td_waveform_from_fd", + 'cpu_fd', 'cpu_td', 'fd_sequence', 'fd_det_sequence', 'fd_det', + '_filter_time_lengths'] +
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/waveform/waveform_modes.html b/latest/html/_modules/pycbc/waveform/waveform_modes.html new file mode 100644 index 00000000000..ef4af4c8af7 --- /dev/null +++ b/latest/html/_modules/pycbc/waveform/waveform_modes.html @@ -0,0 +1,528 @@ + + + + + + pycbc.waveform.waveform_modes — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.waveform.waveform_modes

+# Copyright (C) 2020  Collin Capano, Alex Nitz
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""Provides functions and utilities for generating waveforms mode-by-mode.
+"""
+
+from string import Formatter
+import lal
+
+from pycbc import libutils, pnutils
+from pycbc.types import (TimeSeries, FrequencySeries)
+from .waveform import (props, _check_lal_pars, check_args)
+from . import parameters
+
+lalsimulation = libutils.import_optional('lalsimulation')
+
+def _formatdocstr(docstr):
+    """Utility for formatting docstrings with parameter information.
+    """
+    return docstr.format(
+        **{_p[1]: getattr(parameters, _p[1]).docstr(
+            prefix="    ", include_label=False).lstrip(' ')
+           for _p in Formatter().parse(docstr) if _p[1] is not None
+           })
+
+
+def _formatdocstrlist(docstr, paramlist, skip_params=None):
+    """Utility for formatting docstrings with parameter information.
+    """
+    if skip_params is None:
+        skip_params = []
+    pl = '\n'.join([_p.docstr(prefix="    ", include_label=False)
+                    for _p in paramlist if _p not in skip_params]).lstrip(' ')
+    return docstr.format(params=pl)
+
+
+
+[docs] +def sum_modes(hlms, inclination, phi): + """Applies spherical harmonics and sums modes to produce a plus and cross + polarization. + + Parameters + ---------- + hlms : dict + Dictionary of ``(l, m)`` -> complex ``hlm``. The ``hlm`` may be a + complex number or array, or complex ``TimeSeries``. All modes in the + dictionary will be summed. + inclination : float + The inclination to use. + phi : float + The phase to use. + + Returns + ------- + complex float or array + The plus and cross polarization as a complex number. The real part + gives the plus, the negative imaginary part the cross. + """ + out = None + for mode in hlms: + l, m = mode + hlm = hlms[l, m] + ylm = lal.SpinWeightedSphericalHarmonic(inclination, phi, -2, l, m) + if out is None: + out = ylm * hlm + else: + out += ylm * hlm + return out
+ + + +
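A toy sketch of sum_modes using hand-picked complex mode values; the numbers are purely illustrative and do not represent a physical waveform:

# Combine two illustrative (l, m) mode values into plus/cross polarizations.
from pycbc.waveform.waveform_modes import sum_modes

hlms = {(2, 2): 1.0 + 0.5j, (2, -2): 1.0 - 0.5j}   # hypothetical mode values
h = sum_modes(hlms, inclination=0.4, phi=1.2)
hp, hc = h.real, -h.imag
print(hp, hc)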
+[docs] +def default_modes(approximant): + """Returns the default modes for the given approximant. + """ + # FIXME: this should be replaced to a call to a lalsimulation function, + # whenever that's added + if approximant in ['IMRPhenomXPHM', 'IMRPhenomXHM']: + # according to arXiv:2004.06503 + ma = [(2, 2), (2, 1), (3, 3), (3, 2), (4, 4)] + # add the -m modes + ma += [(l, -m) for l, m in ma] + elif approximant in ['IMRPhenomPv3HM', 'IMRPhenomHM']: + # according to arXiv:1911.06050 + ma = [(2, 2), (2, 1), (3, 3), (3, 2), (4, 4), (4, 3)] + # add the -m modes + ma += [(l, -m) for l, m in ma] + elif approximant.startswith('NRSur7dq4'): + # according to arXiv:1905.09300 + ma = [(l, m) for l in [2, 3, 4] for m in range(-l, l+1)] + elif approximant.startswith('NRHybSur3dq8'): + # according to arXiv:1812.07865 + ma = [(2, 0), (2, 1), (2, 2), (3, 0), (3, 1), (3, 2), + (3, 3), (4, 2), (4, 3), (4, 4), (5, 5)] + else: + raise ValueError("I don't know what the default modes are for " + "approximant {}, sorry!".format(approximant)) + return ma
+ + + +
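For illustration, default_modes can be queried directly for any of the approximants handled above:

# Query the default (l, m) content assumed for a higher-mode approximant.
from pycbc.waveform.waveform_modes import default_modes

print(default_modes('IMRPhenomXHM'))
# e.g. [(2, 2), (2, 1), (3, 3), (3, 2), (4, 4), (2, -2), ...]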
+[docs] +def get_glm(l, m, theta): + r"""The magnitude of the :math:`{}_{-2}Y_{\ell m}`. + + The spin-weighted spherical harmonics can be written as + :math:`{}_{-2}Y_{\ell m}(\theta, \phi) = g_{\ell m}(\theta)e^{i m \phi}`. + This returns the `g_{\ell m}(\theta)` part. Note that this is real. + + Parameters + ---------- + l : int + The :math:`\ell` index of the spherical harmonic. + m : int + The :math:`m` index of the spherical harmonic. + theta : float + The polar angle (in radians). + + Returns + ------- + float : + The amplitude of the harmonic at the given polar angle. + """ + return lal.SpinWeightedSphericalHarmonic(theta, 0., -2, l, m).real
+ + + +
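A quick sketch of get_glm, evaluating the (2, 2) harmonic amplitude at two illustrative polar angles:

# Amplitude of the spin -2 weighted (2, 2) harmonic at two polar angles.
import numpy
from pycbc.waveform.waveform_modes import get_glm

print(get_glm(2, 2, 0.0))            # face-on: maximal (2, 2) amplitude
print(get_glm(2, 2, numpy.pi / 2))   # edge-on: reduced amplitude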
+[docs] +def get_nrsur_modes(**params): + """Generates NRSurrogate waveform mode-by-mode. + + All waveform parameters should be provided as keyword arguments. + Recognized parameters are listed below. Unrecognized arguments are ignored. + + Parameters + ---------- + template: object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + approximant : str + The approximant to generate. Must be one of the ``NRSur*`` models. + {delta_t} + {mass1} + {mass2} + {spin1x} + {spin1y} + {spin1z} + {spin2x} + {spin2y} + {spin2z} + {f_lower} + {f_ref} + {distance} + {mode_array} + + Returns + ------- + dict : + Dictionary of ``(l, m)`` -> ``(h_+, -h_x)`` ``TimeSeries``. + """ + laldict = _check_lal_pars(params) + ret = lalsimulation.SimInspiralPrecessingNRSurModes( + params['delta_t'], + params['mass1']*lal.MSUN_SI, + params['mass2']*lal.MSUN_SI, + params['spin1x'], params['spin1y'], params['spin1z'], + params['spin2x'], params['spin2y'], params['spin2z'], + params['f_lower'], params['f_ref'], + params['distance']*1e6*lal.PC_SI, laldict, + getattr(lalsimulation, params['approximant']) + ) + hlms = {} + while ret: + hlm = TimeSeries(ret.mode.data.data, delta_t=ret.mode.deltaT, + epoch=ret.mode.epoch) + hlms[ret.l, ret.m] = (hlm.real(), hlm.imag()) + ret = ret.next + return hlms
+ + +
+[docs] +def get_nrhybsur_modes(**params): + """Generates NRHybSur3dq8 waveform mode-by-mode. + + All waveform parameters should be provided as keyword arguments. + Recognized parameters are listed below. Unrecognized arguments are ignored. + + Parameters + ---------- + template: object + An object that has attached properties. This can be used to substitute + for keyword arguments. A common example would be a row in an xml table. + approximant : str + The approximant to generate. Must be one of the ``NRHyb*`` models. + {delta_t} + {mass1} + {mass2} + {spin1z} + {spin2z} + {f_lower} + {f_ref} + {distance} + {mode_array} + + Returns + ------- + dict : + Dictionary of ``(l, m)`` -> ``(h_+, -h_x)`` ``TimeSeries``. + """ + laldict = _check_lal_pars(params) + ret = lalsimulation.SimIMRNRHybSur3dq8Modes( + params['delta_t'], + params['mass1']*lal.MSUN_SI, + params['mass2']*lal.MSUN_SI, + params['spin1z'], + params['spin2z'], + params['f_lower'], params['f_ref'], + params['distance']*1e6*lal.PC_SI, laldict + ) + hlms = {} + while ret: + hlm = TimeSeries(ret.mode.data.data, delta_t=ret.mode.deltaT, + epoch=ret.mode.epoch) + hlms[ret.l, ret.m] = (hlm.real(), hlm.imag()) + ret = ret.next + return hlms
+ + + +get_nrsur_modes.__doc__ = _formatdocstr(get_nrsur_modes.__doc__) +get_nrhybsur_modes.__doc__ = _formatdocstr(get_nrhybsur_modes.__doc__) + + +
+[docs] +def get_imrphenomxh_modes(**params): + """Generates ``IMRPhenomXHM`` waveforms mode-by-mode. """ + approx = params['approximant'] + if not approx.startswith('IMRPhenomX'): + raise ValueError("unsupported approximant") + mode_array = params.pop('mode_array', None) + if mode_array is None: + mode_array = default_modes(approx) + if 'f_final' not in params: + # setting to 0 will default to ringdown frequency + params['f_final'] = 0. + hlms = {} + for (l, m) in mode_array: + params['mode_array'] = [(l, m)] + laldict = _check_lal_pars(params) + hlm = lalsimulation.SimIMRPhenomXHMGenerateFDOneMode( + float(pnutils.solar_mass_to_kg(params['mass1'])), + float(pnutils.solar_mass_to_kg(params['mass2'])), + float(params['spin1z']), + float(params['spin2z']), l, m, + pnutils.megaparsecs_to_meters(float(params['distance'])), + params['f_lower'], params['f_final'], params['delta_f'], + params['coa_phase'], params['f_ref'], + laldict) + hlm = FrequencySeries(hlm.data.data, delta_f=hlm.deltaF, + epoch=hlm.epoch) + # Plus, cross strains without Y_lm. + # (-1)**(l) factor ALREADY included in FDOneMode + hplm = 0.5 * hlm # Plus strain + hclm = 0.5j * hlm # Cross strain + if m > 0: + hclm *= -1 + hlms[l, m] = (hplm, hclm) + return hlms
+ + + +_mode_waveform_td = {'NRSur7dq4': get_nrsur_modes, + 'NRHybSur3dq8': get_nrhybsur_modes, + } +_mode_waveform_fd = {'IMRPhenomXHM': get_imrphenomxh_modes, + } +# 'IMRPhenomXPHM':get_imrphenomhm_modes needs to be implemented +# LAL function do not split strain mode by mode + +
+[docs] +def fd_waveform_mode_approximants(): + """Frequency domain approximants that will return separate modes.""" + return sorted(_mode_waveform_fd.keys())
+ + + +
+[docs] +def td_waveform_mode_approximants(): + """Time domain approximants that will return separate modes.""" + return sorted(_mode_waveform_td.keys())
+ + + +
+[docs] +def get_fd_waveform_modes(template=None, **kwargs): + r"""Generates frequency domain waveforms, but does not sum over the modes. + + The returned values are the frequency-domain equivalents of the real and + imaginary parts of the complex :math:`\mathfrak{{h}}_{{\ell m}}(t)` time + series. In other words, the returned values are equivalent to the Fourier + Transform of the two time series returned by + :py:func:`get_td_waveform_modes`; see that function for more details. + + Parameters + ---------- + template: object + An object that has attached properties. This can be used to subsitute + for keyword arguments. + {params} + + Returns + ------- + ulm : dict + Dictionary of mode tuples -> fourier transform of the real part of the + hlm time series, as a :py:class:`pycbc.types.FrequencySeries`. + vlm : dict + Dictionary of mode tuples -> fourier transform of the imaginary part of + the hlm time series, as a :py:class:`pycbc.types.FrequencySeries`. + """ + params = props(template, **kwargs) + required = parameters.fd_required + check_args(params, required) + apprx = params['approximant'] + if apprx not in _mode_waveform_fd: + raise ValueError("I don't support approximant {}, sorry" + .format(apprx)) + return _mode_waveform_fd[apprx](**params)
+ + + +get_fd_waveform_modes.__doc__ = _formatdocstrlist( + get_fd_waveform_modes.__doc__, parameters.fd_waveform_params, + skip_params=['inclination', 'coa_phase']) + + +
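A hedged usage sketch for the frequency-domain entry point defined above. The import path assumes the function is exposed under ``pycbc.waveform``; the masses, spins, distance and frequency grid are illustrative placeholders, and any parameter not given falls back to the package defaults:

from pycbc.waveform import get_fd_waveform_modes

# IMRPhenomXHM is the only frequency-domain model registered in
# _mode_waveform_fd above.
modes = get_fd_waveform_modes(approximant='IMRPhenomXHM',
                              mass1=38.0, mass2=32.0,
                              spin1z=0.1, spin2z=-0.2,
                              distance=400.0,
                              delta_f=1.0 / 64, f_lower=20.0)

# The generator above returns one pair of FrequencySeries per (l, m) mode.
for (ell, m), (hplm, hclm) in sorted(modes.items()):
    print(ell, m, len(hplm), hplm.delta_f)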
+[docs] +def get_td_waveform_modes(template=None, **kwargs): + r"""Generates time domain waveforms, but does not sum over the modes. + + The returned values are the real and imaginary parts of the complex + :math:`\mathfrak{{h}}_{{\ell m}}(t)`. These are defined such that the plus + and cross polarizations :math:`h_{{+,\times}}` are: + + .. math:: + + h_{{+,\times}}(\theta, \phi; t) = (\Re, -\Im) \sum_{{\ell m}} + {{}}_{{-2}}Y_{{\ell m}}(\theta, \phi) \mathfrak{{h}}_{{\ell m}}(t). + + + Parameters + ---------- + template: object + An object that has attached properties. This can be used to substitute + for keyword arguments. + {params} + + Returns + ------- + ulm : dict + Dictionary of mode tuples -> real part of the hlm, as a + :py:class:`pycbc.types.TimeSeries`. + vlm : dict + Dictionary of mode tuples -> imaginary part of the hlm, as a + :py:class:`pycbc.types.TimeSeries`. + """ + params = props(template, **kwargs) + required = parameters.td_required + check_args(params, required) + apprx = params['approximant'] + if apprx not in _mode_waveform_td: + raise ValueError("I don't support approximant {}, sorry" + .format(apprx)) + return _mode_waveform_td[apprx](**params)
+ + + +get_td_waveform_modes.__doc__ = _formatdocstrlist( + get_td_waveform_modes.__doc__, parameters.td_waveform_params, + skip_params=['inclination', 'coa_phase']) +
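And the time-domain counterpart, again only a sketch: NRHybSur3dq8 needs its surrogate data file to be visible to lalsimulation (e.g. via LAL_DATA_PATH), and every numerical value below is a placeholder:

from pycbc.waveform import get_td_waveform_modes

modes = get_td_waveform_modes(approximant='NRHybSur3dq8',
                              mass1=45.0, mass2=25.0,
                              spin1z=0.3, spin2z=0.0,
                              distance=500.0,
                              f_lower=20.0, f_ref=20.0,
                              delta_t=1.0 / 4096)

# As in get_nrhybsur_modes above, each entry pairs the real and imaginary
# parts of the complex h_lm for that mode.
ulm22, vlm22 = modes[2, 2]
print(ulm22.duration, ulm22.delta_t)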
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/coincidence.html b/latest/html/_modules/pycbc/workflow/coincidence.html new file mode 100644 index 00000000000..be2ef4b48cf --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/coincidence.html @@ -0,0 +1,1085 @@ + + + + + + pycbc.workflow.coincidence — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.workflow.coincidence

+# Copyright (C) 2013  Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module is responsible for setting up the coincidence stage of pycbc
+workflows. For details about this module and its capabilities see here:
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/coincidence.html
+"""
+
+import os
+import logging
+
+from ligo import segments
+
+from pycbc.workflow.core import FileList, make_analysis_dir, Executable, Node, File
+
+logger = logging.getLogger('pycbc.workflow.coincidence')
+
+
+[docs] +class PyCBCBank2HDFExecutable(Executable): + """Converts xml tmpltbank to hdf format""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, bank_file): + node = Node(self) + node.add_input_opt('--bank-file', bank_file) + node.new_output_file_opt(bank_file.segment, '.hdf', '--output-file') + return node
+
+ + + +
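Every Executable subclass in this module is used through the same two-step pattern: instantiate it with the workflow's ConfigParser, call ``create_node`` with the input files, and add the resulting node to the workflow. A hedged fragment using the class above, mirroring ``convert_bank_to_hdf`` further down (``workflow`` and ``bank_file`` are placeholders provided by the surrounding workflow-generation script):

bank2hdf_exe = PyCBCBank2HDFExecutable(workflow.cp, 'bank2hdf',
                                       ifos=workflow.ifos,
                                       out_dir='coinc', tags=['full_data'])
node = bank2hdf_exe.create_node(bank_file)   # bank_file: xml template bank File
workflow += node                             # schedule the job
hdf_bank = node.output_files[0]              # hand the hdf bank downstream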
+[docs] +class PyCBCTrig2HDFExecutable(Executable): + """Converts xml triggers to hdf format, grouped by template hash""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, trig_files, bank_file): + node = Node(self) + node.add_input_opt('--bank-file', bank_file) + node.add_input_list_opt('--trigger-files', trig_files) + node.new_output_file_opt(trig_files[0].segment, '.hdf', + '--output-file', use_tmp_subdirs=True) + return node
+
+ + + +
+[docs] +class PyCBCFitByTemplateExecutable(Executable): + """Calculates values that describe the background distribution template by template""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, trig_file, bank_file, veto_file, veto_name): + node = Node(self) + # Executable objects are initialized with ifo information + node.add_opt('--ifo', self.ifo_string) + node.add_input_opt('--trigger-file', trig_file) + node.add_input_opt('--bank-file', bank_file) + node.add_input_opt('--veto-file', veto_file) + node.add_opt('--veto-segment-name', veto_name) + node.new_output_file_opt(trig_file.segment, '.hdf', '--output') + return node
+
+ + + +
+[docs] +class PyCBCFitOverParamExecutable(Executable): + """Smooths the background distribution parameters over a continuous parameter""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, raw_fit_file, bank_file): + node = Node(self) + node.add_input_opt('--template-fit-file', raw_fit_file) + node.add_input_opt('--bank-file', bank_file) + node.new_output_file_opt(raw_fit_file.segment, '.hdf', '--output') + return node
+
+ + + +
+[docs] +class PyCBCFindCoincExecutable(Executable): + """Find coinc triggers using a folded interval method""" + + current_retention_level = Executable.ALL_TRIGGERS +
+[docs] + def create_node(self, trig_files, bank_file, stat_files, veto_file, + veto_name, template_str, pivot_ifo, fixed_ifo, tags=None): + if tags is None: + tags = [] + segs = trig_files.get_times_covered_by_files() + seg = segments.segment(segs[0][0], segs[-1][1]) + node = Node(self) + node.add_input_opt('--template-bank', bank_file) + node.add_input_list_opt('--trigger-files', trig_files) + if len(stat_files) > 0: + node.add_input_list_opt( + '--statistic-files', + stat_files, + check_existing_options=False + ) + if veto_file is not None: + node.add_input_opt('--veto-files', veto_file) + node.add_opt('--segment-name', veto_name) + node.add_opt('--pivot-ifo', pivot_ifo) + node.add_opt('--fixed-ifo', fixed_ifo) + node.add_opt('--template-fraction-range', template_str) + node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags) + return node
+
+ + +
+[docs] +class PyCBCFindSnglsExecutable(Executable): + """Calculate single-detector ranking statistic for triggers""" + + current_retention_level = Executable.ALL_TRIGGERS + file_input_options = ['--statistic-files'] +
+[docs] + def create_node(self, trig_files, bank_file, stat_files, veto_file, + veto_name, template_str, tags=None): + if tags is None: + tags = [] + segs = trig_files.get_times_covered_by_files() + seg = segments.segment(segs[0][0], segs[-1][1]) + node = Node(self) + node.add_input_opt('--template-bank', bank_file) + node.add_input_list_opt('--trigger-files', trig_files) + if len(stat_files) > 0: + node.add_input_list_opt( + '--statistic-files', + stat_files, + check_existing_options=False + ) + if veto_file is not None: + node.add_input_opt('--veto-files', veto_file) + node.add_opt('--segment-name', veto_name) + node.add_opt('--template-fraction-range', template_str) + node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags) + return node
+
+ + +
+[docs] +class PyCBCStatMapExecutable(Executable): + """Calculate FAP, IFAR, etc for coincs""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, coinc_files, ifos, tags=None): + if tags is None: + tags = [] + segs = coinc_files.get_times_covered_by_files() + seg = segments.segment(segs[0][0], segs[-1][1]) + + node = Node(self) + node.add_input_list_opt('--coinc-files', coinc_files) + node.add_opt('--ifos', ifos) + node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags) + return node
+
+ + +
+[docs] +class PyCBCSnglsStatMapExecutable(Executable): + """Calculate FAP, IFAR, etc for singles""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, sngls_files, ifo, tags=None): + if tags is None: + tags = [] + segs = sngls_files.get_times_covered_by_files() + seg = segments.segment(segs[0][0], segs[-1][1]) + + node = Node(self) + node.add_input_list_opt('--sngls-files', sngls_files) + node.add_opt('--ifos', ifo) + node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags) + return node
+
+ + + +
+[docs] +class PyCBCStatMapInjExecutable(Executable): + """Calculate FAP, IFAR, etc for coincs for injections""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, coinc_files, full_data, + ifos, tags=None): + if tags is None: + tags = [] + segs = coinc_files.get_times_covered_by_files() + seg = segments.segment(segs[0][0], segs[-1][1]) + + node = Node(self) + node.add_input_list_opt('--zero-lag-coincs', coinc_files) + + if isinstance(full_data, list): + node.add_input_list_opt('--full-data-background', full_data) + else: + node.add_input_opt('--full-data-background', full_data) + + node.add_opt('--ifos', ifos) + node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags) + return node
+
+ + +
+[docs] +class PyCBCSnglsStatMapInjExecutable(Executable): + """Calculate FAP, IFAR, etc for singles for injections""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, sngls_files, background_file, + ifos, tags=None): + if tags is None: + tags = [] + segs = sngls_files.get_times_covered_by_files() + seg = segments.segment(segs[0][0], segs[-1][1]) + + node = Node(self) + node.add_input_list_opt('--sngls-files', sngls_files) + node.add_input_opt('--full-data-background', background_file) + + node.add_opt('--ifos', ifos) + node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags) + return node
+
+ + + +
+[docs] +class PyCBCHDFInjFindExecutable(Executable): + """Find injections in the hdf files output""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, inj_coinc_file, inj_xml_file, veto_file, veto_name, tags=None): + if tags is None: + tags = [] + node = Node(self) + node.add_input_list_opt('--trigger-file', inj_coinc_file) + node.add_input_list_opt('--injection-file', inj_xml_file) + if veto_name is not None: + node.add_input_opt('--veto-file', veto_file) + node.add_opt('--segment-name', veto_name) + node.new_output_file_opt(inj_xml_file[0].segment, '.hdf', + '--output-file', tags=tags) + return node
+
+ + + +
+[docs] +class PyCBCDistributeBackgroundBins(Executable): + """Distribute coinc files among different background bins""" + + current_retention_level = Executable.ALL_TRIGGERS +
+[docs] + def create_node(self, coinc_files, bank_file, background_bins, tags=None): + if tags is None: + tags = [] + node = Node(self) + node.add_input_list_opt('--coinc-files', coinc_files) + node.add_input_opt('--bank-file', bank_file) + node.add_opt('--background-bins', ' '.join(background_bins)) + + names = [b.split(':')[0] for b in background_bins] + + output_files = [File(coinc_files[0].ifo_list, + self.name, + coinc_files[0].segment, + directory=self.out_dir, + tags = tags + ['mbin-%s' % i], + extension='.hdf') for i in range(len(background_bins))] + node.add_output_list_opt('--output-files', output_files) + node.names = names + return node
+
+ + + +
+[docs] +class PyCBCCombineStatmap(Executable): + """Combine coincs over different bins and apply trials factor""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, statmap_files, tags=None): + if tags is None: + tags = [] + node = Node(self) + node.add_input_list_opt('--statmap-files', statmap_files) + node.new_output_file_opt(statmap_files[0].segment, '.hdf', + '--output-file', tags=tags) + return node
+
+ + + +
+[docs] +class PyCBCAddStatmap(PyCBCCombineStatmap): + """Combine statmap files and add FARs over different coinc types""" + + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, statmap_files, background_files, tags=None): + if tags is None: + tags = [] + node = super(PyCBCAddStatmap, self).create_node(statmap_files, + tags=tags) + # Enforce upper case + ctags = [t.upper() for t in (tags + self.tags)] + if 'INJECTIONS' in ctags: + node.add_input_list_opt('--background-files', background_files) + + return node
+
+ + + +
+[docs] +class PyCBCExcludeZerolag(Executable): + """ Remove times of zerolag coincidences of all types from exclusive + background """ + current_retention_level = Executable.MERGED_TRIGGERS +
+[docs] + def create_node(self, statmap_file, other_statmap_files, tags=None): + if tags is None: + tags = [] + node = Node(self) + node.add_input_opt('--statmap-file', statmap_file) + node.add_input_list_opt('--other-statmap-files', + other_statmap_files) + node.new_output_file_opt(statmap_file.segment, '.hdf', + '--output-file', tags=None) + + return node
+
+ + + +
+[docs] +class MergeExecutable(Executable): + current_retention_level = Executable.MERGED_TRIGGERS
+ + + +
+[docs] +class CensorForeground(Executable): + current_retention_level = Executable.MERGED_TRIGGERS
+ + + +
+[docs] +def make_foreground_censored_veto(workflow, bg_file, veto_file, veto_name, + censored_name, out_dir, tags=None): + tags = [] if tags is None else tags + node = CensorForeground(workflow.cp, 'foreground_censor', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_opt('--foreground-triggers', bg_file) + node.add_input_opt('--veto-file', veto_file) + node.add_opt('--segment-name', veto_name) + node.add_opt('--output-segment-name', censored_name) + node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file') + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def merge_single_detector_hdf_files(workflow, bank_file, trigger_files, out_dir, tags=None): + if tags is None: + tags = [] + make_analysis_dir(out_dir) + out = FileList() + for ifo in workflow.ifos: + node = MergeExecutable(workflow.cp, 'hdf_trigger_merge', + ifos=ifo, out_dir=out_dir, tags=tags).create_node() + node.add_input_opt('--bank-file', bank_file) + node.add_input_list_opt('--trigger-files', trigger_files.find_output_with_ifo(ifo)) + node.new_output_file_opt(workflow.analysis_time, '.hdf', '--output-file') + workflow += node + out += node.output_files + return out
+ + + +
+[docs] +def setup_trigger_fitting(workflow, insps, hdfbank, veto_file, veto_name, + output_dir=None, tags=None): + if not workflow.cp.has_option('workflow-coincidence', 'do-trigger-fitting'): + return FileList() + else: + smoothed_fit_files = FileList() + for i in workflow.ifos: + ifo_insp = [insp for insp in insps if (insp.ifo == i)] + assert len(ifo_insp)==1 + ifo_insp = ifo_insp[0] + raw_exe = PyCBCFitByTemplateExecutable(workflow.cp, + 'fit_by_template', ifos=i, + out_dir=output_dir, + tags=tags) + raw_node = raw_exe.create_node(ifo_insp, hdfbank, + veto_file, veto_name) + workflow += raw_node + smooth_exe = PyCBCFitOverParamExecutable(workflow.cp, + 'fit_over_param', ifos=i, + out_dir=output_dir, + tags=tags) + smooth_node = smooth_exe.create_node(raw_node.output_file, + hdfbank) + workflow += smooth_node + smoothed_fit_files += smooth_node.output_files + return smoothed_fit_files
+ + + +
+[docs] +def find_injections_in_hdf_coinc(workflow, inj_coinc_file, inj_xml_file, + veto_file, veto_name, out_dir, tags=None): + if tags is None: + tags = [] + make_analysis_dir(out_dir) + exe = PyCBCHDFInjFindExecutable(workflow.cp, 'hdfinjfind', + ifos=workflow.ifos, + out_dir=out_dir, tags=tags) + node = exe.create_node(inj_coinc_file, inj_xml_file, veto_file, veto_name) + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def convert_bank_to_hdf(workflow, xmlbank, out_dir, tags=None): + """Return the template bank in hdf format""" + if tags is None: + tags = [] + #FIXME, make me not needed + if len(xmlbank) > 1: + raise ValueError('Can only convert a single template bank') + + logger.info('convert template bank to HDF') + make_analysis_dir(out_dir) + bank2hdf_exe = PyCBCBank2HDFExecutable(workflow.cp, 'bank2hdf', + ifos=workflow.ifos, + out_dir=out_dir, tags=tags) + bank2hdf_node = bank2hdf_exe.create_node(xmlbank[0]) + workflow.add_node(bank2hdf_node) + return bank2hdf_node.output_files
+ + + +
+[docs] +def convert_trig_to_hdf(workflow, hdfbank, xml_trigger_files, out_dir, tags=None): + """Return the list of hdf5 trigger files outputs""" + if tags is None: + tags = [] + #FIXME, make me not needed + logger.info('convert single inspiral trigger files to hdf5') + make_analysis_dir(out_dir) + + trig_files = FileList() + for ifo, insp_group in zip(*xml_trigger_files.categorize_by_attr('ifo')): + trig2hdf_exe = PyCBCTrig2HDFExecutable(workflow.cp, 'trig2hdf', + ifos=ifo, out_dir=out_dir, tags=tags) + _, insp_bundles = insp_group.categorize_by_attr('segment') + for insps in insp_bundles: + trig2hdf_node = trig2hdf_exe.create_node(insps, hdfbank[0]) + workflow.add_node(trig2hdf_node) + trig_files += trig2hdf_node.output_files + return trig_files
+ + + +
+[docs] +def setup_statmap(workflow, ifos, coinc_files, out_dir, tags=None): + tags = [] if tags is None else tags + + statmap_exe = PyCBCStatMapExecutable(workflow.cp, 'statmap', + ifos=ifos, + tags=tags, out_dir=out_dir) + + ifolist = ' '.join(ifos) + stat_node = statmap_exe.create_node(coinc_files, ifolist) + workflow.add_node(stat_node) + return stat_node.output_file
+ + + +
+[docs] +def setup_sngls_statmap(workflow, ifo, sngls_files, out_dir, tags=None): + tags = [] if tags is None else tags + + statmap_exe = PyCBCSnglsStatMapExecutable(workflow.cp, 'sngls_statmap', + ifos=ifo, + tags=tags, out_dir=out_dir) + + stat_node = statmap_exe.create_node(sngls_files, ifo) + workflow.add_node(stat_node) + return stat_node.output_file
+ + + +
+[docs] +def setup_statmap_inj(workflow, ifos, coinc_files, background_file, + out_dir, tags=None): + tags = [] if tags is None else tags + + statmap_exe = PyCBCStatMapInjExecutable(workflow.cp, + 'statmap_inj', + ifos=ifos, + tags=tags, out_dir=out_dir) + + ifolist = ' '.join(ifos) + stat_node = statmap_exe.create_node(FileList(coinc_files), + background_file, + ifolist) + workflow.add_node(stat_node) + return stat_node.output_files[0]
+ + + +
+[docs] +def setup_sngls_statmap_inj(workflow, ifo, sngls_inj_files, background_file, + out_dir, tags=None): + tags = [] if tags is None else tags + + statmap_exe = PyCBCSnglsStatMapInjExecutable(workflow.cp, + 'sngls_statmap_inj', + ifos=ifo, + tags=tags, + out_dir=out_dir) + + stat_node = statmap_exe.create_node(sngls_inj_files, + background_file, + ifo) + + workflow.add_node(stat_node) + return stat_node.output_files[0]
+ + + +
+[docs] +def setup_interval_coinc_inj(workflow, hdfbank, + inj_trig_files, stat_files, + background_file, veto_file, veto_name, + out_dir, pivot_ifo, fixed_ifo, tags=None): + """ + This function sets up exact match coincidence for injections + """ + if tags is None: + tags = [] + make_analysis_dir(out_dir) + logger.info('Setting up coincidence for injections') + + # Wall time knob and memory knob + factor = int(workflow.cp.get_opt_tags('workflow-coincidence', + 'parallelization-factor', tags)) + + ifiles = {} + for ifo, ifi in zip(*inj_trig_files.categorize_by_attr('ifo')): + ifiles[ifo] = ifi[0] + + injinj_files = FileList() + for ifo in ifiles: # ifiles is keyed on ifo + injinj_files.append(ifiles[ifo]) + + findcoinc_exe = PyCBCFindCoincExecutable(workflow.cp, + 'coinc', + ifos=ifiles.keys(), + tags=tags + ['injinj'], + out_dir=out_dir) + bg_files = [] + for i in range(factor): + group_str = '%s/%s' % (i, factor) + coinc_node = findcoinc_exe.create_node(injinj_files, hdfbank, + stat_files, + veto_file, veto_name, + group_str, + pivot_ifo, + fixed_ifo, + tags=['JOB'+str(i)]) + bg_files += coinc_node.output_files + workflow.add_node(coinc_node) + + logger.info('...leaving coincidence for injections') + + return setup_statmap_inj(workflow, ifiles.keys(), bg_files, + background_file, out_dir, + tags=tags + [veto_name])
+ + + +
+[docs] +def setup_interval_coinc(workflow, hdfbank, trig_files, stat_files, + veto_file, veto_name, out_dir, pivot_ifo, + fixed_ifo, tags=None): + """ + This function sets up exact match coincidence + """ + if tags is None: + tags = [] + make_analysis_dir(out_dir) + logger.info('Setting up coincidence') + + ifos, _ = trig_files.categorize_by_attr('ifo') + findcoinc_exe = PyCBCFindCoincExecutable(workflow.cp, 'coinc', + ifos=ifos, + tags=tags, out_dir=out_dir) + + # Wall time knob and memory knob + factor = int(workflow.cp.get_opt_tags('workflow-coincidence', + 'parallelization-factor', + [findcoinc_exe.ifo_string] + tags)) + + statmap_files = [] + bg_files = FileList() + for i in range(factor): + group_str = '%s/%s' % (i, factor) + coinc_node = findcoinc_exe.create_node(trig_files, hdfbank, + stat_files, + veto_file, veto_name, + group_str, + pivot_ifo, + fixed_ifo, + tags=['JOB'+str(i)]) + bg_files += coinc_node.output_files + workflow.add_node(coinc_node) + + statmap_files = setup_statmap(workflow, ifos, bg_files, + out_dir, tags=tags) + + logger.info('...leaving coincidence ') + return statmap_files
+ + + +
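Both ``setup_interval_coinc`` and ``setup_interval_coinc_inj`` read their job-splitting factor from the workflow configuration via ``get_opt_tags``. An illustrative ini fragment; the section and option names come from the calls above, while the value and the executable paths are placeholders:

[workflow-coincidence]
; enable the single-detector trigger fitting stage (checked in setup_trigger_fitting)
do-trigger-fitting =
; split each coincidence job into 10 smaller jobs
parallelization-factor = 10

[executables]
coinc = ${which:pycbc_coinc_findtrigs}
statmap = ${which:pycbc_coinc_statmap}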
+[docs] +def setup_sngls(workflow, hdfbank, trig_files, stat_files, + veto_file, veto_name, out_dir, tags=None): + """ + This function sets up getting statistic values for single-detector triggers + """ + ifos, _ = trig_files.categorize_by_attr('ifo') + findsngls_exe = PyCBCFindSnglsExecutable(workflow.cp, 'sngls', ifos=ifos, + tags=tags, out_dir=out_dir) + # Wall time knob and memory knob + factor = int(workflow.cp.get_opt_tags('workflow-coincidence', + 'parallelization-factor', + [findsngls_exe.ifo_string] + tags)) + + statmap_files = [] + bg_files = FileList() + for i in range(factor): + group_str = '%s/%s' % (i, factor) + sngls_node = findsngls_exe.create_node(trig_files, hdfbank, + stat_files, + veto_file, veto_name, + group_str, + tags=['JOB'+str(i)]) + bg_files += sngls_node.output_files + workflow.add_node(sngls_node) + + statmap_files = setup_sngls_statmap(workflow, ifos[0], bg_files, + out_dir, tags=tags) + + logger.info('...leaving coincidence ') + return statmap_files
+ + + +
+[docs] +def setup_sngls_inj(workflow, hdfbank, inj_trig_files, + stat_files, background_file, veto_file, veto_name, + out_dir, tags=None): + """ + This function sets up getting statistic values for single-detector triggers + from injections + """ + ifos, _ = inj_trig_files.categorize_by_attr('ifo') + findsnglsinj_exe = PyCBCFindSnglsExecutable(workflow.cp, 'sngls', ifos=ifos, + tags=tags, out_dir=out_dir) + # Wall time knob and memory knob + exe_str_tags = [findsnglsinj_exe.ifo_string] + tags + factor = int(workflow.cp.get_opt_tags('workflow-coincidence', + 'parallelization-factor', + exe_str_tags)) + + statmap_files = [] + bg_files = FileList() + for i in range(factor): + group_str = '%s/%s' % (i, factor) + sngls_node = findsnglsinj_exe.create_node(inj_trig_files, hdfbank, + stat_files, + veto_file, veto_name, + group_str, + tags=['JOB'+str(i)]) + bg_files += sngls_node.output_files + workflow.add_node(sngls_node) + + statmap_files = setup_sngls_statmap_inj(workflow, ifos[0], bg_files, + background_file, + out_dir, tags=tags) + + logger.info('...leaving coincidence ') + return statmap_files
+ + + +
+[docs] +def select_files_by_ifo_combination(ifocomb, insps): + """ + This function selects single-detector files ('insps') for a given ifo combination + """ + inspcomb = FileList() + for ifo, ifile in zip(*insps.categorize_by_attr('ifo')): + if ifo in ifocomb: + inspcomb += ifile + + return inspcomb
+ + + +
+[docs] +def get_ordered_ifo_list(ifocomb, ifo_ids): + """ + This function sorts the combination of ifos (ifocomb) based on the given + precedence list (ifo_ids dictionary) and returns the first ifo as the pivot, + the second ifo as the fixed ifo, and the ordered list joined as a string. + """ + if len(ifocomb) == 1: + # Single-detector combinations don't have fixed/pivot IFOs + return None, None, ifocomb[0] + + # combination_prec stores precedence info for the detectors in the combination + combination_prec = {ifo: ifo_ids[ifo] for ifo in ifocomb} + ordered_ifo_list = sorted(combination_prec, key = combination_prec.get) + pivot_ifo = ordered_ifo_list[0] + fixed_ifo = ordered_ifo_list[1] + + return pivot_ifo, fixed_ifo, ''.join(ordered_ifo_list)
+ + + +
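A small self-contained illustration of ``get_ordered_ifo_list``, with made-up precedence values (lower means higher precedence):

from pycbc.workflow.coincidence import get_ordered_ifo_list

ifo_precedence = {'H1': 0, 'L1': 1, 'V1': 2}   # example values only

pivot, fixed, combo = get_ordered_ifo_list(('H1', 'L1', 'V1'), ifo_precedence)
print(pivot, fixed, combo)                       # H1 L1 H1L1V1

# Single-detector combinations have no pivot/fixed ifo:
print(get_ordered_ifo_list(('L1',), ifo_precedence))   # (None, None, 'L1')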
+[docs] +def setup_combine_statmap(workflow, final_bg_file_list, bg_file_list, + out_dir, tags=None): + """ + Combine the statmap files into one background file + """ + if tags is None: + tags = [] + make_analysis_dir(out_dir) + logger.info('Setting up combine statmap') + + cstat_exe_name = os.path.basename(workflow.cp.get("executables", + "combine_statmap")) + if cstat_exe_name == 'pycbc_combine_statmap': + cstat_class = PyCBCCombineStatmap + elif cstat_exe_name == 'pycbc_add_statmap': + cstat_class = PyCBCAddStatmap + else: + raise NotImplementedError('executable should be ' + 'pycbc_combine_statmap or pycbc_add_statmap') + + cstat_exe = cstat_class(workflow.cp, 'combine_statmap', ifos=workflow.ifos, + tags=tags, out_dir=out_dir) + + if cstat_exe_name == 'pycbc_combine_statmap': + combine_statmap_node = cstat_exe.create_node(final_bg_file_list) + elif cstat_exe_name == 'pycbc_add_statmap': + combine_statmap_node = cstat_exe.create_node(final_bg_file_list, + bg_file_list) + + workflow.add_node(combine_statmap_node) + return combine_statmap_node.output_file
+ + + +
+[docs] +def setup_exclude_zerolag(workflow, statmap_file, other_statmap_files, + out_dir, ifos, tags=None): + """ + Exclude single triggers close to zerolag triggers from forming any + background events + """ + if tags is None: + tags = [] + make_analysis_dir(out_dir) + logger.info('Setting up exclude zerolag') + + exc_zerolag_exe = PyCBCExcludeZerolag(workflow.cp, 'exclude_zerolag', + ifos=ifos, tags=tags, + out_dir=out_dir) + exc_zerolag_node = exc_zerolag_exe.create_node(statmap_file, + other_statmap_files, + tags=None) + workflow.add_node(exc_zerolag_node) + return exc_zerolag_node.output_file
+ + + +
+[docs] +def rerank_coinc_followup(workflow, statmap_file, bank_file, out_dir, + tags=None, + injection_file=None, + ranking_file=None): + if tags is None: + tags = [] + + make_analysis_dir(out_dir) + + if not workflow.cp.has_section("workflow-rerank"): + logger.info("No reranking done in this workflow") + return statmap_file + else: + logger.info("Setting up reranking of candidates") + + # Generate reduced data files (maybe this could also be used elsewhere?) + stores = FileList([]) + for ifo in workflow.ifos: + make_analysis_dir('strain_files') + node = Executable(workflow.cp, 'strain_data_reduce', ifos=[ifo], + out_dir='strain_files', tags=tags).create_node() + node.add_opt('--gps-start-time', workflow.analysis_time[0]) + node.add_opt('--gps-end-time', workflow.analysis_time[1]) + if injection_file: + node.add_input_opt('--injection-file', injection_file) + + fil = node.new_output_file_opt(workflow.analysis_time, '.hdf', + '--output-file') + stores.append(fil) + workflow += node + + # Generate trigger input file + node = Executable(workflow.cp, 'rerank_trigger_input', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_opt('--statmap-file', statmap_file) + node.add_input_opt('--bank-file', bank_file) + trigfil = node.new_output_file_opt(workflow.analysis_time, '.hdf', + '--output-file') + workflow += node + + # Parallelize coinc trigger followup + factor = int(workflow.cp.get_opt_tags("workflow-rerank", + "parallelization-factor", tags)) + exe = Executable(workflow.cp, 'coinc_followup', ifos=workflow.ifos, + out_dir=out_dir, tags=tags) + + stat_files = FileList([]) + for i in range(factor): + node = exe.create_node() + node.new_output_file_opt(workflow.analysis_time, '.hdf', + '--output-file', tags=[str(i)]) + node.add_multiifo_input_list_opt('--hdf-store', stores) + node.add_input_opt('--input-file', trigfil) + node.add_opt('--start-index', str(i)) + node.add_opt('--stride', factor) + workflow += node + stat_files += node.output_files + + exe = Executable(workflow.cp, 'rerank_coincs', ifos=workflow.ifos, + out_dir=out_dir, tags=tags) + node = exe.create_node() + node.add_input_list_opt('--stat-files', stat_files) + node.add_input_opt('--statmap-file', statmap_file) + node.add_input_opt('--followup-file', trigfil) + + if ranking_file: + node.add_input_opt('--ranking-file', ranking_file) + + node.new_output_file_opt(workflow.analysis_time, '.hdf', + '--output-file') + workflow += node + return node.output_file
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/configparser_test.html b/latest/html/_modules/pycbc/workflow/configparser_test.html new file mode 100644 index 00000000000..05465a217aa --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/configparser_test.html @@ -0,0 +1,461 @@ + + + + + + pycbc.workflow.configparser_test — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.workflow.configparser_test

+import re
+import copy
+try:
+    import configparser as ConfigParser
+except ImportError:
+    import ConfigParser
+
+
+[docs] +def parse_workflow_ini_file(cpFile,parsed_filepath=None): + """Read a .ini file in, parse it as described in the documentation linked + to above, and return the parsed ini file. + + Parameters + ---------- + cpFile : The path to a .ini file to be read in + parsed_filepath : string, optional + If provided, the .ini file, after parsing, will be written to this + location + + Returns + ------- + cp: The parsed ConfigParser class containing the read in .ini file + """ + # First read the .ini file + cp = read_ini_file(cpFile) + print(cp.sections()) + + # Check for any substitutions that can be made + # FIXME: The python 3 version of ConfigParser can do this automatically + # move over to that if it can be backported to python2.X. + # We use the same formatting as the new configparser module when doing + # ExtendedInterpolation + # This is described at http://docs.python.org/3.4/library/configparser.html + #cp = perform_extended_interpolation(cp) + + # Split sections like [inspiral&tmplt] into [inspiral] and [tmplt] + cp = split_multi_sections(cp) + print(cp.sections()) + + # Check for duplicate options in sub-sections + sanity_check_subsections(cp) + print(cp.sections()) + + # Dump parsed .ini file if needed + if parsed_filepath: + fp = open(parsed_filepath,'w') + cp.write(fp) + fp.close() + + return cp
+ + + +
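A short usage sketch; ``workflow.ini`` and ``parsed.ini`` are placeholder file names:

from pycbc.workflow.configparser_test import parse_workflow_ini_file

# Parse the ini file and, optionally, dump the parsed result for inspection.
cp = parse_workflow_ini_file('workflow.ini', parsed_filepath='parsed.ini')
print(cp.sections())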
+[docs] +def read_ini_file(cpFile): + """Read a .ini file and return it as a ConfigParser class. + This function does none of the parsing/combining of sections. It simply + reads the file and returns it unedited + + Parameters + ---------- + cpFile : The path to a .ini file to be read in + + Returns + ------- + cp: The ConfigParser class containing the read in .ini file + """ + + # Initialise ConfigParser class + cp = ConfigParser.ConfigParser(\ + interpolation=ConfigParser.ExtendedInterpolation()) + # Read the file + fp = open(cpFile,'r') + cp.read_file(fp) + fp.close() + return cp
+ + +
+[docs] +def perform_extended_interpolation(cp,preserve_orig_file=False): + """Filter through an ini file and replace all examples of + ExtendedInterpolation formatting with the exact value. For values like + ${example} this is replaced with the value that corresponds to the option + called example ***in the same section*** + + For values like ${common|example} this is replaced with the value that + corresponds to the option example in the section [common]. Note that + in the python3 config parser this is ${common:example} but python2.7 + interprets the : the same as a = and this breaks things + + Nested interpolation is not supported here. + + Parameters + ---------- + cp: ConfigParser object + preserve_orig_file: Boolean, optional + By default the input ConfigParser object will be modified in place. If + this is set deepcopy will be used and the input will be preserved. + Default = False + + Returns + ------- + cp: parsed ConfigParser object + """ + # Deepcopy the cp object if needed + if preserve_orig_file: + cp = copy.deepcopy(cp) + + # Do not allow any interpolation of the section names + for section in cp.sections(): + for option,value in cp.items(section): + # Check the option name + newStr = interpolate_string(option,cp,section) + if newStr != option: + cp.set(section,newStr,value) + cp.remove_option(section,option) + # Check the value + newStr = interpolate_string(value,cp,section) + if newStr != value: + cp.set(section,option,newStr) + + return cp
+ + +
+[docs] +def interpolate_string(testString,cp,section): + """Take a string and replace all examples of ExtendedInterpolation formatting + within the string with the exact value. + + For values like ${example} this is replaced with the value that corresponds + to the option called example ***in the same section*** + + For values like ${common|example} this is replaced with the value that + corresponds to the option example in the section [common]. Note that + in the python3 config parser this is ${common:example} but python2.7 + interprets the : the same as a = and this breaks things + + Nested interpolation is not supported here. + + Parameters + ---------- + testString: String + The string to parse and interpolate + cp: ConfigParser + The ConfigParser object to look for the interpolation strings within + section: String + The current section of the ConfigParser object + + Returns + ---------- + testString: String + Interpolated string + """ + + # First check if any interpolation is needed and abort if not + reObj = re.search(r"\$\{.*?\}", testString) + while reObj: + # group(0) is the first ${....} match; stripping the leading '${' and + # trailing '}' leaves the string contained within ${....} + repString = (reObj).group(0)[2:-1] + # Need to test which of the two formats we have + splitString = repString.split('|') + if len(splitString) == 1: + testString = testString.replace('${'+repString+'}',\ + cp.get(section,splitString[0])) + if len(splitString) == 2: + testString = testString.replace('${'+repString+'}',\ + cp.get(splitString[0],splitString[1])) + reObj = re.search(r"\$\{.*?\}", testString) + + return testString
+ + +
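A self-contained example of the two interpolation forms handled above; the section and option names are invented for illustration:

import configparser
from pycbc.workflow.configparser_test import interpolate_string

cp = configparser.ConfigParser()
cp.add_section('common')
cp.add_section('inspiral')
cp.set('common', 'sample-rate', '4096')
cp.set('inspiral', 'segment-length', '256')

# Same-section reference: ${option}
print(interpolate_string('${segment-length}', cp, 'inspiral'))       # 256
# Cross-section reference: ${section|option}
print(interpolate_string('rate is ${common|sample-rate}', cp, 'inspiral'))
# rate is 4096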
+[docs] +def split_multi_sections(cp,preserve_orig_file=False): + """Parse through a supplied ConfigParser object and splits any sections + labelled with an "&" sign (for e.g. [inspiral&tmpltbank]) into [inspiral] + and [tmpltbank] sections. If these individual sections already exist they + will be appended to. If an option exists in both the [inspiral] and + [inspiral&tmpltbank] sections an error will be thrown + + Parameters + ---------- + cp: The ConfigParser class + preserve_orig_file: Boolean, optional + By default the input ConfigParser object will be modified in place. If + this is set deepcopy will be used and the input will be preserved. + Default = False + + Returns + ---------- + cp: The ConfigParser class + """ + # Deepcopy the cp object if needed + if preserve_orig_file: + cp = copy.deepcopy(cp) + + # Begin by looping over all sections + for section in cp.sections(): + # Only continue if section needs splitting + if '&' not in section: + continue + # Get list of section names to add these options to + splitSections = section.split('&') + for newSec in splitSections: + # Add sections if they don't already exist + if not cp.has_section(newSec): + cp.add_section(newSec) + add_options_to_section(cp,newSec,cp.items(section)) + cp.remove_section(section) + return cp
+ + +
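And a sketch of the section-splitting behaviour, again with invented section and option names:

import configparser
from pycbc.workflow.configparser_test import split_multi_sections

cp = configparser.ConfigParser()
cp.add_section('inspiral&tmpltbank')
cp.set('inspiral&tmpltbank', 'low-frequency-cutoff', '20')

cp = split_multi_sections(cp)
print(sorted(cp.sections()))                        # ['inspiral', 'tmpltbank']
print(cp.get('tmpltbank', 'low-frequency-cutoff'))  # 20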
+[docs] +def sanity_check_subsections(cp): + """This function goes through the ConfigParser and checks that any options + given in the [SECTION_NAME] section are not also given in any + [SECTION_NAME-SUBSECTION] sections. + + Parameters + ---------- + cp: The ConfigParser class + + Returns + ---------- + None + """ + # Loop over the sections in the ini file + for section in cp.sections(): + # Loop over the sections again + for section2 in cp.sections(): + # Check if any are subsections of section + if section2.startswith(section + '-'): + # Check for duplicate options whenever this exists + check_duplicate_options(cp,section,section2,raise_error=True)
+ + +
+[docs] +def add_options_to_section(cp,section,items,preserve_orig_file=False,\ + overwrite_options=False): + """Add a set of options and values to a section of a ConfigParser object. + Will throw an error if any of the options being added already exist; this + behaviour can be overridden if desired + + Parameters + ---------- + cp: The ConfigParser class + section: string + The name of the section to add options+values to + items: list of tuples + Each tuple contains (at [0]) the option and (at [1]) the value to add + to the section of the ini file + preserve_orig_file: Boolean, optional + By default the input ConfigParser object will be modified in place. If + this is set deepcopy will be used and the input will be preserved. + Default = False + overwrite_options: Boolean, optional + By default this function will throw a ValueError if an option exists in + both the original section in the ConfigParser *and* in the provided + items. This will override so that the options+values given in items + will replace the original values if the value is set to True. + Default = False + Returns + ---------- + cp: The ConfigParser class + """ + # Sanity checking + if not cp.has_section(section): + raise ValueError('Section %s not present in ConfigParser.' %(section,)) + + # Deepcopy the cp object if needed + if preserve_orig_file: + cp = copy.deepcopy(cp) + + # Check for duplicate options first + for option,value in items: + if not overwrite_options: + if option in cp.options(section): + raise ValueError('Option %s exists in both original' + \ + 'ConfigParser and input list' %(option,)) + cp.set(section,option,value) + + return cp
+ + +
+[docs] +def check_duplicate_options(cp,section1,section2,raise_error=False): + """Check for duplicate options in two sections, section1 and section2. + Returns the list of duplicated options (an empty list if there are none) + + + Parameters + ---------- + cp: The ConfigParser class + section1: string + The name of the first section to compare + section2: string + The name of the second section to compare + raise_error: Boolean, optional + If True, raise an error if duplicates are present. + Default = False + + Returns + ---------- + duplicate: List + List of duplicate options + """ + # Sanity checking + if not cp.has_section(section1): + raise ValueError('Section %s not present in ConfigParser.'%(section1,)) + if not cp.has_section(section2): + raise ValueError('Section %s not present in ConfigParser.'%(section2,)) + + items1 = cp.items(section1) + items2 = cp.items(section2) + + # The list comprehension here creates a list of all duplicate items + duplicates = [x for x in items1 if x in items2] + + if duplicates and raise_error: + raise ValueError('The following options appear in both section ' +\ + '%s and %s: %s' \ + %(section1,section2,' '.join(map(str, duplicates)))) + + return duplicates
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/configuration.html b/latest/html/_modules/pycbc/workflow/configuration.html new file mode 100644 index 00000000000..614adb87dec --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/configuration.html @@ -0,0 +1,704 @@ + + + + + + pycbc.workflow.configuration — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.workflow.configuration

+# Copyright (C) 2013,2017 Ian Harry, Duncan Brown
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides a wrapper to the ConfigParser utilities for pycbc
+workflow construction. This module is described in the page here:
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/initialization_inifile.html
+"""
+
+import os
+import logging
+import stat
+import shutil
+import subprocess
+from shutil import which
+import urllib.parse
+from urllib.parse import urlparse
+import hashlib
+
+from pycbc.types.config import InterpolatingConfigParser
+
+logger = logging.getLogger('pycbc.workflow.configuration')
+
+# NOTE urllib is weird. For some reason it only allows known schemes and will
+# give *wrong* results, rather than failing, if you use something like gsiftp
+# We can add schemes explicitly, as below, but be careful with this!
+urllib.parse.uses_relative.append('osdf')
+urllib.parse.uses_netloc.append('osdf')
+
+
+
+[docs] +def hash_compare(filename_1, filename_2, chunk_size=None, max_chunks=None): + """ + Compare the sha1 hashes of two files, either whole or chunk by chunk + + Parameters + ---------- + filename_1 : string or path + the first file to be hashed / compared + filename_2 : string or path + the second file to be hashed / compared + chunk_size : integer + The size of the chunks to be read in and hashed. If not given, will read + and hash the whole file (may be slow for large files). + max_chunks: integer + The maximum number of chunks to compare. If all chunks compared so far + have been the same, the files are assumed to be identical. Default 10 + when chunk_size is given + + Returns + ------- + same : bool + True if the compared sha1 hashes of (part of) the two files match, + False otherwise + """ + + if max_chunks is None and chunk_size is not None: + max_chunks = 10 + elif chunk_size is None: + max_chunks = 1 + + with open(filename_1, 'rb') as f1: + with open(filename_2, 'rb') as f2: + for _ in range(max_chunks): + h1 = hashlib.sha1(f1.read(chunk_size)).hexdigest() + h2 = hashlib.sha1(f2.read(chunk_size)).hexdigest() + if h1 != h2: + return False + return True
+ + + +
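A hedged usage sketch; the file names are placeholders, and, as the code above shows, the function returns a boolean rather than a hash string:

from pycbc.workflow.configuration import hash_compare

# Compare two copies of a file 1 MiB at a time; with chunk_size given,
# at most the default of 10 chunks are compared before assuming a match.
if hash_compare('bank_A.hdf', 'bank_B.hdf', chunk_size=1024 * 1024):
    print('files appear identical')
else:
    print('files differ')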
+[docs] +def resolve_url( + url, + directory=None, + permissions=None, + copy_to_cwd=True, + hash_max_chunks=None, + hash_chunk_size=None, +): + """Resolves a URL to a local file, and returns the path to that file. + + If a URL is given, the file will be copied to the current working + directory. If a local file path is given, the file will only be copied + to the current working directory if ``copy_to_cwd`` is ``True`` + (the default). + """ + + u = urlparse(url) + + # determine whether the file exists locally + islocal = u.scheme == "" or u.scheme == "file" + + if not islocal or copy_to_cwd: + # create the name of the destination file + if directory is None: + directory = os.getcwd() + filename = os.path.join(directory, os.path.basename(u.path)) + else: + filename = u.path + + if islocal: + # check that the file exists + if not os.path.isfile(u.path): + errmsg = "Cannot open file %s from URL %s" % (u.path, url) + raise ValueError(errmsg) + # for regular files, make a direct copy if requested + elif copy_to_cwd: + if os.path.isfile(filename): + # check to see if src and dest are the same file + same_file = hash_compare( + u.path, + filename, + chunk_size=hash_chunk_size, + max_chunks=hash_max_chunks + ) + if not same_file: + shutil.copy(u.path, filename) + else: + shutil.copy(u.path, filename) + + elif u.scheme == "http" or u.scheme == "https": + # Would like to move ciecplib import to top using import_optional, but + # it needs to be available when documentation runs in the CI, and I + # can't get it to install in the GitHub CI + import ciecplib + # Make the scitokens logger a little quieter + # (it is called through ciecpclib) + curr_level = logging.getLogger().level + logging.getLogger('scitokens').setLevel(curr_level + 10) + with ciecplib.Session() as s: + if u.netloc in ("git.ligo.org", "code.pycbc.phy.syr.edu"): + # authenticate with git.ligo.org using callback + s.get("https://git.ligo.org/users/auth/shibboleth/callback") + r = s.get(url, allow_redirects=True) + r.raise_for_status() + + output_fp = open(filename, "wb") + output_fp.write(r.content) + output_fp.close() + + elif u.scheme == "osdf": + # OSDF will require a scitoken to be present and stashcp to be + # available. Thanks Dunky for the code here! + cmd = [ + which("stashcp") or "stashcp", + u.path, + filename, + ] + + try: + subprocess.run(cmd, check=True, capture_output=True) + except subprocess.CalledProcessError as err: + # Print information about the failure + print(err.cmd, "failed with") + print(err.stderr.decode()) + print(err.stdout.decode()) + raise + + return filename + + else: + # TODO: We could support other schemes as needed + errmsg = "Unknown URL scheme: %s\n" % (u.scheme) + errmsg += "Currently supported are: file, http, and https." + raise ValueError(errmsg) + + if not os.path.isfile(filename): + errmsg = "Error trying to create file %s from %s" % (filename, url) + raise ValueError(errmsg) + + if permissions: + if os.access(filename, os.W_OK): + os.chmod(filename, permissions) + else: + # check that the file has at least the permissions requested + s = os.stat(filename)[stat.ST_MODE] + if (s & permissions) != permissions: + errmsg = "Could not change permissions on %s (read-only)" % url + raise ValueError(errmsg) + + return filename
+ + + +
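Two illustrative calls (paths and URLs are placeholders); in both cases a local file path is returned:

from pycbc.workflow.configuration import resolve_url

# A local file: copied into the current working directory because
# copy_to_cwd defaults to True.
local_ini = resolve_url('/home/albert.einstein/configs/analysis.ini')

# A remote file: downloaded into the current working directory.
veto_definer = resolve_url('https://example.org/path/to/veto_definer.xml')

print(local_ini, veto_definer)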
+[docs] +def add_workflow_command_line_group(parser): + """ + The standard way of initializing a ConfigParser object in workflow will be + to do it from the command line. This is done by giving a + + --local-config-files filea.ini fileb.ini filec.ini + + command. You can also set config file override commands on the command + line. This will be most useful when setting (for example) start and + end times, or active ifos. This is done by + + --config-overrides section1:option1:value1 section2:option2:value2 ... + + This can also be given as + + --config-overrides section1:option1 + + where the value will be left as ''. + + To remove a configuration option, use the command line argument + + --config-delete section1:option1 + + which will delete option1 from [section1] or + + --config-delete section1 + + to delete all of the options in [section1] + + Deletes are implemented before overrides. + + This function returns an argparse OptionGroup to ensure these options are + parsed correctly and can then be sent directly to initialize an + WorkflowConfigParser. + + Parameters + ----------- + parser : argparse.ArgumentParser instance + The initialized argparse instance to add the workflow option group to. + """ + workflowArgs = parser.add_argument_group( + "Configuration", "Options needed for parsing " "config file(s)." + ) + workflowArgs.add_argument( + "--config-files", + nargs="+", + action="store", + metavar="CONFIGFILE", + help="List of config files to be used in " "analysis.", + ) + workflowArgs.add_argument( + "--config-overrides", + nargs="*", + action="store", + metavar="SECTION:OPTION:VALUE", + help="List of section,option,value combinations to " + "add into the configuration file. Normally the gps " + "start and end times might be provided this way, " + "and user specific locations (ie. output directories). " + "This can also be provided as SECTION:OPTION or " + "SECTION:OPTION: both of which indicate that the " + "corresponding value is left blank.", + ) + workflowArgs.add_argument( + "--config-delete", + nargs="*", + action="store", + metavar="SECTION:OPTION", + help="List of section,option combinations to delete " + "from the configuration file. This can also be " + "provided as SECTION which deletes the enture section" + " from the configuration file or SECTION:OPTION " + "which deletes a specific option from a given " + "section.", + )
+ + + +
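A sketch of the intended command-line flow, tying this helper to the WorkflowConfigParser defined below; the file name and the override value are placeholders:

import argparse
from pycbc.workflow.configuration import (add_workflow_command_line_group,
                                          WorkflowConfigParser)

parser = argparse.ArgumentParser()
add_workflow_command_line_group(parser)
args = parser.parse_args(['--config-files', 'analysis.ini',
                          '--config-overrides',
                          'workflow:start-time:1126051217'])

# Turn the SECTION:OPTION:VALUE strings into the tuples expected below.
overrides = [tuple(ov.split(':', 2)) for ov in (args.config_overrides or [])]
cp = WorkflowConfigParser(configFiles=args.config_files,
                          overrideTuples=overrides)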
+[docs] +class WorkflowConfigParser(InterpolatingConfigParser): + """ + This is a sub-class of InterpolatingConfigParser, which lets + us add a few additional helper features that are useful in workflows. + """ + + def __init__( + self, + configFiles=None, + overrideTuples=None, + parsedFilePath=None, + deleteTuples=None, + copy_to_cwd=False, + ): + """ + Initialize an WorkflowConfigParser. This reads the input configuration + files, overrides values if necessary and performs the interpolation. + See https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/initialization_inifile.html + + Parameters + ----------- + configFiles : Path to .ini file, or list of paths + The file(s) to be read in and parsed. + overrideTuples : List of (section, option, value) tuples + Add the (section, option, value) triplets provided + in this list to the provided .ini file(s). If the section, option + pair is already present, it will be overwritten. + parsedFilePath : Path, optional (default=None) + If given, write the parsed .ini file back to disk at this location. + deleteTuples : List of (section, option) tuples + Delete the (section, option) pairs provided + in this list from provided .ini file(s). If the section only + is provided, the entire section will be deleted. + copy_to_cwd : bool, optional + Copy the configuration files to the current working directory if + they are not already there, even if they already exist locally. + If False, files will only be copied to the current working + directory if they are remote. Default is False. + + Returns + -------- + WorkflowConfigParser + Initialized WorkflowConfigParser instance. + """ + if configFiles is not None: + configFiles = [ + resolve_url(cFile, copy_to_cwd=copy_to_cwd) + for cFile in configFiles + ] + + InterpolatingConfigParser.__init__( + self, + configFiles, + overrideTuples, + parsedFilePath, + deleteTuples, + skip_extended=True, + ) + # expand executable which statements + self.perform_exe_expansion() + + # Resolve any URLs needing resolving + self.curr_resolved_files = {} + self.resolve_urls() + + # Check for any substitutions that can be made + self.perform_extended_interpolation() + +
+[docs] + def perform_exe_expansion(self): + """ + This function will look through the executables section of the + ConfigParser object and replace any values using macros with full paths. + + For any values that look like + + ${which:lalapps_tmpltbank} + + will be replaced with the equivalent of which(lalapps_tmpltbank) + + Otherwise values will be unchanged. + """ + # Only works on executables section + if self.has_section("executables"): + for option, value in self.items("executables"): + # Check the value + newStr = self.interpolate_exe(value) + if newStr != value: + self.set("executables", option, newStr)
+ + +
+[docs] + def interpolate_exe(self, testString): + """ + Replace testString with a path to an executable based on the format. + + If this looks like + + ${which:lalapps_tmpltbank} + + it will return the equivalent of which(lalapps_tmpltbank) + + Otherwise it will return an unchanged string. + + Parameters + ----------- + testString : string + The input string + + Returns + -------- + newString : string + The output string. + """ + # First check if any interpolation is needed and abort if not + testString = testString.strip() + if not (testString.startswith("${") and testString.endswith("}")): + return testString + + # This may not be an exe interpolation, so even if it has ${ ... } form + # I may not have to do anything + newString = testString + + # Strip the ${ and } + testString = testString[2:-1] + + testList = testString.split(":") + + # Maybe we can add a few different possibilities for substitution + if len(testList) == 2: + if testList[0] == "which": + newString = which(testList[1]) + if not newString: + errmsg = "Cannot find exe %s in your path " % (testList[1]) + errmsg += "and you specified ${which:%s}." % (testList[1]) + raise ValueError(errmsg) + + return newString
+ + +
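The ``${which:...}`` macro above is what keeps the [executables] section portable between clusters. An illustrative ini fragment (the executable names and path are examples, not a prescription):

[executables]
; resolved to the full path returned by which() when the file is parsed
inspiral = ${which:pycbc_inspiral}
tmpltbank = ${which:pycbc_geom_aligned_bank}
; values that are not of the ${...} form are left unchanged
plotting = /usr/local/bin/pycbc_plot_example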
+[docs] + def section_to_cli(self, section, skip_opts=None): + """Converts a section into a command-line string. + + For example: + + .. code:: + + [section_name] + foo = + bar = 10 + + yields: `'--foo --bar 10'`. + + Parameters + ---------- + section : str + The name of the section to convert. + skip_opts : list, optional + List of options to skip. Default (None) results in all options + in the section being converted. + + Returns + ------- + str : + The options as a command-line string. + """ + if skip_opts is None: + skip_opts = [] + read_opts = [ + opt for opt in self.options(section) if opt not in skip_opts + ] + opts = [] + for opt in read_opts: + opts.append("--{}".format(opt)) + val = self.get(section, opt) + if val != "": + opts.append(val) + return " ".join(opts)
+ + +
+[docs] + def get_cli_option(self, section, option_name, **kwds): + """Return option using CLI action parsing + + Parameters + ---------- + section: str + Section to find option to parse + option_name: str + Name of the option to parse from the config file + kwds: keywords + Additional keywords are passed directly to the argument parser. + + Returns + ------- + value: + The parsed value for this option + """ + import argparse + + optstr = self.section_to_cli(section) + parser = argparse.ArgumentParser() + name = "--" + option_name.replace("_", "-") + parser.add_argument(name, **kwds) + args, _ = parser.parse_known_args(optstr.split()) + return getattr(args, option_name)
+ + +
+[docs] + def resolve_urls(self): + """ + This function will look through all sections of the + ConfigParser object and replace any URLs that are given the resolve + magic flag with a path on the local drive. + + Specifically for any values that look like + + ${resolve:https://git.ligo.org/detchar/SOME_GATING_FILE.txt} + + the file will be replaced with the output of resolve_url(URL) + + Otherwise values will be unchanged. + """ + # Check every option in every section + for section in self.sections(): + for option, value in self.items(section): + # Check the value + value_l = value.split(' ') + new_str_l = [self.resolve_file_url(val) for val in value_l] + new_str = ' '.join(new_str_l) + if new_str is not None and new_str != value: + self.set(section, option, new_str)
+ + +
+[docs] + def resolve_file_url(self, test_string): + """ + Replace test_string with a local file path if it uses the resolve magic + format. + + If this looks like + + ${resolve:https://git.ligo.org/detchar/SOME_GATING_FILE.txt} + + it will return the local path obtained by passing that URL to + resolve_url + + Otherwise it will return an unchanged string. + + Parameters + ----------- + test_string : string + The input string + + Returns + -------- + new_string : string + The output string. + """ + # First check if any interpolation is needed and abort if not + test_string = test_string.strip() + if not (test_string.startswith("${") and test_string.endswith("}")): + return test_string + + # This may not be a "resolve" interpolation, so even if it has + # ${ ... } form I may not have to do anything + + # Strip the ${ and } + test_string_strip = test_string[2:-1] + + test_list = test_string_strip.split(":", 1) + + if len(test_list) == 2: + if test_list[0] == "resolve": + curr_lfn = os.path.basename(test_list[1]) + if curr_lfn in self.curr_resolved_files: + return self.curr_resolved_files[curr_lfn] + local_url = resolve_url(test_list[1]) + self.curr_resolved_files[curr_lfn] = local_url + return local_url + + return test_string
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/core.html b/latest/html/_modules/pycbc/workflow/core.html new file mode 100644 index 00000000000..412e44f2751 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/core.html @@ -0,0 +1,2613 @@ + + + + + + pycbc.workflow.core — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.workflow.core

+# Copyright (C) 2013, 2017  Ian Harry, Alex Nitz, Duncan Brown
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module provides the worker functions and classes that are used when
+creating a workflow. For details about the workflow module see here:
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope.html
+"""
+import os
+import stat
+import subprocess
+import logging
+import math
+import string
+import urllib
+import pickle
+import copy
+import configparser as ConfigParser
+from urllib.request import pathname2url
+from urllib.parse import urljoin
+import numpy
+import random
+from itertools import combinations, groupby, permutations
+from operator import attrgetter
+
+import lal
+import lal.utils
+import Pegasus.api  # Try and move this into pegasus_workflow
+from ligo import segments
+from ligo.lw import lsctables, ligolw
+from ligo.lw import utils as ligolw_utils
+from ligo.lw.utils import segments as ligolw_segments
+
+from pycbc import makedir
+from pycbc.io.ligolw import LIGOLWContentHandler, create_process_table
+
+from . import pegasus_workflow
+from .configuration import WorkflowConfigParser, resolve_url
+from .pegasus_sites import make_catalog
+
+logger = logging.getLogger('pycbc.workflow.core')
+
+
+
+[docs] +def make_analysis_dir(path): + """ + Make the analysis directory path, any parent directories that don't already + exist, and the 'logs' subdirectory of path. + """ + if path is not None: + makedir(os.path.join(path, 'logs'))
+ + + +file_input_from_config_dict = {} + +
+[docs] +class Executable(pegasus_workflow.Executable): + # These are the file retention levels + DO_NOT_KEEP = 0 + INTERMEDIATE_PRODUCT = 1 + ALL_TRIGGERS = 2 + MERGED_TRIGGERS = 3 + FINAL_RESULT = 4 + + # Set this parameter to indicate that this option is used to specify a + # file and is *not* handled explicitly in the create_node or __init__ + # methods of the sub-class. Usually that is to say that this option is a + # file and is normally specified in an file, e.g. a PSD file. As files + # need to be identified as such to pegasus, this attempts to catch this + # case. + # These are standard file input arguments used in PyCBC, so we declare + # these as files if given to any PyCBC job. + file_input_options = ['--gating-file', '--frame-files', '--injection-file', + '--statistic-files', '--bank-file', '--config-files', + '--psd-file', '--asd-file', + '--fake-strain-from-file', + '--sgburst-injection-file'] + + # Set this parameter to indicate that this option should take different + # values based on the time. E.g. something like + # --option1 value1[0:1000],value2[1000:2000] + # would be replaced with --option1 value1 if the time is within 0,1000 and + # value2 if in 1000,2000. A failure will be replaced if the job time is + # not fully contained in one of these windows, or if fully contained in + # multiple of these windows. This is resolved when creating the Job from + # the Executable + time_dependent_options = [] + + # This is the default value. It will give a warning if a class is + # used where the retention level is not set. The file will still be stored + KEEP_BUT_RAISE_WARNING = 5 + _warned_classes_list = ['Executable'] + + # Sub classes, or instances, should override this. If not overriden the + # file will be retained, but a warning given + current_retention_level = KEEP_BUT_RAISE_WARNING + def __init__(self, cp, name, ifos=None, out_dir=None, tags=None, + reuse_executable=True, set_submit_subdir=True): + """ + Initialize the Executable class. + + Parameters + ----------- + cp : ConfigParser object + The ConfigParser object holding the workflow configuration settings + exec_name : string + Executable name + universe : string, optional + Condor universe to run the job in + ifos : string or list, optional + The ifo(s) that the Job is valid for. If the job is + independently valid for multiple ifos it can be provided as a list. + Ie. ['H1',L1','V1'], if the job is only valid for the combination + of ifos (for e.g. ligolw_thinca) then this can be supplied + as, for e.g. "H1L1V1". + out_dir: path, optional + The folder to store output files of this job. + tags : list of strings + A list of strings that is used to identify this job. 
+ """ + if isinstance(ifos, str): + self.ifo_list = [ifos] + else: + self.ifo_list = ifos + if self.ifo_list is not None: + self.ifo_list = sorted(self.ifo_list) + self.ifo_string = ''.join(self.ifo_list) + else: + self.ifo_string = None + self.cp = cp + self.name = name + self.container_cls = None + self.container_type = None + + try: + self.installed = cp.getboolean('pegasus_profile-%s' % name, + 'pycbc|installed') + except: + self.installed = False + + self.update_current_tags(tags) + + self.update_output_directory(out_dir=out_dir) + + # Determine the level at which output files should be kept + if cp.has_option_tags('pegasus_profile-%s' % name, + 'pycbc|retention_level', tags): + # Get the retention_level from the config file + # This method allows us to use the retention levels + # defined above + cfg_ret_level = cp.get_opt_tags('pegasus_profile-%s' % name, + 'pycbc|retention_level', tags) + self.current_retention_level = getattr(self, cfg_ret_level) + + self.update_current_retention_level(self.current_retention_level) + + # Should I reuse this executable? + if reuse_executable: + self.pegasus_name = self.name + else: + self.pegasus_name = self.tagged_name + + # Check that the executable actually exists locally or + # looks like a URL, in which case trust Pegasus to be + # able to fetch it. + exe_path = cp.get('executables', name) + self.needs_fetching = False + + exe_url = urllib.parse.urlparse(exe_path) + + # See if the user specified a list of sites for the executable + # Ordering is: + # 1) Check if a specific site for this Executable is set. + # 2) Check is primary_site is set globally. + # 3) Use condorpool_symlink as a fallback. + self.exe_pfns = {} + if cp.has_option_tags('pegasus_profile-%s' % name, 'pycbc|site', tags): + exe_site = cp.get_opt_tags('pegasus_profile-%s' % name, + 'pycbc|site', tags) + elif cp.has_option('pegasus_profile', 'pycbc|primary_site'): + exe_site = cp.get('pegasus_profile', 'pycbc|primary_site') + else: + exe_site = 'condorpool_symlink' + + exe_site = exe_site.strip() + + if exe_url.scheme in ['', 'file']: + # NOTE: There could be a case where the exe is available at a + # remote site, but not on the submit host. Currently allowed + # for the OSG site, versioning will not work as planned if + # we can't see the executable (can we perhaps run versioning + # including singularity??) + + # Check that executables at file urls + # on the local site exist + if os.path.isfile(exe_url.path) is False: + raise TypeError("Failed to find %s executable " + "at %s on site %s" % (name, exe_path, + exe_site)) + elif exe_url.scheme == 'singularity': + # Will use an executable within a singularity container. Don't + # need to do anything here, as I cannot easily check it exists. + exe_path = exe_url.path + else: + # Could be http, https, etc. so it needs fetching if run now + self.needs_fetching = True + if self.needs_fetching and not self.installed: + err_msg = "Non-file path URLs cannot be used unless the " + err_msg += "executable is a bundled standalone executable. " + err_msg += "If this is the case, then add the " + err_msg += "pycbc.installed=True property." + raise ValueError(err_msg) + + if self.installed: + # Is installed, so copy from local site, like other inputs + self.exe_pfns['local'] = exe_path + else: + # We must rely on the executables, and accompanying libraries, + # being directly accessible on the execution site. + # CVMFS is perfect for this! As is singularity. 
+ self.exe_pfns[exe_site] = exe_path + logger.debug( + "Using %s executable at %s on site %s", + name, + exe_url.path, + exe_site + ) + + # FIXME: This hasn't yet been ported to pegasus5 and won't work. + # Pegasus describes two ways to work with containers, and I need + # to figure out which is most appropriate and use that. + # Determine if this executables should be run in a container + try: + self.container_type = cp.get('pegasus_profile-%s' % name, + 'container|type') + except: + pass + + if self.container_type is not None: + # FIXME: Move the actual container setup into pegasus_workflow + self.container_img = cp.get('pegasus_profile-%s' % name, + 'container|image') + try: + self.container_site = cp.get('pegasus_profile-%s' % name, + 'container|image_site') + except: + self.container_site = 'local' + + try: + self.container_mount = cp.get('pegasus_profile-%s' % name, + 'container|mount').split(',') + except: + self.container_mount = None + + + self.container_cls = Pegasus.api.Container("{}-container".format( + name), + self.container_type, + self.container_img, + imagesite=self.container_site, + mount=self.container_mount) + + super(Executable, self).__init__(self.pegasus_name, + installed=self.installed, + container=self.container_cls) + + else: + super(Executable, self).__init__(self.pegasus_name, + installed=self.installed) + + if hasattr(self, "group_jobs"): + self.add_profile('pegasus', 'clusters.size', self.group_jobs) + + # This sets up the sub-directory to use in the submit directory + if set_submit_subdir: + self.add_profile('pegasus', 'relative.submit.dir', + self.pegasus_name) + + # Set configurations from the config file, these should override all + # other settings + self._set_pegasus_profile_options() + + self.execution_site = exe_site + self.executable_url = exe_path + + @property + def ifo(self): + """Return the ifo. + + If only one ifo in the ifo list this will be that ifo. Otherwise an + error is raised. + """ + if self.ifo_list and len(self.ifo_list) == 1: + return self.ifo_list[0] + else: + errMsg = "self.ifoList must contain only one ifo to access the " + errMsg += "ifo property. %s." %(str(self.ifo_list),) + raise TypeError(errMsg) + +
+[docs] + def get_transformation(self): + if self.execution_site in self.transformations: + return self.transformations[self.execution_site] + else: + self.create_transformation(self.execution_site, + self.executable_url) + return self.get_transformation()
+ + +
+[docs] + def add_ini_profile(self, cp, sec): + """Add profile from configuration file. + + Parameters + ----------- + cp : ConfigParser object + The ConfigParser object holding the workflow configuration settings + sec : string + The section containing options for this job. + """ + for opt in cp.options(sec): + namespace = opt.split('|')[0] + if namespace == 'pycbc' or namespace == 'container': + continue + + value = cp.get(sec, opt).strip() + key = opt.split('|')[1] + self.add_profile(namespace, key, value)
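For example, given a pegasus_profile section of the following form (the profile keys and values are illustrative, and exe is assumed to be an Executable instance), add_ini_profile forwards everything except the pycbc and container namespaces to Pegasus:

    # [pegasus_profile-inspiral]           (illustrative section)
    # condor|request_memory = 4000
    # condor|request_cpus = 1
    # pycbc|installed = True               (skipped: 'pycbc' namespace)
    #
    # is equivalent to calling
    exe.add_profile('condor', 'request_memory', '4000')
    exe.add_profile('condor', 'request_cpus', '1')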
+ + + def _add_ini_opts(self, cp, sec, ignore_existing=False): + """Add job-specific options from configuration file. + + Parameters + ----------- + cp : ConfigParser object + The ConfigParser object holding the workflow configuration + settings + sec : string + The section containing options for this job. + """ + for opt in cp.options(sec): + if opt in self.all_added_options: + if ignore_existing: + continue + else: + raise ValueError("Option %s has already been added" % opt) + self.all_added_options.add(opt) + + value = cp.get(sec, opt).strip() + opt = f'--{opt}' + if opt in self.file_input_options: + # This now expects the option to be a file + # Check if we have a list of files + values = [path for path in value.split(' ') if path] + + self.common_raw_options.append(opt) + self.common_raw_options.append(' ') + + # Get LFN and PFN + for path in values: + # Here I decide if the path is URL or + # IFO:/path/to/file or IFO:url://path/to/file + # That's somewhat tricksy as we used : as delimiter + split_path = path.split(':', 1) + if len(split_path) == 1: + ifo = None + path = path + else: + # Have I split a URL or not? + if split_path[1].startswith('//'): + # URL + ifo = None + path = path + else: + #IFO:path or IFO:URL + ifo = split_path[0] + path = split_path[1] + + # If the file exists make sure to use the + # fill path as a file:// URL + if os.path.isfile(path): + curr_pfn = urljoin('file:', + pathname2url(os.path.abspath(path))) + else: + curr_pfn = path + + curr_file = resolve_url_to_file(curr_pfn) + self.common_input_files.append(curr_file) + if ifo: + self.common_raw_options.append(ifo + ':') + self.common_raw_options.append(curr_file.dax_repr) + else: + self.common_raw_options.append(curr_file.dax_repr) + self.common_raw_options.append(' ') + elif opt in self.time_dependent_options: + # There is a possibility of time-dependent, file options. + # For now we will avoid supporting that complication unless + # it is needed. This would require resolving the file first + # in this function, and then dealing with the time-dependent + # stuff later. + self.unresolved_td_options[opt] = value + else: + # This option comes from the config file(s) + self.common_options += [opt, value] + +
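The IFO-prefix handling in _add_ini_opts can be summarised with a small standalone sketch; this is a re-statement of the splitting logic above for illustration only, and the paths are invented:

    def split_ifo_path(path):
        """Sketch of the IFO-prefix splitting done in _add_ini_opts."""
        ifo = None
        split_path = path.split(':', 1)
        if len(split_path) == 2 and not split_path[1].startswith('//'):
            # IFO:/path/to/file or IFO:url://path/to/file
            ifo, path = split_path
        return ifo, path

    assert split_ifo_path('/data/H1L1-BANK.hdf') == (None, '/data/H1L1-BANK.hdf')
    assert split_ifo_path('H1:/data/H1-BANK.hdf') == ('H1', '/data/H1-BANK.hdf')
    assert split_ifo_path('https://example.org/bank.hdf') == (None, 'https://example.org/bank.hdf')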
+[docs] + def add_opt(self, opt, value=None): + """Add option to job. + + Parameters + ----------- + opt : string + Name of option (e.g. --output-file-format) + value : string, (default=None) + The value for the option (no value if set to None). + """ + if value is None: + self.common_options += [opt] + else: + self.common_options += [opt, value]
+ + +
+[docs] + def get_opt(self, opt): + """Get value of option from configuration file + + Parameters + ----------- + opt : string + Name of option (e.g. output-file-format) + + Returns + -------- + value : string + The value for the option. Returns None if option not present. + """ + for sec in self.sections: + try: + key = self.cp.get(sec, opt) + if key: + return key + except ConfigParser.NoOptionError: + pass + + return None
+ + +
+[docs] + def has_opt(self, opt): + """Check if option is present in configuration file + + Parameters + ----------- + opt : string + Name of option (e.g. output-file-format) + """ + for sec in self.sections: + val = self.cp.has_option(sec, opt) + if val: + return val + + return False
+ + +
+[docs] + def create_node(self, **kwargs): + """Default node constructor. + + This is usually overridden by subclasses of Executable. + """ + return Node(self, **kwargs)
+ + +
+[docs] + def update_current_retention_level(self, value): + """Set a new value for the current retention level. + + This updates the value of self.retain_files for an updated value of the + retention level. + + Parameters + ----------- + value : int + The new value to use for the retention level. + """ + # Determine the level at which output files should be kept + self.current_retention_level = value + try: + global_retention_level = \ + self.cp.get_opt_tags("workflow", "file-retention-level", + self.tags+[self.name]) + except ConfigParser.Error: + msg="Cannot find file-retention-level in [workflow] section " + msg+="of the configuration file. Setting a default value of " + msg+="retain all files." + logger.warning(msg) + self.retain_files = True + self.global_retention_threshold = 1 + self.cp.set("workflow", "file-retention-level", "all_files") + else: + # FIXME: Are these names suitably descriptive? + retention_choices = { + 'all_files' : 1, + 'all_triggers' : 2, + 'merged_triggers' : 3, + 'results' : 4 + } + try: + self.global_retention_threshold = \ + retention_choices[global_retention_level] + except KeyError: + err_msg = "Cannot recognize the file-retention-level in the " + err_msg += "[workflow] section of the ini file. " + err_msg += "Got : {0}.".format(global_retention_level) + err_msg += "Valid options are: 'all_files', 'all_triggers'," + err_msg += "'merged_triggers' or 'results' " + raise ValueError(err_msg) + if self.current_retention_level == 5: + self.retain_files = True + if type(self).__name__ in Executable._warned_classes_list: + pass + else: + warn_msg = "Attribute current_retention_level has not " + warn_msg += "been set in class {0}. ".format(type(self)) + warn_msg += "This value should be set explicitly. " + warn_msg += "All output from this class will be stored." + logger.warning(warn_msg) + Executable._warned_classes_list.append(type(self).__name__) + elif self.global_retention_threshold > self.current_retention_level: + self.retain_files = False + else: + self.retain_files = True
+ + +
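A worked example of the comparison above, assuming exe is an Executable built against a configuration that (illustratively) sets file-retention-level = merged_triggers in the [workflow] section, giving a global threshold of 3:

    exe.update_current_retention_level(Executable.ALL_TRIGGERS)   # level 2
    print(exe.retain_files)   # False: 3 > 2, outputs are not kept
    exe.update_current_retention_level(Executable.FINAL_RESULT)   # level 4
    print(exe.retain_files)   # True: 3 <= 4, outputs are kept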
+[docs] + def update_current_tags(self, tags): + """Set a new set of tags for this executable. + + Update the set of tags that this job will use. This updated default + file naming and shared options. It will *not* update the pegasus + profile, which belong to the executable and cannot be different for + different nodes. + + Parameters + ----------- + tags : list + The new list of tags to consider. + """ + if tags is None: + tags = [] + if '' in tags: + logger.warning('DO NOT GIVE ME EMPTY TAGS (in %s)', self.name) + tags.remove('') + tags = [tag.upper() for tag in tags] + self.tags = tags + + if len(tags) > 6: + warn_msg = "This job has way too many tags. " + warn_msg += "Current tags are {}. ".format(' '.join(tags)) + warn_msg += "Current executable {}.".format(self.name) + logger.warning(warn_msg) + + if len(tags) != 0: + self.tagged_name = "{0}-{1}".format(self.name, '_'.join(tags)) + else: + self.tagged_name = self.name + if self.ifo_string is not None: + self.tagged_name = "{0}-{1}".format(self.tagged_name, + self.ifo_string) + + # Determine the sections from the ini file that will configure + # this executable + sections = [self.name] + if self.ifo_list is not None: + if len(self.ifo_list) > 1: + sec_tags = tags + self.ifo_list + [self.ifo_string] + else: + sec_tags = tags + self.ifo_list + else: + sec_tags = tags + for sec_len in range(1, len(sec_tags)+1): + for tag_permutation in permutations(sec_tags, sec_len): + joined_name = '-'.join(tag_permutation) + section = '{0}-{1}'.format(self.name, joined_name.lower()) + if self.cp.has_section(section): + sections.append(section) + + self.sections = sections + + # Do some basic sanity checking on the options + for sec1, sec2 in combinations(sections, 2): + self.cp.check_duplicate_options(sec1, sec2, raise_error=True) + + # collect the options and profile information + # from the ini file section(s) + self.all_added_options = set() + self.common_options = [] + self.common_raw_options = [] + self.unresolved_td_options = {} + self.common_input_files = [] + for sec in sections: + if self.cp.has_section(sec): + self._add_ini_opts(self.cp, sec) + else: + warn_string = "warning: config file is missing section " + warn_string += "[{0}]".format(sec) + logger.warning(warn_string) + + # get uppermost section + if self.cp.has_section(f'{self.name}-defaultvalues'): + self._add_ini_opts(self.cp, f'{self.name}-defaultvalues', + ignore_existing=True)
+ + +
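The section lookup performed above can be reproduced with a standalone sketch; the executable name, tag and ifo are illustrative values, not defaults of the package:

    from itertools import permutations

    name, tags, ifos = 'inspiral', ['FULL_DATA'], ['H1']   # illustrative
    sec_tags = [t.upper() for t in tags] + ifos
    candidates = []
    for sec_len in range(1, len(sec_tags) + 1):
        for perm in permutations(sec_tags, sec_len):
            candidates.append('{0}-{1}'.format(name, '-'.join(perm).lower()))
    print(candidates)
    # ['inspiral-full_data', 'inspiral-h1',
    #  'inspiral-full_data-h1', 'inspiral-h1-full_data']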
+[docs] + def update_output_directory(self, out_dir=None): + """Update the default output directory for output files. + + Parameters + ----------- + out_dir : string (optional, default=None) + If provided use this as the output directory. Else choose this + automatically from the tags. + """ + # Determine the output directory + if out_dir is not None: + self.out_dir = out_dir + elif len(self.tags) == 0: + self.out_dir = self.name + "_output" + else: + self.out_dir = self.tagged_name + "_output" + + if not os.path.isabs(self.out_dir): + self.out_dir = os.path.join(os.getcwd(), self.out_dir) + + # Make output directory if not there + if not os.path.isdir(self.out_dir): + make_analysis_dir(self.out_dir)
+ + + def _set_pegasus_profile_options(self): + """Set the pegasus-profile settings for this Executable. + + These are a property of the Executable and not of nodes that it will + spawn. Therefore it *cannot* be updated without also changing values + for nodes that might already have been created. Therefore this is + only called once in __init__. Second calls to this will fail. + """ + # Executable- and tag-specific profile information + for sec in self.sections: + if self.cp.has_section('pegasus_profile-{0}'.format(sec)): + self.add_ini_profile(self.cp, + 'pegasus_profile-{0}'.format(sec))
+ + + +
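A minimal sketch of how the class attributes documented above are meant to be overridden in a subclass; the class name, option names and create_node signature are assumptions made for illustration, not part of the package:

    from pycbc.workflow.core import Executable, Node

    class MyAnalysisExecutable(Executable):
        # Keep only merged trigger files from this job by default.
        current_retention_level = Executable.MERGED_TRIGGERS

        # Declare an extra (hypothetical) option whose value is a file
        # that Pegasus must track as a job input.
        file_input_options = Executable.file_input_options + ['--veto-file']

        # Allow this (hypothetical) option to take time-dependent values
        # of the form value1[0:1000],value2[1000:2000].
        time_dependent_options = ['--channel-name']

        def create_node(self, valid_seg):
            node = Node(self, valid_seg=valid_seg)
            node.new_output_file_opt(valid_seg, '.hdf', '--output-file')
            return node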
+[docs] +class Workflow(pegasus_workflow.Workflow): + """ + This class manages a pycbc workflow. It provides convenience + functions for finding input files using time and keywords. It can also + generate cache files from the inputs. + """ + def __init__(self, args, name=None): + """ + Create a pycbc workflow + + Parameters + ---------- + args : argparse.ArgumentParser + The command line options to initialize a CBC workflow. + """ + # Parse ini file + self.cp = WorkflowConfigParser.from_cli(args) + self.args = args + + if hasattr(args, 'dax_file'): + dax_file = args.dax_file or None + else: + dax_file = None + + if hasattr(args, 'dax_file_directory'): + output_dir = args.dax_file_directory or args.output_dir or None + else: + output_dir = args.output_dir or None + + super(Workflow, self).__init__( + name=name if name is not None else args.workflow_name, + directory=output_dir, + cache_file=args.cache_file, + dax_file_name=dax_file, + ) + + # Set global values + start_time = end_time = 0 + if self.cp.has_option('workflow', 'start-time'): + start_time = int(self.cp.get("workflow", "start-time")) + + if self.cp.has_option('workflow', 'end-time'): + end_time = int(self.cp.get("workflow", "end-time")) + self.analysis_time = segments.segment([start_time, end_time]) + + # Set the ifos to analyse + ifos = [] + if self.cp.has_section('workflow-ifos'): + for ifo in self.cp.options('workflow-ifos'): + ifos.append(ifo.upper()) + + self.ifos = ifos + self.ifos.sort(key=str.lower) + self.get_ifo_combinations() + self.ifo_string = ''.join(self.ifos) + + # Set up input and output file lists for workflow + self._inputs = FileList([]) + self._outputs = FileList([]) + + # FIXME: Should this be in pegasus_workflow? + @property + def output_map(self): + args = self.args + + if hasattr(args, 'output_map') and args.output_map is not None: + return args.output_map + + if self.in_workflow is not False: + name = self.name + '.map' + else: + name = 'output.map' + + path = os.path.join(self.out_dir, name) + return path + + @property + def sites(self): + """List of all possible exucution sites for jobs in this workflow""" + sites = set() + sites.add('local') + if self.cp.has_option('pegasus_profile', 'pycbc|primary_site'): + site = self.cp.get('pegasus_profile', 'pycbc|primary_site') + else: + # The default if not chosen + site = 'condorpool_symlink' + sites.add(site) + subsections = [sec for sec in self.cp.sections() + if sec.startswith('pegasus_profile-')] + for subsec in subsections: + if self.cp.has_option(subsec, 'pycbc|site'): + site = self.cp.get(subsec, 'pycbc|site') + sites.add(site) + return list(sites) + + @property + def staging_site(self): + """Site to use for staging to/from each site""" + staging_site = {} + for site in self.sites: + if site in ['condorpool_shared']: + staging_site[site] = site + else: + staging_site[site] = 'local' + return staging_site + + @property + def staging_site_str(self): + return ','.join(['='.join(x) for x in self.staging_site.items()]) + + @property + def exec_sites_str(self): + return ','.join(self.sites) + +
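The staging-site rule used by the properties above reduces to a small sketch; the site list here is illustrative:

    sites = ['local', 'condorpool_symlink', 'condorpool_shared']
    staging_site = {s: (s if s in ['condorpool_shared'] else 'local')
                    for s in sites}
    print(staging_site)
    # {'local': 'local', 'condorpool_symlink': 'local',
    #  'condorpool_shared': 'condorpool_shared'}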
+[docs] + def execute_node(self, node, verbatim_exe = False): + """ Execute this node immediately on the local machine + """ + node.executed = True + + # Check that the PFN is for a file or path + if node.executable.needs_fetching: + try: + # The pfn may have been marked local... + pfn = node.executable.get_pfn() + except: + # or it may have been marked nonlocal. That's + # fine, we'll resolve the URL and make a local + # entry. + pfn = node.executable.get_pfn('nonlocal') + + resolved = resolve_url( + pfn, + permissions=stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR + ) + node.executable.clear_pfns() + node.executable.add_pfn(urljoin('file:', pathname2url(resolved)), + site='local') + + cmd_list = node.get_command_line() + + # Must execute in output directory. + curr_dir = os.getcwd() + out_dir = node.executable.out_dir + os.chdir(out_dir) + + # Make call + make_external_call(cmd_list, out_dir=os.path.join(out_dir, 'logs'), + out_basename=node.executable.name) + # Change back + os.chdir(curr_dir) + + for fil in node._outputs: + fil.node = None + fil.add_pfn(urljoin('file:', pathname2url(fil.storage_path)), + site='local')
+ + +
+[docs] + def save(self, filename=None, output_map_path=None, root=True): + # FIXME: Too close to pegasus to live here and not in pegasus_workflow + + if output_map_path is None: + output_map_path = self.output_map + + output_map_file = pegasus_workflow.File(os.path.basename(output_map_path)) + output_map_file.add_pfn(output_map_path, site='local') + self.output_map_file = output_map_file + + if self.in_workflow: + self._as_job.set_subworkflow_properties( + output_map_file, + staging_site=self.staging_site, + cache_file=self.cache_file + ) + self._as_job.add_planner_args(**self._as_job.pycbc_planner_args) + + # add transformations to dax + for transform in self._transformations: + self.add_transformation(transform) + + for container in self._containers: + self.add_container(container) + + # save the configuration file + ini_file = os.path.join(self.out_dir, self.name + '.ini') + + # This shouldn't already exist, but just in case + if os.path.isfile(ini_file): + err_msg = "Refusing to overwrite configuration file that " + err_msg += "shouldn't be there: " + err_msg += ini_file + raise ValueError(err_msg) + + with open(ini_file, 'w') as fp: + self.cp.write(fp) + + # save the sites file + #FIXME change to check also for submit_now if we drop pycbc_submit_dax + # this would prevent sub-workflows from making extra unused sites.yml + if not self.in_workflow: + catalog_path = os.path.join(self.out_dir, 'sites.yml') + make_catalog(self.cp, self.out_dir).write(catalog_path) + + # save the dax file + super(Workflow, self).save(filename=filename, + output_map_path=output_map_path, + submit_now=self.args.submit_now, + plan_now=self.args.plan_now, + root=root)
+ + +
+[docs] + def save_config(self, fname, output_dir, cp=None): + """ Write the configuration file to disk and return a pycbc.workflow.FileList + containing the File instance for the configuration file. + + Parameters + ----------- + fname : string + The filename of the configuration file written to disk. + output_dir : string + The directory where the file is written to disk. + cp : ConfigParser object + The ConfigParser object to write. If None then uses self.cp. + + Returns + ------- + FileList + The FileList object with the configuration file. + """ + cp = self.cp if cp is None else cp + ini_file_path = os.path.abspath(os.path.join(output_dir, fname)) + with open(ini_file_path, "w") as fp: + cp.write(fp) + ini_file = File(self.ifos, "", self.analysis_time, + file_url="file://" + ini_file_path) + # set the physical file name + ini_file.add_pfn(ini_file_path, "local") + # set the storage path to be the same + ini_file.storage_path = ini_file_path + return FileList([ini_file])
+ + +
+[docs] + def get_ifo_combinations(self): + """ + Get a list of strings for all possible combinations of IFOs + in the workflow + """ + self.ifo_combinations = [] + for n in range(len(self.ifos)): + self.ifo_combinations += [''.join(ifos).lower() for ifos in + combinations(self.ifos, n + 1)]
+
+ + + +
+[docs] +class Node(pegasus_workflow.Node): + def __init__(self, executable, valid_seg=None): + super(Node, self).__init__(executable.get_transformation()) + self.executable = executable + self.executed = False + self.set_category(executable.name) + self.valid_seg = valid_seg + + self._options += self.executable.common_options + self._raw_options += self.executable.common_raw_options + for inp in self.executable.common_input_files: + self.add_input(inp) + + if len(self.executable.time_dependent_options): + # Resolving these options requires the concept of a valid time. + # To keep backwards compatibility we will allow this to work if + # valid_seg is not supplied and no option actually needs resolving. + # It would be good to get this from the workflow's valid_seg if + # not overriden. But the Node is not connected to the Workflow + # until the dax starts to be written. + self.resolve_td_options(self.executable.unresolved_td_options) + +
+[docs] + def get_command_line(self): + # FIXME: Put in pegasus_workflow?? + self._finalize() + arglist = self._dax_node.arguments + + tmpargs = [] + for a in arglist: + if not isinstance(a, File): + tmpargs += a.split(' ') + else: + tmpargs.append(a) + arglist = tmpargs + + arglist = [a for a in arglist if a != ''] + + arglist = [a.storage_path if isinstance(a, File) else a for a in arglist] + + # This allows the pfn to be an http(s) URL, which will be + # downloaded by resolve_url + exe_path = urllib.parse.urlsplit(self.executable.get_pfn()).path + + return [exe_path] + arglist
+ + +
+[docs] + def new_output_file_opt(self, valid_seg, extension, option_name, tags=None, + store_file=None, use_tmp_subdirs=False): + """ + This function will create a workflow.File object corresponding to the given + information and then add that file as output of this node. + + Parameters + ----------- + valid_seg : ligo.segments.segment + The time span over which the job is valid for. + extension : string + The extension to be used at the end of the filename. + E.g. '.xml' or '.sqlite'. + option_name : string + The option that is used when setting this job as output. For e.g. + 'output-name' or 'output-file', whatever is appropriate for the + current executable. + tags : list of strings, (optional, default=[]) + These tags will be added to the list of tags already associated with + the job. They can be used to uniquely identify this output file. + store_file : Boolean, (optional, default=True) + This file is to be added to the output mapper and will be stored + in the specified output location if True. If false file will be + removed when no longer needed in the workflow. + """ + if tags is None: + tags = [] + + # Changing this from set(tags) to enforce order. It might make sense + # for all jobs to have file names with tags in the same order. + all_tags = copy.deepcopy(self.executable.tags) + for tag in tags: + if tag not in all_tags: + all_tags.append(tag) + + store_file = store_file if store_file is not None else self.executable.retain_files + + fil = File(self.executable.ifo_list, self.executable.name, + valid_seg, extension=extension, store_file=store_file, + directory=self.executable.out_dir, tags=all_tags, + use_tmp_subdirs=use_tmp_subdirs) + self.add_output_opt(option_name, fil) + return fil
+ + +
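Typical use inside a subclass's create_node method; exe, analysis_seg, the option name and the tag are assumptions made for the example:

    node = Node(exe, valid_seg=analysis_seg)
    trig_file = node.new_output_file_opt(analysis_seg, '.hdf', '--output-file',
                                         tags=['TRIGGERS'])
    # trig_file is a workflow File whose name encodes the ifo(s), the
    # executable name, the tags and the GPS span of analysis_seg.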
+[docs] + def add_multiifo_input_list_opt(self, opt, inputs): + """ Add an option that determines a list of inputs from multiple + detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2 + ..... + """ + # NOTE: Here we have to use the raw arguments functionality as the + # file and ifo are not space separated. + self.add_raw_arg(opt) + self.add_raw_arg(' ') + for infile in inputs: + self.add_raw_arg(infile.ifo) + self.add_raw_arg(':') + self.add_raw_arg(infile.name) + self.add_raw_arg(' ') + self.add_input(infile)
+ + +
+[docs] + def add_multiifo_output_list_opt(self, opt, outputs): + """ Add an option that determines a list of outputs from multiple + detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2 + ..... + """ + # NOTE: Here we have to use the raw arguments functionality as the + # file and ifo are not space separated. + self.add_raw_arg(opt) + self.add_raw_arg(' ') + for outfile in outputs: + self.add_raw_arg(outfile.ifo) + self.add_raw_arg(':') + self.add_raw_arg(outfile.name) + self.add_raw_arg(' ') + self.add_output(outfile)
+ + +
+[docs] + def new_multiifo_output_list_opt(self, opt, ifos, analysis_time, extension, + tags=None, store_file=None, + use_tmp_subdirs=False): + """ Add an option that determines a list of outputs from multiple + detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2 + ..... + File names are created internally from the provided extension and + analysis time. + """ + if tags is None: + tags = [] + all_tags = copy.deepcopy(self.executable.tags) + for tag in tags: + if tag not in all_tags: + all_tags.append(tag) + + output_files = FileList([]) + store_file = store_file if store_file is not None \ + else self.executable.retain_files + + for ifo in ifos: + curr_file = File(ifo, self.executable.name, analysis_time, + extension=extension, store_file=store_file, + directory=self.executable.out_dir, tags=all_tags, + use_tmp_subdirs=use_tmp_subdirs) + output_files.append(curr_file) + self.add_multiifo_output_list_opt(opt, output_files)
+ + +
+[docs] + def resolve_td_options(self, td_options): + for opt in td_options: + new_opt = resolve_td_option(td_options[opt], self.valid_seg) + self._options += [opt, new_opt]
+ + + @property + def output_files(self): + return FileList(self._outputs) + + @property + def output_file(self): + """ + If only one output file return it. Otherwise raise an exception. + """ + out_files = self.output_files + if len(out_files) != 1: + err_msg = "output_file property is only valid if there is a single" + err_msg += " output file. Here there are " + err_msg += "%d output files." %(len(out_files)) + raise ValueError(err_msg) + return out_files[0]
+ + + +
+[docs] +class File(pegasus_workflow.File): + ''' + This class holds the details of an individual output file + This file(s) may be pre-supplied, generated from within the workflow + command line script, or generated within the workflow. The important stuff + is: + + * The ifo that the File is valid for + * The time span that the OutFile is valid for + * A short description of what the file is + * The extension that the file should have + * The url where the file should be located + + An example of initiating this class: + + >> c = File("H1", "INSPIRAL_S6LOWMASS", segments.segment(815901601, 815902001), file_url="file://localhost/home/spxiwh/H1-INSPIRAL_S6LOWMASS-815901601-400.xml.gz" ) + + another where the file url is generated from the inputs: + + >> c = File("H1", "INSPIRAL_S6LOWMASS", segments.segment(815901601, 815902001), directory="/home/spxiwh", extension="xml.gz" ) + + ''' + def __init__(self, ifos, exe_name, segs, file_url=None, + extension=None, directory=None, tags=None, + store_file=True, use_tmp_subdirs=False): + """ + Create a File instance + + Parameters + ---------- + ifos : string or list + The ifo(s) that the File is valid for. If the file is + independently valid for multiple ifos it can be provided as a list. + Ie. ['H1',L1','V1'], if the file is only valid for the combination + of ifos (for e.g. ligolw_thinca output) then this can be supplied + as, for e.g. "H1L1V1". + exe_name: string + A short description of the executable description, tagging + only the program that ran this job. + segs : ligo.segments.segment or ligo.segments.segmentlist + The time span that the OutFile is valid for. Note that this is + *not* the same as the data that the job that made the file reads in. + Lalapps_inspiral jobs do not analyse the first an last 72s of the + data that is read, and are therefore not valid at those times. If + the time is not continuous a segmentlist can be supplied. + file_url : url (optional, default=None) + If this is *not* supplied, extension and directory must be given. + If specified this explicitly points to the url of the file, or the + url where the file will be generated when made in the workflow. + extension : string (optional, default=None) + Either supply this *and* directory *or* supply only file_url. + If given this gives the extension at the end of the file name. The + full file name will be inferred from the other arguments + following the workflow standard. + directory : string (optional, default=None) + Either supply this *and* extension *or* supply only file_url. + If given this gives the directory in which the file exists, or will + exists. The file name will be inferred from the other arguments + following the workflow standard. + tags : list of strings (optional, default=None) + This is a list of descriptors describing what this file is. For + e.g. this might be ["BNSINJECTIONS" ,"LOWMASS","CAT_2_VETO"]. + These are used in file naming. + """ + self.metadata = {} + + # Set the science metadata on the file + if isinstance(ifos, str): + self.ifo_list = [ifos] + else: + self.ifo_list = ifos + + if self.ifo_list is not None: + self.ifo_string = ''.join(self.ifo_list) + else: + self.ifo_string = 'file' + + self.description = exe_name + + if isinstance(segs, segments.segment): + self.segment_list = segments.segmentlist([segs]) + elif isinstance(segs, (segments.segmentlist)): + self.segment_list = segs + else: + err = "segs input must be either ligo.segments.segment or " + err += "segments.segmentlist. Got %s." 
%(str(type(segs)),) + raise ValueError(err) + if tags is None: + tags = [] + if '' in tags: + logger.warning('DO NOT GIVE EMPTY TAGS (from %s)', exe_name) + tags.remove('') + self.tags = tags + + if len(self.tags): + self.tag_str = '_'.join(tags) + tagged_description = '_'.join([self.description] + tags) + else: + tagged_description = self.description + + # Follow the capitals-for-naming convention + self.ifo_string = self.ifo_string.upper() + self.tagged_description = tagged_description.upper() + + if not file_url: + if not extension: + raise TypeError("a file extension required if a file_url " + "is not provided") + if not directory: + raise TypeError("a directory is required if a file_url is " + "not provided") + + filename = self._filename(self.ifo_string, self.tagged_description, + extension, self.segment_list.extent()) + path = os.path.join(directory, filename) + if not os.path.isabs(path): + path = os.path.join(os.getcwd(), path) + file_url = urllib.parse.urlunparse(['file', 'localhost', path, + None, None, None]) + + if use_tmp_subdirs and len(self.segment_list): + pegasus_lfn = str(int(self.segment_list.extent()[0]))[:-4] + pegasus_lfn = pegasus_lfn + '/' + os.path.basename(file_url) + else: + pegasus_lfn = os.path.basename(file_url) + super(File, self).__init__(pegasus_lfn) + + if store_file: + self.storage_path = urllib.parse.urlsplit(file_url).path + else: + self.storage_path = None + + def __getstate__(self): + """ Allow the workflow.File to be picklable. This disables the usage of + the internal cache entry. + """ + for i, seg in enumerate(self.segment_list): + self.segment_list[i] = segments.segment(float(seg[0]), float(seg[1])) + self.cache_entry = None + safe_dict = copy.copy(self.__dict__) + safe_dict['cache_entry'] = None + return safe_dict + + # FIXME: This is a pegasus_workflow thing (don't think it's needed at all!) + # use the pegasus function directly (maybe not). +
+[docs] + def add_metadata(self, key, value): + """ Add arbitrary metadata to this file """ + self.metadata[key] = value
+ + + @property + def ifo(self): + """ + If only one ifo in the ifo_list this will be that ifo. Otherwise an + error is raised. + """ + if len(self.ifo_list) == 1: + return self.ifo_list[0] + else: + err = "self.ifo_list must contain only one ifo to access the " + err += "ifo property. %s." %(str(self.ifo_list),) + raise TypeError(err) + + @property + def segment(self): + """ + If only one segment in the segmentlist this will be that segment. + Otherwise an error is raised. + """ + if len(self.segment_list) == 1: + return self.segment_list[0] + else: + err = "self.segment_list must only contain one segment to access" + err += " the segment property. %s." %(str(self.segment_list),) + raise TypeError(err) + + @property + def cache_entry(self): + """ + Returns a CacheEntry instance for File. + """ + if self.storage_path is None: + raise ValueError('This file is temporary and so a lal ' + 'cache entry cannot be made') + + file_url = urllib.parse.urlunparse(['file', 'localhost', + self.storage_path, None, + None, None]) + cache_entry = lal.utils.CacheEntry(self.ifo_string, + self.tagged_description, self.segment_list.extent(), file_url) + cache_entry.workflow_file = self + return cache_entry + + def _filename(self, ifo, description, extension, segment): + """ + Construct the standard output filename. Should only be used internally + of the File class. + """ + if extension.startswith('.'): + extension = extension[1:] + + # Follow the frame convention of using integer filenames, + # but stretching to cover partially covered seconds. + start = int(segment[0]) + end = int(math.ceil(segment[1])) + duration = str(end-start) + start = str(start) + + return "%s-%s-%s-%s.%s" % (ifo, description.upper(), start, + duration, extension) + +
+[docs] + @classmethod + def from_path(cls, path, attrs=None, **kwargs): + """ + Create an output File object from path, with optional attributes. + """ + if attrs is None: + attrs = {} + if attrs and 'ifos' in attrs: + ifos = attrs['ifos'] + else: + ifos = ['H1', 'K1', 'L1', 'V1'] + if attrs and 'exe_name' in attrs: + exe_name = attrs['exe_name'] + else: + exe_name = 'INPUT' + if attrs and 'segs' in attrs: + segs = attrs['segs'] + else: + segs = segments.segment([1, 2000000000]) + if attrs and 'tags' in attrs: + tags = attrs['tags'] + else: + tags = [] + + curr_file = cls(ifos, exe_name, segs, path, tags=tags, **kwargs) + return curr_file
+
+ + +
+[docs] +class FileList(list): + ''' + This class holds a list of File objects. It inherits from the + built-in list class, but also allows a number of features. ONLY + pycbc.workflow.File instances should be within a FileList instance. + ''' + entry_class = File + +
+[docs] + def categorize_by_attr(self, attribute): + ''' + Function to categorize a FileList by a File object + attribute (eg. 'segment', 'ifo', 'description'). + + Parameters + ----------- + attribute : string + File object attribute to categorize FileList + + Returns + -------- + keys : list + A list of values for an attribute + groups : list + A list of FileLists + ''' + + # need to sort FileList otherwise using groupby without sorting does + # 'AAABBBCCDDAABB' -> ['AAA','BBB','CC','DD','AA','BB'] + # and using groupby with sorting does + # 'AAABBBCCDDAABB' -> ['AAAAA','BBBBB','CC','DD'] + flist = sorted(self, key=attrgetter(attribute), reverse=True) + + # use groupby to create lists + groups = [] + keys = [] + for k, g in groupby(flist, attrgetter(attribute)): + groups.append(FileList(g)) + keys.append(k) + + return keys, groups
+ + +
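An illustrative call, assuming file_list is an existing FileList; here the files are grouped by their detector string:

    keys, groups = file_list.categorize_by_attr('ifo_string')
    for ifo_string, sub_list in zip(keys, groups):
        print(ifo_string, len(sub_list))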
+[docs] + def find_output(self, ifo, time): + '''Returns one File most appropriate at the given time/time range. + + Return one File that covers the given time, or is most + appropriate for the supplied time range. + + Parameters + ----------- + ifo : string + Name of the ifo (or ifos) that the file should be valid for. + time : int/float/LIGOGPStime or tuple containing two values + If int/float/LIGOGPStime (or similar may of specifying one time) is + given, return the File corresponding to the time. This calls + self.find_output_at_time(ifo,time). + If a tuple of two values is given, return the File that is + **most appropriate** for the time range given. This calls + self.find_output_in_range + + Returns + -------- + pycbc_file : pycbc.workflow.File instance + The File that corresponds to the time or time range + ''' + # Determine whether I have a specific time, or a range of times + try: + lenTime = len(time) + except TypeError: + # This is if I have a single time + outFile = self.find_output_at_time(ifo,time) + else: + # This is if I have a range of times + if lenTime == 2: + outFile = self.find_output_in_range(ifo,time[0],time[1]) + # This is if I got a list that had more (or less) than 2 entries + if len(time) != 2: + raise TypeError("I do not understand the input variable time") + return outFile
+ + +
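Illustrative calls; file_list and the GPS times are assumed for the example:

    f_at_time = file_list.find_output('H1', 1126259462)               # single time
    f_in_span = file_list.find_output('H1', (1126250000, 1126260000))  # time range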
+[docs] + def find_output_at_time(self, ifo, time): + ''' + Return File that covers the given time. + + Parameters + ----------- + ifo : string + Name of the ifo (or ifos) that the File should correspond to + time : int/float/LIGOGPStime + Return the Files that covers the supplied time. If no + File covers the time this will return None. + + Returns + -------- + list of File classes + The Files that corresponds to the time. + ''' + # Get list of Files that overlap time, for given ifo + outFiles = [i for i in self if ifo in i.ifo_list and time in i.segment_list] + if len(outFiles) == 0: + # No OutFile at this time + return None + elif len(outFiles) == 1: + # 1 OutFile at this time (good!) + return outFiles + else: + # Multiple output files. Currently this is valid, but we may want + # to demand exclusivity later, or in certain cases. Hence the + # separation. + return outFiles
+ + +
+[docs] + def find_outputs_in_range(self, ifo, current_segment, useSplitLists=False): + """ + Return the list of Files that is most appropriate for the supplied + time range. That is, the Files whose coverage time has the + largest overlap with the supplied time range. + + Parameters + ----------- + ifo : string + Name of the ifo (or ifos) that the File should correspond to + current_segment : ligo.segments.segment + The segment of time that files must intersect. + + Returns + -------- + FileList class + The list of Files that are most appropriate for the time range + """ + currsegment_list = segments.segmentlist([current_segment]) + + # Get all files overlapping the window + overlap_files = self.find_all_output_in_range(ifo, current_segment, + useSplitLists=useSplitLists) + + # By how much do they overlap? + overlap_windows = [abs(i.segment_list & currsegment_list) for i in overlap_files] + + if not overlap_windows: + return [] + + # Return the File with the biggest overlap + # Note if two File have identical overlap, the first is used + # to define the valid segment + overlap_windows = numpy.array(overlap_windows, dtype = int) + segmentLst = overlap_files[overlap_windows.argmax()].segment_list + + # Get all output files with the exact same segment definition + output_files = [f for f in overlap_files if f.segment_list==segmentLst] + return output_files
+ + +
+[docs] + def find_output_in_range(self, ifo, start, end): + ''' + Return the File that is most appropriate for the supplied + time range. That is, the File whose coverage time has the + largest overlap with the supplied time range. If no Files + overlap the supplied time window, will return None. + + Parameters + ----------- + ifo : string + Name of the ifo (or ifos) that the File should correspond to + start : int/float/LIGOGPStime + The start of the time range of interest. + end : int/float/LIGOGPStime + The end of the time range of interest + + Returns + -------- + File class + The File that is most appropriate for the time range + ''' + currsegment_list = segments.segmentlist([segments.segment(start, end)]) + + # First filter Files corresponding to ifo + outFiles = [i for i in self if ifo in i.ifo_list] + + if len(outFiles) == 0: + # No OutFiles correspond to that ifo + return None + # Filter OutFiles to those overlapping the given window + currSeg = segments.segment([start,end]) + outFiles = [i for i in outFiles \ + if i.segment_list.intersects_segment(currSeg)] + + if len(outFiles) == 0: + # No OutFile overlap that time period + return None + elif len(outFiles) == 1: + # One OutFile overlaps that period + return outFiles[0] + else: + overlap_windows = [abs(i.segment_list & currsegment_list) \ + for i in outFiles] + # Return the File with the biggest overlap + # Note if two File have identical overlap, this will return + # the first File in the list + overlap_windows = numpy.array(overlap_windows, dtype = int) + return outFiles[overlap_windows.argmax()]
+ + +
+[docs] + def find_all_output_in_range(self, ifo, currSeg, useSplitLists=False): + """ + Return all files that overlap the specified segment. + """ + if not useSplitLists: + # Slower, but simpler method + outFiles = [i for i in self if ifo in i.ifo_list] + outFiles = [i for i in outFiles + if i.segment_list.intersects_segment(currSeg)] + else: + # Faster, but more complicated + # Basically only check if a subset of files intersects_segment by + # using a presorted list. Sorting only happens once. + if not self._check_split_list_validity(): + # FIXME: DO NOT hard code this. + self._temporal_split_list(100) + startIdx = int((currSeg[0] - self._splitListsStart) / + self._splitListsStep) + # Add some small rounding here + endIdx = (currSeg[1] - self._splitListsStart) / self._splitListsStep + endIdx = int(endIdx - 0.000001) + + outFiles = [] + for idx in range(startIdx, endIdx + 1): + if idx < 0 or idx >= self._splitListsNum: + continue + outFilesTemp = [i for i in self._splitLists[idx] + if ifo in i.ifo_list] + outFiles.extend([i for i in outFilesTemp + if i.segment_list.intersects_segment(currSeg)]) + # Remove duplicates + outFiles = list(set(outFiles)) + + return self.__class__(outFiles)
+ + +
+[docs] + def find_output_with_tag(self, tag): + """ + Find all files who have tag in self.tags + """ + # Enforce upper case + tag = tag.upper() + return FileList([i for i in self if tag in i.tags])
+ + +
+[docs] + def find_output_without_tag(self, tag): + """ + Find all files who do not have tag in self.tags + """ + # Enforce upper case + tag = tag.upper() + return FileList([i for i in self if tag not in i.tags])
+ + +
+[docs] + def find_output_with_ifo(self, ifo): + """ + Find all files who have ifo = ifo + """ + # Enforce upper case + ifo = ifo.upper() + return FileList([i for i in self if ifo in i.ifo_list])
+ + +
+[docs] + def get_times_covered_by_files(self): + """ + Find the coalesced intersection of the segments of all files in the + list. + """ + times = segments.segmentlist([]) + for entry in self: + times.extend(entry.segment_list) + times.coalesce() + return times
+ + +
+[docs] + def convert_to_lal_cache(self): + """ + Return all files in this object as a glue.lal.Cache object + """ + from glue import lal as gluelal + + lal_cache = gluelal.Cache([]) + for entry in self: + try: + lal_cache.append(entry.cache_entry) + except ValueError: + pass + return lal_cache
+ + + def _temporal_split_list(self,numSubLists): + """ + This internal function is used to speed the code up in cases where a + number of operations are being made to determine if files overlap a + specific time. Normally such operations are done on *all* entries with + *every* call. However, if we predetermine which files are at which + times, we can avoid testing *every* file every time. + + We therefore create numSubLists distinct and equal length time windows + equally spaced from the first time entry in the list until the last. + A list is made for each window and files are added to lists which they + overlap. + + If the list changes it should be captured and these split lists become + invalid. Currently the testing for this is pretty basic + """ + # Assume segment lists are coalesced! + startTime = float( min([i.segment_list[0][0] for i in self])) + endTime = float( max([i.segment_list[-1][-1] for i in self])) + step = (endTime - startTime) / float(numSubLists) + + # Set up storage + self._splitLists = [] + for idx in range(numSubLists): + self._splitLists.append(FileList([])) + + # Sort the files + + for currFile in self: + segExtent = currFile.segment_list.extent() + startIdx = (segExtent[0] - startTime) / step + endIdx = (segExtent[1] - startTime) / step + # Add some small rounding here + startIdx = int(startIdx - 0.001) + endIdx = int(endIdx + 0.001) + + if startIdx < 0: + startIdx = 0 + if endIdx >= numSubLists: + endIdx = numSubLists - 1 + + for idx in range(startIdx, endIdx + 1): + self._splitLists[idx].append(currFile) + + # Set information needed to detect changes and to be used elsewhere + self._splitListsLength = len(self) + self._splitListsNum = numSubLists + self._splitListsStart = startTime + self._splitListsEnd = endTime + self._splitListsStep = step + self._splitListsSet = True + + def _check_split_list_validity(self): + """ + See _temporal_split_list above. This function checks if the current + split lists are still valid. + """ + # FIXME: Currently very primitive, but needs to be fast + if not (hasattr(self,"_splitListsSet") and (self._splitListsSet)): + return False + elif len(self) != self._splitListsLength: + return False + else: + return True + +
+[docs] + @classmethod + def load(cls, filename): + """ + Load a FileList from a pickle file + """ + # Pickle data must be read in binary mode + f = open(filename, 'rb') + return pickle.load(f)
+ + +
+[docs] + def dump(self, filename): + """ + Output this FileList to a pickle file + """ + # Pickle data must be written in binary mode + f = open(filename, 'wb') + pickle.dump(self, f)
+ + +
+[docs] + def to_file_object(self, name, out_dir): + """Dump to a pickle file and return a File object reference. + + Parameters + ---------- + name : str + An identifier of this file. Needs to be unique. + out_dir : path + The path in which to place this file. + + Returns + ------- + file : pycbc.workflow.File + """ + make_analysis_dir(out_dir) + + file_ref = File('ALL', name, self.get_times_covered_by_files(), + extension='.pkl', directory=out_dir) + self.dump(file_ref.storage_path) + return file_ref
+
+ + + +
+[docs] +class SegFile(File): + ''' + This class inherits from the File class, and is designed to store + workflow output files containing a segment dict. This is identical in + usage to File except for an additional kwarg for holding the + segment dictionary, if it is known at workflow run time. + ''' + def __init__(self, ifo_list, description, valid_segment, + segment_dict=None, seg_summ_dict=None, **kwargs): + """ + See File.__init__ for a full set of documentation for how to + call this class. The only thing unique and added to this class is + the optional segment_dict. NOTE that while segment_dict is a + ligo.segments.segmentlistdict rather than the usual dict[ifo] + we key by dict[ifo:name]. + + Parameters + ------------ + ifo_list : string or list (required) + See File.__init__ + description : string (required) + See File.__init__ + segment : ligo.segments.segment or ligo.segments.segmentlist + See File.__init__ + segment_dict : ligo.segments.segmentlistdict (optional, default=None) + A ligo.segments.segmentlistdict covering the times covered by the + segmentlistdict associated with this file. + Can be added by setting self.segment_dict after initializing an + instance of the class. + + """ + super(SegFile, self).__init__(ifo_list, description, valid_segment, + **kwargs) + # To avoid confusion with the segment_list property of the parent class + # we refer to this as valid_segments here + self.valid_segments = self.segment_list + self.segment_dict = segment_dict + self.seg_summ_dict = seg_summ_dict + +
+[docs] + @classmethod + def from_segment_list(cls, description, segmentlist, name, ifo, + seg_summ_list=None, **kwargs): + """ Initialize a SegFile object from a segmentlist. + + Parameters + ------------ + description : string (required) + See File.__init__ + segmentlist : ligo.segments.segmentslist + The segment list that will be stored in this file. + name : str + The name of the segment lists to be stored in the file. + ifo : str + The ifo of the segment lists to be stored in this file. + seg_summ_list : ligo.segments.segmentslist (OPTIONAL) + Specify the segment_summary segmentlist that goes along with the + segmentlist. Default=None, in this case segment_summary is taken + from the valid_segment of the SegFile class. + """ + seglistdict = segments.segmentlistdict() + seglistdict[ifo + ':' + name] = segmentlist + seg_summ_dict = None + if seg_summ_list is not None: + seg_summ_dict = segments.segmentlistdict() + seg_summ_dict[ifo + ':' + name] = seg_summ_list + return cls.from_segment_list_dict(description, seglistdict, + seg_summ_dict=seg_summ_dict, **kwargs)
+ + +
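A sketch of building a SegFile from a single segment list; the description, segment name, GPS times and output directory are all invented for the example, and the call writes a segment XML file to disk:

    import os
    from ligo import segments
    from pycbc.workflow.core import SegFile

    os.makedirs('segments', exist_ok=True)
    sci_segs = segments.segmentlist([segments.segment(1126250000, 1126260000)])
    seg_file = SegFile.from_segment_list('SCIENCE_SEGMENTS', sci_segs,
                                         'DATA_ANALYSED', 'H1',
                                         extension='.xml.gz',
                                         directory='segments')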
+[docs] + @classmethod + def from_multi_segment_list(cls, description, segmentlists, names, ifos, + seg_summ_lists=None, **kwargs): + """ Initialize a SegFile object from a list of segmentlists. + + Parameters + ------------ + description : string (required) + See File.__init__ + segmentlists : List of ligo.segments.segmentslist + List of segment lists that will be stored in this file. + names : List of str + List of names of the segment lists to be stored in the file. + ifos : str + List of ifos of the segment lists to be stored in this file. + seg_summ_lists : ligo.segments.segmentslist (OPTIONAL) + Specify the segment_summary segmentlists that go along with the + segmentlists. Default=None, in this case segment_summary is taken + from the valid_segment of the SegFile class. + """ + seglistdict = segments.segmentlistdict() + for name, ifo, segmentlist in zip(names, ifos, segmentlists): + seglistdict[ifo + ':' + name] = segmentlist + if seg_summ_lists is not None: + seg_summ_dict = segments.segmentlistdict() + for name, ifo, seg_summ_list in zip(names, ifos, seg_summ_lists): + seg_summ_dict[ifo + ':' + name] = seg_summ_list + else: + seg_summ_dict = None + + return cls.from_segment_list_dict(description, seglistdict, + seg_summ_dict=seg_summ_dict, **kwargs)
+ + +
+[docs] + @classmethod + def from_segment_list_dict(cls, description, segmentlistdict, + ifo_list=None, valid_segment=None, + file_exists=False, seg_summ_dict=None, + **kwargs): + """ Initialize a SegFile object from a segmentlistdict. + + Parameters + ------------ + description : string (required) + See File.__init__ + segmentlistdict : ligo.segments.segmentlistdict + See SegFile.__init__ + ifo_list : string or list (optional) + See File.__init__; if not given, a list of all ifos in the + segmentlistdict object will be used. + valid_segment : ligo.segments.segment or ligo.segments.segmentlist + See File.__init__; if not given, the extent of all segments in the + segmentlistdict is used. + file_exists : boolean (default = False) + If provided and set to True it is assumed that this file already + exists on disk and so there is no need to write again. + seg_summ_dict : ligo.segments.segmentlistdict + Optional. See SegFile.__init__. + """ + if ifo_list is None: + ifo_set = set([i.split(':')[0] for i in segmentlistdict.keys()]) + ifo_list = list(ifo_set) + ifo_list.sort() + if valid_segment is None: + if seg_summ_dict and \ + numpy.any([len(v) for _, v in seg_summ_dict.items()]): + # Only come here if seg_summ_dict is supplied and it is + # not empty. + valid_segment = seg_summ_dict.extent_all() + else: + try: + valid_segment = segmentlistdict.extent_all() + except: + # The input probably wasn't a + # ligo.segments.segmentlistdict; convert and retry. + segmentlistdict = segments.segmentlistdict(segmentlistdict) + try: + valid_segment = segmentlistdict.extent_all() + except ValueError: + # No segment_summary and segment list is empty + # Setting valid segment now is hard! + warn_msg = "No information with which to set valid " + warn_msg += "segment." + logger.warning(warn_msg) + valid_segment = segments.segment([0,1]) + instnc = cls(ifo_list, description, valid_segment, + segment_dict=segmentlistdict, seg_summ_dict=seg_summ_dict, + **kwargs) + if not file_exists: + instnc.to_segment_xml() + else: + instnc.add_pfn(urljoin('file:', pathname2url(instnc.storage_path)), + site='local') + return instnc
+ + +
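A sketch of the multi-detector case, again with invented times; it relies only on the defaults documented above (ifo_list inferred from the dictionary keys and valid_segment taken as the extent of all lists), and like the previous example it writes a segment XML file on creation.

from ligo import segments
from pycbc.workflow.core import SegFile

segdict = segments.segmentlistdict()
segdict['H1:SCIENCE'] = segments.segmentlist([segments.segment(100, 200)])
segdict['L1:SCIENCE'] = segments.segmentlist([segments.segment(150, 250)])
# ifo_list defaults to the sorted ifos in the keys (['H1', 'L1']) and
# valid_segment to the extent of all lists, segment(100, 250).
seg_file = SegFile.from_segment_list_dict('SCIENCE_SEGMENTS', segdict,
                                          extension='.xml', directory='.')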
+[docs] + @classmethod + def from_segment_xml(cls, xml_file, **kwargs): + """ + Read a ligo.segments.segmentlist from the file object file containing an + xml segment table. + + Parameters + ----------- + xml_file : file object + file object for segment xml file + """ + # load xmldocument and SegmentDefTable and SegmentTables + fp = open(xml_file, 'rb') + xmldoc = ligolw_utils.load_fileobj( + fp, compress='auto', contenthandler=LIGOLWContentHandler) + + seg_def_table = lsctables.SegmentDefTable.get_table(xmldoc) + seg_table = lsctables.SegmentTable.get_table(xmldoc) + seg_sum_table = lsctables.SegmentSumTable.get_table(xmldoc) + + segs = segments.segmentlistdict() + seg_summ = segments.segmentlistdict() + + seg_id = {} + for seg_def in seg_def_table: + # Here we want to encode ifo and segment name + full_channel_name = ':'.join([str(seg_def.ifos), + str(seg_def.name)]) + seg_id[int(seg_def.segment_def_id)] = full_channel_name + segs[full_channel_name] = segments.segmentlist() + seg_summ[full_channel_name] = segments.segmentlist() + + for seg in seg_table: + seg_obj = segments.segment( + lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns), + lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns)) + segs[seg_id[int(seg.segment_def_id)]].append(seg_obj) + + for seg in seg_sum_table: + seg_obj = segments.segment( + lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns), + lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns)) + seg_summ[seg_id[int(seg.segment_def_id)]].append(seg_obj) + + for seg_name in seg_id.values(): + segs[seg_name] = segs[seg_name].coalesce() + + xmldoc.unlink() + fp.close() + curr_url = urllib.parse.urlunparse(['file', 'localhost', xml_file, + None, None, None]) + + return cls.from_segment_list_dict('SEGMENTS', segs, file_url=curr_url, + file_exists=True, + seg_summ_dict=seg_summ, **kwargs)
+ + +
+[docs] + def remove_short_sci_segs(self, minSegLength): + """ + Function to remove all science segments + shorter than a specific length. Also updates the file on disk to remove + these segments. + + Parameters + ----------- + minSegLength : int + Minimum length of science segments. Segments shorter than this will + be removed. + """ + newsegment_list = segments.segmentlist() + for key, seglist in self.segment_dict.items(): + newsegment_list = segments.segmentlist() + for seg in seglist: + if abs(seg) > minSegLength: + newsegment_list.append(seg) + newsegment_list.coalesce() + self.segment_dict[key] = newsegment_list + self.to_segment_xml(override_file_if_exists=True)
+ + +
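The filter above keeps only segments strictly longer than minSegLength. A standalone sketch of that selection using ligo.segments directly, with invented times:

from ligo import segments

seglist = segments.segmentlist([segments.segment(0, 10),
                                segments.segment(20, 100)])
min_seg_length = 16
kept = segments.segmentlist(s for s in seglist if abs(s) > min_seg_length)
kept.coalesce()
print(kept)  # [segment(20, 100)] - the 10 s segment has been dropped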
+[docs] + def return_union_seglist(self): + return self.segment_dict.union(self.segment_dict.keys())
+ + +
+[docs] + def parse_segdict_key(self, key): + """ + Return ifo and name from the segdict key. + """ + splt = key.split(':') + if len(splt) == 2: + return splt[0], splt[1] + else: + err_msg = "Key should be of the format 'ifo:name', got %s." %(key,) + raise ValueError(err_msg)
+ + +
+[docs] + def to_segment_xml(self, override_file_if_exists=False): + """ + Write the segment lists in self.segment_dict to self.storage_path. + """ + # create XML doc and add process table + outdoc = ligolw.Document() + outdoc.appendChild(ligolw.LIGO_LW()) + process = create_process_table(outdoc) + + for key, seglist in self.segment_dict.items(): + ifo, name = self.parse_segdict_key(key) + # Ensure we have LIGOTimeGPS + fsegs = [(lal.LIGOTimeGPS(seg[0]), + lal.LIGOTimeGPS(seg[1])) for seg in seglist] + + if self.seg_summ_dict is None: + vsegs = [(lal.LIGOTimeGPS(seg[0]), + lal.LIGOTimeGPS(seg[1])) \ + for seg in self.valid_segments] + else: + vsegs = [(lal.LIGOTimeGPS(seg[0]), + lal.LIGOTimeGPS(seg[1])) \ + for seg in self.seg_summ_dict[key]] + + # Add using glue library to set all segment tables + with ligolw_segments.LigolwSegments(outdoc, process) as x: + x.add(ligolw_segments.LigolwSegmentList(active=fsegs, + instruments=set([ifo]), name=name, + version=1, valid=vsegs)) + + # write file + url = urljoin('file:', pathname2url(self.storage_path)) + if not override_file_if_exists or not self.has_pfn(url, site='local'): + self.add_pfn(url, site='local') + ligolw_utils.write_filename(outdoc, self.storage_path)
+
+ + + +
+[docs] +def make_external_call(cmdList, out_dir=None, out_basename='external_call', + shell=False, fail_on_error=True): + """ + Use this to make an external call using the python subprocess module. + See the subprocess documentation for more details of how this works. + http://docs.python.org/2/library/subprocess.html + + Parameters + ----------- + cmdList : list of strings + This list of strings contains the command to be run. See the subprocess + documentation for more details. + out_dir : string + If given the stdout and stderr will be redirected to + os.path.join(out_dir, out_basename+[".err", ".out"]) + If not given the stdout and stderr will not be recorded. + out_basename : string + The value of out_basename used to construct the file names used to + store stderr and stdout. See out_dir for more information. + shell : boolean, default=False + This value will be given as the shell kwarg to the subprocess call. + **WARNING** See the subprocess documentation for details on this + kwarg including a warning about a serious security exploit. Do not + use this unless you are sure it is necessary **and** safe. + fail_on_error : boolean, default=True + If set to True an exception will be raised if the external command does + not return a code of 0. If set to False such failures will be ignored. + Stderr and stdout can be stored in either case using the out_dir + and out_basename options. + + Returns + -------- + exitCode : int + The code returned by the process. + """ + if out_dir: + outBase = os.path.join(out_dir,out_basename) + errFile = outBase + '.err' + errFP = open(errFile, 'w') + outFile = outBase + '.out' + outFP = open(outFile, 'w') + cmdFile = outBase + '.sh' + cmdFP = open(cmdFile, 'w') + cmdFP.write(' '.join(cmdList)) + cmdFP.close() + else: + errFile = None + outFile = None + cmdFile = None + errFP = None + outFP = None + + msg = "Making external call %s" %(' '.join(cmdList)) + logger.info(msg) + errCode = subprocess.call(cmdList, stderr=errFP, stdout=outFP,\ + shell=shell) + if errFP: + errFP.close() + if outFP: + outFP.close() + + if errCode and fail_on_error: + raise CalledProcessErrorMod(errCode, ' '.join(cmdList), + errFile=errFile, outFile=outFile, cmdFile=cmdFile) + logger.info("Call successful, or error checking disabled.") + return errCode
+ + + +
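A small usage sketch; the command and log directory are arbitrary examples, and the directory must exist before the call since the function opens the .err/.out/.sh files inside it.

import os
from pycbc.workflow.core import make_external_call

os.makedirs('logs', exist_ok=True)
make_external_call(['ls', '-l'], out_dir='logs', out_basename='list_files')
# logs/list_files.out, logs/list_files.err and logs/list_files.sh now hold
# the captured stdout, stderr and the command that was run.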
+[docs] +class CalledProcessErrorMod(Exception): + """ + This exception is raised when subprocess.call returns a non-zero exit code + and checking has been requested. This should not be accessed by the user; + it is used only within make_external_call. + """ + def __init__(self, returncode, cmd, errFile=None, outFile=None, + cmdFile=None): + self.returncode = returncode + self.cmd = cmd + self.errFile = errFile + self.outFile = outFile + self.cmdFile = cmdFile + def __str__(self): + msg = "Command '%s' returned non-zero exit status %d.\n" \ + %(self.cmd, self.returncode) + if self.errFile: + msg += "Stderr can be found in %s .\n" %(self.errFile) + if self.outFile: + msg += "Stdout can be found in %s .\n" %(self.outFile) + if self.cmdFile: + msg += "The failed command has been printed in %s ." %(self.cmdFile) + return msg
+ + + +
+[docs] +def resolve_url_to_file( + curr_pfn, + attrs=None, + hash_max_chunks=10, + hash_chunk_size=int(1e6) +): + """ + Resolves a PFN into a workflow.File object. + + This function will resolve a PFN to a workflow.File object. If a File + object already exists for that PFN that will be returned, otherwise a new + object is returned. We will implement default site schemes here as needed, + for example cvmfs paths will be added to the osg and nonfsio sites in + addition to local. If the LFN is a duplicate of an existing one, but with a + different PFN, an AssertionError is raised. The attrs keyword-argument can + be used to specify attributes of a file. All files have 4 possible + attributes: a list of ifos; an identifying string, usually used to give + the name of the executable that created the file; a segmentlist over which + the file is valid; and tags specifying particular details about those files. + If attrs['ifos'] is set it will be used as the ifos, otherwise this will + default to ['H1', 'K1', 'L1', 'V1']. If attrs['exe_name'] is given this + will replace the "exe_name" sent to File.__init__ otherwise 'INPUT' will + be given. segs will default to [[1,2000000000]] unless overridden with + attrs['segs']. tags will default to an empty list unless overridden + with attrs['tag']. If attrs is None it will be ignored and all defaults + will be used. It is emphasized that these attributes are for the most part + not important with input files. Exceptions include things like input + template banks, where ifos and valid times will be checked in the workflow + and used in the naming of child job output files. + + hash_max_chunks and hash_chunk_size are used to decide how much of the + files to check before they are considered the same, and not copied. + """ + cvmfsstr1 = 'file:///cvmfs/' + cvmfsstr2 = 'file://localhost/cvmfs/' + osdfstr1 = 'osdf:///' # Technically this isn't CVMFS, but same handling! + cvmfsstrs = (cvmfsstr1, cvmfsstr2, osdfstr1) + + # Get LFN + urlp = urllib.parse.urlparse(curr_pfn) + curr_lfn = os.path.basename(urlp.path) + + # Does this already exist as a File? + if curr_lfn in file_input_from_config_dict.keys(): + file_pfn = file_input_from_config_dict[curr_lfn][2] + # If the PFNs are different, but LFNs are the same then fail. + assert(file_pfn == curr_pfn) + curr_file = file_input_from_config_dict[curr_lfn][1] + else: + # Use resolve_url to download file/symlink as appropriate + local_file_path = resolve_url( + curr_pfn, + hash_max_chunks=hash_max_chunks, + hash_chunk_size=hash_chunk_size, + ) + # Create File object with default local path + curr_file = File.from_path(local_file_path, attrs=attrs) + + if curr_pfn.startswith(cvmfsstrs): + # Add PFNs for nonlocal sites for special cases (e.g. CVMFS). + # This block could be extended as needed + curr_file.add_pfn(curr_pfn, site='all') + else: + pfn_local = urljoin('file:', pathname2url(local_file_path)) + curr_file.add_pfn(pfn_local, 'local') + # Store the file to avoid later duplication + tuple_val = (local_file_path, curr_file, curr_pfn) + file_input_from_config_dict[curr_lfn] = tuple_val + return curr_file
+ + + +
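An illustrative call, assuming a template bank file really exists at the given (made-up) path; only the attrs keys named in the docstring above are used, so this is a sketch rather than a runnable recipe.

from pycbc.workflow.core import resolve_url_to_file

attrs = {'ifos': ['H1', 'L1'],       # detectors the file is valid for
         'exe_name': 'TMPLTBANK'}    # identifying string used in file names
bank_file = resolve_url_to_file('file:///path/to/H1L1-BANK.hdf', attrs=attrs)
# A second call with the same LFN returns the cached File object.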
+[docs] +def configparser_value_to_file(cp, sec, opt, attrs=None): + """ + Fetch a file given its url location via the section + and option in the workflow configuration parser. + + Parameters + ----------- + cp : ConfigParser object + The ConfigParser object holding the workflow configuration settings + sec : string + The section containing options for this job. + opt : string + Name of option (e.g. --output-file) + attrs : dict, optional + Dictionary specifying the 4 possible attributes of the file. See + resolve_url_to_file for details. + + Returns + -------- + fileobj_from_path : workflow.File object obtained from the path + specified by opt, within sec, in cp. + """ + path = cp.get(sec, opt) + fileobj_from_path = resolve_url_to_file(path, attrs=attrs) + return fileobj_from_path
+ + + +
+[docs] +def get_full_analysis_chunk(science_segs): + """ + Function to find the first and last time point contained in the science segments + and return a single segment spanning that full time. + + Parameters + ----------- + science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances + The list of times that are being analysed in this workflow. + + Returns + -------- + fullSegment : ligo.segments.segment + The segment spanning the first and last time point contained in science_segs. + """ + extents = [science_segs[ifo].extent() for ifo in science_segs.keys()] + start, end = extents[0] + for lo, hi in extents: + if start > lo: + start = lo + if end < hi: + end = hi + fullSegment = segments.segment(start, end) + return fullSegment
+ + + +
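A short sketch of the behaviour described above: the returned segment spans the earliest start and latest end over all detectors (times invented).

from ligo import segments
from pycbc.workflow.core import get_full_analysis_chunk

science_segs = {
    'H1': segments.segmentlist([segments.segment(100, 200)]),
    'L1': segments.segmentlist([segments.segment(150, 300)]),
}
print(get_full_analysis_chunk(science_segs))  # segment(100, 300)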
+[docs] +def get_random_label(): + """ + Get a random label string to use when clustering jobs. + """ + return ''.join(random.choice(string.ascii_uppercase + string.digits) \ + for _ in range(15))
+ + + +
+[docs] +def resolve_td_option(val_str, valid_seg): + """ + Take an option which might be time-dependent and resolve it. + + Some options might take different values depending on the GPS time. For + example if you want opt_1 to take value_a if the time is between 10 and + 100, value_b if between 100 and 250, and value_c if between 250 and 500 you + can supply: + + value_a[10:100],value_b[100:250],value_c[250:500]. + + This function will parse that string (as opt) and return the value whose + time range fully contains valid_seg. If valid_seg is not fully contained + in exactly one of these ranges, the code will fail. If given a simple + option like: + + value_a + + The function will just return value_a. + """ + # Track if we've already found a matching option + output = '' + # Strip any whitespace, and split on comma + curr_vals = val_str.replace(' ', '').strip().split(',') + + # Resolving the simple case is trivial and can be done immediately. + if len(curr_vals) == 1 and '[' not in curr_vals[0]: + return curr_vals[0] + + # Loop over all possible values + for cval in curr_vals: + start = int(valid_seg[0]) + end = int(valid_seg[1]) + # Extract limits for each case, and check overlap with valid_seg + if '[' in cval: + bopt = cval.split('[')[1].split(']')[0] + start, end = bopt.split(':') + cval = cval.replace('[' + bopt + ']', '') + curr_seg = segments.segment(int(start), int(end)) + # The segments module is a bit weird so we need to check if the two + # overlap using the following code. If valid_seg is fully within + # curr_seg this will be true. + if curr_seg.intersects(valid_seg) and \ + (curr_seg & valid_seg == valid_seg): + if output: + err_msg = "Time-dependent options must be disjoint." + raise ValueError(err_msg) + output = cval + if not output: + err_msg = "Could not resolve option {}".format(val_str) + raise ValueError(err_msg) + return output
+ + + +
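A sketch of the time-dependent option syntax parsed above; the values and GPS ranges are invented.

from ligo import segments
from pycbc.workflow.core import resolve_td_option

opt = 'value_a[10:100],value_b[100:250],value_c[250:500]'
# valid_seg must lie entirely inside exactly one bracketed range.
print(resolve_td_option(opt, segments.segment(120, 200)))     # 'value_b'
print(resolve_td_option('value_a', segments.segment(0, 10)))  # plain option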
+[docs] +def add_workflow_settings_cli(parser, include_subdax_opts=False): + """Adds workflow options to an argument parser. + + Parameters + ---------- + parser : argparse.ArgumentParser + Argument parser to add the options to. + include_subdax_opts : bool, optional + If True, will add output-map and dax-file-directory options + to the parser. These can be used for workflows that are + generated as a subdax of another workflow. Default is False. + """ + wfgrp = parser.add_argument_group("Options for setting workflow files") + wfgrp.add_argument("--workflow-name", required=True, + help="Name of the workflow.") + wfgrp.add_argument("--tags", nargs="+", default=[], + help="Append the given tags to file names.") + wfgrp.add_argument("--output-dir", default=None, + help="Path to directory where the workflow will be " + "written. Default is to use " + "{workflow-name}_output.") + wfgrp.add_argument("--cache-file", default=None, + help="Path to input file containing list of files to " + "be reused (the 'input_map' file).") + wfgrp.add_argument("--plan-now", default=False, action='store_true', + help="If given, workflow will immediately be planned " + "on completion of workflow generation but not " + "submitted to the condor pool. A start script " + "will be created to submit to condor.") + wfgrp.add_argument("--submit-now", default=False, action='store_true', + help="If given, workflow will immediately be submitted " + "on completion of workflow generation.") + wfgrp.add_argument("--dax-file", default=None, + help="Path to DAX file. Default is to write to the " + "output directory with name " + "{workflow-name}.dax.") + if include_subdax_opts: + wfgrp.add_argument("--output-map", default=None, + help="Path to an output map file.") + wfgrp.add_argument("--dax-file-directory", default=None, + help="Put dax files (including output map, " + "sites.yml, etc.) in this directory. The use " + "case for this is when running a sub-workflow " + "under pegasus the outputs need to be copied " + "back to the appropriate directory, and " + "using this as --dax-file-directory . allows " + "that to be done.")
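A minimal sketch of attaching these options to a parser; the workflow name and output directory are arbitrary.

import argparse
from pycbc.workflow.core import add_workflow_settings_cli

parser = argparse.ArgumentParser()
add_workflow_settings_cli(parser, include_subdax_opts=True)
opts = parser.parse_args(['--workflow-name', 'my_search',
                          '--output-dir', 'my_search_output'])
print(opts.workflow_name, opts.tags, opts.dax_file)  # my_search [] None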
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/datafind.html b/latest/html/_modules/pycbc/workflow/datafind.html new file mode 100644 index 00000000000..24f4c108bc3 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/datafind.html @@ -0,0 +1,1264 @@ + + + + + + pycbc.workflow.datafind — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.workflow.datafind

+# Copyright (C) 2013  Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module is responsible for querying a datafind server to determine the
+availability of the data that the code is attempting to run on. It also
+performs a number of tests and can act on these as described below. Full
+documentation for this module can be found here:
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/datafind.html
+"""
+
+import os, copy
+import logging
+import urllib.parse
+
+from ligo import segments
+from ligo.lw import utils, table
+from gwdatafind import find_urls as find_frame_urls
+
+from pycbc.workflow.core import SegFile, File, FileList, make_analysis_dir
+from pycbc.io.ligolw import LIGOLWContentHandler
+
+# NOTE urllib is weird. For some reason it only allows known schemes and will
+# give *wrong* results, rather than failing, if you use something like gsiftp
+# We can add schemes explicitly, as below, but be careful with this!
+# (urllib is used indirectly through lal.Cache objects)
+urllib.parse.uses_relative.append('osdf')
+urllib.parse.uses_netloc.append('osdf')
+
+logger = logging.getLogger('pycbc.workflow.datafind')
+
+
+[docs] +def setup_datafind_workflow(workflow, scienceSegs, outputDir, seg_file=None, + tags=None): + """ + Setup datafind section of the workflow. This section is responsible for + generating, or setting up the workflow to generate, a list of files that + record the location of the frame files needed to perform the analysis. + There could be multiple options here, the datafind jobs could be done at + run time or could be put into a dag. The subsequent jobs will know + what was done here from the OutFileList containing the datafind jobs + (and the Dagman nodes if appropriate. + For now the only implemented option is to generate the datafind files at + runtime. This module can also check if the frameFiles actually exist, check + whether the obtained segments line up with the original ones and update the + science segments to reflect missing data files. + + Parameters + ---------- + workflow: pycbc.workflow.core.Workflow + The workflow class that stores the jobs that will be run. + scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances + This contains the times that the workflow is expected to analyse. + outputDir : path + All output files written by datafind processes will be written to this + directory. + seg_file : SegFile, optional (default=None) + The file returned by get_science_segments containing the science + segments and the associated segment_summary. This will + be used for the segment_summary test and is required if, and only if, + performing that test. + tags : list of string, optional (default=None) + Use this to specify tags. This can be used if this module is being + called more than once to give call specific configuration (by setting + options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). + This is also used to tag the Files returned by the class to uniqueify + the Files and uniqueify the actual filename. + FIXME: Filenames may not be unique with current codes! + + Returns + -------- + datafindOuts : OutGroupList + List of all the datafind output files for use later in the pipeline. + sci_avlble_file : SegFile + SegFile containing the analysable time after checks in the datafind + module are applied to the input segment list. For production runs this + is expected to be equal to the input segment list. + scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances + This contains the times that the workflow is expected to analyse. If + the updateSegmentTimes kwarg is given this will be updated to reflect + any instances of missing data. + sci_avlble_name : string + The name with which the analysable time is stored in the + sci_avlble_file. 
+ """ + if tags is None: + tags = [] + logger.info("Entering datafind module") + make_analysis_dir(outputDir) + cp = workflow.cp + + # Parse for options in ini file + datafind_method = cp.get_opt_tags("workflow-datafind", + "datafind-method", tags) + + if cp.has_option_tags("workflow-datafind", + "datafind-check-segment-gaps", tags): + checkSegmentGaps = cp.get_opt_tags("workflow-datafind", + "datafind-check-segment-gaps", tags) + else: + checkSegmentGaps = "no_test" + if cp.has_option_tags("workflow-datafind", + "datafind-check-frames-exist", tags): + checkFramesExist = cp.get_opt_tags("workflow-datafind", + "datafind-check-frames-exist", tags) + else: + checkFramesExist = "no_test" + if cp.has_option_tags("workflow-datafind", + "datafind-check-segment-summary", tags): + checkSegmentSummary = cp.get_opt_tags("workflow-datafind", + "datafind-check-segment-summary", tags) + else: + checkSegmentSummary = "no_test" + + logger.info("Starting datafind with setup_datafind_runtime_generated") + if datafind_method == "AT_RUNTIME_MULTIPLE_CACHES": + datafindcaches, datafindouts = \ + setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs, + outputDir, tags=tags) + elif datafind_method == "AT_RUNTIME_SINGLE_CACHES": + datafindcaches, datafindouts = \ + setup_datafind_runtime_cache_single_call_perifo(cp, scienceSegs, + outputDir, tags=tags) + elif datafind_method == "AT_RUNTIME_MULTIPLE_FRAMES": + datafindcaches, datafindouts = \ + setup_datafind_runtime_frames_multi_calls_perifo(cp, scienceSegs, + outputDir, tags=tags) + elif datafind_method == "AT_RUNTIME_SINGLE_FRAMES": + datafindcaches, datafindouts = \ + setup_datafind_runtime_frames_single_call_perifo(cp, scienceSegs, + outputDir, tags=tags) + elif datafind_method == "AT_RUNTIME_FAKE_DATA": + pass + elif datafind_method == "FROM_PREGENERATED_LCF_FILES": + ifos = scienceSegs.keys() + datafindcaches, datafindouts = \ + setup_datafind_from_pregenerated_lcf_files(cp, ifos, + outputDir, tags=tags) + else: + msg = """Entry datafind-method in [workflow-datafind] does not have " + expected value. Valid values are + AT_RUNTIME_MULTIPLE_FRAMES, AT_RUNTIME_SINGLE_FRAMES + AT_RUNTIME_MULTIPLE_CACHES, AT_RUNTIME_SINGLE_CACHES, + FROM_PREGENERATED_LCF_FILES, or AT_RUNTIME_FAKE_DATA. + Consult the documentation for more info.""" + raise ValueError(msg) + + using_backup_server = False + if datafind_method == "AT_RUNTIME_MULTIPLE_FRAMES" or \ + datafind_method == "AT_RUNTIME_SINGLE_FRAMES": + if cp.has_option_tags("workflow-datafind", + "datafind-backup-datafind-server", tags): + using_backup_server = True + backup_server = cp.get_opt_tags("workflow-datafind", + "datafind-backup-datafind-server", tags) + cp_new = copy.deepcopy(cp) + cp_new.set("workflow-datafind", + "datafind-ligo-datafind-server", backup_server) + cp_new.set('datafind', 'urltype', 'gsiftp') + backup_datafindcaches, backup_datafindouts =\ + setup_datafind_runtime_frames_single_call_perifo(cp_new, + scienceSegs, outputDir, tags=tags) + backup_datafindouts = datafind_keep_unique_backups(\ + backup_datafindouts, datafindouts) + datafindcaches.extend(backup_datafindcaches) + datafindouts.extend(backup_datafindouts) + + logger.info("setup_datafind_runtime_generated completed") + # If we don't have frame files covering all times we can update the science + # segments. 
+ if checkSegmentGaps in ['warn','update_times','raise_error']: + logger.info("Checking science segments against datafind output....") + newScienceSegs = get_science_segs_from_datafind_outs(datafindcaches) + logger.info("New segments calculated from data find output.....") + missingData = False + for ifo in scienceSegs.keys(): + # If no science segments in input then do nothing + if not scienceSegs[ifo]: + msg = "No science segments are present for ifo %s, " %(ifo) + msg += "the segment metadata indicates there is no analyzable" + msg += " strain data between the selected GPS start and end " + msg += "times." + logger.warning(msg) + continue + if ifo not in newScienceSegs: + msg = "No data frames were found corresponding to the science " + msg += "segments for ifo %s" %(ifo) + logger.error(msg) + missingData = True + if checkSegmentGaps == 'update_times': + scienceSegs[ifo] = segments.segmentlist() + continue + missing = scienceSegs[ifo] - newScienceSegs[ifo] + if abs(missing): + msg = "From ifo %s we are missing frames covering:" %(ifo) + msg += "\n%s" % "\n".join(map(str, missing)) + missingData = True + logger.error(msg) + if checkSegmentGaps == 'update_times': + # Remove missing time, so that we can carry on if desired + logger.info("Updating science segments for ifo %s.", ifo) + scienceSegs[ifo] = scienceSegs[ifo] - missing + + if checkSegmentGaps == 'raise_error' and missingData: + raise ValueError("Workflow cannot find needed data, exiting.") + logger.info("Done checking, any discrepancies are reported above.") + elif checkSegmentGaps == 'no_test': + pass + else: + errMsg = "checkSegmentGaps kwarg must take a value from 'no_test', " + errMsg += "'warn', 'update_times' or 'raise_error'." + raise ValueError(errMsg) + + # Do all of the frame files that were returned actually exist? + if checkFramesExist in ['warn','update_times','raise_error']: + logger.info("Verifying that all frames exist on disk.") + missingFrSegs, missingFrames = \ + get_missing_segs_from_frame_file_cache(datafindcaches) + missingFlag = False + for ifo in missingFrames.keys(): + # If no data in the input then do nothing + if not scienceSegs[ifo]: + continue + # If using a backup server, does the frame exist remotely? + if using_backup_server: + # WARNING: This will be slow, but hopefully it will not occur + # for too many frames. This could be optimized if + # it becomes necessary. + new_list = [] + for frame in missingFrames[ifo]: + for dfout in datafindouts: + dfout_pfns = list(dfout.pfns) + dfout_urls = [a.url for a in dfout_pfns] + if frame.url in dfout_urls: + pfn = dfout_pfns[dfout_urls.index(frame.url)] + dfout.removePFN(pfn) + if len(dfout.pfns) == 0: + new_list.append(frame) + else: + msg = "Frame %s not found locally. "\ + %(frame.url,) + msg += "Replacing with remote url(s) %s." 
\ + %(str([a.url for a in dfout.pfns]),) + logger.info(msg) + break + else: + new_list.append(frame) + missingFrames[ifo] = new_list + if missingFrames[ifo]: + msg = "From ifo %s we are missing the following frames:" %(ifo) + msg +='\n'.join([a.url for a in missingFrames[ifo]]) + missingFlag = True + logger.error(msg) + if checkFramesExist == 'update_times': + # Remove missing times, so that we can carry on if desired + logger.info("Updating science times for ifo %s.", ifo) + scienceSegs[ifo] = scienceSegs[ifo] - missingFrSegs[ifo] + + if checkFramesExist == 'raise_error' and missingFlag: + raise ValueError("Workflow cannot find all frames, exiting.") + logger.info("Finished checking frames.") + elif checkFramesExist == 'no_test': + pass + else: + errMsg = "checkFramesExist kwarg must take a value from 'no_test', " + errMsg += "'warn', 'update_times' or 'raise_error'." + raise ValueError(errMsg) + + # Check if there are cases where frames exist, but no entry in the segment + # summary table are present. + if checkSegmentSummary in ['warn', 'raise_error']: + logger.info("Checking the segment summary table against frames.") + dfScienceSegs = get_science_segs_from_datafind_outs(datafindcaches) + missingFlag = False + # NOTE: Should this be overrideable in the config file? + sci_seg_name = "SCIENCE" + if seg_file is None: + err_msg = "You must provide the science segments SegFile object " + err_msg += "if using the datafind-check-segment-summary option." + raise ValueError(err_msg) + if seg_file.seg_summ_dict is None: + err_msg = "The provided science segments SegFile object must " + err_msg += "contain a valid segment_summary table if using the " + err_msg += "datafind-check-segment-summary option." + raise ValueError(err_msg) + seg_summary_times = seg_file.seg_summ_dict + for ifo in dfScienceSegs.keys(): + curr_seg_summ_times = seg_summary_times[ifo + ":" + sci_seg_name] + missing = (dfScienceSegs[ifo] & seg_file.valid_segments) + missing.coalesce() + missing = missing - curr_seg_summ_times + missing.coalesce() + scienceButNotFrame = scienceSegs[ifo] - dfScienceSegs[ifo] + scienceButNotFrame.coalesce() + missing2 = scienceSegs[ifo] - scienceButNotFrame + missing2.coalesce() + missing2 = missing2 - curr_seg_summ_times + missing2.coalesce() + if abs(missing): + msg = "From ifo %s the following times have frames, " %(ifo) + msg += "but are not covered in the segment summary table." + msg += "\n%s" % "\n".join(map(str, missing)) + logger.error(msg) + missingFlag = True + if abs(missing2): + msg = "From ifo %s the following times have frames, " %(ifo) + msg += "are science, and are not covered in the segment " + msg += "summary table." + msg += "\n%s" % "\n".join(map(str, missing2)) + logger.error(msg) + missingFlag = True + if checkSegmentSummary == 'raise_error' and missingFlag: + errMsg = "Segment_summary discrepancy detected, exiting." + raise ValueError(errMsg) + elif checkSegmentSummary == 'no_test': + pass + else: + errMsg = "checkSegmentSummary kwarg must take a value from 'no_test', " + errMsg += "'warn', or 'raise_error'." + raise ValueError(errMsg) + + # Now need to create the file for SCIENCE_AVAILABLE + sci_avlble_dict = segments.segmentlistdict() + # NOTE: Should this be overrideable in the config file? 
+ sci_avlble_name = "SCIENCE_AVAILABLE" + for ifo in scienceSegs.keys(): + sci_avlble_dict[ifo + ':' + sci_avlble_name] = scienceSegs[ifo] + + sci_avlble_file = SegFile.from_segment_list_dict('SCIENCE_AVAILABLE', + sci_avlble_dict, ifo_list = scienceSegs.keys(), + valid_segment=workflow.analysis_time, + extension='.xml', tags=tags, directory=outputDir) + + logger.info("Leaving datafind module") + if datafind_method == "AT_RUNTIME_FAKE_DATA": + datafindouts = None + else: + datafindouts = FileList(datafindouts) + + + return datafindouts, sci_avlble_file, scienceSegs, sci_avlble_name
+ + + +
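A sketch of the [workflow-datafind] options this function reads, written here with the standard library ConfigParser purely for illustration; the frame type value is an assumption, and real workflows set these options in their .ini files and use PyCBC's own configuration parser.

import configparser

cp = configparser.ConfigParser()
cp['workflow-datafind'] = {
    # One of the datafind-method values accepted above.
    'datafind-method': 'AT_RUNTIME_SINGLE_FRAMES',
    # The gap and frame checks accept 'no_test', 'warn', 'update_times' or
    # 'raise_error'; the segment-summary check accepts 'no_test', 'warn' or
    # 'raise_error'.
    'datafind-check-segment-gaps': 'update_times',
    'datafind-check-frames-exist': 'warn',
    'datafind-check-segment-summary': 'no_test',
    # Frame type read by the per-ifo query functions (illustrative value).
    'datafind-h1-frame-type': 'H1_HOFT_C00',
}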
+[docs] +def setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs, + outputDir, tags=None): + """ + This function uses the `gwdatafind` library to obtain the location of all + the frame files that will be needed to cover the analysis of the data + given in scienceSegs. This function will not check if the returned frames + cover the whole time requested, such sanity checks are done in the + pycbc.workflow.setup_datafind_workflow entry function. As opposed to + setup_datafind_runtime_single_call_perifo this call will one call to the + datafind server for every science segment. This function will return a list + of output files that correspond to the cache .lcf files that are produced, + which list the locations of all frame files. This will cause problems with + pegasus, which expects to know about all input files (ie. the frame files + themselves.) + + Parameters + ----------- + cp : ConfigParser.ConfigParser instance + This contains a representation of the information stored within the + workflow configuration files + scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances + This contains the times that the workflow is expected to analyse. + outputDir : path + All output files written by datafind processes will be written to this + directory. + tags : list of strings, optional (default=None) + Use this to specify tags. This can be used if this module is being + called more than once to give call specific configuration (by setting + options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). + This is also used to tag the Files returned by the class to uniqueify + the Files and uniqueify the actual filename. + FIXME: Filenames may not be unique with current codes! + + Returns + -------- + datafindcaches : list of glue.lal.Cache instances + The glue.lal.Cache representations of the various calls to the datafind + server and the returned frame files. + datafindOuts : pycbc.workflow.core.FileList + List of all the datafind output files for use later in the pipeline. + + """ + if tags is None: + tags = [] + + # Now ready to loop over the input segments + datafindouts = [] + datafindcaches = [] + logger.info("Querying datafind server for all science segments.") + for ifo, scienceSegsIfo in scienceSegs.items(): + observatory = ifo[0].upper() + frameType = cp.get_opt_tags("workflow-datafind", + "datafind-%s-frame-type" % (ifo.lower()), tags) + for seg in scienceSegsIfo: + msg = "Finding data between %d and %d " %(seg[0],seg[1]) + msg += "for ifo %s" %(ifo) + logger.info(msg) + # WARNING: For now the workflow will expect times to be in integer seconds + startTime = int(seg[0]) + endTime = int(seg[1]) + + # Sometimes the connection can drop, so try a backup here + try: + cache, cache_file = run_datafind_instance( + cp, + outputDir, + observatory, + frameType, + startTime, + endTime, + ifo, + tags=tags + ) + except: + cache, cache_file = run_datafind_instance( + cp, + outputDir, + observatory, + frameType, + startTime, + endTime, + ifo, + tags=tags + ) + datafindouts.append(cache_file) + datafindcaches.append(cache) + return datafindcaches, datafindouts
+ + +
+[docs] +def setup_datafind_runtime_cache_single_call_perifo(cp, scienceSegs, outputDir, + tags=None): + """ + This function uses the `gwdatafind` library to obtain the location of all + the frame files that will be needed to cover the analysis of the data + given in scienceSegs. This function will not check if the returned frames + cover the whole time requested, such sanity checks are done in the + pycbc.workflow.setup_datafind_workflow entry function. As opposed to + setup_datafind_runtime_generated this call will only run one call to + datafind per ifo, spanning the whole time. This function will return a list + of output files that correspond to the cache .lcf files that are produced, + which list the locations of all frame files. This will cause problems with + pegasus, which expects to know about all input files (ie. the frame files + themselves.) + + Parameters + ----------- + cp : ConfigParser.ConfigParser instance + This contains a representation of the information stored within the + workflow configuration files + scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances + This contains the times that the workflow is expected to analyse. + outputDir : path + All output files written by datafind processes will be written to this + directory. + tags : list of strings, optional (default=None) + Use this to specify tags. This can be used if this module is being + called more than once to give call specific configuration (by setting + options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). + This is also used to tag the Files returned by the class to uniqueify + the Files and uniqueify the actual filename. + FIXME: Filenames may not be unique with current codes! + + Returns + -------- + datafindcaches : list of glue.lal.Cache instances + The glue.lal.Cache representations of the various calls to the datafind + server and the returned frame files. + datafindOuts : pycbc.workflow.core.FileList + List of all the datafind output files for use later in the pipeline. + + """ + if tags is None: + tags = [] + + # We want to ignore gaps as the detectors go up and down and calling this + # way will give gaps. See the setup_datafind_runtime_generated function + # for datafind calls that only query for data that will exist + cp.set("datafind","on_gaps","ignore") + + # Now ready to loop over the input segments + datafindouts = [] + datafindcaches = [] + logger.info("Querying datafind server for all science segments.") + for ifo, scienceSegsIfo in scienceSegs.items(): + observatory = ifo[0].upper() + checked_times = segments.segmentlist([]) + frame_types = cp.get_opt_tags( + "workflow-datafind", + "datafind-%s-frame-type" % (ifo.lower()), tags + ) + # Check if this is one type, or time varying + frame_types = frame_types.replace(' ', '').strip().split(',') + for ftype in frame_types: + # Check the times, default to full time initially + # This REQUIRES a coalesced segment list to work + start = int(scienceSegsIfo[0][0]) + end = int(scienceSegsIfo[-1][1]) + # Then check for limits. 
We're expecting something like: + # value[start:end], so need to extract value, start and end + if '[' in ftype: + # This gets start and end out + bopt = ftype.split('[')[1].split(']')[0] + newstart, newend = bopt.split(':') + # Then check if the times are within science time + start = max(int(newstart), start) + end = min(int(newend), end) + if end <= start: + continue + # This extracts value + ftype = ftype.split('[')[0] + curr_times = segments.segment(start, end) + # The times here must be distinct. We cannot have two different + # frame files at the same time from the same ifo. + if checked_times.intersects_segment(curr_times): + err_msg = "Different frame types cannot overlap in time." + raise ValueError(err_msg) + checked_times.append(curr_times) + + # Ask datafind where the frames are + try: + cache, cache_file = run_datafind_instance( + cp, + outputDir, + observatory, + ftype, + start, + end, + ifo, + tags=tags + ) + except: + cache, cache_file = run_datafind_instance( + cp, + outputDir, + observatory, + ftype, + start, + end, + ifo, + tags=tags + ) + + datafindouts.append(cache_file) + datafindcaches.append(cache) + return datafindcaches, datafindouts
+ + +
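A standalone sketch of the time-varying frame-type syntax handled above; the type names and GPS ranges are invented.

ftype_opt = 'H1_TYPE_A[100:200],H1_TYPE_B[200:400]'
for ftype in ftype_opt.replace(' ', '').strip().split(','):
    if '[' in ftype:
        bopt = ftype.split('[')[1].split(']')[0]
        start, end = (int(t) for t in bopt.split(':'))
        ftype = ftype.split('[')[0]
        print(ftype, start, end)  # each type is queried only over its range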
+[docs] +def setup_datafind_runtime_frames_single_call_perifo(cp, scienceSegs, + outputDir, tags=None): + """ + This function uses the `gwdatafind` library to obtain the location of all + the frame files that will be needed to cover the analysis of the data + given in scienceSegs. This function will not check if the returned frames + cover the whole time requested, such sanity checks are done in the + pycbc.workflow.setup_datafind_workflow entry function. As opposed to + setup_datafind_runtime_generated this call will only run one call to + datafind per ifo, spanning the whole time. This function will return a list + of files corresponding to the individual frames returned by the datafind + query. This will allow pegasus to more easily identify all the files used + as input, but may cause problems for codes that need to take frame cache + files as input. + + Parameters + ----------- + cp : ConfigParser.ConfigParser instance + This contains a representation of the information stored within the + workflow configuration files + scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances + This contains the times that the workflow is expected to analyse. + outputDir : path + All output files written by datafind processes will be written to this + directory. + tags : list of strings, optional (default=None) + Use this to specify tags. This can be used if this module is being + called more than once to give call specific configuration (by setting + options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). + This is also used to tag the Files returned by the class to uniqueify + the Files and uniqueify the actual filename. + FIXME: Filenames may not be unique with current codes! + + Returns + -------- + datafindcaches : list of glue.lal.Cache instances + The glue.lal.Cache representations of the various calls to the datafind + server and the returned frame files. + datafindOuts : pycbc.workflow.core.FileList + List of all the datafind output files for use later in the pipeline. + + """ + datafindcaches, _ = \ + setup_datafind_runtime_cache_single_call_perifo(cp, scienceSegs, + outputDir, tags=tags) + + datafindouts = convert_cachelist_to_filelist(datafindcaches) + + return datafindcaches, datafindouts
+ + +
+[docs] +def setup_datafind_runtime_frames_multi_calls_perifo(cp, scienceSegs, + outputDir, tags=None): + """ + This function uses the `gwdatafind` library to obtain the location of all + the frame files that will be needed to cover the analysis of the data + given in scienceSegs. This function will not check if the returned frames + cover the whole time requested, such sanity checks are done in the + pycbc.workflow.setup_datafind_workflow entry function. As opposed to + setup_datafind_runtime_single_call_perifo this call will one call to the + datafind server for every science segment. This function will return a list + of files corresponding to the individual frames returned by the datafind + query. This will allow pegasus to more easily identify all the files used + as input, but may cause problems for codes that need to take frame cache + files as input. + + Parameters + ----------- + cp : ConfigParser.ConfigParser instance + This contains a representation of the information stored within the + workflow configuration files + scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances + This contains the times that the workflow is expected to analyse. + outputDir : path + All output files written by datafind processes will be written to this + directory. + tags : list of strings, optional (default=None) + Use this to specify tags. This can be used if this module is being + called more than once to give call specific configuration (by setting + options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). + This is also used to tag the Files returned by the class to uniqueify + the Files and uniqueify the actual filename. + FIXME: Filenames may not be unique with current codes! + + Returns + -------- + datafindcaches : list of glue.lal.Cache instances + The glue.lal.Cache representations of the various calls to the datafind + server and the returned frame files. + datafindOuts : pycbc.workflow.core.FileList + List of all the datafind output files for use later in the pipeline. + + """ + datafindcaches, _ = \ + setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs, + outputDir, tags=tags) + + datafindouts = convert_cachelist_to_filelist(datafindcaches) + + return datafindcaches, datafindouts
+ + +
+[docs] +def setup_datafind_from_pregenerated_lcf_files(cp, ifos, outputDir, tags=None): + """ + This function is used if you want to run with pregenerated lcf frame + cache files. + + Parameters + ----------- + cp : ConfigParser.ConfigParser instance + This contains a representation of the information stored within the + workflow configuration files + ifos : list of ifo strings + List of ifos to get pregenerated files for. + outputDir : path + All output files written by datafind processes will be written to this + directory. Currently this sub-module writes no output. + tags : list of strings, optional (default=None) + Use this to specify tags. This can be used if this module is being + called more than once to give call specific configuration (by setting + options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). + This is also used to tag the Files returned by the class to uniqueify + the Files and uniqueify the actual filename. + + Returns + -------- + datafindcaches : list of glue.lal.Cache instances + The glue.lal.Cache representations of the various calls to the datafind + server and the returned frame files. + datafindOuts : pycbc.workflow.core.FileList + List of all the datafind output files for use later in the pipeline. + """ + from glue import lal + + if tags is None: + tags = [] + + datafindcaches = [] + for ifo in ifos: + search_string = "datafind-pregenerated-cache-file-%s" %(ifo.lower(),) + frame_cache_file_name = cp.get_opt_tags("workflow-datafind", + search_string, tags=tags) + curr_cache = lal.Cache.fromfilenames([frame_cache_file_name], + coltype=lal.LIGOTimeGPS) + curr_cache.ifo = ifo + datafindcaches.append(curr_cache) + datafindouts = convert_cachelist_to_filelist(datafindcaches) + + return datafindcaches, datafindouts
+ + +
+[docs] +def convert_cachelist_to_filelist(datafindcache_list): + """ + Take as input a list of glue.lal.Cache objects and return a pycbc FileList + containing all frames within those caches. + + Parameters + ----------- + datafindcache_list : list of glue.lal.Cache objects + The list of cache files to convert. + + Returns + -------- + datafind_filelist : FileList of frame File objects + The list of frame files. + """ + prev_file = None + prev_name = None + this_name = None + + datafind_filelist = FileList([]) + + for cache in datafindcache_list: + # sort the cache into time sequential order + cache.sort() + curr_ifo = cache.ifo + for frame in cache: + # Pegasus doesn't like "localhost" in URLs. + frame.url = frame.url.replace('file://localhost', 'file://') + # Not sure why it happens in OSDF URLs!! + # May need to remove use of Cache objects + frame.url = frame.url.replace('osdf://localhost/', 'osdf:///') + + # Create one File() object for each unique frame file that we + # get back in the cache. + if prev_file: + prev_name = os.path.basename(prev_file.cache_entry.url) + this_name = os.path.basename(frame.url) + + if (prev_file is None) or (prev_name != this_name): + currFile = File(curr_ifo, frame.description, + frame.segment, file_url=frame.url, use_tmp_subdirs=True) + datafind_filelist.append(currFile) + prev_file = currFile + + # Populate the PFNs for the File() we just created + cvmfs_urls = ('file:///cvmfs/', 'osdf://') + if frame.url.startswith(cvmfs_urls): + # Frame is on CVMFS/OSDF, so let all sites read it directly. + currFile.add_pfn(frame.url, site='all') + elif frame.url.startswith('file://'): + # Frame not on CVMFS, so may need transferring. + # Be careful here! If all your frames files are on site + # = local and you try to run on OSG, it will likely + # overwhelm the condor file transfer process! + currFile.add_pfn(frame.url, site='local') + else: + # Frame is at some unknown URL. Pegasus will decide how to deal + # with this, but will likely transfer to local site first, and + # from there transfer to remote sites as needed. + currFile.add_pfn(frame.url, site='notlocal') + + return datafind_filelist
+ + + +
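A standalone sketch of the PFN site selection applied above: frames on CVMFS or OSDF are marked readable from every site, plain file:// frames only from the local site, and anything else is left for Pegasus to transfer. The example URLs are invented.

def pfn_site_for(url):
    if url.startswith(('file:///cvmfs/', 'osdf://')):
        return 'all'
    if url.startswith('file://'):
        return 'local'
    return 'notlocal'

print(pfn_site_for('file:///cvmfs/example.org/frames/H-H1_TYPE-100-4096.gwf'))  # all
print(pfn_site_for('file:///archive/frames/H-H1_TYPE-100-4096.gwf'))            # local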
+[docs] +def get_science_segs_from_datafind_outs(datafindcaches): + """ + This function will calculate the science segments that are covered in + the OutGroupList containing the frame files returned by various + calls to the datafind server. This can then be used to check whether this + list covers what it is expected to cover. + + Parameters + ---------- + datafindcaches : OutGroupList + List of all the datafind output files. + + Returns + -------- + newScienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances + The times covered by the frames found in datafindOuts. + """ + newScienceSegs = {} + for cache in datafindcaches: + if len(cache) > 0: + groupSegs = segments.segmentlist(e.segment for e in cache).coalesce() + ifo = cache.ifo + if ifo not in newScienceSegs: + newScienceSegs[ifo] = groupSegs + else: + newScienceSegs[ifo].extend(groupSegs) + newScienceSegs[ifo].coalesce() + return newScienceSegs
+ + +
+[docs] +def get_missing_segs_from_frame_file_cache(datafindcaches): + """ + This function will use os.path.isfile to determine if all the frame files + returned by the local datafind server actually exist on the disk. This can + then be used to update the science times if needed. + + Parameters + ----------- + datafindcaches : OutGroupList + List of all the datafind output files. + + Returns + -------- + missingFrameSegs : Dict. of ifo keyed ligo.segments.segmentlist instances + The times corresponding to missing frames found in datafindOuts. + missingFrames: Dict. of ifo keyed lal.Cache instances + The list of missing frames + """ + from glue import lal + + missingFrameSegs = {} + missingFrames = {} + for cache in datafindcaches: + if len(cache) > 0: + # Don't bother if these are not file:// urls, assume all urls in + # one cache file must be the same type + if not cache[0].scheme == 'file': + warn_msg = "We have %s entries in the " %(cache[0].scheme,) + warn_msg += "cache file. I do not check if these exist." + logger.warning(warn_msg) + continue + _, currMissingFrames = cache.checkfilesexist(on_missing="warn") + missingSegs = segments.segmentlist(e.segment \ + for e in currMissingFrames).coalesce() + ifo = cache.ifo + if ifo not in missingFrameSegs: + missingFrameSegs[ifo] = missingSegs + missingFrames[ifo] = lal.Cache(currMissingFrames) + else: + missingFrameSegs[ifo].extend(missingSegs) + # NOTE: This .coalesce probably isn't needed as the segments + # should be disjoint. If speed becomes an issue maybe remove it? + missingFrameSegs[ifo].coalesce() + missingFrames[ifo].extend(currMissingFrames) + return missingFrameSegs, missingFrames
+ + + +
+[docs] +def get_segment_summary_times(scienceFile, segmentName): + """ + This function will find the times for which the segment_summary is set + for the flag given by segmentName. + + Parameters + ----------- + scienceFile : SegFile + The segment file that we want to use to determine this. + segmentName : string + The DQ flag to search for times in the segment_summary table. + + Returns + --------- + summSegList : ligo.segments.segmentlist + The times that are covered in the segment summary table. + """ + # Parse the segmentName + segmentName = segmentName.split(':') + if not len(segmentName) in [2, 3]: + raise ValueError(f"Invalid channel name {segmentName}.") + ifo = segmentName[0] + channel = segmentName[1] + version = '' + if len(segmentName) == 3: + version = int(segmentName[2]) + + # Load the filename + xmldoc = utils.load_filename( + scienceFile.cache_entry.path, + compress='auto', + contenthandler=LIGOLWContentHandler + ) + + # Get the segment_def_id for the segmentName + segmentDefTable = table.Table.get_table(xmldoc, "segment_definer") + for entry in segmentDefTable: + if (entry.ifos == ifo) and (entry.name == channel): + if len(segmentName) == 2 or (entry.version==version): + segDefID = entry.segment_def_id + break + else: + raise ValueError("Cannot find channel %s in segment_definer table."\ + %(segmentName)) + + # Get the segmentlist corresponding to this segmentName in segment_summary + segmentSummTable = table.Table.get_table(xmldoc, "segment_summary") + summSegList = segments.segmentlist([]) + for entry in segmentSummTable: + if entry.segment_def_id == segDefID: + segment = segments.segment(entry.start_time, entry.end_time) + summSegList.append(segment) + summSegList.coalesce() + + return summSegList
+ + +
+[docs] +def run_datafind_instance(cp, outputDir, observatory, frameType, + startTime, endTime, ifo, tags=None): + """ + This function will query the datafind server once to find frames between + the specified times for the specified frame type and observatory. + + Parameters + ---------- + cp : ConfigParser instance + Source for any kwargs that should be sent to the datafind module + outputDir : Output cache files will be written here. We also write the + commands for reproducing what is done in this function to this + directory. + observatory : string + The observatory to query frames for. Ex. 'H', 'L' or 'V'. NB: not + 'H1', 'L1', 'V1' which denote interferometers. + frameType : string + The frame type to query for. + startTime : int + Integer start time to query the datafind server for frames. + endTime : int + Integer end time to query the datafind server for frames. + ifo : string + The interferometer to use for naming output. Ex. 'H1', 'L1', 'V1'. + Maybe this could be merged with the observatory string, but this + could cause issues if running on old 'H2' and 'H1' data. + tags : list of string, optional (default=None) + Use this to specify tags. This can be used if this module is being + called more than once to give call specific configuration (by setting + options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). + This is also used to tag the Files returned by the class to uniqueify + the Files and uniquify the actual filename. + FIXME: Filenames may not be unique with current codes! + + Returns + -------- + dfCache : glue.lal.Cache instance + The glue.lal.Cache representation of the call to the datafind + server and the returned frame files. + cacheFile : pycbc.workflow.core.File + Cache file listing all of the datafind output files for use later in the pipeline. + + """ + from glue import lal + + if tags is None: + tags = [] + + # Determine if we should override the default datafind server + if cp.has_option_tags("workflow-datafind", + "datafind-ligo-datafind-server", tags): + datafind_server = cp.get_opt_tags( + "workflow-datafind", + "datafind-ligo-datafind-server", + tags + ) + else: + datafind_server = None + + seg = segments.segment([startTime, endTime]) + # Take the datafind kwargs from config (usually urltype=file is + # given). + dfKwargs = {} + # By default ignore missing frames, this case is dealt with outside of here + dfKwargs['on_gaps'] = 'ignore' + if cp.has_section("datafind"): + for item, value in cp.items("datafind"): + dfKwargs[item] = value + for tag in tags: + if cp.has_section('datafind-%s' %(tag)): + for item, value in cp.items("datafind-%s" %(tag)): + dfKwargs[item] = value + + # It is useful to print the corresponding command to the logs + # directory to check if this was expected. 
+ log_datafind_command(observatory, frameType, startTime, endTime, + os.path.join(outputDir,'logs'), **dfKwargs) + logger.debug("Asking datafind server for frames.") + dfCache = lal.Cache.from_urls( + find_frame_urls( + observatory, + frameType, + startTime, + endTime, + host=datafind_server, + **dfKwargs + ), + ) + logger.debug("Frames returned") + # workflow format output file + cache_file = File(ifo, 'DATAFIND', seg, extension='lcf', + directory=outputDir, tags=tags) + cache_file.add_pfn(cache_file.cache_entry.path, site='local') + + dfCache.ifo = ifo + # Dump output to file + fP = open(cache_file.storage_path, "w") + # FIXME: CANNOT use dfCache.tofile because it will print 815901601.00000 + # as a gps time which is incompatible with the lal cache format + # (and the C codes) which demand an integer. + #dfCache.tofile(fP) + for entry in dfCache: + start = str(int(entry.segment[0])) + duration = str(int(abs(entry.segment))) + print("%s %s %s %s %s" \ + % (entry.observatory, entry.description, start, duration, entry.url), file=fP) + entry.segment = segments.segment(int(entry.segment[0]), int(entry.segment[1])) + + fP.close() + return dfCache, cache_file
+ + + +
+[docs] +def log_datafind_command(observatory, frameType, startTime, endTime, + outputDir, **dfKwargs): + """ + This function will write an equivalent gw_data_find command to disk that + can be used to debug why the internal datafind module is not working. + """ + # FIXME: This does not accurately reproduce the call as assuming the + # kwargs will be the same is wrong, so some things need to be converted + # "properly" to the command line equivalent. + gw_command = ['gw_data_find', '--observatory', observatory, + '--type', frameType, + '--gps-start-time', str(startTime), + '--gps-end-time', str(endTime)] + + for name, value in dfKwargs.items(): + if name == 'match': + gw_command.append("--match") + gw_command.append(str(value)) + elif name == 'urltype': + gw_command.append("--url-type") + gw_command.append(str(value)) + elif name == 'on_gaps': + pass + else: + errMsg = "Unknown datafind kwarg given: %s. " %(name) + errMsg += "This argument is stripped in the logged .sh command." + logger.warning(errMsg) + + fileName = "%s-%s-%d-%d.sh" \ + %(observatory, frameType, startTime, endTime-startTime) + filePath = os.path.join(outputDir, fileName) + fP = open(filePath, 'w') + fP.write(' '.join(gw_command)) + fP.close()
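Continuing the hypothetical values from the previous sketch, the logged shell script would contain a single command line along the lines of the trailing comment below (only the match/urltype kwargs survive the translation, as warned in the function):

# Hypothetical example: log the equivalent gw_data_find call.
log_datafind_command('H', 'H1_HOFT_C00', 1126259400, 1126259600,
                     'datafind_output/logs', urltype='file')
# The file H-H1_HOFT_C00-1126259400-200.sh would then contain (one line):
# gw_data_find --observatory H --type H1_HOFT_C00 --gps-start-time 1126259400 --gps-end-time 1126259600 --url-type file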
+ + +
+[docs] +def datafind_keep_unique_backups(backup_outs, orig_outs): + """This function takes a list of backup datafind files, presumably + obtained by querying a remote datafind server (e.g. CIT), and compares + these against a list of original datafind files, presumably obtained by + querying the local datafind server. Only the datafind files in the backup + list that do not appear in the original list are returned. This allows us + to use only files that are missing from the local cluster. + + Parameters + ---------- + backup_outs : FileList + List of datafind files from the remote datafind server. + orig_outs : FileList + List of datafind files from the local datafind server. + + Returns + ------- + FileList + List of datafind files in backup_outs and not in orig_outs. + """ + # NOTE: This function is not optimized and could be made considerably + # quicker if speed becomes an issue. With 4s frame files this might + # be slow, but for >1000s files I don't foresee any issue, so I keep + # this simple. + return_list = FileList([]) + # We compare the LFNs to determine uniqueness + # Is there a way to associate two paths with one LFN?? + orig_names = [f.name for f in orig_outs] + for file in backup_outs: + if file.name not in orig_names: + return_list.append(file) + else: + index_num = orig_names.index(file.name) + orig_out = orig_outs[index_num] + pfns = list(file.pfns) + # This shouldn't happen, but catch if it does + assert(len(pfns) == 1) + orig_out.add_pfn(pfns[0].url, site='notlocal') + + return return_list
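A short sketch of how this might be used when a local datafind query is supplemented by a remote one. Both FileList inputs are assumed to come from earlier datafind calls; the variable names are hypothetical:

# Hypothetical example: add only the backup frames that are missing locally.
missing_frames = datafind_keep_unique_backups(remote_datafind_outs,
                                              local_datafind_outs)
local_datafind_outs.extend(missing_frames)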
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/dq.html b/latest/html/_modules/pycbc/workflow/dq.html new file mode 100644 index 00000000000..bc82698fb4f --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/dq.html @@ -0,0 +1,283 @@ + + + + + + pycbc.workflow.dq — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.workflow.dq

+# Copyright (C) 2020 Max Trevor and Derek Davis
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+import logging
+from pycbc.workflow.core import (FileList, Executable, Node, make_analysis_dir)
+
+logger = logging.getLogger('pycbc.workflow.dq')
+
+
+
+[docs] +class PyCBCBinTemplatesDQExecutable(Executable): + current_retention_level = Executable.MERGED_TRIGGERS + +
+[docs] + def create_node(self, workflow, ifo, template_bank_file): + node = Node(self) + node.add_opt('--ifo', ifo) + node.add_input_opt('--bank-file', template_bank_file) + node.new_output_file_opt( + workflow.analysis_time, '.hdf', '--output-file') + return node
+
+ + + +
+[docs] +class PyCBCBinTriggerRatesDQExecutable(Executable): + current_retention_level = Executable.MERGED_TRIGGERS + +
+[docs] + def create_node(self, workflow, flag_file, flag_name, + analysis_segment_file, analysis_segment_name, + trig_file, template_bins_file): + node = Node(self) + node.add_input_opt('--template-bins-file', template_bins_file) + node.add_input_opt('--trig-file', trig_file) + node.add_input_opt('--flag-file', flag_file) + node.add_opt('--flag-name', flag_name) + node.add_input_opt('--analysis-segment-file', analysis_segment_file) + node.add_opt('--analysis-segment-name', analysis_segment_name) + node.new_output_file_opt(workflow.analysis_time, '.hdf', + '--output-file') + return node
+
+ + + +
+[docs] +def setup_dq_reranking(workflow, insps, bank, + analyzable_seg_file, + analyzable_name, + dq_seg_file, + output_dir=None, tags=None): + logger.info("Setting up dq reranking") + make_analysis_dir(output_dir) + output_files = FileList() + output_labels = [] + if tags is None: + tags = [] + + dq_labels = workflow.cp.get_subsections('workflow-data_quality') + + dq_ifos = {} + dq_names = {} + dq_types = {} + for dql in dq_labels: + dq_ifos[dql] = workflow.cp.get_opt_tags( + 'workflow-data_quality', 'dq-ifo', [dql]) + dq_names[dql] = workflow.cp.get_opt_tags( + 'workflow-data_quality', 'dq-name', [dql]) + dq_types[dql] = workflow.cp.get_opt_tags( + 'workflow-data_quality', 'dq-type', [dql]) + + ifos = set(dq_ifos.values()) + + for ifo in ifos: + # get the dq label, type, and name for this ifo + ifo_dq_labels = [dql for dql in dq_labels if (dq_ifos[dql] == ifo)] + assert len(ifo_dq_labels) < 2, f"Received multiple dq files for {ifo}" + dq_label = ifo_dq_labels[0] + dq_name = dq_names[dq_label] + dq_type = dq_types[dq_label] + + dq_tags = tags + [dq_label] + + # get triggers for this ifo + ifo_insp = [insp for insp in insps if (insp.ifo == ifo)] + assert len(ifo_insp) == 1, \ + f"Received more than one inspiral file for {ifo}" + ifo_insp = ifo_insp[0] + + # calculate template bins for this ifo + bin_templates_exe = PyCBCBinTemplatesDQExecutable( + workflow.cp, + 'bin_templates', + ifos=ifo, + out_dir=output_dir, + tags=tags) + bin_templates_node = bin_templates_exe.create_node(workflow, ifo, bank) + workflow += bin_templates_node + template_bins_file = bin_templates_node.output_file + + if dq_type == 'flag': + flag_file = dq_seg_file + flag_name = dq_name + else: + raise ValueError(f"{dq_type} dq support not yet implemented") + + # calculate trigger rates during dq flags + bin_triggers_exe = PyCBCBinTriggerRatesDQExecutable( + workflow.cp, + 'bin_trigger_rates_dq', + ifos=ifo, + out_dir=output_dir, + tags=dq_tags) + bin_triggers_node = bin_triggers_exe.create_node( + workflow, + flag_file, + flag_name, + analyzable_seg_file, + analyzable_name, + ifo_insp, + template_bins_file) + workflow += bin_triggers_node + output_files += bin_triggers_node.output_files + output_labels += [dq_label] + + logger.info("Finished setting up dq reranking") + return output_files, output_labels
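A sketch of a typical call, assuming the inspiral trigger files, template bank, and segment files were produced by earlier stages of the workflow (all variable names and the segment name are hypothetical):

# Hypothetical example: rerank triggers using data-quality information.
dq_files, dq_labels = setup_dq_reranking(
    workflow, insp_files, bank_file,
    analyzable_seg_file, 'analyzable_segments',
    dq_seg_file, output_dir='dq', tags=['full_data'])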
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/grb_utils.html b/latest/html/_modules/pycbc/workflow/grb_utils.html new file mode 100644 index 00000000000..6dbc2ddb044 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/grb_utils.html @@ -0,0 +1,1013 @@ + + + + + + pycbc.workflow.grb_utils — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.workflow.grb_utils

+# Copyright (C) 2015  Andrew Williamson, Francesco Pannarale
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""
+This library code contains functions and classes that are used in the
+generation of pygrb workflows. For details about pycbc.workflow see here:
+http://pycbc.org/pycbc/latest/html/workflow.html
+"""
+
+import glob
+import os
+import logging
+import numpy as np
+from scipy.stats import rayleigh
+from gwdatafind.utils import filename_metadata
+
+from pycbc import makedir
+from pycbc.workflow.core import \
+    File, FileList, resolve_url_to_file,\
+    Executable, Node
+from pycbc.workflow.jobsetup import select_generic_executable
+from pycbc.workflow.pegasus_workflow import SubWorkflow
+from pycbc.workflow.plotting import PlotExecutable
+
+logger = logging.getLogger('pycbc.workflow.grb_utils')
+
+
+def _select_grb_pp_class(wflow, curr_exe):
+    """
+    This function returns the class for PyGRB post-processing scripts.
+
+    Parameters
+    ----------
+    curr_exe : string
+        The name of the executable
+
+    Returns
+    -------
+    exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility
+        functions appropriate for the given executable.  Instances of the class
+        ('jobs') **must** have methods
+        * job.create_node()
+        and
+        * job.get_valid_times(ifo, )
+    """
+    exe_path = wflow.cp.get('executables', curr_exe)
+    exe_name = os.path.basename(exe_path)
+    exe_to_class_map = {
+        'pycbc_grb_trig_combiner': PycbcGrbTrigCombinerExecutable,
+        'pycbc_grb_trig_cluster': PycbcGrbTrigClusterExecutable,
+        'pycbc_grb_inj_finder': PycbcGrbInjFinderExecutable
+    }
+    if exe_name not in exe_to_class_map:
+        raise ValueError(f"No job class exists for executable {curr_exe}")
+
+    return exe_to_class_map[exe_name]
+
+
+
+[docs] +def set_grb_start_end(cp, start, end): + """ + Function to update analysis boundaries as workflow is generated + + Parameters + ---------- + cp : pycbc.workflow.configuration.WorkflowConfigParser object + The parsed configuration options of a pycbc.workflow.core.Workflow. + + start : int + The start of the workflow analysis time. + + end : int + The end of the workflow analysis time. + + Returns + -------- + cp : pycbc.workflow.configuration.WorkflowConfigParser object + The modified WorkflowConfigParser object. + + """ + cp.set("workflow", "start-time", str(start)) + cp.set("workflow", "end-time", str(end)) + + return cp
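For example, a PyGRB workflow centred on a hypothetical trigger time might update its boundaries like this:

# Hypothetical example: analyse from 5000 s before to 1000 s after the trigger.
trigger_time = 1264316116
cp = set_grb_start_end(cp, trigger_time - 5000, trigger_time + 1000)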
+ + + +
+[docs] +def make_gating_node(workflow, datafind_files, outdir=None, tags=None): + ''' + Generate jobs for autogating the data for PyGRB runs. + + Parameters + ---------- + workflow: pycbc.workflow.core.Workflow + An instanced class that manages the constructed workflow. + datafind_files : pycbc.workflow.core.FileList + A FileList containing the frame files to be gated. + outdir : string + Path of the output directory + tags : list of strings + If given these tags are used to uniquely name and identify output files + that would be produced in multiple calls to this function. + + Returns + -------- + condition_strain_nodes : list + List containing the pycbc.workflow.core.Node objects representing the + autogating jobs. + condition_strain_outs : pycbc.workflow.core.FileList + FileList containing the pycbc.workflow.core.File objects representing + the gated frame files. + ''' + + cp = workflow.cp + if tags is None: + tags = [] + + condition_strain_class = select_generic_executable(workflow, + "condition_strain") + condition_strain_nodes = [] + condition_strain_outs = FileList([]) + for ifo in workflow.ifos: + input_files = FileList([datafind_file for datafind_file in + datafind_files if datafind_file.ifo == ifo]) + condition_strain_jobs = condition_strain_class(cp, "condition_strain", + ifos=ifo, + out_dir=outdir, + tags=tags) + condition_strain_node, condition_strain_out = \ + condition_strain_jobs.create_node(input_files, tags=tags) + condition_strain_nodes.append(condition_strain_node) + condition_strain_outs.extend(FileList([condition_strain_out])) + + return condition_strain_nodes, condition_strain_outs
+ + + +
+[docs] +def fermi_core_tail_model( + sky_err, rad, core_frac=0.98, core_sigma=3.6, tail_sigma=29.6): + """Fermi systematic error model following + https://arxiv.org/abs/1909.03006, with default values valid + before 11 September 2019. + + Parameters + ---------- + sky_err : float + The reported statistical 1-sigma sky error of the trigger. + rad : numpy.ndarray + Array of radii (in degrees) at which to evaluate the model. + core_frac : float + Fraction of the systematic uncertainty contained within the core + component. + core_sigma : float + Size of the GBM systematic core component. + tail_sigma : float + Size of the GBM systematic tail component. + + Returns + ------- + generator + Generator yielding the core and tail probability distributions + as a function of radius. + """ + scaledsq = sky_err**2 / -2 / np.log(0.32) + return ( + frac * (1 - np.exp(-0.5 * (rad / np.sqrt(scaledsq + sigma**2))**2)) + for frac, sigma + in zip([core_frac, 1 - core_frac], [core_sigma, tail_sigma]))
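A small sketch of evaluating the model directly; the sky error and radius grid are made-up numbers:

# Hypothetical example: cumulative containment of the core and tail components
# for a trigger with a 3.0 degree statistical error, over a grid of radii.
import numpy as np
radii = np.linspace(0.0, 15.0, 151)
core, tail = fermi_core_tail_model(3.0, radii)
containment = core + tail   # total probability contained within each radius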
+ + + +
+[docs] +def get_sky_grid_scale( + sky_error=0.0, containment=0.9, upscale=False, fermi_sys=False, + precision=1e-3, **kwargs): + """ + Calculate the angular radius corresponding to a desired + localization uncertainty level. This is used to generate the search + grid and involves scaling up the standard 1-sigma value provided to + the workflow, assuming a normal probability profile. Fermi + systematic errors can be included, following + https://arxiv.org/abs/1909.03006, with default values valid before + 11 September 2019. The default probability coverage is 90%. + + Parameters + ---------- + sky_error : float + The reported statistical 1-sigma sky error of the trigger. + containment : float + The desired localization probability to be covered by the sky + grid. + upscale : bool, optional + Whether to rescale from 1 sigma to the requested containment + for non-Fermi triggers. Default = False, as Swift reports the 90% + radius directly. + fermi_sys : bool, optional + Whether to apply Fermi-GBM systematics via + ``fermi_core_tail_model``. Default = False. + precision : float, optional + Precision (in degrees) for calculating the error radius via the + Fermi-GBM model. + **kwargs + Additional keyword arguments passed to `fermi_core_tail_model`. + + Returns + ------- + float + Sky error radius in degrees. + """ + if fermi_sys: + lims = (0.5, 4) + radii = np.linspace( + lims[0] * sky_error, lims[1] * sky_error, + int((lims[1] - lims[0]) * sky_error / precision) + 1) + core, tail = fermi_core_tail_model(sky_error, radii, **kwargs) + out = radii[(abs(core + tail - containment)).argmin()] + else: + # Use Rayleigh distribution to go from 1 sigma containment to + # containment given by function variable. Interval method returns + # bounds of equal probability about the median, but we want 1-sided + # bound, hence use (2 * containment - 1) + out = sky_error + if upscale: + out *= rayleigh.interval(2 * containment - 1)[-1] + return out
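As an illustration of the non-Fermi branch: a hypothetical 1-sigma error of 2.5 degrees with the default 90% containment and upscale=True is multiplied by the Rayleigh 90% quantile (about 2.15):

# Hypothetical example: 90% containment radius from a 2.5 degree 1-sigma error.
r90 = get_sky_grid_scale(sky_error=2.5, containment=0.9, upscale=True)
# r90 is roughly 2.5 * 2.146 ~ 5.4 degrees; with upscale=False (the default)
# the input radius would be returned unchanged.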
+ + + +
+[docs] +def generate_tc_prior(wflow, tc_path, buffer_seg): + """ + Generate the configuration file for the prior on the coalescence + time of injections, ensuring that these times fall in the analysis + time and avoid the onsource and its buffer. + + Parameters + ---------- + tc_path : str + Path where the configuration file for the prior needs to be written. + buffer_seg : segmentlist + Start and end times of the buffer segment encapsulating the onsource. + """ + + # Write the tc-prior configuration file if it does not exist + if os.path.exists(tc_path): + raise ValueError("Refusing to overwrite %s." % tc_path) + tc_file = open(tc_path, "w") + tc_file.write("[prior-tc]\n") + tc_file.write("name = uniform\n") + tc_file.write("min-tc = %s\n" % wflow.analysis_time[0]) + tc_file.write("max-tc = %s\n\n" % wflow.analysis_time[1]) + tc_file.write("[constraint-tc]\n") + tc_file.write("name = custom\n") + tc_file.write("constraint_arg = (tc < %s) | (tc > %s)\n" % + (buffer_seg[0], buffer_seg[1])) + tc_file.close() + + # Add the tc-prior configuration file url to wflow.cp if necessary + tc_file_path = "file://"+tc_path + for inj_sec in wflow.cp.get_subsections("injections"): + config_urls = wflow.cp.get("workflow-injections", + inj_sec+"-config-files") + config_urls = [url.strip() for url in config_urls.split(",")] + if tc_file_path not in config_urls: + config_urls += [tc_file_path] + config_urls = ', '.join([str(item) for item in config_urls]) + wflow.cp.set("workflow-injections", + inj_sec+"-config-files", + config_urls)
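For a hypothetical analysis segment and onsource buffer, the file written by this function would look like the commented block below (the path, analysis times, and buffer are all assumptions for illustration):

# Hypothetical example: write the coalescence-time prior configuration.
generate_tc_prior(wflow, 'config_files/tc_prior.ini', buffer_seg)
# For wflow.analysis_time = (1126250000, 1126260000) and
# buffer_seg = (1126257000, 1126257500), tc_prior.ini would contain:
#
# [prior-tc]
# name = uniform
# min-tc = 1126250000
# max-tc = 1126260000
#
# [constraint-tc]
# name = custom
# constraint_arg = (tc < 1126257000) | (tc > 1126257500)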
+ + + +
+[docs] +def setup_pygrb_pp_workflow(wf, pp_dir, seg_dir, segment, bank_file, + insp_files, inj_files, inj_insp_files, inj_tags): + """ + Generate the post-processing section of a PyGRB offline workflow. + + Parameters + ---------- + wf : The workflow object + pp_dir : The directory where the post-processing files will be stored + seg_dir : The directory where the segment files are stored + segment : The segment to be analyzed + bank_file : The full template bank file + insp_files : The list of inspiral files + inj_files : The list of injection files + inj_insp_files : The list of inspiral files for injections + inj_tags : The list of injection tags + + Returns + ------- + trig_files : FileList + The list of combined trigger files + [ALL_TIMES, ONSOURCE, OFFSOURCE, OFFTRIAL_1, ..., OFFTRIAL_N] + FileList (N can be set by the user and is 6 by default) + clustered_files : FileList + CLUSTERED FileList, same order as trig_files + Contains triggers after clustering + inj_find_files : FileList + FOUNDMISSED FileList covering all injection sets + """ + # Begin setting up trig combiner job(s) + # Select executable class and initialize + exe_class = _select_grb_pp_class(wf, "trig_combiner") + job_instance = exe_class(wf.cp, "trig_combiner") + # Create node for coherent no injections jobs + node, trig_files = job_instance.create_node(wf.ifos, seg_dir, segment, + insp_files, pp_dir, bank_file) + wf.add_node(node) + + # Trig clustering for each trig file + exe_class = _select_grb_pp_class(wf, "trig_cluster") + job_instance = exe_class(wf.cp, "trig_cluster") + clustered_files = FileList([]) + for trig_file in trig_files: + # Create and add nodes + node, out_file = job_instance.create_node(trig_file, pp_dir) + wf.add_node(node) + clustered_files.append(out_file) + + # Find injections from triggers + exe_class = _select_grb_pp_class(wf, "inj_finder") + job_instance = exe_class(wf.cp, "inj_finder") + inj_find_files = FileList([]) + for inj_tag in inj_tags: + tag_inj_files = FileList([f for f in inj_files + if inj_tag in f.tags]) + # The tags[1] here stems from the injection group information + # being stored in the second tag. This could be improved + # depending on the final implementation of injections + tag_insp_files = FileList([f for f in inj_insp_files + if inj_tag in f.tags[1]]) + node, inj_find_file = job_instance.create_node( + tag_inj_files, tag_insp_files, + bank_file, pp_dir) + wf.add_node(node) + inj_find_files.append(inj_find_file) + + return trig_files, clustered_files, inj_find_files
+ + + +
+[docs] +class PycbcGrbTrigCombinerExecutable(Executable): + """ The class responsible for creating jobs + for ''pycbc_grb_trig_combiner''. + """ + + current_retention_level = Executable.ALL_TRIGGERS + + def __init__(self, cp, name): + super().__init__(cp=cp, name=name) + self.trigger_name = cp.get('workflow', 'trigger-name') + self.trig_start_time = cp.get('workflow', 'start-time') + self.num_trials = int(cp.get('trig_combiner', 'num-trials')) + +
+[docs] + def create_node(self, ifo_tag, seg_dir, segment, insp_files, + out_dir, bank_file, tags=None): + node = Node(self) + node.add_opt('--verbose') + node.add_opt("--ifo-tag", ifo_tag) + node.add_opt("--grb-name", self.trigger_name) + node.add_opt("--trig-start-time", self.trig_start_time) + node.add_opt("--segment-dir", seg_dir) + node.add_input_list_opt("--input-files", insp_files) + node.add_opt("--user-tag", "PYGRB") + node.add_opt("--num-trials", self.num_trials) + node.add_input_opt("--bank-file", bank_file) + # Prepare output file tag + user_tag = f"PYGRB_GRB{self.trigger_name}" + if tags: + user_tag += "_{}".format(tags) + # Add on/off source and off trial outputs + output_files = FileList([]) + outfile_types = ['ALL_TIMES', 'ONSOURCE', 'OFFSOURCE'] + for i in range(self.num_trials): + outfile_types.append("OFFTRIAL_{}".format(i+1)) + for out_type in outfile_types: + out_name = "{}-{}_{}-{}-{}.h5".format( + ifo_tag, user_tag, out_type, + segment[0], segment[1]-segment[0]) + out_file = File(ifo_tag, 'trig_combiner', segment, + file_url=os.path.join(out_dir, out_name)) + node.add_output(out_file) + output_files.append(out_file) + + return node, output_files
+
+ + + +
+[docs] +class PycbcGrbTrigClusterExecutable(Executable): + """ The class responsible for creating jobs + for ''pycbc_grb_trig_cluster''. + """ + + current_retention_level = Executable.ALL_TRIGGERS + + def __init__(self, cp, name): + super().__init__(cp=cp, name=name) + +
+[docs] + def create_node(self, in_file, out_dir): + node = Node(self) + node.add_input_opt("--trig-file", in_file) + # Determine output file name + ifotag, filetag, segment = filename_metadata(in_file.name) + start, end = segment + out_name = "{}-{}_CLUSTERED-{}-{}.h5".format(ifotag, filetag, + start, end-start) + out_file = File(ifotag, 'trig_cluster', segment, + file_url=os.path.join(out_dir, out_name)) + node.add_output(out_file) + + return node, out_file
+
+ + + +
+[docs] +class PycbcGrbInjFinderExecutable(Executable): + """The class responsible for creating jobs for ``pycbc_grb_inj_finder`` + """ + current_retention_level = Executable.ALL_TRIGGERS + + def __init__(self, cp, exe_name): + super().__init__(cp=cp, name=exe_name) + +
+[docs] + def create_node(self, inj_files, inj_insp_files, bank_file, + out_dir, tags=None): + if tags is None: + tags = [] + node = Node(self) + node.add_input_list_opt('--input-files', inj_insp_files) + node.add_input_list_opt('--inj-files', inj_files) + node.add_input_opt('--bank-file', bank_file) + ifo_tag, desc, segment = filename_metadata(inj_files[0].name) + desc = '_'.join(desc.split('_')[:-1]) + out_name = "{}-{}_FOUNDMISSED-{}-{}.h5".format( + ifo_tag, desc, segment[0], abs(segment)) + out_file = File(ifo_tag, 'inj_finder', segment, + os.path.join(out_dir, out_name), tags=tags) + node.add_output(out_file) + return node, out_file
+
+ + + +
+[docs] +def build_veto_filelist(workflow): + """Construct a FileList instance containing all veto xml files""" + + veto_dir = workflow.cp.get('workflow', 'veto-directory') + veto_files = glob.glob(veto_dir + '/*CAT*.xml') + veto_files = [resolve_url_to_file(vf) for vf in veto_files] + veto_files = FileList(veto_files) + + return veto_files
+ + + +
+[docs] +def build_segment_filelist(seg_dir): + """Construct a FileList instance containing all segments txt files""" + + file_names = ["bufferSeg.txt", "offSourceSeg.txt", "onSourceSeg.txt"] + seg_files = [os.path.join(seg_dir, fn) for fn in file_names] + seg_files = [resolve_url_to_file(sf) for sf in seg_files] + seg_files = FileList(seg_files) + + return seg_files
+ + + +
+[docs] +def make_pygrb_plot(workflow, exec_name, out_dir, + ifo=None, inj_file=None, trig_file=None, + onsource_file=None, bank_file=None, + seg_files=None, tags=None): + """Adds a node for a plot of PyGRB results to the workflow""" + + tags = [] if tags is None else tags + + # Initialize job node with its tags + grb_name = workflow.cp.get('workflow', 'trigger-name') + extra_tags = ['GRB'+grb_name] + # TODO: why is inj_set repeated twice in output files? + # if inj_set is not None: + # extra_tags.append(inj_set) + if ifo: + extra_tags.append(ifo) + node = PlotExecutable(workflow.cp, exec_name, ifos=workflow.ifos, + out_dir=out_dir, + tags=tags+extra_tags).create_node() + if trig_file is not None: + node.add_input_opt('--trig-file', trig_file) + # Pass the veto and segment files and options + if workflow.cp.has_option('workflow', 'veto-category'): + node.add_opt('--veto-category', + workflow.cp.get('workflow', 'veto-category')) + # FIXME: move to next if within previous one and else Raise error? + if workflow.cp.has_option('workflow', 'veto-files'): + veto_files = build_veto_filelist(workflow) + node.add_input_list_opt('--veto-files', veto_files) + # TODO: check this for pygrb_plot_stats_distribution + # They originally wanted seg_files + if exec_name in ['pygrb_plot_injs_results', + 'pygrb_plot_snr_timeseries']: + trig_time = workflow.cp.get('workflow', 'trigger-time') + node.add_opt('--trigger-time', trig_time) + # Pass the injection file as an input File instance + if inj_file is not None and exec_name not in \ + ['pygrb_plot_skygrid', 'pygrb_plot_stats_distribution']: + fm_file = inj_file + node.add_input_opt('--found-missed-file', fm_file) + # IFO option + if ifo: + node.add_opt('--ifo', ifo) + # Output files and final input file (passed as a File instance) + if exec_name == 'pygrb_efficiency': + # In this case tags[0] is the offtrial number + node.add_input_list_opt('--seg-files', seg_files) + node.add_input_opt('--onsource-file', + onsource_file) + node.add_input_opt('--bank-file', bank_file) + node.new_output_file_opt(workflow.analysis_time, '.png', + '--background-output-file', + tags=extra_tags+['max_background']) + node.new_output_file_opt(workflow.analysis_time, '.png', + '--onsource-output-file', + tags=extra_tags+['onsource']) + node.new_output_file_opt(workflow.analysis_time, '.json', + '--exclusion-dist-output-file', + tags=extra_tags) + node.add_opt('--injection-set-name', tags[1]) + node.add_opt('--trial-name', tags[0]) + else: + node.new_output_file_opt(workflow.analysis_time, '.png', + '--output-file', tags=extra_tags) + if exec_name in ['pygrb_plot_coh_ifosnr', 'pygrb_plot_null_stats'] \ + and 'zoomin' in tags: + node.add_opt('--zoom-in') + # Quantity to be displayed on the y-axis of the plot + if exec_name in ['pygrb_plot_chisq_veto', 'pygrb_plot_null_stats', + 'pygrb_plot_snr_timeseries']: + node.add_opt('--y-variable', tags[0]) + # Quantity to be displayed on the x-axis of the plot + elif exec_name == 'pygrb_plot_stats_distribution': + node.add_input_list_opt('--seg-files', seg_files) + node.add_opt('--x-variable', tags[0]) + elif exec_name == 'pygrb_plot_injs_results': + # Variables to plot on x and y axes + node.add_opt('--y-variable', tags[0]) + node.add_opt('--x-variable', tags[1]) + # Flag to plot found over missed or missed over found + if tags[2] == 'missed-on-top': + node.add_opt('--'+tags[2]) + # Enable log axes + subsection = '_'.join(tags[0:2]) + for log_flag in ['x-log', 'y-log']: + if workflow.cp.has_option_tags(exec_name, log_flag, + 
tags=[subsection]): + node.add_opt('--'+log_flag) + + # Add job node to workflow + workflow += node + + return node, node.output_files
+ + + +
+[docs] +def make_pygrb_info_table(workflow, exec_name, out_dir, in_files=None, + tags=None): + """ + Set up a job to create an html snippet with the GRB trigger information + or the exclusion distance information. + """ + + # Organize tags + tags = [] if tags is None else tags + grb_name = workflow.cp.get('workflow', 'trigger-name') + extra_tags = ['GRB'+grb_name] + + # Initialize job node + node = PlotExecutable(workflow.cp, exec_name, + ifos=workflow.ifos, out_dir=out_dir, + tags=tags+extra_tags).create_node() + + # Options + if exec_name == 'pygrb_grb_info_table': + node.add_opt('--ifos', ' '.join(workflow.ifos)) + elif exec_name == 'pygrb_exclusion_dist_table': + node.add_input_opt('--input-files', in_files) + + # Output + node.new_output_file_opt(workflow.analysis_time, '.html', + '--output-file', tags=extra_tags) + + # Add job node to workflow + workflow += node + + return node, node.output_files
+ + + +
+[docs] +def make_pygrb_injs_tables(workflow, out_dir, bank_file, off_file, seg_files, + inj_file=None, on_file=None, tags=None): + """ + Adds a job to make quiet-found and missed-found injection tables, + or loudest trigger(s) table.""" + + tags = [] if tags is None else tags + + # Executable + exec_name = 'pygrb_page_tables' + # Initialize job node + grb_name = workflow.cp.get('workflow', 'trigger-name') + extra_tags = ['GRB'+grb_name] + node = PlotExecutable(workflow.cp, exec_name, + ifos=workflow.ifos, out_dir=out_dir, + tags=tags+extra_tags).create_node() + # Pass the bank-file + node.add_input_opt('--bank-file', bank_file) + # Offsource input file (or equivalently trigger file for injections) + offsource_file = off_file + node.add_input_opt('--offsource-file', offsource_file) + # Pass the veto and segment files and options + if workflow.cp.has_option('workflow', 'veto-files'): + veto_files = build_veto_filelist(workflow) + node.add_input_list_opt('--veto-files', veto_files) + node.add_input_list_opt('--seg-files', seg_files) + # Handle input/output for injections + if inj_file is not None: + # Found-missed injection file (passed as File instance) + fm_file = inj_file + node.add_input_opt('--found-missed-file', fm_file) + # Missed-found and quiet-found injections html output files + for mf_or_qf in ['missed-found', 'quiet-found']: + mf_or_qf_tags = [mf_or_qf.upper().replace('-', '_')] + node.new_output_file_opt(workflow.analysis_time, '.html', + '--'+mf_or_qf+'-injs-output-file', + tags=extra_tags+mf_or_qf_tags) + # Quiet-found injections h5 output file + node.new_output_file_opt(workflow.analysis_time, '.h5', + '--quiet-found-injs-h5-output-file', + tags=extra_tags+['QUIET_FOUND']) + # Handle input/output for onsource/offsource + else: + src_type = 'offsource-trigs' + if on_file is not None: + src_type = 'onsource-trig' + # Onsource input file (passed as File instance) + onsource_file = on_file + node.add_input_opt('--onsource-file', onsource_file) + # Loudest offsource/onsource triggers html and h5 output files + src_type_tags = [src_type.upper().replace('-', '_')] + node.new_output_file_opt(workflow.analysis_time, '.html', + '--loudest-'+src_type+'-output-file', + tags=extra_tags+src_type_tags) + node.new_output_file_opt(workflow.analysis_time, '.h5', + '--loudest-'+src_type+'-h5-output-file', + tags=extra_tags+src_type_tags) + + # Add job node to the workflow + workflow += node + + return node, node.output_files
+ + + +# Based on setup_single_det_minifollowups +
+[docs] +def setup_pygrb_minifollowups(workflow, followups_file, trigger_file, + dax_output, out_dir, tags=None): + """ Create plots that follow up the loudest PyGRB triggers or + missed injections from an HDF file. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + followups_file: pycbc.workflow.File + The File class holding the triggers/injections to follow up + trigger_file: pycbc.workflow.File + The File class holding the triggers + dax_output: The directory that will contain the dax file + out_dir: path + The directory to store minifollowups result plots and files + tags: {None, optional} + Tags to add to the minifollowups executables + """ + + logging.info('Entering minifollowups module') + + if not workflow.cp.has_section('workflow-minifollowups'): + msg = 'There is no [workflow-minifollowups] section in ' + msg += 'the configuration file' + logging.info(msg) + logging.info('Leaving minifollowups') + return + + tags = [] if tags is None else tags + makedir(dax_output) + + # Turn the config file into a File instance + config_path = os.path.abspath(dax_output + '/' + + '_'.join(tags) + '_minifollowup.ini') + workflow.cp.write(open(config_path, 'w')) + config_file = resolve_url_to_file(config_path) + + # wikifile = curr_ifo + '_'.join(tags) + 'loudest_table.txt' + wikifile = '_'.join(tags) + 'loudest_table.txt' + + # Create the node + exe = Executable(workflow.cp, 'pygrb_minifollowups', + ifos=workflow.ifos, out_dir=dax_output, + tags=tags) + node = exe.create_node() + + node.add_input_opt('--trig-file', trigger_file) + + # Grab and pass all necessary files + if workflow.cp.has_option('workflow', 'veto-files'): + veto_files = build_veto_filelist(workflow) + node.add_input_list_opt('--veto-files', veto_files) + node.add_input_opt('--config-files', config_file) + node.add_input_opt('--followups-file', followups_file) + node.add_opt('--wiki-file', wikifile) + if tags: + node.add_list_opt('--tags', tags) + node.new_output_file_opt(workflow.analysis_time, '.dax', '--dax-file') + node.new_output_file_opt(workflow.analysis_time, '.dax.map', + '--output-map') + + name = node.output_files[0].name + assert name.endswith('.dax') + map_file = node.output_files[1] + assert map_file.name.endswith('.map') + + node.add_opt('--workflow-name', name) + node.add_opt('--output-dir', out_dir) + node.add_opt('--dax-file-directory', '.') + + workflow += node + + # Execute this in a sub-workflow + fil = node.output_files[0] + job = SubWorkflow(fil.name, is_planned=False) + job.set_subworkflow_properties(map_file, + staging_site=workflow.staging_site, + cache_file=workflow.cache_file) + job.add_into_workflow(workflow) + logging.info('Leaving minifollowups module')
+ + + +
+[docs] +def setup_pygrb_results_workflow(workflow, res_dir, trig_files, + inj_files, bank_file, seg_dir, tags=None, + explicit_dependencies=None): + """Create subworkflow to produce plots, tables, + and results webpage for a PyGRB analysis. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + res_dir: The post-processing directory where + results (plots, etc.) will be stored + trig_files: FileList of trigger files + inj_files: FileList of injection results + bank_file: The template bank File object + tags: {None, optional} + Tags to add to the executables + explicit_dependencies: nodes that must precede this + """ + + tags = [] if tags is None else tags + dax_output = res_dir+'/webpage_daxes' + # _workflow.makedir(dax_output) + makedir(dax_output) + + # Create the node + exe = Executable(workflow.cp, 'pygrb_pp_workflow', + ifos=workflow.ifos, out_dir=dax_output, + tags=tags) + node = exe.create_node() + # Grab and pass all necessary files + node.add_input_list_opt('--trig-files', trig_files) + if workflow.cp.has_option('workflow', 'veto-files'): + veto_files = build_veto_filelist(workflow) + node.add_input_list_opt('--veto-files', veto_files) + # node.add_input_opt('--config-files', config_file) + node.add_input_list_opt('--inj-files', inj_files) + node.add_input_opt('--bank-file', bank_file) + node.add_opt('--segment-dir', seg_dir) + + if tags: + node.add_list_opt('--tags', tags) + + node.new_output_file_opt(workflow.analysis_time, '.dax', + '--dax-file', tags=tags) + node.new_output_file_opt(workflow.analysis_time, '.map', + '--output-map', tags=tags) + # + ['MAP'], use_tmp_subdirs=True) + name = node.output_files[0].name + assert name.endswith('.dax') + map_file = node.output_files[1] + assert map_file.name.endswith('.map') + node.add_opt('--workflow-name', name) + # This is the output dir for the products of this node, namely dax and map + node.add_opt('--output-dir', res_dir) + node.add_opt('--dax-file-directory', '.') + + # Turn the config file into a File instance + config_path = os.path.abspath(dax_output + '/' + + '_'.join(tags) + 'webpage.ini') + workflow.cp.write(open(config_path, 'w')) + config_file = resolve_url_to_file(config_path) + node.add_input_opt('--config-files', config_file) + + # Track additional ini file produced by pycbc_pygrb_pp_workflow + out_file = File(workflow.ifos, 'pygrb_pp_workflow', workflow.analysis_time, + file_url=os.path.join(dax_output, name+'.ini')) + node.add_output(out_file) + + # Add node to the workflow + workflow += node + if explicit_dependencies is not None: + for dep in explicit_dependencies: + workflow.add_explicit_dependancy(dep, node) + + # Execute this in a sub-workflow + job = SubWorkflow(name, is_planned=False) # , _id='results') + job.set_subworkflow_properties(map_file, + staging_site=workflow.staging_site, + cache_file=workflow.cache_file) + job.add_into_workflow(workflow) + + return node.output_files
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/inference_followups.html b/latest/html/_modules/pycbc/workflow/inference_followups.html new file mode 100644 index 00000000000..c0ea1e5cce4 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/inference_followups.html @@ -0,0 +1,1213 @@ + + + + + + pycbc.workflow.inference_followups — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.workflow.inference_followups

+# Copyright (C) 2016 Christopher M. Biwer, Alexander Harvey Nitz, Collin Capano
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+"""
+Module that contains functions for setting up the inference workflow.
+"""
+import logging
+
+from pycbc.workflow.core import (Executable, makedir)
+from pycbc.workflow.plotting import PlotExecutable
+from pycbc.results import layout
+
+logger = logging.getLogger('pycbc.workflow.inference_followups')
+
+
+
+[docs] +def make_inference_plot(workflow, input_file, output_dir, + name, analysis_seg=None, + tags=None, input_file_opt='input-file', + output_file_extension='.png', + add_to_workflow=False): + """Boiler-plate function for creating a standard plotting job. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + input_file: (list of) pycbc.workflow.File + The file used for the input. May provide either a single file or a + list of files. + output_dir: str + The directory to store result plots. + name: str + The name in the [executables] section of the configuration file + to use. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + input_file_opt : str, optional + The name of the input-file option used by the executable. Default + is ``input-file``. + output_file_extension : str, optional + What file type to create. Default is ``.png``. + add_to_workflow : bool, optional + If True, the node will be added to the workflow before being returned. + **This means that no options may be added to the node afterward.** + Default is ``False``. + + Returns + ------- + pycbc.workflow.plotting.PlotExecutable + The job node for creating the plot. + """ + # default values + if tags is None: + tags = [] + if analysis_seg is None: + analysis_seg = workflow.analysis_time + # make the directory that will contain the output files + makedir(output_dir) + # Catch if a parameters option was specified: + # we need to do this because PlotExecutable will automatically add any + # option in the section to the node. However, we need to add the + # appropriate escapes to the parameters option so pegasus will render it + # properly (see _params_for_pegasus for details). + parameters = None + if workflow.cp.has_option(name, 'parameters'): + parameters = workflow.cp.get(name, 'parameters') + workflow.cp.remove_option(name, 'parameters') + # make a node for plotting the posterior as a corner plot + node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos, + out_dir=output_dir, + tags=tags).create_node() + # add back the parameters option if it was specified + if parameters is not None: + node.add_opt("--parameters", _params_for_pegasus(parameters)) + # and put the opt back in the config file in memory + workflow.cp.set(name, 'parameters', parameters) + # add input and output options + if isinstance(input_file, list): + # list of input files are given, use input_list_opt + node.add_input_list_opt("--{}".format(input_file_opt), input_file) + else: + # assume just a single file + node.add_input_opt("--{}".format(input_file_opt), input_file) + node.new_output_file_opt(analysis_seg, output_file_extension, + "--output-file") + # add node to workflow + if add_to_workflow: + workflow += node + return node
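A sketch of using this helper directly, deferring the node's addition to the workflow so that extra options can still be attached. The executable name, the extra option, and the file variables are hypothetical and would need to exist in the workflow configuration:

# Hypothetical example: build a plotting node and add an extra option before
# inserting it into the workflow.
node = make_inference_plot(workflow, posterior_file, 'posterior_plots',
                           'plot_posterior', tags=['EVENT1'],
                           add_to_workflow=False)
node.add_opt('--plot-marginal')   # hypothetical extra option
workflow += node
plot_files = node.output_files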
+ + + +
+[docs] +def make_inference_prior_plot(workflow, config_file, output_dir, + name="plot_prior", + analysis_seg=None, tags=None): + """Sets up the corner plot of the priors in the workflow. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + config_file: pycbc.workflow.File + The WorkflowConfigParser-parsable inference configuration file. + output_dir: str + The directory to store result plots and files. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``plot_prior``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of the output files. + """ + node = make_inference_plot(workflow, config_file, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + input_file_opt='config-file', + add_to_workflow=True) + return node.output_files
+ + + +
+[docs] +def create_posterior_files(workflow, samples_files, output_dir, + parameters=None, name="extract_posterior", + analysis_seg=None, tags=None): + """Sets up job to create posterior files from some given samples files. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The workflow instance we are populating + samples_files : str or list of str + One or more files to extract the posterior samples from. + output_dir: str + The directory to store result plots and files. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``extract_posterior``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + if analysis_seg is None: + analysis_seg = workflow.analysis_time + if tags is None: + tags = [] + # Catch if a parameters option was specified: + # we need to do this because Executable will automatically add any + # option in the section to the node. However, we need to add the + # appropriate escapes to the parameters option so pegasus will render it + # properly (see _params_for_pegasus for details). + parameters = None + if workflow.cp.has_option(name, 'parameters'): + parameters = workflow.cp.get(name, 'parameters') + workflow.cp.remove_option(name, 'parameters') + extract_posterior_exe = Executable(workflow.cp, name, + ifos=workflow.ifos, + out_dir=output_dir) + node = extract_posterior_exe.create_node() + # add back the parameters option if it was specified + if parameters is not None: + node.add_opt("--parameters", _params_for_pegasus(parameters)) + # and put the opt back in the config file in memory + workflow.cp.set(name, 'parameters', parameters) + if not isinstance(samples_files, list): + samples_files = [samples_files] + node.add_input_list_opt("--input-file", samples_files) + node.new_output_file_opt(analysis_seg, ".hdf", "--output-file", tags=tags) + # add node to workflow + workflow += node + return node.output_files
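A minimal sketch, assuming `samples_files` holds the raw sampler output files from one or more inference jobs (all names hypothetical):

# Hypothetical example: extract a single posterior file for downstream plots.
posterior_files = create_posterior_files(
    workflow, samples_files, 'posterior_files',
    name='extract_posterior', tags=['EVENT1'])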
+ + + +
+[docs] +def create_fits_file(workflow, inference_file, output_dir, + name="create_fits_file", + analysis_seg=None, tags=None): + """Sets up job to create fits files from some given samples files. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The workflow instance we are populating + inference_file: pycbc.workflow.File + The file with posterior samples. + output_dir: str + The directory to store result plots and files. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``create_fits_file``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + if analysis_seg is None: + analysis_seg = workflow.analysis_time + if tags is None: + tags = [] + create_fits_exe = Executable(workflow.cp, name, + ifos=workflow.ifos, + out_dir=output_dir) + node = create_fits_exe.create_node() + node.add_input_opt("--input-file", inference_file) + node.new_output_file_opt(analysis_seg, ".fits", "--output-file", tags=tags) + # add node to workflow + workflow += node + return node.output_files
+ + + +
+[docs] +def make_inference_skymap(workflow, fits_file, output_dir, + name="plot_skymap", analysis_seg=None, + tags=None): + """Sets up the skymap plot. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + fits_file: pycbc.workflow.File + The fits file with the sky location. + output_dir: str + The directory to store result plots and files. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``plot_skymap``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of result and output files. + """ + node = make_inference_plot(workflow, fits_file, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + add_to_workflow=True) + return node.output_files
+ + + +
+[docs] +def make_inference_summary_table(workflow, inference_file, output_dir, + parameters=None, print_metadata=None, + name="table_summary", + analysis_seg=None, tags=None): + """Sets up the html table summarizing parameter estimates. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + inference_file: pycbc.workflow.File + The file with posterior samples. + output_dir: str + The directory to store result plots and files. + parameters : list or str + A list or string of parameters to generate the table for. If a string + is provided, separate parameters should be space or new-line separated. + print_metadata : list or str + A list or string of metadata parameters to print. Syntax is the same + as for ``parameters``. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``table_summary``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + # we'll use make_inference_plot even though this isn't a plot; the + # setup is the same, we just change the file extension + node = make_inference_plot(workflow, inference_file, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + output_file_extension='.html', + add_to_workflow=False) + # now add the parameters and print metadata options; these are pulled + # from separate sections in the workflow config file, which is why we + # add them separately here + if parameters is not None: + node.add_opt("--parameters", _params_for_pegasus(parameters)) + if print_metadata is not None: + node.add_opt("--print-metadata", _params_for_pegasus(print_metadata)) + workflow += node + return node.output_files
+ + + +
+[docs] +def make_inference_posterior_plot(workflow, inference_file, output_dir, + parameters=None, plot_prior_from_file=None, + name="plot_posterior", + analysis_seg=None, tags=None): + """Sets up the corner plot of the posteriors in the workflow. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + inference_file: pycbc.workflow.File + The file with posterior samples. + output_dir: str + The directory to store result plots and files. + parameters : list or str + The parameters to plot. + plot_prior_from_file : str, optional + Plot the prior from the given config file on the 1D marginal plots. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``plot_posterior``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + # create the node, but delay adding it to the workflow so we can add + # the prior file if it is requested + node = make_inference_plot(workflow, inference_file, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + add_to_workflow=False) + if parameters is not None: + node.add_opt("--parameters", _params_for_pegasus(parameters)) + if plot_prior_from_file is not None: + node.add_input_opt('--plot-prior', plot_prior_from_file) + # now add the node to workflow + workflow += node + return node.output_files
+ + + +
+[docs] +def make_inference_samples_plot(workflow, inference_file, output_dir, + name="plot_samples", + analysis_seg=None, tags=None): + """Sets up a plot of the samples versus iteration (for MCMC samplers). + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + inference_file: pycbc.workflow.File + The file with posterior samples. + output_dir: str + The directory to store result plots and files. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``plot_samples``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + node = make_inference_plot(workflow, inference_file, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + add_to_workflow=True) + return node.output_files
+ + + +
+[docs] +def make_inference_acceptance_rate_plot(workflow, inference_file, output_dir, + name="plot_acceptance_rate", + analysis_seg=None, tags=None): + """Sets up a plot of the acceptance rate (for MCMC samplers). + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + inference_file: pycbc.workflow.File + The file with posterior samples. + output_dir: str + The directory to store result plots and files. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``plot_acceptance_rate``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + node = make_inference_plot(workflow, inference_file, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + add_to_workflow=True) + return node.output_files
+ + + +
+[docs] +def make_inference_plot_mcmc_history(workflow, inference_file, output_dir, + name="plot_mcmc_history", + analysis_seg=None, tags=None): + """Sets up a plot showing the checkpoint history of an MCMC sampler. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + inference_file: pycbc.workflow.File + The file with posterior samples. + output_dir: str + The directory to store result plots and files. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``plot_mcmc_history``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + node = make_inference_plot(workflow, inference_file, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + add_to_workflow=True) + return node.output_files
+ + + +
+[docs] +def make_inference_dynesty_run_plot(workflow, inference_file, output_dir, + name="plot_dynesty_run", + analysis_seg=None, tags=None): + """Sets up a debugging plot for the dynesty run (for Dynesty sampler). + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + inference_file: pycbc.workflow.File + The file with posterior samples. + output_dir: str + The directory to store result plots and files. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``plot_dynesty_run``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + node = make_inference_plot(workflow, inference_file, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + add_to_workflow=True) + return node.output_files
+ + + +
+[docs] +def make_inference_dynesty_trace_plot(workflow, inference_file, output_dir, + name="plot_dynesty_traceplot", + analysis_seg=None, tags=None): + """Sets up a trace plot for the dynesty run (for Dynesty sampler). + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + inference_file: pycbc.workflow.File + The file with posterior samples. + output_dir: str + The directory to store result plots and files. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``plot_dynesty_traceplot``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + node = make_inference_plot(workflow, inference_file, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + add_to_workflow=True) + return node.output_files
+ + + +
+[docs] +def make_inference_pp_table(workflow, posterior_files, output_dir, + parameters=None, injection_samples_map=None, + name="pp_table_summary", + analysis_seg=None, tags=None): + """Performs a PP test, writing the results to an HTML table. + + Parameters + ---------- + workflow : pycbc.workflow.Workflow + The core workflow instance we are populating + posterior_files : pycbc.workflow.core.FileList + List of files with posteriors of injections. + output_dir : str + The directory to store result plots and files. + parameters : list or str, optional + A list or string of parameters to generate the table for. If a string + is provided, separate parameters should be space or new-line separated. + injection_samples_map : (list of) str, optional + Map between injection parameters and parameters in the posterior file. + Format is ``INJECTION_PARAM:SAMPLES_PARAM``. + name : str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``pp_table_summary``. + analysis_seg : ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags : list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + # we'll use make_inference_plot even though this isn't a plot; the + # setup is the same, we just change the file extension + node = make_inference_plot(workflow, posterior_files, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + output_file_extension='.html', + add_to_workflow=False) + # add the parameters and inj/samples map + if parameters is not None: + node.add_opt("--parameters", _params_for_pegasus(parameters)) + if injection_samples_map is not None: + node.add_opt("--injection-samples-map", + _params_for_pegasus(injection_samples_map)) + workflow += node + return node.output_files
+ + + +
+[docs] +def make_inference_pp_plot(workflow, posterior_files, output_dir, + parameters=None, injection_samples_map=None, + name="plot_pp", + analysis_seg=None, tags=None): + """Sets up a pp plot in the workflow. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + posterior_files: pycbc.workflow.core.FileList + List of files with posteriors of injections. + output_dir: str + The directory to store result plots and files. + parameters : list or str, optional + The parameters to plot. + injection_samples_map : (list of) str, optional + Map between injection parameters and parameters in the posterior file. + Format is ``INJECTION_PARAM:SAMPLES_PARAM``. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``plot_pp``. + analysis_segs: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + node = make_inference_plot(workflow, posterior_files, output_dir, + name, analysis_seg=analysis_seg, tags=tags, + add_to_workflow=False) + # add the parameters and inj/samples map + if parameters is not None: + node.add_opt("--parameters", _params_for_pegasus(parameters)) + if injection_samples_map is not None: + node.add_opt("--injection-samples-map", + _params_for_pegasus(injection_samples_map)) + # now add the node to workflow + workflow += node + return node.output_files
+ + + +
+[docs] +def make_inference_inj_recovery_plot(workflow, posterior_files, output_dir, + parameter, injection_samples_map=None, + name="inj_recovery", + analysis_seg=None, tags=None): + """Sets up the recovered versus injected parameter plot in the workflow. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + posterior_files: pycbc.workflow.core.FileList + List of files with posteriors of injections. + output_dir: str + The directory to store result plots and files. + parameter : str + The parameter to plot. + injection_samples_map : (list of) str, optional + Map between injection parameters and parameters in the posterior file. + Format is ``INJECTION_PARAM:SAMPLES_PARAM``. + name: str, optional + The name in the [executables] section of the configuration file + to use, and the section to read for additional arguments to pass to + the executable. Default is ``inj_recovery``. + analysis_seg: ligo.segments.Segment, optional + The segment this job encompasses. If None then use the total analysis + time from the workflow. + tags: list, optional + Tags to add to the inference executables. + + Returns + ------- + pycbc.workflow.FileList + A list of output files. + """ + # arguments are the same as plot_pp, so just call that with the + # different executable name + return make_inference_pp_plot( + workflow, posterior_files, output_dir, parameters=parameter, + injection_samples_map=injection_samples_map, + name=name, analysis_seg=analysis_seg, tags=tags)
+ + + +
+[docs] +def get_plot_group(cp, section_tag): + """Gets plotting groups from ``[workflow-section_tag]``.""" + group_prefix = "plot-group-" + # parameters for the summary plots + plot_groups = {} + opts = [opt for opt in cp.options("workflow-{}".format(section_tag)) + if opt.startswith(group_prefix)] + for opt in opts: + group = opt.replace(group_prefix, "").replace("-", "_") + plot_groups[group] = cp.get_opt_tag("workflow", opt, section_tag) + return plot_groups
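As an illustration of how the ``plot-group-`` options map to the returned dictionary, the following minimal sketch parses a hypothetical ``[workflow-summary_plots]`` section with Python's plain ``configparser`` (the real workflow uses its own config parser and ``get_opt_tag``; the section contents here are invented for the example).

import configparser

# Hypothetical config text; a real workflow reads this from its .ini file.
cfg_text = """
[workflow-summary_plots]
plot-group-masses = srcmass1 srcmass2
plot-group-spins = spin1z spin2z
"""

cp = configparser.ConfigParser()
cp.read_string(cfg_text)

group_prefix = "plot-group-"
plot_groups = {}
for opt in cp.options("workflow-summary_plots"):
    if opt.startswith(group_prefix):
        # group names have dashes converted to underscores
        group = opt.replace(group_prefix, "").replace("-", "_")
        plot_groups[group] = cp.get("workflow-summary_plots", opt)

print(plot_groups)   # {'masses': 'srcmass1 srcmass2', 'spins': 'spin1z spin2z'}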
+ + + +
+[docs] +def get_diagnostic_plots(workflow): + """Determines what diagnostic plots to create based on workflow. + + The plots to create are based on which executables are specified in the + workflow's config file. A list of strings is returned giving the diagnostic + plots to create. This list may contain: + + * ``samples``: For MCMC samplers, a plot of the sample chains as a function + of iteration. This will be created if ``plot_samples`` is in the + executables section. + * ``acceptance_rate``: For MCMC samplers, a plot of the acceptance rate. + This will be created if ``plot_acceptance_rate`` is in the executables + section. + * ``mcmc_history``: For MCMC samplers, a plot of the sampler's checkpoint + history. This will be created if ``plot_mcmc_history`` is in the + executables section. + * ``dynesty_run``: For the dynesty sampler, a debugging plot of the run. + This will be created if ``plot_dynesty_run`` is in the executables + section. + * ``dynesty_traceplot``: For the dynesty sampler, a trace plot of the run. + This will be created if ``plot_dynesty_traceplot`` is in the executables + section. + + Returns + ------- + list : + List of names of diagnostic plots. + """ + diagnostics = [] + if "plot_samples" in workflow.cp.options("executables"): + diagnostics.append('samples') + if "plot_acceptance_rate" in workflow.cp.options("executables"): + diagnostics.append('acceptance_rate') + if "plot_mcmc_history" in workflow.cp.options("executables"): + diagnostics.append('mcmc_history') + if "plot_dynesty_run" in workflow.cp.options("executables"): + diagnostics.append('dynesty_run') + if "plot_dynesty_traceplot" in workflow.cp.options("executables"): + diagnostics.append('dynesty_traceplot') + return diagnostics
+ + + +
+[docs] +def make_diagnostic_plots(workflow, diagnostics, samples_file, label, rdir, + tags=None): + """Makes diagnostic plots. + + Diagnostic plots are sampler-specific plots the provide information on + how the sampler performed. All diagnostic plots use the output file + produced by ``pycbc_inference`` as their input. Diagnostic plots are added + to the results directory ``rdir/NAME`` where ``NAME`` is the name of the + diagnostic given in ``diagnostics``. + + Parameters + ---------- + workflow : pycbc.workflow.core.Workflow + The workflow to add the plotting jobs to. + diagnostics : list of str + The names of the diagnostic plots to create. See + :py:func:`get_diagnostic_plots` for recognized names. + samples_file : (list of) pycbc.workflow.File + One or more samples files with which to create the diagnostic plots. + If a list of files is provided, a diagnostic plot for each file will + be created. + label : str + Event label for the diagnostic plots. + rdir : pycbc.results.layout.SectionNumber + Results directory layout. + tags : list of str, optional + Additional tags to add to the file names. + + Returns + ------- + dict : + Dictionary of diagnostic name -> list of files giving the plots that + will be created. + """ + if tags is None: + tags = [] + out = {} + if not isinstance(samples_file, list): + samples_file = [samples_file] + if 'samples' in diagnostics: + # files for samples summary subsection + base = "samples/{}".format(label) + samples_plots = [] + for kk, sf in enumerate(samples_file): + samples_plots += make_inference_samples_plot( + workflow, sf, rdir[base], + analysis_seg=workflow.analysis_time, + tags=tags+[label, str(kk)]) + out['samples'] = samples_plots + layout.group_layout(rdir[base], samples_plots) + + if 'acceptance_rate' in diagnostics: + # files for samples acceptance_rate subsection + base = "acceptance_rate/{}".format(label) + acceptance_plots = [] + for kk, sf in enumerate(samples_file): + acceptance_plots += make_inference_acceptance_rate_plot( + workflow, sf, rdir[base], + analysis_seg=workflow.analysis_time, + tags=tags+[label, str(kk)]) + out['acceptance_rate'] = acceptance_plots + layout.single_layout(rdir[base], acceptance_plots) + + if 'mcmc_history' in diagnostics: + # files for samples mcmc history subsection + base = "mcmc_history/{}".format(label) + history_plots = [] + for kk, sf in enumerate(samples_file): + history_plots += make_inference_plot_mcmc_history( + workflow, sf, rdir[base], + analysis_seg=workflow.analysis_time, + tags=tags+[label, str(kk)]) + out['mcmc_history'] = history_plots + layout.single_layout(rdir[base], history_plots) + + if 'dynesty_run' in diagnostics: + # files for dynesty run subsection + base = "dynesty_run/{}".format(label) + dynesty_run_plots = [] + for kk, sf in enumerate(samples_file): + dynesty_run_plots += make_inference_dynesty_run_plot( + workflow, sf, rdir[base], + analysis_seg=workflow.analysis_time, + tags=tags+[label, str(kk)]) + out['dynesty_run'] = dynesty_run_plots + layout.single_layout(rdir[base], dynesty_run_plots) + + if 'dynesty_traceplot' in diagnostics: + # files for samples dynesty tyrace plots subsection + base = "dynesty_traceplot/{}".format(label) + dynesty_trace_plots = [] + for kk, sf in enumerate(samples_file): + dynesty_trace_plots += make_inference_dynesty_trace_plot( + workflow, sf, rdir[base], + analysis_seg=workflow.analysis_time, + tags=tags+[label, str(kk)]) + out['dynesty_traceplot'] = dynesty_trace_plots + layout.single_layout(rdir[base], dynesty_trace_plots) + + return out
+ + + +
+[docs] +def make_posterior_workflow(workflow, samples_files, config_file, label, + rdir, posterior_file_dir='posterior_files', + tags=None): + """Adds jobs to a workflow that make a posterior file and subsequent plots. + + A posterior file is first created from the given samples file(s). The + settings for extracting the posterior are set by the + ``[extract_posterior]`` section. If that section has a ``parameters`` + argument, then the parameters in the posterior file (and for use in all + subsequent plotting) will be whatever that option is set to. Otherwise, + the parameters in the posterior file will be whatever is common to + all of the given samples file(s). + + Except for prior plots (which use the given inference config file), all + subsequent jobs use the posterior file. The following are created: + + * **Summary table**: an HTML table created using the ``table_summary`` + executable. The parameters to print in the table are retrieved from the + ``table-params`` option in the ``[workflow-summary_table]`` section. + Metadata may also be printed by adding a ``print-metadata`` option to + that section. + * **Summary posterior plots**: a collection of posterior plots to include + in the summary page, after the summary table. The parameters to plot + are read from ``[workflow-summary_plots]``. Parameters should be grouped + together by providing + ``plot-group-NAME = PARAM1[:LABEL1] PARAM2[:LABEL2]`` in that section, + where ``NAME`` is a unique name for each group. One posterior plot will + be created for each plot group. For clarity, only one or two parameters + should be plotted in each summary group, but this is not enforced. + Settings for the plotting executable are read from the + ``plot_posterior_summary`` section; likewise, the executable used + is read from ``plot_posterior_summary`` in the + ``[executables]`` section. + * **Sky maps**: if *both* ``create_fits_file`` and ``plot_skymap`` + are listed in the ``[executables]`` section, then a ``.fits`` file and + sky map plot will be produced. The sky map plot will be included in + the summary plots. You must be running in a Python 3 environment to + create these. + * **Prior plots**: plots of the prior will be created using the + ``plot_prior`` executable. By default, all of the variable + parameters will be plotted. The prior plots are added to + ``priors/LABEL/`` in the results directory, where ``LABEL`` is the + given ``label``. + * **Posterior plots**: additional posterior plots are created using the + ``plot_posterior`` executable. The parameters to plot are + read from the ``[workflow-plot_params]`` section. As with the summary + posterior plots, parameters are grouped together by providing + ``plot-group-NAME`` options in that section. A posterior plot will be + created for each group, and added to the ``posteriors/LABEL/`` directory. + Plot settings are read from the ``[plot_posterior]`` section; this + is kept separate from the posterior summary so that different settings + can be used. For example, you may want to make a density plot for the + summary plots, but a scatter plot colored by SNR for the posterior plots. + + + Parameters + ---------- + workflow : pycbc.workflow.core.Workflow + The workflow to which the posterior and plotting jobs will be added. + samples_files : pycbc.workflow.core.FileList + List of samples files to combine into a single posterior file. + config_file : pycbc.workflow.File + The inference configuration file used to generate the samples file(s). + This is needed to make plots of the prior. + label : str + Unique label for the plots. Used in file names. 
+ rdir : pycbc.results.layout.SectionNumber + The results directory to save the plots to. + posterior_file_dir : str, optional + The name of the directory to save the posterior file to. Default is + ``posterior_files``. + tags : list of str, optional + Additional tags to add to the file names. + + Returns + ------- + posterior_file : pycbc.workflow.File + The posterior file that was created. + summary_files : list + List of files to go on the summary results page. + prior_plots : list + List of prior plots that will be created. These will be saved to + ``priors/LABEL/`` in the resuls directory, where ``LABEL`` is the + provided label. + posterior_plots : list + List of posterior plots that will be created. These will be saved to + ``posteriors/LABEL/`` in the results directory. + """ + # the list of plots to go in the summary + summary_files = [] + + if tags is None: + tags = [] + + analysis_seg = workflow.analysis_time + + # figure out what parameters user wants to plot from workflow configuration + # parameters for the summary plots + summary_plot_params = get_plot_group(workflow.cp, 'summary_plots') + # parameters to plot in large corner plots + plot_params = get_plot_group(workflow.cp, 'plot_params') + # get parameters for the summary tables + table_params = workflow.cp.get_opt_tag('workflow', 'table-params', + 'summary_table') + # get any metadata that should be printed + if workflow.cp.has_option('workflow-summary_table', 'print-metadata'): + table_metadata = workflow.cp.get_opt_tag('workflow', 'print-metadata', + 'summary_table') + else: + table_metadata = None + + # figure out if we are making a skymap + make_skymap = ("create_fits_file" in workflow.cp.options("executables") and + "plot_skymap" in workflow.cp.options("executables")) + + make_prior = ("plot_prior" in workflow.cp.options("executables")) + _config = None + if make_prior: + _config = config_file + + # make node for running extract samples + posterior_file = create_posterior_files( + workflow, samples_files, posterior_file_dir, + analysis_seg=analysis_seg, tags=tags+[label])[0] + + # summary table + summary_files += (make_inference_summary_table( + workflow, posterior_file, rdir.base, + parameters=table_params, print_metadata=table_metadata, + analysis_seg=analysis_seg, + tags=tags+[label]),) + + # summary posteriors + summary_plots = [] + for group, params in summary_plot_params.items(): + summary_plots += make_inference_posterior_plot( + workflow, posterior_file, rdir.base, + name='plot_posterior_summary', + parameters=params, plot_prior_from_file=_config, + analysis_seg=analysis_seg, + tags=tags+[label, group]) + + # sky map + if make_skymap: + # create the fits file + fits_file = create_fits_file( + workflow, posterior_file, rdir.base, analysis_seg=analysis_seg, + tags=tags+[label])[0] + # now plot the skymap + skymap_plot = make_inference_skymap( + workflow, fits_file, rdir.base, analysis_seg=analysis_seg, + tags=tags+[label]) + summary_plots += skymap_plot + + summary_files += list(layout.grouper(summary_plots, 2)) + + # files for posteriors summary subsection + base = "posteriors/{}".format(label) + posterior_plots = [] + for group, params in plot_params.items(): + posterior_plots += make_inference_posterior_plot( + workflow, posterior_file, rdir[base], + parameters=params, plot_prior_from_file=_config, + analysis_seg=analysis_seg, + tags=tags+[label, group]) + layout.single_layout(rdir[base], posterior_plots) + + prior_plots = [] + # files for priors summary section + if make_prior: + base = 
"priors/{}".format(label) + prior_plots += make_inference_prior_plot( + workflow, config_file, rdir[base], + analysis_seg=workflow.analysis_time, tags=tags+[label]) + layout.single_layout(rdir[base], prior_plots) + return posterior_file, summary_files, prior_plots, posterior_plots
+ + + +def _params_for_pegasus(parameters): + """Escapes $ and backslashes in a parameters string for pegasus. + + Pegasus kickstart tries to do variable substitution if it sees a ``$``, and + it will strip away backslashes. This can be problematic when trying to use + LaTeX in parameter labels. This function adds escapes to all ``$`` and + backslashes in a parameters argument, so the argument can be safely passed + through pegasus-kickstart. + + Parameters + ---------- + parameters : list or str + The parameters argument to modify. If a list, the output will be + converted to a space-separated string. + """ + if isinstance(parameters, list): + parameters = " ".join(parameters) + return parameters.replace('\\', '\\\\').replace('$', '\$') +
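A quick, self-contained illustration of the escaping performed above; the label text is made up for the example, and the function is re-declared here only so the snippet runs on its own.

def params_for_pegasus(parameters):
    # same escaping as _params_for_pegasus above
    if isinstance(parameters, list):
        parameters = " ".join(parameters)
    return parameters.replace('\\', '\\\\').replace('$', '\\$')

# A LaTeX-style label containing both characters that kickstart would mangle.
print(params_for_pegasus([r"mchirp:$\mathcal{M}$", "q"]))
# prints: mchirp:\$\\mathcal{M}\$ q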
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/injection.html b/latest/html/_modules/pycbc/workflow/injection.html new file mode 100644 index 00000000000..a078ea302fb --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/injection.html @@ -0,0 +1,417 @@ + + + + + + pycbc.workflow.injection — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.workflow.injection

+# Copyright (C) 2015  Ian Harry, Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module is responsible for setting up the part of a pycbc workflow that
+will generate the injection files to be used for assessing the workflow's
+ability to detect predicted signals.
+Full documentation for this module can be found here:
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/NOTYETCREATED.html
+"""
+
+import logging
+import os.path
+import configparser as ConfigParser
+
+from pycbc.workflow.core import FileList, make_analysis_dir, Node
+from pycbc.workflow.core import Executable, resolve_url_to_file
+from pycbc.workflow.jobsetup import (LalappsInspinjExecutable,
+        PycbcCreateInjectionsExecutable, select_generic_executable)
+
+logger = logging.getLogger('pycbc.workflow.injection')
+
+
+[docs] +def veto_injections(workflow, inj_file, veto_file, veto_name, out_dir, tags=None): + """Sets up a job to remove injections that fall within vetoed times. + + The job runs the ``strip_injections`` executable with the given veto + file and segment name; the stripped injection file is returned. + """ + tags = [] if tags is None else tags + make_analysis_dir(out_dir) + + node = Executable(workflow.cp, 'strip_injections', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_opt('--segment-name', veto_name) + node.add_input_opt('--veto-file', veto_file) + node.add_input_opt('--injection-file', inj_file) + node.add_opt('--ifos', ' '.join(workflow.ifos)) + node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file') + workflow += node + return node.output_files[0]
+ + + +
+[docs] +class PyCBCOptimalSNRExecutable(Executable): + """Compute optimal SNR for injections""" + current_retention_level = Executable.ALL_TRIGGERS + +
+[docs] + def create_node(self, workflow, inj_file, precalc_psd_files, group_str): + node = Node(self) + _, ext = os.path.splitext(inj_file.name) + node.add_input_opt('--input-file', inj_file) + node.add_opt('--injection-fraction-range', group_str) + node.add_input_list_opt('--time-varying-psds', precalc_psd_files) + node.new_output_file_opt(workflow.analysis_time, ext, + '--output-file') + return node
+
+ + + +
+[docs] +class PyCBCMergeHDFExecutable(Executable): + """Merge HDF injection files executable class""" + current_retention_level = Executable.MERGED_TRIGGERS + +
+[docs] + def create_node(self, workflow, input_files): + node = Node(self) + node.add_input_list_opt('--injection-files', input_files) + node.new_output_file_opt(workflow.analysis_time, '.hdf', + '--output-file') + return node
+
+ + + +
+[docs] +def compute_inj_optimal_snr(workflow, inj_file, precalc_psd_files, out_dir, + tags=None): + "Set up a job for computing optimal SNRs of a sim_inspiral file." + if tags is None: + tags = [] + + try: + factor = int(workflow.cp.get_opt_tags('workflow-optimal-snr', + 'parallelization-factor', + tags)) + except Exception as e: + logger.warning(e) + factor = 1 + + if factor == 1: + # parallelization factor not given - default to single optimal snr job + opt_snr_exe = PyCBCOptimalSNRExecutable(workflow.cp, 'optimal_snr', + ifos=workflow.ifos, + out_dir=out_dir, tags=tags) + node = opt_snr_exe.create_node(workflow, inj_file, + precalc_psd_files, '0/1') + workflow += node + + return node.output_files[0] + + opt_snr_split_files = [] + for i in range(factor): + group_str = '%s/%s' % (i, factor) + opt_snr_exe = PyCBCOptimalSNRExecutable(workflow.cp, 'optimal_snr', + ifos=workflow.ifos, + out_dir=out_dir, + tags=tags + [str(i)]) + opt_snr_exe.update_current_retention_level( + Executable.INTERMEDIATE_PRODUCT) + node = opt_snr_exe.create_node(workflow, inj_file, precalc_psd_files, + group_str) + opt_snr_split_files += [node.output_files[0]] + workflow += node + + hdfcombine_exe = PyCBCMergeHDFExecutable( + workflow.cp, + 'optimal_snr_merge', + ifos=workflow.ifos, + out_dir=out_dir, + tags=tags + ) + + hdfcombine_node = hdfcombine_exe.create_node( + workflow, + opt_snr_split_files + ) + workflow += hdfcombine_node + + return hdfcombine_node.output_files[0]
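The parallelization above splits the injection set into ``factor`` equal fractions, labels each job with an ``i/factor`` group string, and merges the per-job outputs afterwards. Below is a minimal sketch of that bookkeeping, independent of the workflow machinery; the injection list and the even splitting rule are illustrative, and the executable's actual splitting may differ.

# Hypothetical stand-in for the injections in the input file.
injections = list(range(10))
factor = 3  # would come from the parallelization-factor option

def select_fraction(items, group_str):
    """Return the slice of items described by an 'i/factor' group string."""
    i, total = (int(x) for x in group_str.split('/'))
    n = len(items)
    start = (i * n) // total
    end = ((i + 1) * n) // total
    return items[start:end]

partial_results = []
for i in range(factor):
    group_str = '%s/%s' % (i, factor)
    partial_results.append(select_fraction(injections, group_str))

# the merge step recombines the per-job outputs
merged = [x for part in partial_results for x in part]
assert merged == injections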
+ + +
+[docs] +def cut_distant_injections(workflow, inj_file, out_dir, tags=None): + "Set up a job for removing injections that are too distant to be seen" + if tags is None: + tags = [] + + node = Executable(workflow.cp, 'inj_cut', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_opt('--input', inj_file) + node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file') + workflow += node + return node.output_files[0]
+ + +
+[docs] +def inj_to_hdf(workflow, inj_file, out_dir, tags=None): + """ Convert injection file to hdf format. + + If the file is already PyCBC HDF format, this will just make a copy. + """ + if tags is None: + tags = [] + + node = Executable(workflow.cp, 'inj2hdf', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_opt('--injection-file', inj_file) + node.new_output_file_opt(workflow.analysis_time, '.hdf', '--output-file') + workflow += node + return node.output_file
+ + +
+[docs] +def setup_injection_workflow(workflow, output_dir=None, + inj_section_name='injections', tags=None): + """ + This function is the gateway for setting up injection-generation jobs in a + workflow. It should be possible for this function to support a number + of different ways/codes that could be used for doing this, however as this + will presumably stay as a single call to a single code (which need not be + inspinj) there are currently no subfunctions in this moudle. + + Parameters + ----------- + workflow : pycbc.workflow.core.Workflow + The Workflow instance that the coincidence jobs will be added to. + output_dir : path + The directory in which injection files will be stored. + inj_section_name : string (optional, default='injections') + The string that corresponds to the option describing the exe location + in the [executables] section of the .ini file and that corresponds to + the section (and sub-sections) giving the options that will be given to + the code at run time. + tags : list of strings (optional, default = []) + A list of the tagging strings that will be used for all jobs created + by this call to the workflow. This will be used in output names. + + Returns + -------- + inj_files : pycbc.workflow.core.FileList + The list of injection files created by this call. + inj_tags : list of strings + The tag corresponding to each injection file and used to uniquely + identify them. The FileList class contains functions to search + based on tags. + """ + if tags is None: + tags = [] + logger.info("Entering injection module.") + make_analysis_dir(output_dir) + + # Get full analysis segment for output file naming + full_segment = workflow.analysis_time + + # Identify which injections to do by presence of sub-sections in + # the configuration file + inj_tags = [] + inj_files = FileList([]) + + for section in workflow.cp.get_subsections(inj_section_name): + inj_tag = section.upper() + curr_tags = tags + [inj_tag] + + # Parse for options in ini file + injection_method = workflow.cp.get_opt_tags("workflow-injections", + "injections-method", + curr_tags) + + if injection_method in ["IN_WORKFLOW", "AT_RUNTIME"]: + exe = select_generic_executable(workflow, 'injections') + inj_job = exe(workflow.cp, inj_section_name, + out_dir=output_dir, ifos='HL', + tags=curr_tags) + if exe is PycbcCreateInjectionsExecutable: + config_urls = workflow.cp.get('workflow-injections', + section+'-config-files') + config_urls = config_urls.split(',') + config_files = FileList([resolve_url_to_file(cf.strip()) + for cf in config_urls]) + node, inj_file = inj_job.create_node(config_files) + else: + node = inj_job.create_node(full_segment) + if injection_method == "AT_RUNTIME": + workflow.execute_node(node) + else: + workflow.add_node(node) + inj_file = node.output_files[0] + inj_files.append(inj_file) + elif injection_method == "PREGENERATED": + file_attrs = { + 'ifos': ['HL'], + 'segs': full_segment, + 'tags': curr_tags + } + injection_path = workflow.cp.get_opt_tags( + "workflow-injections", + "injections-pregenerated-file", + curr_tags + ) + curr_file = resolve_url_to_file(injection_path, attrs=file_attrs) + inj_files.append(curr_file) + else: + err = "Injection method must be one of IN_WORKFLOW, " + err += "AT_RUNTIME or PREGENERATED. Got %s." % (injection_method) + raise ValueError(err) + + inj_tags.append(inj_tag) + + logger.info("Leaving injection module.") + return inj_files, inj_tags
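For orientation, the sketch below shows the shape of configuration this function looks for: one ``[injections-TAG]`` subsection per injection set, and an ``injections-method`` option (IN_WORKFLOW, AT_RUNTIME or PREGENERATED) in ``[workflow-injections]``. The section names here are illustrative only; a PREGENERATED set would also need an ``injections-pregenerated-file`` option.

import configparser

example_ini = """
[workflow-injections]
injections-method = IN_WORKFLOW

[injections-bnslininj]

[injections-nsbhlininj]
"""

cp = configparser.ConfigParser()
cp.read_string(example_ini)

# mimic get_subsections('injections'): the TAG part of each subsection
# becomes the (upper-cased) injection tag
subsections = [s.split('-', 1)[1] for s in cp.sections()
               if s.startswith('injections-')]
print([tag.upper() for tag in subsections])   # ['BNSLININJ', 'NSBHLININJ']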
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/jobsetup.html b/latest/html/_modules/pycbc/workflow/jobsetup.html new file mode 100644 index 00000000000..72e7f6835ce --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/jobsetup.html @@ -0,0 +1,1447 @@ + + + + + + pycbc.workflow.jobsetup — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.workflow.jobsetup

+# Copyright (C) 2013  Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""
+This library code contains functions and classes that are used to set up
+and add jobs/nodes to a pycbc workflow. For details about pycbc.workflow see:
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope.html
+"""
+
+import math, os
+import lal
+from ligo import segments
+from pycbc.workflow.core import Executable, File, FileList, Node
+
+
+[docs] +def int_gps_time_to_str(t): + """Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and + converts it to a string. If a LIGOTimeGPS with nonzero decimal part is + given, raises a ValueError.""" + int_t = int(t) + if abs(float(t - int_t)) > 0.: + raise ValueError('Need an integer GPS time, got %s' % str(t)) + return str(int_t)
+ + +
+[docs] +def select_tmpltbank_class(curr_exe): + """ This function returns a class that is appropriate for setting up + template bank jobs within workflow. + + Parameters + ---------- + curr_exe : string + The name of the executable to be used for generating template banks. + + Returns + -------- + exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility + functions appropriate for the given executable. Instances of the class + ('jobs') **must** have methods + * job.create_node() + and + * job.get_valid_times(ifo, ) + """ + exe_to_class_map = { + 'pycbc_geom_nonspinbank' : PyCBCTmpltbankExecutable, + 'pycbc_aligned_stoch_bank': PyCBCTmpltbankExecutable + } + try: + return exe_to_class_map[curr_exe] + except KeyError: + raise NotImplementedError( + "No job class exists for executable %s, exiting" % curr_exe)
+ + +
+[docs] +def select_matchedfilter_class(curr_exe): + """ This function returns a class that is appropriate for setting up + matched-filtering jobs within workflow. + + Parameters + ---------- + curr_exe : string + The name of the matched filter executable to be used. + + Returns + -------- + exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility + functions appropriate for the given executable. Instances of the class + ('jobs') **must** have methods + * job.create_node() + and + * job.get_valid_times(ifo, ) + """ + exe_to_class_map = { + 'pycbc_inspiral' : PyCBCInspiralExecutable, + 'pycbc_inspiral_skymax' : PyCBCInspiralExecutable, + 'pycbc_multi_inspiral' : PyCBCMultiInspiralExecutable, + } + try: + return exe_to_class_map[curr_exe] + except KeyError: + # also conceivable to introduce a default class?? + raise NotImplementedError( + "No job class exists for executable %s, exiting" % curr_exe)
+ + +
+[docs] +def select_generic_executable(workflow, exe_tag): + """ Returns a class that is appropriate for setting up jobs to run executables + having specific tags in the workflow config. + Executables should not be "specialized" jobs fitting into one of the + select_XXX_class functions above, i.e. not a matched filter or template + bank job, which require extra setup. + + Parameters + ---------- + workflow : pycbc.workflow.core.Workflow + The Workflow instance. + + exe_tag : string + The name of the config section storing options for this executable and + the option giving the executable path in the [executables] section. + + Returns + -------- + exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility + functions appropriate for the given executable. Instances of the class + ('jobs') **must** have a method job.create_node() + """ + exe_path = workflow.cp.get("executables", exe_tag) + exe_name = os.path.basename(exe_path) + exe_to_class_map = { + 'ligolw_add' : LigolwAddExecutable, + 'lalapps_inspinj' : LalappsInspinjExecutable, + 'pycbc_create_injections' : PycbcCreateInjectionsExecutable, + 'pycbc_condition_strain' : PycbcConditionStrainExecutable + } + try: + return exe_to_class_map[exe_name] + except KeyError: + # Should we try some sort of default class?? + raise NotImplementedError( + "No job class exists for executable %s, exiting" % exe_name)
+ + +
+[docs] +def sngl_ifo_job_setup(workflow, ifo, out_files, curr_exe_job, science_segs, + datafind_outs, parents=None, + allow_overlap=True): + """ This function sets up a set of single ifo jobs. A basic overview of how this + works is as follows: + + * (1) Identify the length of data that each job needs to read in, and what + part of that data the job is valid for. + * START LOOPING OVER SCIENCE SEGMENTS + * (2) Identify how many jobs are needed (if any) to cover the given science + segment and the time shift between jobs. If no jobs continue. + * START LOOPING OVER JOBS + * (3) Identify the time that the given job should produce valid output (ie. + inspiral triggers) over. + * (4) Identify the data range that the job will need to read in to produce + the aforementioned valid output. + * (5) Identify all parents/inputs of the job. + * (6) Add the job to the workflow + * END LOOPING OVER JOBS + * END LOOPING OVER SCIENCE SEGMENTS + + Parameters + ----------- + workflow: pycbc.workflow.core.Workflow + An instance of the Workflow class that manages the constructed workflow. + ifo : string + The name of the ifo to set up the jobs for + out_files : pycbc.workflow.core.FileList + The FileList containing the list of jobs. Jobs will be appended + to this list, and it does not need to be empty when supplied. + curr_exe_job : Job + An instanced of the Job class that has a get_valid times method. + science_segs : ligo.segments.segmentlist + The list of times that the jobs should cover + datafind_outs : pycbc.workflow.core.FileList + The file list containing the datafind files. + parents : pycbc.workflow.core.FileList (optional, kwarg, default=None) + The FileList containing the list of jobs that are parents to + the one being set up. + allow_overlap : boolean (optional, kwarg, default = True) + If this is set the times that jobs are valid for will be allowed to + overlap. This may be desired for template banks which may have some + overlap in the times they cover. This may not be desired for inspiral + jobs, where you probably want triggers recorded by jobs to not overlap + at all. + + Returns + -------- + out_files : pycbc.workflow.core.FileList + A list of the files that will be generated by this step in the + workflow. + """ + + ########### (1) ############ + # Get the times that can be analysed and needed data lengths + data_length, valid_chunk, valid_length = identify_needed_data(curr_exe_job) + + exe_tags = curr_exe_job.tags + # Loop over science segments and set up jobs + for curr_seg in science_segs: + ########### (2) ############ + # Initialize the class that identifies how many jobs are needed and the + # shift between them. + segmenter = JobSegmenter(data_length, valid_chunk, valid_length, + curr_seg, curr_exe_job) + + for job_num in range(segmenter.num_jobs): + ############## (3) ############# + # Figure out over what times this job will be valid for + job_valid_seg = segmenter.get_valid_times_for_job(job_num, + allow_overlap=allow_overlap) + + ############## (4) ############# + # Get the data that this job should read in + job_data_seg = segmenter.get_data_times_for_job(job_num) + + ############# (5) ############ + # Identify parents/inputs to the job + if parents: + # Find the set of files with the best overlap + curr_parent = parents.find_outputs_in_range(ifo, job_valid_seg, + useSplitLists=True) + if not curr_parent: + err_string = ("No parent jobs found overlapping %d to %d." + %(job_valid_seg[0], job_valid_seg[1])) + err_string += "\nThis is a bad error! Contact a developer." 
+ raise ValueError(err_string) + else: + curr_parent = [None] + + curr_dfouts = None + if datafind_outs: + curr_dfouts = datafind_outs.find_all_output_in_range(ifo, + job_data_seg, useSplitLists=True) + if not curr_dfouts: + err_str = ("No datafind jobs found overlapping %d to %d." + %(job_data_seg[0],job_data_seg[1])) + err_str += "\nThis shouldn't happen. Contact a developer." + raise ValueError(err_str) + + + ############## (6) ############# + # Make node and add to workflow + + # Note if I have more than one curr_parent I need to make more than + # one job. If there are no curr_parents it is set to [None] and I + # make a single job. This catches the case of a split template bank + # where I run a number of jobs to cover a single range of time. + + for parent in curr_parent: + if len(curr_parent) != 1: + bank_tag = [t for t in parent.tags if 'bank' in t.lower()] + curr_exe_job.update_current_tags(bank_tag + exe_tags) + # We should generate unique names automatically, but it is a + # pain until we can set the output names for all Executables + node = curr_exe_job.create_node(job_data_seg, job_valid_seg, + parent=parent, + df_parents=curr_dfouts) + workflow.add_node(node) + curr_out_files = node.output_files + # FIXME: Here we remove PSD files if they are coming through. + # This should be done in a better way. On to-do list. + curr_out_files = [i for i in curr_out_files if 'PSD_FILE'\ + not in i.tags] + out_files += curr_out_files + + return out_files
+ + +
+[docs] +def multi_ifo_coherent_job_setup(workflow, out_files, curr_exe_job, + science_segs, datafind_outs, output_dir, + parents=None, slide_dict=None, tags=None): + """ + Method for setting up coherent inspiral jobs. + """ + if tags is None: + tags = [] + data_seg, job_valid_seg = curr_exe_job.get_valid_times() + curr_out_files = FileList([]) + if 'IPN' in datafind_outs[-1].description \ + and 'bank_veto_bank' in datafind_outs[-2].description: + # FIXME: This looks like a really nasty hack for the GRB code. + # This should be fixed properly to avoid strange behaviour! + ipn_sky_points = datafind_outs[-1] + bank_veto = datafind_outs[-2] + frame_files = datafind_outs[:-2] + else: + ipn_sky_points = None + if 'bank_veto_bank' in datafind_outs[-1].name: + bank_veto = datafind_outs[-1] + frame_files = datafind_outs[:-1] + else: + bank_veto = None + frame_files = datafind_outs + + split_bank_counter = 0 + + if curr_exe_job.injection_file is None: + for split_bank in parents: + tag = list(tags) + tag.append(split_bank.tag_str) + node = curr_exe_job.create_node(data_seg, job_valid_seg, + parent=split_bank, dfParents=frame_files, + bankVetoBank=bank_veto, ipn_file=ipn_sky_points, + slide=slide_dict, tags=tag) + workflow.add_node(node) + split_bank_counter += 1 + curr_out_files.extend(node.output_files) + else: + for inj_file in curr_exe_job.injection_file: + for split_bank in parents: + tag = list(tags) + tag.append(inj_file.tag_str) + tag.append(split_bank.tag_str) + node = curr_exe_job.create_node(data_seg, job_valid_seg, + parent=split_bank, inj_file=inj_file, tags=tag, + dfParents=frame_files, bankVetoBank=bank_veto, + ipn_file=ipn_sky_points) + workflow.add_node(node) + split_bank_counter += 1 + curr_out_files.extend(node.output_files) + + # FIXME: Here we remove PSD files if they are coming + # through. This should be done in a better way. On + # to-do list. + # IWHNOTE: This will not be needed when coh_PTF is retired, but it is + # okay to do this. It just means you can't access these files + # later. + curr_out_files = [i for i in curr_out_files if 'PSD_FILE'\ + not in i.tags] + out_files += curr_out_files + + return out_files
+ + +
+[docs] +def identify_needed_data(curr_exe_job): + """ This function will identify the length of data that a specific + executable needs to analyse and what part of that data is valid (ie. + inspiral doesn't analyse the first or last 8s of data it reads in). + + Parameters + ----------- + curr_exe_job : Job + An instance of the Job class that has a get_valid_times method. + + Returns + -------- + data_lengths : list of float + The amount of data (in seconds) that each instance of the job must read + in, one entry per possible tile size. + valid_chunks : list of ligo.segments.segment + The times within each data length for which the job's output **can** be + valid (ie. for inspiral this is (72, dataLength-72) as, for a standard + setup, the inspiral job cannot look for triggers in the first 72 or + last 72 seconds of data read in.) + valid_lengths : list of float + The maximum length of data each job can be valid for. This is + abs(valid_chunk). + """ + # Set up the condorJob class for the current executable + data_lengths, valid_chunks = curr_exe_job.get_valid_times() + + # Begin by getting analysis start and end, and start and end of time + # that the output file is valid for + valid_lengths = [abs(valid_chunk) for valid_chunk in valid_chunks] + + return data_lengths, valid_chunks, valid_lengths
+ + + +
+[docs] +class JobSegmenter(object): + """ This class is used when running sngl_ifo_job_setup to determine what times + should be analysed by each job and what data is needed. A short sketch of the + job-count arithmetic used here is given after this class. + """ + + def __init__(self, data_lengths, valid_chunks, valid_lengths, curr_seg, + curr_exe_class): + """ Initialize class. """ + self.exe_class = curr_exe_class + self.curr_seg = curr_seg + self.curr_seg_length = float(abs(curr_seg)) + + self.data_length, self.valid_chunk, self.valid_length = \ + self.pick_tile_size(self.curr_seg_length, data_lengths, + valid_chunks, valid_lengths) + + self.data_chunk = segments.segment([0, self.data_length]) + self.data_loss = self.data_length - abs(self.valid_chunk) + + if self.data_loss < 0: + raise ValueError("pycbc.workflow.jobsetup needs fixing! Please contact a developer") + + if self.curr_seg_length < self.data_length: + self.num_jobs = 0 + return + + # How many jobs do we need + self.num_jobs = int( math.ceil( (self.curr_seg_length \ + - self.data_loss) / float(self.valid_length) )) + + if self.curr_seg_length == self.data_length: + # If the segment length is identical to the data length then I + # will have exactly 1 job! + self.job_time_shift = 0 + else: + # What is the incremental shift between jobs + self.job_time_shift = (self.curr_seg_length - self.data_length) / \ + float(self.num_jobs - 1) +
+[docs] + def pick_tile_size(self, seg_size, data_lengths, valid_chunks, valid_lengths): + """ Choose job tiles size based on science segment length """ + + if len(valid_lengths) == 1: + return data_lengths[0], valid_chunks[0], valid_lengths[0] + else: + # Pick the tile size that is closest to 1/3 of the science segment + target_size = seg_size / 3 + pick, pick_diff = 0, abs(valid_lengths[0] - target_size) + for i, size in enumerate(valid_lengths): + if abs(size - target_size) < pick_diff: + pick, pick_diff = i, abs(size - target_size) + return data_lengths[pick], valid_chunks[pick], valid_lengths[pick]
+ + +
+[docs] + def get_valid_times_for_job(self, num_job, allow_overlap=True): + """ Get the times for which this job is valid. """ + # small factor of 0.0001 to avoid float round offs causing us to + # miss a second at end of segments. + shift_dur = self.curr_seg[0] + int(self.job_time_shift * num_job\ + + 0.0001) + job_valid_seg = self.valid_chunk.shift(shift_dur) + # If we need to recalculate the valid times to avoid overlap + if not allow_overlap: + data_per_job = (self.curr_seg_length - self.data_loss) / \ + float(self.num_jobs) + lower_boundary = num_job*data_per_job + \ + self.valid_chunk[0] + self.curr_seg[0] + upper_boundary = data_per_job + lower_boundary + # NOTE: Convert to int after calculating both boundaries + # small factor of 0.0001 to avoid float round offs causing us to + # miss a second at end of segments. + lower_boundary = int(lower_boundary) + upper_boundary = int(upper_boundary + 0.0001) + if lower_boundary < job_valid_seg[0] or \ + upper_boundary > job_valid_seg[1]: + err_msg = ("Workflow is attempting to generate output " + "from a job at times where it is not valid.") + raise ValueError(err_msg) + job_valid_seg = segments.segment([lower_boundary, + upper_boundary]) + return job_valid_seg
+ + +
+[docs] + def get_data_times_for_job(self, num_job): + """ Get the data that this job will read in. """ + # small factor of 0.0001 to avoid float round offs causing us to + # miss a second at end of segments. + shift_dur = self.curr_seg[0] + int(self.job_time_shift * num_job\ + + 0.0001) + job_data_seg = self.data_chunk.shift(shift_dur) + # Sanity check that all data is used + if num_job == 0: + if job_data_seg[0] != self.curr_seg[0]: + err= "Job is not using data from the start of the " + err += "science segment. It should be using all data." + raise ValueError(err) + if num_job == (self.num_jobs - 1): + if job_data_seg[1] != self.curr_seg[1]: + err = "Job is not using data from the end of the " + err += "science segment. It should be using all data." + raise ValueError(err) + + if hasattr(self.exe_class, 'zero_pad_data_extend'): + job_data_seg = self.exe_class.zero_pad_data_extend(job_data_seg, + self.curr_seg) + + return job_data_seg
+
+ + + +
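As referenced in the ``JobSegmenter`` docstring, here is a minimal sketch of the job-count arithmetic from ``__init__`` with made-up numbers: a 2048 s science segment tiled by jobs that each read 512 s of data and produce 400 s of valid output, so 112 s of data is "lost" per job.

import math

curr_seg_length = 2048.   # science segment length (s), made up
data_length = 512.        # data each job reads in (s), made up
valid_length = 400.       # valid output per job (s), made up
data_loss = data_length - valid_length

# number of jobs needed to cover the segment
num_jobs = int(math.ceil((curr_seg_length - data_loss) / valid_length))

# incremental shift between consecutive job start times
if curr_seg_length == data_length:
    job_time_shift = 0
else:
    job_time_shift = (curr_seg_length - data_length) / float(num_jobs - 1)

print(num_jobs, job_time_shift)   # 5 jobs, each shifted by 384.0 s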
+[docs] +class PyCBCInspiralExecutable(Executable): + """ The class used to create jobs for pycbc_inspiral Executable. """ + + current_retention_level = Executable.ALL_TRIGGERS + time_dependent_options = ['--channel-name'] + + def __init__(self, cp, exe_name, ifo=None, out_dir=None, + injection_file=None, tags=None, reuse_executable=False): + if tags is None: + tags = [] + super().__init__(cp, exe_name, ifo, out_dir, tags=tags, + reuse_executable=reuse_executable, + set_submit_subdir=False) + self.cp = cp + self.injection_file = injection_file + self.ext = '.hdf' + + self.num_threads = 1 + if self.get_opt('processing-scheme') is not None: + stxt = self.get_opt('processing-scheme') + if len(stxt.split(':')) > 1: + self.num_threads = stxt.split(':')[1] + +
+[docs] + def create_node(self, data_seg, valid_seg, parent=None, df_parents=None, + tags=None): + if tags is None: + tags = [] + node = Node(self, valid_seg=valid_seg) + if not self.has_opt('pad-data'): + raise ValueError("The option pad-data is a required option of " + "%s. Please check the ini file." % self.name) + pad_data = int(self.get_opt('pad-data')) + + # set remaining options flags + node.add_opt('--gps-start-time', + int_gps_time_to_str(data_seg[0] + pad_data)) + node.add_opt('--gps-end-time', + int_gps_time_to_str(data_seg[1] - pad_data)) + node.add_opt('--trig-start-time', int_gps_time_to_str(valid_seg[0])) + node.add_opt('--trig-end-time', int_gps_time_to_str(valid_seg[1])) + + if self.injection_file is not None: + node.add_input_opt('--injection-file', self.injection_file) + + # set the input and output files + fil = node.new_output_file_opt( + valid_seg, + self.ext, + '--output', + tags=tags, + store_file=self.retain_files, + use_tmp_subdirs=True + ) + + # For inspiral jobs we overrwrite the "relative.submit.dir" + # attribute to avoid too many files in one sub-directory + curr_rel_dir = fil.name.split('/')[0] + node.add_profile('pegasus', 'relative.submit.dir', + self.pegasus_name + '_' + curr_rel_dir) + + # Must ensure this is not a LIGOGPS as JSON won't understand it + data_seg = segments.segment([int(data_seg[0]), int(data_seg[1])]) + fil.add_metadata('data_seg', data_seg) + node.add_input_opt('--bank-file', parent) + if df_parents is not None: + node.add_input_list_opt('--frame-files', df_parents) + + return node
+ + +
+[docs] + def get_valid_times(self): + """ Determine possible dimensions of needed input and valid output + """ + + if self.cp.has_option('workflow-matchedfilter', + 'min-analysis-segments'): + min_analysis_segs = int(self.cp.get('workflow-matchedfilter', + 'min-analysis-segments')) + else: + min_analysis_segs = 0 + + if self.cp.has_option('workflow-matchedfilter', + 'max-analysis-segments'): + max_analysis_segs = int(self.cp.get('workflow-matchedfilter', + 'max-analysis-segments')) + else: + # Choose ridiculously large default value + max_analysis_segs = 1000 + + if self.cp.has_option('workflow-matchedfilter', 'min-analysis-length'): + min_analysis_length = int(self.cp.get('workflow-matchedfilter', + 'min-analysis-length')) + else: + min_analysis_length = 0 + + if self.cp.has_option('workflow-matchedfilter', 'max-analysis-length'): + max_analysis_length = int(self.cp.get('workflow-matchedfilter', + 'max-analysis-length')) + else: + # Choose a ridiculously large default value + max_analysis_length = 100000 + + segment_length = int(self.get_opt('segment-length')) + pad_data = 0 + if self.has_opt('pad-data'): + pad_data += int(self.get_opt('pad-data')) + + # NOTE: Currently the tapered data is ignored as it is short and + # will lie within the segment start/end pad. This means that + # the tapered data *will* be used for PSD estimation (but this + # effect should be small). It will also be in the data segments + # used for SNR generation (when in the middle of a data segment + # where zero-padding is not being used) but the templates should + # not be long enough to use this data assuming segment start/end + # pad take normal values. When using zero-padding this data will + # be used for SNR generation. + + #if self.has_opt('taper-data'): + # pad_data += int(self.get_opt( 'taper-data' )) + if self.has_opt('allow-zero-padding'): + self.zero_padding=True + else: + self.zero_padding=False + + start_pad = int(self.get_opt( 'segment-start-pad')) + end_pad = int(self.get_opt('segment-end-pad')) + + seg_ranges = range(min_analysis_segs, max_analysis_segs + 1) + data_lengths = [] + valid_regions = [] + for nsegs in seg_ranges: + analysis_length = (segment_length - start_pad - end_pad) * nsegs + if not self.zero_padding: + data_length = analysis_length + pad_data * 2 \ + + start_pad + end_pad + start = pad_data + start_pad + end = data_length - pad_data - end_pad + else: + data_length = analysis_length + pad_data * 2 + start = pad_data + end = data_length - pad_data + if data_length > max_analysis_length: continue + if data_length < min_analysis_length: continue + data_lengths += [data_length] + valid_regions += [segments.segment(start, end)] + # If min_analysis_length is given, ensure that it is added as an option + # for job analysis length. + if min_analysis_length: + data_length = min_analysis_length + if not self.zero_padding: + start = pad_data + start_pad + end = data_length - pad_data - end_pad + else: + start = pad_data + end = data_length - pad_data + if end > start: + data_lengths += [data_length] + valid_regions += [segments.segment(start, end)] + return data_lengths, valid_regions
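To make the bookkeeping in ``get_valid_times`` above concrete, here is a small sketch of the data-length and valid-region arithmetic for the non-zero-padding case, using made-up option values (256 s segment-length, 8 s pad-data, 64 s segment start/end pads).

from ligo import segments

segment_length = 256   # --segment-length (made up)
pad_data = 8           # --pad-data (made up)
start_pad = 64         # --segment-start-pad (made up)
end_pad = 64           # --segment-end-pad (made up)

for nsegs in (1, 2, 3):
    # analysis time contributed by nsegs analysis segments
    analysis_length = (segment_length - start_pad - end_pad) * nsegs
    # total data each job reads in, including padding at both ends
    data_length = analysis_length + pad_data * 2 + start_pad + end_pad
    # region of that data for which triggers are considered valid
    valid = segments.segment(pad_data + start_pad,
                             data_length - pad_data - end_pad)
    print(nsegs, data_length, valid, abs(valid))
# e.g. nsegs=1 gives 272 s of data with a valid region of (72, 200)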
+ + +
+[docs] + def zero_pad_data_extend(self, job_data_seg, curr_seg): + """When using zero padding, *all* data is analysable, but the setup + functions must include the padding data where it is available so that + we are not zero-padding in the middle of science segments. This + function takes a job_data_seg, that is chosen for a particular node + and extends it with segment-start-pad and segment-end-pad if that + data is available. + """ + if self.zero_padding is False: + return job_data_seg + else: + start_pad = int(self.get_opt( 'segment-start-pad')) + end_pad = int(self.get_opt('segment-end-pad')) + new_data_start = max(curr_seg[0], job_data_seg[0] - start_pad) + new_data_end = min(curr_seg[1], job_data_seg[1] + end_pad) + new_data_seg = segments.segment([new_data_start, new_data_end]) + return new_data_seg
+
+ + +# FIXME: This is probably misnamed, this is really GRBInspiralExectuable. +# There's nothing coherent here, it's just that data segment stuff is +# very different between GRB and all-sky/all-time +
+[docs] +class PyCBCMultiInspiralExecutable(Executable): + """ + The class responsible for setting up jobs for the + pycbc_multi_inspiral executable. + """ + current_retention_level = Executable.ALL_TRIGGERS + + # bank-veto-bank-file is a file input option for pycbc_multi_inspiral + file_input_options = Executable.file_input_options + \ + ['--bank-veto-bank-file'] + + def __init__(self, cp, name, ifo=None, injection_file=None, + gate_files=None, out_dir=None, tags=None): + if tags is None: + tags = [] + super().__init__(cp, name, ifo, out_dir=out_dir, tags=tags) + self.injection_file = injection_file + self.data_seg = segments.segment(int(cp.get('workflow', 'start-time')), + int(cp.get('workflow', 'end-time'))) + self.num_threads = 1 + +
+[docs] + def create_node(self, data_seg, valid_seg, parent=None, inj_file=None, + dfParents=None, bankVetoBank=None, ipn_file=None, + slide=None, tags=None): + if tags is None: + tags = [] + node = Node(self) + + if not dfParents: + raise ValueError("%s must be supplied with frame or cache files" + % self.name) + + # If doing single IFO search, make sure slides are disabled + if len(self.ifo_list) < 2 and \ + (node.get_opt('--do-short-slides') is not None or \ + node.get_opt('--short-slide-offset') is not None): + raise ValueError("Cannot run with time slides in a single IFO " + "configuration! Please edit your configuration " + "file accordingly.") + + # Set instuments + node.add_opt("--instruments", " ".join(self.ifo_list)) + + pad_data = self.get_opt('pad-data') + if pad_data is None: + raise ValueError("The option pad-data is a required option of " + "%s. Please check the ini file." % self.name) + + # Feed in bank_veto_bank.xml, if given + if self.cp.has_option('workflow-inspiral', 'bank-veto-bank-file'): + node.add_input_opt('--bank-veto-bank-file', bankVetoBank) + # Set time options + node.add_opt('--gps-start-time', data_seg[0] + int(pad_data)) + node.add_opt('--gps-end-time', data_seg[1] - int(pad_data)) + node.add_opt('--trig-start-time', valid_seg[0]) + node.add_opt('--trig-end-time', valid_seg[1]) + node.add_opt('--trigger-time', self.cp.get('workflow', 'trigger-time')) + + # Set the input and output files + node.new_output_file_opt(data_seg, '.hdf', '--output', + tags=tags, store_file=self.retain_files) + node.add_input_opt('--bank-file', parent, ) + + if dfParents is not None: + frame_arg = '--frame-files' + for frame_file in dfParents: + frame_arg += f" {frame_file.ifo}:{frame_file.name}" + node.add_input(frame_file) + node.add_arg(frame_arg) + + if ipn_file is not None: + node.add_input_opt('--sky-positions-file', ipn_file) + + if inj_file is not None: + if self.get_opt('--do-short-slides') is not None or \ + self.get_opt('--short-slide-offset') is not None: + raise ValueError("Cannot run with short slides in an " + "injection job. Please edit your " + "configuration file accordingly.") + node.add_input_opt('--injection-file', inj_file) + + if slide is not None: + for ifo in self.ifo_list: + node.add_opt('--%s-slide-segment' % ifo.lower(), slide[ifo]) + + # Channels + channel_names = {} + for ifo in self.ifo_list: + channel_names[ifo] = self.cp.get_opt_tags( + "workflow", "%s-channel-name" % ifo.lower(), "") + channel_names_str = \ + " ".join([val for key, val in channel_names.items()]) + node.add_opt("--channel-name", channel_names_str) + + return node
+ + +
+[docs] + def get_valid_times(self): + pad_data = int(self.get_opt('pad-data')) + if self.has_opt("segment-start-pad"): + pad_data = int(self.get_opt("pad-data")) + start_pad = int(self.get_opt("segment-start-pad")) + end_pad = int(self.get_opt("segment-end-pad")) + valid_start = self.data_seg[0] + pad_data + start_pad + valid_end = self.data_seg[1] - pad_data - end_pad + elif self.has_opt('analyse-segment-end'): + safety = 1 + deadtime = int(self.get_opt('segment-length')) / 2 + spec_len = int(self.get_opt('inverse-spec-length')) / 2 + valid_start = (self.data_seg[0] + deadtime - spec_len + pad_data - + safety) + valid_end = self.data_seg[1] - spec_len - pad_data - safety + else: + overlap = int(self.get_opt('segment-length')) / 4 + valid_start = self.data_seg[0] + overlap + pad_data + valid_end = self.data_seg[1] - overlap - pad_data + + return self.data_seg, segments.segment(valid_start, valid_end)
+
+ + + +
+[docs] +class PyCBCTmpltbankExecutable(Executable): + """ The class used to create jobs for pycbc_geom_nonspin_bank Executable and + any other Executables using the same command line option groups. + """ + + current_retention_level = Executable.MERGED_TRIGGERS + def __init__(self, cp, exe_name, ifo=None, out_dir=None, + tags=None, write_psd=False, psd_files=None): + if tags is None: + tags = [] + super().__init__(cp, exe_name, ifo, out_dir, tags=tags) + self.cp = cp + self.write_psd = write_psd + self.psd_files = psd_files + +
+[docs] + def create_node(self, data_seg, valid_seg, parent=None, df_parents=None, tags=None): + if tags is None: + tags = [] + node = Node(self) + + if not df_parents: + raise ValueError("%s must be supplied with data file(s)" + % self.name) + + pad_data = self.get_opt('pad-data') + if pad_data is None: + raise ValueError("The option pad-data is a required option of " + "%s. Please check the ini file." % self.name) + pad_data = int(pad_data) + + # set the remaining option flags + node.add_opt('--gps-start-time', + int_gps_time_to_str(data_seg[0] + pad_data)) + node.add_opt('--gps-end-time', + int_gps_time_to_str(data_seg[1] - pad_data)) + + # set the input and output files + # Add the PSD file if needed + if self.write_psd: + node.new_output_file_opt(valid_seg, '.txt', '--psd-output', + tags=tags+['PSD_FILE'], store_file=self.retain_files) + node.new_output_file_opt(valid_seg, '.xml.gz', '--output-file', + tags=tags, store_file=self.retain_files) + node.add_input_list_opt('--frame-files', df_parents) + return node
+ + +
+[docs] + def create_nodata_node(self, valid_seg, tags=None): + """ A simplified version of create_node that creates a node that does + not need to read in data. + + Parameters + ----------- + valid_seg : ligo.segments.segment + The segment over which to declare the node valid. Usually this + would be the duration of the analysis. + + Returns + -------- + node : pycbc.workflow.core.Node + The instance corresponding to the created node. + """ + if tags is None: + tags = [] + node = Node(self) + + # Set the output file + # Add the PSD file if needed + if self.write_psd: + node.new_output_file_opt(valid_seg, '.txt', '--psd-output', + tags=tags+['PSD_FILE'], + store_file=self.retain_files) + + node.new_output_file_opt(valid_seg, '.xml.gz', '--output-file', + store_file=self.retain_files) + + if self.psd_files is not None: + should_add = False + + # If any of the ifos for this job are in the set + # of ifos for which a static psd was provided. + for ifo in self.ifo_list: + for psd_file in self.psd_files: + if ifo in psd_file.ifo_list: + should_add = True + + if should_add: + node.add_input_opt('--psd-file', psd_file) + + return node
+ + +
+[docs] + def get_valid_times(self): + pad_data = int(self.get_opt('pad-data')) + analysis_length = int(self.cp.get('workflow-tmpltbank', + 'analysis-length')) + data_length = analysis_length + pad_data * 2 + start = pad_data + end = data_length - pad_data + return [data_length], [segments.segment(start, end)]
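As a hedged illustration of the bookkeeping above (the numbers here are hypothetical; the real ones come from the [workflow-tmpltbank] section and the executable's pad-data option):

# With analysis-length = 2048 and pad-data = 8 the job is handed 2064 s of
# data and the resulting bank is declared valid for the central 2048 s.
pad_data = 8
analysis_length = 2048
data_length = analysis_length + pad_data * 2   # 2064
start = pad_data                               # 8
end = data_length - pad_data                   # 2056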
+
+ + + +
+[docs] +class LigolwAddExecutable(Executable): + """ The class used to create nodes for the ligolw_add Executable. """ + + current_retention_level = Executable.INTERMEDIATE_PRODUCT + +
+[docs] + def create_node(self, jobSegment, input_files, output=None, + use_tmp_subdirs=True, tags=None): + if tags is None: + tags = [] + node = Node(self) + + # Very few options to ligolw_add, all input files are given as a long + # argument list. If this becomes unwieldy we could dump all these files + # to a cache file and read that in. ALL INPUT FILES MUST BE LISTED AS + # INPUTS (with .add_input_opt_file) IF THIS IS DONE THOUGH! + for fil in input_files: + node.add_input_arg(fil) + + if output: + node.add_output_opt('--output', output) + else: + node.new_output_file_opt(jobSegment, '.xml.gz', '--output', + tags=tags, store_file=self.retain_files, + use_tmp_subdirs=use_tmp_subdirs) + return node
+
+ + + +
+[docs] +class PycbcSplitInspinjExecutable(Executable): + """ + The class responsible for running the pycbc_split_inspinj executable + """ + current_retention_level = Executable.INTERMEDIATE_PRODUCT + + def __init__(self, cp, exe_name, num_splits, ifo=None, out_dir=None): + super().__init__(cp, exe_name, ifo, out_dir, tags=[]) + self.num_splits = int(num_splits) + +
+[docs] + def create_node(self, parent, tags=None): + if tags is None: + tags = [] + node = Node(self) + + node.add_input_opt('--input-file', parent) + + if parent.name.endswith("gz"): + ext = ".xml.gz" + else: + ext = ".xml" + + out_files = FileList([]) + for i in range(self.num_splits): + curr_tag = 'split%d' % i + curr_tags = parent.tags + [curr_tag] + job_tag = parent.description + "_" + self.name.upper() + out_file = File(parent.ifo_list, job_tag, parent.segment, + extension=ext, directory=self.out_dir, + tags=curr_tags, store_file=self.retain_files) + out_files.append(out_file) + + node.add_output_list_opt('--output-files', out_files) + return node
+
+ + + +
+[docs] +class LalappsInspinjExecutable(Executable): + """ + The class used to create jobs for the lalapps_inspinj Executable. + """ + current_retention_level = Executable.FINAL_RESULT + extension = '.xml' +
+[docs] + def create_node(self, segment, exttrig_file=None, tags=None): + if tags is None: + tags = [] + node = Node(self) + + curr_tags = self.tags + tags + # This allows the desired number of injections to be given explicitly + # in the config file. Used for coh_PTF as segment length is unknown + # before run time. + if self.get_opt('write-compress') is not None: + self.extension = '.xml.gz' + + # Check if these injections are using trigger information to choose + # sky positions for the simulated signals + if (self.get_opt('l-distr') == 'exttrig' and exttrig_file is not None \ + and 'trigger' in exttrig_file.description): + # Use an XML file containing trigger information + triggered = True + node.add_input_opt('--exttrig-file', exttrig_file) + elif (self.get_opt('l-distr') == 'ipn' and exttrig_file is not None \ + and 'IPN' in exttrig_file.description): + # Use an IPN sky points file + triggered = True + node.add_input_opt('--ipn-file', exttrig_file) + elif (self.get_opt('l-distr') != 'exttrig') \ + and (self.get_opt('l-distr') != 'ipn' and not \ + self.has_opt('ipn-file')): + # Use no trigger information for generating injections + triggered = False + else: + err_msg = "The argument 'l-distr' passed to the " + err_msg += "%s job has the value " % self.tagged_name + err_msg += "'%s' but you have not " % self.get_opt('l-distr') + err_msg += "provided the corresponding ExtTrig or IPN file. " + err_msg += "Please check your configuration files and try again." + raise ValueError(err_msg) + + if triggered: + num_injs = int(self.cp.get_opt_tags('workflow-injections', + 'num-injs', curr_tags)) + inj_tspace = float(segment[1] - segment[0]) / num_injs + node.add_opt('--time-interval', inj_tspace) + node.add_opt('--time-step', inj_tspace) + + node.new_output_file_opt(segment, self.extension, '--output', + store_file=self.retain_files) + + node.add_opt('--gps-start-time', int_gps_time_to_str(segment[0])) + node.add_opt('--gps-end-time', int_gps_time_to_str(segment[1])) + return node
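In the triggered case the injection spacing is derived from the segment length and the requested number of injections. A small sketch with made-up numbers shows the arithmetic:

# Hypothetical: num-injs = 5000 in [workflow-injections], 6000 s segment.
segment = (1000000000, 1000006000)
num_injs = 5000
inj_tspace = float(segment[1] - segment[0]) / num_injs   # 1.2 s
# Both --time-interval and --time-step are then set to 1.2 s.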
+
+ + + +
+[docs] +class PycbcSplitBankExecutable(Executable): + """ The class responsible for creating jobs for pycbc_hdf5_splitbank. """ + + extension = '.hdf' + current_retention_level = Executable.ALL_TRIGGERS + def __init__(self, cp, exe_name, num_banks, + ifo=None, out_dir=None): + super().__init__(cp, exe_name, ifo, out_dir, tags=[]) + self.num_banks = int(num_banks) + +
+[docs] + def create_node(self, bank, tags=None): + """ + Set up a CondorDagmanNode class to run splitbank code + + Parameters + ---------- + bank : pycbc.workflow.core.File + The File containing the template bank to be split + + Returns + -------- + node : pycbc.workflow.core.Node + The node to run the job + """ + if tags is None: + tags = [] + node = Node(self) + node.add_input_opt('--bank-file', bank) + + # Get the output (taken from inspiral.py) + out_files = FileList([]) + for i in range( 0, self.num_banks): + curr_tag = 'bank%d' %(i) + # FIXME: What should the tags actually be? The job.tags values are + # currently ignored. + curr_tags = bank.tags + [curr_tag] + tags + job_tag = bank.description + "_" + self.name.upper() + out_file = File(bank.ifo_list, job_tag, bank.segment, + extension=self.extension, directory=self.out_dir, + tags=curr_tags, store_file=self.retain_files) + out_files.append(out_file) + node.add_output_list_opt('--output-filenames', out_files) + return node
+
+ + + +
+[docs] +class PycbcSplitBankXmlExecutable(PycbcSplitBankExecutable): + """ Subclass responsible for creating jobs for pycbc_splitbank. """ + + extension = '.xml.gz'
+ + + +
+[docs] +class PycbcConditionStrainExecutable(Executable): + """ The class responsible for creating jobs for pycbc_condition_strain. """ + + current_retention_level = Executable.ALL_TRIGGERS + +
+[docs] + def create_node(self, input_files, tags=None): + if tags is None: + tags = [] + node = Node(self) + start_time = self.cp.get("workflow", "start-time") + end_time = self.cp.get("workflow", "end-time") + node.add_opt('--gps-start-time', start_time) + node.add_opt('--gps-end-time', end_time) + node.add_input_list_opt('--frame-files', input_files) + + out_file = File(self.ifo, "gated", + segments.segment(int(start_time), int(end_time)), + directory=self.out_dir, store_file=self.retain_files, + extension=input_files[0].name.split('.', 1)[-1], + tags=tags) + node.add_output_opt('--output-strain-file', out_file) + + out_gates_file = File(self.ifo, "output_gates", + segments.segment(int(start_time), int(end_time)), + directory=self.out_dir, extension='txt', + store_file=self.retain_files, tags=tags) + node.add_output_opt('--output-gates-file', out_gates_file) + + return node, out_file
+
+ + + +
+[docs] +class PycbcCreateInjectionsExecutable(Executable): + """ The class responsible for creating jobs + for ``pycbc_create_injections``. + """ + + current_retention_level = Executable.ALL_TRIGGERS + extension = '.hdf' + +
+[docs] + def create_node(self, config_files=None, seed=None, tags=None): + """ Set up a CondorDagmanNode class to run ``pycbc_create_injections``. + + Parameters + ---------- + config_files : pycbc.workflow.core.FileList + A ``pycbc.workflow.core.FileList`` for injection configuration + files to be used with ``--config-files`` option. + seed : int + Seed to use for generating injections. + tags : list + A list of tags to include in filenames. + + Returns + -------- + node : pycbc.workflow.core.Node + The node to run the job. + injection_file : pycbc.workflow.core.File + The HDF injection file that the node will produce. + """ + + # default for tags is empty list + tags = [] if tags is None else tags + + # get analysis start and end time + start_time = self.cp.get("workflow", "start-time") + end_time = self.cp.get("workflow", "end-time") + analysis_time = segments.segment(int(start_time), int(end_time)) + + # make node for running executable + node = Node(self) + if config_files is not None: + node.add_input_list_opt("--config-files", config_files) + if seed: + node.add_opt("--seed", seed) + injection_file = node.new_output_file_opt(analysis_time, + self.extension, + "--output-file", + tags=tags) + + return node, injection_file
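A possible usage sketch; the section name 'create_injections' and the variable names are illustrative, not fixed by the class:

# `wf` is an existing pycbc.workflow.Workflow and `inj_configs` a FileList
# of injection configuration files.
inj_exe = PycbcCreateInjectionsExecutable(wf.cp, 'create_injections',
                                          out_dir='injections')
node, inj_file = inj_exe.create_node(config_files=inj_configs,
                                     seed=1234, tags=['BNS'])
wf += node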
+
+ + + +
+[docs] +class PycbcInferenceExecutable(Executable): + """ The class responsible for creating jobs for ``pycbc_inference``. + """ + + current_retention_level = Executable.ALL_TRIGGERS + +
+[docs] + def create_node(self, config_file, seed=None, tags=None, + analysis_time=None): + """ Set up a pegasus.Node instance to run ``pycbc_inference``. + + Parameters + ---------- + config_file : pycbc.workflow.core.File + A ``pycbc.workflow.core.File`` for the inference configuration file + to be used with the ``--config-file`` option. + seed : int + An ``int`` to be used with ``--seed`` option. + tags : list + A list of tags to include in filenames. + analysis_time : ligo.segments.segment, optional + The time span over which the output file is declared valid. If not + given, it is constructed from the [workflow] start-time and + end-time options. + + Returns + -------- + node : pycbc.workflow.core.Node + The node to run the job. + inference_file : pycbc.workflow.core.File + The HDF output file that the node will produce. + """ + # default for tags is empty list + tags = [] if tags is None else tags + # if analysis time not provided, try to get it from the config file + if analysis_time is None: + start_time = self.cp.get("workflow", "start-time") + end_time = self.cp.get("workflow", "end-time") + analysis_time = segments.segment(int(start_time), int(end_time)) + # make node for running executable + node = Node(self) + node.add_input_opt("--config-file", config_file) + if seed is not None: + node.add_opt("--seed", seed) + inference_file = node.new_output_file_opt(analysis_time, + ".hdf", "--output-file", + tags=tags) + if self.cp.has_option("pegasus_profile-inference", + "condor|+CheckpointSig"): + err_msg = "This is not yet supported/tested with pegasus 5. " + err_msg += "Please reimplement this (with unittest :-) )." + raise ValueError(err_msg) + #ckpt_file_name = "{}.checkpoint".format(inference_file.name) + #ckpt_file = dax.File(ckpt_file_name) + # DO NOT call pegasus API stuff outside of + # pegasus_workflow.py. + #node._dax_node.uses(ckpt_file, link=dax.Link.OUTPUT, + # register=False, transfer=False) + + return node, inference_file
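A hedged sketch of how this might be driven from a workflow script; the executable section name and variables are illustrative:

inference_exe = PycbcInferenceExecutable(wf.cp, 'inference', ifos=wf.ifos,
                                         out_dir='inference')
# config_file is a pycbc.workflow.core.File wrapping the inference ini file.
node, inference_hdf = inference_exe.create_node(config_file, seed=12,
                                                tags=['EVENT1'])
wf += node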
+
+ + + +
+[docs] +class PycbcHDFSplitInjExecutable(Executable): + """ The class responsible for creating jobs for ``pycbc_hdf_splitinj``. + """ + current_retention_level = Executable.ALL_TRIGGERS + + def __init__(self, cp, exe_name, num_splits, ifo=None, out_dir=None): + super().__init__(cp, exe_name, ifo, out_dir, tags=[]) + self.num_splits = int(num_splits) + +
+[docs] + def create_node(self, parent, tags=None): + if tags is None: + tags = [] + node = Node(self) + node.add_input_opt('--input-file', parent) + out_files = FileList([]) + for i in range(self.num_splits): + curr_tag = 'split%d' % i + curr_tags = parent.tags + [curr_tag] + job_tag = parent.description + "_" + self.name.upper() + out_file = File(parent.ifo_list, job_tag, parent.segment, + extension='.hdf', directory=self.out_dir, + tags=curr_tags, store_file=self.retain_files) + out_files.append(out_file) + node.add_output_list_opt('--output-files', out_files) + return node
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/matched_filter.html b/latest/html/_modules/pycbc/workflow/matched_filter.html new file mode 100644 index 00000000000..785bd090b88 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/matched_filter.html @@ -0,0 +1,428 @@ + + + + + + pycbc.workflow.matched_filter — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.workflow.matched_filter

+# Copyright (C) 2013  Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""
+This module is responsible for setting up the matched-filtering stage of
+workflows. For details about this module and its capabilities see here:
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/NOTYETCREATED.html
+"""
+
+
+import os
+import logging
+
+from pycbc.workflow.core import FileList, make_analysis_dir
+from pycbc.workflow.jobsetup import (select_matchedfilter_class,
+                                     sngl_ifo_job_setup,
+                                     multi_ifo_coherent_job_setup)
+
+logger = logging.getLogger('pycbc.workflow.matched_filter')
+
+
+
+[docs] +def setup_matchedfltr_workflow(workflow, science_segs, datafind_outs, + tmplt_banks, output_dir=None, + injection_file=None, tags=None): + ''' + This function aims to be the gateway for setting up a set of matched-filter + jobs in a workflow. This function is intended to support multiple + different ways/codes that could be used for doing this. For now the only + supported sub-module is one that runs the matched-filtering by setting up + a series of matched-filtering jobs, from one executable, to create + matched-filter triggers covering the full range of science times for which + there is data and a template bank file. + + Parameters + ----------- + workflow : pycbc.workflow.core.Workflow + The workflow instance that the coincidence jobs will be added to. + science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances + The list of times that are being analysed in this workflow. + datafind_outs : pycbc.workflow.core.FileList + An FileList of the datafind files that are needed to obtain the + data used in the analysis. + tmplt_banks : pycbc.workflow.core.FileList + An FileList of the template bank files that will serve as input + in this stage. + output_dir : path + The directory in which output will be stored. + injection_file : pycbc.workflow.core.File, optional (default=None) + If given the file containing the simulation file to be sent to these + jobs on the command line. If not given no file will be sent. + tags : list of strings (optional, default = []) + A list of the tagging strings that will be used for all jobs created + by this call to the workflow. An example might be ['BNSINJECTIONS'] or + ['NOINJECTIONANALYSIS']. This will be used in output names. + + Returns + ------- + inspiral_outs : pycbc.workflow.core.FileList + A list of output files written by this stage. This *will not* contain + any intermediate products produced within this stage of the workflow. + If you require access to any intermediate products produced at this + stage you can call the various sub-functions directly. + ''' + if tags is None: + tags = [] + logger.info("Entering matched-filtering setup module.") + make_analysis_dir(output_dir) + cp = workflow.cp + + # Parse for options in .ini file + mfltrMethod = cp.get_opt_tags("workflow-matchedfilter", "matchedfilter-method", + tags) + + # Could have a number of choices here + if mfltrMethod == "WORKFLOW_INDEPENDENT_IFOS": + logger.info("Adding matched-filter jobs to workflow.") + inspiral_outs = setup_matchedfltr_dax_generated(workflow, science_segs, + datafind_outs, tmplt_banks, output_dir, + injection_file=injection_file, + tags=tags) + elif mfltrMethod == "WORKFLOW_MULTIPLE_IFOS": + logger.info("Adding matched-filter jobs to workflow.") + inspiral_outs = setup_matchedfltr_dax_generated_multi(workflow, + science_segs, datafind_outs, tmplt_banks, + output_dir, injection_file=injection_file, + tags=tags) + else: + errMsg = "Matched filter method not recognized. Must be one of " + errMsg += "WORKFLOW_INDEPENDENT_IFOS or WORKFLOW_MULTIPLE_IFOS." + raise ValueError(errMsg) + + logger.info("Leaving matched-filtering setup module.") + return inspiral_outs
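A minimal sketch of how this gateway is typically called from a workflow generation script, assuming the ini file sets matchedfilter-method in [workflow-matchedfilter] to one of the two supported values; the variable and directory names are illustrative:

insps = setup_matchedfltr_workflow(workflow, science_segs, datafind_files,
                                   bank_files, output_dir='matched_filter',
                                   tags=['full_data'])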
+ + +
+[docs] +def setup_matchedfltr_dax_generated(workflow, science_segs, datafind_outs, + tmplt_banks, output_dir, + injection_file=None, + tags=None): + ''' + Setup matched-filter jobs that are generated as part of the workflow. + This + module can support any matched-filter code that is similar in principle to + lalapps_inspiral, but for new codes some additions are needed to define + Executable and Job sub-classes (see jobutils.py). + + Parameters + ----------- + workflow : pycbc.workflow.core.Workflow + The Workflow instance that the coincidence jobs will be added to. + science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances + The list of times that are being analysed in this workflow. + datafind_outs : pycbc.workflow.core.FileList + An FileList of the datafind files that are needed to obtain the + data used in the analysis. + tmplt_banks : pycbc.workflow.core.FileList + An FileList of the template bank files that will serve as input + in this stage. + output_dir : path + The directory in which output will be stored. + injection_file : pycbc.workflow.core.File, optional (default=None) + If given the file containing the simulation file to be sent to these + jobs on the command line. If not given no file will be sent. + tags : list of strings (optional, default = []) + A list of the tagging strings that will be used for all jobs created + by this call to the workflow. An example might be ['BNSINJECTIONS'] or + ['NOINJECTIONANALYSIS']. This will be used in output names. + + Returns + ------- + inspiral_outs : pycbc.workflow.core.FileList + A list of output files written by this stage. This *will not* contain + any intermediate products produced within this stage of the workflow. + If you require access to any intermediate products produced at this + stage you can call the various sub-functions directly. + ''' + if tags is None: + tags = [] + # Need to get the exe to figure out what sections are analysed, what is + # discarded etc. This should *not* be hardcoded, so using a new executable + # will require a bit of effort here .... + + cp = workflow.cp + ifos = science_segs.keys() + match_fltr_exe = os.path.basename(cp.get('executables','inspiral')) + # Select the appropriate class + exe_class = select_matchedfilter_class(match_fltr_exe) + + # Set up class for holding the banks + inspiral_outs = FileList([]) + + # Matched-filtering is done independently for different ifos, but might not be! + # If we want to use multi-detector matched-filtering or something similar to this + # it would probably require a new module + for ifo in ifos: + logger.info("Setting up matched-filtering for %s.", ifo) + job_instance = exe_class(workflow.cp, 'inspiral', ifo=ifo, + out_dir=output_dir, + injection_file=injection_file, + tags=tags) + + sngl_ifo_job_setup(workflow, ifo, inspiral_outs, job_instance, + science_segs[ifo], datafind_outs, + parents=tmplt_banks, allow_overlap=False) + return inspiral_outs
+ + +
+[docs] +def setup_matchedfltr_dax_generated_multi(workflow, science_segs, datafind_outs, + tmplt_banks, output_dir, + injection_file=None, + tags=None): + ''' + Setup matched-filter jobs that are generated as part of the workflow in + which a single job reads in and generates triggers over multiple ifos. + This module can support any matched-filter code that is similar in + principle to pycbc_multi_inspiral, but for new codes some additions are + needed to define Executable and Job sub-classes (see jobutils.py). + + Parameters + ----------- + workflow : pycbc.workflow.core.Workflow + The Workflow instance that the coincidence jobs will be added to. + science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances + The list of times that are being analysed in this workflow. + datafind_outs : pycbc.workflow.core.FileList + An FileList of the datafind files that are needed to obtain the + data used in the analysis. + tmplt_banks : pycbc.workflow.core.FileList + An FileList of the template bank files that will serve as input + in this stage. + output_dir : path + The directory in which output will be stored. + injection_file : pycbc.workflow.core.File, optional (default=None) + If given the file containing the simulation file to be sent to these + jobs on the command line. If not given no file will be sent. + tags : list of strings (optional, default = []) + A list of the tagging strings that will be used for all jobs created + by this call to the workflow. An example might be ['BNSINJECTIONS'] or + ['NOINJECTIONANALYSIS']. This will be used in output names. + + Returns + ------- + inspiral_outs : pycbc.workflow.core.FileList + A list of output files written by this stage. This *will not* contain + any intermediate products produced within this stage of the workflow. + If you require access to any intermediate products produced at this + stage you can call the various sub-functions directly. + ''' + if tags is None: + tags = [] + # Need to get the exe to figure out what sections are analysed, what is + # discarded etc. This should *not* be hardcoded, so using a new executable + # will require a bit of effort here .... + + cp = workflow.cp + ifos = sorted(science_segs.keys()) + match_fltr_exe = os.path.basename(cp.get('executables','inspiral')) + + # List for holding the output + inspiral_outs = FileList([]) + + logger.info("Setting up matched-filtering for %s.", ' '.join(ifos)) + + if match_fltr_exe == 'pycbc_multi_inspiral': + exe_class = select_matchedfilter_class(match_fltr_exe) + # Right ascension + declination must be provided in radians + cp.set('inspiral', 'ra', + cp.get('workflow', 'ra')) + cp.set('inspiral', 'dec', + cp.get('workflow', 'dec')) + # At the moment we aren't using sky grids, but when we do this code + # might be used then. 
+ # from pycbc.workflow.grb_utils import get_sky_grid_scale + # if cp.has_option("jitter_skyloc", "apply-fermi-error"): + # cp.set('inspiral', 'sky-error', + # str(get_sky_grid_scale(float(cp.get('workflow', + # 'sky-error'))))) + # else: + # cp.set('inspiral', 'sky-error', + # str(get_sky_grid_scale(float(cp.get('workflow', + # 'sky-error')), + # sigma_sys=0.0))) + # cp.set('inspiral', 'trigger-time',\ + # cp.get('workflow', 'trigger-time')) + # cp.set('inspiral', 'block-duration', + # str(abs(science_segs[ifos[0]][0]) - \ + # 2 * int(cp.get('inspiral', 'pad-data')))) + + job_instance = exe_class(workflow.cp, 'inspiral', ifo=ifos, + out_dir=output_dir, + injection_file=injection_file, + tags=tags) + if cp.has_option("workflow", "do-long-slides") and "slide" in tags[-1]: + slide_num = int(tags[-1].replace("slide", "")) + logger.info( + "Setting up matched-filtering for slide %d", + slide_num + ) + slide_shift = int(cp.get("inspiral", "segment-length")) + time_slide_dict = {ifo: (slide_num + 1) * ix * slide_shift + for ix, ifo in enumerate(ifos)} + multi_ifo_coherent_job_setup(workflow, inspiral_outs, job_instance, + science_segs, datafind_outs, + output_dir, parents=tmplt_banks, + slide_dict=time_slide_dict) + else: + multi_ifo_coherent_job_setup(workflow, inspiral_outs, job_instance, + science_segs, datafind_outs, + output_dir, parents=tmplt_banks) + else: + # Select the appropriate class + raise ValueError("Not currently supported.") + return inspiral_outs
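The long-slide branch above builds a per-detector offset dictionary; a worked sketch with hypothetical values makes the arithmetic explicit:

# For a job tagged "slide2", segment-length = 256 s and three detectors,
# each detector is shifted by (slide_num + 1) * index * segment-length.
ifos = ['H1', 'L1', 'V1']
slide_num, slide_shift = 2, 256
time_slide_dict = {ifo: (slide_num + 1) * ix * slide_shift
                   for ix, ifo in enumerate(ifos)}
# {'H1': 0, 'L1': 768, 'V1': 1536}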
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/minifollowups.html b/latest/html/_modules/pycbc/workflow/minifollowups.html new file mode 100644 index 00000000000..b12858001d8 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/minifollowups.html @@ -0,0 +1,1451 @@ + + + + + + pycbc.workflow.minifollowups — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.workflow.minifollowups

+# Copyright (C) 2015 Christopher M. Biwer, Alexander Harvey Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+import logging
+import os.path
+
+from ligo import segments
+
+from pycbc.events import coinc
+from pycbc.workflow.core import Executable, FileList
+from pycbc.workflow.core import makedir, resolve_url_to_file
+from pycbc.workflow.plotting import PlotExecutable, requirestr, excludestr
+try:
+    # Python 3
+    from itertools import zip_longest
+except ImportError:
+    # Python 2
+    from itertools import izip_longest as zip_longest
+from pycbc.workflow.pegasus_workflow import SubWorkflow
+
+logger = logging.getLogger('pycbc.workflow.minifollowups')
+
+
+[docs] +def grouper(iterable, n, fillvalue=None): + """ Group an iterable into length-n tuples, padding the final tuple with + fillvalue if the iterable does not divide evenly. + """ + args = [iter(iterable)] * n + return zip_longest(*args, fillvalue=fillvalue)
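For example:

>>> list(grouper([1, 2, 3, 4, 5], 2))
[(1, 2), (3, 4), (5, None)]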
+ + +
+[docs] +def setup_foreground_minifollowups(workflow, coinc_file, single_triggers, + tmpltbank_file, insp_segs, insp_data_name, + insp_anal_name, dax_output, out_dir, + tags=None): + """ Create plots that followup the Nth loudest coincident injection + from a statmap produced HDF file. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + coinc_file: + single_triggers: list of pycbc.workflow.File + A list cointaining the file objects associated with the merged + single detector trigger files for each ifo. + tmpltbank_file: pycbc.workflow.File + The file object pointing to the HDF format template bank + insp_segs: SegFile + The segment file containing the data read and analyzed by each inspiral + job. + insp_data_name: str + The name of the segmentlist storing data read. + insp_anal_name: str + The name of the segmentlist storing data analyzed. + dax_output : directory + Location of the dax outputs + out_dir: path + The directory to store minifollowups result plots and files + tags: {None, optional} + Tags to add to the minifollowups executables + + Returns + ------- + layout: list + A list of tuples which specify the displayed file layout for the + minifollops plots. + """ + logger.info('Entering minifollowups module') + + if not workflow.cp.has_section('workflow-minifollowups'): + msg = 'There is no [workflow-minifollowups] section in ' + msg += 'configuration file' + logger.info(msg) + logger.info('Leaving minifollowups') + return + + tags = [] if tags is None else tags + makedir(dax_output) + + # turn the config file into a File class + config_path = os.path.abspath(dax_output + '/' + '_'.join(tags) + 'foreground_minifollowup.ini') + workflow.cp.write(open(config_path, 'w')) + + config_file = resolve_url_to_file(config_path) + + exe = Executable(workflow.cp, 'foreground_minifollowup', + ifos=workflow.ifos, out_dir=dax_output, tags=tags) + + node = exe.create_node() + node.add_input_opt('--config-files', config_file) + node.add_input_opt('--bank-file', tmpltbank_file) + node.add_input_opt('--statmap-file', coinc_file) + node.add_multiifo_input_list_opt('--single-detector-triggers', + single_triggers) + node.add_input_opt('--inspiral-segments', insp_segs) + node.add_opt('--inspiral-data-read-name', insp_data_name) + node.add_opt('--inspiral-data-analyzed-name', insp_anal_name) + if tags: + node.add_list_opt('--tags', tags) + node.new_output_file_opt(workflow.analysis_time, '.dax', '--dax-file') + node.new_output_file_opt(workflow.analysis_time, '.dax.map', '--output-map') + + name = node.output_files[0].name + map_file = node.output_files[1] + + node.add_opt('--workflow-name', name) + node.add_opt('--output-dir', out_dir) + node.add_opt('--dax-file-directory', '.') + + workflow += node + + # execute this in a sub-workflow + fil = node.output_files[0] + + # determine if a staging site has been specified + job = SubWorkflow(fil.name, is_planned=False) + input_files = [tmpltbank_file, coinc_file, insp_segs] + \ + single_triggers + job.add_inputs(*input_files) + job.set_subworkflow_properties(map_file, + staging_site=workflow.staging_site, + cache_file=workflow.cache_file) + job.add_into_workflow(workflow) + logger.info('Leaving minifollowups module')
+ + +
+[docs] +def setup_single_det_minifollowups(workflow, single_trig_file, tmpltbank_file, + insp_segs, insp_data_name, insp_anal_name, + dax_output, out_dir, veto_file=None, + veto_segment_name=None, fg_file=None, + fg_name=None, statfiles=None, + tags=None): + """ Create plots that followup the Nth loudest clustered single detector + triggers from a merged single detector trigger HDF file. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + single_trig_file: pycbc.workflow.File + The File class holding the single detector triggers. + tmpltbank_file: pycbc.workflow.File + The file object pointing to the HDF format template bank + insp_segs: SegFile + The segment file containing the data read by each inspiral job. + insp_data_name: str + The name of the segmentlist storing data read. + insp_anal_name: str + The name of the segmentlist storing data analyzed. + out_dir: path + The directory to store minifollowups result plots and files + statfiles: FileList (optional, default=None) + Supplementary files necessary for computing the single-detector + statistic. + tags: {None, optional} + Tags to add to the minifollowups executables + Returns + ------- + layout: list + A list of tuples which specify the displayed file layout for the + minifollops plots. + """ + logger.info('Entering minifollowups module') + + if not workflow.cp.has_section('workflow-sngl_minifollowups'): + msg = 'There is no [workflow-sngl_minifollowups] section in ' + msg += 'configuration file' + logger.info(msg) + logger.info('Leaving minifollowups') + return + + tags = [] if tags is None else tags + makedir(dax_output) + + # turn the config file into a File class + curr_ifo = single_trig_file.ifo + config_path = os.path.abspath(dax_output + '/' + curr_ifo + \ + '_'.join(tags) + 'singles_minifollowup.ini') + workflow.cp.write(open(config_path, 'w')) + + config_file = resolve_url_to_file(config_path) + + exe = Executable(workflow.cp, 'singles_minifollowup', + ifos=curr_ifo, out_dir=dax_output, tags=tags) + + node = exe.create_node() + node.add_input_opt('--config-files', config_file) + node.add_input_opt('--bank-file', tmpltbank_file) + node.add_input_opt('--single-detector-file', single_trig_file) + node.add_input_opt('--inspiral-segments', insp_segs) + node.add_opt('--inspiral-data-read-name', insp_data_name) + node.add_opt('--inspiral-data-analyzed-name', insp_anal_name) + node.add_opt('--instrument', curr_ifo) + if veto_file is not None: + assert(veto_segment_name is not None) + node.add_input_opt('--veto-file', veto_file) + node.add_opt('--veto-segment-name', veto_segment_name) + if fg_file is not None: + assert(fg_name is not None) + node.add_input_opt('--foreground-censor-file', fg_file) + node.add_opt('--foreground-segment-name', fg_name) + if statfiles: + node.add_input_list_opt( + '--statistic-files', + statfiles, + check_existing_options=False, + ) + if tags: + node.add_list_opt('--tags', tags) + node.new_output_file_opt(workflow.analysis_time, '.dax', '--dax-file') + node.new_output_file_opt(workflow.analysis_time, '.dax.map', + '--output-map') + + name = node.output_files[0].name + map_file = node.output_files[1] + + node.add_opt('--workflow-name', name) + node.add_opt('--output-dir', out_dir) + node.add_opt('--dax-file-directory', '.') + + workflow += node + + # execute this in a sub-workflow + fil = node.output_files[0] + + job = SubWorkflow(fil.name, is_planned=False) + input_files = [tmpltbank_file, insp_segs, single_trig_file] + if veto_file is not 
None: + input_files.append(veto_file) + if statfiles: + input_files += statfiles + job.add_inputs(*input_files) + job.set_subworkflow_properties(map_file, + staging_site=workflow.staging_site, + cache_file=workflow.cache_file) + job.add_into_workflow(workflow) + logger.info('Leaving minifollowups module')
+ + + +
+[docs] +def setup_injection_minifollowups(workflow, injection_file, inj_xml_file, + single_triggers, tmpltbank_file, + insp_segs, insp_data_name, insp_anal_name, + dax_output, out_dir, tags=None): + """ Create plots that followup the closest missed injections + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + coinc_file: + single_triggers: list of pycbc.workflow.File + A list cointaining the file objects associated with the merged + single detector trigger files for each ifo. + tmpltbank_file: pycbc.workflow.File + The file object pointing to the HDF format template bank + insp_segs: SegFile + The segment file containing the data read by each inspiral job. + insp_data_name: str + The name of the segmentlist storing data read. + insp_anal_name: str + The name of the segmentlist storing data analyzed. + out_dir: path + The directory to store minifollowups result plots and files + tags: {None, optional} + Tags to add to the minifollowups executables + + Returns + ------- + layout: list + A list of tuples which specify the displayed file layout for the + minifollops plots. + """ + logger.info('Entering injection minifollowups module') + + if not workflow.cp.has_section('workflow-injection_minifollowups'): + msg = 'There is no [workflow-injection_minifollowups] section in ' + msg += 'configuration file' + logger.info(msg) + logger.info('Leaving minifollowups') + return + + tags = [] if tags is None else tags + makedir(dax_output) + + # turn the config file into a File class + config_path = os.path.abspath(dax_output + '/' + '_'.join(tags) + 'injection_minifollowup.ini') + workflow.cp.write(open(config_path, 'w')) + + config_file = resolve_url_to_file(config_path) + + exe = Executable(workflow.cp, 'injection_minifollowup', ifos=workflow.ifos, out_dir=dax_output) + + node = exe.create_node() + node.add_input_opt('--config-files', config_file) + node.add_input_opt('--bank-file', tmpltbank_file) + node.add_input_opt('--injection-file', injection_file) + node.add_input_opt('--injection-xml-file', inj_xml_file) + node.add_multiifo_input_list_opt('--single-detector-triggers', single_triggers) + node.add_input_opt('--inspiral-segments', insp_segs) + node.add_opt('--inspiral-data-read-name', insp_data_name) + node.add_opt('--inspiral-data-analyzed-name', insp_anal_name) + if tags: + node.add_list_opt('--tags', tags) + node.new_output_file_opt(workflow.analysis_time, '.dax', '--dax-file', tags=tags) + node.new_output_file_opt(workflow.analysis_time, '.dax.map', '--output-map', tags=tags) + + name = node.output_files[0].name + map_file = node.output_files[1] + + node.add_opt('--workflow-name', name) + node.add_opt('--output-dir', out_dir) + node.add_opt('--dax-file-directory', '.') + + workflow += node + + # execute this in a sub-workflow + fil = node.output_files[0] + + job = SubWorkflow(fil.name, is_planned=False) + input_files = [tmpltbank_file, injection_file, inj_xml_file, insp_segs] + input_files += single_triggers + job.add_inputs(*input_files) + job.set_subworkflow_properties(map_file, + staging_site=workflow.staging_site, + cache_file=workflow.cache_file) + job.add_into_workflow(workflow) + + logger.info('Leaving injection minifollowups module')
+ + + +
+[docs] +class SingleTemplateExecutable(PlotExecutable): + """Class used to create workflow.Executable instances for the + pycbc_single_template executable. Basically inherits directly from + PlotExecutable. + """ + time_dependent_options = ['--channel-name', '--frame-type']
+ + + +
+[docs] +class SingleTimeFreqExecutable(PlotExecutable): + """Class used to create workflow.Executable instances for the + pycbc_plot_singles_timefreq executable. Basically inherits directly from + PlotExecutable. + """ + time_dependent_options = ['--channel-name', '--frame-type']
+ + + +
+[docs] +class PlotQScanExecutable(PlotExecutable): + """Class used to create workflow.Executable instances for the + pycbc_plot_qscan executable. Basically inherits directly from + PlotExecutable. + """ + time_dependent_options = ['--channel-name', '--frame-type']
+ + + +
+[docs] +def get_single_template_params(curr_idx, times, bank_data, + bank_id, fsdt, tids): + """ + A function to get the parameters needed for the make_single_template_files + function. + + Parameters + ---------- + curr_idx : int + The index of the event in the file + times : dictionary keyed on IFO of numpy arrays, dtype float + The array of trigger times for each detector + bank_data : dictionary or h5py file + Structure containing the bank information + bank_id : int + The template index within the bank + fsdt : dictionary of h5py files, keyed on IFO + The single-detector TRIGGER_MERGE files, keyed by IFO + tids : dictionary keyed on IFO of numpy arrays, dtype int + The trigger indexes in fsdt for each IFO + + Returns + ------- + params : dictionary + A dictionary containing the parameters needed for the event used + + """ + params = {} + for ifo in times: + params['%s_end_time' % ifo] = times[ifo][curr_idx] + try: + # Only present for precessing, so may not exist + params['u_vals_%s' % ifo] = \ + fsdt[ifo][ifo]['u_vals'][tids[ifo][curr_idx]] + except: + pass + + params['mean_time'] = coinc.mean_if_greater_than_zero( + [times[ifo][curr_idx] for ifo in times] + )[0] + + params['mass1'] = bank_data['mass1'][bank_id] + params['mass2'] = bank_data['mass2'][bank_id] + params['spin1z'] = bank_data['spin1z'][bank_id] + params['spin2z'] = bank_data['spin2z'][bank_id] + params['f_lower'] = bank_data['f_lower'][bank_id] + if 'approximant' in bank_data: + params['approximant'] = bank_data['approximant'][bank_id] + # don't require precessing template info if not present + try: + params['spin1x'] = bank_data['spin1x'][bank_id] + params['spin1y'] = bank_data['spin1y'][bank_id] + params['spin2x'] = bank_data['spin2x'][bank_id] + params['spin2y'] = bank_data['spin2y'][bank_id] + params['inclination'] = bank_data['inclination'][bank_id] + except KeyError: + pass + return params
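A hedged sketch of a call and of the shape of the returned dictionary; all values below are invented for illustration:

params = get_single_template_params(curr_idx=0, times=times, bank_data=bank,
                                     bank_id=12, fsdt=fsdt, tids=tids)
# params might then look like:
# {'H1_end_time': 1187008882.43, 'L1_end_time': 1187008882.44,
#  'mean_time': 1187008882.43, 'mass1': 1.46, 'mass2': 1.27,
#  'spin1z': 0.0, 'spin2z': 0.0, 'f_lower': 27.0}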
+ + + +
+[docs] +def make_single_template_files(workflow, segs, ifo, data_read_name, + analyzed_name, params, out_dir, inj_file=None, + exclude=None, require=None, tags=None, + store_file=False, use_mean_time=False, + use_exact_inj_params=False): + """Function for creating jobs to run the pycbc_single_template code and + add these jobs to the workflow. + + Parameters + ----------- + workflow : workflow.Workflow instance + The pycbc.workflow.Workflow instance to add these jobs to. + segs : workflow.File instance + The pycbc.workflow.File instance that points to the XML file containing + the segment lists of data read in and data analyzed. + ifo: str + The name of the interferometer + data_read_name : str + The name of the segmentlist containing the data read in by each + inspiral job in the segs file. + analyzed_name : str + The name of the segmentlist containing the data analyzed by each + inspiral job in the segs file. + params : dictionary + A dictionary containing the parameters of the template to be used. + params[ifo+'end_time'] is required for all ifos in workflow.ifos. + If use_exact_inj_params is False then also need to supply values for + [mass1, mass2, spin1z, spin2x]. For precessing templates one also + needs to supply [spin1y, spin1x, spin2x, spin2y, inclination] + additionally for precession one must supply u_vals or + u_vals_+ifo for all ifos. u_vals is the ratio between h_+ and h_x to + use when constructing h(t). h(t) = (h_+ * u_vals) + h_x. + out_dir : str + Directory in which to store the output files. + inj_file : workflow.File (optional, default=None) + If given send this injection file to the job so that injections are + made into the data. + exclude : list (optional, default=None) + If given, then when considering which subsections in the ini file to + parse for options to add to single_template_plot, only use subsections + that *do not* match strings in this list. + require : list (optional, default=None) + If given, then when considering which subsections in the ini file to + parse for options to add to single_template_plot, only use subsections + matching strings in this list. + tags : list (optional, default=None) + The tags to use for this job. + store_file : boolean (optional, default=False) + Keep the output files of this job. + use_mean_time : boolean (optional, default=False) + Use the mean time as the center time for all ifos + use_exact_inj_params : boolean (optional, default=False) + If True do not use masses and spins listed in the params dictionary + but instead use the injection closest to the filter time as a template. + + Returns + -------- + output_files : workflow.FileList + The list of workflow.Files created in this function. 
+ """ + tags = [] if tags is None else tags + makedir(out_dir) + name = 'single_template' + secs = requirestr(workflow.cp.get_subsections(name), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + # Reanalyze the time around the trigger in each detector + curr_exe = SingleTemplateExecutable(workflow.cp, 'single_template', + ifos=[ifo], out_dir=out_dir, + tags=tags) + start = int(params[ifo + '_end_time']) + end = start + 1 + cseg = segments.segment([start, end]) + node = curr_exe.create_node(valid_seg=cseg) + + if use_exact_inj_params: + node.add_opt('--use-params-of-closest-injection') + else: + node.add_opt('--mass1', "%.6f" % params['mass1']) + node.add_opt('--mass2', "%.6f" % params['mass2']) + node.add_opt('--spin1z',"%.6f" % params['spin1z']) + node.add_opt('--spin2z',"%.6f" % params['spin2z']) + node.add_opt('--template-start-frequency', + "%.6f" % params['f_lower']) + # Is this precessing? + if 'u_vals' in params or 'u_vals_%s' % ifo in params: + node.add_opt('--spin1x',"%.6f" % params['spin1x']) + node.add_opt('--spin1y',"%.6f" % params['spin1y']) + node.add_opt('--spin2x',"%.6f" % params['spin2x']) + node.add_opt('--spin2y',"%.6f" % params['spin2y']) + node.add_opt('--inclination',"%.6f" % params['inclination']) + try: + node.add_opt('--u-val',"%.6f" % params['u_vals']) + except: + node.add_opt('--u-val', + "%.6f" % params['u_vals_%s' % ifo]) + + if params[ifo + '_end_time'] > 0 and not use_mean_time: + trig_time = params[ifo + '_end_time'] + else: + trig_time = params['mean_time'] + + node.add_opt('--trigger-time', f"{trig_time:.6f}") + node.add_input_opt('--inspiral-segments', segs) + if inj_file is not None: + node.add_input_opt('--injection-file', inj_file) + node.add_opt('--data-read-name', data_read_name) + node.add_opt('--data-analyzed-name', analyzed_name) + node.new_output_file_opt(workflow.analysis_time, '.hdf', + '--output-file', store_file=store_file) + workflow += node + return node.output_files
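A possible call, reanalysing one second of H1 data around a trigger with the parameters returned by get_single_template_params; the segment-list names and output directory are illustrative:

hdf_files = make_single_template_files(workflow, insp_segs, 'H1',
                                       'DATA', 'DATA_ANALYSED', params,
                                       out_dir='followup',
                                       tags=['LOUDEST_1'])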
+ + + +
+[docs] +def make_single_template_plots(workflow, segs, data_read_name, analyzed_name, + params, out_dir, inj_file=None, exclude=None, + data_segments=None, + require=None, tags=None, params_str=None, + use_exact_inj_params=False): + """Function for creating jobs to run the pycbc_single_template code and + to run the associated plotting code pycbc_single_template_plots and add + these jobs to the workflow. + + Parameters + ----------- + workflow : workflow.Workflow instance + The pycbc.workflow.Workflow instance to add these jobs to. + segs : workflow.File instance + The pycbc.workflow.File instance that points to the XML file containing + the segment lists of data read in and data analyzed. + data_read_name : str + The name of the segmentlist containing the data read in by each + inspiral job in the segs file. + analyzed_name : str + The name of the segmentlist containing the data analyzed by each + inspiral job in the segs file. + params : dictionary + A dictionary containing the parameters of the template to be used. + params[ifo+'end_time'] is required for all ifos in workflow.ifos. + If use_exact_inj_params is False then also need to supply values for + [mass1, mass2, spin1z, spin2x]. For precessing templates one also + needs to supply [spin1y, spin1x, spin2x, spin2y, inclination] + additionally for precession one must supply u_vals or + u_vals_+ifo for all ifos. u_vals is the ratio between h_+ and h_x to + use when constructing h(t). h(t) = (h_+ * u_vals) + h_x. + out_dir : str + Directory in which to store the output files. + inj_file : workflow.File (optional, default=None) + If given send this injection file to the job so that injections are + made into the data. + exclude : list (optional, default=None) + If given, then when considering which subsections in the ini file to + parse for options to add to single_template_plot, only use subsections + that *do not* match strings in this list. + require : list (optional, default=None) + If given, then when considering which subsections in the ini file to + parse for options to add to single_template_plot, only use subsections + matching strings in this list. + data_segments : dictionary of segment lists + Dictionary of segment lists keyed on the IFO. Used to decide if an + IFO is plotted if there is valid data. If not given, will plot if + the IFO produced a trigger which contributed to the event + tags : list (optional, default=None) + Add this list of tags to all jobs. + params_str : str (optional, default=None) + If given add this string to plot title and caption to describe the + template that was used. + use_exact_inj_params : boolean (optional, default=False) + If True do not use masses and spins listed in the params dictionary + but instead use the injection closest to the filter time as a template. + + Returns + -------- + hdf_files : workflow.FileList + The list of workflow.Files created by single_template jobs + in this function. + plot_files : workflow.FileList + The list of workflow.Files created by single_template_plot jobs + in this function. 
+ """ + tags = [] if tags is None else tags + makedir(out_dir) + name = 'single_template_plot' + secs = requirestr(workflow.cp.get_subsections(name), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + hdf_files = FileList([]) + plot_files = FileList([]) + valid = {} + for ifo in workflow.ifos: + valid[ifo] = params['mean_time'] in data_segments[ifo] if data_segments \ + else params['%s_end_time' % ifo] > 0 + for tag in secs: + for ifo in workflow.ifos: + if not valid[ifo]: + # If the IFO is not being used, continue + continue + data = make_single_template_files( + workflow, + segs, + ifo, + data_read_name, + analyzed_name, + params, + out_dir, + inj_file=inj_file, + exclude=exclude, + require=require, + tags=tags + [tag], + store_file=False, + use_exact_inj_params=use_exact_inj_params + ) + hdf_files += data + # Make the plot for this trigger and detector + node = PlotExecutable(workflow.cp, name, ifos=[ifo], + out_dir=out_dir, tags=[tag] + tags).create_node() + node.add_input_opt('--single-template-file', data[0]) + node.new_output_file_opt(workflow.analysis_time, '.png', + '--output-file') + title="'%s SNR and chi^2 timeseries" %(ifo) + if params_str is not None: + title+= " using %s" %(params_str) + title+="'" + node.add_opt('--plot-title', title) + caption = "'The SNR and chi^2 timeseries around the injection" + if params_str is not None: + caption += " using %s" %(params_str) + if use_exact_inj_params: + caption += ". The injection itself was used as the template.'" + else: + caption += ". The template used has the following parameters: " + caption += "mass1=%s, mass2=%s, spin1z=%s, spin2z=%s'"\ + %(params['mass1'], params['mass2'], params['spin1z'], + params['spin2z']) + node.add_opt('--plot-caption', caption) + workflow += node + plot_files += node.output_files + return hdf_files, plot_files
+ + +
+[docs] +def make_plot_waveform_plot(workflow, params, out_dir, ifos, exclude=None, + require=None, tags=None): + """ Add plot_waveform jobs to the workflow. + """ + tags = [] if tags is None else tags + makedir(out_dir) + name = 'single_template_plot' + secs = requirestr(workflow.cp.get_subsections(name), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + files = FileList([]) + for tag in secs: + node = PlotExecutable(workflow.cp, 'plot_waveform', ifos=ifos, + out_dir=out_dir, tags=[tag] + tags).create_node() + node.add_opt('--mass1', "%.6f" % params['mass1']) + node.add_opt('--mass2', "%.6f" % params['mass2']) + node.add_opt('--spin1z',"%.6f" % params['spin1z']) + node.add_opt('--spin2z',"%.6f" % params['spin2z']) + if 'u_vals' in params: + # Precessing options + node.add_opt('--spin1x',"%.6f" % params['spin1x']) + node.add_opt('--spin2x',"%.6f" % params['spin2x']) + node.add_opt('--spin1y',"%.6f" % params['spin1y']) + node.add_opt('--spin2y',"%.6f" % params['spin2y']) + node.add_opt('--inclination',"%.6f" % params['inclination']) + node.add_opt('--u-val', "%.6f" % params['u_vals']) + node.new_output_file_opt(workflow.analysis_time, '.png', + '--output-file') + workflow += node + files += node.output_files + return files
+ + +
+[docs] +def make_inj_info(workflow, injection_file, injection_index, num, out_dir, + tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + name = 'page_injinfo' + files = FileList([]) + node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_opt('--injection-file', injection_file) + node.add_opt('--injection-index', str(injection_index)) + node.add_opt('--n-nearest', str(num)) + node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') + workflow += node + files += node.output_files + return files
+ + +
+[docs] +def make_coinc_info(workflow, singles, bank, coinc_file, out_dir, + n_loudest=None, trig_id=None, file_substring=None, + sort_order=None, sort_var=None, title=None, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + name = 'page_coincinfo' + files = FileList([]) + node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_list_opt('--single-trigger-files', singles) + node.add_input_opt('--statmap-file', coinc_file) + node.add_input_opt('--bank-file', bank) + if sort_order: + node.add_opt('--sort-order', sort_order) + if sort_var: + node.add_opt('--sort-variable', sort_var) + if n_loudest is not None: + node.add_opt('--n-loudest', str(n_loudest)) + if trig_id is not None: + node.add_opt('--trigger-id', str(trig_id)) + if title is not None: + node.add_opt('--title', f'"{title}"') + if file_substring is not None: + node.add_opt('--statmap-file-subspace-name', file_substring) + node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') + workflow += node + files += node.output_files + return files
+ + +
+[docs] +def make_sngl_ifo(workflow, sngl_file, bank_file, trigger_id, out_dir, ifo, + statfiles=None, title=None, tags=None): + """Set up a job to create a single-detector (sngl ifo) HTML summary snippet. + """ + tags = [] if tags is None else tags + makedir(out_dir) + name = 'page_snglinfo' + files = FileList([]) + node = PlotExecutable(workflow.cp, name, ifos=[ifo], + out_dir=out_dir, tags=tags).create_node() + node.add_input_opt('--single-trigger-file', sngl_file) + node.add_input_opt('--bank-file', bank_file) + node.add_opt('--trigger-id', str(trigger_id)) + node.add_opt('--instrument', ifo) + if statfiles is not None: + node.add_input_list_opt( + '--statistic-files', + statfiles, + check_existing_options=False, + ) + if title is not None: + node.add_opt('--title', f'"{title}"') + node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') + workflow += node + files += node.output_files + return files
+ + + +
+[docs] +def make_trigger_timeseries(workflow, singles, ifo_times, out_dir, special_tids=None, + exclude=None, require=None, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + name = 'plot_trigger_timeseries' + secs = requirestr(workflow.cp.get_subsections(name), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + files = FileList([]) + for tag in secs: + node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos, + out_dir=out_dir, tags=[tag] + tags).create_node() + node.add_multiifo_input_list_opt('--single-trigger-files', singles) + node.add_opt('--times', ifo_times) + node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') + + if special_tids is not None: + node.add_opt('--special-trigger-ids', special_tids) + + workflow += node + files += node.output_files + return files
+ + +
+[docs] +def make_qscan_plot(workflow, ifo, trig_time, out_dir, injection_file=None, + data_segments=None, time_window=100, tags=None): + """ Generate a make_qscan node and add it to workflow. + + This function generates a single node of the singles_timefreq executable + and adds it to the current workflow. Parent/child relationships are set by + the input/output files automatically. + + Parameters + ----------- + workflow: pycbc.workflow.core.Workflow + The workflow class that stores the jobs that will be run. + ifo: str + Which interferometer are we using? + trig_time: int + The time of the trigger being followed up. + out_dir: str + Location of directory to output to + injection_file: pycbc.workflow.File (optional, default=None) + If given, add the injections in the file to strain before making the + plot. + data_segments: ligo.segments.segmentlist (optional, default=None) + The list of segments for which data exists and can be read in. If given + the start/end times given to singles_timefreq will be adjusted if + [trig_time - time_window, trig_time + time_window] does not completely + lie within a valid data segment. A ValueError will be raised if the + trig_time is not within a valid segment, or if it is not possible to + find 2*time_window (plus the padding) of continuous data around the + trigger. This **must** be coalesced. + time_window: int (optional, default=None) + The amount of data (not including padding) that will be read in by the + singles_timefreq job. The default value of 100s should be fine for most + cases. + tags: list (optional, default=None) + List of tags to add to the created nodes, which determine file naming. + """ + tags = [] if tags is None else tags + makedir(out_dir) + name = 'plot_qscan' + + curr_exe = PlotQScanExecutable(workflow.cp, name, ifos=[ifo], + out_dir=out_dir, tags=tags) + + # Determine start/end times, using data segments if needed. + # Begin by choosing "optimal" times + start = trig_time - time_window + end = trig_time + time_window + node = curr_exe.create_node(valid_seg=segments.segment([start, end])) + + # Then if data_segments is available, check against that, and move if + # needed + if data_segments is not None: + # Assumes coalesced, so trig_time can only be within one segment + for seg in data_segments: + if trig_time in seg: + data_seg = seg + break + elif trig_time == -1.0: + node.add_opt('--gps-start-time', int(trig_time)) + node.add_opt('--gps-end-time', int(trig_time)) + node.add_opt('--center-time', trig_time) + caption_string = "'No trigger in %s'" % ifo + node.add_opt('--plot-caption', caption_string) + node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') + workflow += node + return node.output_files + else: + err_msg = "Trig time {} ".format(trig_time) + err_msg += "does not seem to lie within any data segments. " + err_msg += "This shouldn't be possible, please ask for help!" + raise ValueError(err_msg) + # Check for pad-data + if curr_exe.has_opt('pad-data'): + pad_data = int(curr_exe.get_opt('pad-data')) + else: + pad_data = 0 + # We only read data that's available. The code must handle the case + # of not much data being available. 
+ if end > (data_seg[1] - pad_data): + end = data_seg[1] - pad_data + if start < (data_seg[0] + pad_data): + start = data_seg[0] + pad_data + + node.add_opt('--gps-start-time', int(start)) + node.add_opt('--gps-end-time', int(end)) + node.add_opt('--center-time', trig_time) + + if injection_file is not None: + node.add_input_opt('--injection-file', injection_file) + + node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') + workflow += node + return node.output_files
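The clamping above simply pulls the requested window back inside the available data, allowing for the pad-data margin. A minimal standalone sketch of the same idea (the helper name and GPS values are illustrative, not part of the module):

def clamp_to_segment(trig_time, time_window, data_seg, pad_data=0):
    # Clamp [trig_time - time_window, trig_time + time_window] so that it stays
    # inside data_seg once pad_data seconds are reserved at each edge.
    start = trig_time - time_window
    end = trig_time + time_window
    if end > (data_seg[1] - pad_data):
        end = data_seg[1] - pad_data
    if start < (data_seg[0] + pad_data):
        start = data_seg[0] + pad_data
    return int(start), int(end)

# A 100 s window around GPS 1126259462 in the segment [1126259400, 1126259700]
# with 8 s of padding becomes (1126259408, 1126259562): only the start moves.
print(clamp_to_segment(1126259462, 100, (1126259400, 1126259700), pad_data=8))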
+ + +
+[docs] +def make_singles_timefreq(workflow, single, bank_file, trig_time, out_dir, + veto_file=None, time_window=10, data_segments=None, + tags=None): + """ Generate a singles_timefreq node and add it to workflow. + + This function generates a single node of the singles_timefreq executable + and adds it to the current workflow. Parent/child relationships are set by + the input/output files automatically. + + Parameters + ----------- + workflow: pycbc.workflow.core.Workflow + The workflow class that stores the jobs that will be run. + single: pycbc.workflow.core.File instance + The File object storing the single-detector triggers to followup. + bank_file: pycbc.workflow.core.File instance + The File object storing the template bank. + trig_time: int + The time of the trigger being followed up. + out_dir: str + Location of directory to output to + veto_file: pycbc.workflow.core.File (optional, default=None) + If given use this file to veto triggers to determine the loudest event. + FIXME: Veto files *should* be provided a definer argument and not just + assume that all segments should be read. + time_window: int (optional, default=None) + The amount of data (not including padding) that will be read in by the + singles_timefreq job. The default value of 10s should be fine for most + cases. + data_segments: ligo.segments.segmentlist (optional, default=None) + The list of segments for which data exists and can be read in. If given + the start/end times given to singles_timefreq will be adjusted if + [trig_time - time_window, trig_time + time_window] does not completely + lie within a valid data segment. A ValueError will be raised if the + trig_time is not within a valid segment, or if it is not possible to + find 2*time_window (plus the padding) of continuous data around the + trigger. This **must** be coalesced. + tags: list (optional, default=None) + List of tags to add to the created nodes, which determine file naming. + """ + tags = [] if tags is None else tags + makedir(out_dir) + name = 'plot_singles_timefreq' + + curr_exe = SingleTimeFreqExecutable(workflow.cp, name, ifos=[single.ifo], + out_dir=out_dir, tags=tags) + + # Determine start/end times, using data segments if needed. + # Begin by choosing "optimal" times + start = trig_time - time_window + end = trig_time + time_window + + node = curr_exe.create_node(valid_seg=segments.segment([start, end])) + node.add_input_opt('--trig-file', single) + node.add_input_opt('--bank-file', bank_file) + + # Then if data_segments is available, check against that, and move if + # needed + if data_segments is not None: + # Assumes coalesced, so trig_time can only be within one segment + for seg in data_segments: + if trig_time in seg: + data_seg = seg + break + elif trig_time == -1.0: + node.add_opt('--gps-start-time', int(trig_time)) + node.add_opt('--gps-end-time', int(trig_time)) + node.add_opt('--center-time', trig_time) + + if veto_file: + node.add_input_opt('--veto-file', veto_file) + + node.add_opt('--detector', single.ifo) + node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') + workflow += node + return node.output_files + else: + err_msg = "Trig time {} ".format(trig_time) + err_msg += "does not seem to lie within any data segments. " + err_msg += "This shouldn't be possible, please ask for help!" 
+ raise ValueError(err_msg) + # Check for pad-data + if curr_exe.has_opt('pad-data'): + pad_data = int(curr_exe.get_opt('pad-data')) + else: + pad_data = 0 + if abs(data_seg) < (2 * time_window + 2 * pad_data): + tl = 2 * time_window + 2 * pad_data + err_msg = "I was asked to use {} seconds of data ".format(tl) + err_msg += "to run a plot_singles_timefreq job. However, I have " + err_msg += "only {} seconds available.".format(abs(data_seg)) + raise ValueError(err_msg) + if data_seg[0] > (start - pad_data): + start = data_seg[0] + pad_data + end = start + 2 * time_window + if data_seg[1] < (end + pad_data): + end = data_seg[1] - pad_data + start = end - 2 * time_window + # Sanity check, shouldn't get here! + if data_seg[0] > (start - pad_data): + err_msg = "I shouldn't be here! Go ask Ian what he broke." + raise ValueError(err_msg) + + node.add_opt('--gps-start-time', int(start)) + node.add_opt('--gps-end-time', int(end)) + node.add_opt('--center-time', trig_time) + + if veto_file: + node.add_input_opt('--veto-file', veto_file) + + node.add_opt('--detector', single.ifo) + node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') + workflow += node + return node.output_files
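Unlike the qscan case, this function keeps the full 2*time_window of data and slides the window away from the segment edges instead of shrinking it, raising an error if the segment is too short. A standalone sketch of that logic (the helper name and numbers are illustrative only):

def slide_window_into_segment(trig_time, time_window, data_seg, pad_data=0):
    # Keep a fixed-length window of 2*time_window inside data_seg, sliding it
    # away from the edges rather than truncating it.
    seg_start, seg_end = data_seg
    if (seg_end - seg_start) < (2 * time_window + 2 * pad_data):
        raise ValueError("Not enough data in the segment for this window")
    start = trig_time - time_window
    end = trig_time + time_window
    if seg_start > (start - pad_data):
        start = seg_start + pad_data
        end = start + 2 * time_window
    if seg_end < (end + pad_data):
        end = seg_end - pad_data
        start = end - 2 * time_window
    return int(start), int(end)

# A trigger only 3 s after the segment start still gets its full 20 s window:
print(slide_window_into_segment(1126259403, 10, (1126259400, 1126259700), pad_data=8))
# -> (1126259408, 1126259428)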
+ + +
+[docs] +def make_skipped_html(workflow, skipped_data, out_dir, tags): + """ + Make a html snippet from the list of skipped background coincidences + """ + exe = Executable(workflow.cp, 'html_snippet', + ifos=workflow.ifos, out_dir=out_dir, tags=tags) + + node = exe.create_node() + + parsed_data = {} + for ifo, time in skipped_data: + if ifo not in parsed_data: + parsed_data[ifo] = {} + if time not in parsed_data[ifo]: + parsed_data[ifo][time] = 1 + else: + parsed_data[ifo][time] = parsed_data[ifo][time] + 1 + + n_events = len(skipped_data) + html_string = '"{} background events have been skipped '.format(n_events) + html_string += 'because one of their single triggers already appears ' + html_string += 'in the events followed up above. ' + html_string += 'Specifically, the following single detector triggers ' + html_string += 'were found in these coincidences. ' + html_template = '{} event at time {} appeared {} times. ' + for ifo in parsed_data: + for time in parsed_data[ifo]: + n_occurances = parsed_data[ifo][time] + html_string += html_template.format(ifo, time, n_occurances) + + html_string += '"' + + node.add_opt('--html-text', html_string) + node.add_opt('--title', '"Events were skipped"') + node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') + workflow += node + files = node.output_files + return files
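The nested-dictionary bookkeeping above is just a per-(ifo, time) tally. An equivalent way to express it, shown only as a sketch with made-up trigger times, uses collections.Counter:

from collections import Counter

skipped_data = [('H1', 1126259462.4), ('L1', 1126259462.4), ('H1', 1126259462.4)]
for (ifo, time), n in Counter(skipped_data).items():
    print('{} event at time {} appeared {} times.'.format(ifo, time, n))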
+ + + +
+[docs] +def make_upload_files(workflow, psd_files, snr_timeseries, xml_all, + event_id, approximant, out_dir, channel_name, + tags=None): + """ + Make files including xml, skymap fits and plots for uploading to gracedb + for a given event + + Parameters + ---------- + psd_files: FileList([]) + PSD Files from MERGE_PSDs for the search as appropriate for the + event + snr_timeseries: FileList([]) + SNR timeseries files, one from each IFO, to add to the XML and plot + output from pysbs_single_template + xml_all: pycbc.workflow.core.File instance + XML file containing all events from the search + event_id: string + an integer to describe the event's position in the xml_all file + approximant: byte string + The approximant used for the template of the event, to be passed + to bayestar for sky location + out_dir: + The directory where all the output files should go + channel_name: string + Channel name to be added to the XML file to be uploaded + tags: {None, optional} + Tags to add to the minifollowups executables + + Returns + ------- + all_output_files: FileList + List of all output files from this process + """ + logging.info("Setting up upload files") + indiv_xml_exe = Executable( + workflow.cp, + 'generate_xml', + ifos=workflow.ifos, out_dir=out_dir, + tags=tags + ) + + logging.info("Setting up XML generation") + xml_node = indiv_xml_exe.create_node() + xml_node.add_input_opt('--input-file', xml_all) + xml_node.add_opt('--event-id', event_id) + xml_node.add_input_list_opt('--psd-files', psd_files) + xml_node.add_input_list_opt('--snr-timeseries', snr_timeseries) + xml_node.add_opt('--channel-name', channel_name) + xml_node.new_output_file_opt( + workflow.analysis_time, + '.png', + '--snr-timeseries-plot', + tags=['snr'] + ) + xml_node.new_output_file_opt( + workflow.analysis_time, + '.png', + '--psd-plot', + tags=['psd'] + ) + xml_out = xml_node.new_output_file_opt( + workflow.analysis_time, + '.xml', + '--output-file' + ) + + workflow += xml_node + + logging.info("Setting up bayestar generation") + bayestar_exe = Executable( + workflow.cp, + 'bayestar', + ifos=workflow.ifos, + out_dir=out_dir, + tags=tags + ) + + bayestar_node = bayestar_exe.create_node() + bayestar_node.add_input_opt('--event-xml', xml_out) + fits_out = bayestar_node.new_output_file_opt( + workflow.analysis_time, + '.fits', + '--output-file', + ) + + # This will be called if the approximant is within the bank + if approximant == b'SPAtmplt': + # Bayestar doesn't use the SPAtmplt approximant + approximant = b'TaylorF2' + if approximant is not None: + bayestar_node.add_opt('--approximant', approximant.decode()) + + workflow += bayestar_node + + logging.info("Setting up skymap plot generation") + skymap_plot_exe = PlotExecutable( + workflow.cp, + 'skymap_plot', + ifos=workflow.ifos, + out_dir=out_dir, + tags=tags + ) + + skymap_plot_node = skymap_plot_exe.create_node() + skymap_plot_node.add_input_opt('', fits_out) + skymap_plot_node.new_output_file_opt( + workflow.analysis_time, + '.png', + '-o', + ) + workflow += skymap_plot_node + + all_output_files = xml_node.output_files + bayestar_node.output_files + \ + skymap_plot_node.output_files + return all_output_files
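A hedged usage sketch for this function follows; every input object below is a placeholder standing in for files produced earlier in a real workflow, and the channel name is only an example:

# Assumes `workflow` is a configured pycbc.workflow.Workflow with
# [generate_xml], [bayestar] and [skymap_plot] executable sections.
upload_files = make_upload_files(
    workflow,
    psd_files=merged_psd_files,        # FileList: one merged PSD file per ifo
    snr_timeseries=snr_series_files,   # FileList: one SNR time series per ifo
    xml_all=xml_all_file,              # XML file with all foreground events
    event_id='0',                      # position of the event in xml_all
    approximant=b'IMRPhenomD',
    out_dir='upload_files',
    channel_name='H1:GDS-CALIB_STRAIN',
    tags=['FOREGROUND'],
)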
+ + + +
+[docs] +def setup_upload_prep_minifollowups(workflow, coinc_file, xml_all_file, + single_triggers, psd_files, + tmpltbank_file, insp_segs, insp_data_name, + insp_anal_name, dax_output, out_dir, + tags=None): + """ Create plots that follow up the Nth loudest coincident injection + from a statmap produced HDF file. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + The core workflow instance we are populating + coinc_file: pycbc.workflow.File + The file object associated with the statmap file containing the + coincident triggers. + single_triggers: list of pycbc.workflow.File + A list containing the file objects associated with the merged + single detector trigger files for each ifo. + psd_files: list of pycbc.workflow.File + A list containing the file objects associated with the merged + psd files for each ifo. + xml_all_file : workflow file object + XML File containing all foreground events + tmpltbank_file: pycbc.workflow.File + The file object pointing to the HDF format template bank + insp_segs: SegFile + The segment file containing the data read and analyzed by each inspiral + job. + insp_data_name: str + The name of the segmentlist storing data read. + insp_anal_name: str + The name of the segmentlist storing data analyzed. + dax_output : directory + Location of the dax outputs + out_dir: path + The directory to store minifollowups result plots and files + tags: {None, optional} + Tags to add to the minifollowups executables + + Returns + ------- + layout: list + A list of tuples which specify the displayed file layout for the + minifollowups plots. + """ + logger.info('Entering minifollowups module') + + if not workflow.cp.has_section('workflow-minifollowups'): + msg = 'There is no [workflow-minifollowups] section in ' + msg += 'configuration file' + logger.info(msg) + logger.info('Leaving minifollowups') + return + + tags = [] if tags is None else tags + makedir(dax_output) + makedir(out_dir) + + # turn the config file into a File class + config_path = os.path.abspath(dax_output + '/' + '_'.join(tags) + \ + 'upload_prep_minifollowup.ini') + workflow.cp.write(open(config_path, 'w')) + + config_file = resolve_url_to_file(config_path) + + exe = Executable(workflow.cp, 'upload_prep_minifollowup', + ifos=workflow.ifos, out_dir=dax_output, tags=tags) + + node = exe.create_node() + node.add_input_opt('--config-files', config_file) + node.add_input_opt('--xml-all-file', xml_all_file) + node.add_input_opt('--bank-file', tmpltbank_file) + node.add_input_opt('--statmap-file', coinc_file) + node.add_multiifo_input_list_opt('--single-detector-triggers', + single_triggers) + node.add_multiifo_input_list_opt('--psd-files', psd_files) + node.add_input_opt('--inspiral-segments', insp_segs) + node.add_opt('--inspiral-data-read-name', insp_data_name) + node.add_opt('--inspiral-data-analyzed-name', insp_anal_name) + if tags: + node.add_list_opt('--tags', tags) + node.new_output_file_opt(workflow.analysis_time, '.dax', '--dax-file') + node.new_output_file_opt(workflow.analysis_time, '.dax.map', '--output-map') + + name = node.output_files[0].name + map_file = node.output_files[1] + + node.add_opt('--workflow-name', name) + node.add_opt('--output-dir', out_dir) + node.add_opt('--dax-file-directory', '.') + + workflow += node + + # execute this in a sub-workflow + fil = node.output_files[0] + + # determine if a staging site has been specified + job = SubWorkflow(fil.name, is_planned=False) + input_files = [xml_all_file, tmpltbank_file, coinc_file, insp_segs] + \ + single_triggers + psd_files +
job.add_inputs(*input_files) + job.set_subworkflow_properties(map_file, + staging_site=workflow.staging_site, + cache_file=workflow.cache_file) + job.add_into_workflow(workflow) + logger.info('Leaving minifollowups module')
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/pegasus_sites.html b/latest/html/_modules/pycbc/workflow/pegasus_sites.html new file mode 100644 index 00000000000..f0635783dc5 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/pegasus_sites.html @@ -0,0 +1,434 @@ + + + + + + pycbc.workflow.pegasus_sites — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.workflow.pegasus_sites

+# Copyright (C) 2021 The PyCBC development team
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" This module provides default site catalogs, which should be suitable for
+most use cases. You can override individual details here. It should also be
+possible to implement a new site, though how that would work in practice is not yet clear.
+"""
+
+import logging
+import os.path
+import tempfile
+import urllib.parse
+from shutil import which
+from urllib.parse import urljoin
+from urllib.request import pathname2url
+
+from Pegasus.api import Directory, FileServer, Site, Operation, Namespace
+from Pegasus.api import Arch, OS, SiteCatalog
+
+from pycbc.version import last_release, version, release  # noqa
+
+logger = logging.getLogger('pycbc.workflow.pegasus_sites')
+
+if release == 'True':
+    sing_version = version
+else:
+    sing_version = last_release
+
+# NOTE urllib is weird. For some reason it only allows known schemes and will
+# give *wrong* results, rather than failing, if you use something like gsiftp.
+# We can add schemes explicitly, as below, but be careful with this!
+urllib.parse.uses_relative.append('gsiftp')
+urllib.parse.uses_netloc.append('gsiftp')
+
+KNOWN_SITES = ['local', 'condorpool_symlink',
+               'condorpool_copy', 'condorpool_shared', 'osg']
+
+
+
+[docs] +def add_site_pegasus_profile(site, cp): + """Add options from [pegasus_profile] in configparser to site""" + # Add global profile information + if cp.has_section('pegasus_profile'): + add_ini_site_profile(site, cp, 'pegasus_profile') + # Add site-specific profile information + if cp.has_section('pegasus_profile-{}'.format(site.name)): + add_ini_site_profile(site, cp, 'pegasus_profile-{}'.format(site.name))
+ + + +
+[docs] +def add_ini_site_profile(site, cp, sec): + """Add options from sec in configparser to site""" + for opt in cp.options(sec): + namespace = opt.split('|')[0] + if namespace in ('pycbc', 'container'): + continue + + value = cp.get(sec, opt).strip() + key = opt.split('|')[1] + site.add_profiles(Namespace(namespace), key=key, value=value)
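The [pegasus_profile] sections use a namespace|key naming convention that the two functions above unpack. A small sketch of that round trip, assuming PyCBC and Pegasus.api are installed and using example option values only:

from configparser import ConfigParser
from Pegasus.api import Site, Arch, OS
from pycbc.workflow.pegasus_sites import add_site_pegasus_profile

cp = ConfigParser()
cp.read_string("""
[pegasus_profile]
condor|request_memory = 2000M

[pegasus_profile-osg]
condor|accounting_group = ligo.dev.o4.cbc.explore.test
""")

site = Site("osg", arch=Arch.X86_64, os_type=OS.LINUX)
# Applies the global profile first, then the osg-specific section; each option
# name is split on '|' into a Pegasus namespace and a profile key.
add_site_pegasus_profile(site, cp)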
+ + + +
+[docs] +def add_local_site(sitecat, cp, local_path, local_url): + """Add the local site to site catalog""" + # local_url must end with a '/' + if not local_url.endswith('/'): + local_url = local_url + '/' + + local = Site("local", arch=Arch.X86_64, os_type=OS.LINUX) + add_site_pegasus_profile(local, cp) + + local_dir = Directory(Directory.SHARED_SCRATCH, + path=os.path.join(local_path, 'local-site-scratch')) + local_file_serv = FileServer(urljoin(local_url, 'local-site-scratch'), + Operation.ALL) + local_dir.add_file_servers(local_file_serv) + local.add_directories(local_dir) + + local.add_profiles(Namespace.PEGASUS, key="style", value="condor") + local.add_profiles(Namespace.CONDOR, key="getenv", value="True") + sitecat.add_sites(local)
+ + + + + + + +
+[docs] +def add_condorpool_copy_site(sitecat, cp): + """Add condorpool_copy site to site catalog""" + site = Site("condorpool_copy", arch=Arch.X86_64, os_type=OS.LINUX) + add_site_pegasus_profile(site, cp) + + site.add_profiles(Namespace.PEGASUS, key="style", value="condor") + site.add_profiles(Namespace.PEGASUS, key="data.configuration", + value="nonsharedfs") + site.add_profiles(Namespace.PEGASUS, key='transfer.bypass.input.staging', + value="true") + # This explicitly disables symlinking + site.add_profiles(Namespace.PEGASUS, key='nosymlink', + value=True) + site.add_profiles(Namespace.PEGASUS, key='auxillary.local', + value="true") + site.add_profiles(Namespace.CONDOR, key="+OpenScienceGrid", + value="False") + site.add_profiles(Namespace.CONDOR, key="should_transfer_files", + value="Yes") + site.add_profiles(Namespace.CONDOR, key="when_to_transfer_output", + value="ON_EXIT_OR_EVICT") + site.add_profiles(Namespace.CONDOR, key="getenv", value="True") + site.add_profiles(Namespace.CONDOR, key="+DESIRED_Sites", + value='"nogrid"') + site.add_profiles(Namespace.CONDOR, key="+IS_GLIDEIN", + value='"False"') + site.add_profiles(Namespace.CONDOR, key="+flock_local", + value="True") + site.add_profiles(Namespace.DAGMAN, key="retry", value="2") + sitecat.add_sites(site)
+ + + +
+[docs] +def add_condorpool_shared_site(sitecat, cp, local_path, local_url): + """Add condorpool_shared site to site catalog""" + # local_url must end with a '/' + if not local_url.endswith('/'): + local_url = local_url + '/' + + site = Site("condorpool_shared", arch=Arch.X86_64, os_type=OS.LINUX) + add_site_pegasus_profile(site, cp) + + # It's annoying that this is needed! + local_dir = Directory(Directory.SHARED_SCRATCH, + path=os.path.join(local_path, 'cpool-site-scratch')) + local_file_serv = FileServer(urljoin(local_url, 'cpool-site-scratch'), + Operation.ALL) + local_dir.add_file_servers(local_file_serv) + site.add_directories(local_dir) + + site.add_profiles(Namespace.PEGASUS, key="style", value="condor") + site.add_profiles(Namespace.PEGASUS, key="data.configuration", + value="sharedfs") + site.add_profiles(Namespace.PEGASUS, key='transfer.bypass.input.staging', + value="true") + site.add_profiles(Namespace.PEGASUS, key='auxillary.local', + value="true") + site.add_profiles(Namespace.CONDOR, key="+OpenScienceGrid", + value="False") + site.add_profiles(Namespace.CONDOR, key="should_transfer_files", + value="Yes") + site.add_profiles(Namespace.CONDOR, key="when_to_transfer_output", + value="ON_EXIT_OR_EVICT") + site.add_profiles(Namespace.CONDOR, key="getenv", value="True") + site.add_profiles(Namespace.CONDOR, key="+DESIRED_Sites", + value='"nogrid"') + site.add_profiles(Namespace.CONDOR, key="+IS_GLIDEIN", + value='"False"') + site.add_profiles(Namespace.CONDOR, key="+flock_local", + value="True") + site.add_profiles(Namespace.DAGMAN, key="retry", value="2") + # Need to set PEGASUS_HOME + peg_home = which('pegasus-plan') + assert peg_home.endswith('bin/pegasus-plan') + peg_home = peg_home.replace('bin/pegasus-plan', '') + site.add_profiles(Namespace.ENV, key="PEGASUS_HOME", value=peg_home) + sitecat.add_sites(site)
+ + + +# NOTE: We should now be able to add a nonfs site. I'll leave this for a +# future patch/as demanded feature though. The setup would largely be +# the same as the OSG site, except without the OSG specific things. + +# def add_condorpool_nonfs_site(sitecat, cp): + + +
+[docs] +def add_osg_site(sitecat, cp): + """Add osg site to site catalog""" + site = Site("osg", arch=Arch.X86_64, os_type=OS.LINUX) + add_site_pegasus_profile(site, cp) + site.add_profiles(Namespace.PEGASUS, key="style", value="condor") + site.add_profiles(Namespace.PEGASUS, key="data.configuration", + value="condorio") + site.add_profiles(Namespace.PEGASUS, key='transfer.bypass.input.staging', + value="true") + site.add_profiles(Namespace.CONDOR, key="should_transfer_files", + value="Yes") + site.add_profiles(Namespace.CONDOR, key="when_to_transfer_output", + value="ON_SUCCESS") + site.add_profiles(Namespace.CONDOR, key="success_exit_code", + value="0") + site.add_profiles(Namespace.CONDOR, key="+OpenScienceGrid", + value="True") + site.add_profiles(Namespace.CONDOR, key="getenv", + value="False") + site.add_profiles(Namespace.CONDOR, key="+InitializeModulesEnv", + value="False") + site.add_profiles(Namespace.CONDOR, key="+SingularityCleanEnv", + value="True") + site.add_profiles(Namespace.CONDOR, key="Requirements", + value="(HAS_SINGULARITY =?= TRUE) && " + "(HAS_LIGO_FRAMES =?= True) && " + "(IS_GLIDEIN =?= True)") + cvmfs_loc = '"/cvmfs/singularity.opensciencegrid.org/pycbc/pycbc-el8:v' + cvmfs_loc += sing_version + '"' + site.add_profiles(Namespace.CONDOR, key="+SingularityImage", + value=cvmfs_loc) + # On OSG failure rate is high + site.add_profiles(Namespace.DAGMAN, key="retry", value="4") + site.add_profiles(Namespace.ENV, key="LAL_DATA_PATH", + value="/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation") + # Add MKL location to LD_LIBRARY_PATH for OSG + site.add_profiles(Namespace.ENV, key="LD_LIBRARY_PATH", + value="/usr/local/lib:/.singularity.d/libs") + sitecat.add_sites(site)
+ + + +
+[docs] +def add_site(sitecat, sitename, cp, out_dir=None): + """Add site sitename to site catalog""" + # Allow local site scratch to be overriden for any site which uses it + sec = 'pegasus_profile-{}'.format(sitename) + opt = 'pycbc|site-scratch' + if cp.has_option(sec, opt): + out_dir = os.path.abspath(cp.get(sec, opt)) + if cp.has_option(sec, 'pycbc|unique-scratch'): + scratchdir = tempfile.mkdtemp(prefix='pycbc-tmp_', dir=out_dir) + os.chmod(scratchdir, 0o755) + try: + os.symlink(scratchdir, '{}-site-scratch'.format(sitename)) + except OSError: + pass + out_dir = scratchdir + elif out_dir is None: + out_dir = os.getcwd() + local_url = urljoin('file://', pathname2url(out_dir)) + if sitename == 'local': + add_local_site(sitecat, cp, out_dir, local_url) + elif sitename == 'condorpool_symlink': + add_condorpool_symlink_site(sitecat, cp) + elif sitename == 'condorpool_copy': + add_condorpool_copy_site(sitecat, cp) + elif sitename == 'condorpool_shared': + add_condorpool_shared_site(sitecat, cp, out_dir, local_url) + elif sitename == 'osg': + add_osg_site(sitecat, cp) + else: + raise ValueError("Do not recognize site {}".format(sitename))
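For reference, the scratch-directory overrides this function looks for live in a per-site [pegasus_profile-...] section; a hedged example (the scratch path is a placeholder):

[pegasus_profile-condorpool_shared]
pycbc|site-scratch = /local/scratch/albert.einstein
pycbc|unique-scratch =

With unique-scratch set, a pycbc-tmp_* directory is created under the scratch path and symlinked as condorpool_shared-site-scratch in the current working directory.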
+ + + +
+[docs] +def make_catalog(cp, out_dir): + """Make combined catalog of built-in known sites""" + catalog = SiteCatalog() + for site in KNOWN_SITES: + add_site(catalog, site, cp, out_dir=out_dir) + return catalog
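A usage sketch, assuming PyCBC and Pegasus.api are installed, pegasus-plan is on PATH (the condorpool_shared site resolves PEGASUS_HOME from it), and cp is a configuration object like the one sketched above; the output directory is a placeholder:

from pycbc.workflow.pegasus_sites import make_catalog

catalog = make_catalog(cp, out_dir='/path/to/run/dir')
catalog.write('sites.yml')  # serialise the combined site catalog to YAML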
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/pegasus_workflow.html b/latest/html/_modules/pycbc/workflow/pegasus_workflow.html new file mode 100644 index 00000000000..50c3859a62b --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/pegasus_workflow.html @@ -0,0 +1,1174 @@ + + + + + + pycbc.workflow.pegasus_workflow — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.workflow.pegasus_workflow

+# Copyright (C) 2014  Alex Nitz
+#
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+""" This module provides thin wrappers around Pegasus.DAX3 functionality that
+provide additional abstraction and argument handling.
+"""
+import os
+import shutil
+import logging
+import tempfile
+import subprocess
+import warnings
+from packaging import version
+from urllib.request import pathname2url
+from urllib.parse import urljoin, urlsplit
+
+import Pegasus.api as dax
+
+logger = logging.getLogger('pycbc.workflow.pegasus_workflow')
+
+PEGASUS_FILE_DIRECTORY = os.path.join(os.path.dirname(__file__),
+                                      'pegasus_files')
+
+
+
+[docs] +class ProfileShortcuts(object): + """ Container of common methods for setting pegasus profile information + on Executables and nodes. This class expects to be inherited from + and for an add_profile method to be implemented. + """ +
+[docs] + def set_memory(self, size): + """ Set the amount of memory that is required in megabytes + """ + self.add_profile('condor', 'request_memory', '%sM' % size)
+ + +
+[docs] + def set_storage(self, size): + """ Set the amount of storage required in megabytes + """ + self.add_profile('condor', 'request_disk', '%sM' % size)
+ + +
+[docs] + def set_num_cpus(self, number): + self.add_profile('condor', 'request_cpus', number)
+ + +
+[docs] + def set_universe(self, universe): + if universe == 'standard': + self.add_profile("pegasus", "gridstart", "none") + + self.add_profile("condor", "universe", universe)
+ + +
+[docs] + def set_category(self, category): + self.add_profile('dagman', 'category', category)
+ + +
+[docs] + def set_priority(self, priority): + self.add_profile('dagman', 'priority', priority)
+ + +
+[docs] + def set_num_retries(self, number): + self.add_profile("dagman", "retry", number)
+ + +
+[docs] + def set_execution_site(self, site): + self.add_profile("selector", "execution.site", site)
+
+ + + +
+[docs] +class Executable(ProfileShortcuts): + """ The workflow representation of an Executable + """ + id = 0 + def __init__(self, name, os='linux', + arch='x86_64', installed=False, + container=None): + self.logical_name = name + "_ID%s" % str(Executable.id) + self.pegasus_name = name + Executable.id += 1 + self.os = dax.OS(os) + self.arch = dax.Arch(arch) + self.installed = installed + self.container = container + self.in_workflow = False + self.profiles = {} + self.transformations = {} + +
+[docs] + def create_transformation(self, site, url): + transform = Transformation( + self.logical_name, + site=site, + pfn=url, + is_stageable=self.installed, + arch=self.arch, + os_type=self.os, + container=self.container + ) + transform.pycbc_name = self.pegasus_name + for (namespace, key), value in self.profiles.items(): + transform.add_profiles( + dax.Namespace(namespace), + key=key, + value=value + ) + self.transformations[site] = transform
+ + +
+[docs] + def add_profile(self, namespace, key, value): + """ Add profile information to this executable + """ + if self.transformations: + err_msg = "Need code changes to be able to add profiles " + err_msg += "after transformations are created." + raise ValueError(err_msg) + self.profiles[(namespace, key)] = value
+
+ + + +
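A sketch of how an Executable becomes a Pegasus Transformation, assuming PyCBC and Pegasus.api are installed; the executable name and URL are placeholders:

from pycbc.workflow.pegasus_workflow import Executable

exe = Executable('pycbc_inspiral', os='linux', arch='x86_64', installed=True)
exe.set_memory(4000)              # ProfileShortcuts -> condor request_memory = 4000M
exe.add_profile('dagman', 'retry', '2')
# Profiles must be added before the transformation is created (see add_profile).
exe.create_transformation('local', 'file:///usr/bin/pycbc_inspiral')
transform = exe.transformations['local']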
+[docs] +class Transformation(dax.Transformation): + +
+[docs] + def is_same_as(self, other): + test_vals = ['namespace', 'version'] + test_site_vals = ['arch', 'os_type', 'os_release', + 'os_version', 'bypass', 'container'] + # Check for logical name first + if not self.pycbc_name == other.pycbc_name: + return False + + # Check the properties of the executable + for val in test_vals: + sattr = getattr(self, val) + oattr = getattr(other, val) + if not sattr == oattr: + return False + # Some properties are stored in the TransformationSite + self_site = list(self.sites.values()) + assert len(self_site) == 1 + self_site = self_site[0] + other_site = list(other.sites.values()) + assert len(other_site) == 1 + other_site = other_site[0] + for val in test_site_vals: + sattr = getattr(self_site, val) + oattr = getattr(other_site, val) + if not sattr == oattr: + return False + + # Also check the "profile". This is things like Universe, RAM/disk/CPU + # requests, execution site, getenv=True, etc. + for profile in self.profiles: + if profile not in other.profiles: + return False + for profile in other.profiles: + if profile not in self.profiles: + return False + + return True
+
+ + + +
+[docs] +class Node(ProfileShortcuts): + def __init__(self, transformation): + self.in_workflow = False + self.transformation=transformation + self._inputs = [] + self._outputs = [] + self._dax_node = dax.Job(transformation) + # NOTE: We are enforcing one site per transformation. Therefore the + # transformation used indicates the site to be used. + self.set_execution_site(list(transformation.sites.keys())[0]) + self._args = [] + # Each value in _options is added separated with whitespace + # so ['--option','value'] --> "--option value" + self._options = [] + # For _raw_options *NO* whitespace is added. + # so ['--option','value'] --> "--optionvalue" + # and ['--option',' ','value'] --> "--option value" + self._raw_options = [] + +
+[docs] + def add_arg(self, arg): + """ Add an argument + """ + if not isinstance(arg, File): + arg = str(arg) + + self._args += [arg]
+ + +
+[docs] + def add_raw_arg(self, arg): + """ Add an argument to the command line of this job, but do *NOT* add + white space between arguments. This can be added manually by adding + ' ' if needed + """ + if not isinstance(arg, File): + arg = str(arg) + + self._raw_options += [arg]
+ + +
+[docs] + def add_opt(self, opt, value=None, check_existing_options=True, **kwargs): # pylint:disable=unused-argument + """ Add an option + """ + if check_existing_options and (opt in self._options + or opt in self._raw_options): + err_msg = ( + "Trying to set option %s with value %s, but it " + "has already been provided by the configuration file. " + "Usually this should not be given in the config file, " + "but contact developers to check" + ) % (opt, value) + raise ValueError(err_msg) + if value is not None: + if not isinstance(value, File): + value = str(value) + self._options += [opt, value] + else: + self._options += [opt]
+ + + #private functions to add input and output data sources/sinks + def _add_input(self, inp): + """ Add as source of input data + """ + self._inputs += [inp] + self._dax_node.add_inputs(inp) + + def _add_output(self, out): + """ Add as destination of output data + """ + self._outputs += [out] + out.node = self + stage_out = out.storage_path is not None + self._dax_node.add_outputs(out, stage_out=stage_out) + + # public functions to add options, arguments with or without data sources +
+[docs] + def add_input(self, inp): + """Declares an input file without adding it as a command-line option. + """ + self._add_input(inp)
+ + +
+[docs] + def add_output(self, inp): + """Declares an output file without adding it as a command-line option. + """ + self._add_output(inp)
+ + +
+[docs] + def add_input_opt(self, opt, inp, **kwargs): + """ Add an option that determines an input + """ + self.add_opt(opt, inp._dax_repr(), **kwargs) + self._add_input(inp)
+ + +
+[docs] + def add_output_opt(self, opt, out, **kwargs): + """ Add an option that determines an output + """ + self.add_opt(opt, out._dax_repr(), **kwargs) + self._add_output(out)
+ + +
+[docs] + def add_output_list_opt(self, opt, outputs, **kwargs): + """ Add an option that determines a list of outputs + """ + self.add_opt(opt, **kwargs) + # Never check existing options for list option values + if 'check_existing_options' in kwargs: + kwargs['check_existing_options'] = False + for out in outputs: + self.add_opt(out, **kwargs) + self._add_output(out)
+ + +
+[docs] + def add_input_list_opt(self, opt, inputs, **kwargs): + """ Add an option that determines a list of inputs + """ + self.add_opt(opt, **kwargs) + # Never check existing options for list option values + if 'check_existing_options' in kwargs: + del kwargs['check_existing_options'] + for inp in inputs: + self.add_opt( + inp, + check_existing_options=False, + **kwargs + ) + self._add_input(inp)
+ + +
+[docs] + def add_list_opt(self, opt, values, **kwargs): + """ Add an option with a list of non-file parameters. + """ + self.add_opt(opt, **kwargs) + # Never check existing options for list option values + if 'check_existing_options' in kwargs: + del kwargs['check_existing_options'] + for val in values: + self.add_opt( + val, + check_existing_options=False, + **kwargs + )
+ + +
+[docs] + def add_input_arg(self, inp): + """ Add an input as an argument + """ + self.add_arg(inp._dax_repr()) + self._add_input(inp)
+ + +
+[docs] + def add_output_arg(self, out): + """ Add an output as an argument + """ + self.add_arg(out._dax_repr()) + self._add_output(out)
+ + +
+[docs] + def new_output_file_opt(self, opt, name): + """ Add an option and return a new file handle + """ + fil = File(name) + self.add_output_opt(opt, fil) + return fil
+ + + # functions to describe properties of this node +
+[docs] + def add_profile(self, namespace, key, value): + """ Add profile information to this node at the DAX level + """ + self._dax_node.add_profiles( + dax.Namespace(namespace), + key=key, + value=value + )
+ + + def _finalize(self): + if len(self._raw_options): + raw_args = [''.join([str(a) for a in self._raw_options])] + else: + raw_args = [] + args = self._args + raw_args + self._options + self._dax_node.add_args(*args)
+ + + +
+[docs] +class Workflow(object): + """ + """ + def __init__(self, name='my_workflow', directory=None, cache_file=None, + dax_file_name=None): + # Pegasus logging is fairly verbose, quieten it down a bit + # This sets the logger to one level less verbose than the root + # (pycbc) logger + + curr_level = logging.getLogger().level + # Get the logger associated with the Pegasus workflow import + pegasus_logger = logging.getLogger('Pegasus') + pegasus_logger.setLevel(curr_level + 10) + self.name = name + self._rc = dax.ReplicaCatalog() + self._tc = dax.TransformationCatalog() + + if directory is None: + self.out_dir = os.getcwd() + else: + self.out_dir = os.path.abspath(directory) + + if cache_file is not None: + cache_file = os.path.abspath(cache_file) + self.cache_file = cache_file + + self._inputs = [] + self._outputs = [] + self._transformations = [] + self._containers = [] + self.in_workflow = False + self.sub_workflows = [] + if dax_file_name is None: + self.filename = self.name + '.dax' + else: + self.filename = dax_file_name + self._adag = dax.Workflow(self.filename) + + # A pegasus job version of this workflow for use if it is included + # within a larger workflow + self._as_job = SubWorkflow(self.filename, is_planned=False, + _id=self.name) + self._swinputs = [] + +
+[docs] + def add_workflow(self, workflow): + """ Add a sub-workflow to this workflow + + This function adds a sub-workflow of Workflow class to this workflow. + Parent child relationships are determined by data dependencies + + Parameters + ---------- + workflow : Workflow instance + The sub-workflow to add to this one + """ + workflow.in_workflow = self + self.sub_workflows += [workflow] + self._adag.add_jobs(workflow._as_job) + return self
+ + +
+[docs] + def add_explicit_dependancy(self, parent, child): + """ + Add an explicit dependancy between two Nodes in this workflow. + + Most dependencies (in PyCBC and Pegasus thinking) are added by + declaring file linkages. However, there are some cases where you might + want to override that and add an explicit dependancy. + + Parameters + ---------- + parent : Node instance + The parent Node. + child : Node instance + The child Node + """ + self._adag.add_dependency(parent._dax_node, children=[child._dax_node])
+ + +
+[docs] + def add_subworkflow_dependancy(self, parent_workflow, child_workflow): + """ + Add a dependency between two sub-workflows in this workflow + + This is done if those subworkflows are themselves declared as Workflows + which are sub-workflows and not explicit SubWorkflows. (These Workflows + contain SubWorkflows inside them .... Yes, the relationship between + PyCBC and Pegasus becomes confusing here). If you are working with + explicit SubWorkflows these can be added normally using File relations. + + Parameters + ---------- + parent_workflow : Workflow instance + The sub-workflow to use as the parent dependence. + Must be a sub-workflow of this workflow. + child_workflow : Workflow instance + The sub-workflow to add as the child dependence. + Must be a sub-workflow of this workflow. + """ + self._adag.add_dependency(parent_workflow._as_job, + children=[child_workflow._as_job])
+ + +
+[docs] + def add_transformation(self, tranformation): + """ Add a transformation to this workflow + + Adds the input transformation to this workflow. + + Parameters + ---------- + transformation : Pegasus.api.Transformation + The transformation to be added. + """ + self._tc.add_transformations(tranformation)
+ + +
+[docs] + def add_container(self, container): + """ Add a container to this workflow + + Adds the input container to this workflow. + + Parameters + ---------- + container : Pegasus.api.Container + The container to be added. + """ + self._tc.add_containers(container)
+ + +
+[docs] + def add_node(self, node): + """ Add a node to this workflow + + This function adds nodes to the workflow. It also determines + parent/child relations from the inputs to this job. + + Parameters + ---------- + node : pycbc.workflow.pegasus_workflow.Node + A node that should be executed as part of this workflow. + """ + node._finalize() + node.in_workflow = self + + # Record the executable that this node uses + if node.transformation not in self._transformations: + for tform in self._transformations: + # Check if transform is already in workflow + if node.transformation.is_same_as(tform): + node.transformation.in_workflow = True + node._dax_node.transformation = tform.name + node.transformation.name = tform.name + break + else: + self._transformations += [node.transformation] + lgc = (hasattr(node, 'executable') + and node.executable.container is not None + and node.executable.container not in self._containers) + if lgc: + self._containers.append(node.executable.container) + + # Add the node itself + self._adag.add_jobs(node._dax_node) + + # Determine the parent child relationships based on the inputs that + # this node requires. + # In Pegasus5 this is mostly handled by pegasus, we just need to + # connect files correctly if dealing with file management between + # workflows/subworkflows + for inp in node._inputs: + if inp.node is not None and inp.node.in_workflow == self: + # Standard case: File produced within the same workflow. + # Don't need to do anything here. + continue + + elif inp.node is not None and not inp.node.in_workflow: + # This error should be rare, but can happen. If a Node hasn't + # yet been added to a workflow, this logic breaks. Always add + # nodes in order that files will be produced. + raise ValueError('Parents of this node must be added to the ' + 'workflow first.') + + elif inp.node is None: + # File is external to the workflow (e.g. a pregenerated + # template bank). (if inp.node is None) + if inp not in self._inputs: + self._inputs += [inp] + + elif inp.node.in_workflow != self: + # File is coming from a parent workflow, or other workflow + # These needs a few extra hooks later, use _swinputs for this. + if inp not in self._inputs: + self._inputs += [inp] + self._swinputs += [inp] + else: + err_msg = ("I don't understand how to deal with an input file " + "here. Ian doesn't think this message should be " + "possible, but if you get here something has gone " + "wrong and will need debugging!") + raise ValueError(err_msg) + + # Record the outputs that this node generates + self._outputs += node._outputs + + return self
+ + + def __add__(self, other): + if isinstance(other, Node): + return self.add_node(other) + elif isinstance(other, Workflow): + return self.add_workflow(other) + else: + raise TypeError('Cannot add type %s to this workflow' % type(other)) + +
+[docs] + def traverse_workflow_io(self): + """ If input is needed from another workflow within a larger + hierarchical workflow, determine the path for the file to reach + the destination and add the file to workflows input / output as + needed. + """ + def root_path(v): + path = [v] + while v.in_workflow: + path += [v.in_workflow] + v = v.in_workflow + return path + + for inp in self._swinputs: + workflow_root = root_path(self) + input_root = root_path(inp.node.in_workflow) + for step in workflow_root: + if step in input_root: + common = step + break + + # Set our needed file as output so that it gets staged upwards + # to a workflow that contains the job which needs it. + for idx in range(input_root.index(common)): + child_wflow = input_root[idx] + parent_wflow = input_root[idx+1] + if inp not in child_wflow._as_job.get_outputs(): + child_wflow._as_job.add_outputs(inp, stage_out=True) + parent_wflow._outputs += [inp] + + # Set out needed file so it gets staged downwards towards the + # job that needs it. + for wf in workflow_root[:workflow_root.index(common)]: + if inp not in wf._as_job.get_inputs(): + wf._as_job.add_inputs(inp) + + for wf in self.sub_workflows: + wf.traverse_workflow_io()
+ + +
+[docs] + def save(self, filename=None, submit_now=False, plan_now=False, + output_map_path=None, root=True): + """ Write this workflow to DAX file + """ + if filename is None: + filename = self.filename + + if output_map_path is None: + output_map_path = 'output.map' + + # Handle setting up io for inter-workflow file use ahead of time + # so that when daxes are saved the metadata is complete + if root: + self.traverse_workflow_io() + + for sub in self.sub_workflows: + sub.save(root=False) + # FIXME: If I'm now putting output_map here, all output_map stuff + # should move here. + sub.output_map_file.insert_into_dax(self._rc, self.sites) + sub_workflow_file = File(sub.filename) + pfn = os.path.join(os.getcwd(), sub.filename) + sub_workflow_file.add_pfn(pfn, site='local') + sub_workflow_file.insert_into_dax(self._rc, self.sites) + + # add workflow input files pfns for local site to dax + for fil in self._inputs: + fil.insert_into_dax(self._rc, self.sites) + + self._adag.add_replica_catalog(self._rc) + + # Add TC into workflow + self._adag.add_transformation_catalog(self._tc) + + with open(output_map_path, 'w') as f: + for out in self._outputs: + try: + f.write(out.output_map_str() + '\n') + except ValueError: + # There was no storage path + pass + + # Pegasus requires that we write the DAX file into the local directory + olddir = os.getcwd() + os.chdir(self.out_dir) + self._adag.write(filename) + if not self.in_workflow: + if submit_now or plan_now: + self.plan_and_submit(submit_now=submit_now) + else: + with open('additional_planner_args.dat', 'w') as f: + stage_site_str = self.staging_site_str + exec_sites = self.exec_sites_str + # For now we don't include --config as this can be added to + # in submit_dax. We should add an option to add additional + # pegasus properties (through the config files?) here. + #prop_file = os.path.join(PEGASUS_FILE_DIRECTORY, + # 'pegasus-properties.conf') + #f.write('--conf {} '.format(prop_file)) + if self.cache_file is not None: + f.write('--cache {} '.format(self.cache_file)) + + f.write('--output-sites local ') + f.write('--sites {} '.format(exec_sites)) + f.write('--staging-site {} '.format(stage_site_str)) + f.write('--cluster label,horizontal ') + f.write('--cleanup inplace ') + f.write('--relative-dir work ') + # --dir is not being set here because it might be easier to + # set this in submit_dax still? + f.write('-q ') + f.write('--dax {}'.format(filename)) + os.chdir(olddir)
+ + +
+[docs] + def plan_and_submit(self, submit_now=True): + """ Plan and submit the workflow now. + """ + # New functionality, this might still need some work. Here's things + # that this might want to do, that submit_dax does: + # * Checks proxy (ignore this, user should already have this done) + # * Pulls properties file in (DONE) + # * Send necessary options to the planner (DONE) + # * Some logging about hostnames (NOT DONE, needed?) + # * Setup the helper scripts (start/debug/stop/status) .. (DONE) + # * Copy some of the interesting files into workflow/ (DONE) + # * Checks for dashboard URL (NOT DONE) + # * Does something with condor_reschedule (NOT DONE, needed?) + + planner_args = {} + planner_args['submit'] = submit_now + + # Get properties file - would be nice to add extra properties here. + prop_file = os.path.join(PEGASUS_FILE_DIRECTORY, + 'pegasus-properties.conf') + planner_args['conf'] = prop_file + + # Cache file, if there is one + if self.cache_file is not None: + planner_args['cache'] = [self.cache_file] + + # Not really sure what this does, but Karan said to use it. Seems to + # matter for subworkflows + planner_args['output_sites'] = ['local'] + + # Site options + planner_args['sites'] = self.sites + planner_args['staging_sites'] = self.staging_site + + # Make tmpdir for submitfiles + # default directory is the system default, but is overrideable + # This should probably be moved to core.py? + submit_opts = 'pegasus_profile', 'pycbc|submit-directory' + submit_dir = None + if self.cp.has_option(*submit_opts): + submit_dir = self.cp.get(*submit_opts) + submitdir = tempfile.mkdtemp(prefix='pycbc-tmp_', dir=submit_dir) + os.chmod(submitdir, 0o755) + try: + os.remove('submitdir') + except FileNotFoundError: + pass + os.symlink(submitdir, 'submitdir') + planner_args['dir'] = submitdir + + # Other options + planner_args['cluster'] = ['label,horizontal'] + planner_args['relative_dir'] = 'work' + planner_args['cleanup'] = 'inplace' + # This quietens the planner a bit. We cannot set the verbosity + # directly, which would be better. So be careful, if changing the + # pegasus.mode property, it will change the verbosity (a lot). + planner_args['quiet'] = 1 + + # FIXME: The location of output.map is hardcoded in the properties + # file. This is overridden for subworkflows, but is not for + # main workflows with submit_dax. If we ever remove submit_dax + # we should include the location explicitly here. + self._adag.plan(**planner_args) + + # Set up convenience scripts + with open('status', 'w') as fp: + fp.write('pegasus-status --verbose ') + fp.write('--long {}/work $@'.format(submitdir)) + + with open('debug', 'w') as fp: + fp.write('pegasus-analyzer -r ') + fp.write('-v {}/work $@'.format(submitdir)) + + with open('stop', 'w') as fp: + fp.write('pegasus-remove {}/work $@'.format(submitdir)) + + with open('start', 'w') as fp: + fp.write('pegasus-run {}/work $@'.format(submitdir)) + + os.chmod('status', 0o755) + os.chmod('debug', 0o755) + os.chmod('stop', 0o755) + os.chmod('start', 0o755) + + os.makedirs('workflow/planning', exist_ok=True) + + shutil.copy2(prop_file, 'workflow/planning') + shutil.copy2(os.path.join(submitdir, 'work', 'braindump.yml'), + 'workflow/planning') + + if self.cache_file is not None: + shutil.copy2(self.cache_file, 'workflow/planning')
+
+ + + +
+[docs] +class SubWorkflow(dax.SubWorkflow): + """Workflow job representation of a SubWorkflow. + + This follows the Pegasus nomenclature where there are Workflows, Jobs and + SubWorkflows. Be careful though! A SubWorkflow is actually a Job, not a + Workflow. If creating a sub-workflow you would create a Workflow as normal + and write out the necessary dax files. Then you would create a SubWorkflow + object, which acts as the Job in the top-level workflow. Most of the + special linkages that are needed for sub-workflows are then handled at that + stage. We do add a little bit of functionality here. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.pycbc_planner_args = {} + +
+[docs] + def add_into_workflow(self, container_wflow): + """Add this Job into a container Workflow + """ + self.add_planner_args(**self.pycbc_planner_args) + + # Set this to None so code will fail if more planner args are added + self.pycbc_planner_args = None + container_wflow._adag.add_jobs(self)
+ + +
+[docs] + def add_planner_arg(self, value, option): + if self.pycbc_planner_args is None: + err_msg = ("We cannot add arguments to the SubWorkflow planning " + "stage after this is added to the parent workflow.") + raise ValueError(err_msg) + + self.pycbc_planner_args[value] = option
+ + +
+[docs] + def set_subworkflow_properties(self, output_map_file, + staging_site, + cache_file): + + self.add_planner_arg('pegasus.dir.storage.mapper.replica.file', + os.path.basename(output_map_file.name)) + # Ensure output_map_file has the for_planning flag set. There's no + # API way to set this after the File is initialized, so we have to + # change the attribute here. + # WORSE, we only want to set this if the pegasus *planner* is version + # 5.0.4 or larger + sproc_out = subprocess.check_output(['pegasus-version']).strip() + sproc_out = sproc_out.decode() + if version.parse(sproc_out) >= version.parse('5.0.4'): + output_map_file.for_planning=True + self.add_inputs(output_map_file) + + # I think this is needed to deal with cases where the subworkflow file + # does not exist at submission time. + bname = os.path.splitext(os.path.basename(self.file))[0] + self.add_planner_arg('basename', bname) + self.add_planner_arg('output_sites', ['local']) + self.add_planner_arg('cleanup', 'inplace') + self.add_planner_arg('cluster', ['label', 'horizontal']) + self.add_planner_arg('verbose', 3) + + if cache_file: + self.add_planner_arg('cache', [cache_file]) + + if staging_site: + self.add_planner_arg('staging_sites', staging_site)
+
+ + + +
+[docs] +class File(dax.File): + """ The workflow representation of a physical file + + An object that represents a file from the perspective of setting up a + workflow. The file may or may not exist at the time of workflow generation. + If it does, this is represented by containing a physical file name (PFN). + A storage path is also available to indicate the desired final + destination of this file. + """ + def __init__(self, name): + self.name = name + self.node = None + dax.File.__init__(self, name) + # Storage_path is where the file would be *output* to + self.storage_path = None + # Input_pfns is *input* locations of the file. This needs a site. + self.input_pfns = [] + # Adding to a dax finalizes the File. Ensure that changes cannot be + # made after doing this. + self.added_to_dax = False + + def _dax_repr(self): + return self + + @property + def dax_repr(self): + """Return the dax representation of a File.""" + return self._dax_repr() + +
+[docs] + def output_map_str(self): + if self.storage_path: + return '%s %s pool="%s"' % (self.name, self.storage_path, 'local') + else: + raise ValueError('This file does not have a storage path')
+ + +
+[docs] + def add_pfn(self, url, site): + """ + Associate a PFN with this file. Takes a URL and associated site. + """ + self.input_pfns.append((url, site))
+ + +
+[docs] + def has_pfn(self, url, site='local'): + """ + Check if the url, site is already associated to this File. If site is + not provided, we will assume it is 'local'. + """ + return (((url, site) in self.input_pfns) + or ((url, 'all') in self.input_pfns))
+ + +
+[docs] + def insert_into_dax(self, rep_cat, sites): + for (url, site) in self.input_pfns: + if site == 'all': + for curr_site in sites: + rep_cat.add_replica(curr_site, self, url) + else: + rep_cat.add_replica(site, self, url)
+ + +
+[docs] + @classmethod + def from_path(cls, path): + """Takes a path and returns a File object with the path as the PFN.""" + warnings.warn("The from_path method in pegasus_workflow is " + "deprecated. Please use File.from_path (for " + "output files) in core.py or resolve_url_to_file " + "in core.py (for input files) instead.", + DeprecationWarning) + urlparts = urlsplit(path) + site = 'nonlocal' + if (urlparts.scheme == '' or urlparts.scheme == 'file'): + if os.path.isfile(urlparts.path): + path = os.path.abspath(urlparts.path) + path = urljoin('file:', pathname2url(path)) + site = 'local' + + fil = cls(os.path.basename(path)) + fil.add_pfn(path, site=site) + return fil
+
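A small sketch of the File bookkeeping above, assuming PyCBC and Pegasus.api are installed; the file names and paths are placeholders:

from pycbc.workflow.pegasus_workflow import File

bank = File('H1L1-BANK-1000000000-10000.hdf')
bank.add_pfn('file:///data/banks/H1L1-BANK-1000000000-10000.hdf', site='local')
print(bank.has_pfn('file:///data/banks/H1L1-BANK-1000000000-10000.hdf'))  # True

# Setting a storage path makes the file appear in the workflow's output map:
bank.storage_path = 'output/bank/H1L1-BANK-1000000000-10000.hdf'
print(bank.output_map_str())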
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/plotting.html b/latest/html/_modules/pycbc/workflow/plotting.html new file mode 100644 index 00000000000..8035ddd9208 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/plotting.html @@ -0,0 +1,789 @@ + + + + + + pycbc.workflow.plotting — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pycbc.workflow.plotting

+# Copyright (C) 2015  Alex Nitz
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module is responsible for setting up plotting jobs.
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/NOTYETCREATED.html
+"""
+
+import logging
+from urllib.request import pathname2url
+from urllib.parse import urljoin
+
+from pycbc.workflow.core import File, FileList, makedir, Executable
+
+logger = logging.getLogger('pycbc.workflow.plotting')
+
+
+
+[docs] +def excludestr(tags, substr): + if substr is None: + return tags + if isinstance(substr, list): + if len(substr) > 1: + tags = excludestr(tags, substr[1:]) + substr = substr[0] + return [tag for tag in tags if substr not in tag]
+ + + +
+[docs] +def requirestr(tags, substr): + if substr is None: + return tags + return [tag for tag in tags if substr in tag]
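These two helpers filter the configuration subsection tags that drive most plotting jobs. A standalone example, assuming PyCBC is installed and using arbitrary tag names:

from pycbc.workflow.plotting import excludestr, requirestr

tags = ['all_data', 'injections_found', 'injections_missed', 'background']
print(requirestr(tags, 'injections'))
# ['injections_found', 'injections_missed']
print(excludestr(tags, ['missed', 'background']))
# ['all_data', 'injections_found']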
+ + + +
+[docs] +class PlotExecutable(Executable): + """ plot executable + """ + current_retention_level = Executable.FINAL_RESULT + + # plots and final results should get the highest priority + # on the job queue +
+[docs] + def create_node(self, **kwargs): + node = Executable.create_node(self, **kwargs) + node.set_priority(1000) + return node
+
+ + + +
+[docs] +def make_template_plot(workflow, bank_file, out_dir, bins=None, + tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'plot_bank', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_opt('--bank-file', bank_file) + + if workflow.cp.has_option_tags('workflow-coincidence', 'background-bins', tags=tags): + bins = workflow.cp.get_opt_tags('workflow-coincidence', 'background-bins', tags=tags) + if bins: + node.add_opt('--background-bins', bins) + + node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def make_range_plot(workflow, psd_files, out_dir, exclude=None, require=None, + tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + secs = requirestr(workflow.cp.get_subsections('plot_range'), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + files = FileList([]) + for tag in secs: + node = PlotExecutable(workflow.cp, 'plot_range', ifos=workflow.ifos, + out_dir=out_dir, tags=[tag] + tags).create_node() + node.add_input_list_opt('--psd-files', psd_files) + node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') + workflow += node + files += node.output_files + return files
+ + + +
+[docs] +def make_spectrum_plot(workflow, psd_files, out_dir, tags=None, + hdf_group=None, precalc_psd_files=None): + tags = [] if tags is None else tags + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'plot_spectrum', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_list_opt('--psd-files', psd_files) + node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') + + if hdf_group is not None: + node.add_opt('--hdf-group', hdf_group) + if precalc_psd_files is not None and len(precalc_psd_files) == 1: + node.add_input_list_opt('--psd-file', precalc_psd_files) + + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def make_segments_plot(workflow, seg_files, out_dir, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'plot_segments', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_list_opt('--segment-files', seg_files) + node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') + workflow += node
+ + + +
+[docs] +def make_gating_plot(workflow, insp_files, out_dir, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'plot_gating', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_list_opt('--input-file', insp_files) + node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') + workflow += node
+ + + +
+[docs] +def make_throughput_plot(workflow, insp_files, out_dir, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'plot_throughput', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_list_opt('--input-file', insp_files) + node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') + workflow += node
+ + + +
+[docs] +def make_foreground_table(workflow, trig_file, bank_file, out_dir, + singles=None, extension='.html', tags=None, + hierarchical_level=None): + + if hierarchical_level is not None and tags: + tags = [("HIERARCHICAL_LEVEL_{:02d}".format( + hierarchical_level))] + tags + elif hierarchical_level is not None and not tags: + tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)] + elif hierarchical_level is None and not tags: + tags = [] + + makedir(out_dir) + exe = PlotExecutable(workflow.cp, 'page_foreground', + ifos=trig_file.ifo_list, + out_dir=out_dir, tags=tags) + node = exe.create_node() + node.add_input_opt('--bank-file', bank_file) + node.add_input_opt('--trigger-file', trig_file) + if hierarchical_level is not None: + node.add_opt('--use-hierarchical-level', hierarchical_level) + if singles is not None: + node.add_input_list_opt('--single-detector-triggers', singles) + node.new_output_file_opt(bank_file.segment, extension, '--output-file') + workflow += node + return node.output_files[0]
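
Several functions in this module (this one, make_ifar_plot and make_snrifar_plot below) build their tag list the same way when a hierarchical_level is supplied; a standalone sketch of that string handling with made-up values:

hierarchical_level = 2
tags = ['FULL_DATA']   # hypothetical existing tags
if hierarchical_level is not None and tags:
    tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)] + tags
print(tags)   # ['HIERARCHICAL_LEVEL_02', 'FULL_DATA']
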
+ + + +
+[docs] +def make_sensitivity_plot(workflow, inj_file, out_dir, exclude=None, + require=None, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + secs = requirestr(workflow.cp.get_subsections('plot_sensitivity'), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + files = FileList([]) + for tag in secs: + node = PlotExecutable(workflow.cp, 'plot_sensitivity', ifos=workflow.ifos, + out_dir=out_dir, tags=[tag] + tags).create_node() + node.add_input_opt('--injection-file', inj_file) + node.new_output_file_opt(inj_file.segment, '.png', '--output-file') + workflow += node + files += node.output_files + return files
+ + + +
+[docs] +def make_coinc_snrchi_plot(workflow, inj_file, inj_trig, stat_file, trig_file, + out_dir, exclude=None, require=None, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + secs = requirestr(workflow.cp.get_subsections('plot_coinc_snrchi'), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + files = FileList([]) + for tag in secs: + exe = PlotExecutable(workflow.cp, 'plot_coinc_snrchi', + ifos=inj_trig.ifo_list, + out_dir=out_dir, tags=[tag] + tags) + node = exe.create_node() + node.add_input_opt('--found-injection-file', inj_file) + node.add_input_opt('--single-injection-file', inj_trig) + node.add_input_opt('--coinc-statistic-file', stat_file) + node.add_input_opt('--single-trigger-file', trig_file) + node.new_output_file_opt(inj_file.segment, '.png', '--output-file') + workflow += node + files += node.output_files + return files
+ + + +
+[docs] +def make_inj_table(workflow, inj_file, out_dir, missed=False, singles=None, + tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'page_injections', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + + node.add_input_opt('--injection-file', inj_file) + if missed: + node.add_opt('--show-missed') + + if singles is not None: + node.add_multiifo_input_list_opt('--single-trigger-files', singles) + + node.new_output_file_opt(inj_file.segment, '.html', '--output-file') + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def make_seg_table(workflow, seg_files, seg_names, out_dir, tags=None, + title_text=None, description=None): + """ Creates a node in the workflow for writing the segment summary + table. Returns a File instances for the output file. + """ + seg_files = list(seg_files) + seg_names = list(seg_names) + if tags is None: tags = [] + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'page_segtable', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_list_opt('--segment-files', seg_files) + quoted_seg_names = [] + for s in seg_names: + quoted_seg_names.append("'" + s + "'") + node.add_opt('--segment-names', ' '.join(quoted_seg_names)) + node.add_opt('--ifos', ' '.join(workflow.ifos)) + if description: + node.add_opt('--description', "'" + description + "'") + if title_text: + node.add_opt('--title-text', "'" + title_text + "'") + node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') + workflow += node + return node.output_files[0]
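
The segment names and title text are passed to page_segtable as single command-line options, so each name is wrapped in quotes before being joined; a small standalone sketch of that string handling (the segment names are hypothetical):

seg_names = ['H1:DMT-ANALYSIS_READY:1', 'L1:DMT-ANALYSIS_READY:1']
quoted_seg_names = ["'" + s + "'" for s in seg_names]
print(' '.join(quoted_seg_names))
# 'H1:DMT-ANALYSIS_READY:1' 'L1:DMT-ANALYSIS_READY:1'
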
+ + + +
+[docs] +def make_veto_table(workflow, out_dir, vetodef_file=None, tags=None): + """ Creates a node in the workflow for writing the veto_definer + table. Returns a File instances for the output file. + """ + if vetodef_file is None: + if not workflow.cp.has_option_tags("workflow-segments", + "segments-veto-definer-file", []): + return None + vetodef_file = workflow.cp.get_opt_tags("workflow-segments", + "segments-veto-definer-file", []) + file_url = urljoin('file:', pathname2url(vetodef_file)) + vdf_file = File(workflow.ifos, 'VETO_DEFINER', + workflow.analysis_time, file_url=file_url) + vdf_file.add_pfn(file_url, site='local') + else: + vdf_file = vetodef_file + + if tags is None: tags = [] + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'page_vetotable', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_opt('--veto-definer-file', vdf_file) + node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') + workflow += node + return node.output_files[0]
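
make_veto_table converts a local veto-definer path into a file URL with the two standard-library helpers imported at the top of this module; for example, with a hypothetical local path:

from urllib.request import pathname2url
from urllib.parse import urljoin

file_url = urljoin('file:', pathname2url('/data/segments/veto_definer.xml'))
print(file_url)   # file:///data/segments/veto_definer.xml
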
+ + + +
+[docs] +def make_seg_plot(workflow, seg_files, out_dir, seg_names=None, tags=None): + """ Creates a node in the workflow for plotting science, and veto segments. + """ + seg_files = list(seg_files) + if tags is None: tags = [] + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'page_segplot', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_list_opt('--segment-files', seg_files) + quoted_seg_names = [] + for s in seg_names: + quoted_seg_names.append("'" + s + "'") + node.add_opt('--segment-names', ' '.join(quoted_seg_names)) + node.add_opt('--ifos', ' '.join(workflow.ifos)) + node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def make_ifar_plot(workflow, trigger_file, out_dir, tags=None, + hierarchical_level=None, executable='page_ifar'): + """ Creates a node in the workflow for plotting cumulative histogram + of IFAR values. + """ + + if hierarchical_level is not None and tags: + tags = [("HIERARCHICAL_LEVEL_{:02d}".format( + hierarchical_level))] + tags + elif hierarchical_level is not None and not tags: + tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)] + elif hierarchical_level is None and not tags: + tags = [] + + makedir(out_dir) + exe = PlotExecutable(workflow.cp, executable, ifos=trigger_file.ifo_list, + out_dir=out_dir, tags=tags) + node = exe.create_node() + node.add_input_opt('--trigger-file', trigger_file) + if hierarchical_level is not None: + node.add_opt('--use-hierarchical-level', hierarchical_level) + node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def make_snrchi_plot(workflow, trig_files, veto_file, veto_name, + out_dir, exclude=None, require=None, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + secs = requirestr(workflow.cp.get_subsections('plot_snrchi'), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + files = FileList([]) + for tag in secs: + for trig_file in trig_files: + exe = PlotExecutable(workflow.cp, 'plot_snrchi', + ifos=trig_file.ifo_list, + out_dir=out_dir, + tags=[tag] + tags) + node = exe.create_node() + + node.set_memory(15000) + node.add_input_opt('--trigger-file', trig_file) + if veto_file is not None: + node.add_input_opt('--veto-file', veto_file) + node.add_opt('--segment-name', veto_name) + node.new_output_file_opt(trig_file.segment, '.png', '--output-file') + workflow += node + files += node.output_files + return files
+ + + +
+[docs] +def make_foundmissed_plot(workflow, inj_file, out_dir, exclude=None, + require=None, tags=None): + if tags is None: + tags = [] + makedir(out_dir) + secs = requirestr(workflow.cp.get_subsections('plot_foundmissed'), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + files = FileList([]) + for tag in secs: + exe = PlotExecutable(workflow.cp, 'plot_foundmissed', ifos=workflow.ifos, + out_dir=out_dir, tags=[tag] + tags) + node = exe.create_node() + ext = '.html' if exe.has_opt('dynamic') else '.png' + node.add_input_opt('--injection-file', inj_file) + node.new_output_file_opt(inj_file.segment, ext, '--output-file') + workflow += node + files += node.output_files + return files
+ + + +
+[docs] +def make_snrratehist_plot(workflow, bg_file, out_dir, closed_box=False, + tags=None, hierarchical_level=None): + + if hierarchical_level is not None and tags: + tags = [("HIERARCHICAL_LEVEL_{:02d}".format( + hierarchical_level))] + tags + elif hierarchical_level is not None and not tags: + tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)] + elif hierarchical_level is None and not tags: + tags = [] + + makedir(out_dir) + exe = PlotExecutable(workflow.cp, 'plot_snrratehist', + ifos=bg_file.ifo_list, + out_dir=out_dir, tags=tags) + node = exe.create_node() + node.add_input_opt('--trigger-file', bg_file) + if hierarchical_level is not None: + node.add_opt('--use-hierarchical-level', hierarchical_level) + + if closed_box: + node.add_opt('--closed-box') + + node.new_output_file_opt(bg_file.segment, '.png', '--output-file') + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def make_snrifar_plot(workflow, bg_file, out_dir, closed_box=False, + cumulative=True, tags=None, hierarchical_level=None): + + if hierarchical_level is not None and tags: + tags = [("HIERARCHICAL_LEVEL_{:02d}".format( + hierarchical_level))] + tags + elif hierarchical_level is not None and not tags: + tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)] + elif hierarchical_level is None and not tags: + tags = [] + + makedir(out_dir) + exe = PlotExecutable(workflow.cp, 'plot_snrifar', ifos=bg_file.ifo_list, + out_dir=out_dir, tags=tags) + node = exe.create_node() + node.add_input_opt('--trigger-file', bg_file) + if hierarchical_level is not None: + node.add_opt('--use-hierarchical-level', hierarchical_level) + + if closed_box: + node.add_opt('--closed-box') + + if not cumulative: + node.add_opt('--not-cumulative') + + node.new_output_file_opt(bg_file.segment, '.png', '--output-file') + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def make_results_web_page(workflow, results_dir, template='orange', + explicit_dependencies=None): + template_path = 'templates/'+template+'.html' + + out_dir = workflow.cp.get('results_page', 'output-path') + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'results_page', ifos=workflow.ifos, + out_dir=out_dir).create_node() + node.add_opt('--plots-dir', results_dir) + node.add_opt('--template-file', template_path) + workflow += node + if explicit_dependencies is not None: + for dep in explicit_dependencies: + workflow.add_explicit_dependancy(dep, node)
+ + + +
+[docs] +def make_single_hist(workflow, trig_file, veto_file, veto_name, + out_dir, bank_file=None, exclude=None, + require=None, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + secs = requirestr(workflow.cp.get_subsections('plot_hist'), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + files = FileList([]) + for tag in secs: + node = PlotExecutable(workflow.cp, 'plot_hist', + ifos=trig_file.ifo, + out_dir=out_dir, + tags=[tag] + tags).create_node() + if veto_file is not None: + node.add_opt('--segment-name', veto_name) + node.add_input_opt('--veto-file', veto_file) + node.add_input_opt('--trigger-file', trig_file) + if bank_file: + node.add_input_opt('--bank-file', bank_file) + node.new_output_file_opt(trig_file.segment, '.png', '--output-file') + workflow += node + files += node.output_files + return files
+ + + +
+[docs] +def make_binned_hist(workflow, trig_file, veto_file, veto_name, + out_dir, bank_file, exclude=None, + require=None, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + secs = requirestr(workflow.cp.get_subsections('plot_binnedhist'), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + files = FileList([]) + for tag in secs: + node = PlotExecutable(workflow.cp, 'plot_binnedhist', + ifos=trig_file.ifo, + out_dir=out_dir, + tags=[tag] + tags).create_node() + node.add_opt('--ifo', trig_file.ifo) + if veto_file is not None: + node.add_opt('--veto-segment-name', veto_name) + node.add_input_opt('--veto-file', veto_file) + node.add_input_opt('--trigger-file', trig_file) + node.add_input_opt('--bank-file', bank_file) + node.new_output_file_opt(trig_file.segment, '.png', '--output-file') + workflow += node + files += node.output_files + return files
+ + + +
+[docs] +def make_singles_plot(workflow, trig_files, bank_file, veto_file, veto_name, + out_dir, exclude=None, require=None, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + secs = requirestr(workflow.cp.get_subsections('plot_singles'), require) + secs = excludestr(secs, exclude) + secs = excludestr(secs, workflow.ifo_combinations) + files = FileList([]) + for tag in secs: + for trig_file in trig_files: + node = PlotExecutable(workflow.cp, 'plot_singles', + ifos=trig_file.ifo, + out_dir=out_dir, + tags=[tag] + tags).create_node() + + node.set_memory(15000) + node.add_input_opt('--bank-file', bank_file) + if veto_file is not None: + node.add_input_opt('--veto-file', veto_file) + node.add_opt('--segment-name', veto_name) + node.add_opt('--detector', trig_file.ifo) + node.add_input_opt('--single-trig-file', trig_file) + node.new_output_file_opt(trig_file.segment, '.png', '--output-file') + workflow += node + files += node.output_files + return files
+ + + +
+[docs] +def make_dq_flag_trigger_rate_plot(workflow, dq_file, dq_label, out_dir, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'plot_dq_flag_likelihood', + ifos=dq_file.ifo, out_dir=out_dir, + tags=tags).create_node() + node.add_input_opt('--dq-file', dq_file) + node.add_opt('--dq-label', dq_label) + node.add_opt('--ifo', dq_file.ifo) + node.new_output_file_opt(dq_file.segment, '.png', '--output-file') + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def make_dq_segment_table(workflow, dq_file, out_dir, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'page_dq_table', ifos=dq_file.ifo, + out_dir=out_dir, tags=tags).create_node() + node.add_input_opt('--dq-file', dq_file) + node.add_opt('--ifo', dq_file.ifo) + node.new_output_file_opt(dq_file.segment, '.html', '--output-file') + workflow += node + return node.output_files[0]
+ + + +
+[docs] +def make_template_bin_table(workflow, dq_file, out_dir, tags=None): + tags = [] if tags is None else tags + makedir(out_dir) + node = PlotExecutable(workflow.cp, 'page_template_bin_table', + ifos=dq_file.ifo, out_dir=out_dir, + tags=tags).create_node() + node.add_input_opt('--dq-file', dq_file) + node.add_opt('--ifo', dq_file.ifo) + node.new_output_file_opt(dq_file.segment, '.html', '--output-file') + workflow += node + return node.output_files[0]
\ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/psd.html b/latest/html/_modules/pycbc/workflow/psd.html new file mode 100644 index 00000000000..62e49b31974 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/psd.html @@ -0,0 +1,263 @@ pycbc.workflow.psd — PyCBC 2.5.dev2 documentation

Source code for pycbc.workflow.psd

+# Copyright (C) 2013  Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+"""This module is responsible for setting up PSD-related jobs in workflows.
+"""
+
+import logging
+
+from ligo.segments import segmentlist
+
+from pycbc.workflow.core import FileList, make_analysis_dir, Executable
+from pycbc.workflow.core import SegFile
+
+logger = logging.getLogger('pycbc.workflow.psd')
+
+
+class CalcPSDExecutable(Executable):
+    current_retention_level = Executable.ALL_TRIGGERS
+
+class MergePSDFiles(Executable):
+    current_retention_level = Executable.MERGED_TRIGGERS
+
+def chunks(l, n):
+    """ Yield n successive chunks from l.
+    """
+    newn = int(len(l) / n)
+    for i in range(0, n-1):
+        yield l[i*newn:i*newn+newn]
+    yield l[n*newn-newn:]
+
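
A quick standalone check of how the chunks helper above divides a list into n pieces (plain integers stand in for segments here); this assumes only that PyCBC is importable.

from pycbc.workflow.psd import chunks

items = list(range(10))
# n-1 chunks of len(items)//n elements each, with the remainder folded into the last chunk
print(list(chunks(items, 3)))   # [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
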
+
+[docs] +def merge_psds(workflow, files, ifo, out_dir, tags=None): + make_analysis_dir(out_dir) + tags = [] if not tags else tags + node = MergePSDFiles(workflow.cp, 'merge_psds', + ifos=ifo, out_dir=out_dir, + tags=tags).create_node() + node.add_input_list_opt('--psd-files', files) + node.new_output_file_opt(workflow.analysis_time, '.hdf', '--output-file') + workflow += node + return node.output_files[0]
+ + +
+[docs] +def setup_psd_calculate(workflow, frame_files, ifo, segments, + segment_name, out_dir, tags=None): + make_analysis_dir(out_dir) + tags = [] if not tags else tags + if workflow.cp.has_option_tags('workflow-psd', 'parallelization-factor', tags=tags): + num_parts = int(workflow.cp.get_opt_tags('workflow-psd', + 'parallelization-factor', + tags=tags)) + else: + num_parts = 1 + + # get rid of duplicate segments which happen when splitting the bank + segments = segmentlist(frozenset(segments)) + + segment_lists = list(chunks(segments, num_parts)) + + psd_files = FileList([]) + for i, segs in enumerate(segment_lists): + seg_file = SegFile.from_segment_list('%s_%s' %(segment_name, i), + segmentlist(segs), segment_name, ifo, + valid_segment=workflow.analysis_time, + extension='xml', directory=out_dir) + + psd_files += [make_psd_file(workflow, frame_files, seg_file, + segment_name, out_dir, + tags=tags + ['PART%s' % i])] + + return merge_psds(workflow, psd_files, ifo, out_dir, tags=tags)
+ + +
+[docs] +def make_psd_file(workflow, frame_files, segment_file, segment_name, out_dir, + tags=None): + make_analysis_dir(out_dir) + tags = [] if not tags else tags + exe = CalcPSDExecutable(workflow.cp, 'calculate_psd', + ifos=segment_file.ifo, out_dir=out_dir, + tags=tags) + node = exe.create_node() + node.add_input_opt('--analysis-segment-file', segment_file) + node.add_opt('--segment-name', segment_name) + + if frame_files and not exe.has_opt('frame-type'): + node.add_input_list_opt('--frame-files', frame_files) + + node.new_output_file_opt(workflow.analysis_time, '.hdf', '--output-file') + workflow += node + return node.output_files[0]
+ + +class AvgPSDExecutable(Executable): + current_retention_level = Executable.FINAL_RESULT + +
+[docs] +def make_average_psd(workflow, psd_files, out_dir, tags=None, + output_fmt='.txt'): + make_analysis_dir(out_dir) + tags = [] if tags is None else tags + node = AvgPSDExecutable(workflow.cp, 'average_psd', ifos=workflow.ifos, + out_dir=out_dir, tags=tags).create_node() + node.add_input_list_opt('--input-files', psd_files) + + if len(workflow.ifos) > 1: + node.new_output_file_opt(workflow.analysis_time, output_fmt, + '--detector-avg-file') + + node.new_multiifo_output_list_opt('--time-avg-file', workflow.ifos, + workflow.analysis_time, output_fmt, tags=tags) + + workflow += node + return node.output_files
+ + +# keep namespace clean +__all__ = ['make_psd_file', 'make_average_psd', 'setup_psd_calculate', 'merge_psds'] +
\ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/psdfiles.html b/latest/html/_modules/pycbc/workflow/psdfiles.html new file mode 100644 index 00000000000..028e8e5575e --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/psdfiles.html @@ -0,0 +1,279 @@ pycbc.workflow.psdfiles — PyCBC 2.5.dev2 documentation

Source code for pycbc.workflow.psdfiles

+# Copyright (C) 2015 Larne Pekowsky
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""
+This module is responsible for setting up the psd files used by CBC
+workflows.
+"""
+
+# FIXME: Is this module still relevant for any code? Can it be removed?
+
+import logging
+import configparser as ConfigParser
+
+from pycbc.workflow.core import FileList
+from pycbc.workflow.core import make_analysis_dir, resolve_url_to_file
+
+logger = logging.getLogger('pycbc.workflow.psdfiles')
+
+
+[docs] +def setup_psd_workflow(workflow, science_segs, datafind_outs, + output_dir=None, tags=None): + ''' + Setup static psd section of CBC workflow. At present this only supports pregenerated + psd files, in the future these could be created within the workflow. + + Parameters + ---------- + workflow: pycbc.workflow.core.Workflow + An instanced class that manages the constructed workflow. + science_segs : Keyed dictionary of ligo.segments.segmentlist objects + scienceSegs[ifo] holds the science segments to be analysed for each + ifo. + datafind_outs : pycbc.workflow.core.FileList + The file list containing the datafind files. + output_dir : path string + The directory where data products will be placed. + tags : list of strings + If given these tags are used to uniquely name and identify output files + that would be produced in multiple calls to this function. + + Returns + -------- + psd_files : pycbc.workflow.core.FileList + The FileList holding the psd files, 0 or 1 per ifo + ''' + if tags is None: + tags = [] + logger.info("Entering static psd module.") + make_analysis_dir(output_dir) + cp = workflow.cp + + # Parse for options in ini file. + try: + psdMethod = cp.get_opt_tags("workflow-psd", "psd-method", + tags) + except: + # Predefined PSD sare optional, just return an empty list if not + # provided. + return FileList([]) + + if psdMethod == "PREGENERATED_FILE": + logger.info("Setting psd from pre-generated file(s).") + psd_files = setup_psd_pregenerated(workflow, tags=tags) + else: + errMsg = "PSD method not recognized. Only " + errMsg += "PREGENERATED_FILE is currently supported." + raise ValueError(errMsg) + + logger.info("Leaving psd module.") + return psd_files
+ + + +
+[docs] +def setup_psd_pregenerated(workflow, tags=None): + ''' + Setup CBC workflow to use pregenerated psd files. + The file given in cp.get('workflow','pregenerated-psd-file-(ifo)') will + be used as the --psd-file argument to geom_nonspinbank, geom_aligned_bank + and pycbc_plot_psd_file. + + Parameters + ---------- + workflow: pycbc.workflow.core.Workflow + An instanced class that manages the constructed workflow. + tags : list of strings + If given these tags are used to uniquely name and identify output files + that would be produced in multiple calls to this function. + + Returns + -------- + psd_files : pycbc.workflow.core.FileList + The FileList holding the gating files + ''' + if tags is None: + tags = [] + psd_files = FileList([]) + + cp = workflow.cp + global_seg = workflow.analysis_time + file_attrs = {'segs': global_seg, 'tags': tags} + + # Check for one psd for all ifos + try: + pre_gen_file = cp.get_opt_tags('workflow-psd', + 'psd-pregenerated-file', tags) + file_attrs['ifos'] = workflow.ifos + curr_file = resolve_url_to_file(pre_gen_file, attrs=file_attrs) + psd_files.append(curr_file) + except ConfigParser.Error: + # Check for one psd per ifo + for ifo in workflow.ifos: + try: + pre_gen_file = cp.get_opt_tags('workflow-psd', + 'psd-pregenerated-file-%s' % ifo.lower(), + tags) + file_attrs['ifos'] = [ifo] + curr_file = resolve_url_to_file(pre_gen_file, attrs=file_attrs) + psd_files.append(curr_file) + + except ConfigParser.Error: + # It's unlikely, but not impossible, that only some ifos + # will have pregenerated PSDs + logger.warning("No psd file specified for IFO %s.", ifo) + pass + + return psd_files
\ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/segment.html b/latest/html/_modules/pycbc/workflow/segment.html new file mode 100644 index 00000000000..09088c8d0ed --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/segment.html @@ -0,0 +1,612 @@ pycbc.workflow.segment — PyCBC 2.5.dev2 documentation

Source code for pycbc.workflow.segment

+# Copyright (C) 2013  Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+This module is responsible for setting up the segment generation stage of
+workflows. For details about this module and its capabilities see here:
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/segments.html
+"""
+
+import os
+import shutil
+import itertools
+import logging
+
+from ligo import segments
+from ligo.segments import utils as segmentsUtils
+
+from pycbc.workflow.core import SegFile, make_analysis_dir
+from pycbc.workflow.core import resolve_url
+
+logger = logging.getLogger('pycbc.workflow.segment')
+
+
+[docs] +def save_veto_definer(cp, out_dir, tags=None): + """ Retrieve the veto definer file and save it locally + + Parameters + ----------- + cp : ConfigParser instance + out_dir : path + tags : list of strings + Used to retrieve subsections of the ini file for + configuration options. + """ + if tags is None: + tags = [] + make_analysis_dir(out_dir) + veto_def_url = cp.get_opt_tags("workflow-segments", + "segments-veto-definer-url", tags) + veto_def_base_name = os.path.basename(veto_def_url) + veto_def_new_path = os.path.abspath(os.path.join(out_dir, + veto_def_base_name)) + # Don't need to do this if already done + resolve_url(veto_def_url,out_dir) + + # and update location + cp.set("workflow-segments", "segments-veto-definer-file", veto_def_new_path) + return veto_def_new_path
+ + + +
+[docs] +def get_segments_file(workflow, name, option_name, out_dir, tags=None): + """Get cumulative segments from option name syntax for each ifo. + + Use syntax of configparser string to define the resulting segment_file + e.x. option_name = +up_flag1,+up_flag2,+up_flag3,-down_flag1,-down_flag2 + Each ifo may have a different string and is stored separately in the file. + Flags which add time must precede flags which subtract time. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + name: string + Name of the segment list being created + option_name: str + Name of option in the associated config parser to get the flag list + tags : list of strings + Used to retrieve subsections of the ini file for + configuration options. + + returns + -------- + seg_file: pycbc.workflow.SegFile + SegFile intance that points to the segment xml file on disk. + """ + from pycbc.dq import query_str + make_analysis_dir(out_dir) + cp = workflow.cp + start = workflow.analysis_time[0] + end = workflow.analysis_time[1] + + if tags is None: + tags = [] + + # Check for veto definer file + veto_definer = None + if cp.has_option("workflow-segments", "segments-veto-definer-url"): + veto_definer = save_veto_definer(workflow.cp, out_dir) + + # Check for provided server + server = "https://segments.ligo.org" + if cp.has_option_tags("workflow-segments", "segments-database-url", tags): + server = cp.get_opt_tags("workflow-segments", + "segments-database-url", tags) + + if cp.has_option_tags("workflow-segments", "segments-source", tags): + source = cp.get_opt_tags("workflow-segments", "segments-source", tags) + else: + source = "any" + + if source == "file": + local_file_path = \ + resolve_url(cp.get_opt_tag("workflow-segments", + option_name+"-file", tags)) + pfn = os.path.join(out_dir, os.path.basename(local_file_path)) + shutil.move(local_file_path, pfn) + return SegFile.from_segment_xml(pfn) + + segs = {} + for ifo in workflow.ifos: + flag_str = cp.get_opt_tags("workflow-segments", option_name, [ifo]) + key = ifo + ':' + name + + if flag_str.upper() == "OFF": + segs[key] = segments.segmentlist([]) + elif flag_str.upper() == "ON": + all_seg = segments.segment([start, end]) + segs[key] = segments.segmentlist([all_seg]) + else: + segs[key] = query_str(ifo, flag_str, start, end, + source=source, server=server, + veto_definer=veto_definer) + logger.info("%s: got %s flags", ifo, option_name) + + return SegFile.from_segment_list_dict(name, segs, + extension='.xml', + valid_segment=workflow.analysis_time, + directory=out_dir)
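
For the special-cased 'ON' and 'OFF' flag strings above no database query is made; the per-detector entries are built directly with ligo.segments and keyed as ifo:name. A sketch with hypothetical GPS times:

from ligo import segments

start, end = 1187000000, 1187100000   # hypothetical analysis span
segs = {}
segs['H1:science'] = segments.segmentlist([])                              # flag string 'OFF'
segs['L1:science'] = segments.segmentlist([segments.segment(start, end)])  # flag string 'ON'
print(abs(segs['L1:science'][0]))     # 100000 seconds of analysable time
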
+ + + +
+[docs] +def get_triggered_coherent_segment(workflow, sciencesegs): + """ + Construct the coherent network on and off source segments. Can switch to + construction of segments for a single IFO search when coherent segments + are insufficient for a search. + + Parameters + ----------- + workflow : pycbc.workflow.core.Workflow + The workflow instance that the calculated segments belong to. + sciencesegs : dict + Dictionary of all science segments within analysis time. + + Returns + -------- + onsource : ligo.segments.segmentlistdict + A dictionary containing the on source segments for network IFOs + + offsource : ligo.segments.segmentlistdict + A dictionary containing the off source segments for network IFOs + """ + + # Load parsed workflow config options + cp = workflow.cp + triggertime = int(os.path.basename(cp.get('workflow', 'trigger-time'))) + minduration = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'min-duration'))) + maxduration = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'max-duration'))) + onbefore = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'on-before'))) + onafter = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'on-after'))) + padding = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'pad-data'))) + if cp.has_option("workflow-condition_strain", "do-gating"): + padding += int(os.path.basename(cp.get("condition_strain", + "pad-data"))) + quanta = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'quanta'))) + + # Check available data segments meet criteria specified in arguments + commonsegs = sciencesegs.extract_common(sciencesegs.keys()) + offsrclist = commonsegs[tuple(commonsegs.keys())[0]] + if len(offsrclist) > 1: + logger.info("Removing network segments that do not contain trigger " + "time") + for seg in offsrclist: + if triggertime in seg: + offsrc = seg + else: + offsrc = offsrclist[0] + + if abs(offsrc) < minduration + 2 * padding: + fail = segments.segment([triggertime - minduration / 2. - padding, + triggertime + minduration / 2. + padding]) + logger.warning("Available network segment shorter than minimum " + "allowed duration.") + return None, fail + + # Will segment duration be the maximum desired length or not? + if abs(offsrc) >= maxduration + 2 * padding: + logger.info("Available network science segment duration (%ds) is " + "greater than the maximum allowed segment length (%ds). 
" + "Truncating...", abs(offsrc), maxduration) + else: + logger.info("Available network science segment duration (%ds) is " + "less than the maximum allowed segment length (%ds).", + abs(offsrc), maxduration) + + logger.info("%ds of padding applied at beginning and end of segment.", + padding) + + + # Construct on-source + onstart = triggertime - onbefore + onend = triggertime + onafter + oncentre = onstart + ((onbefore + onafter) / 2) + onsrc = segments.segment(onstart, onend) + logger.info("Constructed ON-SOURCE: duration %ds (%ds before to %ds after" + " trigger).", abs(onsrc), triggertime - onsrc[0], + onsrc[1] - triggertime) + onsrc = segments.segmentlist([onsrc]) + + # Maximal, centred coherent network segment + idealsegment = segments.segment(int(oncentre - padding - + 0.5 * maxduration), + int(oncentre + padding + + 0.5 * maxduration)) + + # Construct off-source + if (idealsegment in offsrc): + offsrc = idealsegment + + elif idealsegment[1] not in offsrc: + offsrc &= segments.segment(offsrc[1] - maxduration - 2 * padding, + offsrc[1]) + + elif idealsegment[0] not in offsrc: + offsrc &= segments.segment(offsrc[0], + offsrc[0] + maxduration + 2 * padding) + + # Trimming off-source + excess = (abs(offsrc) - 2 * padding) % quanta + if excess != 0: + logger.info("Trimming %ds excess time to make OFF-SOURCE duration a " + "multiple of %ds", excess, quanta) + offset = (offsrc[0] + abs(offsrc) / 2.) - oncentre + if 2 * abs(offset) > excess: + if offset < 0: + offsrc &= segments.segment(offsrc[0] + excess, + offsrc[1]) + elif offset > 0: + offsrc &= segments.segment(offsrc[0], + offsrc[1] - excess) + assert abs(offsrc) % quanta == 2 * padding + else: + logger.info("This will make OFF-SOURCE symmetrical about trigger " + "time.") + start = int(offsrc[0] - offset + excess / 2) + end = int(offsrc[1] - offset - round(float(excess) / 2)) + offsrc = segments.segment(start, end) + assert abs(offsrc) % quanta == 2 * padding + + logger.info("Constructed OFF-SOURCE: duration %ds (%ds before to %ds " + "after trigger).", abs(offsrc) - 2 * padding, + triggertime - offsrc[0] - padding, + offsrc[1] - triggertime - padding) + offsrc = segments.segmentlist([offsrc]) + + # Put segments into segmentlistdicts + onsource = segments.segmentlistdict() + offsource = segments.segmentlistdict() + ifos = '' + for iifo in sciencesegs.keys(): + ifos += str(iifo) + onsource[iifo] = onsrc + offsource[iifo] = offsrc + + return onsource, offsource
+ + + +
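
The on-source window built by get_triggered_coherent_segment above is a fixed interval around the trigger time; a worked example with hypothetical [workflow] and [workflow-exttrig_segments] values:

from ligo import segments

triggertime, onbefore, onafter = 1187008882, 5, 1   # hypothetical config values
onsrc = segments.segment(triggertime - onbefore, triggertime + onafter)
print(abs(onsrc))                                       # 6 second on-source duration
print(triggertime - onsrc[0], onsrc[1] - triggertime)   # 5 1
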
+[docs] +def generate_triggered_segment(workflow, out_dir, sciencesegs): + cp = workflow.cp + + if cp.has_option("workflow", "allow-single-ifo-search"): + min_ifos = 1 + else: + min_ifos = 2 + + triggertime = int(os.path.basename(cp.get('workflow', 'trigger-time'))) + minbefore = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'min-before'))) + minafter = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'min-after'))) + minduration = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'min-duration'))) + onbefore = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'on-before'))) + onafter = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'on-after'))) + padding = int(os.path.basename(cp.get('workflow-exttrig_segments', + 'pad-data'))) + if cp.has_option("workflow-condition_strain", "do-gating"): + padding += int(os.path.basename(cp.get("condition_strain", + "pad-data"))) + + # How many IFOs meet minimum data requirements? + min_seg = segments.segment(triggertime - onbefore - minbefore - padding, + triggertime + onafter + minafter + padding) + scisegs = segments.segmentlistdict({ifo: sciencesegs[ifo] + for ifo in sciencesegs.keys() if min_seg in sciencesegs[ifo] + and abs(sciencesegs[ifo]) >= minduration}) + # Find highest number of IFOs that give an acceptable coherent segment + num_ifos = len(scisegs) + while num_ifos >= min_ifos: + # Consider all combinations for a given number of IFOs + ifo_combos = itertools.combinations(scisegs, num_ifos) + onsource = {} + offsource = {} + for ifo_combo in ifo_combos: + ifos = "".join(ifo_combo) + logger.info("Calculating optimal segment for %s.", ifos) + segs = segments.segmentlistdict({ifo: scisegs[ifo] + for ifo in ifo_combo}) + onsource[ifos], offsource[ifos] = get_triggered_coherent_segment(\ + workflow, segs) + + # Which combination gives the longest coherent segment? 
+ valid_combs = [iifos for iifos in onsource.keys() + if onsource[iifos] is not None] + + if len(valid_combs) == 0: + # If none, offsource dict will contain segments showing criteria + # that have not been met, for use in plotting + if len(offsource.keys()) > 1: + seg_lens = {ifos: abs(next(offsource[ifos].values())[0]) + for ifos in offsource.keys()} + best_comb = max(seg_lens.iterkeys(), + key=(lambda key: seg_lens[key])) + else: + best_comb = tuple(offsource.keys())[0] + logger.info("No combination of %d IFOs with suitable science " + "segment.", num_ifos) + else: + # Identify best analysis segment + if len(valid_combs) > 1: + seg_lens = {ifos: abs(next(offsource[ifos].values())[0]) + for ifos in valid_combs} + best_comb = max(seg_lens.iterkeys(), + key=(lambda key: seg_lens[key])) + else: + best_comb = valid_combs[0] + logger.info("Calculated science segments.") + + offsourceSegfile = os.path.join(out_dir, "offSourceSeg.txt") + segmentsUtils.tosegwizard(open(offsourceSegfile, "w"), + list(offsource[best_comb].values())[0]) + + onsourceSegfile = os.path.join(out_dir, "onSourceSeg.txt") + segmentsUtils.tosegwizard(open(onsourceSegfile, "w"), + list(onsource[best_comb].values())[0]) + + bufferleft = int(cp.get('workflow-exttrig_segments', + 'num-buffer-before')) + bufferright = int(cp.get('workflow-exttrig_segments', + 'num-buffer-after')) + onlen = onbefore + onafter + bufferSegment = segments.segment(\ + triggertime - onbefore - bufferleft * onlen, + triggertime + onafter + bufferright * onlen) + bufferSegfile = os.path.join(out_dir, "bufferSeg.txt") + segmentsUtils.tosegwizard(open(bufferSegfile, "w"), + segments.segmentlist([bufferSegment])) + + return onsource[best_comb], offsource[best_comb], bufferSegment + + num_ifos -= 1 + + logger.warning("No suitable science segments available.") + try: + return None, offsource[best_comb], None + except UnboundLocalError: + return None, min_seg, None
+ + +
+[docs] +def get_flag_segments_file(workflow, name, option_name, out_dir, tags=None): + """Get segments from option name syntax for each ifo for indivudal flags. + + Use syntax of configparser string to define the resulting segment_file + e.x. option_name = +up_flag1,+up_flag2,+up_flag3,-down_flag1,-down_flag2 + Each ifo may have a different string and is stored separately in the file. + Each flag is stored separately in the file. + Flags which add time must precede flags which subtract time. + + Parameters + ---------- + workflow: pycbc.workflow.Workflow + name: string + Name of the segment list being created + option_name: str + Name of option in the associated config parser to get the flag list + tags : list of strings + Used to retrieve subsections of the ini file for + configuration options. + + returns + -------- + seg_file: pycbc.workflow.SegFile + SegFile intance that points to the segment xml file on disk. + """ + from pycbc.dq import query_str + make_analysis_dir(out_dir) + cp = workflow.cp + start = workflow.analysis_time[0] + end = workflow.analysis_time[1] + + if tags is None: + tags = [] + + # Check for veto definer file + veto_definer = None + if cp.has_option("workflow-segments", "segments-veto-definer-url"): + veto_definer = save_veto_definer(workflow.cp, out_dir) + + # Check for provided server + server = "https://segments.ligo.org" + if cp.has_option_tags("workflow-segments", "segments-database-url", tags): + server = cp.get_opt_tags("workflow-segments", + "segments-database-url", tags) + + source = "any" + if cp.has_option_tags("workflow-segments", "segments-source", tags): + source = cp.get_opt_tags("workflow-segments", "segments-source", tags) + if source == "file": + local_file_path = \ + resolve_url(cp.get_opt_tags("workflow-segments", + option_name+"-file", tags)) + pfn = os.path.join(out_dir, os.path.basename(local_file_path)) + shutil.move(local_file_path, pfn) + return SegFile.from_segment_xml(pfn) + + segs = {} + for ifo in workflow.ifos: + if cp.has_option_tags("workflow-segments", option_name, [ifo]): + flag_str = cp.get_opt_tags("workflow-segments", option_name, [ifo]) + flag_list = flag_str.split(',') + for flag in flag_list: + flag_name = flag[1:] + if len(flag_name.split(':')) > 1: + flag_name = name.split(':')[1] + key = ifo + ':' + flag_name + segs[key] = query_str(ifo, flag, start, end, + source=source, server=server, + veto_definer=veto_definer) + logger.info("%s: got %s segments", ifo, flag_name) + else: + logger.info("%s: no segments requested", ifo) + + return SegFile.from_segment_list_dict(name, segs, + extension='.xml', + valid_segment=workflow.analysis_time, + directory=out_dir)
\ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/splittable.html b/latest/html/_modules/pycbc/workflow/splittable.html new file mode 100644 index 00000000000..d2d8aa1091c --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/splittable.html @@ -0,0 +1,317 @@ pycbc.workflow.splittable — PyCBC 2.5.dev2 documentation

Source code for pycbc.workflow.splittable

+# Copyright (C) 2013  Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""
+This module is responsible for setting up the splitting output files stage of
+workflows. For details about this module and its capabilities see here:
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/NOTYETCREATED.html
+"""
+
+import os
+import logging
+
+from pycbc.workflow.core import FileList, make_analysis_dir
+from pycbc.workflow.jobsetup import (PycbcSplitBankExecutable,
+        PycbcSplitBankXmlExecutable, PycbcSplitInspinjExecutable,
+        PycbcHDFSplitInjExecutable)
+
+logger = logging.getLogger('pycbc.workflow.splittable')
+
+
+[docs] +def select_splitfilejob_instance(curr_exe): + """ + This function returns the class that is appropriate for splitting an + output file up within the workflow (e.g. splitbank). + + Parameters + ---------- + curr_exe : string + The name of the Executable that is being used. + + Returns + -------- + exe class : sub-class of pycbc.workflow.core.Executable + The class that holds the utility functions appropriate + for the given Executable. This class **must** contain + * exe_class.create_job() + and the job returned by this **must** contain + * job.create_node() + """ + if curr_exe == 'pycbc_hdf5_splitbank': + exe_class = PycbcSplitBankExecutable + elif curr_exe == 'pycbc_splitbank': + exe_class = PycbcSplitBankXmlExecutable + elif curr_exe == 'pycbc_split_inspinj': + exe_class = PycbcSplitInspinjExecutable + elif curr_exe == 'pycbc_hdf_splitinj': + exe_class = PycbcHDFSplitInjExecutable + else: + # Should we try some sort of default class?? + err_string = "No class exists for Executable %s" %(curr_exe,) + raise NotImplementedError(err_string) + + return exe_class
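
A minimal usage sketch of the dispatch function above, assuming only that PyCBC is importable; the executable name must be one of the four recognised values:

from pycbc.workflow.splittable import select_splitfilejob_instance

cls = select_splitfilejob_instance('pycbc_hdf5_splitbank')
print(cls.__name__)   # PycbcSplitBankExecutable
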
+ + +
+[docs] +def setup_splittable_workflow(workflow, input_tables, out_dir=None, tags=None): + ''' + This function aims to be the gateway for code that is responsible for taking + some input file containing some table, and splitting into multiple files + containing different parts of that table. For now the only supported operation + is using lalapps_splitbank to split a template bank xml file into multiple + template bank xml files. + + Parameters + ----------- + workflow : pycbc.workflow.core.Workflow + The Workflow instance that the jobs will be added to. + input_tables : pycbc.workflow.core.FileList + The input files to be split up. + out_dir : path + The directory in which output will be written. + + Returns + -------- + split_table_outs : pycbc.workflow.core.FileList + The list of split up files as output from this job. + ''' + if tags is None: + tags = [] + logger.info("Entering split output files module.") + make_analysis_dir(out_dir) + # Parse for options in .ini file + splitMethod = workflow.cp.get_opt_tags("workflow-splittable", + "splittable-method", tags) + + if splitMethod == "IN_WORKFLOW": + # Scope here for choosing different options + logger.info("Adding split output file jobs to workflow.") + split_table_outs = setup_splittable_dax_generated(workflow, + input_tables, out_dir, tags) + elif splitMethod == "NOOP": + # Probably better not to call the module at all, but this option will + # return the input file list. + split_table_outs = input_tables + else: + errMsg = "Splittable method not recognized. Must be one of " + errMsg += "IN_WORKFLOW or NOOP." + raise ValueError(errMsg) + + logger.info("Leaving split output files module.") + return split_table_outs
+ + +
+[docs] +def setup_splittable_dax_generated(workflow, input_tables, out_dir, tags): + ''' + Function for setting up the splitting jobs as part of the workflow. + + Parameters + ----------- + workflow : pycbc.workflow.core.Workflow + The Workflow instance that the jobs will be added to. + input_tables : pycbc.workflow.core.FileList + The input files to be split up. + out_dir : path + The directory in which output will be written. + + Returns + -------- + split_table_outs : pycbc.workflow.core.FileList + The list of split up files as output from this job. + ''' + cp = workflow.cp + + # Get values from ini file + try: + num_splits = cp.get_opt_tags("workflow-splittable", + "splittable-num-banks", tags) + except BaseException: + inj_interval = int(cp.get_opt_tags("workflow-splittable", + "splitinjtable-interval", tags)) + if cp.has_option_tags("em_bright_filter", "max-keep", tags) and \ + cp.has_option("workflow-injections", "em-bright-only"): + num_injs = int(cp.get_opt_tags("em_bright_filter", "max-keep", + tags)) + else: + # This needed to be changed from num-injs to ninjections in order + # to work properly with pycbc_create_injections + num_injs = int(cp.get_opt_tags("workflow-injections", + "ninjections", tags)) + inj_tspace = float(abs(workflow.analysis_time)) / num_injs + num_splits = int(inj_interval // inj_tspace) + 1 + + split_exe_tag = cp.get_opt_tags("workflow-splittable", + "splittable-exe-tag", tags) + split_exe = os.path.basename(cp.get("executables", split_exe_tag)) + # Select the appropriate class + exe_class = select_splitfilejob_instance(split_exe) + + # Set up output structure + out_file_groups = FileList([]) + + # Set up the condorJob class for the current executable + curr_exe_job = exe_class(workflow.cp, split_exe_tag, num_splits, + out_dir=out_dir) + + for input in input_tables: + node = curr_exe_job.create_node(input, tags=tags) + workflow.add_node(node) + out_file_groups += node.output_files + return out_file_groups
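
When splittable-num-banks is not set, setup_splittable_dax_generated above derives the number of injection-file splits from the injection spacing; the same arithmetic restated with made-up numbers:

analysis_seconds = 1000000   # abs(workflow.analysis_time), hypothetical
num_injs = 5000              # [workflow-injections] ninjections, hypothetical
inj_interval = 2000          # [workflow-splittable] splitinjtable-interval, hypothetical

inj_tspace = float(analysis_seconds) / num_injs    # 200.0 s between injections
num_splits = int(inj_interval // inj_tspace) + 1   # 11 output files
print(inj_tspace, num_splits)
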
\ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/tmpltbank.html b/latest/html/_modules/pycbc/workflow/tmpltbank.html new file mode 100644 index 00000000000..a9ca52b2a11 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/tmpltbank.html @@ -0,0 +1,476 @@ pycbc.workflow.tmpltbank — PyCBC 2.5.dev2 documentation

Source code for pycbc.workflow.tmpltbank

+# Copyright (C) 2013  Ian Harry
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+
+"""
+This module is responsible for setting up the template bank stage of CBC
+workflows. For details about this module and its capabilities see here:
+https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/template_bank.html
+"""
+
+
+import os
+import logging
+import configparser as ConfigParser
+
+import pycbc
+from pycbc.workflow.core import FileList
+from pycbc.workflow.core import make_analysis_dir, resolve_url_to_file
+from pycbc.workflow.jobsetup import select_tmpltbank_class, sngl_ifo_job_setup
+
+logger = logging.getLogger('pycbc.workflow.tmpltbank')
+
+
+
+[docs] +def setup_tmpltbank_workflow(workflow, science_segs, datafind_outs, + output_dir=None, psd_files=None, tags=None, + return_format=None): + ''' + Setup template bank section of CBC workflow. This function is responsible + for deciding which of the various template bank workflow generation + utilities should be used. + + Parameters + ---------- + workflow: pycbc.workflow.core.Workflow + An instanced class that manages the constructed workflow. + science_segs : Keyed dictionary of ligo.segments.segmentlist objects + scienceSegs[ifo] holds the science segments to be analysed for each + ifo. + datafind_outs : pycbc.workflow.core.FileList + The file list containing the datafind files. + output_dir : path string + The directory where data products will be placed. + psd_files : pycbc.workflow.core.FileList + The file list containing predefined PSDs, if provided. + tags : list of strings + If given these tags are used to uniquely name and identify output files + that would be produced in multiple calls to this function. + + Returns + -------- + tmplt_banks : pycbc.workflow.core.FileList + The FileList holding the details of all the template bank jobs. + ''' + if tags is None: + tags = [] + logger.info("Entering template bank generation module.") + make_analysis_dir(output_dir) + cp = workflow.cp + + # Parse for options in ini file + tmpltbankMethod = cp.get_opt_tags("workflow-tmpltbank", "tmpltbank-method", + tags) + + # There can be a large number of different options here, for e.g. to set + # up fixed bank, or maybe something else + if tmpltbankMethod == "PREGENERATED_BANK": + logger.info("Setting template bank from pre-generated bank(s).") + tmplt_banks = setup_tmpltbank_pregenerated(workflow, tags=tags) + # Else we assume template banks will be generated in the workflow + elif tmpltbankMethod == "WORKFLOW_INDEPENDENT_IFOS": + logger.info("Adding template bank jobs to workflow.") + tmplt_banks = setup_tmpltbank_dax_generated(workflow, science_segs, + datafind_outs, output_dir, tags=tags, + psd_files=psd_files) + elif tmpltbankMethod == "WORKFLOW_INDEPENDENT_IFOS_NODATA": + logger.info("Adding template bank jobs to workflow.") + tmplt_banks = setup_tmpltbank_without_frames(workflow, output_dir, + tags=tags, independent_ifos=True, + psd_files=psd_files) + elif tmpltbankMethod == "WORKFLOW_NO_IFO_VARIATION_NODATA": + logger.info("Adding template bank jobs to workflow.") + tmplt_banks = setup_tmpltbank_without_frames(workflow, output_dir, + tags=tags, independent_ifos=False, + psd_files=psd_files) + else: + errMsg = "Template bank method not recognized. Must be either " + errMsg += "PREGENERATED_BANK, WORKFLOW_INDEPENDENT_IFOS " + errMsg += "or WORKFLOW_INDEPENDENT_IFOS_NODATA." + raise ValueError(errMsg) + + # Check the format of the input template bank file and return it in + # the format requested as per return_format, provided a conversion + # between the two specific formats has been implemented. Currently, + # a conversion from xml.gz or xml to hdf is supported, but not vice + # versa. If a return_format is not specified the function returns + # the bank in the format as it was inputted. 
+ tmplt_bank_filename=tmplt_banks[0].name + ext = tmplt_bank_filename.split('.', 1)[1] + logger.info("Input bank is a %s file", ext) + if return_format is None : + tmplt_banks_return = tmplt_banks + elif return_format in ('hdf', 'h5', 'hdf5'): + if ext in ('hdf', 'h5', 'hdf5') or ext in ('xml.gz' , 'xml'): + tmplt_banks_return = pycbc.workflow.convert_bank_to_hdf(workflow, + tmplt_banks, "bank") + else : + if ext == return_format: + tmplt_banks_return = tmplt_banks + else: + raise NotImplementedError("{0} to {1} conversion is not " + "supported.".format(ext, return_format)) + logger.info("Leaving template bank generation module.") + return tmplt_banks_return
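
The return_format handling above keys on everything after the first dot of the bank file name, so compound extensions such as xml.gz survive intact; for example (the file names are hypothetical):

for name in ('H1L1-BANK.hdf', 'H1L1-BANK.xml.gz'):
    ext = name.split('.', 1)[1]
    print(name, '->', ext)
# H1L1-BANK.hdf -> hdf
# H1L1-BANK.xml.gz -> xml.gz
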
+ + +
+[docs]
+def setup_tmpltbank_dax_generated(workflow, science_segs, datafind_outs,
+                                  output_dir, tags=None,
+                                  psd_files=None):
+    '''
+    Setup template bank jobs that are generated as part of the CBC workflow.
+    This function will add numerous jobs to the CBC workflow using
+    configuration options from the .ini file. The following executables are
+    currently supported:
+
+    * lalapps_tmpltbank
+    * pycbc_geom_nonspin_bank
+
+    Parameters
+    ----------
+    workflow: pycbc.workflow.core.Workflow
+        An instanced class that manages the constructed workflow.
+    science_segs : Keyed dictionary of ligo.segments.segmentlist objects
+        scienceSegs[ifo] holds the science segments to be analysed for each
+        ifo.
+    datafind_outs : pycbc.workflow.core.FileList
+        The file list containing the datafind files.
+    output_dir : path string
+        The directory where data products will be placed.
+    tags : list of strings
+        If given these tags are used to uniquely name and identify output files
+        that would be produced in multiple calls to this function.
+    psd_files : pycbc.workflow.core.FileList
+        The file list containing predefined PSDs, if provided.
+
+    Returns
+    --------
+    tmplt_banks : pycbc.workflow.core.FileList
+        The FileList holding the details of all the template bank jobs.
+    '''
+    if tags is None:
+        tags = []
+    cp = workflow.cp
+    # Need to get the exe to figure out what sections are analysed, what is
+    # discarded etc. This should *not* be hardcoded, so using a new executable
+    # will require a bit of effort here ....
+
+    ifos = science_segs.keys()
+    tmplt_bank_exe = os.path.basename(cp.get('executables', 'tmpltbank'))
+    # Select the appropriate class
+    exe_class = select_tmpltbank_class(tmplt_bank_exe)
+
+    # Set up class for holding the banks
+    tmplt_banks = FileList([])
+
+    for ifo in ifos:
+        job_instance = exe_class(workflow.cp, 'tmpltbank', ifo=ifo,
+                                 out_dir=output_dir,
+                                 tags=tags)
+        # Check for the write_psd flag
+        if cp.has_option_tags("workflow-tmpltbank", "tmpltbank-write-psd-file",
+                              tags):
+            job_instance.write_psd = True
+        else:
+            job_instance.write_psd = False
+
+        sngl_ifo_job_setup(workflow, ifo, tmplt_banks, job_instance,
+                           science_segs[ifo], datafind_outs,
+                           allow_overlap=True)
+    return tmplt_banks
+ + +
+[docs]
+def setup_tmpltbank_without_frames(workflow, output_dir,
+                                   tags=None, independent_ifos=False,
+                                   psd_files=None):
+    '''
+    Setup CBC workflow to use a template bank (or banks) that are generated in
+    the workflow, but do not use the data to estimate a PSD, and therefore do
+    not vary over the duration of the workflow. This can either generate one
+    bank that is valid for all ifos at all times, or multiple banks that are
+    valid only for a single ifo at all times (one bank per ifo).
+
+    Parameters
+    ----------
+    workflow: pycbc.workflow.core.Workflow
+        An instanced class that manages the constructed workflow.
+    output_dir : path string
+        The directory where the template bank outputs will be placed.
+    tags : list of strings
+        If given these tags are used to uniquely name and identify output files
+        that would be produced in multiple calls to this function.
+    independent_ifos : Boolean, optional (default=False)
+        If True this will produce one template bank per ifo. If False
+        there will be one template bank to cover all ifos.
+    psd_files : pycbc.workflow.core.FileList
+        The file list containing predefined PSDs, if provided.
+
+    Returns
+    --------
+    tmplt_banks : pycbc.workflow.core.FileList
+        The FileList holding the details of the template bank(s).
+    '''
+    if tags is None:
+        tags = []
+    cp = workflow.cp
+    # Need to get the exe to figure out what sections are analysed, what is
+    # discarded etc. This should *not* be hardcoded, so using a new executable
+    # will require a bit of effort here ....
+
+    ifos = workflow.ifos
+    fullSegment = workflow.analysis_time
+
+    tmplt_bank_exe = os.path.basename(cp.get('executables', 'tmpltbank'))
+    # Cannot use lalapps_tmpltbank with this
+    if tmplt_bank_exe == 'lalapps_tmpltbank':
+        errMsg = "lalapps_tmpltbank cannot be used to generate template banks "
+        errMsg += "without using frames. Try another code."
+        raise ValueError(errMsg)
+
+    # Select the appropriate class
+    exe_instance = select_tmpltbank_class(tmplt_bank_exe)
+
+    tmplt_banks = FileList([])
+
+    # Make the distinction between one bank for all ifos and one bank per ifo
+    if independent_ifos:
+        ifoList = [ifo for ifo in ifos]
+    else:
+        ifoList = [[ifo for ifo in ifos]]
+
+    # Check for the write_psd flag
+    if cp.has_option_tags("workflow-tmpltbank", "tmpltbank-write-psd-file",
+                          tags):
+        exe_instance.write_psd = True
+    else:
+        exe_instance.write_psd = False
+
+    for ifo in ifoList:
+        job_instance = exe_instance(workflow.cp, 'tmpltbank', ifo=ifo,
+                                    out_dir=output_dir,
+                                    tags=tags,
+                                    psd_files=psd_files)
+        node = job_instance.create_nodata_node(fullSegment)
+        workflow.add_node(node)
+        tmplt_banks += node.output_files
+
+    return tmplt_banks
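+
+# Self-contained illustration of the ifo grouping used above (the detector
+# names are illustrative only): with independent_ifos=True every detector
+# gets its own bank job, otherwise a single job covers all detectors.
+ifos = ['H1', 'L1']
+
+per_ifo = [ifo for ifo in ifos]      # -> ['H1', 'L1']: one bank job per ifo
+combined = [[ifo for ifo in ifos]]   # -> [['H1', 'L1']]: one job for all ifos
+
+for ifo_group in per_ifo:
+    print("bank job for", ifo_group)   # 'H1', then 'L1'
+for ifo_group in combined:
+    print("bank job for", ifo_group)   # ['H1', 'L1']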
+ + +
+[docs]
+def setup_tmpltbank_pregenerated(workflow, tags=None):
+    '''
+    Setup CBC workflow to use a pregenerated template bank.
+    The bank given by the 'tmpltbank-pregenerated-bank' option in the
+    [workflow-tmpltbank] section will be used as the input file for all
+    matched-filtering jobs. If this option is present, the workflow will
+    assume that it should be used and not generate template banks within
+    the workflow.
+
+    Parameters
+    ----------
+    workflow: pycbc.workflow.core.Workflow
+        An instanced class that manages the constructed workflow.
+    tags : list of strings
+        If given these tags are used to uniquely name and identify output files
+        that would be produced in multiple calls to this function.
+
+    Returns
+    --------
+    tmplt_banks : pycbc.workflow.core.FileList
+        The FileList holding the details of the template bank.
+    '''
+    if tags is None:
+        tags = []
+    # Currently this uses the *same* fixed bank for all ifos.
+    # Maybe we want to add capability to analyse separate banks in all ifos?
+
+    # Set up class for holding the banks
+    tmplt_banks = FileList([])
+
+    cp = workflow.cp
+    global_seg = workflow.analysis_time
+    file_attrs = {'segs': global_seg, 'tags': tags}
+
+    try:
+        # First check if we have a bank for all ifos
+        pre_gen_bank = cp.get_opt_tags('workflow-tmpltbank',
+                                       'tmpltbank-pregenerated-bank', tags)
+        file_attrs['ifos'] = workflow.ifos
+        curr_file = resolve_url_to_file(pre_gen_bank, attrs=file_attrs)
+        tmplt_banks.append(curr_file)
+    except ConfigParser.Error:
+        # Okay then I must have banks for each ifo
+        for ifo in workflow.ifos:
+            try:
+                pre_gen_bank = cp.get_opt_tags('workflow-tmpltbank',
+                                               'tmpltbank-pregenerated-bank-%s'
+                                               % ifo.lower(), tags)
+                file_attrs['ifos'] = [ifo]
+                curr_file = resolve_url_to_file(pre_gen_bank, attrs=file_attrs)
+                tmplt_banks.append(curr_file)
+
+            except ConfigParser.Error:
+                err_msg = "Cannot find pregenerated template bank in section "
+                err_msg += "[workflow-tmpltbank] or any tagged sections. "
+                if tags:
+                    tagged_secs = " ".join("[workflow-tmpltbank-%s]"
+                                           % (tag,) for tag in tags)
+                    err_msg += "Tagged sections are %s. " % (tagged_secs,)
+                err_msg += "I looked for the 'tmpltbank-pregenerated-bank' "
+                err_msg += "option and 'tmpltbank-pregenerated-bank-%s'." \
+                           % (ifo,)
+                raise ConfigParser.Error(err_msg)
+
+    return tmplt_banks
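+
+# The lookup order above can be mimicked with plain configparser: first try a
+# single 'tmpltbank-pregenerated-bank' entry covering every detector, then fall
+# back to one entry per detector. A minimal sketch with a made-up configuration;
+# this is not the workflow's own option-parsing API.
+import configparser
+import textwrap
+
+ini_text = textwrap.dedent("""\
+    [workflow-tmpltbank]
+    tmpltbank-pregenerated-bank-h1 = /path/to/H1-BANK.xml.gz
+    tmpltbank-pregenerated-bank-l1 = /path/to/L1-BANK.xml.gz
+    """)
+example_cp = configparser.ConfigParser()
+example_cp.read_string(ini_text)
+
+example_ifos = ['H1', 'L1']
+sec = 'workflow-tmpltbank'
+try:
+    # One bank serving every detector
+    banks = {ifo: example_cp.get(sec, 'tmpltbank-pregenerated-bank')
+             for ifo in example_ifos}
+except configparser.Error:
+    # Otherwise one bank per detector, as in the loop above
+    banks = {ifo: example_cp.get(sec, 'tmpltbank-pregenerated-bank-%s'
+                                 % ifo.lower())
+             for ifo in example_ifos}
+print(banks)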
+ + +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_modules/pycbc/workflow/versioning.html b/latest/html/_modules/pycbc/workflow/versioning.html new file mode 100644 index 00000000000..83d71897d09 --- /dev/null +++ b/latest/html/_modules/pycbc/workflow/versioning.html @@ -0,0 +1,207 @@ + + + + + + pycbc.workflow.versioning — PyCBC 2.5.dev2 documentation + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pycbc.workflow.versioning

+# Copyright (C) 2023 Gareth Cabourn Davies
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#                                   Preamble
+#
+# =============================================================================
+#
+"""
+Module to generate/manage the executable used for version information
+in workflows
+"""
+import os
+import logging
+
+from pycbc.workflow.core import Executable
+
+logger = logging.getLogger('pycbc.workflow.versioning')
+
+
+
+[docs]
+class VersioningExecutable(Executable):
+    """
+    Executable for getting version information
+    """
+    current_retention_level = Executable.FINAL_RESULT
+ + + +
+[docs]
+def make_versioning_page(workflow, config_parser, out_dir, tags=None):
+    """
+    Make executable for versioning information
+    """
+    vers_exe = VersioningExecutable(
+        workflow.cp,
+        'page_versioning',
+        out_dir=out_dir,
+        ifos=workflow.ifos,
+        tags=tags,
+    )
+    node = vers_exe.create_node()
+    config_names = []
+    exes = []
+    for name, path in config_parser.items('executables'):
+        exe_to_test = os.path.basename(path)
+        if exe_to_test in exes:
+            # executable is already part of the list,
+            # find which index and add the name to the
+            # one already stored
+            path_idx = exes.index(exe_to_test)
+            name_orig = config_names[path_idx]
+            config_names[path_idx] = f"{name_orig},{name}"
+        else:
+            config_names.append(name)
+            exes.append(exe_to_test)
+    node.add_list_opt('--executables', exes)
+    node.add_list_opt('--executables-names', config_names)
+    node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
+    workflow.add_node(node)
+
+    return node
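+
+# Stand-alone rendering of the grouping done in the loop above: config names
+# that point at the same executable basename are merged into one
+# comma-separated label. The example [executables] entries are invented purely
+# for illustration.
+import os
+
+executables_items = [
+    ('inspiral', '/path/to/pycbc_inspiral'),
+    ('inspiral_inj', '/path/to/pycbc_inspiral'),
+    ('tmpltbank', '/path/to/pycbc_geom_aligned_bank'),
+]
+
+example_names, example_exes = [], []
+for name, path in executables_items:
+    exe = os.path.basename(path)
+    if exe in example_exes:
+        idx = example_exes.index(exe)
+        example_names[idx] = f"{example_names[idx]},{name}"
+    else:
+        example_names.append(name)
+        example_exes.append(exe)
+
+print(example_exes)   # ['pycbc_inspiral', 'pycbc_geom_aligned_bank']
+print(example_names)  # ['inspiral,inspiral_inj', 'tmpltbank']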
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/latest/html/_sources/_include/distributions-table.rst.txt b/latest/html/_sources/_include/distributions-table.rst.txt new file mode 100644 index 00000000000..cbe2bb2b36e --- /dev/null +++ b/latest/html/_sources/_include/distributions-table.rst.txt @@ -0,0 +1,24 @@ +===================================== ===================================================================== +Name Class +===================================== ===================================================================== +``'arbitrary'`` :py:class:`pycbc.distributions.arbitrary.Arbitrary` +``'cos_angle'`` :py:class:`pycbc.distributions.angular.CosAngle` +``'external'`` :py:class:`pycbc.distributions.external.External` +``'external_func_fromfile'`` :py:class:`pycbc.distributions.external.DistributionFunctionFromFile` +``'fisher_sky'`` :py:class:`pycbc.distributions.sky_location.FisherSky` +``'fixed_samples'`` :py:class:`pycbc.distributions.fixedsamples.FixedSamples` +``'fromfile'`` :py:class:`pycbc.distributions.arbitrary.FromFile` +``'gaussian'`` :py:class:`pycbc.distributions.gaussian.Gaussian` +``'independent_chip_chieff'`` :py:class:`pycbc.distributions.spins.IndependentChiPChiEff` +``'mchirp_from_uniform_mass1_mass2'`` :py:class:`pycbc.distributions.mass.MchirpfromUniformMass1Mass2` +``'q_from_uniform_mass1_mass2'`` :py:class:`pycbc.distributions.mass.QfromUniformMass1Mass2` +``'sin_angle'`` :py:class:`pycbc.distributions.angular.SinAngle` +``'uniform'`` :py:class:`pycbc.distributions.uniform.Uniform` +``'uniform_angle'`` :py:class:`pycbc.distributions.angular.UniformAngle` +``'uniform_f0_tau'`` :py:class:`pycbc.distributions.qnm.UniformF0Tau` +``'uniform_log10'`` :py:class:`pycbc.distributions.uniform_log.UniformLog10` +``'uniform_power_law'`` :py:class:`pycbc.distributions.power_law.UniformPowerLaw` +``'uniform_radius'`` :py:class:`pycbc.distributions.power_law.UniformRadius` +``'uniform_sky'`` :py:class:`pycbc.distributions.sky_location.UniformSky` +``'uniform_solidangle'`` :py:class:`pycbc.distributions.angular.UniformSolidAngle` +===================================== ===================================================================== diff --git a/latest/html/_sources/_include/inference_data_opts-table.rst.txt b/latest/html/_sources/_include/inference_data_opts-table.rst.txt new file mode 100644 index 00000000000..3c822e12b5b --- /dev/null +++ b/latest/html/_sources/_include/inference_data_opts-table.rst.txt @@ -0,0 +1,223 @@ ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| Name | Syntax | Description | ++=================================+====================================+==========================================================+ +| ``instruments`` | INSTRUMENTS [INSTRUMENTS ...] | Instruments to analyze, eg. H1 L1. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``trigger-time`` | TRIGGER_TIME | Reference GPS time (at geocenter) from which the | +| | | (anlaysis|psd)-(start|end)-time options are measured. | +| | | The integer seconds will be used. Default is 0; i.e., | +| | | if not provided, the analysis and psd times should be | +| | | in GPS seconds. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``analysis-start-time`` | IFO:TIME [IFO:TIME ...] 
| The start time to use for the analysis, measured with | +| | | respect to the trigger-time. If psd-inverse-length is | +| | | provided, the given start time will be padded by half | +| | | that length to account for wrap-around effects. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``analysis-end-time`` | IFO:TIME [IFO:TIME ...] | The end time to use for the analysis, measured with | +| | | respect to the trigger-time. If psd-inverse-length is | +| | | provided, the given end time will be padded by half | +| | | that length to account for wrap-around effects. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-start-time`` | IFO:TIME [IFO:TIME ...] | Start time to use for PSD estimation, measured with | +| | | respect to the trigger-time. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-end-time`` | IFO:TIME [IFO:TIME ...] | End time to use for PSD estimation, measured with | +| | | respect to the trigger-time. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``data-conditioning-low-freq`` | IFO:FLOW [IFO:FLOW ...] | Low frequency cutoff of the data. Needed for PSD | +| | | estimation and when creating fake strain. If not | +| | | provided, will use the model's low-frequency-cutoff. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| Options to select the method of PSD generation: | +| The options ``psd-model``, ``psd-file``, ``asd-file``, and ``psd-estimation`` are | +| mutually exclusive. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-model`` | IFO:MODEL [IFO:MODEL ...] | Get PSD from given analytical model. Choose from any | +| | | available PSD model. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-extra-args`` | DETECTOR:PARAM:VALUE | (optional) Extra arguments passed to the PSD models. | +| | [DETECTOR:PARAM:VALUE ...] | | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-file`` | IFO:FILE [IFO:FILE ...] | Get PSD using given PSD ASCII file | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``asd-file`` | IFO:FILE [IFO:FILE ...] | Get PSD using given ASD ASCII file | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-estimation`` | IFO:FILE [IFO:FILE ...] | Measure PSD from the data, using given average method. | +| | | Choose from mean, median or median-mean. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-segment-length`` | IFO:LENGTH [IFO:LENGTH ...] 
| (Required for ``psd-estimation``) The segment length for | +| | | PSD estimation (s) | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-segment-stride`` | IFO:STRIDE [IFO:STRIDE ...] | (Required for ``psd-estimation``) The separation between | +| | | consecutive segments (s) | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-num-segments`` | IFO:NUM [IFO:NUM ...] | (Optional, used only with ``psd-estimation``). If given | +| | | PSDs will be estimated using only this number of | +| | | segments. If more data is given than needed to make | +| | | this number of segments than excess data will not be | +| | | used in the PSD estimate. If not enough data is given | +| | | the code will fail. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-inverse-length`` | IFO:LENGTH [IFO:LENGTH ...] | (Optional) The maximum length of the impulse response | +| | | of the overwhitening filter (s) | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``invpsd-trunc-method`` | {hann} | (Optional) What truncation method to use when applying | +| | | psd-inverse-length. If not provided, a hard truncation | +| | | will be used. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-output`` | IFO:FILE [IFO:FILE ...] | (Optional) Write PSD to specified file | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psdvar-segment`` | SECONDS | Length of segment when calculating the PSD | +| | | variability. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psdvar-short-segment`` | SECONDS | Length of short segment for outliers removal in PSD | +| | | variability calculation. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psdvar-long-segment`` | SECONDS | Length of long segment when calculating the PSD | +| | | variability. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psdvar-psd-duration`` | SECONDS | Duration of short segments for PSD estimation. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psdvar-psd-stride`` | SECONDS | Separation between PSD estimation segments. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psdvar-low-freq`` | HERTZ | Minimum frequency to consider in strain bandpass. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psdvar-high-freq`` | HERTZ | Maximum frequency to consider in strain bandpass. 
| ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| Options for obtaining h(t): | +| These options are used for generating h(t) either by reading from a file | +| or by generating it. This is only needed if the PSD is to be estimated | +| from the data, ie. if the ``psd-estimation`` option is given. This group | +| supports reading from multiple ifos simultaneously. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``strain-high-pass`` | IFO:FREQUENCY [IFO:FREQUENCY ...] | High pass frequency | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``strain-low-pass`` | IFO:FREQUENCY [IFO:FREQUENCY ...] | Low pass frequency | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``pad-data`` | IFO:LENGTH [IFO:LENGTH ...] | Extra padding to remove highpass corruption (integer | +| | | seconds, default 8) | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``taper-data`` | IFO:LENGTH [IFO:LENGTH ...] | Taper ends of data to zero using the supplied length | +| | | as a window (integer seconds) | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``sample-rate`` | IFO:RATE [IFO:RATE ...] | The sample rate to use for h(t) generation (integer | +| | | Hz). | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``channel-name`` | IFO:CHANNEL [IFO:CHANNEL ...] | The channel containing the gravitational strain data | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``frame-cache`` | IFO:FRAME_CACHE [IFO:FRAME_CACHE | Cache file containing the frame locations. | +| | ...] | | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``frame-files`` | IFO:FRAME_FILES [IFO:FRAME_FILES | list of frame files | +| | ...] | | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``hdf-store`` | IFO:HDF_STORE_FILE | Store of time series data in hdf format | +| | [IFO:HDF_STORE_FILE ...] | | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``frame-type`` | IFO:FRAME_TYPE [IFO:FRAME_TYPE | (optional) Replaces frame-files. Use datafind to get | +| | ...] | the needed frame file(s) of this type. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``frame-sieve`` | IFO:FRAME_SIEVE [IFO:FRAME_SIEVE | (optional), Only use frame files where the URL matches | +| | ...] | the regular expression given. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``fake-strain`` | IFO:CHOICE [IFO:CHOICE ...] | Name of model PSD for generating fake gaussian noise. | +| | | Choose from any available PSD model, or ``zeroNoise``. 
| ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``fake-strain-extra-args`` | DETECTOR:PARAM:VALUE | (optional) Extra arguments passed to the PSD models. | +| | [DETECTOR:PARAM:VALUE ...] | | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``fake-strain-seed`` | IFO:SEED [IFO:SEED ...] | Seed value for the generation of fake colored gaussian | +| | | noise | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``fake-strain-from-file`` | IFO:FILE [IFO:FILE ...] | File containing ASD for generating fake noise from it. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``fake-strain-flow`` | FAKE_STRAIN_FLOW [FAKE_STRAIN_FLOW | Low frequency cutoff of the fake strain | +| | ...] | | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``fake-strain-filter-duration`` | FAKE_STRAIN_FILTER_DURATION | Duration in seconds of the fake data coloring filter | +| | [FAKE_STRAIN_FILTER_DURATION ...] | | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``fake-strain-sample-rate`` | FAKE_STRAIN_SAMPLE_RATE | Sample rate of the fake data generation | +| | [FAKE_STRAIN_SAMPLE_RATE ...] | | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``injection-file`` | IFO:FILE [IFO:FILE ...] | (optional) Injection file containing parametersof CBC | +| | | signals to be added to the strain | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``sgburst-injection-file`` | IFO:FILE [IFO:FILE ...] | (optional) Injection file containing parametersof | +| | | sine-Gaussian burst signals to add to the strain | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``injection-scale-factor`` | IFO:VAL [IFO:VAL ...] | Divide injections by this factor before adding to the | +| | | strain data | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``injection-sample-rate`` | IFO:VAL [IFO:VAL ...] | Sample rate to use for injections (integer Hz). | +| | | Typically similar to the strain data sample rate.If | +| | | not provided, the strain sample rate will be used | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``injection-f-ref`` | IFO:VALUE [IFO:VALUE ...] | Reference frequency in Hz for creating CBC injections | +| | | from an XML file | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``injection-f-final`` | IFO:VALUE [IFO:VALUE ...] | Override the f_final field of a CBC XML injection file | +| | | (frequency in Hz) | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``gating-file`` | IFO:FILE [IFO:FILE ...] 
| (optional) Text file of gating segments to apply. | +| | | Format of each line (units s) : gps_time | +| | | zeros_half_width pad_half_width | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``autogating-threshold`` | IFO:SIGMA [IFO:SIGMA ...] | If given, find and gate glitches producing a deviation | +| | | larger than SIGMA in the whitened strain time series | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``autogating-max-iterations`` | SIGMA | If given, iteratively apply autogating | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``autogating-cluster`` | IFO:SECONDS [IFO:SECONDS ...] | Length of clustering window for detecting glitches for | +| | | autogating. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``autogating-width`` | IFO:SECONDS [IFO:SECONDS ...] | Half-width of the gating window. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``autogating-taper`` | IFO:SECONDS [IFO:SECONDS ...] | Taper the strain before and after each gating window | +| | | over a duration of SECONDS. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``autogating-pad`` | IFO:SECONDS [IFO:SECONDS ...] | Ignore the given length of whitened strain at the ends | +| | | of a segment, to avoid filters ringing. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``gating-method`` | {hard,taper,paint} | Choose the method for gating. Default: `taper` | +| | [{hard,taper,paint} ...] | | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``normalize-strain`` | IFO:VALUE [IFO:VALUE ...] | (optional) Divide frame data by constant. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``zpk-z`` | IFO:VALUE [IFO:VALUE ...] | (optional) Zero-pole-gain (zpk) filter strain. A list | +| | | of zeros for transfer function | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``zpk-p`` | IFO:VALUE [IFO:VALUE ...] | (optional) Zero-pole-gain (zpk) filter strain. A list | +| | | of poles for transfer function | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``zpk-k`` | IFO:VALUE [IFO:VALUE ...] | (optional) Zero-pole-gain (zpk) filter strain. | +| | | Transfer function gain | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| Options for gating data: | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``gate`` | IFO:CENTRALTIME:HALFDUR:TAPERDUR | Apply one or more gates to the data before filtering. | +| | [IFO:CENTRALTIME:HALFDUR:TAPERDUR | | +| | ...] 
| | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``gate-overwhitened`` | | Overwhiten data first, then apply the gates specified | +| | | in ``gate``. Overwhitening allows for sharper tapers to | +| | | be used, since lines are not blurred. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``psd-gate`` | IFO:CENTRALTIME:HALFDUR:TAPERDUR | Apply one or more gates to the data used for computing | +| | [IFO:CENTRALTIME:HALFDUR:TAPERDUR | the PSD. Gates are applied prior to FFT-ing the data | +| | ...] | for PSD estimation. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| Options for quering data quality (DQ): | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``dq-segment-name`` | DQ_SEGMENT_NAME | The status flag to query for data quality. Default is | +| | | "DATA". | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``dq-source`` | {any,GWOSC,dqsegdb} | Where to look for DQ information. If "any" (the | +| | | default) will first try GWOSC, then dqsegdb. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``dq-server`` | DQ_SERVER | The server to use for dqsegdb. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ +| ``veto-definer`` | VETO_DEFINER | Path to a veto definer file that defines groups of | +| | | flags, which themselves define a set of DQ segments. | ++---------------------------------+------------------------------------+----------------------------------------------------------+ diff --git a/latest/html/_sources/_include/inference_io_inheritance_diagrams.rst.txt b/latest/html/_sources/_include/inference_io_inheritance_diagrams.rst.txt new file mode 100644 index 00000000000..da4cca5577f --- /dev/null +++ b/latest/html/_sources/_include/inference_io_inheritance_diagrams.rst.txt @@ -0,0 +1,121 @@ +.. _inheritance-io-cpnest_file: + +* ``cpnest_file``: + +.. inheritance-diagram:: pycbc.inference.io.cpnest.CPNestFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + +.. _inheritance-io-dynesty_file: + +* ``dynesty_file``: + +.. inheritance-diagram:: pycbc.inference.io.dynesty.DynestyFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + +.. _inheritance-io-emcee_file: + +* ``emcee_file``: + +.. inheritance-diagram:: pycbc.inference.io.emcee.EmceeFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + +.. _inheritance-io-emcee_pt_file: + +* ``emcee_pt_file``: + +.. inheritance-diagram:: pycbc.inference.io.emcee_pt.EmceePTFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + +.. _inheritance-io-epsie_file: + +* ``epsie_file``: + +.. inheritance-diagram:: pycbc.inference.io.epsie.EpsieFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + +.. _inheritance-io-multinest_file: + +* ``multinest_file``: + +.. 
inheritance-diagram:: pycbc.inference.io.multinest.MultinestFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + +.. _inheritance-io-nessai_file: + +* ``nessai_file``: + +.. inheritance-diagram:: pycbc.inference.io.nessai.NessaiFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + +.. _inheritance-io-posterior_file: + +* ``posterior_file``: + +.. inheritance-diagram:: pycbc.inference.io.posterior.PosteriorFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + +.. _inheritance-io-ptemcee_file: + +* ``ptemcee_file``: + +.. inheritance-diagram:: pycbc.inference.io.ptemcee.PTEmceeFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + +.. _inheritance-io-snowline_file: + +* ``snowline_file``: + +.. inheritance-diagram:: pycbc.inference.io.snowline.SnowlineFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + +.. _inheritance-io-ultranest_file: + +* ``ultranest_file``: + +.. inheritance-diagram:: pycbc.inference.io.ultranest.UltranestFile + :parts: 3 + :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile + +| + + diff --git a/latest/html/_sources/_include/models-table.rst.txt b/latest/html/_sources/_include/models-table.rst.txt new file mode 100644 index 00000000000..1674e5c2bf8 --- /dev/null +++ b/latest/html/_sources/_include/models-table.rst.txt @@ -0,0 +1,26 @@ +========================================= ============================================================================================= +Name Class +========================================= ============================================================================================= +``'brute_lisa_sky_modes_marginalize'`` :py:class:`pycbc.inference.models.brute_marg.BruteLISASkyModesMarginalize` +``'brute_parallel_gaussian_marginalize'`` :py:class:`pycbc.inference.models.brute_marg.BruteParallelGaussianMarginalize` +``'gated_gaussian_margpol'`` :py:class:`pycbc.inference.models.gated_gaussian_noise.GatedGaussianMargPol` +``'gated_gaussian_noise'`` :py:class:`pycbc.inference.models.gated_gaussian_noise.GatedGaussianNoise` +``'gaussian_noise'`` :py:class:`pycbc.inference.models.gaussian_noise.GaussianNoise` +``'hierarchical'`` :py:class:`pycbc.inference.models.hierarchical.HierarchicalModel` +``'joint_primary_marginalized'`` :py:class:`pycbc.inference.models.hierarchical.JointPrimaryMarginalizedModel` +``'marginalized_hmpolphase'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedHMPolPhase` +``'marginalized_phase'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedPhaseGaussianNoise` +``'marginalized_polarization'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedPolarization` +``'marginalized_time'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedTime` +``'multi_signal'`` :py:class:`pycbc.inference.models.hierarchical.MultiSignalModel` +``'relative'`` :py:class:`pycbc.inference.models.relbin.Relative` +``'relative_time'`` :py:class:`pycbc.inference.models.relbin.RelativeTime` +``'relative_time_dom'`` :py:class:`pycbc.inference.models.relbin.RelativeTimeDom` +``'single_template'`` :py:class:`pycbc.inference.models.single_template.SingleTemplate` +``'test_eggbox'`` :py:class:`pycbc.inference.models.analytic.TestEggbox` +``'test_normal'`` :py:class:`pycbc.inference.models.analytic.TestNormal` +``'test_posterior'`` 
:py:class:`pycbc.inference.models.analytic.TestPosterior` +``'test_prior'`` :py:class:`pycbc.inference.models.analytic.TestPrior` +``'test_rosenbrock'`` :py:class:`pycbc.inference.models.analytic.TestRosenbrock` +``'test_volcano'`` :py:class:`pycbc.inference.models.analytic.TestVolcano` +========================================= ============================================================================================= diff --git a/latest/html/_sources/_include/psd_models-table.rst.txt b/latest/html/_sources/_include/psd_models-table.rst.txt new file mode 100644 index 00000000000..4dc4f003bfe --- /dev/null +++ b/latest/html/_sources/_include/psd_models-table.rst.txt @@ -0,0 +1,98 @@ +=========================================== ============================================================================= +Name Function +=========================================== ============================================================================= +``AdVBNSOptimizedSensitivityP1200087`` :py:func:`pycbc.psd.analytical.AdVBNSOptimizedSensitivityP1200087` +``AdVDesignSensitivityP1200087`` :py:func:`pycbc.psd.analytical.AdVDesignSensitivityP1200087` +``AdVEarlyHighSensitivityP1200087`` :py:func:`pycbc.psd.analytical.AdVEarlyHighSensitivityP1200087` +``AdVEarlyLowSensitivityP1200087`` :py:func:`pycbc.psd.analytical.AdVEarlyLowSensitivityP1200087` +``AdVLateHighSensitivityP1200087`` :py:func:`pycbc.psd.analytical.AdVLateHighSensitivityP1200087` +``AdVLateLowSensitivityP1200087`` :py:func:`pycbc.psd.analytical.AdVLateLowSensitivityP1200087` +``AdVMidHighSensitivityP1200087`` :py:func:`pycbc.psd.analytical.AdVMidHighSensitivityP1200087` +``AdVMidLowSensitivityP1200087`` :py:func:`pycbc.psd.analytical.AdVMidLowSensitivityP1200087` +``AdVO3LowT1800545`` :py:func:`pycbc.psd.analytical.AdVO3LowT1800545` +``AdVO4IntermediateT1800545`` :py:func:`pycbc.psd.analytical.AdVO4IntermediateT1800545` +``AdVO4T1800545`` :py:func:`pycbc.psd.analytical.AdVO4T1800545` +``AdvVirgo`` :py:func:`pycbc.psd.analytical.AdvVirgo` +``CosmicExplorerP1600143`` :py:func:`pycbc.psd.analytical.CosmicExplorerP1600143` +``CosmicExplorerPessimisticP1600143`` :py:func:`pycbc.psd.analytical.CosmicExplorerPessimisticP1600143` +``CosmicExplorerWidebandP1600143`` :py:func:`pycbc.psd.analytical.CosmicExplorerWidebandP1600143` +``EinsteinTelescopeP1600143`` :py:func:`pycbc.psd.analytical.EinsteinTelescopeP1600143` +``GEOHF`` :py:func:`pycbc.psd.analytical.GEOHF` +``GEO`` :py:func:`pycbc.psd.analytical.GEO` +``KAGRA128MpcT1800545`` :py:func:`pycbc.psd.analytical.KAGRA128MpcT1800545` +``KAGRA25MpcT1800545`` :py:func:`pycbc.psd.analytical.KAGRA25MpcT1800545` +``KAGRA80MpcT1800545`` :py:func:`pycbc.psd.analytical.KAGRA80MpcT1800545` +``KAGRADesignSensitivityT1600593`` :py:func:`pycbc.psd.analytical.KAGRADesignSensitivityT1600593` +``KAGRAEarlySensitivityT1600593`` :py:func:`pycbc.psd.analytical.KAGRAEarlySensitivityT1600593` +``KAGRALateSensitivityT1600593`` :py:func:`pycbc.psd.analytical.KAGRALateSensitivityT1600593` +``KAGRAMidSensitivityT1600593`` :py:func:`pycbc.psd.analytical.KAGRAMidSensitivityT1600593` +``KAGRAOpeningSensitivityT1600593`` :py:func:`pycbc.psd.analytical.KAGRAOpeningSensitivityT1600593` +``KAGRA`` :py:func:`pycbc.psd.analytical.KAGRA` +``TAMA`` :py:func:`pycbc.psd.analytical.TAMA` +``Virgo`` :py:func:`pycbc.psd.analytical.Virgo` +``aLIGO140MpcT1800545`` :py:func:`pycbc.psd.analytical.aLIGO140MpcT1800545` +``aLIGO175MpcT1800545`` :py:func:`pycbc.psd.analytical.aLIGO175MpcT1800545` +``aLIGOAPlusDesignSensitivityT1800042`` 
:py:func:`pycbc.psd.analytical.aLIGOAPlusDesignSensitivityT1800042` +``aLIGOAdVO3LowT1800545`` :py:func:`pycbc.psd.analytical.aLIGOAdVO3LowT1800545` +``aLIGOAdVO4IntermediateT1800545`` :py:func:`pycbc.psd.analytical.aLIGOAdVO4IntermediateT1800545` +``aLIGOAdVO4T1800545`` :py:func:`pycbc.psd.analytical.aLIGOAdVO4T1800545` +``aLIGOBHBH20DegGWINC`` :py:func:`pycbc.psd.analytical.aLIGOBHBH20DegGWINC` +``aLIGOBHBH20Deg`` :py:func:`pycbc.psd.analytical.aLIGOBHBH20Deg` +``aLIGOBNSOptimizedSensitivityP1200087`` :py:func:`pycbc.psd.analytical.aLIGOBNSOptimizedSensitivityP1200087` +``aLIGODesignSensitivityP1200087`` :py:func:`pycbc.psd.analytical.aLIGODesignSensitivityP1200087` +``aLIGODesignSensitivityT1800044`` :py:func:`pycbc.psd.analytical.aLIGODesignSensitivityT1800044` +``aLIGOEarlyHighSensitivityP1200087`` :py:func:`pycbc.psd.analytical.aLIGOEarlyHighSensitivityP1200087` +``aLIGOEarlyLowSensitivityP1200087`` :py:func:`pycbc.psd.analytical.aLIGOEarlyLowSensitivityP1200087` +``aLIGOHighFrequencyGWINC`` :py:func:`pycbc.psd.analytical.aLIGOHighFrequencyGWINC` +``aLIGOHighFrequency`` :py:func:`pycbc.psd.analytical.aLIGOHighFrequency` +``aLIGOKAGRA128MpcT1800545`` :py:func:`pycbc.psd.analytical.aLIGOKAGRA128MpcT1800545` +``aLIGOKAGRA25MpcT1800545`` :py:func:`pycbc.psd.analytical.aLIGOKAGRA25MpcT1800545` +``aLIGOKAGRA80MpcT1800545`` :py:func:`pycbc.psd.analytical.aLIGOKAGRA80MpcT1800545` +``aLIGOLateHighSensitivityP1200087`` :py:func:`pycbc.psd.analytical.aLIGOLateHighSensitivityP1200087` +``aLIGOLateLowSensitivityP1200087`` :py:func:`pycbc.psd.analytical.aLIGOLateLowSensitivityP1200087` +``aLIGOMidHighSensitivityP1200087`` :py:func:`pycbc.psd.analytical.aLIGOMidHighSensitivityP1200087` +``aLIGOMidLowSensitivityP1200087`` :py:func:`pycbc.psd.analytical.aLIGOMidLowSensitivityP1200087` +``aLIGONSNSOptGWINC`` :py:func:`pycbc.psd.analytical.aLIGONSNSOptGWINC` +``aLIGONSNSOpt`` :py:func:`pycbc.psd.analytical.aLIGONSNSOpt` +``aLIGONoSRMHighPower`` :py:func:`pycbc.psd.analytical.aLIGONoSRMHighPower` +``aLIGONoSRMLowPowerGWINC`` :py:func:`pycbc.psd.analytical.aLIGONoSRMLowPowerGWINC` +``aLIGONoSRMLowPower`` :py:func:`pycbc.psd.analytical.aLIGONoSRMLowPower` +``aLIGOO3LowT1800545`` :py:func:`pycbc.psd.analytical.aLIGOO3LowT1800545` +``aLIGOQuantumBHBH20Deg`` :py:func:`pycbc.psd.analytical.aLIGOQuantumBHBH20Deg` +``aLIGOQuantumHighFrequency`` :py:func:`pycbc.psd.analytical.aLIGOQuantumHighFrequency` +``aLIGOQuantumNSNSOpt`` :py:func:`pycbc.psd.analytical.aLIGOQuantumNSNSOpt` +``aLIGOQuantumNoSRMHighPower`` :py:func:`pycbc.psd.analytical.aLIGOQuantumNoSRMHighPower` +``aLIGOQuantumNoSRMLowPower`` :py:func:`pycbc.psd.analytical.aLIGOQuantumNoSRMLowPower` +``aLIGOQuantumZeroDetHighPower`` :py:func:`pycbc.psd.analytical.aLIGOQuantumZeroDetHighPower` +``aLIGOQuantumZeroDetLowPower`` :py:func:`pycbc.psd.analytical.aLIGOQuantumZeroDetLowPower` +``aLIGOThermal`` :py:func:`pycbc.psd.analytical.aLIGOThermal` +``aLIGOZeroDetHighPowerGWINC`` :py:func:`pycbc.psd.analytical.aLIGOZeroDetHighPowerGWINC` +``aLIGOZeroDetHighPower`` :py:func:`pycbc.psd.analytical.aLIGOZeroDetHighPower` +``aLIGOZeroDetLowPowerGWINC`` :py:func:`pycbc.psd.analytical.aLIGOZeroDetLowPowerGWINC` +``aLIGOZeroDetLowPower`` :py:func:`pycbc.psd.analytical.aLIGOZeroDetLowPower` +``aLIGOaLIGO140MpcT1800545`` :py:func:`pycbc.psd.analytical.aLIGOaLIGO140MpcT1800545` +``aLIGOaLIGO175MpcT1800545`` :py:func:`pycbc.psd.analytical.aLIGOaLIGO175MpcT1800545` +``aLIGOaLIGODesignSensitivityT1800044`` :py:func:`pycbc.psd.analytical.aLIGOaLIGODesignSensitivityT1800044` 
+``aLIGOaLIGOO3LowT1800545`` :py:func:`pycbc.psd.analytical.aLIGOaLIGOO3LowT1800545` +``analytical_psd_lisa_tdi_AE_confusion`` :py:func:`pycbc.psd.analytical_space.analytical_psd_lisa_tdi_AE_confusion` +``analytical_psd_lisa_tdi_AE`` :py:func:`pycbc.psd.analytical_space.analytical_psd_lisa_tdi_AE` +``analytical_psd_lisa_tdi_T`` :py:func:`pycbc.psd.analytical_space.analytical_psd_lisa_tdi_T` +``analytical_psd_lisa_tdi_XYZ`` :py:func:`pycbc.psd.analytical_space.analytical_psd_lisa_tdi_XYZ` +``analytical_psd_taiji_tdi_AE_confusion`` :py:func:`pycbc.psd.analytical_space.analytical_psd_taiji_tdi_AE_confusion` +``analytical_psd_taiji_tdi_AE`` :py:func:`pycbc.psd.analytical_space.analytical_psd_taiji_tdi_AE` +``analytical_psd_taiji_tdi_T`` :py:func:`pycbc.psd.analytical_space.analytical_psd_taiji_tdi_T` +``analytical_psd_taiji_tdi_XYZ`` :py:func:`pycbc.psd.analytical_space.analytical_psd_taiji_tdi_XYZ` +``analytical_psd_tianqin_tdi_AE_confusion`` :py:func:`pycbc.psd.analytical_space.analytical_psd_tianqin_tdi_AE_confusion` +``analytical_psd_tianqin_tdi_AE`` :py:func:`pycbc.psd.analytical_space.analytical_psd_tianqin_tdi_AE` +``analytical_psd_tianqin_tdi_T`` :py:func:`pycbc.psd.analytical_space.analytical_psd_tianqin_tdi_T` +``analytical_psd_tianqin_tdi_XYZ`` :py:func:`pycbc.psd.analytical_space.analytical_psd_tianqin_tdi_XYZ` +``eLIGOModel`` :py:func:`pycbc.psd.analytical.eLIGOModel` +``eLIGOShot`` :py:func:`pycbc.psd.analytical.eLIGOShot` +``flat_unity`` :py:func:`pycbc.psd.analytical.flat_unity` +``iLIGOModel`` :py:func:`pycbc.psd.analytical.iLIGOModel` +``iLIGOSRD`` :py:func:`pycbc.psd.analytical.iLIGOSRD` +``iLIGOSeismic`` :py:func:`pycbc.psd.analytical.iLIGOSeismic` +``iLIGOShot`` :py:func:`pycbc.psd.analytical.iLIGOShot` +``iLIGOThermal`` :py:func:`pycbc.psd.analytical.iLIGOThermal` +``sh_transformed_psd_lisa_tdi_XYZ`` :py:func:`pycbc.psd.analytical_space.sh_transformed_psd_lisa_tdi_XYZ` +=========================================== ============================================================================= diff --git a/latest/html/_sources/_include/sampler_inheritance_diagrams.rst.txt b/latest/html/_sources/_include/sampler_inheritance_diagrams.rst.txt new file mode 100644 index 00000000000..eca56393106 --- /dev/null +++ b/latest/html/_sources/_include/sampler_inheritance_diagrams.rst.txt @@ -0,0 +1,120 @@ +.. _inheritance-cpnest: + +* ``cpnest``: + +.. inheritance-diagram:: pycbc.inference.sampler.cpnest.CPNestSampler + :parts: 3 + +| + + +.. _inheritance-dummy: + +* ``dummy``: + +.. inheritance-diagram:: pycbc.inference.sampler.dummy.DummySampler + :parts: 3 + +| + + +.. _inheritance-dynesty: + +* ``dynesty``: + +.. inheritance-diagram:: pycbc.inference.sampler.dynesty.DynestySampler + :parts: 3 + +| + + +.. _inheritance-emcee: + +* ``emcee``: + +.. inheritance-diagram:: pycbc.inference.sampler.emcee.EmceeEnsembleSampler + :parts: 3 + +| + + +.. _inheritance-emcee_pt: + +* ``emcee_pt``: + +.. inheritance-diagram:: pycbc.inference.sampler.emcee_pt.EmceePTSampler + :parts: 3 + +| + + +.. _inheritance-epsie: + +* ``epsie``: + +.. inheritance-diagram:: pycbc.inference.sampler.epsie.EpsieSampler + :parts: 3 + +| + + +.. _inheritance-multinest: + +* ``multinest``: + +.. inheritance-diagram:: pycbc.inference.sampler.multinest.MultinestSampler + :parts: 3 + +| + + +.. _inheritance-nessai: + +* ``nessai``: + +.. inheritance-diagram:: pycbc.inference.sampler.nessai.NessaiSampler + :parts: 3 + +| + + +.. _inheritance-ptemcee: + +* ``ptemcee``: + +.. 
inheritance-diagram:: pycbc.inference.sampler.ptemcee.PTEmceeSampler + :parts: 3 + +| + + +.. _inheritance-refine: + +* ``refine``: + +.. inheritance-diagram:: pycbc.inference.sampler.refine.RefineSampler + :parts: 3 + +| + + +.. _inheritance-snowline: + +* ``snowline``: + +.. inheritance-diagram:: pycbc.inference.sampler.snowline.SnowlineSampler + :parts: 3 + +| + + +.. _inheritance-ultranest: + +* ``ultranest``: + +.. inheritance-diagram:: pycbc.inference.sampler.ultranest.UltranestSampler + :parts: 3 + +| + + diff --git a/latest/html/_sources/_include/samplers-table.rst.txt b/latest/html/_sources/_include/samplers-table.rst.txt new file mode 100644 index 00000000000..06b5c091208 --- /dev/null +++ b/latest/html/_sources/_include/samplers-table.rst.txt @@ -0,0 +1,16 @@ +=============== ============================================================== +Name Class +=============== ============================================================== +``'cpnest'`` :py:class:`pycbc.inference.sampler.cpnest.CPNestSampler` +``'dummy'`` :py:class:`pycbc.inference.sampler.dummy.DummySampler` +``'dynesty'`` :py:class:`pycbc.inference.sampler.dynesty.DynestySampler` +``'emcee'`` :py:class:`pycbc.inference.sampler.emcee.EmceeEnsembleSampler` +``'emcee_pt'`` :py:class:`pycbc.inference.sampler.emcee_pt.EmceePTSampler` +``'epsie'`` :py:class:`pycbc.inference.sampler.epsie.EpsieSampler` +``'multinest'`` :py:class:`pycbc.inference.sampler.multinest.MultinestSampler` +``'nessai'`` :py:class:`pycbc.inference.sampler.nessai.NessaiSampler` +``'ptemcee'`` :py:class:`pycbc.inference.sampler.ptemcee.PTEmceeSampler` +``'refine'`` :py:class:`pycbc.inference.sampler.refine.RefineSampler` +``'snowline'`` :py:class:`pycbc.inference.sampler.snowline.SnowlineSampler` +``'ultranest'`` :py:class:`pycbc.inference.sampler.ultranest.UltranestSampler` +=============== ============================================================== diff --git a/latest/html/_sources/_include/transforms-table.rst.txt b/latest/html/_sources/_include/transforms-table.rst.txt new file mode 100644 index 00000000000..3f490201ecb --- /dev/null +++ b/latest/html/_sources/_include/transforms-table.rst.txt @@ -0,0 +1,37 @@ +============================================ ============================================================== +Name Class +============================================ ============================================================== +``'align_total_spin'`` :py:class:`pycbc.transforms.AlignTotalSpin` +``'aligned_mass_spin_to_cartesian_spin'`` :py:class:`pycbc.transforms.AlignedMassSpinToCartesianSpin` +``'cartesian_spin_1_to_spherical_spin_1'`` :py:class:`pycbc.transforms.CartesianSpin1ToSphericalSpin1` +``'cartesian_spin_2_to_spherical_spin_2'`` :py:class:`pycbc.transforms.CartesianSpin2ToSphericalSpin2` +``'cartesian_spin_to_aligned_mass_spin'`` :py:class:`pycbc.transforms.CartesianSpinToAlignedMassSpin` +``'cartesian_spin_to_chi_p'`` :py:class:`pycbc.transforms.CartesianSpinToChiP` +``'cartesian_spin_to_precession_mass_spin'`` :py:class:`pycbc.transforms.CartesianSpinToPrecessionMassSpin` +``'cartesian_to_spherical'`` :py:class:`pycbc.transforms.CartesianToSpherical` +``'chirp_distance_to_distance'`` :py:class:`pycbc.transforms.ChirpDistanceToDistance` +``'custom'`` :py:class:`pycbc.transforms.CustomTransform` +``'custom_multi'`` :py:class:`pycbc.transforms.CustomTransformMultiOutputs` +``'distance_to_chirp_distance'`` :py:class:`pycbc.transforms.DistanceToChirpDistance` +``'distance_to_redshift'`` :py:class:`pycbc.transforms.DistanceToRedshift` 
+``'exponent'`` :py:class:`pycbc.transforms.Exponent` +``'geo_to_lisa'`` :py:class:`pycbc.transforms.GEOToLISA` +``'geo_to_ssb'`` :py:class:`pycbc.transforms.GEOToSSB` +``'lambda_from_multiple_tov_files'`` :py:class:`pycbc.transforms.LambdaFromMultipleTOVFiles` +``'lambda_from_tov_file'`` :py:class:`pycbc.transforms.LambdaFromTOVFile` +``'lisa_to_geo'`` :py:class:`pycbc.transforms.LISAToGEO` +``'lisa_to_ssb'`` :py:class:`pycbc.transforms.LISAToSSB` +``'log'`` :py:class:`pycbc.transforms.Log` +``'logistic'`` :py:class:`pycbc.transforms.Logistic` +``'logit'`` :py:class:`pycbc.transforms.Logit` +``'mass1_mass2_to_mchirp_eta'`` :py:class:`pycbc.transforms.Mass1Mass2ToMchirpEta` +``'mass1_mass2_to_mchirp_q'`` :py:class:`pycbc.transforms.Mass1Mass2ToMchirpQ` +``'mchirp_eta_to_mass1_mass2'`` :py:class:`pycbc.transforms.MchirpEtaToMass1Mass2` +``'mchirp_q_to_mass1_mass2'`` :py:class:`pycbc.transforms.MchirpQToMass1Mass2` +``'precession_mass_spin_to_cartesian_spin'`` :py:class:`pycbc.transforms.PrecessionMassSpinToCartesianSpin` +``'spherical_spin_1_to_cartesian_spin_1'`` :py:class:`pycbc.transforms.SphericalSpin1ToCartesianSpin1` +``'spherical_spin_2_to_cartesian_spin_2'`` :py:class:`pycbc.transforms.SphericalSpin2ToCartesianSpin2` +``'spherical_to_cartesian'`` :py:class:`pycbc.transforms.SphericalToCartesian` +``'ssb_to_geo'`` :py:class:`pycbc.transforms.SSBToGEO` +``'ssb_to_lisa'`` :py:class:`pycbc.transforms.SSBToLISA` +============================================ ============================================================== diff --git a/latest/html/_sources/_include/waveform-parameters.rst.txt b/latest/html/_sources/_include/waveform-parameters.rst.txt new file mode 100644 index 00000000000..309c8f6e43d --- /dev/null +++ b/latest/html/_sources/_include/waveform-parameters.rst.txt @@ -0,0 +1,69 @@ +======================== ======================================================================================================================================================================================================================= +Parameter Description +======================== ======================================================================================================================================================================================================================= +``'mass1'`` The mass of the first component object in the binary (in solar masses). +``'mass2'`` The mass of the second component object in the binary (in solar masses). +``'spin1x'`` The x component of the first binary component's dimensionless spin. +``'spin1y'`` The y component of the first binary component's dimensionless spin. +``'spin1z'`` The z component of the first binary component's dimensionless spin. +``'spin2x'`` The x component of the second binary component's dimensionless spin. +``'spin2y'`` The y component of the second binary component's dimensionless spin. +``'spin2z'`` The z component of the second binary component's dimensionless spin. +``'eccentricity'`` Eccentricity. +``'lambda1'`` The dimensionless tidal deformability parameter of object 1. +``'lambda2'`` The dimensionless tidal deformability parameter of object 2. +``'dquad_mon1'`` Quadrupole-monopole parameter / m_1^5 -1. +``'dquad_mon2'`` Quadrupole-monopole parameter / m_2^5 -1. +``'lambda_octu1'`` The octupolar tidal deformability parameter of object 1. +``'lambda_octu2'`` The octupolar tidal deformability parameter of object 2. +``'quadfmode1'`` The quadrupolar f-mode angular frequency of object 1. 
+``'quadfmode2'`` The quadrupolar f-mode angular frequency of object 2. +``'octufmode1'`` The octupolar f-mode angular frequency of object 1. +``'octufmode2'`` The octupolar f-mode angular frequency of object 2. +``'dchi0'`` 0PN testingGR parameter. +``'dchi1'`` 0.5PN testingGR parameter. +``'dchi2'`` 1PN testingGR parameter. +``'dchi3'`` 1.5PN testingGR parameter. +``'dchi4'`` 2PN testingGR parameter. +``'dchi5'`` 2.5PN testingGR parameter. +``'dchi5l'`` 2.5PN logrithm testingGR parameter. +``'dchi6'`` 3PN testingGR parameter. +``'dchi6l'`` 3PN logrithm testingGR parameter. +``'dchi7'`` 3.5PN testingGR parameter. +``'dalpha1'`` Merger-ringdown testingGR parameter. +``'dalpha2'`` Merger-ringdown testingGR parameter. +``'dalpha3'`` Merger-ringdown testingGR parameter. +``'dalpha4'`` Merger-ringdown testingGR parameter. +``'dalpha5'`` Merger-ringdown testingGR parameter. +``'dbeta1'`` Intermediate testingGR parameter. +``'dbeta2'`` Intermediate testingGR parameter. +``'dbeta3'`` Intermediate testingGR parameter. +``'distance'`` Luminosity distance to the binary (in Mpc). +``'coa_phase'`` Coalesence phase of the binary (in rad). +``'inclination'`` Inclination (rad), defined as the angle between the orbital angular momentum L and the line-of-sight at the reference frequency. +``'long_asc_nodes'`` Longitude of ascending nodes axis (rad). +``'mean_per_ano'`` Mean anomaly of the periastron (rad). +``'delta_t'`` The time step used to generate the waveform (in s). +``'f_lower'`` The starting frequency of the waveform (in Hz). +``'approximant'`` A string that indicates the chosen approximant. +``'f_ref'`` The reference frequency. +``'phase_order'`` The pN order of the orbital phase. The default of -1 indicates that all implemented orders are used. +``'spin_order'`` The pN order of the spin corrections. The default of -1 indicates that all implemented orders are used. +``'tidal_order'`` The pN order of the tidal corrections. The default of -1 indicates that all implemented orders are used. +``'amplitude_order'`` The pN order of the amplitude. The default of -1 indicates that all implemented orders are used. +``'eccentricity_order'`` The pN order of the eccentricity corrections.The default of -1 indicates that all implemented orders are used. +``'frame_axis'`` Allow to choose among orbital_l, view and total_j +``'modes_choice'`` Allow to turn on among orbital_l, view and total_j +``'side_bands'`` Flag for generating sidebands +``'mode_array'`` Choose which (l,m) modes to include when generating a waveform. Only if approximant supports this feature.By default pass None and let lalsimulation use it's default behaviour.Example: mode_array = [ [2,2], [2,-2] ] +``'numrel_data'`` Sets the NR flags; only needed for NR waveforms. +``'delta_f'`` The frequency step used to generate the waveform (in Hz). +``'f_final'`` The ending frequency of the waveform. The default (0) indicates that the choice is made by the respective approximant. +``'f_final_func'`` Use the given frequency function to compute f_final based on the parameters of the waveform. +``'tc'`` Coalescence time (s) is the time when a GW reaches the origin of a certain coordinate system. +``'ra'`` Right ascension (rad). +``'dec'`` Declination (rad). +``'polarization'`` Polarization angle (rad) in a certain coordinate system. +``'eclipticlatitude'`` eclipticlatitude in SSB/LISA coords. +``'eclipticlongitude'`` eclipticlongitude in SSB/LISA coords. 
+======================== ======================================================================================================================================================================================================================= diff --git a/latest/html/_sources/apps.rst.txt b/latest/html/_sources/apps.rst.txt new file mode 100644 index 00000000000..df3fd0d1257 --- /dev/null +++ b/latest/html/_sources/apps.rst.txt @@ -0,0 +1,23 @@ +.. _apps: + +============================================= +Applications and Workflows +============================================= + +Users who are interested in tools that PyCBC provides for various other +analysis tasks (e.g. template bank generation, hardware injections, and testing +template banks) should read the documentation at: + +.. toctree:: + :maxdepth: 1 + + inference + workflow/pycbc_make_psd_estimation_workflow + workflow/pycbc_make_offline_search_workflow + workflow/pygrb.rst + tmpltbank + hwinj + banksim + faithsim + upload_to_gracedb + pycbc_condition_strain diff --git a/latest/html/_sources/banksim.rst.txt b/latest/html/_sources/banksim.rst.txt new file mode 100644 index 00000000000..3b886f31cd2 --- /dev/null +++ b/latest/html/_sources/banksim.rst.txt @@ -0,0 +1,197 @@ +################################################################ +Calculating the Effectualness (Fitting Factor) of Template Banks +################################################################ + +.. _banksim: + +=================== +Introduction +=================== + +This page describes how to use the ``banksim`` facilities within PyCBC. +The ``banksim`` tools calculate the matches, maximized over a set of templates, +for a list of injection waveforms to measure the effectualness (fitting +factor) of a template bank. + +The purpose of this test is to allow the user to investigate the construction of new template banks, as well as act as a sanity check of the template bank generation methodology and code. Therefore the tests run over the same parameter ranges used to generate the bank and use the same sensitivity curve. The tests described here may not be optimal or exhaustive, but should be used to gain confidence that a bank is recovering signals it is designed to recover at an acceptable match. + +----------------------------- +Creating a configuration file +----------------------------- + +All the choices when setting up a banksim are contained +in a single configuration file. + +Below is an example. + +.. literalinclude:: ../examples/banksim/banksim_simple.ini + +There are four sections that must be present: [inspinj]/[external_injection], [executables], [workflow], +and [banksim]. + + #. inspinj + + This section sets the parameters of all of the injection waveforms. + The arguments in the configuration file are fed directly to the + lalapps_inspinj program to create an injection file. + The same arguments are available, and the same restrictions apply. + The number of injections can be set by using the GPS start and end time + options along with the time step. + Note, however, that the waveform name is required but does not + determine the actual approximants that will be compared. That is set in the [banksim] + section. + + If you want to use another method to create injections (e.g. ``pycbc_create_injections``), + instead of using [inspinj], you can name the section [external_injection] and specify the + path of the injection file. + + .. 
code-block:: bash + + [external_injection] + inj-file = /path/to/inj.xml + + Note: The injection file should be in the sim_inspiral table (.xml) format. + + #. executables + + This section lists the location of the pycbc_banksim script. Make note + that the script is copied to the executables folder + and that is the version that will be used. + + #. workflow + + This section has options that configure the workflow. + The required options are 'log-path', 'bank-file', + 'injections-per-job', and 'templates-per-job'. The + 'log-path' specifies the directory to store + condor log files. 'bank-file' sets the template bank + over which to maximize matches. It must be either + a sngl or sim inspiral table in xml format. + 'injections-per-job', as its name suggests, determines + the maximum number of injections that each job + has to calculate fitting factors for. + + The injection + file generated from the [inspinj] section or provided externally is split + into smaller pieces to satisfy this requirement. + Note that this option has a direct effect on the memory + requirements of each banksim job, as each injection + is pregenerated at the beginning of the program. + + The 'templates-per-job' will cause the given template + bank to be split into smaller portions. This option + is directly proportional to the running time of + each job. + + An optional value 'use-gpus' can be set. This will + set up the workflow to choose condor nodes with GPUs + and set the options so that the banksim program + will use the GPU for accelerated processing. Note that + the default is to treat all results from a GPU as + unreliable. As such, each job is automatically run + twice. The results are compared and only kept if + they are equivalent. Only the GPUs on SUGAR and ATLAS + are supported at this time. + + Bank simulations running on LDG clusters must include + the 'accounting-group' option in the workflow section. + The value must be chosen according to the + `Accounting information web page `_. + + #. banksim + + This section corresponds to the arguments sent to the + banksim executable. The notable exception is that the + correct flag for GPU support will be set if the 'use-gpus' + option is set in the [workflow] section. The actual + signal and template approximants, along with their + PN order parameters (if relevant), are set here. Note that + the option filter-buffer-length must be set to a value + greater than the duration of the longest generated + approximant. + +------------------------ +Generating the workflow +------------------------ + +Once a configuration file has been made, create a +workspace directory and place the file into it. +Running the following command will generate a dag +that will submit the required jobs. + +.. code-block:: bash + + pycbc_make_banksim --conf YOUR_INI_FILE.ini + +The workflow can then be submitted by running the +generated shell script. + +.. code-block:: bash + + sh submit.sh + +------------------------- +Understanding the results +------------------------- + +The main result of the banksim is a single file called +'results.dat'. This is a space-separated ASCII file. + +Early (incomplete) results can be generated at any time +by executing the following script. + +.. code-block:: bash + + sh partial_results.sh + +Some basic plots are also generated automatically and +placed into the 'plots' folder. + +The pycbc_banksim_plots script located in the +scripts folder is an example of +how to read the results file. 
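+
+If you want to inspect the results directly, the file can also be loaded with
+standard numpy tools. The following is a minimal sketch, not part of the
+banksim tools themselves: it assumes only that 'results.dat' is a
+whitespace-separated ASCII table and that one of its columns holds the
+recovered fitting factor; the column index used below is illustrative, so
+check the header written by your version of pycbc_banksim before relying on it.
+
+.. code-block:: python
+
+    # Minimal sketch: summarize a banksim results.dat file.
+    # The fitting-factor column index (0 here) is an assumption; adjust it
+    # to match the header of your results file.
+    import numpy
+
+    data = numpy.loadtxt('results.dat')
+    fitting_factors = data[:, 0]
+
+    frac = (fitting_factors >= 0.97).mean()
+    print('Fraction of injections with FF >= 0.97: %.3f' % frac)
+    print('Worst fitting factor: %.3f' % fitting_factors.min())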
+ +================================================= +Validating template banks for production analysis +================================================= + +To validate the uberbanks used in LIGO searches, we test the BNS, NSBH and BBH regions with separate banksim runs. Therefore there will be some overlap between the signals tested. For technical reasons, it is also convenient to split the NSBH and BBH tests up into a run with signals below a total mass of 50 and signals with a total mass above 50. + +We propose to select test signals from mass distributions that are flat in component masses in the respective regions; NS masses between 1 and 3 and BH masses between 2 and 99, with a total mass limit of 100. In addition, we select aligned spin magnitudes uniform in the respective regions; -0.05 to 0.05 for NS and -0.99 to 0.99 for BH. + +We propose to test with 10,000 injection signals in each of the BNS, NSBH and BBH regions, for a total of 30,000. This number is much less than the total number of templates in the bank. + +We propose to use SEOBNRv2 as the test signals, even though the uberbank uses TaylorF2 and SEOBNRv2_ROM_DoubleSpin templates for recovery. This is because we believe that SEOBNRv2 is a more accurate waveform than either TaylorF2 or the ROMs. + +--------- +Procedure +--------- + +The bank generation can be verified using the pycbc_banksim code. To run this, follow the instructions for running the banksim code. An example ini file to run the NSBH banksim for total masses below 50 is given here: + +.. literalinclude:: ../examples/banksim/nsbh_below50.ini + +To run this you will need to change the banksim option to your local version of pycbc_banksim, the log-path option to a suitable location for your log files on your cluster, the locations of the bank and noise curve, and possibly whatever processing_scheme is best on your cluster (mkl works on Atlas with /opt/intel/2015/intel.sh sourced). + +Injected spins are up to 0.99, not 0.9895, and the injections are uniform in component mass from 1 to 50 and uniform in spin magnitude (so they contain some highly spinning BNS). Injections are generated from 25 Hz but matches are calculated from 30 Hz; this gives the signal some "burn-in" time. Source location l-distr is random over the sky and inclination i-distr is uniformly distributed over arccos(i) - although this should not matter for aligned signals. + +---------- +Evaluation +---------- + +A stochastic placement method (like sbank) will not be able to guarantee that all points in parameter space are covered at better than 0.97 fitting factor. A convenient measure of the success of the bank generation is whether the bank is able to recover 99% of injected signals, using the same parameters and templates as the bank is designed for, with a fitting factor of 0.97 or better. Further requirements might be that there should be no fitting factors with matches less than 0.95, or that the fitting factors below 0.97 should not be clustered in a particular part of parameter space. To cover all source groups we can run such tests separately for simulated BNS, NSBH and BBH signals when testing a bank that covers all three parameter ranges. + +While such tests do not guarantee that the bank will successfully recover all possible signals in the parameter region (for example due to different sensitivities in the two detectors, different waveform approximants, precession effects, tidal deformation and disruption, etc.), 
these tests do indicate with a reasonable level of confidence that the template generation has been successful at what it was designed to do. + +------------ +Known issues +------------ + +The coverage of the high-mass (>70) and anti-aligned (<-0.5) NSBH region is known to be sparse in some versions. + +The mchirp-window size may need to be changed if it is too tight. This is particularly a problem at higher masses. + +If speed is an issue, the banksims can be sped up by reducing the number of injection signals, using ROMs instead of SEOBNRv2 as injection signals, reducing the signal-sample-rate, or tightening the mchirp-window. Code is being developed to do this dynamically. + +The option total-mass-divide is needed to replicate the uberbank switching from using TaylorF2 below a total mass of 4 to using ROMs above. This may not exist on current master of pycbc_banksim. + diff --git a/latest/html/_sources/build_gh_pages.rst.txt b/latest/html/_sources/build_gh_pages.rst.txt new file mode 100644 index 00000000000..6d88712c407 --- /dev/null +++ b/latest/html/_sources/build_gh_pages.rst.txt @@ -0,0 +1,79 @@ +####################################### +Building Documentation for GitHub Pages +####################################### + +============================== +Creating a GitHub project page +============================== + +GitHub pages are built from a branch of the repository called ``gh-pages``. +If you have not already created a GitHub project page for PyCBC in your +repository, make a ``gh-pages`` branch in your repository as follows: + +.. code-block:: bash + + git checkout --orphan gh-pages + git rm -rf . + git clean -dxf + touch .nojekyll + mkdir latest + git add .nojekyll latest + git commit -a -m "set up gh-pages branch" + git push origin gh-pages + git branch --set-upstream gh-pages origin/gh-pages + +These commands create the branch and then remove all of the files from this branch, so that it will just contain the documentation pages. + +.. note:: + + The main `gwastro/pycbc `_ repository already has a `gh-pages` branch, so do not do this in the main repository. + +====================================== +Building and pushing the documentation +====================================== + +The documentation should be built from the source code on a regular branch and installed into the ``gh-pages`` branch. Since git cannot have two branches checked out simultaneously, you need to check out another copy of the repository with the ``gh-pages`` branch inside your PyCBC source repository. Do this with the following commands (assuming your PyCBC git repository is in a directory named ``pycbc``). + +.. code-block:: bash + + cd /path/to/your/repo/pycbc + git clone git@github.com:github-username/pycbc.git _gh-pages + +Now flush the contents of this directory. We do this, as the documentation is +not really under version control in the ``gh-pages`` branch. We just use this +branch to publish to GitHub pages. Run the commands: + +.. code-block:: bash + + cd _gh-pages + git rm -rf * + git commit -a -m "flush documentation" + cd .. + +The last ``cd`` command should put you back at the top level of your PyCBC +source directory. To build the documentation into the ``_gh-pages`` directory, +run the command: + +.. code-block:: bash + + python setup.py build_gh_pages + +This will build the documentation in the second repository that you created called ``_gh-pages`` under the directory ``latest/``. To push these changes up to GitHub, run: + +.. 
code-block:: bash + + cd _gh-pages + git add --all + git commit -a -m "documentation update" + git push origin gh-pages + +The documentation will then be available under your GitHub pages at ``http://username.github.io/pycbc/latest/html/`` where you should replace ``username`` with your GitHub account name. + +In this example, we check out the master branch to build the documentation, but you can change the last command above to check out any other branch that you are developing. + +.. note:: + + Be careful with the ``git rm -rf *`` command: if you run it in the wrong + directory you can delete the contents of your git repository. If you do + this by accident, you can use ``git reset`` to undo the commit. + diff --git a/latest/html/_sources/catalog.rst.txt b/latest/html/_sources/catalog.rst.txt new file mode 100644 index 00000000000..494d18163f5 --- /dev/null +++ b/latest/html/_sources/catalog.rst.txt @@ -0,0 +1,31 @@ +.. _catalog: + +################################################### +Catalog of Observed Gravitational-wave Mergers +################################################### + +Information about the growing catalog of gravitational-wave mergers can be +accessed through the :py:mod:`pycbc.catalog` package. + +=========================================== +Which mergers do we have information about? +=========================================== + +.. literalinclude:: ../examples/catalog/what.py +.. command-output:: python ../examples/catalog/what.py + +============================================== +Plotting some key statistics from the catalog +============================================== + +.. plot:: ../examples/catalog/stat.py + :include-source: + +============================================== +Accessing data around each event +============================================== + +Data around each event can also be easily accessed for any detector. + +.. plot:: ../examples/catalog/data.py + :include-source: diff --git a/latest/html/_sources/credit.rst.txt b/latest/html/_sources/credit.rst.txt new file mode 100644 index 00000000000..6a024804e43 --- /dev/null +++ b/latest/html/_sources/credit.rst.txt @@ -0,0 +1,66 @@ +======================================= +Use of PyCBC in Scientific Publications +======================================= + +If you use any code from PyCBC in a scientific publication, then we ask that +you include a citation to the software through its DOI and that you cite the +publications relevant to the sections of the code that you are using, as +described below. + +------------------------- +Citing the PyCBC Software +------------------------- + +A BibTeX key and DOI for each release are available from `Zenodo +`__ and DOIs for releases can be found on the `PyCBC +release page `__. A key for the +latest release is available at: + +.. image:: https://zenodo.org/badge/31596861.svg + :target: https://zenodo.org/badge/latestdoi/31596861 + +If you do not use a specific release, please cite the DOI for the latest +release, or the release closest to the version that you are using. + +--------------------------------------------------------------- +Citing the scientific publications that describe the algorithms +--------------------------------------------------------------- + +PyCBC implements a large number of data-analysis algorithms and so it is not +possible to give one single citation. To give proper scientific credit for the +development of PyCBC, in addition to citing the DOI of the software, please +cite the appropriate scientific publications below. 
+ +^^^^^^^^^^^^^^^^^^ +Bayesian Inference +^^^^^^^^^^^^^^^^^^ + +If you use the Bayesian inference modules, or code derived from those modules, +please cite the paper: + +- `PyCBC Inference: A Python-based parameter estimation toolkit for compact binary coalescence signals. `__ `[INSPIRES BibTeX Key] `__ `[ADS BibTeX key] `__ + +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Searches for Compact Binary Coalescence +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you use the PyCBC search algorithms, please cite all four of these papers: + +- `FINDCHIRP: an algorithm for detection of gravitational waves from inspiraling compact binaries. `__ `[INSPIRES BibTeX Key] `__ `[ADS BibTeX key] `__ + +- `A chi-squared time-frequency discriminator for gravitational wave detection. `__ `[INSPIRES BibTeX Key] `__ `[ADS BibTeX key] `__ + +- `Detecting binary compact-object mergers with gravitational waves: Understanding and Improving the sensitivity of the PyCBC search. `__ `[INSPIRES BibTeX Key] `__ `[ADS BibTeX key] `__ + +- `Implementing a search for aligned-spin neutron star -- black hole systems with advanced ground based gravitational wave detectors. `__ `[INSPIRES BibTeX Key] `__ `[ADS BibTeX key] `__ + +If you use the offline PyCBC search pipeline, please additionally cite: + +- `The PyCBC search for gravitational waves from compact binary coalescence. `__ `[INSPIRES BibTeX Key] `__ `[ADS BibTeX key] `__ + +If you use the low-latency PyCBC search pipeline (PyCBC Live), please +additionally cite: + +- `PyCBC Live: Rapid Detection of Gravitational Waves from Compact Binary Mergers. `__ `[INSPIRES BibTeX Key] `__ `[ADS BibTeX key] `__ + +- `Realtime search for compact binary mergers in Advanced LIGO and Virgo's third observing run using PyCBC Live. `__ `[INSPIRES BibTeX Key] `__ `[ADS BibTeX key] `__ diff --git a/latest/html/_sources/dataquality.rst.txt b/latest/html/_sources/dataquality.rst.txt new file mode 100644 index 00000000000..ac3aa14cef5 --- /dev/null +++ b/latest/html/_sources/dataquality.rst.txt @@ -0,0 +1,38 @@ +############################################################# +Query times of valid data, hardware injections, and more. +############################################################# + +Information about the state of the LIGO/Virgo data can be queried from +resources at the GWOSC and from LVC-proprietary DQSEGDB through the +functions of the :py:mod:`pycbc.dq` module. We will outline a few common +tasks below. + +.. _example-valid-data: + +================================================= +Determine the times an instrument has valid data +================================================= + +.. plot:: ../examples/dataquality/on.py + :include-source: + +==================================== +Finding times of hardware injections +==================================== + +.. plot:: ../examples/dataquality/hwinj.py + :include-source: + +======================== +What flags can I query? +======================== + +A list of many of the flags which can be queried is `available here `_. +Note that the detector prefix should be omitted when querying: just give the raw name such as "DATA" instead of "H1_DATA". + +There are two additional types of flags which can be queried. These are +the negation of the flags like "NO_CBC_HW_INJ". The flag "CBC_HW_INJ" would +give times where there *is* a hardware injection instead of times when +there isn't. Similarly, if you use "CBC_CAT2_VETO" instead of "CBC_CAT2" you +will get the times that are adversely affected instead of the times that +are not. 
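+
+As a concrete illustration of such a query, the following is a minimal sketch
+using :py:mod:`pycbc.dq`; the GPS interval shown is only a placeholder, so
+substitute the times you actually care about.
+
+.. code-block:: python
+
+    # Minimal sketch: query a data-quality flag with pycbc.dq.
+    # The GPS times below are placeholders.
+    from pycbc import dq
+
+    start, end = 1126051217, 1126051217 + 4096
+
+    # Times when H1 has valid data. The detector prefix is passed separately,
+    # so the flag name is "DATA" rather than "H1_DATA".
+    segs = dq.query_flag('H1', 'DATA', start, end)
+
+    for seg in segs:
+        print(int(seg[0]), int(seg[1]))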
diff --git a/latest/html/_sources/detector.rst.txt b/latest/html/_sources/detector.rst.txt new file mode 100644 index 00000000000..efe2a5cc35e --- /dev/null +++ b/latest/html/_sources/detector.rst.txt @@ -0,0 +1,58 @@ +################################################### +Gravitational-wave Detectors +################################################### + +The pycbc.detector module provides the :py:mod:`pycbc.detector.Detector` class +to access information about gravitational wave detectors and key information +about how their orientation and position affects their view of a source + +===================================== +Detector Locations +===================================== + +.. literalinclude:: ../examples/detector/loc.py +.. command-output:: python ../examples/detector/loc.py + +===================================== +Light travel time between detectors +===================================== + +.. literalinclude:: ../examples/detector/travel.py +.. command-output:: python ../examples/detector/travel.py + +====================================================== +Time source gravitational-wave passes through detector +====================================================== + +.. literalinclude:: ../examples/detector/delay.py +.. command-output:: python ../examples/detector/delay.py + +================================================================ +Antenna Patterns and Projecting a Signal into the Detector Frame +================================================================ + +.. literalinclude:: ../examples/detector/ant.py +.. command-output:: python ../examples/detector/ant.py + +============================================================== +Adding a custom detector / overriding existing ones +============================================================== +PyCBC supports observatories with arbitrary locations. For the study +of possible new observatories you can add them explicitly within a script +or by means of a config file to make the detectors visible to all codes +that use the PyCBC detector interfaces. + +An example of adding a detector directly within a script. + +.. plot:: ../examples/detector/custom.py + :include-source: + + +The following demonstrates a config file which similarly can provide +custom observatory information. The options are the same as for the +direct function calls. To tell PyCBC the location of the config file, +set the PYCBC_DETECTOR_CONFIG variable to the location of the file e.g. +PYCBC_DETECTOR_CONFIG=/some/path/to/detectors.ini. The following would +provide new detectors 'f1' and 'f2'. + +.. literalinclude:: ../examples/detector/custom.ini diff --git a/latest/html/_sources/devs.rst.txt b/latest/html/_sources/devs.rst.txt new file mode 100644 index 00000000000..72f86424b7b --- /dev/null +++ b/latest/html/_sources/devs.rst.txt @@ -0,0 +1,42 @@ +============================= +Documentation for Developers +============================= + +PyCBC developers should read the pages below which explain how to write +documentation, develop the code, and create releases: + +.. toctree:: + :maxdepth: 1 + + documentation + release + +Developers who are interested in file I/O, data storage, and access should +read the documentation at: + +.. toctree:: + :maxdepth: 1 + + formats/hdf_format + +Developers who are interested in creating new scientific workflow generation +scripts should read the documentation at: + +.. toctree:: + :maxdepth: 1 + + workflow + +Full Module Documentation is available at: + +.. 
toctree:: + :maxdepth: 1 + + modules + +Instructions for building the GitHub pages documentation are at: + +.. toctree:: + :maxdepth: 1 + + build_gh_pages diff --git a/latest/html/_sources/distributions.rst.txt b/latest/html/_sources/distributions.rst.txt new file mode 100644 index 00000000000..808627d2ccb --- /dev/null +++ b/latest/html/_sources/distributions.rst.txt @@ -0,0 +1,47 @@ +################################################### +Using PyCBC Distributions from PyCBC Inference +################################################### + +The aim of this page is to demonstrate some simple uses of the distributions available from the :py:mod:`pycbc.distributions` module. + +============================================================ +Generating samples in a Python script by using the .ini file +============================================================ + +This example shows how to generate samples in a Python script by using a standalone .ini :download:`file <../examples/distributions/pycbc_bbh_prior.ini>`. It also shows that when we draw samples uniformly in the source-frame masses and comoving volume, the resulting distributions of the detector-frame masses are not uniform. + +.. plot:: ../examples/distributions/sampling_from_config_example.py + :include-source: + +========================================= +Making Mass Distributions in M1 and M2 +========================================= + +Here we will demonstrate how to make different mass populations of binaries. This example shows uniform and Gaussian mass distributions. + +.. plot:: ../examples/distributions/mass_examples.py + :include-source: + +==================================================== +Generating mchirp and q from uniform mass1 and mass2 +==================================================== + +This example shows chirp mass and mass ratio samples drawn from uniform mass1 and mass2 with boundaries given by chirp mass and mass ratio. The first row of the figures below shows the chirp mass and mass ratio samples, and the same set of samples converted to mass1 and mass2, respectively. The second row shows the marginalized distributions for chirp mass and mass ratio, compared with the analytical probability density functions (PDFs). + +.. plot:: ../examples/distributions/mchirp_q_from_uniform_m1m2_example.py + :include-source: + +======================================================== +Sky Location Distribution as Spin Distribution Example +======================================================== + +Here we make a distribution of unit-length spins, distributed equally in x, y, and z, to sample the surface of a unit sphere. + +.. plot:: ../examples/distributions/spin_examples.py + :include-source: + +Using much of the same code we can also show how this sampling accurately covers the surface of a unit sphere. + +.. plot:: ../examples/distributions/spin_spatial_distr_example.py + :include-source: diff --git a/latest/html/_sources/docker.rst.txt b/latest/html/_sources/docker.rst.txt new file mode 100644 index 00000000000..879e82827b5 --- /dev/null +++ b/latest/html/_sources/docker.rst.txt @@ -0,0 +1,32 @@ +========================== +Running PyCBC under Docker +========================== + +The easiest way to start using PyCBC is to install one of our `Docker containers `_. First, install the `Docker Community Edition `_ for your `Mac `_ or `Windows `_ desktop. Docker CE installations for `Linux platforms `_ are also available. 
+ + +To start a Docker container with no graphics, type the commands:: + + docker pull pycbc/pycbc-el8:latest + docker run -it pycbc/pycbc-el8:latest + +This example downloads the current version of the code from the `GitHub master branch. `_ Replace the string ``latest`` with one of the `PyCBC release tags `_ (e.g. ``v1.7.0``) to install a container containing a released version of PyCBC. The container includes all of the required software and dependencies to run PyCBC, including a compatible version of LALSuite installed into the root filesystem. The command above starts a login shell as the pycbc user. To override this and log in as root, run the command:: + + docker run -it pycbc/pycbc-el8:latest /bin/bash -l + +------------------------------------- +Using jupyter notebook within docker +------------------------------------- + +You can start a Jupyter notebook within Docker and then port forward it to your computer's environment:: + + docker run -it -p 8888:8888 --name pycbc_test pycbc/pycbc-el8:latest /bin/su -l pycbc -c "jupyter notebook --no-browser --ip 0.0.0.0" + +Once the image is running, you can connect from your computer's web browser to the address printed to the screen by jupyter. This is typically the localhost address, e.g. ``127.0.0.1``. + +------------------------------- +Sharing user files and SSH keys +------------------------------- + +It can be useful to share your SSH public/private key with the Docker container, for example to allow you to git push and pull from your repository on GitHub. To do this, add the argument ``-v ${HOME}/.ssh:/opt/pycbc/.ssh`` to the ``docker run`` commands. You can also create e.g. a ``scratch`` directory and use the ``-v`` option to mount it in the container. This directory can be used to transfer files between the container and the host computer. See the `Docker volumes documentation `_ for a detailed explanation of mounting directories inside a docker container. diff --git a/latest/html/_sources/documentation.rst.txt b/latest/html/_sources/documentation.rst.txt new file mode 100644 index 00000000000..bd5ceaef20f --- /dev/null +++ b/latest/html/_sources/documentation.rst.txt @@ -0,0 +1,54 @@ +###################### +Documenting PyCBC code +###################### + +This page contains some details of how to use the PyCBC Sphinx documentation system. It is very similar to the system used by numpy and scipy to document their code. + +All code in PyCBC must be appropriately documented. If you find any documentation to be inadequate, please contact the development team, or improve it yourself where possible. + +Sphinx +====== + +PyCBC uses the `sphinx` documentation system to produce automatic documentation from all modules and functions, with a layer on top to describe the code appropriately. + +The `sphinx website `_ has a `nice tutorial `_ on the reStructuredText format used for documentation. + +Documenting the PyCBC package +============================= + +The overview documentation pages (e.g. this page) live in the ``docs`` directory of the git repository. These pages are linked together using internal links. + +For example, the ``docs/index.rst`` file is used to build the home page for the documentation, and contains the following block:: + + .. toctree:: + :maxdepth: 1 + + install + documentation + +The ``install`` and ``documentation`` entries refer to the ``install.rst`` and ``documentation.rst`` files in the same directory. 
+ +In order to provide readable documentation for all users, each key module in ``pycbc`` should have its own directory in ``docs`` that builds upon the automatic documentation built from its classes and functions. For example, the ``pycbc.frame`` module has its own documentation in ``docs/frame.rst`` that includes some custom text, and a copy of the automatic documentation from its classes and functions. + +How much text goes directly into the module docstrings, and how much is abstracted into a separate ``rst`` file in the ``docs/`` directory, is a matter of personal taste and of keeping the code readable, but should have little effect on the HTML documentation. + +Documenting PyCBC modules +========================= + +All PyCBC modules should be documented using ``sphinx`` syntax, in the same way that ``matplotlib``, ``numpy`` and ``scipy`` functions are listed. + +The numpy github repository includes a `nice style guide `_ for documenting modules and their members using sphinx. + + +Documenting PyCBC scripts +========================= + +Documenting command-line scripts isn't ideal in any documentation language, including sphinx. + +In ``PyCBC``, command-line scripts in the ``bin`` directory of the git repository should be accompanied by a reStructuredText file in ``docs/bin``. + +However, at some point this directory was removed, and the current recommendation for where such files should live is still an open question. + +Such a file contains an overview of what the code does and some other information, followed by a dump of the command-line ``--help`` message via this directive:: + + .. command-output:: pycbc_inspiral --help diff --git a/latest/html/_sources/extend.rst.txt b/latest/html/_sources/extend.rst.txt new file mode 100644 index 00000000000..e76f7fef89d --- /dev/null +++ b/latest/html/_sources/extend.rst.txt @@ -0,0 +1,12 @@ +===================================== +Extending PyCBC with external plugins +===================================== + +Would you like to use a waveform model that PyCBC doesn't have? Or maybe +you have your own waveform you'd like to use for a search, parameter estimation, +etc. PyCBC supports a plug-in architecture for external waveform models. + +.. toctree:: + :maxdepth: 1 + + waveform_plugin diff --git a/latest/html/_sources/faithsim.rst.txt b/latest/html/_sources/faithsim.rst.txt new file mode 100644 index 00000000000..62fa3064024 --- /dev/null +++ b/latest/html/_sources/faithsim.rst.txt @@ -0,0 +1,116 @@ +################################################ +Dag Generator for Doing Faithfulness Comparisons +################################################ + +============ +Introduction +============ + +This page describes how to use the faithfulness dag generator within +PyCBC. + +========================== +How to generate a workflow +========================== + +------------------------------------ +Creating a configuration (.ini) file +------------------------------------ + +All the choices when setting up a faithsim are contained +in a single configuration file. + +Below is an example. + +.. literalinclude:: ../examples/faith/faithsim_simple.ini + +There are four sections that must be present: [inspinj], [executables], [workflow], and [faithsim-XXX]. + + #. inspinj + + This section sets the parameters of all of the injection waveforms. + The arguments in the configuration file are fed directly to the + lalapps_inspinj program to create an injection file. + The same arguments are available, and the same restrictions apply. 
+ The number of injections can be set by using the GPS start and end time + options along with the time step. + Note, however, that the waveform name is required but does not + determine the actual approximants that will be compared. That is set in the [faithsim-XXX] + sections. + + #. executables + + This section lists the location of the pycbc_faithsim script. Make note + that the script is copied to the executables folder + and that is the version that will be used. + + #. workflow + + This section has options that configure the workflow. + The required options are 'log-path' and 'templates-per-job'. The + 'log-path' specifies the directory to store + condor log files. The 'templates-per-job' option determines + how many faithfulness calculations each job will do. The + injection file is split into smaller portions to match this + restriction. This option + is directly proportional to the running time of + each job. + + Faith simulations running on LDG clusters must include the + 'accounting-group' option in the workflow section. The value + must be chosen according to the + `Accounting information web page `_. + + #. faithsim-XXX + Multiple sections with a name of the form 'faithsim-USER_STRING' can exist. + The generator will create jobs that correspond to each of these sections + and each will generate an independent results + file labeled with the same USER_STRING. + + These sections correspond to the arguments sent to the + faithsim executable. The two approximants to compare, along with their + PN order parameters (if relevant), are set here. Note that + the option filter-waveform-length must be set to a value + greater than the duration of the longest generated + approximant. + +------------------------ +Generating the workflow +------------------------ + +Once a configuration file has been made, create a +workspace directory and place the file into it. +Running the following command will generate a dag +that will submit the required jobs. + +.. code-block:: bash + + pycbc_make_faithsim --conf YOUR_INI_FILE.ini + +The workflow can then be submitted by running the +generated shell script. + +.. code-block:: bash + + sh submit.sh + +------------------------- +Understanding the results +------------------------- + +The main results of the faithsim are results files, one for each +faithsim section in the configuration file. These are whitespace-separated ASCII files. + +Some basic plots are also generated automatically and +placed into the 'plots' folder. + +The pycbc_faithsim_plots script located in the +scripts folder is an example of +how to read the results files. + +-------------------- +Example config files +-------------------- +*ADD SOME HERE* + +Contact Alex Nitz for some more detailed examples of configuration files. diff --git a/latest/html/_sources/fft.rst.txt b/latest/html/_sources/fft.rst.txt new file mode 100644 index 00000000000..fa261a8db3f --- /dev/null +++ b/latest/html/_sources/fft.rst.txt @@ -0,0 +1,169 @@ +################################################### +Performing FFTs in PyCBC +################################################### + +============ +Introduction +============ + +Many applications in gravitational-wave analysis rely on Fast Fourier +Transforms. These are often the dominant computational cost in analyses. PyCBC +needs to balance the requirement that analyses be efficient with ease of use +for end users. To meet this requirement PyCBC provides two different APIs to +do FFTs: + +* A function-based API, which is easy to use, but not optimized. 
+* A class-based API, which is a little more involved to use, but allows the use of optimized FFT routines. + +These APIs offer access to a number of FFT backends. PyCBC knows how to do FFTs +using the FFTW, MKL and numpy backends, and will enable these if they are +present on your system. By default FFTW will be used, then MKL if FFTW is not +present, and numpy only if neither of the other two is available. However, you can +override this and choose a specific backend if multiple are available. + +When running on GPUs, PyCBC knows how to do CUDA FFTs through the same +interface. + +============================ +Using the function-based API +============================ + +The PyCBC function-based API offers a simple way to Fourier transform an +input array into an output array. This is done by:: + + >>> from pycbc import fft + >>> fft.fft(input_array, output_array) + +Or for an inverse FFT:: + + >>> from pycbc import fft + >>> fft.ifft(input_array, output_array) + +This requires the output_array, which will hold the Fourier +transform of the input, to already exist as an array of zeros. This output array +must be the correct *length*, the correct *type* (complex or real) and the +correct *precision* (double or float precision). It's also worth noting that +fast Fourier transforms are more efficient if their lengths are 2**N where +N is some integer. This becomes a little complicated if doing real->complex +or complex->real transforms; in those cases the longer array should have a +length of 2**N to be efficient. + +Here are a few examples:: + + >>> import numpy as np + >>> from pycbc import types + >>> from pycbc import fft + >>> inarr = types.Array(np.ones([64], dtype=np.complex64)) + >>> outarr = types.Array(np.zeros([64], dtype=np.complex64)) + >>> fft.fft(inarr, outarr) + >>> print(outarr) + +or (note here the length of the complex array is the length of the real array divided by 2, plus 1):: + + >>> import numpy as np + >>> from pycbc import types + >>> from pycbc import fft + >>> inarr = types.Array(np.ones([64], dtype=np.float32)) + >>> outarr = types.Array(np.zeros([33], dtype=np.complex64)) + >>> fft.fft(inarr, outarr) + >>> print(outarr) + +or (this one is an inverse FFT):: + + >>> import numpy as np + >>> from pycbc import types + >>> from pycbc import fft + >>> inarr = types.Array(np.ones([33], dtype=np.complex64)) + >>> outarr = types.Array(np.zeros([64], dtype=np.float32)) + >>> fft.ifft(inarr, outarr) + >>> print(outarr) + +This will work with the PyCBC TimeSeries and FrequencySeries as well, except you +must FFT a TimeSeries to a FrequencySeries or IFFT a FrequencySeries to a +TimeSeries. In this case the time and frequency spacing must also be +consistent. For this reason we provide convenience functions that use the +function API, but figure out these details, and create the output array, for +you. As an example:: + + >>> import numpy as np + >>> from pycbc import types + >>> inarr = types.TimeSeries(np.ones([64], dtype=np.float64), delta_t=1./64.) + >>> outarr = inarr.to_frequencyseries() + +or:: + + >>> import numpy as np + >>> from pycbc import types + >>> inarr = types.FrequencySeries(np.ones([33], dtype=np.complex128), delta_f=1.) + >>> outarr = inarr.to_timeseries() + + +========================= +Using the class-based API +========================= + +The PyCBC class-based API should be used if you care about performance. 
If you are performing FFTs many times, with inputs that are the same size each time, using this will offer a significant performance improvement. + +Here's how to use this:: + + >>> from pycbc import fft + >>> fft_class = fft.FFT(inarr, outarr) + >>> fft_class.execute() + >>> outarr *= inarr._delta_t # ONLY IF inarr is a TimeSeries + >>> outarr *= inarr._delta_f # ONLY IF inarr is a FrequencySeries + +Or for an inverse FFT:: + + >>> from pycbc import fft + >>> ifft_class = fft.IFFT(inarr, outarr) + >>> ifft_class.execute() + >>> outarr *= inarr._delta_t # ONLY IF inarr is a TimeSeries + >>> outarr *= inarr._delta_f # ONLY IF inarr is a FrequencySeries + +The idea would be that the `fft_class` or `ifft_class` would only be created +*once* and the execute command called many times. You would change the contents +of inarr before each call and outarr will update when execute is run. After +creating the FFT class *do not* reassign inarr, but instead set values. So:: + + >>> fft_class = fft.FFT(inarr, outarr) + >>> inarr = types.TimeSeries(np.ones([64], dtype=np.float64), delta_t=1./64.) + >>> fft_class.execute() + +would not work! Instead do:: + + >>> fft_class = fft.FFT(inarr, outarr) + >>> inarr[:] = np.ones([64], dtype=np.float64)[:] + >>> fft_class.execute() + + +=========================== +Choosing a specific backend +=========================== + +If you want to choose a specific backend, you can see what is available with:: + + >>> from pycbc.fft import backend_support + >>> backend_support.get_backend_names() + +and then do:: + + >>> from pycbc.fft import backend_support + >>> backend_support.set_backend(['mkl']) + +to set a specific backend. Running:: + + >>> from pycbc.fft import backend_support + >>> backend_support.get_backend() + +will tell you what you are currently using. You can also use the +MKL `Scheme` to default to using MKL FFTs, instead of FFTW. + +==================== +Method documentation +==================== + +.. automodule:: pycbc.fft + :noindex: + :members: diff --git a/latest/html/_sources/filter.rst.txt b/latest/html/_sources/filter.rst.txt new file mode 100644 index 00000000000..fe6ee0ef812 --- /dev/null +++ b/latest/html/_sources/filter.rst.txt @@ -0,0 +1,31 @@ +################################################### +Filtering +################################################### + +===================================== +Applying highpass / lowpass filters +===================================== + +.. plot:: ../examples/filter/pass.py + :include-source: + +===================================== +Applying an FIR filter +===================================== + +.. plot:: ../examples/filter/fir.py + :include-source: + +===================================== +Matched Filter SNR +===================================== + +.. plot:: ../examples/filter/snr.py + :include-source: + +===================================== +Chisq time series +===================================== + +.. plot:: ../examples/filter/chisq.py + :include-source: diff --git a/latest/html/_sources/formats/hdf_format.rst.txt b/latest/html/_sources/formats/hdf_format.rst.txt new file mode 100644 index 00000000000..4b80e2906a9 --- /dev/null +++ b/latest/html/_sources/formats/hdf_format.rst.txt @@ -0,0 +1,141 @@ +############################################################ +HDF files within the PyCBC workflow +############################################################ + +.. note:: + + Format specifications are provided here to aid in development. 
The canonical + definition, as always, lives within the code itself. + +========================= +single inspiral triggers +========================= + +***************** +Executables +***************** + + * pycbc_inspiral + +***************** +Specification +***************** + +All keys in the inspiral output are prefixed with the IFO name, e.g. H1, L1. Currently, +only a single ifo is present in each file, but at a future date, multiple may +be allowed. + +The following table consists of columns of trigger data. Each column is of the same length +and an index into one column corresponds to the same trigger in each of the other columns. + +.. csv-table:: Column vectors of trigger data + :header: "path", "description" + + "IFO/snr", "The magnitude of the complex SNR" + "IFO/coa_phase", "The phase of the complex SNR" + "IFO/end_time", "The GPS time of the trigger" + "IFO/chisq", "Value of the Bruce power chisq" + "IFO/chisq_dof", "Not DOF. The number of bins in the chisq. DOF = 2 * (num_bins -1)" + "IFO/bank_chisq", "Value of the bank chisq" + "IFO/bank_chisq_dof", "Number of templates used to construct the bank chisq" + "IFO/cont_chisq", "Value of the autochisq" + "IFO/cont_chisq_dof", "Number of dof for the auto chisq" + "IFO/template_duration", "Duration of the template approximant used for this trigger" + "IFO/sigmasq", "The weighted power of the template, placed at 1 Mpc, used for this trigger" + +.. csv-table:: Additional Data + :header: "path", "description" + + "IFO/search/start_time", "Array of GPS times which denote the start of a valid period of triggers" + "IFO/search/end_time", "Array of GPS times which denote the corresponding end of a valid period of triggers" + + +The following are columns that exist in the file, but should not be used by any user. +Their definition or existence is subject to change without notice. + +.. csv-table:: reserved columns + :header: "path" + + "IFO/template_hash" + +================================== +combined single inspiral triggers +================================== + +***************** +Executables +***************** + + * pycbc_coinc_mergetrigs + +***************** +Specification +***************** + +All keys in the inspiral output are prefixed with the IFO name, e.g. H1, L1. Currently, +only a single ifo is present in each file, but at a future date, multiple may +be allowed. + +The following table consists of columns of trigger data. Each column is of the same length +and an index into one column corresponds to the same trigger in each of the other columns. + + +.. csv-table:: Column vectors of trigger data + :header: "path", "description" + + "IFO/snr", "The magnitude of the complex SNR" + "IFO/coa_phase", "The phase of the complex SNR" + "IFO/end_time", "The GPS time of the trigger" + "IFO/chisq", "Value of the Bruce power chisq" + "IFO/chisq_dof", "Not DOF. The number of bins in the chisq. DOF = 2 * (num_bins -1)" + "IFO/bank_chisq", "Value of the bank chisq" + "IFO/bank_chisq_dof", "Number of templates used to construct the bank chisq" + "IFO/cont_chisq", "Value of the autochisq" + "IFO/cont_chisq_dof", "Number of dof for the auto chisq" + "IFO/template_duration", "Duration of the template approximant used for this trigger" + "IFO/sigmasq", "The weighted power of the template, placed at 1 Mpc, used for this trigger" + "IFO/template_id", "The unique template id value. 
This is the index into the hdf template file format" + +The key feature that the combined trigger format adds is the convenience of precalculated region +references to access only the data produced by a given template. These values are stored in region +reference arrays. The length of each array is the same as the number of templates, and an index +into the array matches the template_id number. Each array directly maps to a single column. + +.. csv-table:: region reference arrays + :header: "path" + + "IFO/bank_chisq_dof_template" + "IFO/bank_chisq_template" + "IFO/chisq_dof_template" + "IFO/chisq_template" + "IFO/coa_phase_template" + "IFO/cont_chisq_dof_template" + "IFO/cont_chisq_template" + "IFO/end_time_template" + "IFO/sigmasq_template" + "IFO/snr_template" + "IFO/template_boundaries" + "IFO/template_duration_template" + +.. csv-table:: Additional Data + :header: "path", "description" + + "IFO/search/start_time", "Array of GPS times which denote the start of a valid period of triggers" + "IFO/search/end_time", "Array of GPS times which denote the corresponding end of a valid period of triggers" + +********************* +Example uses +********************* + +Accessing triggers by template: + +.. code-block:: python + + import h5py + f = h5py.File('H1-testdata.hdf', 'r') + snr_regs = f['H1/snr_template'] + snr_template_0 = f['H1/snr'][snr_regs[0]] + + + diff --git a/latest/html/_sources/frame.rst.txt b/latest/html/_sources/frame.rst.txt new file mode 100644 index 00000000000..0966c1abafc --- /dev/null +++ b/latest/html/_sources/frame.rst.txt @@ -0,0 +1,59 @@ +################################################### +Reading Gravitational-wave Frames +################################################### + +============ +Introduction +============ + +All data generated and recorded by the current generation of ground-based laser-interferometer gravitational-wave detectors are recorded in gravitational-wave frame (GWF) files. These files typically contain data from a number of sources bundled into a single, time-stamped set, along with the metadata for each channel. + +====================== +Querying an LDR server +====================== + +The LIGO Data Replicator (LDR) is a tool for replicating data sets to the different data grids. If you have access to an LDR server you can read GWF files using the ``pycbc.frame`` module as follows:: + + >>> from pycbc import frame + >>> tseries = frame.query_and_read_frame("G1_RDS_C01_L3", "G1:DER_DATA_H", 1049587200, 1049587200 + 60) + +This returns a ``TimeSeries`` instance of the data. Note that if you do not have access to frames through an LDR server then you will need to copy the frames to your run location. + +Alternatively, if you just want the location of the frame files, you can do:: + + >>> from pycbc import frame + >>> frame_files = frame.frame_paths("G1_RDS_C01_L3", 1049587200, 1049587200 + 60) + +This will return a ``list`` of the frame files' paths. + +===================== +Reading a frame file +===================== + +The ``pycbc.frame`` module provides methods for reading these files into ``TimeSeries`` objects as follows:: + + >>> from pycbc import frame + >>> data = frame.read_frame('G-G1_RDS_C01_L3-1049587200-60.gwf', 'G1:DER_DATA_H', 1049587200, 1049587200 + 60) + +Here the first argument is the path to the frame file of interest, while the second lists the `data channel` of interest whose data exist within the file. 
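+
+The returned ``TimeSeries`` carries its metadata with it, so once the data has
+been read you can inspect and trim it directly. The following is a minimal
+sketch reusing the illustrative frame file, channel, and GPS times from the
+example above; the exact values are placeholders::
+
+    >>> from pycbc import frame
+    >>> data = frame.read_frame('G-G1_RDS_C01_L3-1049587200-60.gwf', 'G1:DER_DATA_H', 1049587200, 1049587200 + 60)
+    >>> print(data.delta_t, float(data.start_time), float(data.duration))
+    >>> # Keep only the middle 30 seconds of the segment
+    >>> chunk = data.time_slice(1049587215, 1049587245)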
+ +===================== +Writing a frame file +===================== + +The ``pycbc.frame`` module provides a method for writing ``TimeSeries`` instances to GWF files as follows:: + + >>> import numpy + >>> from pycbc import frame + >>> from pycbc import types + >>> data = types.TimeSeries(numpy.ones(16384 * 16), delta_t=1.0 / 16384) + >>> frame.write_frame("./test.gwf", "H1:TEST_UNITY", data) + +Here the first argument is the path to the output frame file, the second is the name of the channel, and the last is the ``TimeSeries`` instance to be written to the frame. + +==================== +Method documentation +==================== + +.. automodule:: pycbc.frame + :noindex: + :members: diff --git a/latest/html/_sources/genindex.rst.txt b/latest/html/_sources/genindex.rst.txt new file mode 100644 index 00000000000..9e530fa2fd5 --- /dev/null +++ b/latest/html/_sources/genindex.rst.txt @@ -0,0 +1,2 @@ +Index +===== diff --git a/latest/html/_sources/gw150914.rst.txt b/latest/html/_sources/gw150914.rst.txt new file mode 100644 index 00000000000..f4dc9a0cb16 --- /dev/null +++ b/latest/html/_sources/gw150914.rst.txt @@ -0,0 +1,40 @@ +################################################### +Signal Processing with GW150914 +################################################### + +Here are some interesting examples of how to process LIGO data using GW150914 +as an example. + +===================================== +Plotting the whitened strain +===================================== + +.. plot:: ../examples/gw150914/gw150914_shape.py + :include-source: + +============================================== +Calculate the signal-to-noise +============================================== + +.. plot:: ../examples/gw150914/gw150914_h1_snr.py + :include-source: + +============================================== +Listen to GW150914 in Hanford +============================================== + + +Here we'll make a frequency-shifted and slowed version of GW150914 as +it can be heard in the Hanford data. + +.. literalinclude:: ../examples/gw150914/audio.py + +:download:`The audio ` + +Note: Google Chrome may not play WAV files correctly; please download the file to listen. + +.. raw:: html + +