"""
MADYS
Tool for age and mass determination of young stellar and substellar objects.
Reference: Squicciarini & Bonavita (2022), A&A 666, A15
Given a list of stars, it:
- retrieves and cross-matches photometry from several catalogs;
- corrects for interstellar extinction;
- assesses the quality of each photometric measurement;
- uses reliable photometric data to derive physical parameters (notably ages and masses)
of individual stars.
In the current release, MADYS allows a selection of one among 20 theoretical models,
many of which have several customizable parameters (metallicity, rotational velocity,
etc.). Have a look at the GitHub repository and at the ReadTheDocs page for additional details.
Classes:
- SampleObject
- FitParams
- IsochroneGrid
- ModelHandler
- CurveObject
"""
import sys
import os
madys_path = os.path.dirname(os.path.realpath(__file__))
import copy
import warnings
import logging
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d, RectBivariateSpline
from scipy.ndimage import label, center_of_mass
import time
from astropy import units as u
from astropy.constants import M_jup,M_sun,R_jup,R_sun
from astropy.coordinates import SkyCoord, Galactocentric, galactocentric_frame_defaults
from astropy.io import ascii, fits
from astropy.io.votable.exceptions import E19
from astropy.table import Table, Column, vstack, hstack, MaskedColumn
from astroquery.simbad import Simbad
import h5py
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tabulate import tabulate
from json import JSONDecodeError
import gzip
import urllib
import shutil
import pickle
import re
from xml.parsers.expat import ExpatError
from http.client import RemoteDisconnected
try:
from tap import (GaiaArchive, QueryStr)
except ModuleNotFoundError:
os.system('pip install git+https://github.com/mfouesneau/tap')
from tap import (GaiaArchive, QueryStr)
gaia = GaiaArchive()
dt = h5py.special_dtype(vlen=str)
MADYS_VERSION = 'v1.3.0'
def closest(array, value):
"""Given an "array" and a (list of) "value"(s), finds the j(s) such that |array[j]-value|=min((array-value)).
"array" must be monotonic increasing. j=-1 or j=len(array) is returned
to indicate that "value" is out of range (below and above, respectively)."""
n = len(array)
if hasattr(value, '__len__') == False:
if (value < array[0]):
return 0
elif (value > array[n-1]):
return n-1
jl = 0
ju = n-1
while (ju-jl > 1):
jm = (ju+jl) >> 1
if (value >= array[jm]):
jl = jm
else:
ju = jm
if (value == array[0]):
return 0
elif (value == array[n-1]):
return n-1
else:
jn = jl + np.argmin([value-array[jl], array[jl+1]-value])
return jn
else:
nv = len(value)
jn = np.zeros(nv, dtype='int32')
for i in range(nv):
if (value[i] < array[0]):
jn[i] = 0
elif (value[i] > array[n-1]):
jn[i] = n-1
else:
jl = 0
ju = n-1
while (ju-jl > 1):
jm=(ju+jl) >> 1
if (value[i] >= array[jm]):
jl = jm
else:
ju = jm
if (value[i] == array[0]):
jn[i] = 0
elif (value[i] == array[n-1]):
jn[i] = n-1
else:
jn[i] = jl+np.argmin([value[i]-array[jl], array[jl+1]-value[i]])
return jn
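# Illustrative example (not part of the original module): closest() performs a bisection search
# and returns the index of the nearest grid point, clamping out-of-range inputs.
# >>> closest(np.array([1., 2., 4., 8.]), 3.1)          # -> 2 (array[2] = 4. is the nearest value)
# >>> closest(np.array([1., 2., 4., 8.]), [0.5, 5.0])   # -> array([0, 2]); out-of-range values are clamped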
def where_v(elements, array, approx=False, assume_sorted=True):
"""Returns the index of the first occurrence of each of the "elements" in the
"array". If approx==True, the indices of closest matchs
are returned instead (in this case, the array is supposed to be sorted. Use
assume_sorted=False instead.)"""
if isinstance(array, list): array = np.array(array)
try:
dd = len(elements)
if isinstance(elements, list):
elements = np.array(elements)
dim = len(elements.shape)
except TypeError: dim = 0
if approx == True:
if assume_sorted == False:
i_sort = np.argsort(array)
array2 = array[i_sort]
else:
array2 = array
if dim == 0:
w = closest(array2, elements)
return w
ind = np.zeros(len(elements), dtype=np.int16)
for i in range(len(elements)):
ind[i] = closest(array2,elements[i])
if assume_sorted:
return ind
else:
return i_sort[ind]
else:
if dim == 0:
w, = np.where(array == elements)
return w
ind = np.zeros(len(elements), dtype=np.int16)
for i in range(len(elements)):
w, = np.where(array == elements[i])
if len(w) == 0:
ind[i] = len(array)
else:
ind[i] = w[0]
return ind
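# Illustrative example (not part of the original module): with approx=False an exact match is
# required, while approx=True maps each element to the nearest entry via closest().
# >>> where_v(np.array([4, 1]), np.array([1, 2, 4, 8]))                   # -> array([2, 0])
# >>> where_v(np.array([3.9, 0.5]), np.array([1, 2, 4, 8]), approx=True)  # -> array([2, 0])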
def nansumwrapper(a, axis=None, **kwargs):
"""Wrapper for np.nansum. Unlike np.nansum, returns np.nan (and
not 0) if all the elements of "a" are np.nan."""
ma = np.isnan(a) == False
sa = np.nansum(ma, axis=axis)
sm = np.nansum(a, axis=axis,**kwargs)
sm = np.where(sa == 0, np.nan, sm)
return sm
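# Illustrative example (not part of the original module): slices made only of NaNs yield NaN instead of 0.
# >>> nansumwrapper(np.array([[np.nan, np.nan], [1., np.nan]]), axis=1)   # -> array([nan, 1.])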
def repr_table(table):
"""Returns the correct __repr__ of an astropy Table."""
aa = {}
for col in list(table.columns):
aa[col] = table[col].data
r = repr(aa)
r = r.replace('array', 'np.array')
try:
eval('nan')
except NameError:
r = r.replace('nan', 'np.nan')
return "Table("+r+")"
def info_filters(x=None):
"""Provides info on all available filters (if no argument is provided) or on a specific filter (specified as an argument)."""
if x is None:
temp_filters = copy.deepcopy(stored_data['filters'])
del temp_filters['logT'], temp_filters['logR'], temp_filters['logL'], temp_filters['logg']
print('Available filters for MADYS: ')
print('')
print(', '.join(temp_filters)+'.')
print('')
print('Available physical parameters for MADYS: ')
print(', '.join(['logg','logL','logR','logT'])+'.')
else:
try:
print(stored_data['filters'][x]['description'])
if (x!='logT') & (x!='logL') & (x!='logg') & (x!='logR'):
print('Wavelength: '+'{:.3f}'.format(stored_data['filters'][x]['wavelength'])+' micron')
print('Absolute extinction A(l)/A(V): '+'{:.3f}'.format(stored_data['filters'][x]['A_coeff']))
except KeyError:
raise ValueError("Quantity '"+x+"' not found: check the spelling and try again. Available filters and physical parameters: "+', '.join(stored_data['filters'])+'.')
def make_logo():
zenodo_path = list(stored_data['complete_model_list'].values())[0].split('files')[0]
logo = """
Welcome to
.----------------. .----------------. .----------------. .----------------. .----------------.
| .--------------. || .--------------. || .--------------. || .--------------. || .--------------. |
| | ____ ____ | || | __ | || | ________ | || | ____ ____ | || | _______ | |
| ||_ \ / _|| || | / \ | || | |_ ___ `. | || | |_ _||_ _| | || | / ___ | | |
| | | \/ | | || | / /\ \ | || | | | `. \ | || | \ \ / / | || | | (__ \_| | |
| | | |\ /| | | || | / ____ \ | || | | | | | | || | \ \/ / | || | '.___`-. | |
| | _| |_\/_| |_ | || | _/ / \ \_ | || | _| |___.' / | || | _| |_ | || | |`\____) | | |
| ||_____||_____|| || ||____| |____|| || | |________.' | || | |______| | || | |_______.' | |
| | | || | | || | | || | | || | | |
| '--------------' || '--------------' || '--------------' || '--------------' || '--------------' |
'----------------' '----------------' '----------------' '----------------' '----------------'
version: {0}
@ V. Squicciarini & M. Bonavita
code repository: https://github.com/vsquicciarini/madys
model repository: {1}
documentation: https://madys.readthedocs.io/en/latest/
""".format(MADYS_VERSION, zenodo_path)
return logo
def add_mag(mag1, mag2):
"""Adds two magnitudes."""
return -2.5*np.log10(10**(-0.4*mag1)+10**(-0.4*mag2))
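# Worked example (illustrative): two equally bright sources add up to a magnitude
# 2.5*log10(2) ~ 0.753 mag brighter than either component.
# >>> add_mag(5.0, 5.0)   # -> ~4.247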
stored_data = {'models': {'data': {}, 'parameters':{}}}
class ModelHandler(object):
"""
Class: madys.ModelHandler
Class that handles data and metadata of the files in the /isochrone path.
Input:
- model_grid: string, required. Selected isochrone grid model. Use ModelHandler.available() to return a list of available models.
- resolve: bool, optional. If True, searches for the location of the correct file. If False, its location is recovered from the dictionary 'stored_data'. Default: False.
Attributes:
- file: string. Full path to the local file corresponding to model_grid;
- age_range: 2-element list. Minimum and maximum age of the grid [Myr].
- mass_range: 2-element list. Minimum and maximum mass of the grid [M_sun].
- header: string. Verbose description of the model_grid.
- feh: float. [Fe/H] of the grid.
- he: float. Helium content of the grid.
- afe: float. Alpha enhancement [a/Fe] of the grid.
- v_vcrit: float. Rotational velocity of the grid.
- fspot: float. Fraction of stellar surface covered by star spots.
- B: int. Whether magnetic fields are included (1) or not (0) in the grid.
Built-in methods:
1) __repr__
Returns self.header.
Methods (use help() to have more detailed info):
1) get_contents
Returns the full or part of the content of self.file, i.e. the data+metadata associated to a model_grid.
2) get_data
Wrapper for get_contents() that only returns the data associated to a model_grid.
3) available
Prints info about locally available models.
4) download_model
Downloads the specified model grid from the Zenodo repository.
"""
def __init__(self, model_grid, resolve=False):
self.model_grid = model_grid
if resolve:
found = False
fname = model_grid+'.h5'
for root, dirs, files in os.walk(os.path.join(madys_path,'isochrones')):
for name in files:
if os.path.isfile(os.path.join(root, fname)):
found = True
true_path = root
break
if not found:
raise ValueError('File {0} for model {1} does not exist. Are you sure it is in your search path?'.format(fname, model_grid))
self.file = os.path.join(true_path,fname)
else:
try:
self.file = stored_data['local_model_list'][model_grid]
except KeyError:
raise ValueError('Model {0} not found. Are you sure it is in your search path?'.format(model_grid))
model_info = self.get_contents(get_isochrone_ranges=True, get_headers=True, get_attributes=True)
attrs = model_info['attributes']
self.B, self.afe, self.feh, self.fspot, self.he = attrs['B'], attrs['afe'], attrs['feh'], attrs['fspot'], attrs['he']
self.header = model_info['headers']
self.mass_range, self.age_range = model_info['agemass_range']
def __repr__(self):
return self.header[0]
def get_data(self):
"""
Wrapper for ModelHandler.get_contents(get_data = True)['data'], i.e. with only 'get_data' set to True.
Input: None.
Output:
- masses: numpy array. The n_m masses of the grid [M_sun].
- ages: numpy array. The n_a ages of the grid [Myr].
- filters: numpy array. List of the n_f filters in the grid.
- dic: dictionary. Contains all the metadata of the file, excluding the headers.
- data: numpy array with shape (n_m,n_a,n_f). Isochrone grid.
"""
return self.get_contents(get_data=True)['data']
def get_contents(self, get_data=False, get_isochrone_ranges=False, get_headers=False, get_attributes=False):
"""
Returns (part of) the content of self.file, i.e. the data associated to a model_grid.
Input:
- get_data: bool, optional. Whether to return (mass, age, filters, data) of the grid. Default=False.
- get_isochrone_ranges: bool, optional. Whether to return (mass_range, age_range, filters) of the grid. Default=False.
- get_headers: bool, optional. Whether to return (header, version_header) of the grid. Default=False.
- get_attributes: bool, optional. Whether to return all the attributes of the grid BUT the headers. Default=False.
Output:
- results: dictionary. Depending on the truth value of the named keywords, it can or cannot contain the following keywords:
- attributes: dictionary. Contains all the metadata of the file, excluding the headers.
- headers: tuple. Contains the model suite header and the (usually empty) model version header.
- agemass_range: tuple. Two elements:
- a 2-element list with [min(mass), max(mass)], measured in M_sun;
- a 2-element list with [min(age), max(age)], measured in Myr.
- filters: numpy array. List of the n_f filters in the grid.
- data: tuple containing the following:
- masses: numpy array. The n_m masses of the grid [M_sun].
- ages: numpy array. The n_a ages of the grid [Myr].
- filters: numpy array. List of the n_f filters in the grid.
- dic: dictionary. Contains all the metadata of the file, excluding the headers.
- data: numpy array with shape (n_m,n_a,n_f). Isochrone grid.
"""
results = {}
with h5py.File(self.file, "r") as f:
if get_attributes:
dic = {}
for i in f.attrs.keys():
if (i == 'header') | (i == 'version_header'): continue
dic[i] = f.attrs[i]
results['attributes'] = dic
if get_headers:
header = f.attrs['header']
for j in range(10):
header = header.replace(' #','#')
try:
version_header = f.attrs['version_header']
for j in range(10):
version_header = version_header.replace(' #','#')
headers = (header.rstrip(), version_header.rstrip())
except KeyError:
headers = (header.rstrip(), """""")
results['headers'] = headers
if get_isochrone_ranges:
m = f.get('masses')[:]
a = f.get('ages')[:]
fi = f.get('filters')[:]
fi = np.array(fi, dtype='str')
results['agemass_range'] = ([np.min(m),np.max(m)], [np.min(a),np.max(a)])
results['filters'] = fi
if get_data:
if self.model_grid in stored_data['models']['data'].keys():
results['data'] = stored_data['models']['data'][self.model_grid]
else:
with h5py.File(self.file, "r") as f:
m = f.get('masses')[:]
a = f.get('ages')[:]
fi = f.get('filters')[:]
dat = f.get('data')[:]
dic = {}
for i in f.attrs.keys():
if (i == 'header') | (i == 'version_header'): continue
dic[i] = f.attrs[i]
fi = np.array(fi, dtype='str')
data = (m, a, fi, dic, dat)
stored_data['models']['data'][self.model_grid] = data
results['data'] = data
return results
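# Illustrative usage (assumes 'handler' is a ModelHandler instance for a locally available grid):
# only the requested blocks are read from the HDF5 file.
# >>> info = handler.get_contents(get_headers=True, get_isochrone_ranges=True)
# >>> info['agemass_range']   # ([min_mass, max_mass] in M_sun, [min_age, max_age] in Myr)
# >>> info['filters']         # numpy array with the filter names available in the grid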
@classmethod
def available(cls, key=None, verbose=True):
"""
A class method.
Prints info about locally available models.
Informs about 1) the calling sequence; 2) age and mass ranges; 3) list of filters;
4) available parameters / adopted parameters; 5) literature reference.
Input:
- key: string, optional. If selected, searches for the corresponding model. If None, prints info about all the available model suites. Default: None.
It can be either:
1) 'full_model_list';
2) a valid model_family;
3) a valid model_suite;
4) a valid model_version.
- verbose: bool, optional. If True, prints all information about a specific or all models. If False, only raises an error if the model is not found. Default: True.
Output: none, but the method prints:
1) key not provided: mass range, age range, available parameters, calling sequence for all the model versions belonging to each model suite;
2) key='full_model_list': taxonomic classification of the complete set of models available on Zenodo;
3) key is a model_suite: mass range, age range, available parameters, calling sequence for all the model versions belonging to each model suite;
4) key is a model_family: mass range, age range, available parameters, calling sequence for all the model versions belonging to each model family;
5) key is a model_version: mass range, age range, available parameters, calling sequence of the specified model version.
"""
attrs_list, header_list, version_header_list = [], [], []
mass_list, age_list, filter_list = [], [], []
for root, dirs, files in os.walk(madys_path):
for name in files:
if root.endswith('extinction'):
continue
if name.endswith('h5'):
model_grid = ModelHandler(name[:-3])
model_info = model_grid.get_contents(get_isochrone_ranges=True, get_headers=True, get_attributes=True)
headers = model_info['headers']
agemass_ranges = model_info['agemass_range']
header_list.append(headers[0])
version_header_list.append(headers[1])
mass_list.append(agemass_ranges[0])
age_list.append(agemass_ranges[1])
attrs_list.append(model_info['attributes'])
filter_list.append(model_info['filters'])
model_families = np.array([attr['model_family'].lower() for attr in attrs_list])
model_suites = np.array([attr['model_suite'].lower() for attr in attrs_list])
model_versions = np.array([attr['model_version'].lower() for attr in attrs_list])
feh_list = np.array([attr['feh'] for attr in attrs_list])
he_list = np.array([attr['he'] for attr in attrs_list])
afe_list = np.array([attr['afe'] for attr in attrs_list])
B_list = np.array([attr['B'] for attr in attrs_list])
v_vcrit_list = np.array([attr['v_vcrit'] for attr in attrs_list])
fspot_list = np.array([attr['fspot'] for attr in attrs_list])
unique_names = np.unique(model_suites)
__, i1, i2 = np.intersect1d(unique_names, model_suites, return_indices=True)
if key is None:
print('Available models for MADYS: ')
print('')
for i in i2:
print(header_list[i][:-1])
surveys = list(np.unique([stored_data['filters'][k]['survey'] for k in filter_list[i]]))
d = 1
while np.sum(np.around(mass_list[i],d)==0) > 0:
d += 1
if d == 8: break
mass_range = mass_list[i]
print(f'# Mass range (M_sun): [{mass_range[0]:.{d}f}, {mass_range[1]:.{d}f}]')
print('# Age range (Myr): '+str(list(np.around(age_list[i],1))))
print('# Available photometric systems: ')
print('# '+', '.join(surveys))
w, = np.where(np.array(model_suites) == model_suites[i])
feh = np.unique(feh_list[w]).astype(str)
print('# Available metallicities: ['+','.join(feh)+']')
v_vcrit = np.unique(v_vcrit_list[w]).astype(str)
print('# Available rotational velocities: ['+','.join(v_vcrit)+']')
afe = np.unique(afe_list[w]).astype(str)
print('# Available alpha enhancements: ['+','.join(afe)+']')
B = np.unique(B_list[w]).astype(str)
print('# Available magnetic field strengths: ['+','.join(B)+']')
fspot = np.unique(fspot_list[w]).astype(str)
print('# Available spot fractions: ['+','.join(fspot)+']')
he = np.unique(he_list[w]).astype(str)
print('# Available helium contents: ['+','.join(he)+']')
print('# Model family: {0}'.format(ModelHandler._get_model_tree_info(model_versions[i], 'model_family')))
print('# Model suite: {0}'.format(ModelHandler._get_model_tree_info(model_versions[i], 'model_suite')))
if version_header_list[i] != '':
versions = np.unique(np.array(model_versions)[w])
print("# Call it as: '"+"'/ '".join(versions)+"'")
else: print("# Call it as: '"+model_suites[i]+"'")
elif key == 'full_model_list':
print('Full list of models available on Zenodo: ')
zenodo_grids = list(stored_data['complete_model_list'].keys())
zenodo_versions = np.array([i.split('_')[0] for i in zenodo_grids])
zenodo_families = np.array([i.lower() for i in ModelHandler._get_model_tree_info(zenodo_versions, 'model_family')])
zenodo_suites = np.array([i.lower() for i in ModelHandler._get_model_tree_info(zenodo_versions, 'model_suite')])
unique_families = np.unique(zenodo_families)
for fam in unique_families:
print('Model family: '+fam)
w1, = np.where(zenodo_families == fam)
unique_suites = np.unique(zenodo_suites[w1])
for sui in unique_suites:
print(' Model suite: '+sui)
w2, = np.where(zenodo_suites==sui)
unique_versions = np.unique(zenodo_versions[w2])
for ver in unique_versions:
print(' Model version: '+ver)
w3, = np.where(zenodo_versions==ver)
for gri in w3:
print(' Model grid: '+zenodo_grids[gri])
print('')
else:
key = key.lower()
__, if1, if2 = np.intersect1d(model_families,[key], return_indices=True)
__, in1, in2 = np.intersect1d(model_suites,[key], return_indices=True)
__, is1, is2 = np.intersect1d(model_versions,[key], return_indices=True)
zenodo_grids = list(stored_data['complete_model_list'].keys())
zenodo_versions = np.array([i.split('_')[0] for i in zenodo_grids])
zenodo_families = np.array([i.lower() for i in ModelHandler._get_model_tree_info(zenodo_versions,'model_family')])
if len(in1)>0:
if verbose:
in1=in1[0]
print(header_list[in1][:-1])
surveys = list(np.unique([stored_data['filters'][k]['survey'] for k in filter_list[in1]]))
d=1
while np.sum(np.around(mass_list[in1],d)==0)>0:
d+=1
if d==8: break
mass_range = mass_list[in1]
print(f'# Mass range (M_sun): [{mass_range[0]:.{d}f}, {mass_range[1]:.{d}f}]')
print('# Age range (Myr): '+str(list(np.around(age_list[in1],1))))
print('# Available photometric systems: ')
print('# '+', '.join(surveys))
w, = np.where(np.array(model_suites)==model_suites[in1])
feh = np.unique(feh_list[w]).astype(str)
print('# Available metallicities: ['+','.join(feh)+']')
v_vcrit = np.unique(v_vcrit_list[w]).astype(str)
print('# Available rotational velocities: ['+','.join(v_vcrit)+']')
afe = np.unique(afe_list[w]).astype(str)
print('# Available alpha enhancements: ['+','.join(afe)+']')
B = np.unique(B_list[w]).astype(str)
print('# Available magnetic field strengths: ['+','.join(B)+']')
fspot = np.unique(fspot_list[w]).astype(str)
print('# Available spot fractions: ['+','.join(fspot)+']')
he = np.unique(he_list[w]).astype(str)
print('# Available helium contents: ['+','.join(he)+']')
print('# Model family: {0}'.format(ModelHandler._get_model_tree_info(model_versions[in1],'model_family')))
print('# Model suite: {0}'.format(ModelHandler._get_model_tree_info(model_versions[in1],'model_suite')))
if version_header_list[in1] != '':
versions = np.unique(np.array(model_versions)[w])
print("# Call it as: '"+"'/ '".join(versions)+"'")
else: print("# Call it as: '"+model_suites[in1]+"'")
elif len(is1)>0:
if verbose:
is1 = is1[0]
w, = np.where(np.array(model_versions) == model_versions[is1])
print(version_header_list[is1][:-1])
print(header_list[is1][1:-1])
surveys = list(np.unique([stored_data['filters'][k]['survey'] for k in filter_list[is1]]))
d=1
while np.sum(np.around(mass_list[is1],d) == 0) > 0:
d += 1
if d == 8: break
mass_range = mass_list[is1]
print(f'# Mass range (M_sun): [{mass_range[0]:.{d}f}, {mass_range[1]:.{d}f}]')
print('# Age range (Myr): '+str(list(np.around(age_list[is1],1))))
print('# Available photometric systems: ')
print('# '+', '.join(surveys))
feh = np.unique(feh_list[w]).astype(str)
print('# Available metallicities: ['+','.join(feh)+']')
v_vcrit = np.unique(v_vcrit_list[w]).astype(str)
print('# Available rotational velocities: ['+','.join(v_vcrit)+']')
afe = np.unique(afe_list[w]).astype(str)
print('# Available alpha enhancements: ['+','.join(afe)+']')
B = np.unique(B_list[w]).astype(str)
print('# Available magnetic field strengths: ['+','.join(B)+']')
fspot = np.unique(fspot_list[w]).astype(str)
print('# Available spot fractions: ['+','.join(fspot)+']')
he = np.unique(he_list[w]).astype(str)
print('# Available helium contents: ['+','.join(he)+']')
versions = np.unique(np.array(model_versions)[w])
print("# Call it as: '"+model_versions[is1]+"'")
elif len(if1)>0:
if verbose:
if1 = if1[0]
print('Model family: '+model_families[if1])
w, = np.where(np.array(model_families) == model_families[if1])
versions = np.unique(np.array(model_versions)[w])
__, is01, is02 = np.intersect1d(np.array(model_versions)[w], versions, return_indices=True)
for i in range(len(is01)):
print(version_header_list[w[is01[i]]][:-1])
print(header_list[w[is01[i]]][1:-1])
surveys = list(np.unique([stored_data['filters'][k]['survey'] for k in filter_list[w[is01[i]]]]))
d=1
while np.sum(np.around(mass_list[w[is01[i]]],d) == 0) > 0:
d += 1
if d == 8: break
mass_range = mass_list[w[is01[i]]]
print(f'# Mass range (M_sun): [{mass_range[0]:.{d}f}, {mass_range[1]:.{d}f}]')
print('# Age range (Myr): '+str(list(np.around(age_list[w[is01[i]]],1))))
print('# Available photometric systems: ')
print('# '+', '.join(surveys))
ww, = np.where(np.array(model_versions)==versions[is02[i]])
feh = np.unique(feh_list[ww]).astype(str)
print('# Available metallicities: ['+','.join(feh)+']')
v_vcrit = np.unique(v_vcrit_list[ww]).astype(str)
print('# Available rotational velocities: ['+','.join(v_vcrit)+']')
afe = np.unique(afe_list[ww]).astype(str)
print('# Available alpha enhancements: ['+','.join(afe)+']')
B = np.unique(B_list[ww]).astype(str)
print('# Available magnetic field strengths: ['+','.join(B)+']')
fspot = np.unique(fspot_list[ww]).astype(str)
print('# Available spot fractions: ['+','.join(fspot)+']')
he = np.unique(he_list[ww]).astype(str)
print('# Available helium contents: ['+','.join(he)+']')
print("# Call it as: '"+versions[is02[i]]+"'")
elif key in zenodo_versions:
print('The model '+key+' is not available in your working path.')
print('However, there are model grids on Zenodo, which you can download via ModelHandler.download_model().')
w, = np.where(zenodo_versions==key)
print('Available model grids within this model version:')
print(', '.join(np.array(zenodo_grids)[w]))
elif key in zenodo_families:
print('The selected model "'+key+'" is a family of models. No related grid is available in your working path.')
key_list = np.array(list(stored_data['model_families'].keys()))
key_list_lower = np.array([i.lower() for i in key_list])
w, = np.where(key_list_lower == key)
available_suites = stored_data['model_families'][key_list[w[0]]]
for suite in available_suites.keys():
versions = available_suites[suite]
for version in versions:
ModelHandler.available(version)
print('')
else:
mess='The selected model does not exist. Check the spelling and try again. Available models: '+', '.join(unique_names)+'.'
raise ValueError(mess)
@staticmethod
def _get_model_tree_info(model_version,info):
model_tree = stored_data['model_families']
if (info != 'model_family') & (info != 'model_suite'):
raise ValueError('Select one between "model_family" and "model_suite".')
if isinstance(model_version, str):
found = False
for model_family in model_tree.keys():
model_suites = model_tree[model_family]
for model_suite in model_suites.keys():
model_versions = model_suites[model_suite]
if model_version in model_versions:
found = True
break
if found: break
if found:
if info == 'model_family': return model_family
elif info == 'model_suite': return model_suite
return model_family, model_suite
else: raise ValueError('Invalid model provided: {0}'.format(model_version))
else:
l = []
for i in range(len(model_version)):
l.append(ModelHandler._get_model_tree_info(model_version[i],info))
return l
@staticmethod
def _available_parameters(model_suite_or_version):
if model_suite_or_version in stored_data['models']['parameters'].keys():
return stored_data['models']['parameters'][model_suite_or_version]
else:
attrs_list = []
for root, dirs, files in os.walk(madys_path):
for name in files:
if root.endswith('extinction'): continue
if name.endswith('h5'):
model_grid = ModelHandler(name[:-3])
attrs_list.append(model_grid.get_contents(get_attributes=True)['attributes'])
model_versions = [attr['model_version'] for attr in attrs_list]
model_suites = [attr['model_suite'] for attr in attrs_list]
feh_list = np.array([attr['feh'] for attr in attrs_list])
he_list = np.array([attr['he'] for attr in attrs_list])
afe_list = np.array([attr['afe'] for attr in attrs_list])
B_list = np.array([attr['B'] for attr in attrs_list])
v_vcrit_list = np.array([attr['v_vcrit'] for attr in attrs_list])
fspot_list = np.array([attr['fspot'] for attr in attrs_list])
__, is1, __ = np.intersect1d(model_suites, [model_suite_or_version], return_indices=True)
__, is2, __ = np.intersect1d(model_versions, [model_suite_or_version], return_indices=True)
if len(is1) > 0:
is1 = is1[0]
w, = np.where(np.array(model_suites) == model_suites[is1])
feh = np.unique(feh_list[w])
v_vcrit = np.unique(v_vcrit_list[w])
afe = np.unique(afe_list[w])
B = np.unique(B_list[w])
fspot = np.unique(fspot_list[w])
he = np.unique(he_list[w])
elif len(is2) > 0:
is2 = is2[0]
w, = np.where(np.array(model_versions) == model_versions[is2])
feh = np.unique(feh_list[w])
v_vcrit = np.unique(v_vcrit_list[w])
afe = np.unique(afe_list[w])
B = np.unique(B_list[w])
fspot = np.unique(fspot_list[w])
he = np.unique(he_list[w])
else:
raise ValueError('Model '+model_suite_or_version+' not found!')
res = {'feh':feh, 'he':he, 'afe':afe,
'v_vcrit':v_vcrit, 'fspot':fspot, 'B':B}
stored_data['models']['parameters'][model_suite_or_version] = res
return res
@classmethod
def _model_list(cls, dtype='grid', return_type='list'):
attrs_list = []
file_list = []
for root, dirs, files in os.walk(madys_path):
for name in files:
if root.endswith('extinction'): continue
if name.endswith('h5'):
model = ModelHandler(name[:-3],resolve=True)
attrs_list.append(model.get_contents(get_attributes = True)['attributes'])
file_list.append(model.file)
try:
res=[attr['model_'+dtype] for attr in attrs_list]
except KeyError: raise KeyError("Valid arguments for dtype: 'grid', 'version', 'name', 'family'. ")
if return_type=='dict':
unique_models = np.unique(res)
__, i1, i2 = np.intersect1d(unique_models,np.array(res),return_indices=True)
dic = {}
res = np.array(res)
for i, r in enumerate(res[i2]): dic[r] = file_list[i2[i]]
return dic
else: return np.unique(res)
@staticmethod
def _version_to_grid(model_version, model_params):
code_dict={'mist':'211000','starevol':'201000','spots':'200200','dartmouth':'21000Y',
'yapsi':'200020','pm13':'000000','parsec2':'202000'}
try:
code=code_dict[model_version]
except KeyError:
code='200000'
keys=['feh', 'afe', 'v_vcrit', 'fspot', 'he', 'B']
if model_version == 'starevol':
def_params = {'feh':-0.01, 'afe': 0.00, 'v_vcrit': 0.00, 'fspot': 0.00, 'B':0, 'he':0.27}
else:
def_params = {'feh':0.00, 'afe': 0.00, 'v_vcrit': 0.00, 'fspot': 0.00, 'B':0, 'he':0.27}
for key in def_params.keys():
if key not in model_params: model_params[key]=def_params[key]
model_grid=model_version+''
for i,k in enumerate(keys):
if code[i]=='0': continue
elif code[i]=='Y':
if model_params[k]==0: model_grid+='_B0'
else: model_grid+='_B1'
else:
value=model_params[k]
s='m' if value<0 else 'p'
ff="{:."+code[i]+"f}"
s1=ff.format(abs(value))
model_grid+='_'+s+s1
return model_grid
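# Worked example (illustrative): for 'mist' the code string '211000' keeps feh (2 decimals),
# afe and v_vcrit (1 decimal each) in the grid name; missing parameters take default values.
# >>> ModelHandler._version_to_grid('mist', {'feh': -0.50, 'v_vcrit': 0.4})
# 'mist_m0.50_p0.0_p0.4'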
@staticmethod
def _grid_to_version(model_grid):
code_dict={'mist':'211000','starevol':'201000','spots':'200200','dartmouth':'21000Y',
'yapsi':'200020','pm13':'000000','parsec2':'202000'}
split_model = model_grid.split('_')
model_version = split_model[0]
try:
code=code_dict[split_model[0]]
except KeyError:
code='200000'
sort_par = ['feh','afe','v_vcrit','fspot','he','B']
c=1
model_params={}
for i,k in enumerate(sort_par):
if code[i]=='0': continue
elif code[i]=='Y':
model_params[k]=int(split_model[c][1:])
c+=1
else:
model_params[k]=float(split_model[c].replace('p','+').replace('m','-'))
c+=1
return model_version,model_params
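# Worked example (illustrative), inverting the encoding above:
# >>> ModelHandler._grid_to_version('mist_m0.50_p0.0_p0.4')
# ('mist', {'feh': -0.5, 'afe': 0.0, 'v_vcrit': 0.4})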
@staticmethod
def _check_updates():
PIK=os.path.join(madys_path,'utils','models.pkl')
with open(PIK,'rb') as f:
local_model_db=pickle.load(f)
local_model_families=pickle.load(f)
local_zenodo_record=pickle.load(f)
current_release_model_list=list(local_model_db.keys())
try:
tmp=urllib.request.urlretrieve('https://github.com/vsquicciarini/madys/raw/main/utils/models.pkl')[0]
with open(tmp,'rb') as f:
github_model_db=pickle.load(f)
github_model_families=pickle.load(f)
github_zenodo_record=pickle.load(f)
github_model_list=list(github_model_db.keys())
if len(github_model_list)>len(current_release_model_list):
print('New models found on the GitHub repository. MADYS is updating its model list. Please wait...')
urllib.request.urlretrieve('https://github.com/vsquicciarini/madys/raw/main/utils/models.pkl',PIK)
print('Done.')
stored_data['complete_model_list'] = github_model_db
elif local_zenodo_record!=github_zenodo_record:
print('The Zenodo record appears to have changed. MADYS is updating its model list. Please wait...')
urllib.request.urlretrieve('https://github.com/vsquicciarini/madys/raw/main/utils/models.pkl',PIK)
print('Done.')
stored_data['complete_model_list'] = github_model_db
stored_data['model_families'] = github_model_families
else:
stored_data['complete_model_list'] = local_model_db
stored_data['model_families'] = local_model_families
except:
print('It was not possible to check the GitHub page for updates. Using the local list of models.')
stored_data['complete_model_list'] = local_model_db
stored_data['model_families'] = local_model_families
@staticmethod
def _load_local_models(reload=False):
if ('local_model_list' not in stored_data.keys()) | (reload==True):
stored_data['local_model_list'] = ModelHandler._model_list(return_type='dict')
@staticmethod
def _find_model_grid(model_version, start_params):
local_model_list = list(stored_data['local_model_list'].keys())
if isinstance(start_params,dict):
model_params1 = ModelHandler._find_match(model_version,start_params,list(stored_data['complete_model_list'].keys()))
sol1 = ModelHandler._version_to_grid(model_version,model_params1)
if len(local_model_list)==0:
print('No model '+model_version+' found in your local path. The best-matching model for your request would be '+sol1+'.')
while 1:
value = input("Do you want me to download this model? Selecting 'N' will end the program. [Y/N]:\n")
if str.lower(value)=='y':
print('Downloading the model...')
break
elif str.lower(value)=='n':
raise KeyboardInterrupt('No analysis is possible if the model is not present locally. Execution ended.')
else:
print("Invalid choice. Please select 'Y' or 'N'.")
if str.lower(value)=='y':
ModelHandler.download_model(sol1)
return sol1
elif sol1 not in local_model_list:
model_versions = np.array([mod.split('_')[0] for mod in local_model_list])
w, = np.where(model_versions == model_version)
n_m=len(w)
if n_m==0:
print('No model '+model_version+' found in your local path. The best-matching model for your request would be '+sol1+'.')
while 1:
value = input("Do you want me to download this model? Selecting 'N' will end the program. [Y/N]:\n")
if str.lower(value)=='y':
print('Downloading the model...')
break
elif str.lower(value)=='n':
raise KeyboardInterrupt('No analysis is possible if the model is not present locally. Execution ended.')
else:
print("Invalid choice. Please select 'Y' or 'N'.")
if str.lower(value)=='y':
ModelHandler.download_model(sol1)
return sol1
else:
true_model_list = list(np.array(local_model_list)[w])
chi2 = np.zeros(n_m)
for q in range(n_m):
par = ModelHandler._grid_to_version(true_model_list[q])[1]
for k in par.keys():
chi2[q] += (par[k]-model_params1[k])**2
arg_min = np.argmin(chi2)
sol2 = true_model_list[arg_min]
sol2_dict = ModelHandler._grid_to_version(true_model_list[arg_min])[1]
print('The closest locally available model (M1) to the input has: '+str(sol2_dict)+', but a closer match (M2) was found in the MADYS online database:')
print(str(model_params1)+'.')
while 1:
value = input("Do you want me to download this model? Press 'N' to use M1 instead. [Y/N]:\n")
if str.lower(value)=='y':
print('Downloading the model...')
break
elif str.lower(value)=='n':
break
else:
print("Invalid choice. Please select 'Y' or 'N'.")
if str.lower(value)=='y':
ModelHandler.download_model(sol1)
return sol1
else: return sol2
else:
return sol1
elif isinstance(start_params,list):
try:
ModelHandler._find_match(model_version,start_params[0],list(stored_data['complete_model_list'].keys()))
except ValueError as e:
msg = 'Model '+model_version+' does not exist. Check the spelling and re-run MADYS.'
e.args = (msg,)
raise
sol1_list=[]
for i in range(len(start_params)):
model_params1 = ModelHandler._find_match(model_version,start_params[i],list(stored_data['complete_model_list'].keys()))
sol1 = ModelHandler._version_to_grid(model_version,model_params1)
sol1_list.append(sol1)
__, i1, i2 = np.intersect1d(sol1_list,local_model_list,return_indices=True)
lacking_models = np.setdiff1d(sol1_list,local_model_list)
if len(lacking_models)==0:
pass
else:
if len(i1)>0:
print('Based on input parameters, it seems that some model grids are better suited to (a portion of) your sample than the grids that are locally available.')
print('Locally available grids: '+','.join(local_model_list))
print('MADYS suggests you download the following additional grids: '+', '.join(lacking_models))
else:
print('No grid for model version '+model_version+' was found locally.')
print('Based on input parameters, MADYS suggests you download the following grids: '+', '.join(lacking_models))
n_mod = len(lacking_models)
for i in range(n_mod):
print('Select code {0} to download model {1}'.format(i,lacking_models[i]))
while 1:
print('Type all the desired codes by separating them with a comma: e.g., 0, 2, 3. Type -1 not to download any model, 999 to download all the models.')
value = input("Please provide codes:\n")
values=[int(v) for v in value.split(',')]
if (np.max(values)>=n_mod) & (np.max(values)!=999):
print('Invalid choice: only {0} models are available'.format(n_mod))
elif np.min(values)<-1:
print('Invalid choice: type -1 if you prefer not to download any model.')
elif (np.min(values)==-1) & (len(values)>1):
print('Invalid choice: type just -1 (no comma) if you prefer not to download any model.')
else: break
if (np.max(values)==999):
values = np.arange(0,n_mod)
if (np.min(values)==-1) & (len(i1)==0):
msg = """You decided not to download any grid for model_version """+model_version+""".
However, the relative folder is empty, so MADYS does not have any model to compare data with.
Re-run the program, downloading at least one model when prompted.
Program ended."""
elif np.min(values)==-1:
return
else:
for value in values:
dl_model = lacking_models[value]
print('Downloading model '+dl_model+'...')
ModelHandler.download_model(dl_model)
print('Download ended.')
else: raise ValueError('Invalid input provided. Valid input types: dictionary, list.')
@staticmethod
def download_model(model_grid, verbose=True):