infoWidget with plotly
gduscher committed Dec 15, 2024
1 parent 27c6df1 commit 4b05f98
Showing 9 changed files with 4,391 additions and 196 deletions.
2 changes: 1 addition & 1 deletion notebooks/0_pyTEMlib.ipynb
@@ -63,7 +63,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.7"
+   "version": "3.13.0"
  },
  "toc": {
   "base_numbering": "1",
2,798 changes: 2,774 additions & 24 deletions notebooks/Spectroscopy/Analyse_Low_Loss.ipynb

Large diffs are not rendered by default.

118 changes: 53 additions & 65 deletions notebooks/Spectroscopy/EDS.ipynb

Large diffs are not rendered by default.

10 changes: 6 additions & 4 deletions pyTEMlib/core_loss_widget.py
@@ -191,14 +191,16 @@ def update_cl_sidebar(self):
         cl_index = index+1
         self.core_loss_tab[0, 0].options = spectrum_list
         self.core_loss_tab[0, 0].value = spectrum_list[cl_index]
-        self.update_cl_dataset()
-        self.set_fit_start()
-        self.parent.plot()
+        if '_relationship' in self.parent.datasets.keys():
+            self.update_cl_dataset()
+            self.set_fit_start()
+            self.parent.plot()

     def update_cl_dataset(self, value=0):
         self.cl_key = self.core_loss_tab[0, 0].value.split(':')[0]
         self.parent.coreloss_key = self.cl_key
-        self.parent.datasets['_relationship']['core_loss'] = self.cl_key
+        if '_relationship' in self.parent.datasets.keys():
+            self.parent.datasets['_relationship']['core_loss'] = self.cl_key

         if 'None' in self.cl_key:
             return
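Both guards above follow one defensive pattern: touch the optional '_relationship' bookkeeping entry only when it exists, so the widget no longer raises a KeyError on datasets loaded without that entry. A minimal standalone sketch of the idiom, with a hypothetical dataset dict:

    # Sketch only; 'Channel_000' and the datasets dict are hypothetical.
    datasets = {'Channel_000': 'core-loss spectrum'}   # no '_relationship' entry yet
    if '_relationship' in datasets.keys():             # same guard as in the diff
        datasets['_relationship']['core_loss'] = 'Channel_000'
    # else: the sidebar update is simply skipped instead of crashing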
80 changes: 54 additions & 26 deletions pyTEMlib/eels_tools.py
@@ -395,6 +395,15 @@ def align_zero_loss(dataset: sidpy.Dataset) -> sidpy.Dataset:
         new_si.metadata.update({'zero_loss': {'shifted': shifts}})
     return new_si

+from numba import jit
+
+def get_zero_losses(energy, z_loss_params):
+    z_loss_dset = np.zeros((z_loss_params.shape[0], z_loss_params.shape[1], energy.shape[0]))
+    for x in range(z_loss_params.shape[0]):
+        for y in range(z_loss_params.shape[1]):
+            z_loss_dset[x, y] += zl_func(energy, *z_loss_params[x, y])
+    return z_loss_dset
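Note that the hunk above imports jit but, as rendered, never applies it to get_zero_losses; the per-pixel loop replaces the commented-out broadcast version below, which materializes a full (nx, ny, n_energy) grid in memory. A sketch of how such a loop is typically compiled with numba; the decorator placement and the Gaussian stand-in for zl_func are assumptions, not pyTEMlib code:

    import numpy as np
    from numba import jit

    @jit(nopython=True)                       # assumed usage of the imported decorator
    def gaussian(energy, pos, width, amp):    # stand-in for pyTEMlib's zl_func
        return amp * np.exp(-(energy - pos)**2 / (2 * width**2))

    @jit(nopython=True)
    def zero_losses(energy, params):          # params: (nx, ny, 3) fit parameters
        out = np.zeros((params.shape[0], params.shape[1], energy.shape[0]))
        for x in range(params.shape[0]):
            for y in range(params.shape[1]):
                out[x, y] += gaussian(energy, params[x, y, 0],
                                      params[x, y, 1], params[x, y, 2])
        return out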




@@ -488,11 +497,12 @@ def guess_function(xvec, yvec):
     z_loss_dset = dataset.copy()
     z_loss_dset *= 0.0

-    energy_grid = np.broadcast_to(energy.reshape((1, 1, -1)), (z_loss_dset.shape[0],
-                                                               z_loss_dset.shape[1], energy.shape[0]))
-    z_loss_peaks = zl_func(energy_grid, *z_loss_params)
-    z_loss_dset += z_loss_peaks
-
+    #energy_grid = np.broadcast_to(energy.reshape((1, 1, -1)), (z_loss_dset.shape[0],
+    #                                                           z_loss_dset.shape[1], energy.shape[0]))
+    #z_loss_peaks = zl_func(energy_grid, *z_loss_params)
+    z_loss_params = np.array(z_loss_params)
+    z_loss_dset += get_zero_losses(np.array(energy), np.array(z_loss_params))
+
     shifts = z_loss_params[:, :, 0] * z_loss_params[:, :, 3]
     widths = z_loss_params[:, :, 2] * z_loss_params[:, :, 5]

@@ -522,7 +532,15 @@ def drude_lorentz(eps_inf, leng, ep, eb, gamma, e, amplitude):
     return eps


-def fit_plasmon(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float, endFitEnergy: float, plot_result: bool = False, number_workers: int = 4, number_threads: int = 8) -> Union[sidpy.Dataset, np.ndarray]:
+def get_plasmon_losses(energy, params):
+    dset = np.zeros((params.shape[0], params.shape[1], energy.shape[0]))
+    for x in range(params.shape[0]):
+        for y in range(params.shape[1]):
+            dset[x, y] += energy_loss_function(energy, params[x, y])
+    return dset
+
+
+def fit_plasmon(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float, endFitEnergy: float, number_workers: int = 4, number_threads: int = 8) -> Union[sidpy.Dataset, np.ndarray]:
     """
     Fit plasmon peak positions and widths in a TEM dataset using a Drude model.
@@ -567,8 +585,6 @@ def energy_loss_function(E: np.ndarray, Ep: float, Ew: float, A: float) -> np.ndarray:
         elf = (-1/eps).imag
         return A*elf
-
-

     # define window for fitting
     energy = dataset.get_spectral_dims(return_axis=True)[0].values
     start_fit_pixel = np.searchsorted(energy, startFitEnergy)
@@ -589,37 +605,46 @@ def energy_loss_function(E: np.ndarray, Ep: float, Ew: float, A: float) -> np.ndarray:
     guess_pos = energy[guess_pos]
     if guess_width > 8:
         guess_width = 8
-    popt, pcov = curve_fit(energy_loss_function, energy[start_fit_pixel:end_fit_pixel], fit_dset,
-                           p0=[guess_pos, guess_width, guess_amplitude])
+    try:
+        popt, pcov = curve_fit(energy_loss_function, energy[start_fit_pixel:end_fit_pixel], fit_dset,
+                               p0=[guess_pos, guess_width, guess_amplitude])
+    except:
+        end_fit_pixel = np.searchsorted(energy, 30)
+        fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel] / anglog[start_fit_pixel:end_fit_pixel])
+        try:
+            popt, pcov = curve_fit(energy_loss_function, energy[start_fit_pixel:end_fit_pixel], fit_dset,
+                                   p0=[guess_pos, guess_width, guess_amplitude])
+        except:
+            popt = [0, 0, 0]

     plasmon = dataset.like_data(energy_loss_function(energy, popt[0], popt[1], popt[2]))
     plasmon *= anglog
     start_plasmon = np.searchsorted(energy, 0) + 1
     plasmon[:start_plasmon] = 0.

+    epsilon = drude(energy, popt[0], popt[1], 1) * popt[2]
+    epsilon[:start_plasmon] = 0.
+
+    plasmon.metadata['plasmon'] = {'parameter': popt, 'epsilon': epsilon}
     return plasmon

     # if it can be parallelized:
     fitter = SidFitter(fit_dset, energy_loss_function, num_workers=number_workers,
                        threads=number_threads, return_cov=False, return_fit=False, return_std=False,
                        km_guess=False, num_fit_parms=3)
-    [fitted_dataset] = fitter.do_fit()
+    [fit_parameter] = fitter.do_fit()

+    plasmon_dset = dataset * 0.0
+    fit_parameter = np.array(fit_parameter)
+    plasmon_dset += get_plasmon_losses(np.array(energy), fit_parameter)
+    if 'plasmon' not in plasmon_dset.metadata:
+        plasmon_dset.metadata['plasmon'] = {}
+    plasmon_dset.metadata['plasmon'].update({'startFitEnergy': startFitEnergy,
+                                             'endFitEnergy': endFitEnergy,
+                                             'fit_parameter': fit_parameter,
+                                             'original_low_loss': dataset.title})

-    if plot_result:
-        fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=True, sharey=True)
-        ax1.imshow(fitted_dataset[:, :, 0], cmap='jet')
-        ax1.set_title('Ep - Peak Position')
-        ax2.imshow(fitted_dataset[:, :, 1], cmap='jet')
-        ax2.set_title('Ew - Peak Width')
-        ax3.imshow(fitted_dataset[:, :, 2], cmap='jet')
-        ax3.set_title('A - Amplitude')
-        plt.show()
-    return fitted_dataset
+    return plasmon_dset
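A usage sketch for the revised fit_plasmon, following the signature and metadata keys shown in the diff; the variable low_loss is a hypothetical sidpy.Dataset spectrum image:

    plasmon = fit_plasmon(low_loss, startFitEnergy=5.0, endFitEnergy=25.0,
                          number_workers=4, number_threads=8)
    info = plasmon.metadata['plasmon']               # spectrum-image case
    print(info['startFitEnergy'], info['endFitEnergy'])
    print(np.array(info['fit_parameter']).shape)     # (nx, ny, 3): Ep, Ew, A per pixel

For a single spectrum the function instead returns early, with metadata['plasmon']['parameter'] holding the three Drude parameters and the derived 'epsilon' array.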


def angle_correction(spectrum):
Expand Down Expand Up @@ -722,8 +747,11 @@ def multiple_scattering(energy_scale: np.ndarray, p: list, core_loss=False)-> np
ssd = ssd * ssd2

PSD /=tmfp*np.exp(-tmfp)
BGDcoef = scipy.interpolate.splrep(LLene, PSD, s=0)
return scipy.interpolate.splev(energy_scale, BGDcoef)
BGDcoef = scipy.interpolate.splrep(LLene, PSD, s=0)
msd = scipy.interpolate.splev(energy_scale, BGDcoef)
start_plasmon = np.searchsorted(energy_scale, 0)+1
msd[:start_plasmon] = 0.
return msd

def fit_multiple_scattering(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float, endFitEnergy: float,pin=None, number_workers: int = 4, number_threads: int = 8) -> Union[sidpy.Dataset, np.ndarray]:
"""
Expand Down
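The new tail of multiple_scattering clips the spline-evaluated distribution so that no intensity appears at or below 0 eV, mirroring the plasmon[:start_plasmon] = 0. guard in fit_plasmon. The idiom in isolation, as a self-contained sketch rather than pyTEMlib code:

    import numpy as np

    energy_scale = np.linspace(-2.0, 50.0, 521)        # hypothetical energy axis in eV
    msd = np.ones_like(energy_scale)                   # stand-in for the spline evaluation
    start_plasmon = np.searchsorted(energy_scale, 0) + 1
    msd[:start_plasmon] = 0.                           # zero at and below the zero-loss peak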
60 changes: 58 additions & 2 deletions pyTEMlib/file_tools.py
@@ -786,6 +786,55 @@ def h5_group_to_dict(group, group_dict={}):
     return group_dict


+def read_annotation(image):
+    if 'MAGE' not in image.data_type.name:
+        return {}
+    scale_x = np.abs(image.x[1] - image.x[0])
+    scale_y = np.abs(image.y[1] - image.y[0])
+    rec_scale = np.array([scale_x, scale_y, scale_x, scale_y])
+    if 'DocumentObjectList' not in image.original_metadata:
+        return {}
+    if '0' not in image.original_metadata['DocumentObjectList']:
+        return {}
+    annotations = {}
+    tags = image.original_metadata['DocumentObjectList']['0']
+    for key in tags:
+        if 'AnnotationGroupList' in key:
+            an_tags = tags[key]
+            for key2 in an_tags:
+                if isinstance(an_tags[key2], dict):
+                    if an_tags[key2]['AnnotationType'] == 13:  # type 'text'
+                        annotations[key2] = {'type': 'text'}
+                        if 'Label' in an_tags:
+                            annotations[key2]['label'] = an_tags['Label']
+                        rect = np.array(an_tags[key2]['Rectangle']) * rec_scale
+                        annotations[key2]['position'] = [rect[1], rect[0]]
+                        annotations[key2]['text'] = an_tags['Text']
+
+                    elif an_tags[key2]['AnnotationType'] == 6:
+                        annotations[key2] = {'type': 'circle'}
+                        if 'Label' in an_tags:
+                            annotations[key2]['label'] = an_tags['Label']
+                        rect = np.array(an_tags[key2]['Rectangle']) * rec_scale
+
+                        annotations[key2]['radius'] = rect[3] - rect[1]
+                        annotations[key2]['position'] = [rect[1], rect[0]]
+
+                    elif an_tags[key2]['AnnotationType'] == 23:
+                        annotations[key2] = {'type': 'spectral_image'}
+                        if 'Label' in an_tags[key2]:
+                            annotations[key2]['label'] = an_tags[key2]['Label']
+                        rect = np.array(an_tags[key2]['Rectangle']) * rec_scale
+
+                        annotations[key2]['width'] = rect[3] - rect[1]
+                        annotations[key2]['height'] = rect[2] - rect[0]
+                        annotations[key2]['position'] = [rect[1], rect[0]]
+                        annotations[key2]['Rectangle'] = np.array(an_tags[key2]['Rectangle'])
+    if len(annotations) > 0:
+        image.metadata['annotations'] = annotations
+    return annotations
+
+
 def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=False):  # save_file=False,
     """Opens a file if the extension is .hf5, .ndata, .dm3 or .dm4
@@ -849,11 +898,16 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=False):
                 dataset_dict[key] = h5_group_to_dict(master_group[key])
         if not write_hdf_file:
             file.close()
+        for dset in dataset_dict.values():
+            if isinstance(dset, sidpy.Dataset):
+                if 'Measurement' in dset.title:
+                    dset.title = dset.title.split('/')[-1]
         return dataset_dict
     elif extension in ['.dm3', '.dm4', '.ndata', '.ndata1', '.h5', '.emd', '.emi', '.edaxh5']:
         # tags = open_file(filename)
         if extension in ['.dm3', '.dm4']:
             reader = SciFiReaders.DMReader(filename)
+
         elif extension in ['.emi']:
             try:
                 import hyperspy.api as hs
@@ -899,8 +953,10 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=False):
         if not isinstance(dset, dict):
             print('Please use new SciFiReaders Package for full functionality')
         if isinstance(dset, sidpy.Dataset):
-            dset = [dset]
+            dset = {'Channel_000': dset}
+        for key in dset:
+            read_annotation(dset[key])

         if isinstance(dset, dict):
             dataset_dict = dset
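With the change above, open_file now wraps a lone sidpy.Dataset in a dict keyed 'Channel_000' (instead of a list) and runs read_annotation over every channel, so callers get one consistent return shape. A sketch of the resulting contract, with a hypothetical file name:

    datasets = open_file('spectrum.dm3')           # hypothetical file
    assert isinstance(datasets, dict)              # always a dict of sidpy Datasets
    for key, dset in datasets.items():             # e.g. 'Channel_000'
        print(key, dset.title, dset.metadata.get('annotations', {}))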