diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index bfb52bb4e..e958ac960 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -36,7 +36,7 @@ jobs: PMG_MAPI_KEY: ${{ secrets.PMG_MAPI_KEY }} steps: - - name: Check out Abipy repo + - name: Checkout Abipy repo uses: actions/checkout@v4 - name: Set up Miniconda diff --git a/abipy/abio/inputs.py b/abipy/abio/inputs.py index a41e2095e..5c0a31cc0 100644 --- a/abipy/abio/inputs.py +++ b/abipy/abio/inputs.py @@ -675,7 +675,7 @@ def runlevel(self): # DTE run. runlevel.add(atags.DFPT) runlevel.add(atags.DTE) - + elif optdriver == 99: # BSE run runlevel.update([atags.MANY_BODY, atags.BSE]) @@ -1502,7 +1502,7 @@ def new_with_vars(self, *args, **kwargs) -> AbinitInput: new.set_vars(*args, **kwargs) return new - def new_with_structure(self, new_structure, scdims=None, verbose=1) -> AbinitInput: + def new_with_structure(self, new_structure, scdims=None, verbose=1, **abi_vars) -> AbinitInput: """ Return a new |AbinitInput| with a different structure. See notes below for the constraints that must be fulfilled by the new structure @@ -1514,6 +1514,7 @@ def new_with_structure(self, new_structure, scdims=None, verbose=1) -> AbinitInp Must be used when `new_structure` represents a supercell of the initial structure defined in the input file. verbose: Verbosity level. + abi_vars: Abinit variables added to the Abinit input. .. warning:: @@ -1522,11 +1523,15 @@ def new_with_structure(self, new_structure, scdims=None, verbose=1) -> AbinitInp When structure represents a supercell, `scdims` must be coherent with the `new_structure` passed as argument. """ + new_structure = Structure.as_structure(new_structure) + # Check structure if scdims is None: # Assume same value of natom and typat if len(self.structure) != len(new_structure): - raise ValueError("Structures must have same value of natom") + raise ValueError(f"Structures must have same value of natom." + + f"new_structure has {len(new_structure)} atoms." + + f"input.structure has {len(self.structure)}") errors = [] for i, (site1, site2) in enumerate(zip(self.structure, new_structure)): if site1.specie.symbol != site2.specie.symbol: @@ -1610,6 +1615,8 @@ def new_with_structure(self, new_structure, scdims=None, verbose=1) -> AbinitInp # Add chkprim if not yet done. new.set_vars_ifnotin(chkprim=0) + new.set_vars(**abi_vars) + return new def new_with_decorators(self, decorators) -> AbinitInput: @@ -1929,7 +1936,7 @@ def make_ph_inputs_qpoint(self, qpt, tolerance=None, def make_ddkpert_input(self, perturbation, kptopt=2, only_vk=False, use_symmetries=False, tolerance=None, manager=None) -> AbinitInput: """ Returns |AbinitInput| for the calculation of an electric field perturbation. - This function should be called with an input that represents a GS run and + This function should be called with an input that represents a GS run and an electric field perturbation. Args: @@ -1973,7 +1980,7 @@ def make_ddkpert_input(self, perturbation, kptopt=2, only_vk=False, use_symmetri if only_vk: inp.set_vars(nstep=1, nline=1) - + # TODO: to implement #if not use_symmetries: # inp.set_vars( @@ -2073,11 +2080,11 @@ def make_dkdk_input(self, tolerance=None, kptopt=2, manager=None) -> AbinitInput dkdk_input.set_vars(tolerance) return dkdk_input - + def make_ddepert_input(self, perturbation, use_symmetries=True, tolerance=None, manager=None) -> AbinitInput: """ Returns |AbinitInput| for the calculation of an electric field perturbation. 
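A side note on the `new_with_structure` hunk above: the new `**abi_vars` pass-through lets callers override Abinit variables in the same call instead of chaining `set_vars` afterwards. A minimal sketch, not part of the patch; `gs_inp` and `supercell` are hypothetical objects:

.. code-block:: python

    # Build an input for a 2x2x2 supercell and set extra variables in one call.
    # new_with_structure already calls set_vars_ifnotin(chkprim=0); chksymbreak=0
    # is forwarded through the new **abi_vars mechanism.
    new_inp = gs_inp.new_with_structure(supercell, scdims=(2, 2, 2), chksymbreak=0)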
- This function should be called with an input that represents a GS run and + This function should be called with an input that represents a GS run and an electric field perturbation. Args: @@ -2111,7 +2118,7 @@ def make_ddepert_input(self, perturbation, use_symmetries=True, tolerance=None, qpt=(0, 0, 0), # q-wavevector. kptopt=2, # Take into account time-reversal symmetry. ) - + if not use_symmetries: inp.set_vars( comment="Input file for DDE calculation without symmetries.", @@ -2122,7 +2129,7 @@ def make_ddepert_input(self, perturbation, use_symmetries=True, tolerance=None, inp.set_vars(tolerance) return inp - + def make_dde_inputs(self, tolerance=None, use_symmetries=True, manager=None) -> MultiDataset: """ Return |MultiDataset| inputs for the calculation of electric field perturbations. diff --git a/abipy/dfpt/deformation_utils.py b/abipy/dfpt/deformation_utils.py index c40527955..fb6a56a23 100644 --- a/abipy/dfpt/deformation_utils.py +++ b/abipy/dfpt/deformation_utils.py @@ -28,8 +28,8 @@ def generate_deformations_volumic(structure, eps_V=0.02, scales=None): return structures_new -def generate_deformations(structure , eps=0.005): - spgrp = AbinitSpaceGroup.from_structure(structure ) +def generate_deformations(structure, eps=0.005): + spgrp = AbinitSpaceGroup.from_structure(structure ) print (spgrp) spgrp_number=spgrp.spgid rprim= structure.lattice.matrix @@ -39,11 +39,11 @@ def generate_deformations(structure , eps=0.005): structures_new = {} if 1 <= spgrp_number <= 2: - disp=[[1,1,1,1,1,1], [0,1,1,1,1,1], [2,1,1,1,1,1], [1,0,1,1,1,1], [1,2,1,1,1,1], [1,1,0,1,1,1], - [1,1,2,1,1,1], [1,1,1,0,1,1], [1,1,1,2,1,1], [1,1,1,1,0,1], [1,1,1,1,2,1], [1,1,1,1,1,0], - [1,1,1,1,1,2], [0,0,1,1,1,1], [1,0,0,1,1,1], [1,1,0,0,1,1], [1,1,1,0,0,1], [1,1,1,1,0,0], - [0,1,0,1,1,1], [0,1,1,0,1,1], [0,1,1,1,0,1], [0,1,1,1,1,0], [1,0,1,0,1,1], [1,0,1,1,0,1], - [1,0,1,1,1,0], [1,1,0,1,0,1], [1,1,0,1,1,0], [1,1,1,0,1,0] , [0 ,0,0,0,0,0]] + disp=[[1,1,1,1,1,1], [0,1,1,1,1,1], [2,1,1,1,1,1], [1,0,1,1,1,1], [1,2,1,1,1,1], [1,1,0,1,1,1], + [1,1,2,1,1,1], [1,1,1,0,1,1], [1,1,1,2,1,1], [1,1,1,1,0,1], [1,1,1,1,2,1], [1,1,1,1,1,0], + [1,1,1,1,1,2], [0,0,1,1,1,1], [1,0,0,1,1,1], [1,1,0,0,1,1], [1,1,1,0,0,1], [1,1,1,1,0,0], + [0,1,0,1,1,1], [0,1,1,0,1,1], [0,1,1,1,0,1], [0,1,1,1,1,0], [1,0,1,0,1,1], [1,0,1,1,0,1], + [1,0,1,1,1,0], [1,1,0,1,0,1], [1,1,0,1,1,0], [1,1,1,0,1,0] , [0 ,0,0,0,0,0]] if abs(rprim[1, 0]) > 1e-9 or abs(rprim[2, 0]) > 1e-9 or abs(rprim[2, 1]) > 1e-9: print("Warning: The lattice is oriented such that xz =xy =yz =0 .") rprim0 = np.copy(rprim) @@ -59,12 +59,12 @@ def generate_deformations(structure , eps=0.005): cos_ac = np.dot(a, c) / (norm_a * norm_c) cos_bc = np.dot(b, c) / (norm_b * norm_c) - rprim0[0,0] = 1.0 - rprim0[0,1] = 0.0 - rprim0[0,2] = 0.0 + rprim0[0,0] = 1.0 + rprim0[0,1] = 0.0 + rprim0[0,2] = 0.0 rprim0[1,0] = cos_ab rprim0[1,1] = np.sqrt(1-cos_ab**2) - rprim0[1,2] = 0.0 + rprim0[1,2] = 0.0 rprim0[2,0] = cos_ac rprim0[2,1] = (cos_bc-rprim0[1,0]*rprim0[2,0])/rprim0[1,1] rprim0[2,2] = np.sqrt(1.0-rprim0[2,0]**2-rprim0[2,1]**2) @@ -92,7 +92,7 @@ def generate_deformations(structure , eps=0.005): structure2=structure.copy() structure2.lattice=Lattice(rprim2) - structures_new[formatted_namei] = structure2 + structures_new[formatted_namei] = structure2 return structures_new elif 3 <= spgrp_number <= 15: @@ -116,7 +116,7 @@ def generate_deformations(structure , eps=0.005): cos_bc = np.dot(b, c) / (norm_b * norm_c) rprim0[0,0] = norm_a - rprim0[0,2] = 0.0 + rprim0[0,2] = 0.0 rprim0[1,1] = 
norm_b rprim0[2,0] = norm_c*cos_ac rprim0[2,2] = norm_c*np.sqrt(1-cos_ac**2) @@ -139,7 +139,7 @@ def generate_deformations(structure , eps=0.005): structure2=structure.copy() structure2.lattice=Lattice(rprim2) - structures_new[formatted_namei] = structure2 + structures_new[formatted_namei] = structure2 return structures_new elif 16 <= spgrp_number <= 74: @@ -157,7 +157,7 @@ def generate_deformations(structure , eps=0.005): structure2=structure.copy() structure2.lattice=Lattice(rprim2) - structures_new[formatted_namei] = structure2 + structures_new[formatted_namei] = structure2 return structures_new elif 75 <= spgrp_number <= 194: @@ -175,7 +175,7 @@ def generate_deformations(structure , eps=0.005): structure2=structure.copy() structure2.lattice=Lattice(rprim2) - structures_new[formatted_namei] = structure2 + structures_new[formatted_namei] = structure2 return structures_new elif 195 <= spgrp_number <= 230: @@ -188,6 +188,6 @@ def generate_deformations(structure , eps=0.005): structure2=structure.copy() structure2.lattice=Lattice(rprim2) - structures_new[formatted_namei] = structure2 + structures_new[formatted_namei] = structure2 return structures_new diff --git a/abipy/dfpt/qha_aproximation.py b/abipy/dfpt/qha_aproximation.py index e528cd2c3..fed483e4e 100644 --- a/abipy/dfpt/qha_aproximation.py +++ b/abipy/dfpt/qha_aproximation.py @@ -146,8 +146,8 @@ def second_derivative_energy_v(self, vol="vol"): E2D_V = ( b0 / v0 * (-2.0 * (eta - 1) * eta ** -5.0 + (1 - 3.0 / 2.0 * (b1 - 1) * (eta - 1)) * eta ** -4.0) * np.exp(-3.0 * (b1 - 1.0) * (v ** (1.0 / 3.0) / v0 ** (1.0 / 3.0) - 1.0) / 2.0)) - return E2D_V -#============================================================================================= + return E2D_V +#============================================================================================= def vol_E2Vib1(self, tstart=0, tstop=1000, num=101): """ Compute the volume as a function of temperature using the E2Vib1 method with the Vinet equation of state. @@ -190,7 +190,7 @@ def vol_E2Vib1(self, tstart=0, tstop=1000, num=101): return vol, fits -#============================================================================================= +#============================================================================================= def vol_Einf_Vib1(self, tstart=0, tstop=1000, num=101): """ Compute the volume as a function of temperature using the EinfVib1 method with the Vinet equation of state. @@ -235,7 +235,7 @@ def vol_Einf_Vib1(self, tstart=0, tstop=1000, num=101): return vol, fits -#============================================================================================= +#============================================================================================= def vol_Einf_Vib2(self, tstart=0, tstop=1000, num=101): """ Compute the volume as a function of temperature using the EinfVib2 method with the Vinet equation of state. 
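For orientation in the hunks that follow: the E2Vib1 / EinfVib1 / EinfVib2 / EinfVib4 methods Taylor-expand the vibrational free energy around a reference volume V0, with the derivatives taken by central finite differences on the equidistant volume mesh. A self-contained sketch of the stencils used below (the numerical values are placeholder data):

.. code-block:: python

    import numpy as np

    # Placeholder equidistant volume mesh (Ang^3) and F_vib samples (eV) at fixed T.
    volumes = np.linspace(95.0, 105.0, 5)
    F = np.array([0.12, 0.08, 0.06, 0.07, 0.11])
    iv0, dV = 2, volumes[1] - volumes[0]

    # Central finite-difference stencils, as in vol_Einf_Vib2 / vol_Einf_Vib4.
    dF_dV1 = (F[iv0+1] - F[iv0-1]) / (2 * dV)
    dF_dV2 = (F[iv0+1] - 2*F[iv0] + F[iv0-1]) / dV**2
    dF_dV3 = (F[iv0+2] - 2*F[iv0+1] + 2*F[iv0-1] - F[iv0-2]) / (2 * dV**3)
    dF_dV4 = (F[iv0+2] - 4*F[iv0+1] + 6*F[iv0] - 4*F[iv0-1] + F[iv0-2]) / dV**4

    # Fourth-order expansion around V0 = volumes[iv0].
    V0, V = volumes[iv0], volumes
    F_taylor = (F[iv0] + dF_dV1*(V - V0) + dF_dV2/2*(V - V0)**2
                + dF_dV3/6*(V - V0)**3 + dF_dV4/24*(V - V0)**4)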
@@ -276,7 +276,7 @@ def vol_Einf_Vib2(self, tstart=0, tstop=1000, num=101): # Calculate total energies tot_en = ( self.energies[np.newaxis, :].T +fe_V0 - +(self.volumes[np.newaxis, :].T - V0) * dfe_dV1 + 0.5 * (self.volumes[np.newaxis, :].T - V0)**2 * dfe_dV2) + +(self.volumes[np.newaxis, :].T - V0) * dfe_dV1 + 0.5 * (self.volumes[np.newaxis, :].T - V0)**2 * dfe_dV2) # Fit the energies as a function of volume fits = [self.eos.fit(self.volumes, e) for e in tot_en.T] @@ -322,12 +322,12 @@ def vol_Einf_Vib4(self, tstart=0, tstop=1000, num=101): dfe_dV3[i]=(e[iv0+2]-2*e[iv0+1]+2*e[iv0-1]-e[iv0-2])/(2*dV**3) dfe_dV4[i]=(e[iv0+2]-4*e[iv0+1]+6*e[iv0]-4*e[iv0-1]+e[iv0-2])/(dV**4) - fe_V0[i]= e[iv0] - V0=volumes0[iv0] + fe_V0[i]= e[iv0] + V0=volumes0[iv0] tot_en = (( volumes[np.newaxis, :].T -V0) * dfe_dV1 + 0.5* ( volumes[np.newaxis, :].T -V0)**2*(dfe_dV2) +( volumes[np.newaxis, :].T -V0)**3 *dfe_dV3/6.0 + ( volumes[np.newaxis, :].T -V0)**4*(dfe_dV4/24.0) - + fe_V0[:] +energies[np.newaxis, :].T ) + + fe_V0[:] +energies[np.newaxis, :].T ) fits = [self.eos.fit(volumes, e) for e in tot_en.T] vol = np.array([fit.v0 for fit in fits]) @@ -356,7 +356,7 @@ def set_eos(self, eos_name): @add_fig_kwargs def plot_energies(self, tstart=0, tstop=1000 ,num=1, ax=None, **kwargs): """ - Plots the BO energy as a function of volume + Plots the BO energy as a function of volume Args: tstart: The starting value (in Kelvin) of the temperature mesh. @@ -556,21 +556,21 @@ def plot_thermal_expansion_coeff(self, tstart=0, tstop=1000, num=101, tref=None, data_to_save = np.column_stack((data_to_save,alpha_1)) columns.append( 'E2vib1') - if (len(self.index_list)>=2): + if (len(self.index_list)>=2): vol2 ,fits = self.vol_Einf_Vib1(num=num,tstop=tstop,tstart=tstart) E2D_V = self.second_derivative_energy_v(vol2) if (tref==None): - alpha_2 = - 1/vol2[:] * (df_t[:,iv1]-df_t[:,iv0])/(volumes[iv1]-volumes[iv0]) / E2D_V[:] + alpha_2 = - 1/vol2[:] * (df_t[:,iv1]-df_t[:,iv0])/(volumes[iv1]-volumes[iv0]) / E2D_V[:] else: vol2_ref,fits = self.vol_Einf_Vib1(num=1,tstop=tref,tstart=tref) b0 = np.array([fit.b0 for fit in fits]) print("B (Einfvib1) @ ",tref," K =",b0*160.21766208 ,"(GPa)" ) - alpha_2 = - 1/vol2_ref * (df_t[:,iv1]-df_t[:,iv0])/(volumes[iv1]-volumes[iv0]) / E2D_V[:] + alpha_2 = - 1/vol2_ref * (df_t[:,iv1]-df_t[:,iv0])/(volumes[iv1]-volumes[iv0]) / E2D_V[:] ax.plot(tmesh, alpha_2,color='gold', lw=2 , label=r"$E_\infty Vib1$") data_to_save = np.column_stack((data_to_save,alpha_2)) columns.append( 'Einfvib1') - if (len(self.index_list)>=3): + if (len(self.index_list)>=3): vol3,fits = self.vol_Einf_Vib2(num=num,tstop=tstop,tstart=tstart) E2D_V = self.second_derivative_energy_v(vol3) dfe_dV2= np.zeros( num) @@ -580,17 +580,17 @@ def plot_thermal_expansion_coeff(self, tstart=0, tstop=1000, num=101, tref=None, ds_dv = (df_t[:,iv0+2]-df_t[:,iv0])/(2*dV) ds_dv = ds_dv+ (df_t[:,iv0+2]-2*df_t[:,iv0+1]+df_t[:,iv0])/dV**2 * (vol3[:]-volumes[iv0+1]) if (tref==None): - alpha_3 = - 1/vol3[:] * ds_dv / (E2D_V[:]+dfe_dV2[:]) + alpha_3 = - 1/vol3[:] * ds_dv / (E2D_V[:]+dfe_dV2[:]) else: vol3_ref,fits = self.vol_Einf_Vib2(num=1,tstop=tref,tstart=tref) b0 = np.array([fit.b0 for fit in fits]) print("B (Einfvib2) @ ",tref," K =",b0*160.21766208 ,"(GPa)" ) - alpha_3 = - 1/vol3_ref * ds_dv / (E2D_V[:]+dfe_dV2[:]) + alpha_3 = - 1/vol3_ref * ds_dv / (E2D_V[:]+dfe_dV2[:]) ax.plot(tmesh, alpha_3,color='m', lw=2 , label=r"$E_\infty Vib2$") data_to_save = np.column_stack((data_to_save,alpha_3)) columns.append( 'Einfvib2') - if (len(self.index_list)==5): + 
if (len(self.index_list)==5): alpha_qha = self.get_thermal_expansion_coeff(tstart, tstop, num, tref) vol4,fits = self.vol_Einf_Vib4(num=num,tstop=tstop,tstart=tstart) E2D_V = self.second_derivative_energy_v(vol4) @@ -609,12 +609,12 @@ def plot_thermal_expansion_coeff(self, tstart=0, tstop=1000, num=101, tref=None, ds_dv = ds_dv+ 1.0/6.0* (df_t[:,4]-4*df_t[:,3]+6*df_t[:,2]-4*df_t[:,1]+df_t[:,0])/(dV**4)* (vol4[:]-volumes[2])**3 D2F=E2D_V[:]+d2fe_dV2[:]+ (vol4[:]-volumes[2])*d3fe_dV3[:]+0.5*(vol4[:]-volumes[2])**2*d4fe_dV4[:] if (tref==None): - alpha_4 = - 1/vol4[:] * ds_dv / D2F + alpha_4 = - 1/vol4[:] * ds_dv / D2F else: vol4_ref,fits = self.vol_Einf_Vib4(num=1,tstop=tref,tstart=tref) b0 = np.array([fit.b0 for fit in fits]) print("B (Einfvib4) @ ",tref," K =",b0*160.21766208 ,"(GPa)" ) - alpha_4 = - 1/vol4_ref * ds_dv / D2F + alpha_4 = - 1/vol4_ref * ds_dv / D2F ax.plot(tmesh, alpha_4,color='c',linewidth=2 , label=r"$E_\infty Vib4$") ax.plot(alpha_qha.mesh, alpha_qha.values, color='k',linestyle='dashed', lw=1.5 ,label="QHA") @@ -716,20 +716,20 @@ def plot_thermal_expansion_coeff_abc(self, tstart=0, tstop=1000, num=101, tref=N if (tref!=None): vol2_tref,fits = self.vol_E2Vib1(num=1,tstop=tref,tstart=tref) - if (len(self.index_list)==2): + if (len(self.index_list)==2): vol ,fits = self.vol_Einf_Vib1(num=num,tstop=tstop,tstart=tstart) if (tref!=None): vol_tref,fits = self.vol_Einf_Vib1(num=1,tstop=tref,tstart=tref) method =r"$ (E_\infty Vib1)$" - if (len(self.index_list)==3): + if (len(self.index_list)==3): vol,fits = self.vol_Einf_Vib2(num=num,tstop=tstop,tstart=tstart) if (tref!=None): vol_tref,fits = self.vol_Einf_Vib2(num=1,tstop=tref,tstart=tref) method =r"$ (E_\infty Vib2)$" - if (len(self.index_list)==5): - tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies + if (len(self.index_list)==5): + tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies f0 = self.fit_tot_energies(tstart, tstop, num,tot_en , self.volumes_from_phdos) method =r"$ (E_\infty Vib4)$" vol,fits = self.vol_Einf_Vib4(num=num,tstop=tstop,tstart=tstart) @@ -752,7 +752,7 @@ def plot_thermal_expansion_coeff_abc(self, tstart=0, tstop=1000, num=101, tref=N alpha_b = (bb[2:] - bb[:-2]) / (2 * dt) / bb[1:-1] alpha_c = (cc[2:] - cc[:-2]) / (2 * dt) / cc[1:-1] else: - alpha_a = (aa[2:] - aa[:-2]) / (2 * dt) / aa_tref + alpha_a = (aa[2:] - aa[:-2]) / (2 * dt) / aa_tref alpha_b = (bb[2:] - bb[:-2]) / (2 * dt) / bb_tref alpha_c = (cc[2:] - cc[:-2]) / (2 * dt) / cc_tref @@ -777,7 +777,7 @@ def plot_thermal_expansion_coeff_abc(self, tstart=0, tstop=1000, num=101, tref=N alpha2_b = (bb2[2:] - bb2[:-2]) / (2 * dt) / bb2[1:-1] alpha2_c = (cc2[2:] - cc2[:-2]) / (2 * dt) / cc2[1:-1] else: - alpha2_a = (aa2[2:] - aa2[:-2]) / (2 * dt) / aa2_tref + alpha2_a = (aa2[2:] - aa2[:-2]) / (2 * dt) / aa2_tref alpha2_b = (bb2[2:] - bb2[:-2]) / (2 * dt) / bb2_tref alpha2_c = (cc2[2:] - cc2[:-2]) / (2 * dt) / cc2_tref @@ -825,20 +825,20 @@ def plot_thermal_expansion_coeff_angles(self, tstart=0, tstop=1000, num=101, tre if (tref!=None): vol2_tref,fits = self.vol_E2Vib1(num=1,tstop=tref,tstart=tref) - if (len(self.index_list)==2): + if (len(self.index_list)==2): vol ,fits = self.vol_Einf_Vib1(num=num,tstop=tstop,tstart=tstart) if (tref!=None): vol_tref,fits = self.vol_Einf_Vib1(num=1,tstop=tref,tstart=tref) method =r"$ (E_\infty Vib1)$" - if (len(self.index_list)==3): + if (len(self.index_list)==3): vol,fits = self.vol_Einf_Vib2(num=num,tstop=tstop,tstart=tstart) if (tref!=None): vol_tref,fits = self.vol_Einf_Vib2(num=1,tstop=tref,tstart=tref) 
method =r"$ (E_\infty Vib2)$" - if (len(self.index_list)==5): - tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies + if (len(self.index_list)==5): + tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies f0 = self.fit_tot_energies(tstart, tstop, num,tot_en , self.volumes_from_phdos) method =r"$ (E_\infty Vib4)$" vol,fits = self.vol_Einf_Vib4(num=num,tstop=tstop,tstart=tstart) @@ -861,7 +861,7 @@ def plot_thermal_expansion_coeff_angles(self, tstart=0, tstop=1000, num=101, tre alpha_beta = (beta[2:] - beta[:-2]) / (2 * dt) / beta[1:-1] alpha_gamma = (cc[2:] - cc[:-2]) / (2 * dt) / cc[1:-1] else: - alpha_alpha = (alpha[2:] - alpha[:-2]) / (2 * dt) / alpha_tref + alpha_alpha = (alpha[2:] - alpha[:-2]) / (2 * dt) / alpha_tref alpha_beta = (beta[2:] - beta[:-2]) / (2 * dt) / beta_tref alpha_gamma = (cc[2:] - cc[:-2]) / (2 * dt) / cc_tref @@ -886,7 +886,7 @@ def plot_thermal_expansion_coeff_angles(self, tstart=0, tstop=1000, num=101, tre alpha2_beta = (beta2[2:] - beta2[:-2]) / (2 * dt) / beta2[1:-1] alpha2_gamma = (cc2[2:] - cc2[:-2]) / (2 * dt) / cc2[1:-1] else: - alpha2_alpha = (alpha2[2:] - alpha2[:-2]) / (2 * dt) / alpha2_tref + alpha2_alpha = (alpha2[2:] - alpha2[:-2]) / (2 * dt) / alpha2_tref alpha2_beta = (beta2[2:] - beta2[:-2]) / (2 * dt) / beta2_tref alpha2_gamma = (cc2[2:] - cc2[:-2]) / (2 * dt) / cc2_tref @@ -936,16 +936,16 @@ def plot_abc_vs_t(self, tstart=0, tstop=1000, num=101, lattice=None, tref=None, data_to_save = np.column_stack((data_to_save,aa2,bb2,cc2)) columns.append( 'E2vib1 (a,b,c) | ') - if (len(self.index_list)==2): + if (len(self.index_list)==2): vol ,fits = self.vol_Einf_Vib1(num=num,tstop=tstop,tstart=tstart) method =r"$ (E_\infty Vib1)$" - if (len(self.index_list)==3): + if (len(self.index_list)==3): vol,fits = self.vol_Einf_Vib2(num=num,tstop=tstop,tstart=tstart) method =r"$ (E_\infty Vib2)$" - if (len(self.index_list)==5): - tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies + if (len(self.index_list)==5): + tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies f0 = self.fit_tot_energies(tstart, tstop, num,tot_en , self.volumes_from_phdos) method =r"$ (E_\infty Vib4)$" vol,fits = self.vol_Einf_Vib4(num=num,tstop=tstop,tstart=tstart) @@ -1009,16 +1009,16 @@ def plot_angles_vs_t(self, tstart=0, tstop=1000, num=101, angle=None, tref=None, data_to_save = np.column_stack((data_to_save,alpha2,beta2,gamma2)) columns.append( 'E2vib1 (alpha,beta,gamma) | ') - if (len(self.index_list)==2): + if (len(self.index_list)==2): vol ,fits = self.vol_Einf_Vib1(num=num,tstop=tstop,tstart=tstart) method =r"$ (E_\infty Vib1)$" - if (len(self.index_list)==3): + if (len(self.index_list)==3): vol,fits = self.vol_Einf_Vib2(num=num,tstop=tstop,tstart=tstart) method =r"$ (E_\infty Vib2)$" - if (len(self.index_list)==5): - tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies + if (len(self.index_list)==5): + tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies f0 = self.fit_tot_energies(tstart, tstop, num,tot_en , self.volumes_from_phdos) method =r"$ (E_\infty Vib4)$" vol,fits = self.vol_Einf_Vib4(num=num,tstop=tstop,tstart=tstart) @@ -1102,9 +1102,9 @@ def vol_E2Vib1_forth(self, tstart=0, tstop=1000, num=101): iv1=self.iv1_vib dV=volumes0[iv1]-volumes0[iv0] - V0=self.volumes[self.iv0] + V0=self.volumes[self.iv0] - energy = self.energies[np.newaxis, :].T + energy = self.energies[np.newaxis, :].T f=self.fit_forth( tstart=0, tstop=0, num=1 ,energy=energy,volumes=self.volumes) param3 = np.zeros((num,3)) 
param3=np.array([12*f.param[0][0],6*f.param[0][1],2*f.param[0][2]]) @@ -1118,7 +1118,7 @@ def vol_E2Vib1_forth(self, tstart=0, tstop=1000, num=101): dfe_dV1=(e[iv1]-e[iv0])/dV vol[i]=V0-dfe_dV1*E2D**-1 - return vol + return vol def vol_EinfVib1_forth(self, tstart=0, tstop=1000, num=101): """ @@ -1133,7 +1133,7 @@ def vol_EinfVib1_forth(self, tstart=0, tstop=1000, num=101): dV=volumes0[iv1]-volumes0[iv0] - energy = self.energies[np.newaxis, :].T + energy = self.energies[np.newaxis, :].T ph_energies = self.get_vib_free_energies(tstart, tstop, num) vol = np.zeros( num) @@ -1167,18 +1167,18 @@ def vol_Einf_Vib2_forth(self, tstart=0, tstop=1000, num=101): fe_V0= np.zeros( num) if (len(self.index_list)==5): iv0=2 - else : + else : iv0=1 dV=volumes0[iv0]-volumes0[iv0-1] - V0=volumes0[iv0] + V0=volumes0[iv0] for i,e in enumerate(ph_energies.T): dfe_dV[i]=(e[iv0+1]-e[iv0-1])/(2*dV) d2fe_dV2[i]=(e[iv0+1]-2.0*e[iv0]+e[iv0-1])/(dV)**2 - fe_V0[i]= e[iv0] + fe_V0[i]= e[iv0] - tot_en = self.energies[np.newaxis, :].T + ( self.volumes[np.newaxis, :].T -V0) * dfe_dV + tot_en = self.energies[np.newaxis, :].T + ( self.volumes[np.newaxis, :].T -V0) * dfe_dV tot_en = tot_en + 0.5* (( self.volumes[np.newaxis, :].T -V0))**2*(d2fe_dV2) - tot_en = tot_en+ fe_V0 + tot_en = tot_en+ fe_V0 f=self.fit_forth( tstart, tstop, num ,tot_en,self.volumes) vol=f.min_vol @@ -1214,12 +1214,12 @@ def vol_Einf_Vib4_forth(self, tstart=0, tstop=1000, num=101): dfe_dV3[i]=(e[iv0+2]-2*e[iv0+1]+2*e[iv0-1]-e[iv0-2])/(2*dV**3) dfe_dV4[i]=(e[iv0+2]-4*e[iv0+1]+6*e[iv0]-4*e[iv0-1]+e[iv0-2])/(dV**4) - fe_V0[i]= e[iv0] - V0=volumes0[iv0] + fe_V0[i]= e[iv0] + V0=volumes0[iv0] tot_en = ( volumes[np.newaxis, :].T -V0) * dfe_dV1 + 0.5* ( volumes[np.newaxis, :].T -V0)**2*(dfe_dV2) tot_en = tot_en+( volumes[np.newaxis, :].T -V0)**3 *dfe_dV3/6 + ( volumes[np.newaxis, :].T -V0)**4*(dfe_dV4/24) - tot_en = tot_en + fe_V0 +energies[np.newaxis, :].T + tot_en = tot_en + fe_V0 +energies[np.newaxis, :].T f=self.fit_forth( tstart, tstop, num ,tot_en,self.volumes) vol=f.min_vol @@ -1254,20 +1254,20 @@ def plot_vol_vs_t_4th(self, tstart=0, tstop=1000, num=101, ax=None, **kwargs): data_to_save = np.column_stack((data_to_save,vol_4th)) columns.append( 'E2vib1') - if (len(self.index_list)>=2): + if (len(self.index_list)>=2): vol2_4th = self.vol_EinfVib1_forth(num=num,tstop=tstop,tstart=tstart) ax.plot(tmesh, vol2_4th,color='gold', lw=2 , label=r"$E_\infty Vib1$") data_to_save = np.column_stack((data_to_save,vol2_4th)) columns.append( 'Einfvib1') - if (len(self.index_list)>=3): + if (len(self.index_list)>=3): vol3_4th = self.vol_Einf_Vib2_forth(num=num,tstop=tstop,tstart=tstart) ax.plot(tmesh, vol3_4th,color='m', lw=2 , label=r"$E_\infty Vib2$") data_to_save = np.column_stack((data_to_save,vol3_4th)) columns.append( 'Einfvib2') - if (len(self.index_list)==5): - tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies + if (len(self.index_list)==5): + tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies f0 = self.fit_forth( tstart, tstop, num ,tot_en,volumes) vol4_4th = self.vol_Einf_Vib4_forth(num=num,tstop=tstop,tstart=tstart) ax.plot(tmesh, vol4_4th,color='c', lw=2 ,label=r"$E_\infty Vib4$") @@ -1301,11 +1301,11 @@ def get_thermal_expansion_coeff_4th(self, tstart=0, tstop=1000, num=101 , tref=N Returns: |Function1D| """ ph_energies = self.get_vib_free_energies(tstart, tstop, num) - tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies + tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies f = self.fit_forth( tstart, tstop, num 
,tot_en,self.volumes_from_phdos) if (tref!=None): ph_energies2 = self.get_vib_free_energies(tref, tref, 1) - tot_en2 = self.energies_pdos[np.newaxis, :].T + ph_energies2 + tot_en2 = self.energies_pdos[np.newaxis, :].T + ph_energies2 f0 = self.fit_forth(tref, tref , 1 ,tot_en2 , self.volumes_from_phdos) dt = f.temp[1] - f.temp[0] @@ -1324,12 +1324,12 @@ def get_thermal_expansion_coeff_4th(self, tstart=0, tstop=1000, num=101 , tref=N param2[j] = np.array([3*param[j][0],2*param[j][1],param[j][2]]) p = np.poly1d(param2[j]) - d2f_t_v[j]= p(f.min_vol[j]) + d2f_t_v[j]= p(f.min_vol[j]) F2D = f.F2D_V if (tref==None): - #alpha= - 1/f.min_vol[1:-1] *d2f_t_v[1:-1] / F2D[1:-1] - alpha= - 1/f.min_vol *d2f_t_v / F2D + #alpha= - 1/f.min_vol[1:-1] *d2f_t_v[1:-1] / F2D[1:-1] + alpha= - 1/f.min_vol *d2f_t_v / F2D else : #alpha= - 1/f0.min_vol * d2f_t_v[1:-1] / F2D[1:-1] alpha= - 1/f0.min_vol * d2f_t_v / F2D @@ -1363,7 +1363,7 @@ def plot_thermal_expansion_coeff_4th(self, tstart=0, tstop=1000, num=101, tref=N iv0=self.iv0_vib iv1=self.iv1_vib dV=volumes[iv0+1]-volumes[iv0] - energy = self.energies[np.newaxis, :].T + energy = self.energies[np.newaxis, :].T f=self.fit_forth( tstart=0, tstop=0, num=1 ,energy=energy,volumes=self.volumes) param3 = np.zeros((num,3)) param3=np.array([12*f.param[0][0],6*f.param[0][1],2*f.param[0][2]]) @@ -1381,20 +1381,20 @@ def plot_thermal_expansion_coeff_4th(self, tstart=0, tstop=1000, num=101, tref=N data_to_save = np.column_stack((data_to_save,alpha_1)) columns.append( 'E2vib1') - if (len(self.index_list)>=2): + if (len(self.index_list)>=2): vol2_4th = self.vol_EinfVib1_forth(num=num,tstop=tstop,tstart=tstart) #E2D_V = self.second_derivative_energy_v(vol2) E2D_V = p3(vol2_4th) if (tref==None): - alpha_2 = - 1/vol2_4th[:] * (df_t[:,iv1]-df_t[:,iv0])/(volumes[iv1]-volumes[iv0]) / E2D_V[:] + alpha_2 = - 1/vol2_4th[:] * (df_t[:,iv1]-df_t[:,iv0])/(volumes[iv1]-volumes[iv0]) / E2D_V[:] else : vol2_4th_ref = self.vol_EinfVib1_forth(num=1,tstop=tref,tstart=tref) - alpha_2 = - 1/vol2_4th_ref * (df_t[:,iv1]-df_t[:,iv0])/(volumes[iv1]-volumes[iv0]) / E2D_V[:] + alpha_2 = - 1/vol2_4th_ref * (df_t[:,iv1]-df_t[:,iv0])/(volumes[iv1]-volumes[iv0]) / E2D_V[:] ax.plot(tmesh, alpha_2,color='gold', lw=2 , label=r"$E_\infty Vib1$") data_to_save = np.column_stack((data_to_save,alpha_2)) columns.append( 'Einfvib1') - if (len(self.index_list)>=3): + if (len(self.index_list)>=3): vol3_4th = self.vol_Einf_Vib2_forth(num=num,tstop=tstop,tstart=tstart) E2D_V = p3(vol3_4th) dfe_dV2= np.zeros( num) @@ -1404,15 +1404,15 @@ def plot_thermal_expansion_coeff_4th(self, tstart=0, tstop=1000, num=101, tref=N ds_dv = (df_t[:,iv0+2]-df_t[:,iv0])/(2*dV) ds_dv = ds_dv+ (df_t[:,iv0+2]-2*df_t[:,iv0+1]+df_t[:,iv0])/dV**2 * (vol3_4th[:]-volumes[iv0+1]) if (tref==None): - alpha_3 = - 1/vol3_4th[:] * ds_dv / (E2D_V[:]+dfe_dV2[:]) + alpha_3 = - 1/vol3_4th[:] * ds_dv / (E2D_V[:]+dfe_dV2[:]) else : vol3_4th_ref = self.vol_Einf_Vib2_forth(num=1,tstop=tref,tstart=tref) - alpha_3 = - 1/vol3_4th_ref * ds_dv / (E2D_V[:]+dfe_dV2[:]) + alpha_3 = - 1/vol3_4th_ref * ds_dv / (E2D_V[:]+dfe_dV2[:]) ax.plot(tmesh, alpha_3,color='m', lw=2 , label=r"$E_\infty Vib2$") data_to_save = np.column_stack((data_to_save,alpha_3)) columns.append( 'Einfvib2') - if (len(self.index_list)==5): + if (len(self.index_list)==5): vol4_4th = self.vol_Einf_Vib4_forth(num=num,tstop=tstop,tstart=tstart) E2D_V = p3(vol4_4th) @@ -1430,10 +1430,10 @@ def plot_thermal_expansion_coeff_4th(self, tstart=0, tstop=1000, num=101, tref=N ds_dv = ds_dv+ 1.0/6.0* 
(df_t[:,4]-4*df_t[:,3]+6*df_t[:,2]-4*df_t[:,1]+df_t[:,0])/(dV**4)* (vol4_4th[:]-volumes[2])**3 D2F=E2D_V[:]+d2fe_dV2[:]+ (vol4_4th[:]-volumes[2])*d3fe_dV3[:]+0.5*(vol4_4th[:]-volumes[2])**2*d4fe_dV4[:] if (tref==None): - alpha_4 = - 1/vol4_4th[:] * ds_dv / D2F + alpha_4 = - 1/vol4_4th[:] * ds_dv / D2F else : vol4_4th_ref = self.vol_Einf_Vib4_forth(num=1,tstop=tref,tstart=tref) - alpha_4 = - 1/vol4_4th_ref * ds_dv / D2F + alpha_4 = - 1/vol4_4th_ref * ds_dv / D2F ax.plot(tmesh, alpha_4,color='c',linewidth=2 , label=r"$E_\infty Vib4$") @@ -1485,16 +1485,16 @@ def plot_abc_vs_t_4th(self, tstart=0, tstop=1000, num=101, lattice=None,tref=Non data_to_save = np.column_stack((data_to_save,aa2,bb2,cc2)) columns.append( 'E2vib1 (a,b,c) | ') - if (len(self.index_list)==2): + if (len(self.index_list)==2): vol = self.vol_EinfVib1_forth(num=num,tstop=tstop,tstart=tstart) method =r"$ (E_\infty Vib1)$" - if (len(self.index_list)==3): + if (len(self.index_list)==3): vol = self.vol_Einf_Vib2_forth(num=num,tstop=tstop,tstart=tstart) method =r"$ (E_\infty Vib2)$" - if (len(self.index_list)==5): - tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies + if (len(self.index_list)==5): + tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies f0 = self.fit_forth( tstart, tstop, num ,tot_en,volumes) method =r"$ (E_\infty Vib4)$" vol = self.vol_Einf_Vib4_forth(num=num,tstop=tstop,tstart=tstart) @@ -1558,16 +1558,16 @@ def plot_angles_vs_t_4th(self, tstart=0, tstop=1000, num=101,angle=None, tref=N data_to_save = np.column_stack((data_to_save,alpha2,beta2,gamma2)) columns.append( 'E2vib1 (alpha,beta,gamma) | ') - if (len(self.index_list)==2): + if (len(self.index_list)==2): vol = self.vol_EinfVib1_forth(num=num,tstop=tstop,tstart=tstart) method =r"$ (E_\infty Vib1)$" - if (len(self.index_list)==3): + if (len(self.index_list)==3): vol = self.vol_Einf_Vib2_forth(num=num,tstop=tstop,tstart=tstart) method =r"$ (E_\infty Vib2)$" - if (len(self.index_list)==5): - tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies + if (len(self.index_list)==5): + tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies f0 = self.fit_forth( tstart, tstop, num ,tot_en,volumes) method =r"$ (E_\infty Vib4)$" vol = self.vol_Einf_Vib4_forth(num=num,tstop=tstop,tstart=tstart) @@ -1630,20 +1630,20 @@ def plot_thermal_expansion_coeff_abc_4th(self, tstart=0, tstop=1000, num=101, tr if (tref!=None): vol2_tref = self.vol_E2Vib1_forth(num=1,tstop=tref,tstart=tref) - if (len(self.index_list)==2): + if (len(self.index_list)==2): vol = self.vol_EinfVib1_forth(num=num,tstop=tstop,tstart=tstart) if (tref!=None): vol_tref = self.vol_EinfVib1_forth(num=1,tstop=tref,tstart=tref) method =r"$ (E_\infty Vib1)$" - if (len(self.index_list)==3): + if (len(self.index_list)==3): vol = self.vol_Einf_Vib2_forth(num=num,tstop=tstop,tstart=tstart) if (tref!=None): vol_tref = self.vol_Einf_Vib2_forth(num=1,tstop=tref,tstart=tref) method =r"$ (E_\infty Vib2)$" - if (len(self.index_list)==5): - tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies + if (len(self.index_list)==5): + tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies f0 = self.fit_forth( tstart, tstop, num ,tot_en,volumes) method =r"$ (E_\infty Vib4)$" vol = self.vol_Einf_Vib4_forth(num=num,tstop=tstop,tstart=tstart) @@ -1666,7 +1666,7 @@ def plot_thermal_expansion_coeff_abc_4th(self, tstart=0, tstop=1000, num=101, tr alpha_b = (bb[2:] - bb[:-2]) / (2 * dt) / bb[1:-1] alpha_c = (cc[2:] - cc[:-2]) / (2 * dt) / cc[1:-1] else: - alpha_a = (aa[2:] - aa[:-2]) / (2 * dt) / aa_tref + 
alpha_a = (aa[2:] - aa[:-2]) / (2 * dt) / aa_tref alpha_b = (bb[2:] - bb[:-2]) / (2 * dt) / bb_tref alpha_c = (cc[2:] - cc[:-2]) / (2 * dt) / cc_tref @@ -1691,7 +1691,7 @@ def plot_thermal_expansion_coeff_abc_4th(self, tstart=0, tstop=1000, num=101, tr alpha2_b = (bb2[2:] - bb2[:-2]) / (2 * dt) / bb2[1:-1] alpha2_c = (cc2[2:] - cc2[:-2]) / (2 * dt) / cc2[1:-1] else: - alpha2_a = (aa2[2:] - aa2[:-2]) / (2 * dt) / aa2_tref + alpha2_a = (aa2[2:] - aa2[:-2]) / (2 * dt) / aa2_tref alpha2_b = (bb2[2:] - bb2[:-2]) / (2 * dt) / bb2_tref alpha2_c = (cc2[2:] - cc2[:-2]) / (2 * dt) / cc2_tref @@ -1739,20 +1739,20 @@ def plot_thermal_expansion_coeff_angles_4th(self, tstart=0, tstop=1000, num=101, if (tref!=None): vol2_tref = self.vol_E2Vib1_forth(num=1,tstop=tref,tstart=tref) - if (len(self.index_list)==2): + if (len(self.index_list)==2): vol = self.vol_EinfVib1_forth(num=num,tstop=tstop,tstart=tstart) if (tref!=None): vol_tref = self.vol_EinfVib1_forth(num=1,tstop=tref,tstart=tref) method =r"$ (E_\infty Vib1)$" - if (len(self.index_list)==3): + if (len(self.index_list)==3): vol = self.vol_Einf_Vib2_forth(num=num,tstop=tstop,tstart=tstart) if (tref!=None): vol_tref = self.vol_Einf_Vib2_forth(num=1,tstop=tref,tstart=tref) method =r"$ (E_\infty Vib2)$" - if (len(self.index_list)==5): - tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies + if (len(self.index_list)==5): + tot_en = self.energies_pdos[np.newaxis, :].T + ph_energies f0 = self.fit_forth( tstart, tstop, num ,tot_en,volumes) method =r"$ (E_\infty Vib4)$" vol = self.vol_Einf_Vib4_forth(num=num,tstop=tstop,tstart=tstart) @@ -1774,7 +1774,7 @@ def plot_thermal_expansion_coeff_angles_4th(self, tstart=0, tstop=1000, num=101, alpha_beta = (beta[2:] - beta[:-2]) / (2 * dt) / beta[1:-1] alpha_gamma = (gamma[2:] - gamma[:-2]) / (2 * dt) / gamma[1:-1] else: - alpha_alpha = (alpha[2:] - alpha[:-2]) / (2 * dt) / alpha_tref + alpha_alpha = (alpha[2:] - alpha[:-2]) / (2 * dt) / alpha_tref alpha_beta = (beta[2:] - beta[:-2]) / (2 * dt) / beta_tref alpha_gamma = (gamma[2:] - gamma[:-2]) / (2 * dt) / gamma_tref @@ -1799,7 +1799,7 @@ def plot_thermal_expansion_coeff_angles_4th(self, tstart=0, tstop=1000, num=101, alpha2_beta = (beta2[2:] - beta2[:-2]) / (2 * dt) / beta2[1:-1] alpha2_gamma = (gamma2[2:] - gamma2[:-2]) / (2 * dt) / gamma2[1:-1] else: - alpha2_alpha = (alpha2[2:] - alpha2[:-2]) / (2 * dt) / alpha2_tref + alpha2_alpha = (alpha2[2:] - alpha2[:-2]) / (2 * dt) / alpha2_tref alpha2_beta = (beta2[2:] - beta2[:-2]) / (2 * dt) / beta2_tref alpha2_gamma = (gamma2[2:] - gamma2[:-2]) / (2 * dt) / gamma2_tref @@ -1918,7 +1918,7 @@ def get_thermodynamic_properties(self, tstart=0, tstop=1000, num=101): zpe[i] = d.zero_point_energy return dict2namedtuple(tmesh=tmesh, cv=cv, free_energy=free_energy, entropy=entropy, zpe=zpe) -#======================================================================================================= +#======================================================================================================= @classmethod def from_files_app_ddb(cls, ddb_paths, phdos_paths): """ diff --git a/abipy/eph/gwan.py b/abipy/eph/gwan.py new file mode 100644 index 000000000..0135c7080 --- /dev/null +++ b/abipy/eph/gwan.py @@ -0,0 +1,524 @@ +""" +This module contains objects for analyzing +the GWAN.nc file with the e-ph vertex in the Wannier representation. 
+ +For a theoretical introduction see :cite:`Giustino2017` +""" +from __future__ import annotations + +import dataclasses +import numpy as np +import pandas as pd +#import abipy.core.abinit_units as abu + +from monty.string import marquee #, list_strings +from monty.functools import lazy_property +from monty.termcolor import cprint +from abipy.core.structure import Structure +from abipy.core.kpoints import kpoints_indices +from abipy.core.mixins import AbinitNcFile, Has_Structure, Has_ElectronBands, Has_Header #, NotebookWriter +from abipy.tools.typing import PathLike +from abipy.tools.numtools import BzRegularGridInterpolator, nparr_to_df +#from abipy.tools.plotting import (add_fig_kwargs, get_ax_fig_plt, get_axarray_fig_plt, set_axlims, set_visible, +# rotate_ticklabels, ax_append_title, set_ax_xylabels, linestyles) +#from abipy.tools import duck +from abipy.electrons.ebands import ElectronBands, RobotWithEbands +#from abipy.tools.typing import Figure +from abipy.abio.robots import Robot +from abipy.eph.common import BaseEphReader + + +class GwanFile(AbinitNcFile, Has_Header, Has_Structure, Has_ElectronBands): # , NotebookWriter): + """ + This file stores the e-ph matrix elements in the wannier representation + and provides methods to analyze and plot results. + + Usage example: + + .. code-block:: python + + with GwanFile("out_GWAN.nc") as gwan: + print(gwan) + + for spin in range(gwan.nsppol): + # Extract the object storing the g for this spin. + #gqk = gstore.gqk_spin[spin] + #print(gqk) + + # Get a Dataframe with g(k, q) for all modes and bands. + #df = gqk.get_gdf_at_qpt_kpt([1/2, 0, 0], [0, 0, 0]) + #print(df) + + .. rubric:: Inheritance Diagram + .. inheritance-diagram:: GwanFile + """ + @classmethod + def from_file(cls, filepath: PathLike) -> GwanFile: + """Initialize the object from a netcdf file.""" + return cls(filepath) + + def __init__(self, filepath: PathLike): + super().__init__(filepath) + self.r = GwanReader(filepath) + + @lazy_property + def ebands(self) -> ElectronBands: + """|ElectronBands| object.""" + return self.r.read_ebands() + + @property + def structure(self) -> Structure: + """|Structure| object.""" + return self.ebands.structure + + def close(self) -> None: + """Close the file.""" + self.r.close() + + #@lazy_property + #def gqk_spin(self) -> list: + # return [Gqk.from_gstore(self, spin) for spin in range(self.nsppol)] + + @lazy_property + def params(self) -> dict: + """dict with the convergence parameters, e.g. ``nbsum``.""" + #od = OrderedDict([ + # ("nbsum", self.nbsum), + # ("nqibz", self.r.nqibz), + #]) + ## Add EPH parameters. 
+ #od.update(self.r.common_eph_params) + + od = {} + return od + + def __str__(self) -> str: + return self.to_string() + + def to_string(self, verbose=0) -> str: + """String representation with verbosiy level ``verbose``.""" + lines = []; app = lines.append + + app(marquee("File Info", mark="=")) + app(self.filestat(as_string=True)) + app("") + app(self.structure.to_string(verbose=verbose, title="Structure")) + + app("") + app(self.ebands.to_string(with_structure=False, verbose=verbose, title="Electronic Bands")) + if verbose > 1: + app("") + app(self.hdr.to_string(verbose=verbose, title="Abinit Header")) + + app(f"nsppol: {self.r.nsppol}") + app(f"gstore_completed: {bool(self.r.completed)}") + app(f"gstore_cplex: {self.r.cplex}") + app(f"gstore_kptopt: {self.r.kptopt}") + app(f"gstore_qptopt: {self.r.qptopt}") + #for spin in range(self.r.nsppol): + # app(f"gstore_brange_spin[{spin}]: {self.r.brange_spin[spin]}") + # app(f"gstore_erange_spin[{spin}]: {self.r.erange_spin[spin]}") + # app(f"gstore_glob_spin_nq[{spin}]: {self.r.glob_spin_nq[spin]}") + + return "\n".join(lines) + + #def write_epw_hdf5(self, filepath: PathLike) -> None: + + +@dataclasses.dataclass(kw_only=True) +class Gqk: + """ + This object stores the e-ph matrix elements (g or g^2) and the matrix elements + of the velocity operator for a given spin. + """ + cplex: int # 1 if |g|^2 is stored + # 2 if complex valued g (mind the gauge) + spin: int # Spin index. + nb: int # Number of bands + bstart: int + #bstop: int + + glob_nk: int # Total number of k/q points in global matrix. + glob_nq: int # Note that k-points/q-points can be filtered. + # Use kzone, qzone and kfilter to interpret these dimensions. + + gstore: GstoreFile + + gvals: np.ndarray | None + g2: np.ndarray | None + vk_cart_ibz: np.ndarray | None + vkmat_cart_ibz: np.ndarray | None + + @classmethod + def from_gstore(cls, gstore: GstoreFile, spin: int): + """ + Build an istance from a GstoreFile and the spin index. + """ + ncr = gstore.r + path = f"gqk_spin{spin+1}" + cplex = ncr.read_dimvalue("gstore_cplex") + nb = ncr.read_dimvalue("nb", path=path) + glob_nk = ncr.read_dimvalue("glob_nk", path=path) + glob_nq = ncr.read_dimvalue("glob_nq", path=path) + + # Read e-ph matrix elements + # nctkarr_t("gvals", "dp", "gstore_cplex, nb_kq, nb_k, natom3, glob_nk, glob_nq) + # Have to transpose the (nb_kq, nb_k) submatrix written by Fortran. + g2, gvals = None, None + if cplex == 1: + g2 = ncr.read_value("gvals", path=path).transpose(0, 1, 2, 4, 3, 5).copy() + + elif cplex == 2: + gvals = ncr.read_value("gvals", path=path).transpose(0, 1, 2, 4, 3, 5).copy() + gvals = gvals[...,0] + 1j*gvals[...,1] + + vk_cart_ibz, vkmat_cart_ibz = None, None + if ncr.with_vk == 1: + # nctk_def_arrays(spin_ncid, nctkarr_t("vk_cart_ibz", "dp", "three, nb, gstore_nkibz")) + vk_cart_ibz = ncr.read_value("vk_cart_ibz", path=path) + + if ncr.with_vk == 2: + # Full (nb x nb) matrix. + # Have to transpose (nb_kq, nb_k) submatrix written by Fortran. + # nctk_def_arrays(spin_ncid, nctkarr_t("vkmat_cart_ibz", "dp", "two, three, nb, nb, gstore_nkibz")) + vkmat_cart_ibz = ncr.read_value("vkmat_cart_ibz", path=path).transpose(0, 1, 3, 2, 4).copy() + vkmat_cart_ibz = vkmat_cart_ibz[...,0] + 1j*vkmat_cart_ibz[...,1] + + # Note conversion between Fortran and python indexing. 
+ bstart = ncr.read_value("bstart", path=path) - 1 + #bstop = ncr.read_value("stop", path=path) + + data = locals() + return cls(**{k: data[k] for k in [field.name for field in dataclasses.fields(Gqk)]}) + + def __str__(self) -> str: + return self.to_string() + + def to_string(self, verbose=0) -> str: + """String representation with verbosiy level ``verbose``.""" + lines = []; app = lines.append + + app(marquee(f"Gqk for spin: {self.spin}", mark="=")) + app(f"cplex: {self.cplex}") + app(f"nb: {self.nb}") + app(f"bstart: {self.bstart}") + app(f"glob_nk: {self.glob_nk}") + app(f"glob_nq: {self.glob_nq}") + + return "\n".join(lines) + + @property + def structure(self): + return self.gstore.structure + + def get_dataframe(self, what: str = "g2") -> pd.DataFrame: + """ + Build and return a dataframe with all the |g(k,q)|^2 if what == "g2" or + all |v_nk|^2 if what == "v2". + """ + if what == "g2": + g2 = self.g2 if self.g2 is not None else np.abs(self.gvals) ** 2 + df = nparr_to_df("g2", g2, ["iq", "ik", "imode", "m_kq", "n_k"]) + + elif what == "v2": + if self.vk_cart_ibz is None: + raise ValueError("vk_cart_ibz is not available in GSTORE!") + # Compute the squared norm of each vector + v2 = np.sum(self.vk_cart_ibz ** 2, axis=2) + df = nparr_to_df("v2", v2, ["ik", "n_k"]) + + else: + raise ValueError(f"Invalid {what=}") + + #df["m_kq"] += bstart_mkq + #df["n_k"] += bstart_nk + + return df + + def get_g2q_interpolator_kpoint(self, kpoint, method="linear", check_mesh=1): + """ + """ + r = self.gstore.r + + # Find the index of the kpoint. + ik_g, kpoint = r.find_ik_glob_kpoint(kpoint, self.spin) + + # Compute indices of qpoints in the ngqpt mesh. + ngqpt, shifts = r.ngqpt, [0, 0, 0] + q_indices = kpoints_indices(r.qbz, ngqpt, check_mesh=check_mesh) + + natom3 = 3 * len(self.structure) + nb = self.nb + nx, ny, nz = ngqpt + + # (glob_nq, glob_nk, natom3, m_kq, n_k) + g2 = self.g2 if self.g2 is not None else np.abs(self.gvals) ** 2 + g2_qph_mn = g2[:,ik_g] + + # Insert g2 in g2_grid + g2_grid = np.empty((nb, nb, natom3, nx, ny, nz)) + for nu in range(natom3): + for g2_mn, q_inds in zip(g2_qph_mn[:,nu], q_indices): + ix, iy, iz = q_inds + g2_grid[:, :, nu, ix, iy, iz] = g2_mn + + return BzRegularGridInterpolator(self.structure, shifts, g2_grid, method=method) + + def get_g_qpt_kpt(self, qpoint, kpoint, what) -> np.ndarray: + """ + Return numpy array with e-ph matrix elements the for the given (qpoint, kpoint) pair. + + Args: + what="g2" for |g(k,q)|^2, "g" for g(k,q) + """ + # Find the internal indices of (qpoint, kpoint) + iq_g, qpoint = self.gstore.r.find_iq_glob_qpoint(qpoint, self.spin) + ik_g, kpoint = self.gstore.r.find_ik_glob_kpoint(kpoint, self.spin) + if what == "g2": + g2 = self.g2 if self.g2 is not None else np.abs(self.gvals) ** 2 + return g2[iq_g, ik_g] + if what == "g": + if self.cplex != 2: + raise ValueError("Gstore file stores g2 instead of complex g") + return self.gvals[iq_g, ik_g] + + raise ValueError(f"Invalid {what=}") + + def get_gdf_at_qpt_kpt(self, qpoint, kpoint, what="g2") -> pd.DataFrame: + """ + Build and return a dataframe with the |g(k,q)|^2 for the given (qpoint, kpoint) pair. + + Args: + what="g2" for |g(k,q)|^2, "g" for g(k,q) + """ + g2_slice = self.get_g_qpt_kpt(qpoint, kpoint, what) + df = nparr_to_df(what, g2_slice, ["imode", "m_kq", "n_k"]) + #df["m_kq"] += bstart_mkq + #df["n_k"] += bstart_nk + + return df + + def neq(self, other: Gqk, verbose: int) -> int: + """ + Helper function to compare two GQK objects. 
+ """ + # This dimensions must agree in order to have a meaningfull comparison. + # so raise immediately if not equal. + aname_list = ["cplex", "spin", "nb", "glob_nk", "glob_nq"] + + for aname in aname_list: + val1, val2 = getattr(self, aname), getattr(other, aname) + + if isinstance(val1, (str, int, float)): + eq = val1 == val2 + elif isinstance(val1, np.ndarray): + eq = np.allclose(val1, val2) + else: + raise TypeError(f"Don't know how to handle comparison for type: {type(val1)}") + + if not eq: + raise RuntimeError(f"Different values of {aname=}, {val1=}, {val2=}") + + ierr = 0 + kws = dict(verbose=verbose) # , atol= rtol) + + # Compare v_nk or v_mn_k. + if self.vk_cart_ibz is not None: + if not _allclose("vk_cart_ibz", self.vk_cart_ibz, other.vk_cart_ibz, **kws): ierr += 1 + + if self.vkmat_cart_ibz is not None: + if not _allclose("vkmat_cart_ibz", self.vkmat_cart_ibz, other.vkmat_cart_ibz, **kws): ierr += 1 + + # Compare g or g^2. + if self.g2 is not None: + if not _allclose("g2", self.g2, other.g2, **kws): ierr += 1 + + if self.gvals is not None: + if not _allclose("gvals", self.gvals, other.gvals, **kws): ierr += 1 + + return ierr + + +class GstoreReader(BaseEphReader): + """ + Reads data from file and constructs objects. + + .. rubric:: Inheritance Diagram + .. inheritance-diagram:: GstoreReader + """ + def __init__(self, filepath: PathLike): + super().__init__(filepath) + + # Read important dimensions. + self.nsppol = self.read_dimvalue("number_of_spins") + self.cplex = self.read_dimvalue("gstore_cplex") + self.nkbz = self.read_dimvalue("gstore_nkbz") + self.nkibz = self.read_dimvalue("gstore_nkibz") + self.nqbz = self.read_dimvalue("gstore_nqbz") + self.nqibz = self.read_dimvalue("gstore_nqibz") + + # Read important variables. + self.completed = self.read_value("gstore_completed") + self.done_spin_qbz = self.read_value("gstore_done_qbz_spin") + self.with_vk = self.read_value("gstore_with_vk") + self.qptopt = self.read_value("gstore_qptopt") + self.kptopt = self.read_value("kptopt") + self.kzone = self.read_string("gstore_kzone") + self.qzone = self.read_string("gstore_qzone") + self.kfilter = self.read_string("gstore_kfilter") + self.gmode = self.read_string("gstore_gmode") + + # Note conversion Fortran --> C for the isym index. + self.brange_spin = self.read_value("gstore_brange_spin") + self.brange_spin[:,0] -= 1 + self.erange_spin = self.read_value("gstore_erange_spin") + # Total number of k/q points for each spin after filtering (if any) + self.glob_spin_nq = self.read_value("gstore_glob_nq_spin") + self.glob_nk_spin = self.read_value("gstore_glob_nk_spin") + + # K-points and q-points in the IBZ + self.kibz = self.read_value("reduced_coordinates_of_kpoints") + self.qibz = self.read_value("gstore_qibz") + + # K-points and q-points in the BZ + self.kbz = self.read_value("gstore_kbz") + self.qbz = self.read_value("gstore_qbz") + self.ngqpt = self.read_value("gstore_ngqpt") + + # Mapping BZ --> IBZ. Note conversion Fortran --> C for the isym index. + # nctkarr_t("gstore_kbz2ibz", "i", "six, gstore_nkbz"), & + # nctkarr_t("gstore_qbz2ibz", "i", "six, gstore_nqbz"), & + self.kbz2ibz = self.read_value("gstore_kbz2ibz") + self.kbz2ibz[:,0] -= 1 + + self.qbz2ibz = self.read_value("gstore_qbz2ibz") + self.qbz2ibz[:,0] -= 1 + + # Mapping q/k points in gqk --> BZ. Note conversion Fortran --> C for indexing. 
+ # nctkarr_t("gstore_qglob2bz", "i", "gstore_max_nq, number_of_spins"), & + # nctkarr_t("gstore_kglob2bz", "i", "gstore_max_nk, number_of_spins") & + self.qglob2bz = self.read_value("gstore_qglob2bz") + self.qglob2bz -= 1 + self.kglob2bz = self.read_value("gstore_kglob2bz") + self.kglob2bz -= 1 + + def find_iq_glob_qpoint(self, qpoint, spin: int): + """ + Find the internal index of the qpoint needed to access the gvals array. + """ + qpoint = np.asarray(qpoint) + for iq_g, iq_bz in enumerate(self.qglob2bz[spin]): + if np.allclose(qpoint, self.qbz[iq_bz]): + #print(f"Found {qpoint = } with index {iq_g = }") + return iq_g, qpoint + + raise ValueError(f"Cannot find {qpoint = } in GSTORE.nc") + + def find_ik_glob_kpoint(self, kpoint, spin: int): + """Find the internal indices of the kpoint needed to access the gvals array.""" + kpoint = np.asarray(kpoint) + for ik_g, ik_bz in enumerate(self.kglob2bz[spin]): + if np.allclose(kpoint, self.kbz[ik_bz]): + #print(f"Found {kpoint = } with index {ik_g = }") + return ik_g, kpoint + + raise ValueError(f"Cannot find {kpoint = } in GSTORE.nc") + + # TODO: This fix to read groups should be imported in pymatgen. + @lazy_property + def path2group(self) -> dict: + return self.rootgrp.groups + + +class GstoreRobot(Robot, RobotWithEbands): + """ + This robot analyzes the results contained in multiple GSTORE.nc files. + + Usage example: + + .. code-block:: python + + robot = GstoreRobot.from_files([ + "t04o_GSTORE.nc", + "t05o_GSTORE.nc", + ]) + + robot.neq(verbose=1) + + .. rubric:: Inheritance Diagram + .. inheritance-diagram:: GstoreRobot + """ + EXT = "GSTORE" + + def neq(self, ref_basename: str | None = None, verbose: int = 0) -> int: + """ + Compare all GSTORE.nc files stored in the GstoreRobot + """ + # Find reference gstore. By default the first file in the robot is used. + ref_gstore = self._get_ref_abifile_from_basename(ref_basename) + + exc_list = [] + ierr = 0 + for other_gstore in self.abifiles: + if ref_gstore.filepath == other_gstore.filepath: + continue + print("Comparing: ", ref_gstore.basename, " with: ", other_gstore.basename) + try: + ierr += self._neq_two_gstores(ref_gstore, other_gstore, verbose) + cprint("EQUAL", color="green") + except Exception as exc: + exc_list.append(str(exc)) + + for exc in exc_list: + cprint(exc, color="red") + + return ierr + + @staticmethod + def _neq_two_gstores(gstore1: GstoreFile, gstore2: GstoreFile, verbose: int) -> int: + """ + Helper function to compare two GSTORE files. + """ + # These quantities must be the same to have a meaningfull comparison. + aname_list = ["structure", "nsppol", "cplex", "nkbz", "nkibz", + "nqbz", "nqibz", "completed", "kzone", "qzone", "kfilter", "gmode", + "brange_spin", "erange_spin", "glob_spin_nq", "glob_nk_spin", + ] + + for aname in aname_list: + self._compare_attr_name(aname, gstore1, gstore2) + + # Now compare the gkq objects for each spin. + ierr = 0 + for spin in range(gstore1.nsppol): + gqk1, gqk2 = gstore1.gqk_spin[spin], gstore2.gqk_spin[spin] + ierr += gqk1.neq(gqk2, verbose) + + return ierr + + def yield_figs(self, **kwargs): # pragma: no cover + """ + This function *generates* a predefined list of matplotlib figures with minimal input from the user. + Used in abiview.py to get a quick look at the results. + """ + #for fig in self.get_ebands_plotter().yield_figs(): yield fig + + def write_notebook(self, nbpath=None) -> str: + """ + Write a jupyter_ notebook to ``nbpath``. If nbpath is None, a temporary file in the current + working directory is created. 
Return path to the notebook. + """ + nbformat, nbv, nb = self.get_nbformat_nbv_nb(title=None) + + args = [(l, f.filepath) for l, f in self.items()] + nb.cells.extend([ + #nbv.new_markdown_cell("# This is a markdown cell"), + nbv.new_code_cell("robot = abilab.GstoreRobot(*%s)\nrobot.trim_paths()\nrobot" % str(args)), + #nbv.new_code_cell("ebands_plotter = robot.get_ebands_plotter()"), + ]) + + # Mixins + #nb.cells.extend(self.get_baserobot_code_cells()) + #nb.cells.extend(self.get_ebands_code_cells()) + + return self._write_nb_nbpath(nb, nbpath) diff --git a/abipy/examples/flows/_under_develop/run_qha.py b/abipy/examples/flows/_under_develop/run_qha.py index e6ae022c8..7f3c6e4f7 100755 --- a/abipy/examples/flows/_under_develop/run_qha.py +++ b/abipy/examples/flows/_under_develop/run_qha.py @@ -29,13 +29,13 @@ def build_flow(options): # Build input for GS calculation. scf_input = abilab.AbinitInput(structure, pseudos) - scf_input.set_vars(ecut=12, nband=8, tolvrs=1e-8) - scf_input.set_kmesh(ngkpt=[4, 4, 4], shiftk=[0, 0, 0]) + scf_input.set_vars(ecut=12, nband=4, tolvrs=1e-8) + scf_input.set_kmesh(ngkpt=[2, 2, 2], shiftk=[0, 0, 0]) - v0 = scf_input.structure.volume - volumes = [0.08 * v0, v0, v0 * 1.02] - return QhaFlow.from_scf_input(options.workdir, scf_input, volumes, - ngqpt=[2, 2, 2], with_becs=False, edos_ngkpt=(4, 4, 4)) + with_becs = False + ngqpt = [2, 2, 2] + return QhaFlow.from_scf_input(options.workdir, scf_input, ngqpt, with_becs, eps=0.005, + ) # , edos_ngkpt=(4, 4, 4)) # This block generates the thumbnails in the Abipy gallery. diff --git a/abipy/flowtk/flows.py b/abipy/flowtk/flows.py index 126c2de11..f470612a5 100644 --- a/abipy/flowtk/flows.py +++ b/abipy/flowtk/flows.py @@ -1253,7 +1253,9 @@ def show_status(self, return_df=False, **kwargs): for i, work in enumerate(self): if nids and work.node_id not in nids: continue print("", file=stream) - cprint_map("Work #%d: %s, Finalized=%s" % (i, work, work.finalized), cmap={"True": "green"}, file=stream) + name = "" + if hasattr(work, "_name"): name = ", work_name=%s" % work._name + cprint_map("Work #%d: %s, Finalized=%s %s" % (i, work, work.finalized, name), cmap={"True": "green"}, file=stream) if wlist is not None and i in wlist: continue if verbose == 0 and work.finalized: print(" Finalized works are not shown. Use verbose > 0 to force output.", file=stream) @@ -2149,7 +2151,7 @@ def allocated(self) -> int: except AttributeError: return 0 - def allocate(self, workdir=None, use_smartio=False): + def allocate(self, workdir=None, use_smartio: bool=False, build: bool=False) -> Flow: """ Allocate the `Flow` i.e. assign the `workdir` and (optionally) the |TaskManager| to the different tasks in the Flow. @@ -2157,9 +2159,7 @@ def allocate(self, workdir=None, use_smartio=False): Args: workdir: Working directory of the flow. Must be specified here if we haven't initialized the workdir in the __init__. - - Return: - self + build: True to build the flow and save status to pickle file. 
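In short, `allocate(build=True)` collapses the former two-step `flow.allocate(); flow.build_and_pickle_dump()` into one call, which is what the QHA works further below rely on when they extend the flow at runtime. A minimal sketch with a placeholder workdir and a hypothetical input:

.. code-block:: python

    from abipy import flowtk

    flow = flowtk.Flow(workdir="flow_demo")   # placeholder workdir
    flow.register_task(gs_input)              # gs_input: hypothetical AbinitInput
    flow.allocate(build=True)                 # allocate + build + pickle in one call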
""" if workdir is not None: # We set the workdir of the flow here @@ -2186,6 +2186,9 @@ def allocate(self, workdir=None, use_smartio=False): if use_smartio: self.use_smartio() + if build: + self.build_and_pickle_dump() + return self def use_smartio(self): @@ -2263,13 +2266,16 @@ def on_all_ok(self): if self.on_all_ok_num_calls > 0: return True self.on_all_ok_num_calls += 1 - `implement_logic_to_create_new_work` + ##################################### + # implement_logic_to_create_new_work + ##################################### self.register_work(work) self.allocate() self.build_and_pickle_dump() - return False # The scheduler will keep on running the flow. + # The scheduler will keep on running the flow. + return False """ return True @@ -3429,6 +3435,7 @@ def main(): print("Writing new pickle file.") flow.build_and_pickle_dump() + else: print(f"Dry run mode. {len(nids)} tasks have been modified in memory.") diff --git a/abipy/flowtk/launcher.py b/abipy/flowtk/launcher.py index 3ab87e129..f24f30add 100644 --- a/abipy/flowtk/launcher.py +++ b/abipy/flowtk/launcher.py @@ -753,12 +753,12 @@ def _callback(self): if delta_etime.total_seconds() > self.num_reminders * self.remindme_s: self.num_reminders += 1 - msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " % + msg = ("Just to remind you that the scheduler with pid %s, flow %s has been running for %s " % (self.pid, flow, delta_etime)) retcode = self.send_email(msg, tag="[REMINDER]") if retcode: - msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" + + msg += ("\nThe scheduler tried to send an e-mail to remind the user," + " but send_email returned %d. Error is not critical though!" % retcode) print(msg) diff --git a/abipy/flowtk/qha.py b/abipy/flowtk/qha.py index 63f4da614..aaebffaa5 100644 --- a/abipy/flowtk/qha.py +++ b/abipy/flowtk/qha.py @@ -6,146 +6,231 @@ import numpy as np -from monty.json import jsanitize +from abipy.tools.serialization import mjson_write # mjson_load, mjson_loads, +from abipy.dfpt.deformation_utils import generate_deformations from abipy.flowtk.works import Work, PhononWork +from abipy.flowtk.tasks import RelaxTask from abipy.flowtk.flows import Flow -class RelaxAndPhononWork(Work): +class RelaxAndAddPhononWorks(Work): """ - This work performs a structural relaxation for different volumes, then it uses - the relaxed structures to compute phonons, BECS and the dielectric tensor with DFPT. + This work performs a structural relaxation of the initial structure, then a set of distorted + structures is genenerated and the relaxed structures are used + to compute phonons, BECS and the dielectric tensor with DFPT. .. rubric:: Inheritance Diagram - .. inheritance-diagram:: RelaxAndPhononWork + .. inheritance-diagram:: RelaxAndAddPhononWorks """ @classmethod - def from_scf_input(cls, scf_input, volumes, ngqpt, with_becs, optcell, ionmov, - edos_ngkpt=None) -> RelaxAndPhononWork: + def from_scf_input(cls, scf_input, eps, ngqpt, with_becs: bool, optcell: int, ionmov: int, + edos_ngkpt=None) -> RelaxAndAddPhononWorks: """ Build the work from an |AbinitInput| representing a GS-SCF calculation. Args: scf_input: |AbinitInput| for GS-SCF used as template to generate the other inputs. - volumes: List of volumes in Ang**3 with_becs: Activate calculation of Electric field and Born effective charges. optcell, ionmov: Abinit input variables. edos_ngkpt: Three integers defining the the k-sampling for the computation of the - electron DOS with the relaxed structures. 
Useful for metals or small gap semiconductors + electron DOS with the relaxed structures. Useful for metals in which the electronic contribution should be included. None disables the computation of the e-DOS. """ work = cls() + + # Save attributes in work work.initial_scf_input = scf_input - work.ngqpt = np.reshape(ngqpt, (3,)) - work.edos_ngkpt = edos_ngkpt if edos_ngkpt is None else np.reshape(edos_ngkpt, (3,)) - work.volumes = np.array(volumes) + work.eps = eps + work.ngqpt = ngqpt work.with_becs = with_becs + work.edos_ngkpt = edos_ngkpt if edos_ngkpt is None else np.reshape(edos_ngkpt, (3,)) - # Create input for relaxation and register the relaxation tasks. - relax_template = scf_input.deepcopy() + # Create input for relaxation and register the relaxation task. + work.relax_template = relax_template = scf_input.deepcopy() relax_template.pop_tolerances() - relax_template.set_vars(tolvrs=1e-10, toldff=1.e-6, optcell=optcell, ionmov=ionmov) + relax_template.set_vars(tolvrs=1e-8, toldff=1.e-6, optcell=optcell, ionmov=ionmov) if optcell is not None and optcell != 0: relax_template.set_vars_ifnotin(ecutsm=0.5, dilatmx=1.05) - # Construct len(volumes) works. Each work performs a structural relaxation - # at fixed volume followed by a DFPT calculation with the relaxed structure. - work.relax_tasks_vol = [] - for new_volume in work.volumes: - new_structure = relax_template.structure.scale_lattice(new_volume) - new_input = relax_template.new_with_structure(new_structure) - task = work.register_relax_task(new_input) - work.relax_tasks_vol.append(task) - - work.ph_works_vol = len(volumes) * [None] - work.edos_work = None + work.initial_relax_task = work.register_relax_task(relax_template) return work + def on_ok(self, sender): + """ + This method is called when one task reaches status `S_OK`. + It executes on_all_ok when all tasks in self have reached `S_OK`. + """ + if sender == self.initial_relax_task: + # Get relaxed structure and build deformed structures. + relaxed_structure = sender.get_final_structure() + self.deformed_structures_dict = generate_deformations(relaxed_structure, eps=self.eps) + + # Add new tasks to relax the deformed structures at fixed cell. + for structure in self.deformed_structures_dict.values(): + new_input = self.relax_template.new_with_structure(structure, optcell=0) + self.register_relax_task(new_input) + + self.flow.allocate(build=True) + + return super().on_ok(sender) + def on_all_ok(self): """ This callback is called when all tasks in the Work reach status `S_OK`. - Here are add a new PhononWork for each volume using the relaxed structure. + Here we add a new PhononWork for each deformed structure, using its relaxed geometry. """ if self.edos_ngkpt is not None: self.edos_work = Work() - # Build phonon works for the different volumes. - for ivol, task in enumerate(self.relax_tasks_vol): + # Build phonon works for the different relaxed structures. + self.ph_works = [] + start = 1 + for task, def_name in zip(self[start:], self.deformed_structures_dict.keys(), strict=True): relaxed_structure = task.get_final_structure() scf_input = self.initial_scf_input.new_with_structure(relaxed_structure) ph_work = PhononWork.from_scf_input(scf_input, self.ngqpt, is_ngqpt=True, tolerance=None, with_becs=self.with_becs, ddk_tolerance=None) - self.ph_works_vol[ivol] = ph_work - # Add e-DOS calculation. + ph_work.set_name(def_name) + self.ph_works.append(ph_work) + self.flow.register_work(ph_work) + + # Add electron DOS calculation.
if self.edos_ngkpt is not None: edos_input = scf_input.make_edos_input(self.edos_ngkpt) self.edos_work.register_nscf_task(edos_input, deps={ph_work[0]: "DEN"}) - self.flow.register_work(ph_work) - if self.edos_ngkpt is not None: self.flow.register_work(self.edos_work) - self.flow.allocate() - self.flow.build_and_pickle_dump() + self.flow.allocate(build=True) + + return super().on_all_ok() + + +class ThermalRelaxTask(RelaxTask): + + def _on_ok(self): + results = super()._on_ok() + # Check for convergence. + #if not self.collinear_done: + # self.input.set_vars(strtarget=strtarget) + # self.finalized = False + # self.restart() + + return results + + +class ThermalRelaxWork(Work): + """ + .. rubric:: Inheritance Diagram + .. inheritance-diagram:: ThermalRelaxWork + """ + + @classmethod + def from_relax_input(cls, relax_input, qha, temperatures, pressures): + """ + Build the work from a template relaxation input, a QHA object providing get_strtarget, + and lists of temperatures and pressures. + """ + work = cls() + + work.temperatures = temperatures + work.pressures = pressures + #work.qha = qha + + for pressure in pressures: + for temperature in temperatures: + strtarget = qha.get_strtarget(temperature, pressure) + new_input = relax_input.new_with_vars(strtarget=strtarget) + work.register_relax_task(new_input) + + return work + def on_all_ok(self): + """ + Implement the post-processing step at the end of the Work. + """ return super().on_all_ok() class QhaFlow(Flow): """ + Flow for QHA calculations. .. rubric:: Inheritance Diagram .. inheritance-diagram:: QhaFlow """ @classmethod - def from_scf_input(cls, workdir, scf_input, volumes, ngqpt, with_becs, - edos_ngkpt=None, metadata=None, manager=None) -> QhaFlow: + def from_scf_input(cls, workdir, scf_input, ngqpt, with_becs, eps=0.005, + edos_ngkpt=None, manager=None) -> QhaFlow: """ - Build a |Flow| for QHA calculations from an |AbinitInput| for GS-SCF calculations. + Build a flow for QHA calculations from an |AbinitInput| for GS-SCF calculations. Args: workdir: Working directory of the flow. scf_input: |AbinitInput| for GS-SCF run used as template to generate the other inputs. - volumes: List of volumes in Ang**3. ngqpt: Three integers defining the q-mesh for phonon calculation. with_becs: Activate calculation of Electric field and Born effective charges. + eps: Strain amplitude used to generate the set of deformed structures. edos_ngkpt: Three integers defining the the k-sampling for the computation of the electron DOS with the relaxed structures. Useful for metals or small gap semiconductors in which the electronic contribution should be included. None disables the computation of the e-DOS. - metadata: Dictionary with metadata to be be added to the final JSON file. manager: |TaskManager| instance. Use default if None.
""" - ngqpt = np.reshape(ngqpt, 3) flow = cls(workdir=workdir, manager=manager) - flow.metadata = jsanitize(metadata) if metadata is not None else None - work = RelaxAndPhononWork.from_scf_input(scf_input, volumes, ngqpt, - with_becs=with_becs, optcell=3, ionmov=3, edos_ngkpt=edos_ngkpt) - flow.relax_and_phonon_work = work - flow.register_work(work) + new_work = RelaxAndAddPhononWorks.from_scf_input(scf_input, eps, ngqpt, + with_becs=with_becs, optcell=2, ionmov=2, + edos_ngkpt=edos_ngkpt) + flow.register_work(new_work) + flow.relax_and_add_phonon_works = new_work return flow + #def on_all_ok(self): + # if self.on_all_ok_num_calls > 0: return True + # self.on_all_ok_num_calls += 1 + + # print(f"In on_all_ok with {self.on_all_ok_num_calls =}") + + # # implement_logic_to_create_new_work + # #for work in self.relax_and_add_phonon_works.ph_works + + # """ + # self.register_work(work) + # self.allocate() + # self.build_and_pickle_dump() + # """ + + # #from abipy.dfpt.qha_approximation import QHA_App + # #qha = QHA_App.from_files_app_ddb(cls, ddb_paths, phdos_paths) + + # # The scheduler will keep on running the flow. + # return False + def finalize(self): """ This method is called when the flow is completed. It performs some basic post-processing of the results to facilitate further analysis. """ - d = {} - if self.metadata is not None: d.update({"metadata": self.metadata}) + data = {} + #work = self[0] + #data = dict( + # initial_structure=work.initial_structure, + # relaxed_structure=work.final_structure, + # relaxed_structure_energy_eV=work.initial_structure, + # initial_deformed_structures=work.initial_structure, + # relaxed_deformed_structures_energy_ev=work.initial_structure, + #) # QHA.from_files(cls, gsr_paths, phdos_paths) # Build list of strings with path to the relevant output files ordered by V. 
- d["gsr_relax_paths"] = [task.gsr_path for task in self.relax_and_phonon_work.relax_tasks_vol] - d["ddb_paths"] = [ph_work.outdir.has_abiext("DDB") for ph_work in self.relax_and_phonon_work.ph_works_vol] - d["gsr_edos_path"] = [] - if self.relax_and_phonon_work.edos_ngkpt is not None: - d["gsr_edos_paths"] = [task.gsr_path for task in self.relax_and_phonon_work.edos_work] + #data["gsr_relax_paths"] = [task.gsr_path for task in self.relax_and_phonon_work.relax_tasks_vol] + data["ddb_paths"] = [ph_work.outdir.has_abiext("DDB") for ph_work in self.relax_and_add_phonon_works.ph_works] + #data["gsr_edos_path"] = [] + #if self.relax_and_phonon_work.edos_ngkpt is not None: + # data["gsr_edos_paths"] = [task.gsr_path for task in self.relax_and_phonon_work.edos_work] - from abipy import abilab #entries = [] #items = zip(self.relax_and_phonon_work.relax_tasks_vol, self.relax_and_phonon_work.ph_works_vol) #for ivol, (relax_task, ph_work) in enumerate(items): @@ -165,16 +250,8 @@ def finalize(self): # entry["gsr_edos_path"] = task.gsr_path # entries.append(entry) - #d["entries_for_each_volume"] = entries - - abilab.mjson_write(d, self.outdir.path_in("qha.json"), indent=4) + #data["entries_for_each_volume"] = entries - #pyscript = """ - #from abipy import abilab - #from abipy.qha import Qha - #data = abilab.mjson_read("qha.json") - #data - #qha = from_files(gsr_files_paths, phdos_files_paths): - #""" + mjson_write(data, self.outdir.path_in("qha_data.json"), indent=4) return super().finalize() diff --git a/abipy/flowtk/tests/test_qha.py b/abipy/flowtk/tests/test_qha.py index 3746cb588..609b2eeaf 100644 --- a/abipy/flowtk/tests/test_qha.py +++ b/abipy/flowtk/tests/test_qha.py @@ -10,10 +10,10 @@ def test_qhaflow(self): """Testing QhaFlow.""" workdir = self.mkdtemp() scf_input = self.get_gsinput_alas_ngkpt(ngkpt=[4, 4, 4]) - v0 = scf_input.structure.volume - volumes = [0.98 * v0, v0, v0 * 1.02] - flow = QhaFlow.from_scf_input(workdir, scf_input, volumes, ngqpt=[2, 2, 2], - with_becs=False, edos_ngkpt=[4, 4, 4], metadata={"mpi_id": 123}) + #v0 = scf_input.structure.volume + #volumes = [0.98 * v0, v0, v0 * 1.02] + flow = QhaFlow.from_scf_input(workdir, scf_input, ngqpt=[2, 2, 2], + with_becs=False, edos_ngkpt=[4, 4, 4]) #, metadata={"mpi_id": 123}) flow.allocate() flow.check_status() diff --git a/abipy/ml/aseml.py b/abipy/ml/aseml.py index 523a1befa..907a0ae51 100644 --- a/abipy/ml/aseml.py +++ b/abipy/ml/aseml.py @@ -826,7 +826,7 @@ def plot_stresses(self, fontsize=6, **kwargs): linear_fit_ax(ax, xs, ys, fontsize=fontsize, with_label=True) ax.legend(loc="best", shadow=True, fontsize=fontsize) ax.grid(True) - s_tex = "$\sigma_{%s}$" % voigt_comp + s_tex = r"$\sigma_{%s}$" % voigt_comp if icol == 0: ax.set_ylabel(f"{key2} {s_tex}", fontsize=fontsize) if irow == (len(self.ALL_VOIGT_COMPS) - 1): @@ -865,11 +865,11 @@ def plot_energies_traj(self, delta_mode=True, fontsize=6, markersize=2, **kwargs ax.plot(e2, marker="o", color="blue", label=key2, markersize=markersize) set_grid_legend(ax, fontsize, xlabel='trajectory', - ylabel="$|\Delta_E|$" if delta_mode else "$E$", + ylabel=r"$|\Delta_E|$" if delta_mode else "$E$", grid=True, legend_loc="upper left", title=f"{key1}/{key2} MAE: {stats.MAE:.6f} eV") - head = "$\Delta$-Energy in eV" if delta_mode else "Energy in eV" + head = r"$\Delta$-Energy in eV" if delta_mode else "Energy in eV" if "title" not in kwargs: fig.suptitle(f"{head} for {self.structure.latex_formula}") return fig @@ -941,7 +941,7 @@ def plot_forces_traj(self, delta_mode=True, symbol=None, 
fontsize=6, markersize= grid=True, legend=not delta_mode, legend_loc="upper left", ylabel=f"$|\Delta {fp_tex}|$" if delta_mode else f"${fp_tex}$") - head = "$\Delta$-forces in eV/Ang" if delta_mode else "Forces in eV/Ang" + head = r"$\Delta$-forces in eV/Ang" if delta_mode else r"Forces in eV/Ang" if "title" not in kwargs: fig.suptitle(f"{head} for {self.structure.latex_formula}") return fig @@ -987,7 +987,7 @@ def plot_stress_traj(self, delta_mode=True, markersize=2, fontsize=6, **kwargs): set_grid_legend(ax, fontsize, xlabel='trajectory' if last_row else None, grid=True, legend=not delta_mode, legend_loc="upper left", - ylabel=f"$|\Delta \sigma_{voigt_comp_tex}|$ " if delta_mode else "$\sigma$ ") + ylabel=f"$|\Delta \sigma_{voigt_comp_tex}|$ " if delta_mode else r"$\sigma$ ") head = r"$\Delta \sigma$ (eV/Ang$^3$)" if delta_mode else "Stress tensor (eV/Ang$^3$)" if "title" not in kwargs: fig.suptitle(f"{head} for {self.structure.latex_formula}") diff --git a/abipy/ppcodes/oncv_parser.py b/abipy/ppcodes/oncv_parser.py index 237ccb43d..99a3fe394 100644 --- a/abipy/ppcodes/oncv_parser.py +++ b/abipy/ppcodes/oncv_parser.py @@ -106,7 +106,6 @@ def _scan(self, verbose: int = 0) -> OncvParser: # Read data and store it in lines self.lines = [] import io - #with io.open(self.filepath, "rt") as fh: with io.open(self.filepath, "rt", encoding="latin-1") as fh: for i, line in enumerate(fh): if i == 0: @@ -144,7 +143,12 @@ def _scan(self, verbose: int = 0) -> OncvParser: self._warnings.append("\n".join(self.lines[i:i+2])) if "GHOST(+)" in line: + # Testing for highly-localized positive-energy ghosts + # l /rc E Basis Diag. E Cutoff + + # 0 0.304263 4.808940 86.65 WARNING - GHOST(+) self._warnings.append(line) + if "GHOST(-)" in line: self._errors.append(line) @@ -287,6 +291,15 @@ def parse_eigs(str_list): return self + @lazy_property + def min_ghost_empty_ha(self): + ghost_ene = np.inf + for line in self.warnings: + if "GHOST(+)" not in line: continue + ghost_ene = min(ghost_ene, float(line.split()[2])) + + return None if ghost_ene == np.inf else ghost_ene + @lazy_property def lmax(self) -> int: # Read lmax (not very robust because we assume the user didn't change the template but oh well) @@ -294,7 +307,6 @@ def lmax(self) -> int: for i, line in enumerate(self.lines): if line.startswith(header): return int(self.lines[i+1]) - break else: raise self.Error(f"Cannot find line with `#lmax` in: {self.filepath}") @@ -905,7 +917,7 @@ def get_plotter(self): # -> Union[OncvPlotter, None]: return OncvPlotter(self) except Exception as exc: print(exc) - #raise + #raise exc return None def _grep(self, tag: str, beg: int = 0) -> GrepResults: diff --git a/abipy/scripts/abiml.py b/abipy/scripts/abiml.py index 2b656bbb5..fd95e502e 100755 --- a/abipy/scripts/abiml.py +++ b/abipy/scripts/abiml.py @@ -593,17 +593,29 @@ def scan_relax(ctx, filepath, nn_name, @click.option("--traj_range", type=str, show_default=True, help="Trajectory range e.g. `5` to select the first 5 iterations, `1:4` to select steps 1,2,3. `1:4:2 for 1,3", default=None) +@click.option("--symbol", type=str, show_default=True, + help="Chemical symbol. 
If None all atoms are considered.", + default=None) +@click.option('--stress/--no-stress', default=True, show_default=True, help="Show parity-plot for stress tensor") +@click.option('--delta/--no-delta', default=False, show_default=True, help="Show parity-plot for delta mode") +@click.option('--traj/--no-traj', default=False, show_default=True, help="Show results along trajectory") @click.option("-e", '--exposer', default="mpl", show_default=True, type=click.Choice(["mpl", "panel"]), - help='Plotting backend: mpl for matplotlib, panel for web-based, None to disable plotting') + help='Plotting backend: mpl for matplotlib, panel for web-based, None to disable plotting.') @add_nprocs_opt @add_workdir_verbose_opts @click.option('--config', default='abiml_validate.yml', type=click.Path(), callback=set_default, is_eager=True, expose_value=False) -def validate(ctx, filepaths, +def validate(ctx, + filepaths, nn_names, traj_range, + symbol, + stress, + delta, + traj, exposer, nprocs, - workdir, verbose + workdir, + verbose ): """ Compare ab-initio energies, forces, and stresses with ML-computed ones. @@ -618,31 +630,37 @@ def validate(ctx, filepaths, traj_range = cli.range_from_str(traj_range) nn_names = _get_nn_names(nn_names) ml_comp = aseml.MlValidateWithAbinitio(filepaths, nn_names, traj_range, verbose, workdir, prefix="_abiml_validate_") - print(ml_comp) + #print(ml_comp) c = ml_comp.run(nprocs=nprocs) + show = False if exposer != "None": - show = True - show = False - with_stress = True - with_stress = False - on_traj = True from abipy.tools.plotting import Exposer with Exposer.as_exposer(exposer, title=" ".join(os.path.basename(p) for p in filepaths)) as e: e(c.plot_energies(show=show, savefig="energies.png")) - if on_traj: + + if traj: e(c.plot_energies_traj(delta_mode=True, show=show, savefig="energies_traj.png")) - e(c.plot_energies_traj(delta_mode=False, show=show, savefig="energies_traj_delta_mode.png")) - symbol = None - #symbol = "Li" + if delta: + e(c.plot_energies_traj(delta_mode=False, show=show, savefig="energies_traj_delta_mode.png")) + e(c.plot_forces(delta_mode=False, symbol=symbol, show=show, savefig="forces.png")) - e(c.plot_forces(delta_mode=True, symbol=symbol, show=show, savefig="forces_delta.png")) - if on_traj: - e(c.plot_forces_traj(delta_mode=True, show=show, savefig="forces_traj_delta_mode.png")) - if with_stress: - e(c.plot_stresses(delta_mode=True, show=show, savefig="stresses_delta_mode.png")) - if on_traj: - e(c.plot_stress_traj(delta_mode=True, show=show, savefig="stress_traj_delta_mode.png")) + if delta: + e(c.plot_forces(delta_mode=True, symbol=symbol, show=show, savefig="forces_delta.png")) + + if traj: + e(c.plot_forces_traj(delta_mode=False, show=show, savefig="forces_traj.png")) + if delta: + e(c.plot_forces_traj(delta_mode=True, show=show, savefig="forces_traj_delta_mode.png")) + + if stress: + e(c.plot_stresses(delta_mode=False, show=show, savefig="stresses.png")) + if delta: + e(c.plot_stresses(delta_mode=True, show=show, savefig="stresses_delta_mode.png")) + if traj: + e(c.plot_stress_traj(delta_mode=False, show=show, savefig="stress_traj.png")) + if delta: + e(c.plot_stress_traj(delta_mode=True, show=show, savefig="stress_traj_delta_mode.png")) return 0 diff --git a/abipy/scripts/oncv.py b/abipy/scripts/oncv.py index c8ac360c3..b5c6ffa5c 100755 --- a/abipy/scripts/oncv.py +++ b/abipy/scripts/oncv.py @@ -60,7 +60,7 @@ def oncv_gnuplot(options): def oncv_print(options) -> int: """ - Parse oncvps output and print results to terminal. 
+ Parse oncvpsp output file and print results to terminal. """ out_path = _find_oncv_output(options.filepath) p = OncvParser(out_path).scan() @@ -248,6 +248,53 @@ def oncv_run(options): return 0 +def oncv_ghost(options) -> int: + """ + Scan directories for oncvpsp output files and build a dataframe with ghost positions. + """ + #cli.customize_mpl(options) + + # Walk through the directory tree and find all .out files. + root_dir = options.filepath + out_paths = [] + for dirpath, _, filenames in os.walk(root_dir): + for filename in filenames: + if filename.endswith(".out"): + out_paths.append(os.path.join(dirpath, filename)) + #print(out_paths) + + # Parse the output files. + ierr = 0 + rows = [] + for out_path in out_paths: + onc_parser = OncvParser(out_path).scan() + if not onc_parser.run_completed: + cprint("oncvpsp output is not completed. Exiting", color="red") + ierr += 1 + continue + + hints = onc_parser.hints + rows.append(dict( + out_path=out_path, + z=onc_parser.z, + num_warnings=len(onc_parser.warnings), + num_errors=len(onc_parser.errors), + min_ghost_empty_ha=onc_parser.min_ghost_empty_ha, + #min_ghost_occ_ha=oncv_parser.min_ghost_occ_ha, + #ppgen_hint_low=hints["low"]["ecut"], + #ppgen_hint_normal=hints["normal"]["ecut"], + ppgen_hint_high=hints["high"]["ecut"], + )) + + import pandas as pd + df = pd.DataFrame(rows) + df = df.sort_values(by='z') + from abipy.tools.printing import print_dataframe + print_dataframe(df) + + return ierr + + def oncv_gui(options): """ Start a panel web app to generate pseudopotentials. @@ -278,8 +325,6 @@ def build(): return pn.serve(build, **serve_kwargs) - - def get_epilog() -> str: return """\ Usage example: @@ -315,6 +360,7 @@ def get_copts_parser(multi=False): return p copts_parser = get_copts_parser(multi=False) + copts_parser_multi = get_copts_parser(multi=True) # Parent parser for commands supporting MplExposer. plot_parser = argparse.ArgumentParser(add_help=False) @@ -345,10 +391,13 @@ def get_copts_parser(multi=False): help=oncv_plot_pseudo.__doc__) # Subparser for compare command. - copts_parser_multi = get_copts_parser(multi=True) p_compare = subparsers.add_parser("compare", parents=[copts_parser_multi, plot_parser], help=oncv_compare.__doc__) + # Subparser for ghost command. + p_ghost = subparsers.add_parser("ghost", parents=[copts_parser], + help=oncv_ghost.__doc__) + # notebook options. p_nb = subparsers.add_parser('notebook', parents=[copts_parser], help=oncv_notebook.__doc__) p_nb.add_argument('-nb', '--notebook', action='store_true', default=False, help="Open file in jupyter notebook") diff --git a/dev_scripts/run_tuna.sh b/dev_scripts/run_tuna.sh new file mode 100755 index 000000000..b98abb7a6 --- /dev/null +++ b/dev_scripts/run_tuna.sh @@ -0,0 +1,3 @@ +pip install tuna +python -X importtime -c "import abipy" 2> abipy_import.log +tuna abipy_import.log diff --git a/tasks.py b/tasks.py index 4daa948f1..dbdc31f33 100644 --- a/tasks.py +++ b/tasks.py @@ -23,11 +23,11 @@ def make_doc(ctx): open_doc(ctx) -@task -def push_doc(ctx): - make_doc(ctx) - with cd(DOCS_DIR): - ctx.run("./ghp_import.py _build/html/ -n -p") +#@task +#def push_doc(ctx): +# make_doc(ctx) +# with cd(DOCS_DIR): +# ctx.run("./ghp_import.py _build/html/ -n -p") @task
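Editor's addendum (illustration, not part of the patch): the qha.py hunks above change the public entry point, so a QHA flow is now built from a single GS-SCF template plus a strain amplitude instead of an explicit list of volumes. A minimal sketch of the new call sequence, assembled from the updated run_qha.py and test_qha.py in this diff; the silicon structure, pseudopotential and workdir name below are assumptions made for the example.

from abipy import abilab
import abipy.data as abidata
from abipy.flowtk.qha import QhaFlow

# GS-SCF template (structure and pseudopotential are placeholders for the sketch).
structure = abilab.Structure.from_file(abidata.cif_file("si.cif"))
scf_input = abilab.AbinitInput(structure, pseudos=abidata.pseudos("14si.pspnc"))
scf_input.set_vars(ecut=12, nband=4, tolvrs=1e-8)
scf_input.set_kmesh(ngkpt=[2, 2, 2], shiftk=[0, 0, 0])

# New API: no list of volumes; deformed structures are generated internally
# from eps once the initial relaxation has completed.
flow = QhaFlow.from_scf_input("flow_qha", scf_input, ngqpt=[2, 2, 2],
                              with_becs=False, eps=0.005, edos_ngkpt=None)
flow.allocate(build=True)  # per the new allocate signature, build=True also saves the pickle file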
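A second sketch showing the mechanism that RelaxAndAddPhononWorks.on_ok relies on: generate_deformations returns a {name: structure} dictionary and each deformed structure is re-relaxed at fixed cell by forwarding optcell=0 through the new **abi_vars argument of new_with_structure. The helper function name is hypothetical; only generate_deformations and new_with_structure come from the patch.

from abipy.dfpt.deformation_utils import generate_deformations

def fixed_cell_inputs_from_relaxed(relax_template, relaxed_structure, eps=0.005):
    """Hypothetical helper: map deformation name -> AbinitInput with optcell=0."""
    deformed_dict = generate_deformations(relaxed_structure, eps=eps)
    # optcell=0 is forwarded via **abi_vars so that only the atomic positions
    # are relaxed while the deformed cell is kept fixed.
    return {name: relax_template.new_with_structure(structure, optcell=0)
            for name, structure in deformed_dict.items()}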
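Finally, QhaFlow.finalize now dumps its summary with mjson_write to qha_data.json in the flow's outdir. A sketch of how the file could be read back; mjson_load is the counterpart hinted at in the import comment of the patch, and the outdata layout follows the usual Flow convention, so treat both as assumptions.

from abipy.tools.serialization import mjson_load

# Assumes the flow ran in "flow_qha" and that Flow.outdir maps to <workdir>/outdata.
data = mjson_load("flow_qha/outdata/qha_data.json")
print(data["ddb_paths"])  # one DDB file per deformation, produced by the PhononWorks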