diff --git a/adcc/ExcitedStates.py b/adcc/ExcitedStates.py index 57d08e05..f53f0164 100644 --- a/adcc/ExcitedStates.py +++ b/adcc/ExcitedStates.py @@ -89,7 +89,9 @@ def optimise_formatting(self, vectors): @property def linewidth(self): - """The width of an amplitude line if a tensor is formatted with this class""" + """ + The width of an amplitude line if a tensor is formatted with this class + """ # TODO This assumes a PP ADC matrix if self.matrix.blocks == ["s"]: nblk = 2 @@ -379,7 +381,8 @@ def describe(self, oscillator_strengths=True, state_dipole_moments=False, Show the norms of the (1p1h, 2p2h, ...) blocks of the excited states, by default ``True``. """ - # TODO This function is quite horrible and definitely needs some refactoring + # TODO This function is quite horrible and definitely needs some + # refactoring eV = constants.value("Hartree energy in eV") has_dipole = "electric_dipole" in self.operators.available diff --git a/adcc/FormatDominantElements.py b/adcc/FormatDominantElements.py index 2323d241..a89bf42b 100644 --- a/adcc/FormatDominantElements.py +++ b/adcc/FormatDominantElements.py @@ -43,9 +43,9 @@ def __init__(self, mospaces, tolerance=0.01, index_format=FormatIndexAdcc): def optimise_formatting(self, spaces_tensor_pairs): """ - Optimise the formatting parameters of this class and the index_format class - in order to be able to nicely produce equivalently formatted tensor format - strings for all the passed spaces-tensor pairs. + Optimise the formatting parameters of this class and the index_format + class in order to be able to nicely produce equivalently formatted tensor + format strings for all the passed spaces-tensor pairs. This function can be called multiple times. """ @@ -53,20 +53,22 @@ def optimise_formatting(self, spaces_tensor_pairs): return self.optimise_formatting([spaces_tensor_pairs]) for spaces, tensor in spaces_tensor_pairs: - if not isinstance(spaces, (tuple, list)) or not isinstance(tensor, Tensor): + if not isinstance(spaces, (tuple, list)) or \ + not isinstance(tensor, Tensor): raise TypeError("spaces_tensor_pairs should be a list of " "(spaces tuples, Tensor) tuples") for indices, _ in _tensor_select_below_absmax(tensor, self.tolerance): - self.index_format.optimise_formatting([(spaces[j], idx) - for j, idx in enumerate(indices)]) + self.index_format.optimise_formatting( + [(spaces[j], idx) for j, idx in enumerate(indices)]) def format_as_list(self, spaces, tensor): """Raw-format the dominant tensor elements as a list of tuples with one - tuple for each element. Each tuple has three entries, the formatted indices, - the formatted spins and the value""" + tuple for each element. Each tuple has three entries, the formatted + indices, the formatted spins and the value""" ret = [] for indices, value in _tensor_select_below_absmax(tensor, self.tolerance): - formatted = tuple(self.index_format.format(spaces[j], idx, concat_spin=False) + formatted = tuple(self.index_format.format(spaces[j], idx, + concat_spin=False) for j, idx in enumerate(indices)) ret.append(tuple(zip(*formatted)) + (value, )) return ret @@ -74,8 +76,8 @@ def format_as_list(self, spaces, tensor): def format(self, spaces, tensor): """ Return a multiline string representing the dominant tensor elements. - The tensor index is formatted according to the index format passed upon class - construction. + The tensor index is formatted according to the index format passed + upon class construction. 
""" ret = [] for indices, spins, value in self.format_as_list(spaces, tensor): diff --git a/adcc/FormatIndex.py b/adcc/FormatIndex.py index 997d48bd..ed92f869 100644 --- a/adcc/FormatIndex.py +++ b/adcc/FormatIndex.py @@ -72,8 +72,8 @@ def _translate_index(self, space, idx): def optimise_formatting(self, space_index_pairs): """ Optimise the formatting parameters of this class in order to be able to - nicely produce equivalently formatted tensor format strings for all the passed - spaces-index pairs. + nicely produce equivalently formatted tensor format strings for all the + passed spaces-index pairs. This function can be called multiple times. """ @@ -93,8 +93,8 @@ def format(self, space, idx, concat_spin=True): @property def max_n_characters(self): """ - The maximum number of characters needed for a formatted index (excluding the "a" - or "b" spin string) according to the current optimised formatting. + The maximum number of characters needed for a formatted index (excluding + the "a" or "b" spin string) according to the current optimised formatting. """ return self.max_digits @@ -132,8 +132,8 @@ def _translate_index(self, space, idx): def optimise_formatting(self, space_index_pairs): """ Optimise the formatting parameters of this class in order to be able to - nicely produce equivalently formatted tensor format strings for all the passed - spaces-index pairs. + nicely produce equivalently formatted tensor format strings for all the + passed spaces-index pairs. This function can be called multiple times. """ @@ -154,8 +154,8 @@ def format(self, space, idx, concat_spin=True): @property def max_n_characters(self): """ - The maximum number of characters needed for a formatted index (excluding the "a" - or "b" spin string) according to the current optimised formatting. + The maximum number of characters needed for a formatted index (excluding + the "a" or "b" spin string) according to the current optimised formatting. """ return 5 + self.max_digits @@ -198,8 +198,9 @@ def __init__(self, refstate, max_digits=1, use_hoco=True): # TODO Expand this class there is something sensible closed_shell = refstate.n_alpha == refstate.n_beta if not refstate.is_aufbau_occupation or not closed_shell: - raise ValueError("format_homolumo only produces the right results for " - "closed-shell references with an Aufbau occupation") + raise ValueError("format_homolumo only produces the right results " + "for closed-shell references with an Aufbau " + "occupation") def _translate_index(self, space, idx): # Deal with core-occupied orbitals first: @@ -238,8 +239,8 @@ def _translate_index(self, space, idx): def optimise_formatting(self, space_index_pairs): """ Optimise the formatting parameters of this class in order to be able to - nicely produce equivalently formatted tensor format strings for all the passed - spaces-index pairs. + nicely produce equivalently formatted tensor format strings for all the + passed spaces-index pairs. This function can be called multiple times. """ @@ -258,7 +259,7 @@ def format(self, space, idx, concat_spin=True): @property def max_n_characters(self): """ - The maximum number of characters needed for a formatted index (excluding the "a" - or "b" spin string) according to the current optimised formatting. + The maximum number of characters needed for a formatted index (excluding + the "a" or "b" spin string) according to the current optimised formatting. 
""" return 4 + self.maxlen_offset diff --git a/adcc/backends/__init__.py b/adcc/backends/__init__.py index 18362d3e..87e3d978 100644 --- a/adcc/backends/__init__.py +++ b/adcc/backends/__init__.py @@ -73,9 +73,10 @@ def available(): if not __status: status = { "pyscf": is_module_available("pyscf", "1.5.0"), - "psi4": is_module_available("psi4", "1.2.1") and is_module_available("psi4.core"), - "veloxchem": is_module_available("veloxchem"), # Exports no version info - "molsturm": is_module_available("molsturm"), # Exports no version info + "psi4": (is_module_available("psi4", "1.3.0") + and is_module_available("psi4.core")), + "veloxchem": is_module_available("veloxchem"), # No version info + "molsturm": is_module_available("molsturm"), # No version info } return sorted([b for b in status if status[b]]) @@ -141,8 +142,7 @@ def import_scf_results(res): "type " + str(type(res)) + " implemented.") -def run_hf(backend=None, xyz=None, basis="sto-3g", charge=0, multiplicity=1, - conv_tol=1e-12, conv_tol_grad=1e-8, max_iter=150): +def run_hf(backend, xyz, basis, **kwargs): """ Run a HF calculation with a specified backend, molecule, and SCF parameters @@ -164,7 +164,8 @@ def run_hf(backend=None, xyz=None, basis="sto-3g", charge=0, multiplicity=1, if len(available()) == 0: raise RuntimeError( "No supported host-program available as SCF backend. " - "See https://adc-connect.org/installation.html#install-hostprogram " + "See https://adc-connect.org/latest/" + "installation.html#install-hostprogram " "for installation instructions." ) else: @@ -175,17 +176,18 @@ def run_hf(backend=None, xyz=None, basis="sto-3g", charge=0, multiplicity=1, raise ValueError("Backend {} not found.".format(backend)) if backend == "psi4": from . import psi4 as backend_hf + elif backend == "pyscf": from . import pyscf as backend_hf + elif backend == "veloxchem": from . import veloxchem as backend_hf + elif backend == "molsturm": from . import molsturm as backend_hf + else: raise NotImplementedError("No run_hf function implemented for backend " "{}.".format(backend)) - return backend_hf.run_hf( - xyz, basis, charge, multiplicity, conv_tol, - conv_tol_grad, max_iter - ) + return backend_hf.run_hf(xyz, basis, **kwargs) diff --git a/adcc/backends/eri_build_helper.py b/adcc/backends/eri_build_helper.py index f1d178ba..d2ecc7a7 100644 --- a/adcc/backends/eri_build_helper.py +++ b/adcc/backends/eri_build_helper.py @@ -136,24 +136,25 @@ class EriBuilder: Implementation of the following functions in a derived class is necessary: - coeffs_occ_alpha: provide occupied alpha coefficients - (return np.ndarray) - coeffs_virt_alpha: provide virtual alpha coefficients - (return np.ndarray) - compute_mo_eri: compute a block of integrals (Chemists') - using the provided coefficients and retrieve block from - cache if possible + - ``coefficients``: Return a dict from a key describing the block + to the MO coefficients as an ``np.ndarray``. The expected keys + are ``Oa`` (occupied-alpha), ``Ob`` (occupied-beta), + ``Va`` (virtual-alpha), ``Vb`` (virtual-beta). + - ``compute_mo_eri``: compute a block of integrals (Chemists') + using the provided coefficients and retrieve block from cache + if possible Depending on the host program, other functions like - build_eri_phys_asym_block also need to be re-implemented. + ``build_eri_phys_asym_block`` also need to be re-implemented. 
""" - def __init__(self, n_orbs, n_orbs_alpha, n_alpha, n_beta): + def __init__(self, n_orbs, n_orbs_alpha, n_alpha, n_beta, restricted): self.block_slice_mapping = None self.n_orbs = n_orbs self.n_orbs_alpha = n_orbs_alpha self.n_alpha = n_alpha self.n_beta = n_beta + self.restricted = restricted self.eri_cache = {} self.eri_asymm_cache = {} @@ -166,20 +167,8 @@ def __init__(self, n_orbs, n_orbs_alpha, n_alpha, n_beta): self.block_slice_mapping = BlockSliceMappingHelper(aro, bro, arv, brv) @property - def coeffs_occ_alpha(self): - raise NotImplementedError("Implement coeffs_occ_alpha") - - @property - def coeffs_virt_alpha(self): - raise NotImplementedError("Implement coeffs_virt_alpha") - - @property - def coeffs_occ_beta(self): - raise NotImplementedError("Implement coeffs_occ_beta") - - @property - def coeffs_virt_beta(self): - raise NotImplementedError("Implement coeffs_virt_beta") + def coefficients(self): + raise NotImplementedError("Implement coefficients") def compute_mo_eri(self, block, coeffs, use_cache=True): raise NotImplementedError("Implement compute_mo_eri") @@ -200,9 +189,22 @@ def fill_slice(self, slices, out): slices requested slice of ERIs from libtensor out view to libtensor memory """ + # map index slices to orbital and spin space + # because we do not have access to the MoSpaces object inside the + # HartreeFockProvider implementation mapping = self.block_slice_mapping.map_slices_to_blocks_and_spins( slices ) + + # mo_spaces (antisymm. physicists' notation), + # e.g. an index slice from maps to ['O', 'O', 'O', 'O'] + # + # spin_block is the equivalent for the spin block + # + # comp_block_slices gives you the slices inside the computed block + # required. E.g., if you need a part of o2, the full occupied + # integral block is computed in the host program, but only + # parts of it are written to libtensor memory mo_spaces, spin_block, comp_block_slices = mapping if len(mo_spaces) != 4: raise RuntimeError( @@ -216,36 +218,47 @@ def fill_slice(self, slices, out): # if the backend provides Chemists' ERIs # -> (ik|jl) if self.eri_notation == "chem": - mo_spaces = "".join( - np.take(np.array(mo_spaces), [0, 2, 1, 3]) - ) + mo_spaces = "".join(np.take(np.array(mo_spaces), [0, 2, 1, 3])) spin_block_str = "".join(spin_block) - # TODO: probably not needed in the future - # since libtensor will only ask for canonical blocks allowed, spin_symm = is_spin_allowed(spin_block_str) if allowed: - eri = self.build_eri_phys_asym_block( - can_block=mo_spaces, spin_symm=spin_symm - ) + eri = self.build_eri_phys_asym_block(can_block=mo_spaces, + spin_symm=spin_symm) out[:] = eri[comp_block_slices] else: out[:] = 0 def build_eri_phys_asym_block(self, can_block=None, spin_symm=None): - co = self.coeffs_occ_alpha - cv = self.coeffs_virt_alpha block = can_block asym_block = "".join([block[i] for i in [0, 3, 2, 1]]) - both_blocks = "{}-{}-{}-{}".format( - block, asym_block, str(spin_symm.pref1), str(spin_symm.pref2) - ) + both_blocks = f"{block}-{asym_block}-{spin_symm.pref1}-{spin_symm.pref2}" + if not self.restricted: + # add the spin block to the caching string + both_blocks += "-" + spin_symm.transposition if both_blocks in self.eri_asymm_cache.keys(): return self.eri_asymm_cache[both_blocks] # TODO: avoid caching of (VV|VV) # because we don't need it anymore -> is cached anyways - coeffs_transform = tuple(co if x == "O" else cv for x in block) + spin_chem = list(spin_symm.transposition[i] for i in [0, 2, 1, 3]) + spin_key = "".join(spin_chem) + coeffs_transform = tuple(self.coefficients[x 
+ y] + for x, y in zip(block, spin_chem)) + + if not self.restricted: + block += spin_key + asym_block += "".join([spin_key[i] for i in [0, 2, 1, 3]]) + + # For the given spin block, check which individual blocks + # in Chemists' notation are actually needed to form the antisymmetrized + # integral in Physicists' notation. + # Some of them are zero and don't need to be computed. + # Example: + # = - = (oo|vv) - (ov|vo) + # the last term is zero in the case of (ab|ba) + + # both terms in the antisymm. are non-zero if spin_symm.pref1 != 0 and spin_symm.pref2 != 0: can_block_integrals = self.compute_mo_eri(block, coeffs_transform) eri_phys = can_block_integrals.transpose(0, 2, 1, 3) @@ -255,9 +268,11 @@ def build_eri_phys_asym_block(self, can_block=None, spin_symm=None): asym_block, chem_asym ).transpose(0, 3, 2, 1).transpose(0, 2, 1, 3) eris = spin_symm.pref1 * eri_phys - spin_symm.pref2 * asymm + # only the second term is zero elif spin_symm.pref1 != 0 and spin_symm.pref2 == 0: can_block_integrals = self.compute_mo_eri(block, coeffs_transform) eris = spin_symm.pref1 * can_block_integrals.transpose(0, 2, 1, 3) + # only the first term is zero elif spin_symm.pref1 == 0 and spin_symm.pref2 != 0: chem_asym = tuple(coeffs_transform[i] for i in [0, 3, 2, 1]) asymm = self.compute_mo_eri( @@ -280,19 +295,10 @@ def build_full_eri_ffff(self): brv = slice(n_orbs_alpha + n_beta, n_orbs) eri = np.zeros((n_orbs, n_orbs, n_orbs, n_orbs)) - co = self.coeffs_occ_alpha - cv = self.coeffs_virt_alpha blocks = ["OOVV", "OVOV", "OOOV", "OOOO", "OVVV", "VVVV"] - # TODO: needs to be refactored to support UHF for b in blocks: slices_alpha = [aro if x == "O" else arv for x in b] slices_beta = [bro if x == "O" else brv for x in b] - coeffs_transform = tuple(co if x == "O" else cv for x in b) - # make canonical integral block - can_block_integrals = self.compute_mo_eri(b, coeffs_transform) - - # automatically set ERI tensor's symmetry-equivalent blocks - trans_sym_blocks = get_symm_equivalent_transpositions_for_block(b) # Slices for the spin-allowed blocks aaaa = SpinBlockSlice("aaaa", (slices_alpha[0], slices_alpha[1], @@ -304,12 +310,19 @@ def build_full_eri_ffff(self): bbaa = SpinBlockSlice("bbaa", (slices_beta[0], slices_beta[1], slices_alpha[2], slices_alpha[3])) non_zero_spin_block_slice_list = [aaaa, bbbb, aabb, bbaa] - for tsym_block in trans_sym_blocks: - sym_block_eri = can_block_integrals.transpose(tsym_block) - for non_zero_spin_block in non_zero_spin_block_slice_list: - transposed_spin_slices = tuple( - non_zero_spin_block.slices[i] for i in tsym_block - ) + trans_sym_blocks = get_symm_equivalent_transpositions_for_block(b) + + # automatically set ERI tensor's symmetry-equivalent blocks + for spin_block in non_zero_spin_block_slice_list: + coeffs_transform = tuple(self.coefficients[x + y] + for x, y in zip(b, spin_block.spins)) + can_block_integrals = self.compute_mo_eri( + b + "".join(spin_block.spins), coeffs_transform + ) + for tsym_block in trans_sym_blocks: + sym_block_eri = can_block_integrals.transpose(tsym_block) + transposed_spin_slices = tuple(spin_block.slices[i] + for i in tsym_block) eri[transposed_spin_slices] = sym_block_eri return eri diff --git a/adcc/backends/psi4.py b/adcc/backends/psi4.py index 61dba46a..c1902659 100644 --- a/adcc/backends/psi4.py +++ b/adcc/backends/psi4.py @@ -23,11 +23,11 @@ import psi4 import numpy as np +from adcc.misc import cached_property + from .InvalidReference import InvalidReference from .eri_build_helper import EriBuilder -from adcc.misc import 
cached_property - from libadcc import HartreeFockProvider @@ -43,18 +43,19 @@ def electric_dipole(self): class Psi4EriBuilder(EriBuilder): - def __init__(self, wfn, n_orbs, n_orbs_alpha, n_alpha, n_beta): + def __init__(self, wfn, n_orbs, n_orbs_alpha, n_alpha, n_beta, restricted): self.wfn = wfn self.mints = psi4.core.MintsHelper(self.wfn) - super().__init__(n_orbs, n_orbs_alpha, n_alpha, n_beta) - - @property - def coeffs_occ_alpha(self): - return self.wfn.Ca_subset("AO", "OCC") + super().__init__(n_orbs, n_orbs_alpha, n_alpha, n_beta, restricted) @property - def coeffs_virt_alpha(self): - return self.wfn.Ca_subset("AO", "VIR") + def coefficients(self): + return { + "Oa": self.wfn.Ca_subset("AO", "OCC"), + "Ob": self.wfn.Cb_subset("AO", "OCC"), + "Va": self.wfn.Ca_subset("AO", "VIR"), + "Vb": self.wfn.Cb_subset("AO", "VIR"), + } def compute_mo_eri(self, block, coeffs, use_cache=True): if block in self.eri_cache and use_cache: @@ -66,20 +67,19 @@ def compute_mo_eri(self, block, coeffs, use_cache=True): class Psi4HFProvider(HartreeFockProvider): """ - This implementation is only valid for RHF + This implementation is only valid + if no orbital reordering is required. """ def __init__(self, wfn): # Do not forget the next line, # otherwise weird errors result super().__init__() - - if not isinstance(wfn, psi4.core.RHF): - raise InvalidReference("Only restricted references (RHF) are supported.") - self.wfn = wfn self.eri_ffff = None - self.eri_builder = Psi4EriBuilder(self.wfn, self.n_orbs, self.wfn.nmo(), - wfn.nalpha(), wfn.nbeta()) + self.eri_builder = Psi4EriBuilder( + self.wfn, self.n_orbs, self.wfn.nmo(), wfn.nalpha(), wfn.nbeta(), + self.restricted + ) self.operator_integral_provider = Psi4OperatorIntegralProvider(self.wfn) def get_backend(self): @@ -93,7 +93,7 @@ def get_conv_tol(self): return threshold def get_restricted(self): - return True # TODO Hard-coded for now. + return isinstance(self.wfn, (psi4.core.RHF, psi4.core.ROHF)) def get_energy_scf(self): return self.wfn.energy() @@ -121,7 +121,8 @@ def get_nuclear_multipole(self, order): def fill_orbcoeff_fb(self, out): mo_coeff_a = np.asarray(self.wfn.Ca()) - mo_coeff = (mo_coeff_a, mo_coeff_a) + mo_coeff_b = np.asarray(self.wfn.Cb()) + mo_coeff = (mo_coeff_a, mo_coeff_b) out[:] = np.transpose( np.hstack((mo_coeff[0], mo_coeff[1])) ) @@ -156,34 +157,35 @@ def has_eri_phys_asym_ffff(self): def flush_cache(self): self.eri_ffff = None - self.eri_cache = None + self.eri_builder.flush_cache() def import_scf(wfn): if not isinstance(wfn, psi4.core.HF): raise InvalidReference( "Only psi4.core.HF and its subtypes are supported references in " - "backends.psi4.import_scf. This indicates that you passed an unsupported " - "SCF reference. Make sure you did a restricted or unrestricted HF " - "calculation." + "backends.psi4.import_scf. This indicates that you passed an " + "unsupported SCF reference. Make sure you did a restricted or " + "unrestricted HF calculation." ) - if not isinstance(wfn, psi4.core.RHF): - raise InvalidReference("Right now only restricted references (RHF) are " + if not isinstance(wfn, (psi4.core.RHF, psi4.core.UHF)): + raise InvalidReference("Right now only RHF and UHF references are " "supported for Psi4.") # TODO This is not fully correct, because the core.Wavefunction object # has an internal, but py-invisible Options structure, which contains # the actual set of options ... 
theoretically they could differ scf_type = psi4.core.get_global_option('SCF_TYPE') - unsupported_scf_types = ["CD", "DISK_DF", "MEM_DF"] # Choleski or density-fitting + # CD = Choleski, DF = density-fitting + unsupported_scf_types = ["CD", "DISK_DF", "MEM_DF"] if scf_type in unsupported_scf_types: raise InvalidReference(f"Unsupported Psi4 SCF_TYPE, should not be one " "of {unsupported_scf_types}") if wfn.nirrep() > 1: - raise InvalidReference("The passed Psi4 wave function object needs to have " - "exactly one irrep, i.e. be of C1 symmetry.") + raise InvalidReference("The passed Psi4 wave function object needs to " + "have exactly one irrep, i.e. be of C1 symmetry.") # Psi4 throws an exception if SCF is not converged, so there is no need # to assert that here. @@ -199,24 +201,32 @@ def run_hf(xyz, basis, charge=0, multiplicity=1, conv_tol=1e-12, "ccpvdz": "cc-pvdz", } - mol = psi4.geometry(""" + mol = psi4.geometry(f""" {charge} {multiplicity} {xyz} symmetry c1 units au no_reorient no_com - """.format(xyz=xyz, charge=charge, multiplicity=multiplicity)) + """) + psi4.core.be_quiet() - reference = "RHF" + psi4.set_options({ + 'basis': basissets.get(basis, basis), + 'scf_type': 'pk', + 'e_convergence': conv_tol, + 'd_convergence': conv_tol_grad, + 'maxiter': max_iter, + 'reference': "RHF" + }) + if multiplicity != 1: - reference = "UHF" - psi4.set_options({'basis': basissets.get(basis, basis), - 'scf_type': 'pk', - 'e_convergence': conv_tol, - 'd_convergence': conv_tol_grad, - 'maxiter': max_iter, - 'reference': reference}) + psi4.set_options({ + 'reference': "UHF", + 'maxiter': max_iter + 500, + 'soscf': 'true' + }) + _, wfn = psi4.energy('SCF', return_wfn=True, molecule=mol) psi4.core.clean() return wfn diff --git a/adcc/backends/pyscf.py b/adcc/backends/pyscf.py index 6de1fdea..aab9a0fd 100644 --- a/adcc/backends/pyscf.py +++ b/adcc/backends/pyscf.py @@ -20,16 +20,14 @@ ## along with adcc. If not, see . 
## ## --------------------------------------------------------------------- -import warnings import numpy as np -from .InvalidReference import InvalidReference -from .eri_build_helper import EriBuilder - from pyscf import ao2mo, gto, scf from adcc.misc import cached_property -from adcc.DataHfProvider import DataHfProvider + +from .InvalidReference import InvalidReference +from .eri_build_helper import EriBuilder from libadcc import HartreeFockProvider @@ -47,17 +45,25 @@ def electric_dipole(self): # TODO: refactor ERI builder to be more general # IntegralBuilder would be good class PyScfEriBuilder(EriBuilder): - def __init__(self, scfres, n_orbs, n_orbs_alpha, n_alpha, n_beta): + def __init__( + self, scfres, n_orbs, n_orbs_alpha, n_alpha, n_beta, restricted + ): self.scfres = scfres - super().__init__(n_orbs, n_orbs_alpha, n_alpha, n_beta) - - @property - def coeffs_occ_alpha(self): - return self.scfres.mo_coeff[:, :self.n_alpha] + if restricted: + self.mo_coeff = (self.scfres.mo_coeff, + self.scfres.mo_coeff) + else: + self.mo_coeff = self.scfres.mo_coeff + super().__init__(n_orbs, n_orbs_alpha, n_alpha, n_beta, restricted) @property - def coeffs_virt_alpha(self): - return self.scfres.mo_coeff[:, self.n_alpha:] + def coefficients(self): + return { + "Oa": self.mo_coeff[0][:, :self.n_alpha], + "Ob": self.mo_coeff[1][:, :self.n_beta], + "Va": self.mo_coeff[0][:, self.n_alpha:], + "Vb": self.mo_coeff[1][:, self.n_beta:], + } def compute_mo_eri(self, block, coeffs, use_cache=True): if block in self.eri_cache and use_cache: @@ -72,7 +78,8 @@ def compute_mo_eri(self, block, coeffs, use_cache=True): class PyScfHFProvider(HartreeFockProvider): """ - This implementation is only valid for RHF + This implementation is only valid + if no orbital reordering is required. """ def __init__(self, scfres): # Do not forget the next line, @@ -81,8 +88,10 @@ def __init__(self, scfres): self.scfres = scfres self.eri_ffff = None n_alpha, n_beta = scfres.mol.nelec - self.eri_builder = PyScfEriBuilder(self.scfres, self.n_orbs, - self.n_orbs_alpha, n_alpha, n_beta) + self.eri_builder = PyScfEriBuilder( + self.scfres, self.n_orbs, self.n_orbs_alpha, n_alpha, n_beta, + self.restricted + ) self.operator_integral_provider = PyScfOperatorIntegralProvider( self.scfres ) @@ -108,8 +117,8 @@ def get_restricted(self): elif isinstance(self.scfres.mo_occ, np.ndarray): restricted = self.scfres.mo_occ.ndim < 2 else: - raise InvalidReference("Unusual pyscf SCF class encountered. Could not " - "determine restricted / unrestricted.") + raise InvalidReference("Unusual pyscf SCF class encountered. Could " + "not determine restricted / unrestricted.") return restricted def get_energy_scf(self): @@ -187,214 +196,22 @@ def flush_cache(self): self.eri_ffff = None -def convert_scf_to_dict(scfres): - if not isinstance(scfres, scf.hf.SCF): - raise InvalidReference("Unsupported type for backends.pyscf.convert_scf_to_dict.") - - if not scfres.converged: - raise InvalidReference("Cannot start an adc calculation on top of an SCF, " - "which is not yet converged. Did you forget to run " - "the kernel() or the scf() function of the pyscf scf " - "object?") - - # Try to determine whether we are restricted - if isinstance(scfres.mo_occ, list): - restricted = len(scfres.mo_occ) < 2 - elif isinstance(scfres.mo_occ, np.ndarray): - restricted = scfres.mo_occ.ndim < 2 - else: - raise InvalidReference("Unusual pyscf SCF class encountered. 
Could not " - "determine restricted / unrestricted.") - - mo_occ = scfres.mo_occ - mo_energy = scfres.mo_energy - mo_coeff = scfres.mo_coeff - fock_bb = scfres.get_fock() - - # pyscf only keeps occupation and mo energies once if restriced, - # so we unfold it here in order to unify the treatment in the rest - # of the code - if restricted: - mo_occ = np.asarray((mo_occ / 2, mo_occ / 2)) - mo_energy = (mo_energy, mo_energy) - mo_coeff = (mo_coeff, mo_coeff) - fock_bb = (fock_bb, fock_bb) - - # Transform fock matrix to MOs - fock = tuple(mo_coeff[i].transpose().conj() @ fock_bb[i] @ mo_coeff[i] - for i in range(2)) - del fock_bb - - # Determine number of orbitals - n_orbs_alpha = mo_coeff[0].shape[1] - n_orbs_beta = mo_coeff[1].shape[1] - n_orbs = n_orbs_alpha + n_orbs_beta - if n_orbs_alpha != n_orbs_beta: - raise InvalidReference("adcc cannot deal with different number of alpha and " - "beta orbitals like in a restricted " - "open-shell reference at the moment.") - - # Determine number of electrons - n_alpha = np.sum(mo_occ[0] > 0) - n_beta = np.sum(mo_occ[1] > 0) - if n_alpha != np.sum(mo_occ[0]) or n_beta != np.sum(mo_occ[1]): - raise InvalidReference("Fractional occupation numbers are not supported " - "in adcc.") - - # conv_tol is energy convergence, conv_tol_grad is gradient convergence - if scfres.conv_tol_grad is None: - conv_tol_grad = np.sqrt(scfres.conv_tol) - else: - conv_tol_grad = scfres.conv_tol_grad - threshold = max(10 * scfres.conv_tol, conv_tol_grad) - - # - # Put basic data into a dictionary - # - data = { - "n_orbs_alpha": int(n_orbs_alpha), - "energy_scf": float(scfres.e_tot), - "restricted": restricted, - "threshold": float(threshold), - "spin_multiplicity": 0, - } - if restricted: - # Note: In the pyscf world spin is 2S, so the multiplicity - # is spin + 1 - data["spin_multiplicity"] = int(scfres.mol.spin) + 1 - - # - # Orbital reordering - # - # TODO This should not be needed any more - # adcc assumes that the occupied orbitals are specified first, - # followed by the virtual orbitals. Pyscf does this by means of the - # mo_occ numpy arrays, so we need to reorder in order to agree - # with what is expected in adcc. 
- # - # First build a structured numpy array with the negative occupation - # in the primary field and the energy in the secondary - # for each alpha and beta - order_array = ( - np.array(list(zip(-mo_occ[0], mo_energy[0])), - dtype=np.dtype("float,float")), - np.array(list(zip(-mo_occ[1], mo_energy[1])), - dtype=np.dtype("float,float")), - ) - sort_indices = tuple(np.argsort(ary) for ary in order_array) - - # Use the indices which sort order_array (== stort_indices) to reorder - mo_occ = tuple(mo_occ[i][sort_indices[i]] for i in range(2)) - mo_energy = tuple(mo_energy[i][sort_indices[i]] for i in range(2)) - mo_coeff = tuple(mo_coeff[i][:, sort_indices[i]] for i in range(2)) - fock = tuple(fock[i][sort_indices[i]][:, sort_indices[i]] for i in range(2)) - - # - # SCF orbitals and SCF results - # - data["occupation_f"] = np.hstack((mo_occ[0], mo_occ[1])) - data["orben_f"] = np.hstack((mo_energy[0], mo_energy[1])) - fullfock_ff = np.zeros((n_orbs, n_orbs)) - fullfock_ff[:n_orbs_alpha, :n_orbs_alpha] = fock[0] - fullfock_ff[n_orbs_alpha:, n_orbs_alpha:] = fock[1] - data["fock_ff"] = fullfock_ff - - non_canonical = np.max(np.abs(data["fock_ff"] - np.diag(data["orben_f"]))) - if non_canonical > data["threshold"]: - raise ValueError("Running adcc on top of a non-canonical fock " - "matrix is not implemented.") - - cf_bf = np.hstack((mo_coeff[0], mo_coeff[1])) - data["orbcoeff_fb"] = cf_bf.transpose() - - # - # ERI AO to MO transformation - # - if hasattr(scfres, "_eri") and scfres._eri is not None: - # eri is stored ... use it directly - eri_ao = scfres._eri - else: - # eri is not stored ... generate it now. - eri_ao = scfres.mol.intor('int2e', aosym='s8') - - aro = slice(0, n_alpha) - bro = slice(n_orbs_alpha, n_orbs_alpha + n_beta) - arv = slice(n_alpha, n_orbs_alpha) - brv = slice(n_orbs_alpha + n_beta, n_orbs) - - # compute full ERI tensor (with really everything) - eri = ao2mo.general( - eri_ao, (cf_bf, cf_bf, cf_bf, cf_bf), compact=False - ) - eri = eri.reshape(n_orbs, n_orbs, n_orbs, n_orbs) - del eri_ao - - # Adjust spin-forbidden blocks to be exactly zero - eri[aro, bro, :, :] = 0 - eri[aro, brv, :, :] = 0 - eri[arv, bro, :, :] = 0 - eri[arv, brv, :, :] = 0 - - eri[bro, aro, :, :] = 0 - eri[bro, arv, :, :] = 0 - eri[brv, aro, :, :] = 0 - eri[brv, arv, :, :] = 0 - - eri[:, :, aro, bro] = 0 - eri[:, :, aro, brv] = 0 - eri[:, :, arv, bro] = 0 - eri[:, :, arv, brv] = 0 - - eri[:, :, bro, aro] = 0 - eri[:, :, bro, arv] = 0 - eri[:, :, brv, aro] = 0 - eri[:, :, brv, arv] = 0 - - data["eri_ffff"] = eri - - # Compute electric and nuclear multipole moments - charges = scfres.mol.atom_charges() - coords = scfres.mol.atom_coords() - data["multipoles"] = { - "nuclear_0": int(np.sum(charges)), - "nuclear_1": np.einsum('i,ix->x', charges, coords), - "elec_0": -int(n_alpha + n_beta), - "elec_1": scfres.mol.intor_symmetric('int1e_r', comp=3), - } - - data["backend"] = "pyscf" - return data - - def import_scf(scfres): - # TODO This could be a bit more verbose + # TODO The error messages here could be a bit more verbose if not isinstance(scfres, scf.hf.SCF): raise InvalidReference("Unsupported type for backends.pyscf.import_scf.") if not scfres.converged: - raise InvalidReference("Cannot start an adc calculation on top of an SCF, " - "which is not yet converged. Did you forget to run " - "the kernel() or the scf() function of the pyscf scf " - "object?") + raise InvalidReference("Cannot start an adc calculation on top of an SCF," + " which is not yet converged. 
Did you forget to" + " run the kernel() or the scf() function of the" + " pyscf scf object?") # TODO Check for point-group symmetry, # check for density-fitting or choleski - # Try to determine whether we are restricted - if isinstance(scfres.mo_occ, list): - restricted = len(scfres.mo_occ) < 2 - elif isinstance(scfres.mo_occ, np.ndarray): - restricted = scfres.mo_occ.ndim < 2 - else: - raise InvalidReference("Unusual pyscf SCF class encountered. Could not " - "determine restricted / unrestricted.") - - if restricted: - return PyScfHFProvider(scfres) - else: - warnings.warn("Falling back to slow import for UHF result.") - return DataHfProvider(convert_scf_to_dict(scfres)) + return PyScfHFProvider(scfres) def run_hf(xyz, basis, charge=0, multiplicity=1, conv_tol=1e-12, @@ -415,5 +232,59 @@ def run_hf(xyz, basis, charge=0, multiplicity=1, conv_tol=1e-12, mf.conv_tol = conv_tol mf.conv_tol_grad = conv_tol_grad mf.max_cycle = max_iter + # since we want super tight convergence for tests, + # tweak the options for non-RHF systems + if multiplicity != 1: + mf.max_cycle += 500 + mf.diis = scf.EDIIS() + mf.diis_space = 3 + mf = scf.addons.frac_occ(mf) mf.kernel() return mf + + +def run_core_hole(xyz, basis, charge=0, multiplicity=1, + conv_tol=1e-12, conv_tol_grad=1e-8, max_iter=150): + mol = gto.M( + atom=xyz, + basis=basis, + unit="Bohr", + # spin in the pyscf world is 2S + spin=multiplicity - 1, + charge=charge, + # Disable commandline argument parsing in pyscf + parse_arg=False, + dump_input=False, + verbose=0, + ) + + # First normal run + mf = scf.UHF(mol) + mf.conv_tol = conv_tol + mf.conv_tol_grad = conv_tol_grad + mf.max_cycle = max_iter + # since we want super tight convergence for tests, + # tweak the options for non-RHF systems + if multiplicity != 1: + mf.max_cycle += 500 + mf.diis = scf.EDIIS() + mf.diis_space = 3 + mf = scf.addons.frac_occ(mf) + mf.kernel() + + # make beta core hole + mo0 = tuple(c.copy() for c in mf.mo_coeff) + occ0 = tuple(o.copy() for o in mf.mo_occ) + occ0[1][0] = 0.0 + dm0 = mf.make_rdm1(mo0, occ0) + + # Run second SCF with MOM + mf_chole = scf.UHF(mol) + scf.addons.mom_occ_(mf_chole, mo0, occ0) + mf_chole.conv_tol = conv_tol + mf_chole.conv_tol_grad = conv_tol_grad + mf_chole.max_cycle += 500 + mf_chole.diis = scf.EDIIS() + mf_chole.diis_space = 3 + mf_chole.kernel(dm0) + return mf_chole diff --git a/adcc/backends/test_backends_crossref.py b/adcc/backends/test_backends_crossref.py index 4436fdda..4a0992bd 100644 --- a/adcc/backends/test_backends_crossref.py +++ b/adcc/backends/test_backends_crossref.py @@ -50,6 +50,18 @@ def template_adc2_h2o(self, basis): results[b] = adcc.adc2(scfres, n_singlets=5, conv_tol=1e-10) compare_adc_results(results, 5e-9) + def template_adc2_uhf_ch2nh2(self, basis): + results = {} + # UHF not supported for VeloxChem + if "veloxchem" in backends: + backends.remove("veloxchem") + if not len(backends): + pytest.skip("Not enough backends that support UHF available.") + for b in backends: + scfres = cached_backend_hf(b, "ch2nh2", basis, multiplicity=2) + results[b] = adcc.adc2(scfres, n_states=5, conv_tol=1e-10) + compare_adc_results(results, 5e-9) + def template_cvs_adc2_h2o(self, basis): results = {} for b in backends: diff --git a/adcc/backends/test_psi4.py b/adcc/backends/test_psi4.py index 7109707b..e485935c 100644 --- a/adcc/backends/test_psi4.py +++ b/adcc/backends/test_psi4.py @@ -50,16 +50,15 @@ def base_test(self, wfn): hfdata = adcc.backends.import_scf_results(wfn) assert hfdata.backend == "psi4" - # only RHF support until now 
- assert hfdata.restricted - n_orbs = 2 * wfn.nmo() assert hfdata.spin_multiplicity != 0 assert hfdata.n_orbs_alpha == hfdata.n_orbs_beta - assert np.all(hfdata.orben_f[:hfdata.n_orbs_alpha] - == hfdata.orben_f[hfdata.n_orbs_alpha:]) + + if hfdata.restricted: + assert np.all(hfdata.orben_f[:hfdata.n_orbs_alpha] + == hfdata.orben_f[hfdata.n_orbs_alpha:]) assert hfdata.energy_scf == wfn.energy() assert hfdata.spin_multiplicity == wfn.molecule().multiplicity() @@ -81,7 +80,8 @@ def base_test(self, wfn): # Fock matrix fock_ff fock_alpha_bb = np.asarray(wfn.Fa()) fock_beta_bb = np.asarray(wfn.Fb()) - assert_almost_equal(fock_alpha_bb, fock_beta_bb) + if hfdata.restricted: + assert_almost_equal(fock_alpha_bb, fock_beta_bb) fock_ff = np.zeros((n_orbs, n_orbs)) fock_alpha = np.einsum('ui,vj,uv', np.asarray(wfn.Ca()), @@ -116,3 +116,17 @@ def template_rhf_h2o(self, basis): mints = psi4.core.MintsHelper(wfn.basisset()) ao_dip = [np.array(comp) for comp in mints.ao_dipole()] operator_import_test(wfn, ao_dip) + + def template_uhf_ch2nh2(self, basis): + wfn = adcc.backends.run_hf("psi4", geometry.xyz["ch2nh2"], basis, + multiplicity=2) + self.base_test(wfn) + + # Test ERI + eri_asymm_construction_test(wfn) + eri_asymm_construction_test(wfn, core_orbitals=1) + + # Test dipole + mints = psi4.core.MintsHelper(wfn.basisset()) + ao_dip = [np.array(comp) for comp in mints.ao_dipole()] + operator_import_test(wfn, ao_dip) diff --git a/adcc/backends/test_pyscf.py b/adcc/backends/test_pyscf.py index 180a7013..25314f1d 100644 --- a/adcc/backends/test_pyscf.py +++ b/adcc/backends/test_pyscf.py @@ -36,35 +36,15 @@ import pytest -if have_backend("pyscf"): - from pyscf import gto, scf - basissets = ["sto3g", "ccpvdz"] @expand_test_templates(basissets) @pytest.mark.skipif(not have_backend("pyscf"), reason="pyscf not found.") class TestPyscf(unittest.TestCase): - def run_core_hole(self, mol): - # First normal run - mf = scf.UHF(mol) - mf.conv_tol = 1e-12 - mf.kernel() - - # make core hole - mo0 = tuple(c.copy() for c in mf.mo_coeff) - occ0 = tuple(o.copy() for o in mf.mo_occ) - occ0[0][0] = 0.0 - dm0 = mf.make_rdm1(mo0, occ0) - - # Run second SCF with MOM - chole = scf.UHF(mol) - scf.addons.mom_occ_(chole, mo0, occ0) - chole.conv_tol = 1e-12 - chole.kernel(dm0) - return chole - def base_test(self, scfres): + from pyscf import scf + hfdata = adcc.backends.import_scf_results(scfres) assert hfdata.backend == "pyscf" @@ -87,11 +67,9 @@ def base_test(self, scfres): mo_coeff = (scfres.mo_coeff, scfres.mo_coeff) fock_bb = (fock_bb, fock_bb) else: - assert hfdata.spin_multiplicity == 0 - # Check SCF type fits assert isinstance(scfres, scf.uhf.UHF) - + assert hfdata.n_alpha >= hfdata.n_beta mo_occ = scfres.mo_occ mo_energy = scfres.mo_energy mo_coeff = scfres.mo_coeff @@ -100,20 +78,6 @@ def base_test(self, scfres): assert hfdata.n_alpha == np.sum(mo_occ[0] > 0) assert hfdata.n_beta == np.sum(mo_occ[1] > 0) - # Check the lowest n_alpha / n_beta orbitals are occupied - occ_a = [mo_occ[0][mo_energy[0] == ene] - for ene in hfdata.orben_f[:hfdata.n_alpha]] - occ_b = [mo_occ[1][mo_energy[1] == ene] - for ene in hfdata.orben_f[n_orbs_alpha: - n_orbs_alpha + hfdata.n_beta]] - assert np.all(np.asarray(occ_a) > 0) - assert np.all(np.asarray(occ_b) > 0) - - # TODO: Implement full tests for UHF once the modern interface - # is extended - if not hfdata.restricted: - return - # occupation_f assert_almost_equal(hfdata.occupation_f, np.hstack((mo_occ[0], mo_occ[1]))) @@ -136,9 +100,7 @@ def base_test(self, scfres): 
assert_almost_equal(hfdata.fock_ff, fullfock_ff) # test symmetry of the ERI tensor - allowed_permutations = [ - p.transposition for p in eri_permutations["chem"] - ] + allowed_permutations = [p.transposition for p in eri_permutations["chem"]] eri = np.empty((hfdata.n_orbs, hfdata.n_orbs, hfdata.n_orbs, hfdata.n_orbs)) sfull = slice(hfdata.n_orbs) @@ -159,13 +121,22 @@ def template_rhf_h2o(self, basis): ao_dip = mf.mol.intor_symmetric('int1e_r', comp=3) operator_import_test(mf, list(ao_dip)) - def test_h2o_sto3g_core_hole(self): - mol = gto.M( - atom=geometry.xyz["h2o"], - basis='sto-3g', - unit="Bohr", - # needed to disable commandline argument parsing in pyscf - parse_arg=False, + def template_uhf_ch2nh2(self, basis): + mf = adcc.backends.run_hf( + "pyscf", geometry.xyz["ch2nh2"], basis, multiplicity=2 ) - mf = self.run_core_hole(mol) + self.base_test(mf) + + # Test ERI + eri_asymm_construction_test(mf) + eri_asymm_construction_test(mf, core_orbitals=1) + + # Test dipole + ao_dip = mf.mol.intor_symmetric('int1e_r', comp=3) + operator_import_test(mf, list(ao_dip)) + + def test_h2o_sto3g_core_hole(self): + from adcc.backends.pyscf import run_core_hole + + mf = run_core_hole(geometry.xyz["h2o"], "sto3g") self.base_test(mf) diff --git a/adcc/backends/test_veloxchem.py b/adcc/backends/test_veloxchem.py index dece5a8c..2f203d8f 100644 --- a/adcc/backends/test_veloxchem.py +++ b/adcc/backends/test_veloxchem.py @@ -130,7 +130,8 @@ def template_rhf_h2o(self, basis): # Test dipole dipole_drv = vlx.ElectricDipoleIntegralsDriver(scfdrv.task.mpi_comm) - dipole_mats = dipole_drv.compute(scfdrv.task.molecule, scfdrv.task.ao_basis) + dipole_mats = dipole_drv.compute(scfdrv.task.molecule, + scfdrv.task.ao_basis) integrals = (dipole_mats.x_to_numpy(), dipole_mats.y_to_numpy(), dipole_mats.z_to_numpy()) operator_import_test(scfdrv, integrals) diff --git a/adcc/backends/testing.py b/adcc/backends/testing.py index c5d7e1f8..f5f957c2 100644 --- a/adcc/backends/testing.py +++ b/adcc/backends/testing.py @@ -138,7 +138,7 @@ def operator_import_test(scfres, ao_dict): ) -def cached_backend_hf(backend, molecule, basis): +def cached_backend_hf(backend, molecule, basis, multiplicity=1): """ Run the SCF for a backend and a particular test case (if not done) and return the result. 
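For orientation, a usage sketch of the extended test helper above (illustrative only, not part of the patch; it assumes adcc plus at least one supported SCF backend is installed and uses the ch2nh2 doublet test case introduced in this change):

    # Usage sketch for cached_backend_hf with the new multiplicity argument.
    from adcc.backends.testing import cached_backend_hf

    # Restricted reference: the default multiplicity=1 keeps the old behaviour.
    rhf = cached_backend_hf("psi4", "h2o", "sto3g")

    # Unrestricted doublet reference: multiplicity is forwarded to run_hf and
    # becomes part of the cache key, so restricted and unrestricted results
    # for the same molecule/basis combination do not collide.
    uhf = cached_backend_hf("psi4", "ch2nh2", "sto3g", multiplicity=2)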
@@ -152,7 +152,8 @@ def cached_backend_hf(backend, molecule, basis): def payload(): hfres = adcc.backends.run_hf(backend, xyz=geometry.xyz[molecule], basis=basis, conv_tol=1e-13, - conv_tol_grad=1e-12) + multiplicity=multiplicity, + conv_tol_grad=1e-11) return adcc.backends.import_scf_results(hfres) # For reasons not clear to me (mfh), caching does not work @@ -160,7 +161,7 @@ def payload(): if backend == "pyscf": return payload() - key = (backend, molecule, basis) + key = (backend, molecule, basis, str(multiplicity)) try: return __cache_cached_backend_hf[key] except NameError: diff --git a/adcc/backends/veloxchem.py b/adcc/backends/veloxchem.py index bee0665e..370817ae 100644 --- a/adcc/backends/veloxchem.py +++ b/adcc/backends/veloxchem.py @@ -143,7 +143,9 @@ def build_full_eri_ffff(self): slices_beta = [bro if x == "O" else brv for x in b] # automatically set ERI tensor's symmetry-equivalent blocks - trans_sym_blocks = get_symm_equivalent_transpositions_for_block(b, notation="chem") + trans_sym_blocks = get_symm_equivalent_transpositions_for_block( + b, notation="chem" + ) # Slices for the spin-allowed blocks aaaa = SpinBlockSlice("aaaa", (slices_alpha[0], slices_alpha[1], @@ -281,18 +283,19 @@ def flush_cache(self): def import_scf(scfdrv): - # TODO This could be a little more informative + # TODO The error messages in here could be a little more informative if not isinstance(scfdrv, vlx.scfrestdriver.ScfRestrictedDriver): - raise InvalidReference("Unsupported type for backends.veloxchem.import_scf.") + raise InvalidReference("Unsupported type for " + "backends.veloxchem.import_scf.") if not hasattr(scfdrv, "task"): raise InvalidReference("Please attach the VeloxChem task to " "the VeloxChem SCF driver") if not scfdrv.is_converged: - raise InvalidReference("Cannot start an adc calculation on top of an SCF, " - "which is not converged.") + raise InvalidReference("Cannot start an adc calculation on top " + "of an SCF, which is not converged.") provider = VeloxChemHFProvider(scfdrv) return provider diff --git a/adcc/conftest.py b/adcc/conftest.py index 347c7889..aee6e52f 100644 --- a/adcc/conftest.py +++ b/adcc/conftest.py @@ -64,7 +64,8 @@ def pytest_addoption(parser): def pytest_collection_modifyitems(config, items): - slowcases = ["h2o_def2tzvp", "h2o_ccpvdz", "cn_ccpvdz", "h2s_6311g"] + slowcases = ["h2o_def2tzvp", "h2o_ccpvdz", "cn_ccpvdz", + "h2s_6311g", "ch2nh2_ccpvdz"] if config.getoption("mode") == "fast": skip_slow = pytest.mark.skip(reason="need '--mode full' option to run.") diff --git a/adcc/memory_pool.py b/adcc/memory_pool.py index a633c7cd..da266ef0 100644 --- a/adcc/memory_pool.py +++ b/adcc/memory_pool.py @@ -41,14 +41,16 @@ def initialise(self, max_memory, tensor_block_size=16, tensor_block_size : int, optional This parameter roughly has the meaning of how many indices are handled - together on operations. A good value is 16 for most nowaday CPU cachelines. + together on operations. A good value is 16 for most modern CPU + cachelines. pagefile_prefix : str, optional Directory prefix for storing temporary cache files. allocator : str, optional - The allocator to be used. Valid values are "libxm", "standard" (libstc++ - allocator) and "default", where "default" uses the best-available default. + The allocator to be used. Valid values are "libxm", "standard" + (libstdc++ allocator) and "default", where "default" uses the + best-available default.
""" if allocator not in ["standard", "default"]: if allocator not in libadcc.__features__: diff --git a/adcc/test_DataHfProvider.py b/adcc/test_DataHfProvider.py index c2f0c5c7..7cafa623 100644 --- a/adcc/test_DataHfProvider.py +++ b/adcc/test_DataHfProvider.py @@ -56,7 +56,7 @@ def test_dict(self): # Import hfdata from dict compare_refstate_with_reference(data, refdata, spec, scfres=bdict, - compare_eri_almost_abs=True) + compare_eri="abs") def test_hdf5(self): data = cache.hfdata["cn_sto3g"] @@ -91,4 +91,4 @@ def test_hdf5(self): # Import hfdata from hdf5 file compare_refstate_with_reference(data, refdata, spec, scfres=fn, - compare_eri_almost_abs=True) + compare_eri="abs") diff --git a/adcc/test_ReferenceState_backends.py b/adcc/test_ReferenceState_backends.py index 7e0fa663..ec20f0fb 100644 --- a/adcc/test_ReferenceState_backends.py +++ b/adcc/test_ReferenceState_backends.py @@ -32,9 +32,8 @@ from .misc import expand_test_templates from .test_ReferenceState_refdata import compare_refstate_with_reference -# The methods to test (currently only restricted is supported in this test) -testcases = [case for case in cache.hfimport.keys() - if cache.hfdata[case]["restricted"]] +# The methods to test +testcases = [case for case in cache.hfimport.keys()] backends = [b for b in adcc.backends.available() if b != "molsturm"] @@ -54,11 +53,17 @@ def base_test(self, system, backend, case): backend == "veloxchem" and molecule == "h2o": pytest.skip("VeloxChem does not support f-functions.") - scfres = cached_backend_hf(backend, molecule, basis) - compare_refstate_with_reference( - data, reference, case, scfres, compare_orbcoeff=False, - compare_eri_almost_abs=True - ) + multiplicity = 1 + compare_eri = "abs" + if molecule in ["cn", "ch2nh2"]: + multiplicity = 2 + if molecule == "cn": + compare_eri = "off" + + scfres = cached_backend_hf(backend, molecule, basis, multiplicity) + compare_refstate_with_reference(data, reference, case, scfres, + compare_orbcoeff=False, + compare_eri=compare_eri) def template_generic(self, case, backend): self.base_test(case, backend, "gen") diff --git a/adcc/test_ReferenceState_refdata.py b/adcc/test_ReferenceState_refdata.py index 7213f6f6..f73d18ba 100644 --- a/adcc/test_ReferenceState_refdata.py +++ b/adcc/test_ReferenceState_refdata.py @@ -30,14 +30,18 @@ from adcc.testdata.cache import cache -def compare_refstate_with_reference( - data, reference, spec, scfres=None, compare_orbcoeff=True, - compare_eri_almost_abs=False -): - atol = data["conv_tol"] - import_data = data - if scfres: +def compare_refstate_with_reference(data, reference, spec, scfres=None, + compare_orbcoeff=True, compare_eri="value"): + # Extract convergence tolerance setting for comparison threshold + if scfres is None: + import_data = data + atol = data["conv_tol"] + else: import_data = scfres + if hasattr(scfres, "conv_tol"): + atol = scfres.conv_tol + else: + atol = data["conv_tol"] # TODO once hfdata is an HDF5 file # refcases = ast.literal_eval(data["reference_cases"][()]) @@ -60,7 +64,7 @@ def compare_refstate_with_reference( assert refstate.n_orbs_beta == data["n_orbs_alpha"] assert refstate.n_alpha == sum(data["occupation_f"][:refstate.n_orbs_alpha]) assert refstate.n_beta == sum(data["occupation_f"][refstate.n_orbs_alpha:]) - assert refstate.conv_tol == data["conv_tol"] + assert refstate.conv_tol == atol # because atol is set to be the SCF conv_tol assert_allclose(refstate.energy_scf, data["energy_scf"], atol=atol) assert refstate.mospaces.subspaces == subspaces @@ -90,11 +94,11 @@ def 
compare_refstate_with_reference( assert_allclose(refstate.fock(ss).to_ndarray(), reference["fock"][ss], atol=atol) - if compare_eri_almost_abs: + if compare_eri == "abs": for ss in reference["eri"].keys(): assert_almost_equal(np.abs(refstate.eri(ss).to_ndarray()), np.abs(reference["eri"][ss])) - else: + elif compare_eri == "value": for ss in reference["eri"].keys(): assert_allclose(refstate.eri(ss).to_ndarray(), reference["eri"][ss], atol=atol) diff --git a/adcc/test_functionality_xes.py b/adcc/test_functionality_xes.py new file mode 100644 index 00000000..200ec124 --- /dev/null +++ b/adcc/test_functionality_xes.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +## vi: tabstop=4 shiftwidth=4 softtabstop=4 expandtab +## --------------------------------------------------------------------- +## +## Copyright (C) 2019 by the adcc authors +## +## This file is part of adcc. +## +## adcc is free software: you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published +## by the Free Software Foundation, either version 3 of the License, or +## (at your option) any later version. +## +## adcc is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with adcc. If not, see . +## +## --------------------------------------------------------------------- +import adcc +import unittest +import numpy as np + +from numpy.testing import assert_allclose + +from adcc.backends import have_backend +from adcc.testdata import geometry + +import pytest + + +@pytest.mark.skipif(not have_backend("pyscf"), reason="pyscf not found.") +class TestFunctionalityXes(unittest.TestCase): + # Test for XES calculations using pyscf / adcc + + def base_test(self, system, ref): + from adcc.backends.pyscf import run_core_hole + + basis = system.split("_")[-1] + molecule = system.split("_")[0] + mf = run_core_hole(geometry.xyz[molecule], basis) + state = adcc.adc2x(mf, conv_tol=1e-7, n_states=len(ref["eigenvalues"])) + + assert_allclose(state.excitation_energies, ref["eigenvalues"], atol=1e-6) + + # Computing the dipole moment implies a lot of cancelling in the + # contraction, which has quite an impact on the accuracy. 
+ assert_allclose(state.oscillator_strengths, ref["oscillator_strengths"], + atol=1e-4) + assert_allclose(state.state_dipole_moments, ref["state_dipole_moments"], + atol=1e-4) + + def test_h2o_sto3g_adc2x_xes_singlets(self): + ref = {} + ref["eigenvalues"] = np.array([ + -19.61312030966488, -19.53503046052984, -19.30567103495504, + -19.03686703652111, -18.97033973109105, -18.95165735507893, + -18.94994083890595, -18.90692826540985, -18.88461188150268, + -18.87778442694299]) + + ref["oscillator_strengths"] = np.array([ + 0.05035461011516018, 0.03962058660581094, 0.031211292115201725, + 1.136150679562653e-06, 0.00016786705751929378, 1.149042625218289e-09, + 0.0002824180316791623, 8.321992182153297e-05, 1.0616015344521101e-10, + 7.574230970577102e-10]) + + ref["state_dipole_moments"] = np.array([ + [0.8806045671880932, 1.2064165118168874e-13, 0.6222480112064355], + [0.6940504287188208, -1.4395573883255837e-13, 0.49028206407194075], + [1.0467560044828614, -2.960053355673259e-14, 0.7394274748687037], + [0.22520990007716524, -1.284945843433397e-11, 0.16778189446510727], + [0.36531640078799776, 1.2660291475935138e-09, 0.26443331112341384], + [0.37666647657062025, 1.6550371042775045e-07, 0.25756747786732237], + [0.1752216284582866, 1.6992711802466892e-09, 0.13405532923190466], + [0.2602860685582161, 7.339227199973813e-10, 0.1928141069176047], + [0.43017102549320574, -1.7386733480222987e-08, 0.3115788202607428], + [0.3521150147399802, -4.259803461929766e-07, 0.23755654819723393] + ]) + self.base_test("h2o_sto3g", ref) + + def test_h2o_ccpvdz_adc2x_xes_singlets(self): + ref = {} + ref["eigenvalues"] = np.array([ + -19.519700798837476, -19.445092829708972, -19.269887129049895, + -18.998700591075018, -18.948428933760578]) + ref["oscillator_strengths"] = np.array([ + 0.052448756411532765, 0.04381087006608744, 0.037804719841347485, + 5.153812050043302e-06, 3.790267234406099e-05]) + ref["state_dipole_moments"] = np.array([ + [0.8208067509364902, -1.048772874567675e-13, 0.5798211061449533], + [0.6800814813832732, -1.2487083270270364e-13, 0.4802418117812448], + [0.993795308970137, -1.9141243421572952e-13, 0.7017671690535234], + [-0.16173588644603332, -4.85408235339015e-09, -0.1088534193905284], + [-0.19851759440650885, -1.825542427613543e-07, -0.1356563992145512] + ]) + self.base_test("h2o_ccpvdz", ref) diff --git a/adcc/testdata/0_download_testdata.sh b/adcc/testdata/0_download_testdata.sh index bec837bb..1b224479 100755 --- a/adcc/testdata/0_download_testdata.sh +++ b/adcc/testdata/0_download_testdata.sh @@ -2,6 +2,8 @@ SOURCE="https://get.adc-connect.org/testdata/0.4.0/" DATAFILES=( + ch2nh2_sto3g_hfdata.hdf5 + ch2nh2_sto3g_hfimport.hdf5 cn_sto3g_hfdata.hdf5 cn_sto3g_hfimport.hdf5 cn_sto3g_reference_adc0.hdf5 diff --git a/adcc/testdata/SHA256SUMS b/adcc/testdata/SHA256SUMS index e7f7256e..b640e653 100644 --- a/adcc/testdata/SHA256SUMS +++ b/adcc/testdata/SHA256SUMS @@ -1,3 +1,5 @@ +b2f8be7dbee55278f8e2dcf4fad8dc760797de582b752366881fa6a47a497c18 ch2nh2_sto3g_hfdata.hdf5 +424db3133e90ba113ef2800e115054a382099202976bf58dc560da53389f7da5 ch2nh2_sto3g_hfimport.hdf5 27aced3089fb5eb208dbce6b29b10576e431e99199c7a57ccfd5e6a762db1572 cn_ccpvdz_hfdata.hdf5 a15001b874327b1cec2c84bfe4a24695c71633f31aaf618dc25744dda2c1022b cn_ccpvdz_hfimport.hdf5 7e3a04d69a8110a78c90ada8e3dbccefaf83bee20fc1bf15fbef1d0a610edd44 cn_ccpvdz_reference_adc0.hdf5 diff --git a/adcc/testdata/cache.py b/adcc/testdata/cache.py index c18b9f4f..9cf1d741 100644 --- a/adcc/testdata/cache.py +++ b/adcc/testdata/cache.py @@ -86,7 +86,7 @@ def 
fullfile(fn): class TestdataCache(): - cases = ["h2o_sto3g", "cn_sto3g", "hf3_631g", "h2s_sto3g"] + cases = ["h2o_sto3g", "cn_sto3g", "hf3_631g", "h2s_sto3g", "ch2nh2_sto3g"] mode_full = False @staticmethod @@ -128,11 +128,13 @@ def refstate_cvs(self): ret = {} for case in self.testcases: # TODO once hfdata is an HDF5 file - # refcases = ast.literal_eval(self.hfdata[case]["reference_cases"][()]) + # refcases = ast.literal_eval( + # self.hfdata[case]["reference_cases"][()]) refcases = ast.literal_eval(self.hfdata[case]["reference_cases"]) if "cvs" not in refcases: continue - ret[case] = adcc.ReferenceState(self.hfdata[case], **refcases["cvs"]) + ret[case] = adcc.ReferenceState(self.hfdata[case], + **refcases["cvs"]) ret[case].import_all() return ret @@ -167,7 +169,8 @@ def reference_data(self): if datafile is None or not os.path.isfile(datafile): continue fulldict.update(hdf5io.load(datafile)) - ret[k] = fulldict + if fulldict: + ret[k] = fulldict return ret @cached_property diff --git a/adcc/testdata/generate_hfdata_ch2nh2_sto3g.py b/adcc/testdata/generate_hfdata_ch2nh2_sto3g.py new file mode 100755 index 00000000..9f88cf45 --- /dev/null +++ b/adcc/testdata/generate_hfdata_ch2nh2_sto3g.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +## vi: tabstop=4 shiftwidth=4 softtabstop=4 expandtab +## --------------------------------------------------------------------- +## +## Copyright (C) 2018 by the adcc authors +## +## This file is part of adcc. +## +## adcc is free software: you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published +## by the Free Software Foundation, either version 3 of the License, or +## (at your option) any later version. +## +## adcc is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with adcc. If not, see . 
+## +## --------------------------------------------------------------------- +import sys + +from pyscf import gto, scf +from geometry import xyz +from os.path import dirname, join + +sys.path.insert(0, join(dirname(__file__), "adcc-testdata")) + +import adcctestdata as atd # noqa: E402 + +# Run SCF in pyscf and converge super-tight using an EDIIS +mol = gto.M( + atom=xyz["ch2nh2"], + basis='sto-3g', + unit="Bohr", + spin=1, # =2S, ergo doublet + verbose=4 +) +mf = scf.UHF(mol) +mf.diis = scf.EDIIS() +mf.conv_tol = 1e-14 +mf.conv_tol_grad = 1e-12 +mf.diis_space = 6 +mf.max_cycle = 500 +mf = scf.addons.frac_occ(mf) +mf.kernel() +h5f = atd.dump_pyscf(mf, "ch2nh2_sto3g_hfdata.hdf5") + +h5f["reference_cases"] = str({ + "gen": {}, + "cvs": {"core_orbitals": 2}, +}) diff --git a/adcc/testdata/generate_hfimport.py b/adcc/testdata/generate_hfimport.py index 7829dbf4..786ec63a 100755 --- a/adcc/testdata/generate_hfimport.py +++ b/adcc/testdata/generate_hfimport.py @@ -64,7 +64,9 @@ def dump_imported(key, dump_cvs=True): print("Caching data for {} ...".format(key)) data = cache.hfdata[key] dictionary = {} - refcases = ast.literal_eval(data["reference_cases"][()]) + # TODO once hfdata is an HDF5 file + # refcases = ast.literal_eval(data["reference_cases"][()]) + refcases = ast.literal_eval(data["reference_cases"]) for name, args in refcases.items(): print("Working on {} {} ...".format(key, name)) refstate = adcc.ReferenceState(data, **args) @@ -83,6 +85,9 @@ def main(): dump_imported("cn_sto3g") dump_imported("cn_ccpvdz") + # CH2NH2 unrestricted (no symmetries) + dump_imported("ch2nh2_sto3g") + if __name__ == "__main__": main() diff --git a/adcc/testdata/geometry.py b/adcc/testdata/geometry.py index 872e38fd..5355b74a 100644 --- a/adcc/testdata/geometry.py +++ b/adcc/testdata/geometry.py @@ -45,4 +45,13 @@ H 0 0 0 F 0 0 2.5 """, + # + "ch2nh2": """ + C -1.043771327642266 0.9031379094521343 -0.0433881118200138 + N 1.356218645077853 -0.0415928720016770 0.9214682528604154 + H -1.624635343811075 2.6013402912925274 1.0436579440747924 + H -2.522633198204392 -0.5697335292951204 0.1723619198215792 + H 2.681464678974086 1.3903093043650074 0.6074335654801934 + H 1.838098806841944 -1.5878801706882844 -0.2108367437177239 + """ } diff --git a/docs/calculations.rst b/docs/calculations.rst index 5db001b6..bf351d6e 100644 --- a/docs/calculations.rst +++ b/docs/calculations.rst @@ -175,6 +175,43 @@ their oscillator strength as well as the square norm of the singles (``|v1|^2``) and doubles (``|v2|^2``) parts of the corresponding excitation vectors. +A quick overview of the dominating orbitals involved in the +determined excitations, can also be obtained very easily. +For this simply print the string returned by +the :func:`adcc.ExcitedStates.describe_amplitudes()` +method, i.e. ``print(state.describe_amplitudes())``. +In our case it would produce a table such as:: + + +-------------------------------------------------------+ + | State 0 , 0.3043779 au, 8.282543 eV | + +-------------------------------------------------------+ + | HOMO -> LUMO a ->a -0.675 | + | HOMO -> LUMO +3 a ->a +0.094 | + | HOMO -> LUMO +4 a ->a -0.0674 | + + ... + + +-------------------------------------------------------+ + | State 1 , 0.3768004 au, 10.25326 eV | + +-------------------------------------------------------+ + | HOMO -> LUMO +1 a ->a +0.663 | + | HOMO -> LUMO +2 a ->a +0.14 | + | HOMO -> LUMO +6 a ->a -0.112 | + + ... 
+ + +-------------------------------------------------------+ + | State 2 , 0.3866926 au, 10.52244 eV | + +-------------------------------------------------------+ + | HOMO -1 -> LUMO a ->a +0.675 | + | HOMO -1 -> LUMO +3 a ->a -0.0902 | + | HOMO -1 -> LUMO+10 a ->a -0.035 | + | HOMO -1 -> LUMO +4 a ->a +0.0338 | + + ... + +In the tables a few lines have been cut near the ``...`` for clarity. + Without a doubt, ADC(3) is a rather expensive method, taking already noticable time for a simple system such as a triple zeta water calculation. For comparison an equivalent ADC(1) diff --git a/examples/hydrogen_fluoride/psi4_631g_sf_adc2.py b/examples/hydrogen_fluoride/psi4_631g_sf_adc2.py new file mode 100755 index 00000000..61aa02de --- /dev/null +++ b/examples/hydrogen_fluoride/psi4_631g_sf_adc2.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +## vi: tabstop=4 shiftwidth=4 softtabstop=4 expandtab +import adcc +import psi4 + +# Run SCF in psi4 +mol = psi4.geometry(""" + 0 3 + H 0 0 0 + F 0 0 2.5 + symmetry c1 + units au + no_reorient + no_com + """) + +psi4.set_num_threads(adcc.thread_pool.n_cores) +psi4.core.be_quiet() +psi4.set_options({'basis': "6-31g", + 'e_convergence': 1e-14, + 'd_convergence': 1e-9, + 'reference': 'uhf'}) +scf_e, wfn = psi4.energy('SCF', return_wfn=True) + +# Run solver and print results +states = adcc.adc2(wfn, n_spin_flip=5, conv_tol=1e-8) +print(states.describe()) +print(states.describe_amplitudes()) diff --git a/examples/water/psi4_sto3g_uhf_adc2.py b/examples/water/psi4_sto3g_uhf_adc2.py new file mode 100755 index 00000000..b8f1e808 --- /dev/null +++ b/examples/water/psi4_sto3g_uhf_adc2.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 +## vi: tabstop=4 shiftwidth=4 softtabstop=4 expandtab +import psi4 + +import adcc + +mol = psi4.geometry(""" + 0 3 + O 0 0 0 + H 0 0 1.795239827225189 + H 1.693194615993441 0 -0.599043184453037 + symmetry c1 + units au + """) + +# set the number of cores equal to the auto-determined value from +# the adcc ThreadPool +psi4.set_num_threads(adcc.thread_pool.n_cores) +psi4.core.be_quiet() +psi4.set_options({'basis': "sto-3g", + 'scf_type': 'pk', + 'e_convergence': 1e-12, + 'reference': 'uhf', + 'd_convergence': 1e-8}) +scf_e, wfn = psi4.energy('SCF', return_wfn=True) + +# Run an adc2 calculation: +state = adcc.adc2(wfn, n_states=5) + +print(state.describe()) diff --git a/examples/water/pyscf_ccpvdz_xes.py b/examples/water/pyscf_ccpvdz_xes.py index 1ec61fd8..ba2b16d6 100755 --- a/examples/water/pyscf_ccpvdz_xes.py +++ b/examples/water/pyscf_ccpvdz_xes.py @@ -25,7 +25,7 @@ # Make a core hole mo0 = copy.deepcopy(mf.mo_coeff) occ0 = copy.deepcopy(mf.mo_occ) -occ0[0][0] = 0.0 # alpha core hole +occ0[1][0] = 0.0 # beta core hole dm = mf.make_rdm1(mo0, occ0) mf_core = scf.UHF(mol) diff --git a/examples/water/pyscf_sto3g_uhf_adc2.py b/examples/water/pyscf_sto3g_uhf_adc2.py new file mode 100755 index 00000000..4bd69312 --- /dev/null +++ b/examples/water/pyscf_sto3g_uhf_adc2.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +## vi: tabstop=4 shiftwidth=4 softtabstop=4 expandtab +import adcc + +from pyscf import gto, scf + +# Run SCF in pyscf +mol = gto.M( + atom='O 0 0 0;' + 'H 0 0 1.795239827225189;' + 'H 1.693194615993441 0 -0.599043184453037', + basis='sto-3g', + spin=2, + unit="Bohr" ) +scfres = scf.UHF(mol) +scfres.conv_tol = 1e-8 +scfres.conv_tol_grad = 1e-8 +scfres.max_cycle = 100 +scfres.verbose = 4 +scfres.kernel() + +# Explicitly initialise the ADC virtual memory pool to 256 MiB +# (if this call is missing then only RAM is used.
This allows +# to dump data to disk as well such that maximally up to the +# specified amount of data (in bytes) will reside in RAM) +adcc.memory_pool.initialise(max_memory=256 * 1024 * 1024) + +# Run an adc2 calculation: +singlets = adcc.adc2(scfres, n_states=5) +# triplets = adcc.adc2(singlets.matrix, n_triplets=3) + +print(singlets.describe()) +print() +# print(triplets.describe()) diff --git a/extension/AdcCore.py b/extension/AdcCore.py index 7ee1ae53..c15f9dca 100644 --- a/extension/AdcCore.py +++ b/extension/AdcCore.py @@ -52,13 +52,17 @@ def has_mkl_numpy(): for lib in numpy.__config__.blas_mkl_info.get("libraries", {})) except ImportError as e: if "mkl" in str(e): - # numpy seems to be installed and linked against MKL, but mkl was not found. - raise ImportError("Trying to import numpy for MKL check, but obtained an " - "import error indicating a missing MKL dependency. " - "Did you load the MKL modules properly?") from e - - # This indicates a missing numpy or a big error in numpy. It's best to assume - # MKL is not there and (potentially) install the non-mkl version from pypi + # numpy seems to be installed and linked against MKL, + # but mkl was not found. + raise ImportError( + "Trying to import numpy for MKL check, but obtained an " + "import error indicating a missing MKL dependency. " + "Did you load the MKL modules properly?" + ) from e + + # This indicates a missing numpy or a big error in numpy. It's best + # to assume MKL is not there and (potentially) install the non-mkl + # version from pypi return False @@ -117,7 +121,8 @@ def upstream(self): def checkout(self, version): """Checkout adccore source code in the given version if possible""" if not self.upstream: - raise RuntimeError("Cannot checkout adccore, since upstream not known.") + raise RuntimeError("Cannot checkout adccore, since upstream " + "not known.") subprocess.check_call(["git", "clone", self.upstream, self.source_dir]) olddir = os.getcwd() @@ -149,7 +154,8 @@ def build(self): features += ["mkl"] build_dir = join(self.source_dir, "build") - build_adccore.build_install(build_dir, self.install_dir, features=features) + build_adccore.build_install(build_dir, self.install_dir, + features=features) def build_documentation(self): """Build adccore documentation. Only valid if has_source is true""" @@ -170,8 +176,8 @@ def build_documentation(self): def file_globs(self): """ Return the file globs to be applied relative to the top directory of the - repository in order to obtain all files relevant for the binary distribution - of adccore. + repository in order to obtain all files relevant for the binary + distribution of adccore. """ return [ self.install_dir + "/adccore_config.json", @@ -204,14 +210,15 @@ def download(self, version, postfix=None): local = tmpdir + "/" + fn status_code = request_urllib(base_url + "/" + fn, local) if status_code < 200 or status_code >= 300: - msg = ("Could not download adccore version {} for platform {} from {}." - "".format(version, get_platform(), base_url)) + msg = ("Could not download adccore version {} for platform {} " + "from {}.".format(version, get_platform(), base_url)) if 400 <= status_code < 500: # Either an unsupported version or an error on our end - msg += (" This should not have happened and either this means your" - " platform / OS / architecture is unsupported or that there" - " is a bug in adcc. 
Please check the adcc installation" - " instructions (https://adc-connect.org/installation.html)" + msg += (" This should not have happened and either this means" + " your platform / OS / architecture is unsupported or" + " that there is a bug in adcc. Please check the adcc " + " installation instructions" + " (https://adc-connect.org/latest/installation.html)" " and if in doubt, please open an issue on github.") raise RuntimeError(msg) diff --git a/scripts/load_libtensor.py b/scripts/load_libtensor.py index a087e6f0..f6c7c219 100644 --- a/scripts/load_libtensor.py +++ b/scripts/load_libtensor.py @@ -55,8 +55,8 @@ def load_libtensor(fle): shape.append(int(v[i + 1])) except ValueError: raise ValueError("Could not parse libtensor file: " - "Could not parse the " + str(i) + "th element of the " - "dimensionality string.") + "Could not parse the " + str(i) + "th element of " + "the dimensionality string.") shape = tuple(shape) # Construct a string where all fields are on an individual line diff --git a/scripts/upload_core.py b/scripts/upload_core.py index f89a8693..00fa6a35 100755 --- a/scripts/upload_core.py +++ b/scripts/upload_core.py @@ -89,7 +89,8 @@ def upload_tarball(filename): if ":" in target: # Remote deployment host, tdir = target.split(":") command = "put {} {}/".format(filename, tdir) - subprocess.run(["sftp", "-b", "-", host], input=command.encode(), check=True) + subprocess.run(["sftp", "-b", "-", host], input=command.encode(), + check=True) else: # Dummy local deployment subprocess.run(["cp", filename, target], check=True) diff --git a/setup.cfg b/setup.cfg index e5d7a21d..cd142716 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,11 +8,12 @@ tag = True [bumpversion:file:setup.py] [flake8] -select = C,E,F,W,B,B950 -ignore = E241,E266,W503,E501 -max-line-length = 80 +ignore = E241,E266,W503 +max-line-length = 82 per-file-ignores = - examples/water/data.py:E131,E126,E222,E121,E123 + examples/water/data.py:E131,E126,E222,E121,E123,E501 + adccore/**:W,E,F + adcc/testdata/adcc-testdata/**:E501 [aliases] test = pytest diff --git a/setup.py b/setup.py index d3692bd2..fa00c7b3 100755 --- a/setup.py +++ b/setup.py @@ -25,8 +25,8 @@ import os import sys import glob -import setuptools import warnings +import setuptools from os.path import join @@ -61,7 +61,8 @@ def get_adccore_data(): from AdcCore import AdcCore, get_platform adccore = AdcCore() - if not adccore.is_config_file_present or adccore.version != adccore_version[0] \ + if not adccore.is_config_file_present \ + or adccore.version != adccore_version[0] \ or adccore.platform != get_platform(): # Get this version by building it or downloading it adccore.obtain(*adccore_version) @@ -89,9 +90,10 @@ def __init__(self): for conf in ["/etc/ld.so.conf"] + glob.glob("/etc/ld.so.conf.d/*.conf"): if not os.path.isfile(conf): - warnings.warn("Resolving configuration file {} failed. Probably" - " the file has been remove during distribution upgrade" - " and only a symbolic link to the removed file is left.".format(conf)) + warnings.warn("Resolving configuration file {} failed. 
Probably " "the file has been removed during distribution " "upgrade and only a symbolic link to the removed " "file is left.".format(conf)) continue with open(conf, "r") as fp: for line in fp: @@ -120,7 +122,8 @@ def adccsetup(*args, **kwargs): except Exception as e: url = kwargs["url"] + "/installation.html" raise RuntimeError("Unfortunately adcc setup.py failed.\n" - "For hints how to install adcc, see {}.".format(url)) from e + "For hints on how to install adcc, see {}." + "".format(url)) from e #
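
As a usage illustration of the workflow documented in the docs/calculations.rst addition, the tables above can be produced end to end with a few lines of Python. The following minimal sketch mirrors the new examples/water/pyscf_sto3g_uhf_adc2.py example; the triplet water/STO-3G setup, the number of states and the convergence threshold are purely illustrative::

    import adcc
    from pyscf import gto, scf

    # Minimal UHF reference (triplet water in Bohr, as in the new examples)
    mol = gto.M(
        atom='O 0 0 0;'
             'H 0 0 1.795239827225189;'
             'H 1.693194615993441 0 -0.599043184453037',
        basis='sto-3g',
        spin=2,
        unit="Bohr"
    )
    scfres = scf.UHF(mol)
    scfres.conv_tol = 1e-8
    scfres.kernel()

    # Run ADC(2) and print both summary tables: describe() lists the
    # excitation energies, describe_amplitudes() the dominant orbital
    # contributions shown in the documentation above.
    state = adcc.adc2(scfres, n_states=5)
    print(state.describe())
    print(state.describe_amplitudes())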
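
The ``reference_cases`` handling touched in the testdata scripts is a simple round trip between the generator scripts and TestdataCache. A small sketch with hypothetical values (plain Python, no adcc required)::

    import ast

    # The generator scripts store the reference cases as the string
    # representation of a plain Python dict ...
    stored = str({"gen": {}, "cvs": {"core_orbitals": 2}})

    # ... and TestdataCache recovers the dict with ast.literal_eval.
    # The TODO comments in the patch note that once hfdata lives in a
    # real HDF5 file, the value would first have to be read out of the
    # dataset (e.g. via data["reference_cases"][()]) before parsing.
    refcases = ast.literal_eval(stored)
    assert refcases["cvs"] == {"core_orbitals": 2}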