From 29a9ddd5341bd6ae0f78373187a5629124fd47aa Mon Sep 17 00:00:00 2001 From: Kamal Choudhary Date: Tue, 13 Dec 2022 11:20:14 -0500 Subject: [PATCH] Develop (#268) * Image augmentation. * Augment images. * Specie update. * Add Latt2D, STM image (b-1) fix, image augmentation fix. * Update conf.py * Multi-output graph batching. * Add EDOS dataset. * Temp. * Add circuit maker. * NELECT update. * Version update, more DBs added. * Fix CHGCAR vasp. * Added volumetric reshape for CHGCAR. * Tmp * Tersoff-Hamann update, specie update. * Add crop from center in STM. * Add Fourier transform in STM. * Update STM pytest. * Add DPI to STM. * Zeo++ added, Atoms cif update, STM update, random vacancy maker added. * Atoms tempfile fix, Potcar from atoms module added. * Test for docs. * C2DB link update, docs Atoms update. * Version update, COD DB, QM9 JCTC DB added. * Composition bug fix, elemental descriptor added. * Develop (#186) * Update outputs.py: added the calculation of the Raman intensities inside parse_raman_dat. * Update cfid.py * Delete __init__.py * stylecss added. * Adding extra Makefile. * Remove examples from docs. * Docs update. * Tutorials update. * Tutorials docs update. * Docs update, pdb reader updated. * Update action_build.yml * Remove pytraj strong dependencies. * Update docs, added PDBBind and HPOV datasets. * Add thickness to surface builder. * Surface builder update, chemical-only magpie descriptors added, pdb_core dataset added, zeopp tempfile bugfix. * Typo fix. * Add names to chem descs. * Lessen hermsolver pytest. * Reduced pytest. * No DFT3D * Exclude dft_3d dataset for memory issue. * Update figshare test. * Exclude db from coverage. * Add magpie.json. * Wien2k bands bug fix. * Update JARVIS-FF, ElasticTensor, LAMMPS parse folder, VASP bandstructure plot code. * JFF update. * Add JQE_TB3 and hMOF dataset. * Update LAMMPS module. * Fix elastic tensor module. * Figshare update, docs db name update. * Substitutions. * Update figshare dft_3d, cfid_3d. * Docs data update. * Generate substitutions. * Lint fix. * Update DOS. * Adding folders for nexus setup. * Update QMOF and hMOF info. * Fixing auto klength error. * Adding zeopp surface area. * Vacancy bug fix, added hmof to docs. * Vacancy update. * QE inputs update. * Python 3.9 test, added QE el-ph, VASP XANES, modified hmof db details. * Update Contribution.rst * Minor lint fix. * Update action_build.yml * Fix qiskit requirements. * Fix phonopy requirements. * Fix all requirements. * QE test. * Update action_build. * Try other python versions. * README updates. * Adding nexus. (#197) * Develop (#196) * First input. * added black * Example folder. * Revert back. * Update .readthedocs.yaml * Update dev-requirements.txt * Docs requirements update. * Update requirements.txt * Update atoms.py * Fix pbc in ase_to_Atoms. * Add installation tests (#214) * QE inputs, XANES, GHAction updates. (#210) * Fix codestyle * Fix codestyle with black * Add docstrings * Fix pydocstyle error * Update __init__.py * Adding QE super. * Minor changes to QE module, atoms xyz fix. * Adding qe_tb info, and version update. * WIP super QE. * Working version of ScSi. * QE inputs and task update. * Add master super. * Lint fix. * Minor fix. * ET update. * Fix ET test. * Update sanitize atoms. * Additional checks on supercond. * Debye bug fix. * Pressure in QE Super. * Version fix, publication update, supercond workflow update. * Tensorboard fix. * Melting temp fix. * Update vasp.py (#234) * Local tetra tmp. * Version update. * HSE06 * Tmp. * Vacancy update, Optimade structure and other minor lint updates. * STEM pytest fix. * Minor lint fix.
* Fixed selective dynamics issue in Poscar, force reading for single atom system in Vasprun, np.array in core.graphs, num_atoms for single atom systems in core.Atoms * Lint fix. * Added phononDos class. * Fix pytorch UserWarning in build_undirected_edgedata() (#243): site-packages/jarvis/core/graphs.py:158: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. r = torch.tensor(r).type(torch.get_default_dtype()) * Add PhaseDiagram. * PhaseDiagram update. * STM image pytest increase, requirements upgrade. * Compare atoms, get spg info directly from atoms. * Flake8 fix. * Update publications.rst * Update qiskit. * Update test_hermsolver.py * Fix qiskit DOS. * Fix linting. * Develop og (#257) * Added isotope scattering rate to phonon DOS analysis script. * Linting fix. * Add QE convergence. * Update converg.py. * Nexus. * Super with converg. * SuperCon QE update. * Figshare DB update. * Update alignn ff link. * Lint fix. * DB docs update. Co-authored-by: tavazza Co-authored-by: knc6 Co-authored-by: KAMAL CHOUDHARY Co-authored-by: wines1 <74620550+wines1@users.noreply.github.com> Co-authored-by: Saurav Maheshkar Co-authored-by: Janosh Riebesell Co-authored-by: Ramya Gurunathan --- docs/source/databases.rst | 2 + docs/source/publications.rst | 52 +-- jarvis/__init__.py | 2 +- jarvis/analysis/phonon/dos.py | 58 +++- jarvis/db/figshare.py | 34 +- jarvis/io/nexus/inputs.py | 158 ++++++++- jarvis/io/qe/inputs.py | 3 + jarvis/tasks/nexus/qmc.py | 115 +++++++ jarvis/tasks/qe/converg.py | 546 ++++++++++++++++++++++++++++++++ jarvis/tasks/qe/master_super.py | 55 ++-- jarvis/tasks/qe/super.py | 46 ++- setup.py | 2 +- 12 files changed, 1013 insertions(+), 60 deletions(-) create mode 100644 jarvis/tasks/nexus/qmc.py create mode 100644 jarvis/tasks/qe/converg.py diff --git a/docs/source/databases.rst b/docs/source/databases.rst index 01e302b1..e4a7c3ad 100644 --- a/docs/source/databases.rst +++ b/docs/source/databases.rst @@ -20,6 +20,8 @@ Database name Number of data-points Description (Keyword:FD-ELAST) ``jff`` 2538 Various 3D materials properties in JARVIS-FF database computed with several force-fields +``alignn_ff_db`` 307113 Energy per atom, forces and stresses for ALIGNN-FF + training for 75k materials. ``edos_pdos`` 48469 Normalized electron and phonon density of states with interpolated values and fixed number of bins ``megnet`` 69239 Formation energy and bandgaps of 3D materials properties diff --git a/docs/source/publications.rst b/docs/source/publications.rst index 0fbb205a..daf61983 100644 --- a/docs/source/publications.rst +++ b/docs/source/publications.rst @@ -38,51 +38,61 @@ JARVIS-DFT related [14. Density functional theory-based electric field gradient database, Sci. Data](https://www.nature.com/articles/s41597-020-00707-8) +[15. High-throughput DFT-based discovery of next generation two-dimensional (2D) superconductors](https://arxiv.org/abs/2211.05254) + +[16. A systematic DFT+U and Quantum Monte Carlo benchmark of magnetic two-dimensional (2D) CrX (X = I, Br, Cl, F)](https://arxiv.org/abs/2209.10379) + JARVIS-ML related ----------------------------------------------------- -[15.
Machine learning with force-field inspired descriptors for materials: fast screening and mapping energy landscape, Phys. Rev. Mat., 2, 083801 (2018).](https://journals.aps.org/prmaterials/abstract/10.1103/PhysRevMaterials.2.083801) +[17. Machine learning with force-field inspired descriptors for materials: fast screening and mapping energy landscape, Phys. Rev. Mat., 2, 083801 (2018).](https://journals.aps.org/prmaterials/abstract/10.1103/PhysRevMaterials.2.083801) + +[18. Convergence and machine learning predictions of Monkhorst-Pack k-points and plane-wave cut-off in high-throughput DFT calculations, Comp. Mat. Sci. 161, 300 (2019).](https://www.sciencedirect.com/science/article/pii/S0927025619300813?via%3Dihub) + +[19. Materials science in the artificial intelligence age: high-throughput library generation, machine learning, and a pathway from correlations to the underpinning physics, MRS Comm., 1-18, 2019.](https://doi.org/10.1557/mrc.2019.95) + +[20. Enhancing materials property prediction by leveraging computational and experimental data using deep transfer learning, Nature Comm., 10, 1, (2019).](https://www.nature.com/articles/s41467-019-13297-w) -[16. Convergence and machine learning predictions of Monkhorst-Pack k-points and plane-wave cut-off in high-throughput DFT calculations, Comp. Mat. Sci. 161, 300 (2019).](https://www.sciencedirect.com/science/article/pii/S0927025619300813?via%3Dihub) +[21. Accelerated Discovery of Efficient Solar-cell Materials using Quantum and Machine-learning Methods, Chem. Mater., 31, 5900 (2019).](https://pubs.acs.org/doi/10.1021/acs.chemmater.9b02166) -[17. Materials science in the artificial intelligence age: high-throughput library generation, machine learning, and a pathway from correlations to the underpinning physics, MRS Comm., 1-18, 2019.](https://doi.org/10.1557/mrc.2019.95) +[22. High-throughput Density Functional Perturbation Theory and Machine Learning Predictions of Infrared, Piezoelectric and Dielectric Responses, npj Computational Materials 6, 64 (2020).](https://www.nature.com/articles/s41524-020-0337-2) -[18. Enhancing materials property prediction by leveraging computational and experimental data using deep transfer learning, Nature Comm., 10, 1, (2019).](https://www.nature.com/articles/s41467-019-13297-w) +[23. Data-driven Discovery of 3D and 2D Thermoelectric Materials, J. Phys.: Cond. Matt.](https://iopscience.iop.org/article/10.1088/1361-648X/aba06b/meta) -[19. Accelerated Discovery of Efficient Solar-cell Materials using Quantum and Machine-learning Methods, Chem. Mater., 31, 5900 (2019).](https://pubs.acs.org/doi/10.1021/acs.chemmater.9b02166) +[24. Efficient Computational Design of 2D van der Waals Heterostructures: Band-Alignment, Lattice-Mismatch, Web-app Generation and Machine-learning.](https://arxiv.org/abs/2004.03025) -[20. High-throughput Density Functional Perturbation Theory and Machine Learning Predictions of Infrared, Piezoelectric and Dielectric Responses, npj Computational Materials 6, 64 (2020).](https://www.nature.com/articles/s41524-020-0337-2) +[25. Enhancing materials property prediction by leveraging computational and experimental data using deep transfer learning, Nature Commun.](https://www.nature.com/articles/s41467-019-13297-w) -[21. Data-driven Discovery of 3D and 2D Thermoelectric Materials, J. Phys.: Cond. Matt.](https://iopscience.iop.org/article/10.1088/1361-648X/aba06b/meta) +[26. 
Atomistic Line Graph Neural Network for Improved Materials Property Predictions, npj Computational Materials 7, 1 (2021)](https://www.nature.com/articles/s41524-021-00650-1) -[22. Efficient Computational Design of 2D van der Waals Heterostructures: Band-Alignment, Lattice-Mismatch, Web-app Generation and Machine-learning.](https://arxiv.org/abs/2004.03025) +[27. Recent advances and applications of deep learning methods in materials science, npj Computational Materials 8, 1 (2022)](https://www.nature.com/articles/s41524-022-00734-6) -[23. Enhancing materials property prediction by leveraging computational and experimental data using deep transfer learning, Nature Commun.](https://www.nature.com/articles/s41467-019-13297-w) +[28. Graph neural network predictions of metal organic framework CO2 adsorption properties, Comp. Mat. Sci., 210, 111388 (2022)](https://www.sciencedirect.com/science/article/pii/S092702562200163X) -[24. Atomistic Line Graph Neural Network for Improved Materials Property Predictions, npj Computational Materials 7, 1 (2021)](https://www.nature.com/articles/s41524-021-00650-1) +[29. Data-Driven Multi-Scale Modeling and Optimization for Elastic Properties of Cubic Microstructures](https://link.springer.com/article/10.1007/s40192-022-00258-3) -[25. Recent advances and applications of deep learning methods in materials science, npj Computational Materials 8, 1 (2022)](https://www.nature.com/articles/s41524-022-00734-6) +[30. Uncertainty Prediction for Machine Learning Models of Material Properties](https://pubs.acs.org/doi/abs/10.1021/acsomega.1c03752) -[26. Graph neural network predictions of metal organic framework CO2 adsorption properties, Comp. Mat. Sci., 210, 111388 (2022)](https://www.sciencedirect.com/science/article/pii/S092702562200163X) +[31. Cross-property deep transfer learning framework for enhanced predictive analytics on small materials data](https://www.nature.com/articles/s41467-021-26921-5) -[27. Data-Driven Multi-Scale Modeling and Optimization for Elastic Properties of Cubic Microstructures](https://link.springer.com/article/10.1007/s40192-022-00258-3) +[32. Prediction of the Electron Density of States for Crystalline Compounds with Atomistic Line Graph Neural Networks (ALIGNN)](https://link.springer.com/article/10.1007/s11837-022-05199-y) -[28. Uncertainty Prediction for Machine Learning Models of Material Properties](https://pubs.acs.org/doi/abs/10.1021/acsomega.1c03752) +[33. Designing High-Tc Superconductors with BCS-inspired Screening, Density Functional Theory and Deep-learning](https://arxiv.org/abs/2205.00060) -[29. Cross-property deep transfer learning framework for enhanced predictive analytics on small materials data](https://www.nature.com/articles/s41467-021-26921-5) +[34. Rapid Prediction of Phonon Structure and Properties using an Atomistic Line Graph Neural Network (ALIGNN)](https://arxiv.org/abs/2207.12510) -[30. Prediction of the Electron Density of States for Crystalline Compounds with Atomistic Line Graph Neural Networks (ALIGNN)](https://link.springer.com/article/10.1007/s11837-022-05199-y) +[35. Unified Graph Neural Network Force-field for the Periodic Table](https://arxiv.org/abs/2209.05554) -[31. Designing High-Tc Superconductors with BCS-inspired Screening, Density Functional Theory and Deep-learning](https://arxiv.org/abs/2205.00060) +[36. AtomVision: A machine vision library for atomistic images](https://arxiv.org/abs/2212.02586) -[32. 
Rapid Prediction of Phonon Structure and Properties using an Atomistic Line Graph Neural Network (ALIGNN)](https://arxiv.org/abs/2207.12510) +[37. ChemNLP: A Natural Language Processing based Library for Materials Chemistry Text Data](https://arxiv.org/abs/2209.08203) -[33. Unified Graph Neural Network Force-field for the Periodic Table](https://arxiv.org/abs/2209.05554) +[38. A critical examination of robustness and generalizability of machine learning prediction of materials properties](https://arxiv.org/abs/2210.13597) JARVIS-QC related ----------------------------------------------------- -[34. Quantum Computation for Predicting Electron and Phonon Properties of Solids., J. Phys.: Cond. Matt.](https://iopscience.iop.org/article/10.1088/1361-648X/ac1154) +[39. Quantum Computation for Predicting Electron and Phonon Properties of Solids., J. Phys.: Cond. Matt.](https://iopscience.iop.org/article/10.1088/1361-648X/ac1154) JARVIS-QETB related ----------------------------------------------------- -[35. Fast and Accurate Prediction of Material Properties with Three-Body Tight-Binding Model for the Periodic Table](https://arxiv.org/abs/2112.11585) +[40. Fast and Accurate Prediction of Material Properties with Three-Body Tight-Binding Model for the Periodic Table](https://arxiv.org/abs/2112.11585) \ No newline at end of file diff --git a/jarvis/__init__.py b/jarvis/__init__.py index a3ab81bd..16c1221e 100644 --- a/jarvis/__init__.py +++ b/jarvis/__init__.py @@ -1,5 +1,5 @@ """Version number.""" -__version__ = "2022.09.16" +__version__ = "2022.12.11" import os diff --git a/jarvis/analysis/phonon/dos.py b/jarvis/analysis/phonon/dos.py index 298b9201..903382e3 100644 --- a/jarvis/analysis/phonon/dos.py +++ b/jarvis/analysis/phonon/dos.py @@ -1,8 +1,11 @@ """Module to analyze phonon dos.""" import numpy as np +from phonopy.structure.atoms import isotope_data +from math import pi as pi icm_to_eV = 1.23981e-4 +icm_to_thz = 2.99792458e-2 hbar = 6.582119569e-16 # eV*s kB = 8.617333262145e-5 # eV/K e = 1.60217662e-19 @@ -42,12 +45,8 @@ def heat_capacity(self, temperature=300): # Eq. 1 dos = np.array(self.phonon_dos) / icm_to_eV x = (omega) / (kB * temperature) - Cp = ( - kB - * x[1:] ** 2 - * (np.exp(x[1:]) / (np.exp(x[1:]) - 1) ** 2) - * dos[1:] - ) + prefix = kB * x[1:] ** 2 * (np.exp(x[1:]) / (np.exp(x[1:]) - 1) ** 2) + Cp = prefix * dos[1:] Cp = np.insert(Cp, 0, 0) return np.trapz(Cp, omega) * e * Na @@ -60,3 +59,50 @@ def vibrational_entropy(self, temperature=300): S_vib = kB * ((n + 1) * np.log(n + 1) + n * np.log(n)) * dos[1:] S_vib = np.insert(S_vib, 0, S_vib[0]) return S_vib + + def phonon_isotope_scattering(self, atoms=None): + """ + Get phonon-isotope scattering rate at natural isotopic abundance. + Returns scattering rate in units of Hz. 
+ """ + omega = np.array(self.phonon_freq_cm) + dos = np.array(self.phonon_dos) + + def isotopic_gamma(atoms): + formula = atoms.composition.reduce() + natoms = sum([v for v in formula[0].values()]) + ave_m = 0 + gamma = 0 + for k, v in formula[0].items(): + iso_list = isotope_data[k] + ave_m_n = sum([iso[2] * iso[1] for iso in iso_list]) + g = [iso[2] * (iso[1] - ave_m_n) ** 2 for iso in iso_list] + gamma_n = sum(g) + ave_m += ave_m_n * (v / natoms) + gamma += gamma_n * (v / natoms) + return gamma / (ave_m ** 2) + + gamma = isotopic_gamma(atoms) + atmV = (atoms.volume / atoms.num_atoms) * 1e-30 + omega = omega * icm_to_thz + dos = dos / icm_to_thz / (atmV * atoms.num_atoms) + tau = (pi / 6) * (atmV * gamma * omega ** 2) * dos + return np.trapz(tau, omega) * 1e12 + + +if __name__ == "__main__": + from jarvis.core.atoms import Atoms + from jarvis.db.figshare import get_jid_data + + dos_entry = get_jid_data(jid="JVASP-1459", dataset="edos_pdos") + dft3d_entry = get_jid_data(jid="JVASP-1459", dataset="dft_3d") + ph_dos = dos_entry["pdos_elast"] + ph_freq = np.arange(0, 1000, 5) + atoms = Atoms.from_dict(dft3d_entry["atoms"]) + + ph = PhononDos(phonon_dos=ph_dos, phonon_freq_cm=ph_freq) + debye_temp = ph.debye_temperature(atoms) + iso_scatt = ph.phonon_isotope_scattering(atoms) + + print("Debye temperature:", debye_temp) + print("Isotope scattering rate:", iso_scatt) diff --git a/jarvis/db/figshare.py b/jarvis/db/figshare.py index 8e5c7040..e65f7770 100644 --- a/jarvis/db/figshare.py +++ b/jarvis/db/figshare.py @@ -28,24 +28,27 @@ def get_db_info(): db_info = { # https://doi.org/10.6084/m9.figshare.6815705 "dft_2d": [ - "https://ndownloader.figshare.com/files/26808917", - "d2-3-12-2021.json", + "https://ndownloader.figshare.com/files/38521268", + "d2-12-12-2022.json", "Obtaining 2D dataset 1.1k ...", - "https://www.nature.com/articles/s41524-020-00440-1", + "https://www.nature.com/articles/s41524-020-00440-1" + + "\nOther versions:https://doi.org/10.6084/m9.figshare.6815705", ], # https://doi.org/10.6084/m9.figshare.6815699 "dft_3d": [ - "https://ndownloader.figshare.com/files/29204826", - "jdft_3d-8-18-2021.json", - "Obtaining 3D dataset 55k ...", - "https://www.nature.com/articles/s41524-020-00440-1", + "https://ndownloader.figshare.com/files/38521619", + "jdft_3d-12-12-2022.json", + "Obtaining 3D dataset 76k ...", + "https://www.nature.com/articles/s41524-020-00440-1" + + "\nOther versions:https://doi.org/10.6084/m9.figshare.6815699", ], # https://doi.org/10.6084/m9.figshare.6815699 "cfid_3d": [ "https://ndownloader.figshare.com/files/29205201", "cfid_3d-8-18-2021.json", "Obtaining 3D dataset 55k ...", - "https://www.nature.com/articles/s41524-020-00440-1", + "https://www.nature.com/articles/s41524-020-00440-1" + + "\nOther versions:https://doi.org/10.6084/m9.figshare.6815699", ], # https://doi.org/10.6084/m9.figshare.14213522 "jff": [ @@ -56,6 +59,14 @@ def get_db_info(): "Obtaining JARVIS-FF 2k ...", "https://www.nature.com/articles/s41524-020-00440-1", ], + # https://doi.org/10.6084/m9.figshare.21667874 + "alignn_ff_db": [ + "https://ndownloader.figshare.com/files/38522315", + # "https://ndownloader.figshare.com/files/26809760", + "id_prop.json", + "Obtaining ALIGNN-FF training DB 300k ...", + "https://arxiv.org/abs/2209.05554", + ], "mp_3d_2020": [ "https://ndownloader.figshare.com/files/26791259", "all_mp.json", @@ -203,6 +214,13 @@ def get_db_info(): "Obtaining PDBBind dataset 11k...", "https://doi.org/10.1093/bioinformatics/btu626", ], + # https://doi.org/10.6084/m9.figshare.21713885 
+ "snumat": [ + "https://ndownloader.figshare.com/files/38521736", + "snumat.json", + "Obtaining SNUMAT Hybrid functional dataset 10k...", + "https://www.nature.com/articles/s41597-020-00723-8", + ], # https://doi.org/10.6084/m9.figshare.13215308 "aflow2": [ "https://ndownloader.figshare.com/files/25453265", diff --git a/jarvis/io/nexus/inputs.py b/jarvis/io/nexus/inputs.py index 2c160221..a2edd8ed 100644 --- a/jarvis/io/nexus/inputs.py +++ b/jarvis/io/nexus/inputs.py @@ -1 +1,157 @@ -"""Module to prepare and parse input files.""" +"""Module to prepare input files.""" + + +def get_Zeff(): + """Get valence electrons.""" + return dict( + Ag=19, + Al=3, + Ar=8, + As=5, + Au=19, + B=3, + Be=2, + Bi=5, + Br=7, + C=4, + Ca=10, + Cl=7, + Co=17, + Cr=14, + Cu=19, + F=7, + Fe=16, + Ga=3, + Ge=4, + H=1, + He=2, + I=7, + Ir=17, + K=9, + Kr=8, + Li=1, + Mg=2, + Mn=15, + Mo=14, + N=5, + Na=1, + Ne=8, + Ni=18, + O=6, + P=5, + Pd=18, + S=6, + Sc=11, + Se=6, + Si=4, + Tb=19, + Te=6, + Ti=12, + V=13, + W=14, + Zn=20, + ) + + +def get_pseudo_dft_dict(): + """Get PSPs for DFT.""" + return { + "Ag": "Ag.ccECP.AREP.upf", # Ag.ccECP.SOREP.upf + "Al": "Al.ccECP.upf", + "Ar": "Ar.ccECP.upf", + "As": "As.ccECP.upf", + "Au": "Au.ccECP.AREP.upf", # Au.ccECP.SOREP.upf + "B": "B.ccECP.upf", + "Be": "Be.ccECP.upf", + "Bi": "Bi.ccECP.AREP.upf", # Bi.ccECP.SOREP.upf + "Br": "Br.ccECP.upf", + "C": "C.ccECP.upf", + "Ca": "Ca.ccECP.upf", + "Cl": "Cl.ccECP.upf", + "Co": "Co.ccECP-soft.upf", # Co.opt.upf + "Cr": "Cr.ccECP.upf", # Cr.opt.upf + "Cu": "Cu.ccECP-soft.upf", # Cu.opt.upf + "F": "F.ccECP.upf", + "Fe": "Fe.ccECP-soft.upf", # Fe.opt.upf + "Ga": "Ga.ccECP.upf", + "Ge": "Ge.ccECP.upf", + "H": "H.ccECP.upf", + "He": "He.ccECP.upf", + "I": "I.ccECP.AREP.upf", # I.ccECP.SOREP.upf + "Ir": "Ir.ccECP.AREP.upf", # Ir.ccECP.SOREP.upf + "K": "K.ccECP.upf", + "Kr": "Kr.ccECP.upf", + "Li": "Li.ccECP.upf", + "Mg": "Mg.ccECP.upf", + "Mn": "Mn.ccECP.upf", # Mn.opt.upf + "Mo": "Mo.ccECP.AREP.upf", # Mo.ccECP.SOREP.upf + "N": "N.ccECP.upf", + "Na": "Na.ccECP.upf", + "Ne": "Ne.ccECP.upf", + "Ni": "Ni.ccECP-soft.upf", # Ni.opt.upf + "O": "O.ccECP.upf", + "P": "P.ccECP.upf", + "Pd": "Pd.ccECP.AREP.upf", # Pd.ccECP.SOREP.upf + "S": "S.ccECP.upf", + "Sc": "Sc.ccECP-soft.upf", # Sc.opt.upf + "Se": "Se.ccECP.upf", + "Si": "Si.ccECP.upf", + "Tb": "Tb.ccECP.AREP.upf", # Tb.ccECP.SOREP.upf + "Te": "Te.ccECP.AREP.upf", # Te.ccECP.SOREP.upf + "Ti": "Ti.ccECP-soft.upf", # Ti.opt.upf + "V": "V.ccECP-soft.upf", # V.opt.upf + "W": "W.ccECP.AREP.upf", # W.ccECP.SOREP.upf + "Zn": "Zn.ccECP-soft.upf", # Zn.opt.upf + } + + +def get_pseudo_qmc_dict(): + """Get PSPs for QMC.""" + return { + "Ag": "Ag.ccECP.AREP.xml", # Ag.ccECP.SOREP.xml + "Al": "Al.ccECP.xml", + "Ar": "Ar.ccECP.xml", + "As": "As.ccECP.upf", + "Au": "Au.ccECP.AREP.xml", # Au.ccECP.SOREP.xml + "B": "B.ccECP.xml", + "Be": "Be.ccECP.xml", + "Bi": "Bi.ccECP.AREP.xml", # Bi.ccECP.SOREP.xml + "Br": "Br.ccECP.xml", + "C": "C.ccECP.xml", + "Ca": "Ca.ccECP.xml", + "Cl": "Cl.ccECP.xml", + "Co": "Co.ccECP-soft.xml", # Co.opt.xml + "Cr": "Cr.ccECP.xml", # Cr.opt.xml + "Cu": "Cu.ccECP-soft.xml", # Cu.opt.xml + "F": "F.ccECP.xml", + "Fe": "Fe.ccECP-soft.xml", # Fe.opt.xml + "Ga": "Ga.ccECP.xml", + "Ge": "Ge.ccECP.xml", + "H": "H.ccECP.xml", + "He": "He.ccECP.xml", + "I": "I.ccECP.AREP.xml", # I.ccECP.SOREP.xml + "Ir": "Ir.ccECP.AREP.xml", # Ir.ccECP.SOREP.xml + "K": "K.ccECP.xml", + "Kr": "Kr.ccECP.xml", + "Li": "Li.ccECP.xml", + "Mg": "Mg.ccECP.xml", + "Mn": "Mn.ccECP.xml", # Mn.opt.xml + "Mo": 
"Mo.ccECP.AREP.xml", # Mo.ccECP.SOREP.xml + "N": "N.ccECP.xml", + "Na": "Na.ccECP.xml", + "Ne": "Ne.ccECP.xml", + "Ni": "Ni.ccECP-soft.xml", # Ni.opt.xml + "O": "O.ccECP.xml", + "P": "P.ccECP.upf", + "Pd": "Pd.ccECP.AREP.xml", # Pd.ccECP.SOREP.xml + "S": "S.ccECP.xml", + "Sc": "Sc.ccECP-soft.xml", # Sc.opt.xml + "Se": "Se.ccECP.xml", + "Si": "Si.ccECP.xml", + "Tb": "Tb.ccECP.AREP.xml", # Tb.ccECP.SOREP.xml + "Te": "Te.ccECP.AREP.xml", # Te.ccECP.SOREP.xml + "Ti": "Ti.ccECP-soft.xml", # Ti.opt.xml + "V": "V.ccECP-soft.xml", # V.opt.xml + "W": "W.ccECP.AREP.xml", # W.ccECP.SOREP.xml + "Zn": "Zn.ccECP-soft.xml", # Zn.opt.xml + } diff --git a/jarvis/io/qe/inputs.py b/jarvis/io/qe/inputs.py index d0e124ef..3910183f 100644 --- a/jarvis/io/qe/inputs.py +++ b/jarvis/io/qe/inputs.py @@ -5,6 +5,7 @@ import tarfile from jarvis.core.specie import Specie import numpy as np + # from jarvis.analysis.structure.spacegroup import Spacegroup3D @@ -38,6 +39,8 @@ def __init__( "http://www.physics.rutgers.edu/" "gbrv/all_pbesol_UPF_v1.5.tar.gz" ) + print("Please cite for PSPs:") + print("https://doi.org/10.1016/j.commatsci.2013.08.053") if not os.path.exists(psp_dir): print("Downloading PSPs") tar_file_name = str( diff --git a/jarvis/tasks/nexus/qmc.py b/jarvis/tasks/nexus/qmc.py new file mode 100644 index 00000000..43e22f51 --- /dev/null +++ b/jarvis/tasks/nexus/qmc.py @@ -0,0 +1,115 @@ +"""Module for running QMCPack.""" +from jarvis.io.nexus.inputs import ( + get_Zeff, + get_pseudo_dft_dict, + # get_pseudo_qmc_dict, +) +from jarvis.io.qe.outputs import QEout +from nexus import generate_physical_system +from nexus import generate_pwscf +from nexus import settings, job, run_project, obj +from jarvis.core.atoms import Atoms +from jarvis.db.figshare import get_jid_data +from nexus import read_structure +from jarvis.core.kpoints import Kpoints3D +import numpy as np +import os + + +def get_energy_nexus( + atoms=None, + poscar_path=None, + length=10, + ecut=40, + temp_filename="POSCAR-temp.vasp", + qe_presub="module load qe/7.0.pw2qmcpack", + qmcpack_presub="module load qmcpack/3.14.0", + nodes=2, + cores=16, + threads=1, + app="pw.x", + net_spin=0, + path="scf", + machine="dobby", + pseudo_path="/rk2/knc6/QMC/pseudopotentials", + results_path="/users/knc6/Software/jarvis/usnistgov/jarvis/results", + runs_path="/users/knc6/Software/jarvis/usnistgov/jarvis/runs", +): + """Get energy from Nexus+qmcpack.""" + results = results_path + runs = runs_path + settings( + results=results, + pseudo_dir=pseudo_path, # location of pseudopotential directory + sleep=1, + runs=runs, + machine=machine, + ) + if atoms is None: + atoms = Atoms.from_poscar(poscar_path) + temp_filename = poscar_path + + kp = ( + Kpoints3D() + .automatic_length_mesh(lattice_mat=atoms.lattice_mat, length=length) + ._kpoints + ) + kp = np.array(kp).flatten().tolist() + atoms.write_poscar(temp_filename) + structure = read_structure(temp_filename, format="poscar") + print("structure", structure) + print("Running job") + shared_qe = obj( + occupations="smearing", + smearing="gaussian", + degauss=0.005, + input_dft="PBE", + ecut=ecut, # Plane Wave cutoff energy + conv_thr=1.0e-7, + mixing_beta=0.2, + nosym=True, + use_folded=True, + spin_polarized=True, + ) + + qe_job = job( + nodes=nodes, cores=cores, threads=threads, app=app, presub=qe_presub + ) + Zeff = get_Zeff() + system = generate_physical_system( + structure=structure, + kshift=(0, 0, 0), + net_spin=net_spin, # Specify Up - Down Electrons + **{e: Zeff[e] for e in structure.elem} + ) + pseudo_dft_dict = 
get_pseudo_dft_dict() + arr = [] + for sp in atoms.elements: + arr.append(pseudo_dft_dict[sp]) + scf = generate_pwscf( + identifier="scf", # log output goes to scf.out + path="scf", # directory to run in + job=qe_job, # pyscf must run w/o mpi + system=system, + input_type="scf", + pseudos=arr, + kgrid=kp, + wf_collect=False, + **shared_qe + ) + print('scf', scf) + run_project() + out_file = os.path.join(runs, "scf", "scf.out") + print("out_file", out_file) + qeout = QEout(filename=out_file) + tot_energy = qeout.get_total_energy() + print("tot_energy", tot_energy) + return tot_energy + + +if __name__ == "__main__": + jid = "JVASP-32" + atoms = Atoms.from_dict( + get_jid_data(jid="JVASP-1002", dataset="dft_3d")["atoms"] + ) + get_energy_nexus(atoms=atoms) diff --git a/jarvis/tasks/qe/converg.py b/jarvis/tasks/qe/converg.py new file mode 100644 index 00000000..b4135344 --- /dev/null +++ b/jarvis/tasks/qe/converg.py @@ -0,0 +1,546 @@ +"""Module to converge K-points and Cutoff.""" +from jarvis.core.atoms import Atoms +from jarvis.core.kpoints import Kpoints3D as Kpoints +from jarvis.tasks.qe.qe import QEjob +import time + +sleep = 5 + + +def converg_kpoints( + atoms=None, + length=0, + encut=40, + ecutrho=250, + tol=0.001, + increment=5, + qe_cmd="/cluster/deb9/bin/mpirun -n 16 /cluster/bin/pw.x", + psp_dir=None, +): + """Converge k-points for a material.""" + scf_init = { + "control": { + "calculation": "'scf'", + "restart_mode": "'from_scratch'", + "prefix": "'KPOINTS'", + "outdir": "'./'", + "tstress": ".true.", + "tprnfor": ".true.", + "disk_io": "'nowf'", + "pseudo_dir": None, + "verbosity": "'high'", + "nstep": 100, + "etot_conv_thr": "1.0d-5", + }, + "system": { + "ibrav": 0, + "degauss": 0.01, + "nat": None, + "ntyp": None, + "ecutwfc": encut, + "ecutrho": ecutrho, + "occupations": "'smearing'", + "smearing": "'mp'", + }, + "electrons": { + "diagonalization": "'david'", + "mixing_mode": "'plain'", + "mixing_beta": 0.7, + "conv_thr": "1d-9", + }, + } + + en1 = -10000 + convg_kp1 = False + convg_kp2 = False + length1 = length + kp_list = [] + while not convg_kp2 and not convg_kp1: + length1 = length1 + increment + print("Incrementing length", length1) + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length + ) + + prefix = "KPOINTS-" + str(length1) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length + ) + mesh = kpoints.kpts[0] + if mesh not in kp_list: + kp_list.append(mesh) + scf_init["system"]["ntyp"] = "" + qejob_scf_init = QEjob( + atoms=atoms, + psp_dir=psp_dir, + input_params=scf_init, + output_file="scf_init.out", + qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + print("Energy", info_scf["total_energy"]) + en2 = float(info_scf["total_energy"]) + time.sleep(sleep) + while abs(en2 - en1) > tol: + en1 = en2 + print("Incrementing length", length1) + while mesh in kp_list: + length1 = length1 + increment + # Assuming you are not super unlucky + # kpoints = Auto_Kpoints(mat=mat, length=length1) + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length1 + ) # Auto_Kpoints(mat=mat, length=length) + mesh = kpoints.kpts[0] + + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length1 + ) # Auto_Kpoints(mat=mat, length=length) + mesh = kpoints.kpts[0] + prefix = "KPOINTS-" + str(length1) + 
scf_init["control"]["prefix"] = str('"') + prefix + str('"') + if mesh not in kp_list: + kp_list.append(mesh) + + scf_init["system"]["ntyp"] = "" + qejob_scf_init = QEjob( + atoms=atoms, + input_params=scf_init, + psp_dir=psp_dir, + output_file="scf_init.out", + qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + + info_scf = qejob_scf_init.runjob() + en2 = float(info_scf["total_energy"]) + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + else: + + length1 = length1 + increment + # Assuming you are not super unlucky + # kpoints = Auto_Kpoints(mat=mat, length=length1) + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length1 + ) # Auto_Kpoints(mat=mat, length=length) + mesh = kpoints.kpts[0] + kp_list.append(mesh) + + convg_kp1 = True + + # Some extra points to check + print("Some extra points to check for KPOINTS") + length3 = length1 + increment + prefix = "KPOINTS-" + str(length3) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length3 + ) + mesh = kpoints.kpts[0] + kp_list.append(mesh) + scf_init["system"]["ntyp"] = "" + qejob_scf_init = QEjob( + atoms=atoms, + psp_dir=psp_dir, + input_params=scf_init, + output_file="scf_init.out", + qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + en3 = float(info_scf["total_energy"]) + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + + length4 = length3 + increment + prefix = "KPOINTS-" + str(length4) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length4 + ) + mesh = kpoints.kpts[0] + kp_list.append(mesh) + scf_init["system"]["ntyp"] = "" + qejob_scf_init = QEjob( + atoms=atoms, + psp_dir=psp_dir, + input_params=scf_init, + output_file="scf_init.out", + qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + en4 = info_scf["total_energy"] + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + + length5 = length4 + increment + prefix = "KPOINTS-" + str(length5) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length5 + ) + mesh = kpoints.kpts[0] + kp_list.append(mesh) + scf_init["system"]["ntyp"] = "" + qejob_scf_init = QEjob( + atoms=atoms, + psp_dir=psp_dir, + input_params=scf_init, + output_file="scf_init.out", + qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + en5 = info_scf["total_energy"] + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + + length6 = length5 + increment + prefix = "KPOINTS-" + str(length6) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length6 + ) + mesh = kpoints.kpts[0] + kp_list.append(mesh) + scf_init["system"]["ntyp"] = "" + qejob_scf_init = QEjob( + atoms=atoms, + input_params=scf_init, + psp_dir=psp_dir, + output_file="scf_init.out", + qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + en6 = info_scf["total_energy"] + print("Energy", info_scf["total_energy"]) + 
time.sleep(sleep) + + length7 = length6 + increment + prefix = "KPOINTS-" + str(length7) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length7 + ) + mesh = kpoints.kpts[0] + kp_list.append(mesh) + scf_init["system"]["ntyp"] = "" + qejob_scf_init = QEjob( + atoms=atoms, + input_params=scf_init, + output_file="scf_init.out", + psp_dir=psp_dir, + qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + en7 = info_scf["total_energy"] + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + + if ( + abs(en3 - en2) > tol + or abs(en4 - en2) > tol + or abs(en5 - en2) > tol + or abs(en6 - en2) > tol + or abs(en7 - en2) > tol + ): + fkp = open("EXTRA_KPOINTS", "w") + line = str("Extra KPOINTS needed ") + str(length1) + "\n" + fkp.write(line) + line = ( + str("en2 length1 ") + + str(" ") + + str(en2) + + str(" ") + + str(length1) + + "\n" + ) + fkp.write(line) + line = ( + str("en3 length3 ") + + str(" ") + + str(en3) + + str(" ") + + str(length3) + + "\n" + ) + fkp.write(line) + line = ( + str("en4 length4 ") + + str(" ") + + str(en4) + + str(" ") + + str(length4) + + "\n" + ) + fkp.write(line) + line = ( + str("en5 length5 ") + + str(" ") + + str(en5) + + str(" ") + + str(length5) + + "\n" + ) + fkp.write(line) + fkp.close() + en1 = en3 + length1 = length3 + else: + print( + "KPOINTS convergence achieved for ", length1, + ) + convg_kp2 = True + + return length1 + + +def converg_cutoff( + atoms=None, + length=10, + encut=40, + ecutrho=250, + tol=0.001, + increment=5, + psp_dir=None, + qe_cmd="/cluster/deb9/bin/mpirun -n 16 /cluster/bin/pw.x", +): + """Converge cutoff for a material.""" + scf_init = { + "control": { + "calculation": "'scf'", + "restart_mode": "'from_scratch'", + "prefix": "'ENCUT'", + "outdir": "'./'", + "tstress": ".true.", + "tprnfor": ".true.", + "disk_io": "'nowf'", + "pseudo_dir": None, + "verbosity": "'high'", + "nstep": 100, + "etot_conv_thr": "1.0d-5", + }, + "system": { + "ibrav": 0, + "degauss": 0.01, + "nat": None, + "ntyp": None, + "ecutwfc": encut, + "ecutrho": ecutrho, + "occupations": "'smearing'", + "smearing": "'mp'", + }, + "electrons": { + "diagonalization": "'david'", + "mixing_mode": "'plain'", + "mixing_beta": 0.7, + "conv_thr": "1d-9", + }, + } + + en1 = -10000 + convg_encut1 = False + convg_encut2 = False + encut1 = encut + kpoints = Kpoints().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=length + ) + while not convg_encut2 and not convg_encut1: + + prefix = "ENCUT-" + str(encut1) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + scf_init["system"]["ntyp"] = "" + scf_init["system"]["ecutwfc"] = encut + qejob_scf_init = QEjob( + atoms=atoms, + psp_dir=psp_dir, + input_params=scf_init, + output_file="scf_init.out", + qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + print("Energy", info_scf["total_energy"]) + en2 = float(info_scf["total_energy"]) + time.sleep(sleep) + while abs(en2 - en1) > tol: + en1 = en2 + encut1 = encut1 + increment + print("Incrementing cutoff", encut1) + prefix = "ENCUT-" + str(encut1) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + scf_init["system"]["ntyp"] = "" + scf_init["system"]["ecutwfc"] = encut1 + qejob_scf_init = QEjob( + atoms=atoms, + input_params=scf_init, + psp_dir=psp_dir, + output_file="scf_init.out", + 
qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + + info_scf = qejob_scf_init.runjob() + en2 = float(info_scf["total_energy"]) + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + + convg_encut1 = True + + # Some extra points to check + print("Some extra points to check for ENCUT") + encut3 = encut1 + increment + prefix = "ENCUT-" + str(encut3) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + scf_init["system"]["ntyp"] = "" + scf_init["system"]["ecutwfc"] = encut3 + + qejob_scf_init = QEjob( + atoms=atoms, + input_params=scf_init, + output_file="scf_init.out", + qe_cmd=qe_cmd, + psp_dir=psp_dir, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + en3 = float(info_scf["total_energy"]) + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + + encut4 = encut3 + increment + prefix = "ENCUT-" + str(encut4) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + scf_init["system"]["ntyp"] = "" + scf_init["system"]["ecutwfc"] = encut4 + qejob_scf_init = QEjob( + atoms=atoms, + input_params=scf_init, + psp_dir=psp_dir, + output_file="scf_init.out", + qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + en4 = info_scf["total_energy"] + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + + encut5 = encut4 + increment + prefix = "ENCUT-" + str(encut5) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + scf_init["system"]["ntyp"] = "" + scf_init["system"]["ecutwfc"] = encut5 + qejob_scf_init = QEjob( + atoms=atoms, + input_params=scf_init, + output_file="scf_init.out", + qe_cmd=qe_cmd, + psp_dir=psp_dir, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + en5 = info_scf["total_energy"] + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + + encut6 = encut5 + increment + prefix = "ENCUT-" + str(encut6) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + scf_init["system"]["ntyp"] = "" + scf_init["system"]["ecutwfc"] = encut6 + qejob_scf_init = QEjob( + atoms=atoms, + input_params=scf_init, + output_file="scf_init.out", + psp_dir=psp_dir, + qe_cmd=qe_cmd, + jobname=prefix, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + en6 = info_scf["total_energy"] + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + + encut7 = encut6 + increment + prefix = "ENCUT-" + str(encut7) + scf_init["control"]["prefix"] = str('"') + prefix + str('"') + scf_init["system"]["ntyp"] = "" + scf_init["system"]["ecutwfc"] = encut7 + qejob_scf_init = QEjob( + atoms=atoms, + input_params=scf_init, + output_file="scf_init.out", + qe_cmd=qe_cmd, + jobname=prefix, + psp_dir=psp_dir, + kpoints=kpoints, + input_file=prefix + "_ascf_init.in", + ) + info_scf = qejob_scf_init.runjob() + en7 = info_scf["total_energy"] + print("Energy", info_scf["total_energy"]) + time.sleep(sleep) + + if ( + abs(en3 - en2) > tol + or abs(en4 - en2) > tol + or abs(en5 - en2) > tol + or abs(en6 - en2) > tol + or abs(en7 - en2) > tol + ): + + en1 = en3 + encut = encut1 + fen = open("EXTRA_ENCUT", "w") + line = str("Extra ENCUT needed ") + str(encut) + "\n" + fen.write(line) + fen.close() + else: + print("ENCUT convergence achieved for ", encut) + convg_encut2 = True + return encut + + +if __name__ == "__main__": + from jarvis.db.figshare import 
get_jid_data + + atoms = Atoms.from_dict( + get_jid_data(jid="JVASP-1002", dataset="dft_3d")["atoms"] + ) + # converg_kpoints(atoms=atoms) + converg_cutoff(atoms=atoms) diff --git a/jarvis/tasks/qe/master_super.py b/jarvis/tasks/qe/master_super.py index 8bcbff02..89164113 100644 --- a/jarvis/tasks/qe/master_super.py +++ b/jarvis/tasks/qe/master_super.py @@ -25,8 +25,10 @@ def get_jid_data(jid="JVASP-667", dataset="dft_2d"): qe_cmd = "mpirun -np 16 /home/kfg/codes/q-e-qe-6.5/bin/pw.x" +qe_cmd = "/home/kfg/codes/q-e-qe-6.5/bin/pw.x" run_dir = "/wrk/knc6/Super" +run_dir = "/wrk/knc6/CDVAE_SUP" def non_prime_kpoints(kpts=[]): @@ -53,35 +55,50 @@ def write_qejob(pyname="job.py", job_json=""): f.close() +jids = ["JVASP-816", "JVASP-19821"] +jids = [ + "POSCAR-AlN2Sc.vasp", + "POSCAR-CrV6Pt.vasp", + "POSCAR-NbNiV6.vasp", + "POSCAR-RuNb.vasp", +] submit_job = True -jids = ["JVASP-19821"] +use_preconverged_kpoints = False for i in jids: try: print("jid", i) + if "POSCAR" not in i: + dat = get_jid_data(jid=i, dataset="dft_3d") + a_atoms = Atoms.from_dict(dat["atoms"]) + else: + a_atoms = Atoms.from_poscar(i) dir_name = os.path.join(run_dir, i + "_SUPER") if not os.path.exists(dir_name): os.makedirs(dir_name) os.chdir(dir_name) - dat = get_jid_data(jid=i, dataset="dft_3d") - a_atoms = Atoms.from_dict(dat["atoms"]) + atoms = Spacegroup3D(a_atoms).refined_atoms.get_primitive_atoms # print (atoms) - kp = Kpoints3D().automatic_length_mesh( - # lattice_mat=atoms.lattice_mat, - # length=10 - lattice_mat=atoms.lattice_mat, - length=dat["kpoint_length_unit"], - ) - kpts = kp._kpoints[0] - kpts = non_prime_kpoints(kpts) - kp = Kpoints3D(kpoints=[kpts]) - print("kpts", kpts) - - nq1 = get_factors(kpts[0])[0] - nq2 = get_factors(kpts[1])[0] - nq3 = get_factors(kpts[2])[0] - qp = Kpoints3D(kpoints=[[nq1, nq2, nq3]]) + if use_preconverged_kpoints: + kp = Kpoints3D().automatic_length_mesh( + # lattice_mat=atoms.lattice_mat, + # length=10 + lattice_mat=atoms.lattice_mat, + length=dat["kpoint_length_unit"], + ) + kpts = kp._kpoints[0] + kpts = non_prime_kpoints(kpts) + kp = Kpoints3D(kpoints=[kpts]) + print("kpts", kpts) + + nq1 = get_factors(kpts[0])[0] + nq2 = get_factors(kpts[1])[0] + nq3 = get_factors(kpts[2])[0] + qp = Kpoints3D(kpoints=[[nq1, nq2, nq3]]) + else: + kp = Kpoints3D(kpoints=[]) + qp = Kpoints3D(kpoints=[]) sup = SuperCond(atoms=atoms, kp=kp, qp=qp, qe_cmd=qe_cmd).to_dict() dumpjson(data=sup, filename="sup.json") write_qejob(job_json=os.path.abspath("sup.json")) @@ -98,7 +115,7 @@ def write_qejob(pyname="job.py", job_json=""): job_line=path, jobname=i, directory=os.getcwd(), - queue="mml", + # queue="mml", walltime="330:0:0", submit_cmd=["sbatch", "submit_job"], ) diff --git a/jarvis/tasks/qe/super.py b/jarvis/tasks/qe/super.py index 4af8000a..7aa29919 100644 --- a/jarvis/tasks/qe/super.py +++ b/jarvis/tasks/qe/super.py @@ -1,11 +1,14 @@ """Module to run Tc calculation.""" -# Ref: https://arxiv.org/abs/2205.00060 +# Ref: +# https://www.nature.com/articles/s41524-022-00933-1 from jarvis.io.qe.outputs import DataFileSchema from jarvis.core.atoms import Atoms from jarvis.core.kpoints import Kpoints3D from jarvis.tasks.qe.qe import QEjob +from jarvis.tasks.qe.converg import converg_kpoints import numpy as np import os +from jarvis.core.utils import get_factors # from jarvis.core.utils import get_factors @@ -34,6 +37,19 @@ def parse_lambda(filename="lambda"): print() +def non_prime_kpoints(kpts=[]): + """Get non prime kpoints.""" + mem = [] + for i in kpts: + facts = get_factors(i) + if len(facts) == 1: 
+ val = i + 1 + else: + val = i + mem.append(val) + return mem + + def very_clean(): """Clean files.""" cmd = ( @@ -54,22 +70,28 @@ def __init__( qe_cmd="pw.x", relax_calc="'vc-relax'", pressure=None, + psp_dir=None, ): """Initialize the class.""" self.atoms = atoms self.kp = kp self.qp = qp + # self.kp = kp + # self.qp = qp self.relax_calc = relax_calc self.qe_cmd = qe_cmd + self.psp_dir = psp_dir self.pressure = pressure def to_dict(self): """Get dictionary.""" info = {} info["atoms"] = self.atoms.to_dict() + info["kp"] = self.kp.to_dict() info["qp"] = self.qp.to_dict() info["qe_cmd"] = self.qe_cmd + info["psp_dir"] = self.psp_dir info["relax_calc"] = self.relax_calc info["pressure"] = self.pressure return info @@ -92,6 +114,24 @@ def runjob(self): atoms = self.atoms kp = self.kp qp = self.qp + if not kp._kpoints: + kp_len = converg_kpoints( + atoms=atoms, qe_cmd=self.qe_cmd, psp_dir=self.psp_dir + ) + kp = Kpoints3D().automatic_length_mesh( + lattice_mat=atoms.lattice_mat, length=kp_len + ) + kpts = kp._kpoints[0] + kpts = non_prime_kpoints(kpts) + kp = Kpoints3D(kpoints=[kpts]) + print("kpts", kpts) + + nq1 = get_factors(kpts[0])[0] + nq2 = get_factors(kpts[1])[0] + nq3 = get_factors(kpts[2])[0] + qp = Kpoints3D(kpoints=[[nq1, nq2, nq3]]) + self.kp = kp + self.qp = qp relax = { "control": { # "calculation": "'scf'", @@ -101,7 +141,7 @@ def runjob(self): "outdir": "'./'", "tstress": ".true.", "tprnfor": ".true.", - "disk_io": "'low'", + "disk_io": "'nowf'", "wf_collect": ".true.", "pseudo_dir": None, "verbosity": "'high'", @@ -157,7 +197,7 @@ def runjob(self): "outdir": "'./'", "tstress": ".true.", "tprnfor": ".true.", - "disk_io": "'low'", + "disk_io": "'nowf'", "pseudo_dir": None, "verbosity": "'high'", "nstep": 100, diff --git a/setup.py b/setup.py index d79cee4f..75e1af56 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ setup( name="jarvis-tools", - version="2022.09.16", + version="2022.12.11", long_description=long_d, install_requires=[ "numpy>=1.19.5",
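A minimal usage sketch of the additions above: the newly registered alignn_ff_db and snumat figshare entries and the PhononDos.phonon_isotope_scattering helper. It assumes jarvis-tools 2022.12.11 with phonopy installed and network access to the figshare URLs registered in jarvis/db/figshare.py; the generic data() loader is the existing jarvis.db.figshare entry point (an assumption, not something changed by this patch), and the JVASP-1459 example mirrors the __main__ block in jarvis/analysis/phonon/dos.py.

# Sketch only: load the newly registered datasets and compute a phonon-isotope
# scattering rate. Assumes phonopy is installed and the figshare downloads succeed.
import numpy as np

from jarvis.analysis.phonon.dos import PhononDos
from jarvis.core.atoms import Atoms
from jarvis.db.figshare import data, get_jid_data

# Dataset keys registered in jarvis/db/figshare.py above; each call downloads
# and caches a JSON list of records (alignn_ff_db is a large download).
alignn_ff = data("alignn_ff_db")  # ~307k energy/force/stress entries
snumat = data("snumat")  # ~10k SNUMAT hybrid-functional entries
print(len(alignn_ff), len(snumat))

# Isotope scattering for one material, mirroring the __main__ block in dos.py.
dos_entry = get_jid_data(jid="JVASP-1459", dataset="edos_pdos")
atoms = Atoms.from_dict(get_jid_data(jid="JVASP-1459", dataset="dft_3d")["atoms"])
ph = PhononDos(
    phonon_dos=dos_entry["pdos_elast"],
    phonon_freq_cm=np.arange(0, 1000, 5),
)
print("Isotope scattering rate (Hz):", ph.phonon_isotope_scattering(atoms))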