Add grid_embedding_neurons in training input #4

Open · wants to merge 5 commits into base branch deep_density
Changes from all commits
89 changes: 62 additions & 27 deletions deepmd/pt/model/atomic_model/density_atomic_model.py
@@ -9,6 +9,7 @@
 )
 
 import torch
+import torch.nn as nn
 
 from deepmd.pt.model.descriptor.env_mat import (
     prod_env_mat,
@@ -49,14 +50,24 @@ def __init__(self, descriptor, fitting, type_map, **kwargs):
         self.sel = self.descriptor.get_sel()
         self.nnei = self.descriptor.get_nsel()
         self.axis_neuron = self.descriptor.axis_neuron
-        neurons = []
-        dims = [1 + self.descriptor.repinit_args.tebd_dim] + neurons + [self.descriptor.get_dim_out()]
-        self.grid_embedding_layers = [MLPLayer(
-            dims[i],
-            dims[i+1],
-            precision=env.DEFAULT_PRECISION,
-            activation_function="tanh",
-        ) for i in range(len(neurons)+1)]
+        # Initialize grid embedding layers if specified
+        self.grid_embedding_neurons = getattr(fitting, 'grid_embedding_neurons', [])
+        if self.grid_embedding_neurons:
+            input_dim = 1 + self.descriptor.repinit_args.tebd_dim
+            output_dim = self.descriptor.get_dim_out()
+            dims = [input_dim] + self.grid_embedding_neurons + [output_dim]
+            self.grid_embedding_layers = nn.ModuleList([
+                MLPLayer(
+                    dims[i],
+                    dims[i+1],
+                    precision=env.DEFAULT_PRECISION,
+                    activation_function="tanh",
+                ) for i in range(len(dims)-1)
+            ])
+        else:
+            self.grid_embedding_layers = None
 
         wanted_shape = (1, self.nnei, 4)
         mean = torch.zeros(
@@ -68,6 +79,15 @@ def __init__(self, descriptor, fitting, type_map, **kwargs):
         self.register_buffer("mean", mean)
         self.register_buffer("stddev", stddev)
 
+    def _apply_grid_embedding(self, h2_and_type: torch.Tensor) -> torch.Tensor:
+        """Apply grid embedding if layers are present, otherwise return input directly."""
+        if self.grid_embedding_layers is not None:
+            gg = h2_and_type
+            for layer in self.grid_embedding_layers:
+                gg = layer(gg)
+            return gg
+        return h2_and_type
+
     def forward_atomic(
         self,
         extended_coord,
@@ -82,27 +102,34 @@ def forward_atomic(
         grid_nlist: Optional[torch.Tensor] = None,
     ) -> Dict[str, torch.Tensor]:
         """Return atomic prediction.
 
         Parameters
         ----------
-        extended_coord
-            coodinates in extended region
-        extended_atype
-            atomic type in extended region
-        nlist
-            neighbor list. nf x nloc x nsel
-        mapping
-            mapps the extended indices to local indices
-        fparam
-            frame parameter. nf x ndf
-        aparam
-            atomic parameter. nf x nloc x nda
+        extended_coord : torch.Tensor
+            Extended coordinates
+        extended_atype : torch.Tensor
+            Extended atom types
+        nlist : torch.Tensor
+            Neighbor list
+        mapping : Optional[torch.Tensor], optional
+            Maps the extended indices to local indices, by default None
+        fparam : Optional[torch.Tensor], optional
+            Frame parameter, by default None
+        aparam : Optional[torch.Tensor], optional
+            Atomic parameter, by default None
+        comm_dict : Optional[Dict[str, torch.Tensor]], optional
+            Communication dictionary, by default None
+        grid : Optional[torch.Tensor], optional
+            Grid points, by default None
+        grid_type : Optional[torch.Tensor], optional
+            Grid point types, by default None
+        grid_nlist : Optional[torch.Tensor], optional
+            Grid neighbor list, by default None
 
         Returns
         -------
-        result_dict
-            the result dict, defined by the `FittingOutputDef`.
-
+        Dict[str, torch.Tensor]
+            Dictionary containing the density predictions
         """
         nframes, nloc, nnei = nlist.shape
         atype = extended_atype[:, :nloc]
@@ -159,9 +186,17 @@ def forward_atomic(
         # nb x ngrid x nnei x (1+ntebd)
         h2_and_type = torch.concat([h2[:, :, :, :1], grid_tebd], -1)
         # nb x ngrid x nnei x ng1
-        gg = h2_and_type
-        for layer in self.grid_embedding_layers:
-            gg = layer(gg)
+        gg = self._apply_grid_embedding(h2_and_type)
+        if self.grid_embedding_layers is None:
+            # No embedding layers: map the (1+ntebd) input channels onto
+            # dim_out channels with a fixed all-ones weight, i.e. every
+            # output channel is the sum over the input channels.
+            weight = torch.ones(
+                self.descriptor.get_dim_out(),
+                gg.shape[-1],
+                device=gg.device,
+                dtype=gg.dtype,  # match the input tensor dtype
+            )
+            gg = torch.nn.functional.linear(gg, weight, bias=None)
 
         # electron-to-atom equivariant feature: nb x ngrid x nnei x 4 x ng1
         e2aef = h2.unsqueeze(-1) * gg.unsqueeze(-2)
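A minimal standalone sketch of what the new grid embedding computes, with plain torch.nn.Linear plus tanh standing in for deepmd's MLPLayer (resnet_dt and precision handling omitted); all sizes here are illustrative:

    import torch
    import torch.nn as nn

    tebd_dim, dim_out = 8, 32          # illustrative; real values come from the descriptor
    grid_embedding_neurons = [64, 32]  # the new input argument

    dims = [1 + tebd_dim] + grid_embedding_neurons + [dim_out]
    layers = nn.ModuleList(
        nn.Linear(dims[i], dims[i + 1]) for i in range(len(dims) - 1)
    )

    h2_and_type = torch.randn(2, 10, 5, 1 + tebd_dim)  # nb x ngrid x nnei x (1+ntebd)
    gg = h2_and_type
    for layer in layers:
        gg = torch.tanh(layer(gg))     # MLPLayer applies tanh internally; emulated here
    print(gg.shape)                    # torch.Size([2, 10, 5, 32]) -> nb x ngrid x nnei x ng1

    # With no hidden layers configured, the PR instead broadcasts the channel sum:
    weight = torch.ones(dim_out, h2_and_type.shape[-1])
    fallback = torch.nn.functional.linear(h2_and_type, weight)  # same trailing dim_out

Using nn.ModuleList rather than a plain Python list (as in the replaced code) registers the layers as submodules, so their parameters appear in state_dict() and follow the model across .to() calls.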
8 changes: 7 additions & 1 deletion deepmd/pt/model/task/density.py
@@ -43,6 +43,7 @@ def __init__(
         ntypes: int,
         dim_descrpt: int,
         neuron: List[int] = [128, 128, 128],
+        grid_embedding_neurons: List[int] = [],
         bias_atom_e: Optional[torch.Tensor] = None,
         resnet_dt: bool = True,
         numb_fparam: int = 0,
@@ -71,6 +72,7 @@ def __init__(
             type_map=type_map,
             **kwargs,
         )
+        self.grid_embedding_neurons = grid_embedding_neurons
 
     def output_def(self) -> FittingOutputDef:
         return FittingOutputDef(
@@ -91,13 +93,17 @@ def deserialize(cls, data: dict) -> "GeneralFitting":
         check_version_compatibility(data.pop("@version", 1), 2, 1)
         data.pop("var_name")
         data.pop("dim_out")
-        return super().deserialize(data)
+        grid_embedding_neurons = data.pop("grid_embedding_neurons", [])
+        instance = super().deserialize(data)
+        instance.grid_embedding_neurons = grid_embedding_neurons
+        return instance
 
     def serialize(self) -> dict:
         """Serialize the fitting to dict."""
         return {
             **super().serialize(),
             "type": "density",
+            "grid_embedding_neurons": self.grid_embedding_neurons,
         }
 
     # make jit happy with torch 2.0.0
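The intended round trip for the new key, sketched with the constructor arguments visible in this diff. DensityFittingNet stands in for the fitting class defined in deepmd/pt/model/task/density.py; its actual name is not shown in the hunks. deserialize pops the key before delegating, presumably so the parent GeneralFitting.deserialize never sees an argument it does not handle, and re-attaches it afterwards:

    # Hypothetical round trip; class name and argument values are illustrative.
    fitting = DensityFittingNet(
        ntypes=2,
        dim_descrpt=128,
        neuron=[128, 128, 128],
        grid_embedding_neurons=[64, 32],
    )
    data = fitting.serialize()          # now contains "grid_embedding_neurons"
    restored = DensityFittingNet.deserialize(data)
    assert restored.grid_embedding_neurons == [64, 32]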
9 changes: 8 additions & 1 deletion deepmd/utils/argcheck.py
@@ -1499,6 +1499,7 @@ def fitting_density():
     doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provided the input fparams."
     doc_numb_aparam = "The dimension of the atomic parameter. If set to >0, file `aparam.npy` should be included to provided the input aparams."
     doc_neuron = "The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built."
+    doc_grid_embedding_neurons = "The number of neurons in each hidden layer of the grid embedding network. Default is an empty list, which means no hidden layers."
     doc_activation_function = f'The activation function in the fitting net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.'
     doc_precision = f"The precision of the fitting net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
     doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection'
@@ -1519,6 +1520,13 @@ def fitting_density():
             alias=["n_neuron"],
             doc=doc_neuron,
         ),
+        Argument(
+            "grid_embedding_neurons",
+            List[int],
+            optional=True,
+            default=[],
+            doc=doc_grid_embedding_neurons,
+        ),
         Argument(
             "activation_function",
             str,
@@ -1541,7 +1549,6 @@ def fitting_density():
         Argument("seed", [int, None], optional=True, doc=doc_seed),
     ]
 
-
 @fitting_args_plugin.register("dos")
 def fitting_dos():
     doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provided the input fparams."
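With the Argument registered, the new key becomes valid in the fitting_net section of the training input. A minimal sketch of that section, written here as a Python dict; the values shown are illustrative choices, not recommended settings:

    fitting_net = {
        "type": "density",
        "neuron": [128, 128, 128],           # fitting-net hidden layers (existing argument)
        "grid_embedding_neurons": [32, 32],  # hidden layers of the grid embedding MLP (new)
    }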