From f05ca027046094ea71c37778ae47ef123fce367f Mon Sep 17 00:00:00 2001 From: chendanyang Date: Thu, 11 Dec 2025 23:27:00 +0800 Subject: [PATCH] e3nn docs update --- .../applications/deephe3nn/models/model.py | 2 +- mindscience/e3nn/README.md | 2 +- mindscience/e3nn/README_en.md | 2 +- mindscience/e3nn/__init__.py | 2 + mindscience/e3nn/nn/activation.py | 21 +- mindscience/e3nn/nn/batchnorm.py | 35 +-- mindscience/e3nn/nn/fc.py | 21 +- mindscience/e3nn/nn/gate.py | 69 +++--- mindscience/e3nn/nn/normact.py | 36 ++- mindscience/e3nn/nn/one_hot.py | 56 +++-- mindscience/e3nn/nn/scatter.py | 29 ++- mindscience/e3nn/o3/irreps.py | 119 ++++++---- mindscience/e3nn/o3/norm.py | 19 +- mindscience/e3nn/o3/rotation.py | 112 ++++----- mindscience/e3nn/o3/spherical_harmonics.py | 25 +- mindscience/e3nn/o3/sub.py | 77 +++--- mindscience/e3nn/o3/tensor_product.py | 29 +-- mindscience/e3nn/o3/wigner.py | 44 ++-- mindscience/e3nn/so2_conv/__init__.py | 3 +- .../e3nn/so2_conv/init_edge_rot_mat.py | 19 +- mindscience/e3nn/so2_conv/so2.py | 75 +++--- mindscience/e3nn/so2_conv/so3.py | 89 ++++--- mindscience/e3nn/so2_conv/wigner.py | 15 +- mindscience/e3nn/utils/batch_dot.py | 2 +- mindscience/e3nn/utils/func.py | 19 +- mindscience/e3nn/utils/initializer.py | 3 +- mindscience/e3nn/utils/ncon.py | 221 ++---------------- mindscience/e3nn/utils/radius.py | 45 ++-- tests/e3nn/o3/test_sub.py | 17 -- 29 files changed, 545 insertions(+), 663 deletions(-) diff --git a/MindChem/applications/deephe3nn/models/model.py b/MindChem/applications/deephe3nn/models/model.py index f267c556e..2b2c40485 100644 --- a/MindChem/applications/deephe3nn/models/model.py +++ b/MindChem/applications/deephe3nn/models/model.py @@ -33,7 +33,7 @@ from mindscience.e3nn.o3.spherical_harmonics import SphericalHarmonics from mindscience.e3nn.o3.sub import (FullyConnectedTensorProduct, Linear, LinearBias) from mindscience.e3nn.so2_conv import SO2Convolution, SO3Rotation -from mindscience.e3nn.so2_conv.init_edge_rot_mat import init_edge_rot_mat +from mindscience.e3nn.so2_conv import init_edge_rot_mat from .e3modules import (E3ElementWise, E3LayerNorm, SelfTp, SeparateWeightTensorProduct, SkipConnection, diff --git a/mindscience/e3nn/README.md b/mindscience/e3nn/README.md index 2acff963f..4029d5d9d 100644 --- a/mindscience/e3nn/README.md +++ b/mindscience/e3nn/README.md @@ -147,7 +147,7 @@ mindscience.e3nn/ ```python from mindscience.e3nn.so2_conv import SO2Convolution, SO3Rotation -from mindscience.e3nn.so2_conv.init_edge_rot_mat import init_edge_rot_mat +from mindscience.e3nn.so2_conv import init_edge_rot_mat irreps_in = "2x0e + 1x1o" irreps_out = "1x0e + 1x1o" diff --git a/mindscience/e3nn/README_en.md b/mindscience/e3nn/README_en.md index 334b33ab3..f0d14f7aa 100644 --- a/mindscience/e3nn/README_en.md +++ b/mindscience/e3nn/README_en.md @@ -149,7 +149,7 @@ mindscience.e3nn/ ```python from mindscience.e3nn.so2_conv import SO2Convolution, SO3Rotation -from mindscience.e3nn.so2_conv.init_edge_rot_mat import init_edge_rot_mat +from mindscience.e3nn.so2_conv import init_edge_rot_mat irreps_in = "2x0e + 1x1o" irreps_out = "1x0e + 1x1o" diff --git a/mindscience/e3nn/__init__.py b/mindscience/e3nn/__init__.py index 5ba0a5f68..2837c0a74 100644 --- a/mindscience/e3nn/__init__.py +++ b/mindscience/e3nn/__init__.py @@ -15,9 +15,11 @@ """init for e3 module""" from .o3 import * from .nn import * +from .so2_conv import * from .utils import * __all__ = [] __all__.extend(o3.__all__) __all__.extend(nn.__all__) +__all__.extend(so2_conv.__all__) 
__all__.extend(utils.__all__) diff --git a/mindscience/e3nn/nn/activation.py b/mindscience/e3nn/nn/activation.py index 8553fd84e..ca2d96c8a 100644 --- a/mindscience/e3nn/nn/activation.py +++ b/mindscience/e3nn/nn/activation.py @@ -59,15 +59,15 @@ class _Normalize(nn.Cell): class Activation(nn.Cell): r""" - Activation function for scalar-tensors. The parities of irreps may be changed according to the parity of each - activation functions. - Odd scalars require the corresponding activation functions to be odd or even. + Activation function for scalar irreps (:math:`l = 0`). The parity of each irrep may change depending on whether the + corresponding activation function is even or odd. Even scalars (`0e`) keep their parity; odd scalars (`0o`) flip + to even (`0e`) when an even activation is applied and remain odd (`0o`) only if an odd activation is used. Args: - irreps_in (Union[str, Irrep, Irreps]): the input irreps. - acts (List[Func]): a list of activation functions for each part of `irreps_in`. - The length of the `acts` will be clipped or filled by identity functions to match the length of `irreps_in`. - dtype (mindspore.dtype): The type of input tensor. Default: ``mindspore.float32``. + irreps_in (Union[str, Irrep, Irreps]): The input irreps. + acts (List[Func]): A list of activation functions for each part of `irreps_in`. + The length of `acts` will be clipped or filled with identity functions to match the length of `irreps_in`. + dtype (mindspore.dtype): The data type of the input tensor. Default: ``mindspore.float32``. Inputs: - **inputs** (Tensor) - The shape of Tensor is :math:`(*, irreps\_in.dim)`. @@ -76,14 +76,11 @@ class Activation(nn.Cell): - **outputs** (Tensor) - The shape of Tensor is :math:`(*, irreps\_in.dim)`. Raises: - ValueError: If `irreps_in` contain non-scalar irrep. + ValueError: If `irreps_in` contains non-scalar irrep. ValueError: If a irrep in `irreps_in` is odd, but the corresponding activation function is neither even nor odd. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.nn import Activation + >>> from mindscience.e3nn.nn import Activation >>> from mindspore import ops, Tensor >>> act = Activation('3x0o+2x0e+1x0o', [ops.abs, ops.tanh]) >>> print(act) diff --git a/mindscience/e3nn/nn/batchnorm.py b/mindscience/e3nn/nn/batchnorm.py index 9a98cb0ba..732549010 100644 --- a/mindscience/e3nn/nn/batchnorm.py +++ b/mindscience/e3nn/nn/batchnorm.py @@ -21,20 +21,26 @@ from ..o3.irreps import Irreps class BatchNorm(nn.Cell): r""" - Batch normalization for orthonormal representations. - It normalizes by the norm of the representations. - Note that the norm is invariant only for orthonormal representations. - Irreducible representations `wigner_D` are orthonormal. + Batch normalization tailored for orthonormal group representations. + + Unlike conventional BatchNorm, this layer normalizes each irreducible + representation block by its **invariant norm**, ensuring equivariance is + preserved under group actions such as rotations. Statistics are computed + independently per multiplicity block, keeping the tensor structure intact. + + The norm is invariant only for orthonormal representations. Irreducible + representations `wigner_D` (and any real basis derived from them) satisfy + this requirement, making the layer safe for standard `e3nn` irreps. Args: - irreps (Union[str, Irrep, Irreps]): the input irreps. - eps (float): avoid division by zero when we normalize by the variance. Default: ``1e-5``. 
- momentum (float): momentum of the running average. Default: ``0.1``. - affine (bool): do we have weight and bias parameters. Default: ``True``. - reduce (str): {'mean', 'max'}, method used to reduce. Default: ``'mean'``. - instance (bool): apply instance norm instead of batch norm. Default: ``Flase``. - normalization (str): {'component', 'norm'}, normalization method. Default: ``'component'``. - dtype (mindspore.dtype): The type of input tensor. Default: ``mindspore.float32``. + irreps (Union[str, Irrep, Irreps]): Input irreps. + eps (float): Small constant to avoid division by zero when normalizing by variance. Default: ``1e-5``. + momentum (float): Momentum for the running average. Default: ``0.1``. + affine (bool): Whether to include learnable weight and bias parameters. Default: ``True``. + reduce (str): Reduction method, either ``'mean'`` or ``'max'``. Default: ``'mean'``. + instance (bool): If ``True``, apply instance normalization instead of batch normalization. Default: ``False``. + normalization (str): Normalization method, either ``'component'`` or ``'norm'``. Default: ``'component'``. + dtype (mindspore.dtype): Data type of the input tensor. Default: ``mindspore.float32``. Inputs: - **input** (Tensor) - The shape of Tensor is :math:`(batch, ..., irreps.dim)`. @@ -46,11 +52,8 @@ class BatchNorm(nn.Cell): ValueError: If `reduce` is not in ['mean', 'max']. ValueError: If `normalization` is not in ['component', 'norm']. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.nn import BatchNorm + >>> from mindscience.e3nn.nn import BatchNorm >>> from mindspore import ops, Tensor >>> bn = BatchNorm('3x0o+2x0e+1x0o') >>> print(bn) diff --git a/mindscience/e3nn/nn/fc.py b/mindscience/e3nn/nn/fc.py index 4d85dc4e2..0bd1253da 100644 --- a/mindscience/e3nn/nn/fc.py +++ b/mindscience/e3nn/nn/fc.py @@ -51,13 +51,15 @@ class _Layer(nn.Cell): class FullyConnectedNet(nn.SequentialCell): r""" Fully-connected Neural Network with normalized activation on scalars. - + It stacks multiple dense layers and automatically normalizes the activation + function to maintain stable signal magnitudes during forward/backward passes. + Args: - h_list (List[int]): a list of input, internal and output dimensions for dense layers. - act (Func): activation function which will be automatically normalized. Default: ``None``. - out_act (bool): whether apply the activation function on the output. Default: ``False``. - init_method (Union[str, mindspore.common.initializer]): initialize parameters. Default: ``'normal'``. - dtype (mindspore.dtype): The type of input tensor. Default: ``mindspore.float32``. + h_list (List[int]): A list of input, internal and output dimensions for dense layers. + act (Func, optional): Activation function which will be automatically normalized. Default: ``None``. + out_act (bool, optional): Whether to apply the activation function on the output. Default: ``False``. + init_method (Union[str, mindspore.common.initializer], optional): Method to initialize parameters. Default: ``'normal'``. + dtype (mindspore.dtype, optional): The data type of the input tensor. Default: ``mindspore.float32``. Inputs: - **input** (Tensor) - The shape of Tensor is :math:`(h\_list[0])`. @@ -68,12 +70,9 @@ class FullyConnectedNet(nn.SequentialCell): Raises: TypeError: If the elements `h_list` are not `int`. 
- Supported Platforms: - ``Ascend`` - Examples: >>> import mindspore as ms - >>> from mindchemistry.e3.nn import FullyConnectedNet + >>> from mindscience.e3nn.nn import FullyConnectedNet >>> fc = FullyConnectedNet([4,10,20,12,6], ops.tanh) FullyConnectedNet [4, 10, 20, 12, 6] >>> v = ms.Tensor([.1,.2,.3,.4]) @@ -94,7 +93,7 @@ class FullyConnectedNet(nn.SequentialCell): for i, (h1, h2) in enumerate(zip(self.h_list, self.h_list[1:])): if not isinstance(h1, int) or not isinstance(h2, int): - raise TypeError + raise TypeError(f"h_list[{i}] and h_list[{i+1}] must be int, but got {h1} and {h2}") if i == len(self.h_list) - 2 and (not out_act): a = identity diff --git a/mindscience/e3nn/nn/gate.py b/mindscience/e3nn/nn/gate.py index f67a35d55..8b7c21872 100644 --- a/mindscience/e3nn/nn/gate.py +++ b/mindscience/e3nn/nn/gate.py @@ -77,32 +77,50 @@ class _Sortcut(nn.Cell): class Gate(nn.Cell): r""" - Gate activation function. The input contain three parts: the first part `irreps_scalars` are scalars that only be - affected by activation functions `acts`; - the second part `irreps_gates` are scalars that be affected by activation functions `act_gates` and be multiplied - on the third part. + Gate activation function. + + The input tensor is conceptually split into three disjoint subsets: + + 1. **Scalars for activation** (`irreps_scalars`): + These scalars are transformed element-wise by the corresponding + activation functions in `acts`, without affecting any other part. + + 2. **Scalars for gating** (`irreps_gates`): + These scalars are transformed element-wise by the corresponding + activation functions in `act_gates`, and then used as *gates* to + modulate the third subset. + + 3. **Gated irreps** (`irreps_gated`): + These irreps (of any angular momentum) are multiplied + channel-wise by the gated scalars produced in step 2. + + Mathematically, the operation is expressed as .. math:: - \left(\bigoplus_i \phi_i(x_i) \right) \oplus \left(\bigoplus_j \phi_j(g_j) y_j \right) + \left( \bigoplus_i \phi_i(x_i) \right) + \oplus + \left( \bigoplus_j \phi_j(g_j)\, y_j \right), + + where - where :math:`x_i` and :math:`\phi_i` are from `irreps_scalars` and `acts`, and :math:`g_j`, :math:`\phi_j`, - and :math:`y_j` are from `irreps_gates`, `act_gates`, and `irreps_gated`. + * :math:`x_i` and :math:`\phi_i` correspond to the `irreps_scalars` and `acts`, + * :math:`g_j`, :math:`\phi_j`, and :math:`y_j` correspond to the `irreps_gates`, + `act_gates`, and `irreps_gated`, respectively. + + The output irreps are the concatenation of the transformed scalars and the + gated irreps, preserving the overall equivariance properties. Args: - irreps_scalars (Union[str, Irrep, Irreps]): the input scalar irreps that will be passed through the - activation functions `acts`. - acts (List[Func]): a list of activation functions for each part of `irreps_scalars`. - The length of the `acts` will be clipped or filled by identity functions to match the length of - `irreps_scalars`. - irreps_gates (Union[str, Irrep, Irreps]): the input scalar irreps that will be passed through the - activation functions `act_gates` and multiplied by `irreps_gated`. - act_gates (List[Func]): a list of activation functions for each part of `irreps_gates`. - The length of the `acts` will be clipped or filled by identity functions to match the length of - `irreps_gates`. - irreps_gated (Union[str, Irrep, Irreps]): the input irreps that will be gated. - dtype (mindspore.dtype): The type of input tensor. Default: ``mindspore.float32``. 
- ncon_dtype (mindspore.dtype): The type of input tensors of ncon computation module. - Default: ``mindspore.float32``. + irreps_scalars (Union[str, Irrep, Irreps]): Scalar irreps to be activated by `acts`. + acts (List[Func]): Activation functions for each part of `irreps_scalars`. + Length is auto-padded/clipped with identity functions to match `irreps_scalars`. + irreps_gates (Union[str, Irrep, Irreps]): Scalar irreps to be activated by `act_gates` + and used as gates for `irreps_gated`. + act_gates (List[Func]): Activation functions for each part of `irreps_gates`. + Length is auto-padded/clipped with identity functions to match `irreps_gates`. + irreps_gated (Union[str, Irrep, Irreps]): Irreps to be gated. + dtype (mindspore.dtype): Input tensor dtype. Default: ``mindspore.float32``. + ncon_dtype (mindspore.dtype): Dtype for ncon computation. Default: ``mindspore.float32``. Inputs: - **input** (Tensor) - The shape of Tensor is :math:`(..., irreps\_in.dim)`. @@ -111,16 +129,13 @@ class Gate(nn.Cell): - **output** (Tensor) - The shape of Tensor is :math:`(..., irreps\_out.dim)`. Raises: - ValueError: If `irreps_scalars` or `irreps_gates` contain non-scalar irrep. - ValueError: If the total multiplication of `irreps_gates` do not match the total multiplication of + ValueError: If `irreps_scalars` or `irreps_gates` contains non-scalar irrep. + ValueError: If the total multiplication of `irreps_gates` does not match the total multiplication of `irreps_gated`. - Supported Platforms: - ``Ascend`` - Examples: >>> from mindspore import ops - >>> from mindchemistry.e3.nn import Gate + >>> from mindscience.e3nn.nn import Gate >>> Gate('2x0e', [ops.tanh], '1x0o+2x0e', [ops.abs], '2x1o+1x2e') Gate (2x0e+1x0o+2x0e+2x1o+1x2e -> 2x0e+2x1o+1x2e) """ diff --git a/mindscience/e3nn/nn/normact.py b/mindscience/e3nn/nn/normact.py index 2080931fa..03efe67c4 100644 --- a/mindscience/e3nn/nn/normact.py +++ b/mindscience/e3nn/nn/normact.py @@ -22,23 +22,25 @@ from ..o3.norm import Norm class NormActivation(nn.Cell): - r"""Activation function for the norm of irreps. + r""" + Activation function for the norm of irreps. Applies a scalar activation to the norm of each irrep and outputs a (normalized) version of that irrep multiplied - by the scalar output of the scalar activation. + by the scalar output of the scalar activation. Optionally, a learnable bias can be added to the norms before + the activation, and the resulting features can be normalized by their original norm to preserve angular + information while only modulating their magnitude. Args: - irreps_in (Union[str, Irrep, Irreps]): the input irreps. - act (Func): an activation function for each part of the norm of `irreps_in`. - normalize (bool): whether to normalize the input features before multiplying them by the scalars from the - nonlinearity. Default: True. - epsilon (float): when ``normalize``, norms smaller than ``epsilon`` will be clamped up to ``epsilon`` - to avoid division by zero. Not allowed when `normalize` is False. Default: None. - bias (bool): whether to apply a learnable additive bias to the inputs of the `act`. Default: False. - init_method (Union[str, float, mindspore.common.initializer]): initialize parameters. - Default: ``'normal'``. - dtype (mindspore.dtype): The type of input tensor. Default: ``mindspore.float32``. - ncon_dtype (mindspore.dtype): The type of input tensors of ncon computation module. - Default: ``mindspore.float32``. + irreps_in (Union[str, Irrep, Irreps]): Input irreps.
+ act (Func): Activation function applied to the norm of each irrep. + normalize (bool): Whether to normalize input features before multiplying by the scalars from the + nonlinearity. Default: ``True``. + epsilon (float): When ``normalize``, norms smaller than ``epsilon`` are clamped to ``epsilon`` + to prevent division by zero. Ignored if ``normalize`` is ``False``. Default: ``None``. + bias (bool): Whether to apply a learnable additive bias to the inputs of ``act``. Default: ``False``. + init_method (Union[str, float, mindspore.common.initializer]): Parameter initialization method. + Default: ``'zeros'``. + dtype (mindspore.dtype): Data type of input tensors. Default: ``mindspore.float32``. + ncon_dtype (mindspore.dtype): Data type for ncon computation. Default: ``mindspore.float32``. Inputs: - **input** (Tensor) - The shape of Tensor is :math:`(..., irreps\_in.dim)`. @@ -50,13 +52,9 @@ class NormActivation(nn.Cell): ValueError: If `epsilon` is not None and `normalize` is False. ValueError: If `epsilon` is not positive. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.nn import NormActivation + >>> from mindscience.e3nn.nn import NormActivation >>> from mindspore import ops, Tensor - >>> set_context(device_id=6) >>> norm_activation = NormActivation("2x1e", ops.sigmoid, bias=True) >>> print(norm_activation) NormActivation [sigmoid] (2x1e -> 2x1e) diff --git a/mindscience/e3nn/nn/one_hot.py b/mindscience/e3nn/nn/one_hot.py index 262b4863b..deb6349d2 100644 --- a/mindscience/e3nn/nn/one_hot.py +++ b/mindscience/e3nn/nn/one_hot.py @@ -32,29 +32,49 @@ def soft_unit_step(x): x \mapsto \theta(x) e^{-1/x} Args: - x (Tensor): the input tensor. + x (Tensor): Input tensor. Returns: Tensor, the output of the unit step function. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.nn import soft_unit_step - >>> from mindspore import ops, set_context, Tensor + >>> from mindscience.e3nn.nn import soft_unit_step + >>> from mindspore import ops, Tensor >>> x = Tensor(ops.linspace(-1.0, 10.0, 1000)) >>> outputs = soft_unit_step(x) >>> print(outputs.shape) (1000,) - """ return ops.relu(x) * ops.exp(- 1 / x) / x class OneHot(nn.Cell): r""" - One-hot embedding. + One-hot embedding with irreps support. + + The output is automatically wrapped with :class:`~e3nn.o3.Irreps` to indicate + that it transforms as a collection of scalar (:math:`l = 0`) representations. This allows + the embedding to be used seamlessly in e3nn networks that expect irreps + annotations. + + Args: + num_types (int): Number of distinct atom types. + dtype (mindspore.dtype): Data type of the embedding. Default: ``mindspore.float32``. + + Inputs: + - **atom_type** (Tensor) - Tensor of shape :math:`(...)`, containing integer atom-type indices. + + Outputs: + - **output** (Tensor) - One-hot tensor of shape :math:`(..., \text{num_types})`. + + Examples: + >>> from mindscience.e3nn.nn import OneHot + >>> from mindspore import Tensor + >>> one_hot = OneHot(num_types=4) + >>> atom_type = Tensor([0, 2, 1]) + >>> out = one_hot(atom_type) + >>> print(out.shape) + (3, 4) """ def __init__(self, num_types, dtype=float32): @@ -83,17 +103,19 @@ class SoftOneHotLinspace(nn.Cell): .. math:: y_i(x) = \frac{1}{Z} f_i(x) - where :math:`x` is the input and :math:`f_i` is the ith basis function. + where :math:`x` is the input and :math:`f_i` is the ith basis function and :math:`Z` is a constant defined (if possible) such that, .. 
math:: \langle \sum_{i=1}^N y_i(x)^2 \rangle_x \approx 1 - Note that `bessel` basis cannot be normalized. + Note that `bessel` basis cannot be normalized. The resulting features are + designed to be invariant under translations and rotations, making them + suitable for encoding radial or scalar information in 3D geometric models. Args: start (float): minimum value span by the basis. - end (float): maximum value span by the basis. + end (float): maximum value span by the basis. number (int): number of basis functions :math:`N`. basis (str): {'gaussian', 'cosine', 'smooth_finite', 'fourier', 'bessel'}, the basis family. Default: ``'smooth_finite'``. @@ -110,11 +132,8 @@ class SoftOneHotLinspace(nn.Cell): Raises: ValueError: If `basis` is not in {'gaussian', 'cosine', 'smooth_finite', 'fourier', 'bessel'}. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.nn import SoftOneHotLinspace + >>> from mindscience.e3nn.nn import SoftOneHotLinspace >>> from mindspore import ops, Tensor >>> soft_one_hot_linspace = SoftOneHotLinspace(-0.5, 1.5, number=4) >>> x = Tensor(ops.ones((4, 6))) @@ -209,7 +228,7 @@ def soft_one_hot_linspace(x, start, end, number, basis='smooth_finite', cutoff=T Args: x (Tensor): The shape of Tensor is :math:`(...)`. start (float): minimum value span by the basis. - end (float): maximum value span by the basis. + end (float): maximum value span by the basis. number (int): number of basis functions :math:`N`. basis (str): {'gaussian', 'cosine', 'smooth_finite', 'fourier', 'bessel'}, the basis family. Default: ``'smooth_finite'``. @@ -222,11 +241,8 @@ def soft_one_hot_linspace(x, start, end, number, basis='smooth_finite', cutoff=T Raises: ValueError: If `basis` is not in {'gaussian', 'cosine', 'smooth_finite', 'fourier', 'bessel'}. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.nn import soft_one_hot_linspace + >>> from mindscience.e3nn.nn import soft_one_hot_linspace >>> from mindspore import ops, Tensor >>> x = Tensor(ops.ones((4, 6))) >>> outputs = soft_one_hot_linspace(x, -0.5, 1.5, number=4) diff --git a/mindscience/e3nn/nn/scatter.py b/mindscience/e3nn/nn/scatter.py index 922ac15ef..17a071f7d 100644 --- a/mindscience/e3nn/nn/scatter.py +++ b/mindscience/e3nn/nn/scatter.py @@ -19,17 +19,40 @@ from mindspore.ops import operations as P class Scatter(nn.Cell): r""" - Easy-use version of scatter. + Easy-to-use wrapper for scatter operations: aggregates source values into a destination tensor according to index. Args: mode (str): {'add', 'sum', 'div', 'max', 'min', 'mul'}, scatter mode. + 'add' or 'sum': element-wise addition. + 'div': element-wise division. + 'max': element-wise maximum. + 'min': element-wise minimum. + 'mul': element-wise multiplication. + Default: `'add'` Raises: ValueError: If `mode` is not legal. - Supported Platforms: - ``CPU`` ``GPU`` ``Ascend`` + Inputs: + - **src** (Tensor) - The source tensor to scatter. + - **index** (Tensor) - The indices of elements to scatter, must be int type. + - **out** (Tensor, optional) - The destination tensor. If provided, scatter will be performed in-place. + - **dim_size** (int, optional) - If `out` is not given, automatically create output with size `dim_size`. + If `dim_size` is not given, a minimal sized output tensor is returned. Default: None. + Outputs: + Tensor, the result after scatter operation. 
+ + Examples: + >>> import mindspore as ms + >>> from mindspore import Tensor + >>> from mindscience.e3nn.nn import Scatter + >>> scatter = Scatter('add') + >>> src = Tensor([[1, 2], [3, 4], [5, 6]], ms.float32) + >>> index = Tensor([0, 0, 1], ms.int32) + >>> out = scatter(src, index) + >>> print(out.shape) + (2, 2) """ def __init__(self, mode='add'): diff --git a/mindscience/e3nn/o3/irreps.py b/mindscience/e3nn/o3/irreps.py index 01273bf9d..57ba34bfb 100644 --- a/mindscience/e3nn/o3/irreps.py +++ b/mindscience/e3nn/o3/irreps.py @@ -35,10 +35,14 @@ class Irrep: r""" Irreducible representation of O(3). This class does not contain any data, it is a structure that describe the representation. It is typically used as argument of other classes of the library to define the input and output representations of functions. + The irrep is labeled by a non-negative integer `l` (the degree) and a parity `p` (1 for even, -1 for odd). + Common aliases: "e" for even parity, "o" for odd parity, "y" for parity (-1)^l. Args: - l (Union[int, str]): non-negative integer, the degree of the representation, :math:`l = 0, 1, \dots`. Or string to indicate the degree and parity. - p (int): {1, -1}, the parity of the representation. Default: ``None``. + l (Union[int, str]): non-negative integer, the degree of the representation, :math:`l = 0, 1, \dots`. + Alternatively, a string such as ``"1o"`` or ``"2e"`` encoding both degree and parity. + p (int, optional): the parity of the representation, :math:`p \in \{1, -1\}`. + Ignored when ``l`` is a string. Default: ``None``. Raises: NotImplementedError: If method is not implemented. ValueError: If `l` cannot be converted to an `Irrep`. TypeError: If `l` is not int or str. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import Irrep + >>> from mindscience.e3nn.o3 import Irrep >>> Irrep(0, 1) 0e >>> Irrep("1y") @@ -80,23 +81,23 @@ class Irrep: name = l.strip() l = int(name[:-1]) if l < 0: - raise ValueError + raise ValueError("Irrep degree must be non-negative.") p = { 'e': 1, 'o': -1, 'y': (-1) ** l, }[name[-1]] except Exception: - raise ValueError + raise ValueError(f"Cannot convert string {l} to Irrep.") elif isinstance(l, tuple): l, p = l if not isinstance(l, int): - raise TypeError + raise TypeError("Irrep degree must be int.") elif l < 0: - raise ValueError + raise ValueError("Irrep degree must be non-negative.") if p not in [-1, 1]: - raise ValueError + raise ValueError("Irrep parity must be 1 or -1.") object.__setattr__(self, "l", l) object.__setattr__(self, "p", p) @@ -116,12 +117,17 @@ def wigD_from_angles(self, alpha, beta, gamma, k=None): r""" - Representation wigner D matrices of O(3) from Euler angles. + Compute the Wigner-D matrix representation of O(3) from the three Euler angles + :math:`(\alpha, \beta, \gamma)` that describe the rotation sequence: + + 1. Rotate by :math:`\gamma` around the original Y axis. + 2. Rotate by :math:`\beta` around the new X axis. + 3. Rotate by :math:`\alpha` around the newest Y axis. Args: - alpha (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): rotation :math:`\alpha` around Y axis, applied third. - beta (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): rotation :math:`\beta` around X axis, applied second. - gamma (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): rotation :math:`\gamma` around Y axis, applied first.
+ alpha (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): Rotation :math:`\alpha` around Y axis, applied third. + beta (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): Rotation :math:`\beta` around X axis, applied second. + gamma (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): Rotation :math:`\gamma` around Y axis, applied first. k (Union[None, Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): How many times the parity is applied. Default: ``None`` . Returns: @@ -142,7 +148,7 @@ class Irrep: def wigD_from_matrix(self, R): r""" - Representation wigner D matrices of O(3) from rotation matrices. + Compute the Wigner-D matrix representation of O(3) from rotation matrices. Args: R (Tensor): Rotation matrices. The shape of Tensor is :math:`(..., 3, 3)`. @@ -162,7 +168,7 @@ class Irrep: [ 0, 0, -1]] """ if not isinstance(R, Tensor): - raise TypeError + raise TypeError("R must be a Tensor.") d = Tensor(np.sign(np.linalg.det(R.asnumpy()))) R = _expand_last_dims(d) * R k = (1. - d) / 2 @@ -173,6 +179,12 @@ class Irrep: return 2 * self.l + 1 def is_scalar(self) -> bool: + r""" + Check whether this irrep is the trivial (scalar) representation. + + Returns: + bool, True if `l = 0` and parity `p = 1`, False otherwise. + """ return self.l == 0 and self.p == 1 def __mul__(self, other): @@ -197,7 +209,7 @@ class Irrep: other (int): multiple number of the `Irrep`. Returns: - `Irreps` - corresponding multiple `Irrep`. + `Irreps`, corresponding multiple `Irrep`. Raises: TypeError: If `other` is not int. @@ -273,20 +285,19 @@ class Irreps: r""" Direct sum of irreducible representations of O(3). This class does not contain any data, it is a structure that describe the representation. It is typically used as argument of other classes of the library to define the input and output representations of functions. + The irreps are stored as a tuple of (_MulIr) objects, each containing a multiplicity and an Irrep. + This allows for easy manipulation, such as addition, multiplication, and filtering of representations. Args: - irreps (Union[str, Irrep, Irreps, List[Tuple[int]]]): a string to represent the direct sum of irreducible representations. + irreps (Union[str, Irrep, Irreps, List[Tuple[int]]]): A string to represent the direct sum of irreducible representations. Default: None. Raises: ValueError: If `irreps` cannot be converted to an `Irreps`. ValueError: If the mul part of `irreps` part is negative. TypeError: If the mul part of `irreps` part is not int. 
- Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import Irreps + >>> from mindscience.e3nn.o3 import Irreps >>> x = Irreps([(100, (0, 1)), (50, (1, 1))]) 100x0e+50x1e >>> x.dim @@ -329,22 +340,22 @@ class Irreps: ir = Irrep(mir) if not isinstance(mul, int): - raise TypeError + raise TypeError("Irrep multiplicity must be int.") elif mul < 0: - raise ValueError + raise ValueError("Irrep multiplicity must be non-negative.") out += (_MulIr(mul, ir),) except Exception: - raise ValueError + raise ValueError("Irreps string format is invalid.") elif irreps is None: pass else: - out = self.handle_irreps(irreps, out) + out = self._handle_irreps(irreps, out) self.data = out self.dim = self._dim() self.slice = self._slices() self.slice_tuples = [(s.start, s.stop - s.start) for s in self.slice] - def handle_irreps(self, irreps, out): + def _handle_irreps(self, irreps, out): for mir in irreps: if isinstance(mir, str): @@ -367,7 +378,7 @@ class Irreps: ir = Irrep(ir) if not (isinstance(mul, int) and mul >= 0 and ir is not None): - raise ValueError + raise ValueError("Irreps format is invalid.") out += (_MulIr(mul, ir),) return out @@ -432,7 +443,7 @@ class Irreps: other (int): multiple number of the `Irreps`. Returns: - `Irreps` - corresponding multiple `Irreps`. + `Irreps`, corresponding multiple `Irreps`. Raises: NotImplementedError: If `other` is `Irreps`, please use `o3.TensorProduct`. @@ -456,7 +467,7 @@ class Irreps: other (int): multiple number of the `Irreps`. Returns: - `Irreps` - repeated multiple `Irreps`. + `Irreps`, repeated multiple `Irreps`. """ return self * other @@ -507,7 +518,7 @@ class Irreps: Simplify the representations. Returns: - `Irreps` + `Irreps`, simplified `Irreps`. Examples: >>> Irreps("1e + 1e + 0e").simplify() @@ -528,7 +539,7 @@ class Irreps: Remove any irreps with multiplicities of zero. Returns: - `Irreps` + `Irreps`, irreps with multiplicities of zero removed. Examples: >>> Irreps("4x0e + 0x1o + 2x3e").remove_zero_multiplicities() @@ -557,11 +568,9 @@ class Irreps: Sort the representations by increasing degree. Returns: - irreps (`Irreps`) - sorted `Irreps` - - p (tuple[int]) - permute orders. `p[old_index] = new_index` - - inv (tuple[int]) - inversed permute orders. `p[new_index] = old_index` + `Irreps`, sorted `Irreps`. + p (tuple[int]), permute orders. `p[old_index] = new_index` + inv (tuple[int]), inversed permute orders. `p[new_index] = old_index` Examples: >>> Irreps("1e + 0e + 1e").sort().irreps @@ -615,7 +624,12 @@ class Irreps: def decompose(self, v, batch=False): r""" - Decompose a vector by `Irreps`. + Decompose a vector into irreducible components according to the current `Irreps` structure. + + This method reshapes the last axis of the input tensor `v` such that each slice + corresponds to one of the irreducible representations listed in `self`. The + resulting list contains one tensor per irrep, with shape + `(..., multiplicity, irrep_dimension)`. Args: v (Tensor): the vector to be decomposed. @@ -664,7 +678,7 @@ class Irreps: Args: lmax (int): maximum of `l`. - p (int): {1, -1}, the parity of the representation. + p (int): {1, -1}, the parity of the representation. Default: -1. Returns: `Irreps`, representation of :math:`(Y^0, Y^1, \dots, Y^{\mathrm{lmax}})`. @@ -679,15 +693,20 @@ class Irreps: def randn(self, *size, normalization='component'): r""" - Random tensor. + Generate a random tensor whose last dimension matches the total dimension of these irreps. 
+ The irreps structure is used to split the last axis into individual irrep blocks, + each of which can be normalized per component or by its irrep norm. Args: - *size (List[int]): size of the output tensor, needs to contains a `-1`. - normalization (str): {'component', 'norm'}, type of normalization method. + size (List[int]): size of the output tensor, must contain a `-1`. + normalization (str): {'component', 'norm'}, type of normalization method. Default: 'component'. Returns: Tensor, the shape is `size` where `-1` is replaced by `self.dim`. + Raises: + ValueError: If `normalization` is not 'component' or 'norm'. + Examples: >>> Irreps("5x0e + 10x1o").randn(5, -1, 5, normalization='norm').shape (5, 35, 5) @@ -713,7 +732,15 @@ class Irreps: def wigD_from_angles(self, alpha, beta, gamma, k=None): r""" - Representation wigner D matrices of O(3) from Euler angles. + Compute the Wigner-D matrix representation of O(3) from the three Euler angles + :math:`(\alpha, \beta, \gamma)` that describe the rotation sequence: + + 1. Rotate by :math:`\gamma` around the original Y axis. + 2. Rotate by :math:`\beta` around the new X axis. + 3. Rotate by :math:`\alpha` around the newest Y axis. + + The result is the direct sum of the Wigner-D matrices for each irrep contained in this `Irreps` object, + repeated according to multiplicity. Args: alpha (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): rotation :math:`\alpha` around Y axis, applied third. beta (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): rotation :math:`\beta` around X axis, applied second. gamma (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): rotation :math:`\gamma` around Y axis, applied first. k (Union[None, Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): How many times the parity is applied. Default: ``None`` . Returns: @@ -735,7 +762,7 @@ class Irreps: def wigD_from_matrix(self, R): r""" - Representation wigner D matrices of O(3) from rotation matrices. + Compute Wigner-D matrices of O(3) from rotation matrices. Args: R (Tensor): Rotation matrices. The shape of Tensor is :math:`(..., 3, 3)`. @@ -754,7 +781,7 @@ class Irreps: [ 0, 0, -1]] """ if not isinstance(R, Tensor): - raise TypeError + raise TypeError("R needs to be a Tensor") d = Tensor(np.sign(np.linalg.det(R.asnumpy()))) R = _expand_last_dims(d) * R k = (1 - d) / 2 diff --git a/mindscience/e3nn/o3/norm.py b/mindscience/e3nn/o3/norm.py index 150e52178..01968f342 100644 --- a/mindscience/e3nn/o3/norm.py +++ b/mindscience/e3nn/o3/norm.py @@ -21,14 +21,18 @@ from .tensor_product import TensorProduct class Norm(nn.Cell): r""" - Norm of each irrep in a direct sum of irreps. + Compute the norm (length) of each irreducible representation (irrep) contained in a direct-sum tensor. + + Given a tensor that transforms under a direct sum of irreps, this module returns a tensor whose + entries are the norms of the individual irreps. For example, if the input contains three + vectors (3x1o irreps), the output will be a 3-component tensor where each entry is the + Euclidean norm of the corresponding vector. Args: irreps_in (Union[str, Irrep, Irreps]): Irreps for the input. - squared (bool): whether to return the squared norm. Default: False. - dtype (mindspore.dtype): The type of input tensor. Default: ``mindspore.float32`` . - ncon_dtype (mindspore.dtype): The type of input tensors of ncon computation module. - Default: ``mindspore.float32`` . + squared (bool): Whether to return the squared norm. Default: False. + dtype (mindspore.dtype): The type of input tensor. Default: ``mindspore.float32``. + ncon_dtype (mindspore.dtype): The type of input tensors of ncon computation module. Default: ``mindspore.float32``. Inputs: - **v** (Tensor) - The shape of Tensor is :math:`(..., irreps\_in.dim)` .
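[Editor's note, not part of the patch: the `Irreps` container documented in the hunks above is the backbone of every module in this diff, so a minimal usage sketch may help reviewers. It assumes only what the docstrings state: `dim`, `randn` with a `-1` placeholder, and the `decompose` contract of one `(..., mul, 2l+1)` block per irrep.]

```python
# Hedged sketch: exercises Irreps exactly as documented in the docstrings above.
from mindscience.e3nn.o3 import Irreps

irreps = Irreps("5x0e + 10x1o")
print(irreps.dim)                 # 35 = 5*1 + 10*3
v = irreps.randn(4, -1)           # random features, shape (4, 35)
blocks = irreps.decompose(v)      # one tensor per irrep block
print([b.shape for b in blocks])  # expected: [(4, 5, 1), (4, 10, 3)]
```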
@@ -36,13 +40,10 @@ Outputs: - **output** (Tensor) - The shape of Tensor is :math:`(..., irreps\_out.dim)` . - Supported Platforms: - ``Ascend`` - Examples: >>> import mindspore as ms >>> import numpy as np - >>> from mindchemistry.e3.o3 import Norm + >>> from mindscience.e3nn.o3 import Norm >>> n = Norm('3x1o') >>> v = ms.Tensor(np.linspace(1., 2., n.irreps_in.dim), dtype=ms.float32) >>> n(v).shape diff --git a/mindscience/e3nn/o3/rotation.py b/mindscience/e3nn/o3/rotation.py index 96bbe21cc..fc2e5a9c2 100644 --- a/mindscience/e3nn/o3/rotation.py +++ b/mindscience/e3nn/o3/rotation.py @@ -31,73 +31,63 @@ rand = ops.UniformReal(seed=seed) def identity_angles(*shape, dtype=float32): r""" - Give the identity set of Euler angles. + Return the identity set of Euler angles :math:`(\alpha, \beta, \gamma)` that corresponds to “no rotation”. + Whatever shape is requested, the three returned tensors are filled with zeros. Args: shape (Tuple[int]): The shape of additional dimensions. dtype (mindspore.dtype): The type of input tensor. Default: ``mindspore.float32`` . Returns: - alpha (Tensor) - The alpha Euler angles. - - beta (Tensor) - The beta Euler angles. - - gamma (Tensor) - The gamma Euler angles. + Tuple[Tensor]: A tuple of :math:`alpha`, :math:`beta`, :math:`gamma` Tensors, each of shape `shape`. Raises: TypeError: If dtype of 'shape' is not tuple. TypeError: If dtype of the element of 'shape' is not int. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import identity_angles + >>> from mindscience.e3nn.o3 import identity_angles >>> m = identity_angles((1)) >>> print(m) (Tensor(shape=[1], dtype=Float32, value= [ 0.00000000e+00]), Tensor(shape=[1], dtype=Float32, value= [ 0.00000000e+00]), Tensor(shape=[1], dtype=Float32, value= [ 0.00000000e+00])) """ if not isinstance(shape, tuple): - raise TypeError + raise TypeError("shape needs to be a tuple") if not all(map(lambda x: isinstance(x, int), shape)): - raise TypeError + raise TypeError("the element of shape needs to be int") abc = zeros((3,) + shape, dtype) return abc[0], abc[1], abc[2] def rand_angles(*shape): r""" - Give a random set of Euler angles. + Return a uniformly-random set of Euler angles :math:`(\alpha, \beta, \gamma)` that represents + a random rotation in 3-D space. :math:`\alpha` and :math:`\gamma` are sampled uniformly from [0, 2π), + while :math:`\beta` is sampled from [0, π] with probability density proportional to sin(:math:`\beta`), + ensuring uniform distribution over the rotation group SO(3). Args: shape (Tuple[int]): The shape of additional dimensions. Returns: - alpha (Tensor) - The alpha Euler angles. - - beta (Tensor) - The beta Euler angles. - - gamma (Tensor) - The gamma Euler angles. + Tuple[Tensor]: A tuple of :math:`alpha`, :math:`beta`, :math:`gamma` Tensors, each of shape `shape`. Raises: TypeError: If dtype of 'shape' is not tuple. TypeError: If dtype of the element of 'shape' is not int.
- Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import rand_angles + >>> from mindscience.e3nn.o3 import rand_angles >>> m = rand_angles((1)) >>> print(m) (Tensor(shape=[1], dtype=Float32, value= [ 4.00494671e+00]), Tensor(shape=[1], dtype=Float32, value= [ 1.29240000e+00]), Tensor(shape=[1], dtype=Float32, value= [ 5.71690750e+00])) """ if not isinstance(shape, tuple): - raise TypeError + raise TypeError("shape needs to be a tuple") if not all(map(lambda x: isinstance(x, int), shape)): - raise TypeError + raise TypeError("the element of shape needs to be int") alpha, gamma = 2 * math.pi * rand((2,) + shape) beta = ops.acos(2 * rand(shape) - 1) return alpha, beta, gamma @@ -105,7 +95,10 @@ def rand_angles(*shape): def compose_angles(a1, b1, c1, a2, b2, c2): r""" - Computes the composed Euler angles of two sets of Euler angles. + Compute the Euler angles that result from composing two rotations. + + Given two rotations represented by Euler angles (a1, b1, c1) and (a2, b2, c2), + this function returns the Euler angles (a, b, c) of the combined rotation .. math:: @@ -131,15 +124,10 @@ def compose_angles(a1, b1, c1, a2, b2, c2): The first applied gamma Euler angles. Returns: - - alpha (Tensor), The composed alpha Euler angles. - - beta (Tensor), The composed beta Euler angles. - - gamma (Tensor), The composed gamma Euler angles. - - Supported Platforms: - ``Ascend`` + Tuple[Tensor]: A tuple of :math:`alpha`, :math:`beta`, :math:`gamma` Tensors. Examples: - >>> from mindchemistry.e3.o3 import compose_angles + >>> from mindscience.e3nn.o3 import compose_angles >>> m = compose_angles(0.4, 0.5, 0.6, 0.7, 0.8, 0.9) >>> print(m) (Tensor(shape=[], dtype=Float32, value= 1.34227), Tensor(shape=[], dtype=Float32, value= 1.02462), @@ -153,7 +141,7 @@ def compose_angles(a1, b1, c1, a2, b2, c2): def matrix_x(angle): r""" - Give the rotation matrices around x axis for given angle. + Return the :math:`3 \times 3` rotation matrix for a rotation about the x-axis by the given angle. Args: angle (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): @@ -163,11 +151,8 @@ def matrix_x(angle): Returns: Tensor, the rotation matrices around x axis. The shape of output is :math:`(..., 3, 3)` - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import matrix_x + >>> from mindscience.e3nn.o3 import matrix_x >>> m = matrix_x(0.4) >>> print(m) [[ 1. 0. 0. ] @@ -187,7 +172,7 @@ def matrix_x(angle): def matrix_y(angle): r""" - Give the rotation matrices around y axis for given angle. + Return the :math:`3 \times 3` rotation matrix for a rotation about the y-axis by the given angle. Args: angle (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): @@ -196,11 +181,8 @@ def matrix_y(angle): Returns: Tensor, the rotation matrices around y axis. The shape of output is :math:`(..., 3, 3)` - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import matrix_y + >>> from mindscience.e3nn.o3 import matrix_y >>> m = matrix_y(0.5) >>> print(m) [[ 0.87758255 0. 0.47942555] @@ -220,7 +202,7 @@ def matrix_y(angle): def matrix_z(angle): r""" - Give the rotation matrices around z axis for given angle. + Return the :math:`3 \times 3` rotation matrix for a rotation about the z-axis by the given angle. Args: angle (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): @@ -230,11 +212,8 @@ def matrix_z(angle): Returns: Tensor, the rotation matrices around z axis. 
The shape of output is :math:`(..., 3, 3)`. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import matrix_z + >>> from mindscience.e3nn.o3 import matrix_z >>> m = matrix_z(0.6) >>> print(m) [[ 0.8253357 -0.5646425 0. ] @@ -254,7 +233,11 @@ def matrix_z(angle): def angles_to_matrix(alpha, beta, gamma): r""" - Conversion from angles to matrix. + Convert Euler angles (:math:`\alpha`, :math:`\beta`, :math:`\gamma`) into the corresponding :math:`3 \times 3` rotation matrix. + The resulting matrix represents the rotation + + .. math:: + R = Ry(\alpha) * Rx(\beta) * Ry(\gamma). Args: alpha (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): @@ -267,11 +250,8 @@ def angles_to_matrix(alpha, beta, gamma): Returns: Tensor, the rotation matrices. Matrices of shape :math:`(..., 3, 3)`. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import angles_to_matrix + >>> from mindscience.e3nn.o3 import angles_to_matrix >>> m = angles_to_matrix(0.4, 0.5, 0.6) >>> print(m) [[ 0.5672197 0.1866971 0.8021259 ] @@ -285,25 +265,20 @@ def angles_to_matrix(alpha, beta, gamma): def matrix_to_angles(r_param): r""" - Conversion from matrix to angles. + Convert :math:`3 \times 3` rotation matrix into Euler angles (:math:`\alpha`, :math:`\beta`, :math:`\gamma`). Args: r_param (Tensor): The rotation matrices. Matrices of shape :math:`(..., 3, 3)`. Returns: - - alpha (Tensor), The alpha Euler angles. The shape of Tensor is :math:`(...)`. - - beta (Tensor), The beta Euler angles. The shape of Tensor is :math:`(...)`. - - gamma (Tensor), The gamma Euler angles. The shape of Tensor is :math:`(...)`. + Tuple[Tensor]: A tuple of :math:`alpha`, :math:`beta`, :math:`gamma` Tensors. Raise: ValueError: If the det(R) is not equal to 1. - Supported Platforms: - ``Ascend`` - Examples: >>> import mindspore as ms - >>> from mindchemistry.e3.o3 import matrix_to_angles + >>> from mindscience.e3nn.o3 import matrix_to_angles >>> input = ms.Tensor([[0.5672197, 0.1866971, 0.8021259], [0.27070403, 0.87758255, -0.395687], ... [-0.77780527, 0.44158012,0.4472424]]) >>> m = matrix_to_angles(input) @@ -312,7 +287,7 @@ def matrix_to_angles(r_param): Tensor(shape=[], dtype=Float32, value= 0.6)) """ if not np.allclose(np.linalg.det(r_param.asnumpy()), 1., 1e-3, 1e-5): - raise ValueError + raise ValueError("The det(R) is not equal to 1.") x = ops.matmul(r_param, Tensor([0.0, 1.0, 0.0])) a, b = xyz_to_angles(x) @@ -327,7 +302,7 @@ def matrix_to_angles(r_param): def angles_to_xyz(alpha, beta): r""" - Convert :math:`(\alpha, \beta)` into a point :math:`(x, y, z)` on the sphere. + Convert the two spherical angles (:math:`\alpha`, :math:`\beta`) into Cartesian coordinates (x, y, z) on the unit sphere. Args: alpha (Union[Tensor[float32], List[float], Tuple[float], ndarray[np.float32], float]): @@ -338,12 +313,9 @@ def angles_to_xyz(alpha, beta): Returns: Tensor, the point :math:`(x, y, z)` on the sphere. The shape of Tensor is :math:`(..., 3)` - Supported Platforms: - ``Ascend`` - Examples >>> import mindspore as ms - >>> from mindchemistry.e3.o3 import angles_to_xyz + >>> from mindscience.e3nn.o3 import angles_to_xyz >>> print(angles_to_xyz(ms.Tensor(1.7), ms.Tensor(0.0)).abs()) [0., 1., 0.] """ @@ -365,15 +337,11 @@ def xyz_to_angles(xyz): xyz (Tensor): The point :math:`(x, y, z)` on the sphere. The shape of Tensor is :math:`(..., 3)`. Returns: - alpha (Tensor) - The alpha Euler angles. The shape of Tensor is :math:`(...)`. - beta (Tensor) - The beta Euler angles. 
The shape of Tensor is :math:`(...)`. - - Supported Platforms: - ``Ascend`` + Tuple[Tensor]: A tuple of :math:`alpha`, :math:`beta` Tensors. Examples: >>> import mindspore as ms - >>> from mindchemistry.e3.o3 import xyz_to_angles + >>> from mindscience.e3nn.o3 import xyz_to_angles >>> input = ms.Tensor([3, 3, 3]) >>> m = xyz_to_angles(input) >>> print(m) diff --git a/mindscience/e3nn/o3/spherical_harmonics.py b/mindscience/e3nn/o3/spherical_harmonics.py index f6725802b..2b947bdb2 100644 --- a/mindscience/e3nn/o3/spherical_harmonics.py +++ b/mindscience/e3nn/o3/spherical_harmonics.py @@ -24,7 +24,10 @@ def _sqrt(x, dtype=float32): class SphericalHarmonics(nn.Cell): r""" - Return Spherical harmonics layer. + Spherical-harmonics cell: maps 3-D Cartesian vectors (x, y, z) to the + corresponding real-valued spherical-harmonic basis functions :math:`Y_l^m(\hat{x})`. + The layer can return any requested degree(s) l and automatically handles + parity (even/odd) selection rules. Args: irreps_out (Union[str, `Irreps`]): irreducible representations of output for spherical harmonics. @@ -49,11 +52,8 @@ class SphericalHarmonics(nn.Cell): The output parity should have been p = {input_p**l}. NotImplementedError: If `l` is larger than 11. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import SphericalHarmonics + >>> from mindscience.e3nn.o3 import SphericalHarmonics >>> from mindspore import ops >>> sh = SphericalHarmonics(0, False, normalization='component') >>> x = ops.rand(2,3) @@ -67,7 +67,8 @@ class SphericalHarmonics(nn.Cell): self.normalize = normalize self.normalization = normalization if normalization not in ['integral', 'component', 'norm']: - raise ValueError + raise ValueError(f"SphericalHarmonics only supports normalization methods 'integral', 'component', 'norm'. " + f"But got normalization={normalization}.") if isinstance(irreps_out, str): irreps_out = Irreps(irreps_out) @@ -80,7 +81,7 @@ class SphericalHarmonics(nn.Cell): irreps_in = Irreps(irreps_in) if irreps_in not in (Irreps("1x1o"), Irreps("1x1e")): - raise ValueError + raise ValueError("SphericalHarmonics only supports input irreps of '1x1o' or '1x1e'.") self.irreps_in = irreps_in input_p = irreps_in.data[0].ir.p @@ -88,12 +89,16 @@ class SphericalHarmonics(nn.Cell): ls = [] for mul, (l, p) in irreps_out: if p != input_p ** l: - raise ValueError + raise ValueError(f"SphericalHarmonics output parity must satisfy p = input_p ** l. " + f"But got irreps_out={irreps_out}.") ls.extend([l] * mul) elif isinstance(irreps_out, int): ls = [irreps_out] else: ls = list(irreps_out) + + if max(ls) > 11: + raise NotImplementedError("SphericalHarmonics only supports up to l=11.") irreps_out = Irreps([(1, (l, input_p ** l)) for l in ls]).simplify() self.irreps_out = irreps_out @@ -177,7 +182,7 @@ def spherical_harmonics(l, x, normalize=True, normalization='integral'): .. math:: Y^{l+1}_i(x) &= \text{cste}(l) \; & C_{ijk} Y^l_j(x) x_k \partial_k Y^{l+1}_i(x) &= \text{cste}(l) \; (l+1) & C_{ijk} Y^l_j(x) - Where :math:`C` are the `wigner_3j`. + where :math:`C` are the `wigner_3j`. Args: l (Union[int, List[int]]): degree of the spherical harmonics.
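[Editor's note, not part of the patch: a small usage sketch for the module-level helper documented above. It assumes the function mirrors e3nn's convention that a list of degrees concatenates the `(2l+1)`-dimensional blocks along the last axis; the import path is taken from this diff, the module-level export is an assumption.]

```python
# Hedged sketch: behavior assumed to mirror e3nn's spherical_harmonics.
from mindspore import ops
from mindscience.e3nn.o3.spherical_harmonics import spherical_harmonics

x = ops.rand(8, 3)                                     # batch of 3-D direction vectors
y = spherical_harmonics([0, 1, 2], x, normalize=True)  # degrees l = 0, 1, 2
print(y.shape)                                         # expected (8, 9): 1 + 3 + 5 components
```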
@@ -721,5 +726,5 @@ def _spherical_harmonics(lmax: int, x, y, z): if lmax == 11: return ops.stack(results, axis=-1) - # 默认返回最高阶 (l=11) + # by default (l=11) return ops.stack(results, axis=-1) diff --git a/mindscience/e3nn/o3/sub.py b/mindscience/e3nn/o3/sub.py index 03ebe60cf..8b2dbcd50 100644 --- a/mindscience/e3nn/o3/sub.py +++ b/mindscience/e3nn/o3/sub.py @@ -27,7 +27,7 @@ class FullyConnectedTensorProduct(TensorProduct): Fully-connected weighted tensor product. All the possible path allowed by :math:`|l_1 - l_2| \leq l_{out} \leq l_1 + l_2` are made. Equivalent to `TensorProduct` with `instructions='connect'`. - For details, see :class:`mindchemistry.e3.o3.TensorProduct`. + For details, see :class:`mindscience.e3nn.o3.TensorProduct`. Args: irreps_in1 (Union[str, Irrep, Irreps]): Irreps for the first input. @@ -39,13 +39,10 @@ class FullyConnectedTensorProduct(TensorProduct): weight_init (str): {'zeros', 'ones', 'truncatedNormal', 'normal', 'uniform', 'he_uniform', 'he_normal', 'xavier_uniform'}, the initial method of weights. Default: 'normal'. ncon_dtype (mindspore.dtype): The type of input tensors of ncon computation module. - Default: ``mindspore.float32`` . - - Supported Platforms: - ``Ascend`` + Default: ``mindspore.float32``. Examples: - >>> from mindchemistry.e3.o3 import FullyConnectedTensorProduct + >>> from mindscience.e3nn.o3 import FullyConnectedTensorProduct >>> FullyConnectedTensorProduct('2x1o', '1x1o+3x0e', '5x2e+4x1o') TensorProduct [connect] (2x1o x 1x1o+3x0e -> 5x2e+4x1o) @@ -68,9 +65,10 @@ class FullyConnectedTensorProduct(TensorProduct): class FullTensorProduct(TensorProduct): r""" Full tensor product between two irreps. - + All possible output irreps are generated, including every allowed + :math:`|l_1 - l_2| \leq l_{out} \leq l_1 + l_2` combination. Equivalent to `TensorProduct` with `instructions='full'`. - For details, see :class:`mindchemistry.e3.o3.TensorProduct`. + For details, see :class:`mindscience.e3nn.o3.TensorProduct`. Args: irreps_in1 (Union[str, Irrep, Irreps]): Irreps for the first input. @@ -85,11 +83,9 @@ class FullTensorProduct(TensorProduct): ncon_dtype (mindspore.dtype): The type of input tensors of ncon computation module. Default: ``mindspore.float32`` . - Supported Platforms: - ``Ascend`` Examples: - >>> from mindchemistry.e3.o3 import FullTensorProduct + >>> from mindscience.e3nn.o3 import FullTensorProduct >>> FullTensorProduct('2x1o+4x0o', '1x1o+3x0e') TensorProduct [full] (2x1o+4x0o x 1x1o+3x0e -> 2x0e+12x0o+6x1o+2x1e+4x1e+2x2e) @@ -111,10 +107,9 @@ class FullTensorProduct(TensorProduct): class ElementwiseTensorProduct(TensorProduct): r""" - Elementwise connected tensor product. - + Elementwise tensor product. Equivalent to `TensorProduct` with `instructions='element'`. - For details, see :class:`mindchemistry.e3.o3.TensorProduct`. + For details, see :class:`mindscience.e3nn.o3.TensorProduct`. Args: irreps_in1 (Union[str, Irrep, Irreps]): Irreps for the first input. @@ -129,11 +124,8 @@ class ElementwiseTensorProduct(TensorProduct): ncon_dtype (mindspore.dtype): The type of input tensors of ncon computation module. Default: ``mindspore.float32`` . 
- Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import ElementwiseTensorProduct + >>> from mindscience.e3nn.o3 import ElementwiseTensorProduct >>> ElementwiseTensorProduct('2x2e+4x1o', '3x1e+3x0o') TensorProduct [element] (2x2e+1x1o+3x1o x 2x1e+1x1e+3x0o -> 2x1e+2x2e+2x3e+1x0o+1x1o+1x2o+3x1e) @@ -158,7 +150,7 @@ class Linear(TensorProduct): Linear operation equivariant. Equivalent to `TensorProduct` with `instructions='linear'`. - For details, see :class:`mindchemistry.e3.o3.TensorProduct`. + For details, see :class:`mindscience.e3nn.o3.TensorProduct`. Args: irreps_in (Union[str, Irrep, Irreps]): Irreps for the input. @@ -169,13 +161,10 @@ class Linear(TensorProduct): weight_init (str): {'zeros', 'ones', 'truncatedNormal', 'normal', 'uniform', 'he_uniform', 'he_normal', 'xavier_uniform'}, the initial method of weights. Default: ``'normal'``. ncon_dtype (mindspore.dtype): The type of input tensors of ncon computation module. - Default: ``mindspore.float32`` . - - Supported Platforms: - ``Ascend`` + Default: ``mindspore.float32``. Examples: - >>> from mindchemistry.e3.o3 import Linear + >>> from mindscience.e3nn.o3 import Linear >>> Linear('2x2e+3x1o+3x0e', '3x2e+5x1o+2x0e') TensorProduct [linear] (2x2e+3x1o+3x0e x 1x0e -> 3x2e+5x1o+2x0e) @@ -190,7 +179,7 @@ class Linear(TensorProduct): **kwargs) -class Instruction(NamedTuple): +class _Instruction(NamedTuple): i_in: int i_out: int path_shape: tuple @@ -204,14 +193,6 @@ def _prod(x): return out -def prod(x): - """Compute the product of a sequence.""" - out = 1 - for a in x: - out *= a - return out - - def _sum_tensors_withbias(xs, shape, dtype): """sum tensors of same irrep.""" if xs: @@ -263,7 +244,7 @@ class LinearBias(TensorProduct): Linear operation equivariant with option to add bias. Equivalent to `TensorProduct` with `instructions='linear'` with option to add bias. For details, - see :class:`mindchemistry.e3.o3.TensorProduct`. + see :class:`mindscience.e3nn.o3.TensorProduct`. Args: irreps_in (Union[str, Irrep, Irreps]): Irreps for the input. @@ -275,13 +256,18 @@ class LinearBias(TensorProduct): 'xavier_uniform'}, the initial method of weights. Default: ``'normal'``. has_bias (bool): whether add bias to calculation ncon_dtype (mindspore.dtype): The type of input tensors of ncon computation module. - Default: ``mindspore.float32`` . - - Supported Platforms: - ``Ascend`` + Default: ``mindspore.float32``. + + Inputs: + - **v1** (Tensor): Input tensor. The shape of Tensor is :math:`(..., 2l+1)`. + - **v2** (Tensor): Input tensor. The shape of Tensor is :math:`(..., 2l+1)`. Default: None. + - **weight** (Tensor): Weight tensor. The shape of Tensor is :math:`(..., 2l+1)`. + + Outputs: + - **out** (Tensor): Output tensor. The shape of Tensor is :math:`(..., 2l+1)`. 
Examples: - >>> from mindchemistry.e3.o3 import LinearBias + >>> from mindscience.e3nn.o3 import LinearBias >>> LinearBias('2x2e+3x1o+3x0e', '3x2e+5x1o+2x0e') TensorProduct [linear] (2x2e+3x1o+3x0e x 1x0e -> 3x2e+5x1o+2x0e) @@ -307,7 +293,7 @@ class LinearBias(TensorProduct): is_scalar_num = biases.count(True) instructions = [ - Instruction(i_in=-1, + _Instruction(i_in=-1, i_out=i_out, path_shape=(mul_ir.dim,), path_weight=1.0) @@ -323,7 +309,7 @@ class LinearBias(TensorProduct): if bias: path_shape = (mul_ir.dim,) path_weight = 1.0 - instruction = Instruction(i_in=-1, i_out=i_out, path_shape=path_shape, path_weight=path_weight) + instruction = _Instruction(i_in=-1, i_out=i_out, path_shape=path_shape, path_weight=path_weight) self.bias_instructions.append(instruction) if is_scalar_num == 1: @@ -443,7 +429,7 @@ class TensorSquare(TensorProduct): Compute the square tensor product of a tensor. Equivalent to `TensorProduct` with `irreps_in2=None and instructions='full' or 'connect'`. For details, - see :class:`mindchemistry.e3.o3.TensorProduct`. + see :class:`mindscience.e3nn.o3.TensorProduct`. If `irreps_out` is given, this operation is fully connected. If `irreps_out` is not given, the operation has no parameter and is like full tensor product. @@ -459,16 +445,13 @@ class TensorSquare(TensorProduct): weight_init (str): {'zeros', 'ones', 'truncatedNormal', 'normal', 'uniform', 'he_uniform', 'he_normal', 'xavier_uniform'}, the initial method of weights. Default: 'normal'. ncon_dtype (mindspore.dtype): The type of input tensors of ncon computation module. - Default: ``mindspore.float32`` . + Default: ``mindspore.float32``. Raises: ValueError: If both `irreps_out` and `filter_ir_out` are not None. - Supported Platforms: - ``Ascend`` - Examples: - >>> from mindchemistry.e3.o3 import TensorSquare + >>> from mindscience.e3nn.o3 import TensorSquare >>> TensorSquare('2x1o', irreps_out='5x2e+4x1e+7x1o') TensorProduct [connect] (2x1o x 2x1o -> 5x2e+4x1e) >>> TensorSquare('2x1o+3x0e', filter_ir_out='5x2o+4x1e+2x0e') diff --git a/mindscience/e3nn/o3/tensor_product.py b/mindscience/e3nn/o3/tensor_product.py index 0f281fc11..b771110a8 100644 --- a/mindscience/e3nn/o3/tensor_product.py +++ b/mindscience/e3nn/o3/tensor_product.py @@ -14,14 +14,12 @@ # ============================================================================ from mindspore import Tensor, nn, ops, Parameter, get_context, float32, int32, vmap from mindspore.common.initializer import initializer -import mindspore as ms from .irreps import Irreps from .wigner import wigner_3j from ..utils.ncon import Ncon from ..utils.func import narrow from ..utils.initializer import renormal_initializer -import numpy as np -from mindspore.numpy import tensordot + def _prod(x): out = 1 @@ -270,18 +268,6 @@ def _init_ncon(mode, ls): return ncon -class uvw_ncon_v2(nn.Cell): - def __init__(self): - super(uvw_ncon_v2, self).__init__() - self.tensordot1 = tensordot - self.tensordot2 = tensordot - self.tensordot3 = vmap(tensordot, (0,0,None), 0) - def construct(self, m1, m2, m3, m4): - temp1 = self.tensordot1(m3, m1 , [2,1]) - temp2 = self.tensordot1(m2, m4 , [1,0]) - res = self.tensordot3(temp2, temp1, ([0,1],[1,0])) - return res - def _init_ncon_weight(mode, weight_mode, ls): """tensor graph contractions with weights""" if mode == 'uvw': @@ -388,18 +374,18 @@ class TensorProduct(nn.Cell): Default: ``mindspore.float32`` . 
Inputs:
-    - **x** (Tensor) - The shape of Tensor is ``(..., irreps_in1.dim)``
-    - **y** (Tensor) - The shape of Tensor is ``(..., irreps_in2.dim)``
+    - **v1** (Tensor) - The shape of Tensor is ``(..., irreps_in1.dim)``
+    - **v2** (Tensor) - The shape of Tensor is ``(..., irreps_in2.dim)``. Default: None.
     - **weight** (Tensor) - `Tensor` or list of `Tensor`, optional required if ``internal_weights`` is ``False``.
       The shape of Tensor is ``(self.weight_numel,)`` if ``shared_weights`` is ``True``.
       The shape of Tensor is ``(..., self.weight_numel)`` if ``shared_weights`` is ``False`` or
       list of tensors of shapes ``weight_shape`` / ``(...) + weight_shape``.
       Use ``self.instructions`` to know what are the weights used for.
-          The shape of Tensor is ``(..., irreps_out.dim)``.
+          Default: ``None``.

     Outputs:
-        - **outputs** (Tensor) - The shape of Tensor is ``(..., irreps_out.dim)``.
+        - **v_out** (Tensor) - The shape of Tensor is ``(..., irreps_out.dim)``.

     Raises:
         ValueError: If `irreps_out` is not legal.
@@ -412,12 +398,9 @@
         ValueError: If the initial method is not supported.
         ValueError: If the number of input tensors is not match to the number of input irreps.

-    Supported Platforms:
-        ``Ascend``
-
     Examples:
         >>> import mindspore as ms
-        >>> from mindchemistry.e3.o3 import TensorProduct
+        >>> from mindscience.e3nn.o3 import TensorProduct
         Standard tensor product:
         >>> tp1 = TensorProduct('2x1o+4x0o', '1x1o+3x0e')
         TensorProduct [full] (2x1o+4x0o x 1x1o+3x0e -> 2x0e+12x0o+6x1o+2x1e+4x1e+2x2e)
diff --git a/mindscience/e3nn/o3/wigner.py b/mindscience/e3nn/o3/wigner.py
index bd086be33..6ba55a994 100644
--- a/mindscience/e3nn/o3/wigner.py
+++ b/mindscience/e3nn/o3/wigner.py
@@ -28,7 +28,15 @@ PI = Tensor(math.pi)

 def change_basis_real_to_complex(l, dtype=float32):
     r"""
-    Convert a real basis of spherical harmonics in term of complex.
+    Transform a real-valued spherical-harmonic basis into its complex-valued counterpart.
+    The routine constructs the unitary matrix Q that maps the real basis functions
+    (often denoted :math:`y_{l,m}^{real}`) to the standard complex basis :math:`Y_{l,m}` via
+
+    .. math::
+        Y = Q \cdot y.
+
+    Columns of :math:`Q` are ordered by :math:`m = -l,\ldots,+l`; the resulting complex basis
+    satisfies the usual phase and normalization conventions of quantum mechanics.

     Args:
         l (int): degree of spherical harmonics.
@@ -37,11 +45,8 @@ def change_basis_real_to_complex(l, dtype=float32):
     Returns:
         Tensor, the complex basis with dtype complex64 for `dtype` = float32 and complex128 for `dtype` = float64.

-    Supported Platforms:
-        ``Ascend``
-
     Examples:
-        >>> from mindchemistry.e3.o3 import change_basis_real_to_complex
+        >>> from mindscience.e3nn.o3 import change_basis_real_to_complex
         >>> m = change_basis_real_to_complex(1)
         >>> print(m)
         [[-0.70710677+0.j          0.        +0.j          0.        -0.70710677j]
@@ -81,11 +86,8 @@ def su2_generators(j, dtype=complex64):

     Raise:
         TypeError: If `j` is not int.

-    Supported Platforms:
-        ``Ascend``
-
     Examples:
-        >>> from mindchemistry.e3.o3 import su2_generators
+        >>> from mindscience.e3nn.o3 import su2_generators
         >>> m = su2_generators(1)
         >>> print(m)
         [[[ 0.        +0.j          0.70710677+0.j
@@ -139,11 +141,8 @@ def so3_generators(l, dtype=float32):
         TypeError: If `l` is not int.
         ValueError: If matrices data are inconsistent.

-    Supported Platforms:
-        ``Ascend``
-
     Examples:
-        >>> from mindchemistry.e3.o3 import so3_generators
+        >>> from mindscience.e3nn.o3 import so3_generators
         >>> m = so3_generators(1)
         >>> print(m)
         [[[ 0.          0.
0.        ]
@@ -176,6 +175,15 @@ def wigner_D(l, alpha, beta, gamma):
     r"""
     Wigner D matrix representation of SO(3).

+    These matrices describe how quantum-mechanical states with angular momentum :math:`l`
+    rotate under a sequence of three Euler angles (Z-Y-Z convention):
+    1. Rotate by :math:`\gamma` around the original Z axis
+    2. Rotate by :math:`\beta` around the new Y axis
+    3. Rotate by :math:`\alpha` around the resulting Z axis
+
+    The resulting :math:`D^l(\alpha,\beta,\gamma)` is a :math:`(2l+1) \times (2l+1)` unitary matrix
+    whose entries are the Wigner D-functions.
+
     It satisfies the following properties:
     * :math:`D(\text{identity rotation}) = \text{identity matrix}`
     * :math:`D(R_1 \circ R_2) = D(R_1) \circ D(R_2)`
@@ -190,11 +198,8 @@ def wigner_D(l, alpha, beta, gamma):
     Returns:
         Tensor, Wigner D matrix :math:`D^l(\alpha, \beta, \gamma)`. The shape of Tensor is :math:`(2l+1, 2l+1)`.

-    Supported Platforms:
-        ``Ascend``
-
     Examples:
-        >>> from mindchemistry.e3.o3 import wigner_D
+        >>> from mindscience.e3nn.o3 import wigner_D
         >>> m = wigner_D(1,1,1,1)
         >>> print(m)
         [[-0.09064701  0.7080733   0.70029646]
@@ -238,11 +243,8 @@ def wigner_3j(l1, l2, l3, dtype=float32):
         TypeError: If `l1`, `l2` or `l3` are not int.
         ValueError: If `l1`, `l2` and `l3` do not satisfy abs(l2 - l3) <= l1 <= l2 + l3.

-    Supported Platforms:
-        ``Ascend``
-
     Examples:
-        >>> from mindchemistry.e3.o3 import wigner_3j
+        >>> from mindscience.e3nn.o3 import wigner_3j
         >>> m = wigner_3j(1,1,1)
         >>> print(m)
         [[[ 0.         0.         0.        ]
diff --git a/mindscience/e3nn/so2_conv/__init__.py b/mindscience/e3nn/so2_conv/__init__.py
index 1ea4a3bf5..20f7995ba 100644
--- a/mindscience/e3nn/so2_conv/__init__.py
+++ b/mindscience/e3nn/so2_conv/__init__.py
@@ -17,5 +17,6 @@ init file
 """
 from .so3 import SO3Rotation
 from .so2 import SO2Convolution
+from .init_edge_rot_mat import init_edge_rot_mat

-__all__ = ["SO3Rotation", "SO2Convolution"]
+__all__ = ["SO3Rotation", "SO2Convolution", "init_edge_rot_mat"]
diff --git a/mindscience/e3nn/so2_conv/init_edge_rot_mat.py b/mindscience/e3nn/so2_conv/init_edge_rot_mat.py
index a05a2264b..14e86a7c1 100644
--- a/mindscience/e3nn/so2_conv/init_edge_rot_mat.py
+++ b/mindscience/e3nn/so2_conv/init_edge_rot_mat.py
@@ -20,8 +20,23 @@ import mindspore.numpy as ms_np


 def init_edge_rot_mat(edge_distance_vec):
-    """
-    get rotating matrix from edge distance vector
+    r"""
+    Initialize the rotation matrix from the edge distance vector.
+
+    Args:
+        edge_distance_vec (Tensor): Edge distance vector with shape (batch_size, 3).
+
+    Returns:
+        Tensor, Rotation matrix with shape (batch_size, 3, 3).
+
+    Examples:
+        >>> import mindspore as ms
+        >>> edge_vec = ms.Tensor([[1.0, 0.0, 0.0],
+        ...                       [0.0, 1.0, 0.0],
+        ...                       
[0.0, 0.0, 1.0]])
+        >>> rot_mat = init_edge_rot_mat(edge_vec)
+        >>> print(rot_mat.shape)
+        (3, 3, 3)
     """
     epsilon = 0.00000001
     edge_vec_0 = edge_distance_vec
diff --git a/mindscience/e3nn/so2_conv/so2.py b/mindscience/e3nn/so2_conv/so2.py
index 0c23b319e..d5d431f80 100644
--- a/mindscience/e3nn/so2_conv/so2.py
+++ b/mindscience/e3nn/so2_conv/so2.py
@@ -20,25 +20,9 @@ from mindspore import ops, nn
 from mindscience.e3nn.o3 import Irreps


-class Silu(nn.Cell):
-    """
-    silu activation class
-    """
-
-    def __init__(self):
-        super().__init__()
-        self.sigmoid = nn.Sigmoid()
-
-    def construct(self, x):
-        """
-        silu activation class construct process
-        """
-        return ops.mul(x, self.sigmoid(x))
-
-
-class SO2MConvolution(nn.Cell):
-    """
-    SO2 Convolution subnetwork
+class _SO2MConvolution(nn.Cell):
+    r"""
+    SO2 Convolution subnetwork for processing complex-valued features on the circle group SO(2).
     """

     def __init__(self, in_channels, out_channels):
@@ -66,8 +50,45 @@ class SO2MConvolution(nn.Cell):


 class SO2Convolution(nn.Cell):
-    """
-    SO2 Convolution network
+    r"""
+    SO(2)-equivariant convolution layer for complex-valued features on the circle group.
+
+    This layer maps between two `Irreps` spaces that describe how the inputs/outputs
+    transform under planar rotations. It keeps the m-quantum number (the index that
+    labels the SO(2) irreducible representations) diagonal, so that each m-block is
+    processed independently. For :math:`m = 0` (scalar part) a real dense layer is used;
+    for :math:`m > 0` a complex-valued :math:`1\times 1` convolution (implemented as a real :math:`2\times 2` weight
+    matrix acting on the real/imaginary parts) is applied.
+
+    Args:
+        irreps_in (str or Irreps):
+            Input irreps, e.g. ``"3x0e + 2x1o"`` (3 scalars + 2 vectors).
+        irreps_out (str or Irreps):
+            Output irreps, e.g. ``"5x0e + 1x1o"``.
+
+    Inputs:
+        - **x** (list[Tensor]): List of real tensors, each of shape ``(batch, mul, 2l+1)`` representing the
+          complex-valued SO(2) features for each irrep of order ``l``.
+          The last dimension indexes the magnetic quantum number ``m = -l ... +l``.
+        - **x_edge** (Tensor): Real tensor of shape ``(edges, features)`` containing edge (radial) attributes
+          that are broadcast and combined with the SO(2) features during convolution.
+
+    Outputs:
+        - **output** (tuple[Tensor]): A tuple of real tensors, one for each irrep in ``irreps_out``.
+          Each tensor has shape ``(batch, mul, 2l+1)`` and contains the complex-valued
+          SO(2) features for the corresponding irrep of order ``l``. The last dimension
+          indexes the magnetic quantum number ``m = -l ... +l``.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> from mindscience.e3nn.so2_conv import SO2Convolution
+        >>> conv = SO2Convolution("2x0e + 1x1o", "2x0e + 1x1o")
+        >>> x = [ms.ops.randn(4, 2, 1),
+        ... 
ms.ops.randn(4, 1, 3)] + >>> x_edge = ms.ops.randn(4, 10) + >>> out = conv(x, x_edge) + >>> len(out), out[0].shape, out[1].shape + (2, (4, 2, 1), (4, 1, 3)) """ def __init__(self, irreps_in, irreps_out): @@ -83,9 +104,9 @@ class SO2Convolution(nn.Cell): for mulir in self.irreps_out: self.max_order_out = max(self.max_order_out, mulir.ir.l) - self.m_shape_dict_in, self.irreps_in1_length = self.get_m_info( + self.m_shape_dict_in, self.irreps_in1_length = self._get_m_info( self.irreps_in1, self.max_order_in) - self.m_shape_dict_out, self.irreps_out_length = self.get_m_info( + self.m_shape_dict_out, self.irreps_out_length = self._get_m_info( self.irreps_out, self.max_order_out) self.fc_m0 = nn.Dense(self.m_shape_dict_in.get(0, None), @@ -98,7 +119,7 @@ class SO2Convolution(nn.Cell): for i in range(self.global_max_order): if i == 0: continue - so2_m_convolution = SO2MConvolution(self.m_shape_dict_in.get(i, None), + so2_m_convolution = _SO2MConvolution(self.m_shape_dict_in.get(i, None), self.m_shape_dict_out.get(i, None)) self.so2_m_conv.append(so2_m_convolution) @@ -110,7 +131,7 @@ class SO2Convolution(nn.Cell): value = mulir.mul self.irreps_out_data.append((key, value)) - def get_m_info(self, irreps, max_order): + def _get_m_info(self, irreps, max_order): """ helper function to get m_info """ @@ -134,7 +155,7 @@ class SO2Convolution(nn.Cell): return m_shape_dict, len(irreps) - def get_m_list_merge(self, x): + def _get_m_list_merge(self, x): """ helper function to get m_list_merge """ @@ -165,7 +186,7 @@ class SO2Convolution(nn.Cell): """ ##################### _m_primary ######################### num_edges = ops.shape(x_edge)[0] - m_list_merge = self.get_m_list_merge(x) + m_list_merge = self._get_m_list_merge(x) # ##################### finish _m_primary ######################### # radial function out = [] diff --git a/mindscience/e3nn/so2_conv/so3.py b/mindscience/e3nn/so2_conv/so3.py index 0ffae5a5f..4baf19620 100644 --- a/mindscience/e3nn/so2_conv/so3.py +++ b/mindscience/e3nn/so2_conv/so3.py @@ -16,7 +16,7 @@ so3 file """ import mindspore as ms -from mindspore import nn, ops, vmap, jit_class +from mindspore import ops, vmap, jit_class from mindspore.numpy import tensordot from mindscience.e3nn import o3 from mindscience.e3nn.o3 import Irreps @@ -24,36 +24,20 @@ from mindscience.e3nn.o3 import Irreps from .wigner import wigner_D -class SO3Embedding(nn.Cell): - """ - SO3Embedding class - """ - - def __init__(self): - self.embedding = None - - def _rotate(self, so3rotation, lmax_list, max_list): - """ - SO3Embedding rotate - """ - embedding_rotate = so3rotation[0].rotate(self.embedding, lmax_list[0], - max_list[0]) - self.embedding = embedding_rotate - - def _rotate_inv(self, so3rotation): - """ - SO3Embedding rotate inverse - """ - embedding_rotate = so3rotation[0].rotate_inv(self.embedding, - self.lmax_list[0], - self.mmax_list[0]) - self.embedding = embedding_rotate - - @jit_class class SO3Rotation: """ - SO3_Rotation class + Class for handling SO(3) rotations of spherical-harmonic irreps. + + Args: + lmax (int): Maximum angular momentum to be considered. + irreps_in (Irreps or str): Input irreps specification. + irreps_out (Irreps or str): Output irreps specification. 
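A hedged end-to-end sketch tying `SO3Rotation` to `init_edge_rot_mat`, which this patch re-exports from the same subpackage. The shapes follow the docstrings in this patch; the random edge vectors generated with `ops.randn` are an illustrative assumption, not part of the library API:

```python
from mindspore import ops
from mindscience.e3nn.so2_conv import SO3Rotation, init_edge_rot_mat

edge_vec = ops.randn(4, 3)                    # 4 illustrative edge distance vectors
rot_mat = init_edge_rot_mat(edge_vec)         # (4, 3, 3) rotation matrices
rot = SO3Rotation(lmax=2, irreps_in="1x0e + 1x1o", irreps_out="1x1o")
wigner, wigner_inv = rot.set_wigner(rot_mat)  # Wigner-D blocks for l = 0..2
```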
+
+    Examples:
+        >>> # rot_mat3x3: (N, 3, 3) rotation matrices; embedding: (N, irreps_in.dim) features
+        >>> rot = SO3Rotation(lmax=2, irreps_in="1x0e + 1x1o", irreps_out="1x1o")
+        >>> wigner, wigner_inv = rot.set_wigner(rot_mat3x3)
+        >>> rotated = rot.rotate(embedding, wigner)
     """

     def __init__(self, lmax, irreps_in, irreps_out):
@@ -65,7 +49,16 @@ class SO3Rotation:
     @staticmethod
     def narrow(inputs, axis, start, length):
         """
-        SO3_Rotation narrow class
+        Narrow (slice) a tensor along a specified axis.
+
+        Args:
+            inputs (Tensor): The tensor to be sliced.
+            axis (int): The axis along which to perform the slice.
+            start (int): The starting index of the slice.
+            length (int): The number of elements to include in the slice.
+
+        Returns:
+            Tensor, The sliced tensor.
         """
         begins = [0] * inputs.ndim
         begins[axis] = start
@@ -79,7 +72,15 @@ class SO3Rotation:
     @staticmethod
     def rotation_to_wigner_d_matrix(edge_rot_mat, start_lmax, end_lmax):
         """
-        SO3_Rotation rotation_to_wigner_d_matrix
+        Convert a batch of 3x3 rotation matrices into Wigner-D matrices for the
+        specified range of angular momenta.
+
+        Args:
+            edge_rot_mat (Tensor): Batch of SO(3) rotation matrices of shape (..., 3, 3).
+            start_lmax (int): Minimum angular momentum to include.
+            end_lmax (int): Maximum angular momentum to include.
+
+        Returns:
+            list[Tensor], List of Wigner-D matrices for l = start_lmax … end_lmax, each of shape (..., 2l+1, 2l+1).
         """
         x = edge_rot_mat @ ms.Tensor([0.0, 1.0, 0.0])
         alpha, beta = o3.xyz_to_angles(x)
@@ -96,7 +97,15 @@ class SO3Rotation:

     def set_wigner(self, rot_mat3x3):
         """
-        SO3_Rotation set_wigner
+        Compute Wigner-D matrices and their inverses from a batch of 3x3 rotation matrices.
+
+        Args:
+            rot_mat3x3 (Tensor): Batch of SO(3) rotation matrices of shape (..., 3, 3).
+
+        Returns:
+            tuple[list[Tensor], list[Tensor]], A tuple containing two lists:
+            - wigner: List of Wigner-D matrices for l = 0 … lmax, each of shape (..., 2l+1, 2l+1).
+            - wigner_inv: List of transposed (inverse) Wigner-D matrices for l = 0 … lmax, same shapes.
         """
         wigner = self.rotation_to_wigner_d_matrix(rot_mat3x3, 0, self.lmax)
         wigner_inv = []
@@ -107,7 +116,14 @@ class SO3Rotation:

     def rotate(self, embedding, wigner):
         """
-        SO3_Rotation rotate
+        Rotate an embedding tensor according to the provided Wigner-D matrices.
+
+        Args:
+            embedding (Tensor): Input tensor of shape (..., irreps_in.dim) containing the
+                spherical-harmonic coefficients to be rotated.
+            wigner (tuple[Tensor]): Tuple of Wigner-D matrices for l = 0 … lmax, each of shape (..., 2l+1, 2l+1).
+
+        Returns:
+            tuple[Tensor], Tuple of rotated tensors, one per irrep in irreps_in, each of shape (..., mul, 2l+1).
         """
         res = []
         batch_shape = embedding.shape[:-1]
@@ -133,7 +149,14 @@ class SO3Rotation:

     def rotate_inv(self, embedding, wigner_inv):
         """
-        SO3_Rotation rotate_inv
+        Apply the inverse SO(3) rotation to an embedding tensor using the provided inverse Wigner-D matrices.
+
+        Args:
+            embedding (tuple[Tensor]): Tuple of tensors, one per irrep in irreps_out, each of shape (..., mul, 2l+1).
+            wigner_inv (tuple[Tensor]): Tuple of inverse (transposed) Wigner-D matrices for l = 0 … lmax,
+                each of shape (..., 2l+1, 2l+1).
+
+        Returns:
+            Tensor, The rotated-back tensor of shape (..., irreps_out.dim) obtained by
+            concatenating the inverse-rotated irreps.
""" res = [] batch_shape = embedding[0].shape[0:1] diff --git a/mindscience/e3nn/so2_conv/wigner.py b/mindscience/e3nn/so2_conv/wigner.py index 85d5153c7..83a4957ac 100644 --- a/mindscience/e3nn/so2_conv/wigner.py +++ b/mindscience/e3nn/so2_conv/wigner.py @@ -32,7 +32,20 @@ with open(pkl_path, 'rb') as f: def wigner_D(lv, alpha, beta, gamma): """ - wigner_D function that complies with mindspore.jit compilation + Compute the Wigner D-matrix for the given angular momentum level and Euler angles. + This implementation reads from the precomputed data and is compatible with ms.jit compilations + + Args: + lv (int): Angular momentum level (l value). + alpha (Tensor): First Euler angle (rotation around z-axis). + beta (Tensor): Second Euler angle (rotation around y-axis). + gamma (Tensor): Third Euler angle (rotation around z-axis). + + Returns: + Tensor, The Wigner D-matrix of shape (..., 2*lv+1, 2*lv+1). + + Raises: + NotImplementedError: If the requested l value exceeds the maximum supported level. """ if not lv < len(jd): raise NotImplementedError( diff --git a/mindscience/e3nn/utils/batch_dot.py b/mindscience/e3nn/utils/batch_dot.py index 06dfbd2ce..fa2991e01 100644 --- a/mindscience/e3nn/utils/batch_dot.py +++ b/mindscience/e3nn/utils/batch_dot.py @@ -113,7 +113,7 @@ def batch_dot(x1, x2, axes=None): If None, defaults to the last axis of x1 and second-to-last axis of x2. Returns: - Tensor: The batch dot product result. + Tensor, The batch dot product result. Raises: ValueError: If batch sizes of x1 and x2 don't match. diff --git a/mindscience/e3nn/utils/func.py b/mindscience/e3nn/utils/func.py index 22bdf38ad..5c47a74e9 100644 --- a/mindscience/e3nn/utils/func.py +++ b/mindscience/e3nn/utils/func.py @@ -52,7 +52,7 @@ def _to_tensor(arg): def broadcast_shapes(*shapes): r""" - Return the broadcast shape of the shapes of input tensors. + Return the broadcasted shape of the shapes of input tensors. Args: shapes (tuple): Any number of shapes of tensors to be broadcasted. @@ -92,7 +92,7 @@ def broadcast_shapes(*shapes): def broadcast_tensors(*tensors): r""" - Broadcasts the given tensors. + Broadcasts the given tensors to a common shape. Args: tensors (Tensor): Any number of tensors of the same type. @@ -118,7 +118,7 @@ def broadcast_args(*args): Broadcasts the given data with multiple types. Args: - *arg (Union[Tensor[float32], list[float], tuple[float], + arg (Union[Tensor[float32], list[float], tuple[float], ndarray[np.float32], float]): Any number of data to be broadcasted. Returns: @@ -158,7 +158,18 @@ def _expand_last_dims(x): def narrow(inputs, axis, start, length): - """tmp narrow API""" + """ + Narrow (slice) a tensor along a specified axis. + + Args: + inputs (Tensor): The tensor to be sliced. + axis (int): The axis along which to perform the slice. + start (int): The starting index of the slice. + length (int): The number of elements to include in the slice. + + Returns: + Tensor, The sliced tensor. + """ begins = [0] * inputs.ndim begins[axis] = start sizes = [i for i in inputs.shape] diff --git a/mindscience/e3nn/utils/initializer.py b/mindscience/e3nn/utils/initializer.py index 755538ea5..dacf81cdf 100644 --- a/mindscience/e3nn/utils/initializer.py +++ b/mindscience/e3nn/utils/initializer.py @@ -33,7 +33,6 @@ class Uniform(Initializer): Args: scale (float): The bound of the Uniform distribution. Default: 1.0. 
- Examples: >>> import mindspore >>> from mindspore.common.initializer import initializer, Uniform @@ -61,7 +60,7 @@ def renormal_initializer(init_method): 'he_uniform', 'he_normal', 'xavier_uniform'. Returns: - Initializer: The corresponding initializer instance. + Initializer, The corresponding initializer instance. Raises: ValueError: If the initialization method is not supported. diff --git a/mindscience/e3nn/utils/ncon.py b/mindscience/e3nn/utils/ncon.py index f47d64bab..ca92fc409 100644 --- a/mindscience/e3nn/utils/ncon.py +++ b/mindscience/e3nn/utils/ncon.py @@ -20,16 +20,16 @@ from mindspore import ops, nn, vmap from mindspore.numpy import tensordot, trace, expand_dims -def list_to_tuple(lst): - """list_to_tuple""" - return tuple(list_to_tuple(item) if isinstance(item, list) else item for item in lst) +def _list_to_tuple(lst): + """list to tuple""" + return tuple(_list_to_tuple(item) if isinstance(item, list) else item for item in lst) -def nest_vmap(fn, in_list, out_list, pt): +def _nest_vmap(fn, in_list, out_list, pt): """nest vmap function""" if pt == len(in_list) - 1: return vmap(fn, in_list[pt], out_list[pt]) - return vmap(nest_vmap(fn, in_list, out_list, pt + 1), in_list[pt], out_list[pt]) + return vmap(_nest_vmap(fn, in_list, out_list, pt + 1), in_list[pt], out_list[pt]) def _create_order(con_list): @@ -225,10 +225,10 @@ def _process_commands(con_list): if not con_list[0]: return conmmands, operators - do_ndot(con_list, conmmands, operators, order, batch_legs) + _do_ndot(con_list, conmmands, operators, order, batch_legs) # do Hadamard(alike) product - do_hadamard(con_list, conmmands, operators) + _do_hadamard(con_list, conmmands, operators) # do outer product for i, con in enumerate(con_list): @@ -263,7 +263,7 @@ def _process_commands(con_list): return conmmands, operators -def do_ndot(con_list, conmmands, operators, order, batch_legs): +def _do_ndot(con_list, conmmands, operators, order, batch_legs): """do_ndot Args: @@ -325,9 +325,9 @@ def do_ndot(con_list, conmmands, operators, order, batch_legs): ndot_leg_inds = ndot_leg_inds[0] if len(ndot_leg_inds) == 1 else np.array( ndot_leg_inds).transpose().tolist() - conmmands.append(_make_dict('ndot', inds, list_to_tuple(ndot_leg_inds), batch_leg_inds)) + conmmands.append(_make_dict('ndot', inds, _list_to_tuple(ndot_leg_inds), batch_leg_inds)) operators.append( - nest_vmap(tensordot, batch_leg_inds, [0] * len(batch_leg_inds), 0) if batch_leg_inds else tensordot) + _nest_vmap(tensordot, batch_leg_inds, [0] * len(batch_leg_inds), 0) if batch_leg_inds else tensordot) # merge two con_list for leg in con_list[inds[1]]: @@ -339,7 +339,7 @@ def do_ndot(con_list, conmmands, operators, order, batch_legs): order = order[:-1] -def do_hadamard(con_list, conmmands, operators): +def _do_hadamard(con_list, conmmands, operators): """do_hadamard Args: @@ -364,7 +364,7 @@ def do_hadamard(con_list, conmmands, operators): hadamard_legs = [[], []] con_raw = deepcopy(con_list) - handle_inds(con_list, out_list, hadamard_legs) + _handle_inds(con_list, out_list, hadamard_legs) expand_axis = deepcopy(hadamard_legs) for i, axis in enumerate(expand_axis): @@ -388,7 +388,7 @@ def do_hadamard(con_list, conmmands, operators): operators.append([ops.permute, ops.tile, ops.mul, expand_dims]) -def handle_inds(con_list, out_list, hadamard_legs): +def _handle_inds(con_list, out_list, hadamard_legs): """handle_inds""" for i, con in enumerate(con_list): if con: @@ -412,20 +412,17 @@ class Ncon(nn.Cell): The negative indices indicate the dimensions to be keeped (as 
batch dimensions).

     Inputs:
-        - **input** (List[Tensor]) - Tensor List.
+        - **ten_list** (List[Tensor]) - List of input tensors.

     Outputs:
-        - **output** (Tensor) - The shape of tensor depends on the input and the computation process.
+        - **ten_list[0]** (Tensor) - The shape of tensor depends on the input and the computation process.

     Raises:
         ValueError: If the number of commands is not match the number of operations.

-    Supported Platforms:
-        ``Ascend``
-
     Examples:
         >>> from mindspore import ops
-        >>> from mindchemistry.e3.utils import Ncon
+        >>> from mindscience.e3nn.utils import Ncon
         Trace of a matrix:
         >>> a = ops.ones((3, 3))
         >>> Ncon([[1, 1]])([a])
@@ -511,189 +508,3 @@ class Ncon(nn.Cell):
         for d in self.commands:
             s += str(d) + '\n'
         return s
-
-
-def test_other():
-    """test_other"""
-    ncon = Ncon([[5, -1, 1, 4, 3, -2], [3, -2, -1, 4, 2], [2, -3], [-3, -4]])
-    v1 = ops.ones((3, 1, 3, 4, 5, 2))
-    v2 = ops.ones((5, 2, 1, 4, 6))
-    v3 = ops.ones((6, 3))
-    v4 = ops.ones((3, 4))
-    print(ncon)
-    out = ncon([v1, v2, v3, v4])
-    print(out.shape)
-
-    ncon = Ncon([[-1, 2], [-1, 1], [2, 1, -2]])
-    v1 = ops.ones((20, 50))
-    v2 = ops.ones((20, 2))
-    v3 = ops.ones((50, 2, 7))
-    print(ncon)
-    out = ncon([v1, v2, v3])
-    print(out.shape)
-
-    ncon = Ncon([[-1, -2, 1], [-1, 1]])
-    v1 = ops.ones((3, 4, 5))
-    v2 = ops.ones((3, 5))
-    print(ncon)
-    out = ncon([v1, v2])
-    print(out.shape)
-
-
-def test_diagonal():
-    """test_diagonal"""
-    ncon = Ncon([[-1, -1]])
-    v1 = ops.ones((3, 3))
-    print(ncon)
-    out = ncon([v1])
-    print(out.shape)
-    print(out)
-
-
-def test_outer():
-    """test_other"""
-    ncon = Ncon([[-1], [-2]])
-    v1 = ops.ones((2))
-    v2 = ops.ones((3))
-    print(ncon)
-    out = ncon([v1, v2])
-    print(out.shape)
-    print(out)
-
-
-def test_outer_multi_input():
-    """test_other"""
-    ncon = Ncon([[-1], [-2], [-3]])
-    v1 = ops.ones((2))
-    v2 = ops.ones((3))
-    v3 = ops.ones((4))
-    print(ncon)
-    out = ncon([v1, v2, v3])
-    print(out.shape)
-    print(out)
-
-
-def test_ndot():
-    """test_other"""
-    ncon = Ncon([[-1, -2, 1], [-1, 1]])
-    v1 = ops.ones((3, 4, 5))
-    v2 = ops.ones((3, 5))
-    print(ncon)
-    out = ncon([v1, v2])
-    print(out.shape)
-    print(out)
-
-
-def test_ndot_2():
-    """test_other"""
-    ncon = Ncon([[-1, -2, 1, 2], [-1, 1, 2]])
-    v1 = ops.ones((3, 4, 5, 6))
-    v2 = ops.ones((3, 5, 6))
-    print(ncon)
-    out = ncon([v1, v2])
-    print(out.shape)
-    print(out)
-
-
-def test_hadamard():
-    """test_hadamard"""
-    a = np.arange(6).reshape((2, 3))
-    b = np.arange(6).reshape((2, 3))
-    print(a)
-    print(b)
-    einstr = f"zu,zu->zu"
-    d = np.einsum(einstr, a, b)
-    print(d)
-    print(d.shape)
-
-    ma = ms.Tensor(a, dtype=ms.float32)
-    mb = ms.Tensor(b, dtype=ms.float32)
-    ncon = Ncon([[-1, -2], [-1, -2]])
-    print(ncon)
-    md = ncon([ma, mb])
-    print(md.shape)
-    print(np.allclose(md.asnumpy(), d))
-
-
-def test_hadamard_alike():
-    """test_hadamard_alike"""
-    a = np.arange(8).reshape((2, 4))
-    b = np.arange(24).reshape((2, 3, 4))
-    print(a)
-    print(b)
-    einstr = f"zi,zui->zui"
-    d = np.einsum(einstr, a, b)
-    print(d)
-    print(d.shape)
-
-    ma = ms.Tensor(a, dtype=ms.float32)
-    mb = ms.Tensor(b, dtype=ms.float32)
-    ncon = Ncon([[-1, -3], [-1, -2, -3]])
-    print(ncon)
-    md = ncon([ma, mb])
-    print(md.shape)
-    print(np.allclose(md.asnumpy(), d))
-
-
-def test_hadamard_with_outer():
-    """test_hadamard_with_outer"""
-    a = np.arange(24).reshape((2, 3, 4))
-    b = np.arange(30).reshape((2, 3, 5))
-    print(f"a:\n {a}")
-    print(f"b:\n {b}")
-
-    einstr = f"zui,zuj->zuij"
-
-    d = np.einsum(einstr, a, b)
-    print(f"d:\n {d}")
-    print(f"d.shape:\n {d.shape}")
-
-    ma = 
ms.Tensor(a, dtype=ms.float32)
-    mb = ms.Tensor(b, dtype=ms.float32)
-
-    ncon = Ncon([[-1, -2, -3], [-1, -2, -4]])
-    print(ncon)
-    md = ncon([ma, mb])
-    print(md.shape)
-    print(np.allclose(md.asnumpy(), d))
-
-
-def test_hadamard_outer_nosequential():
-    """test_hadamard_outer_nosequential"""
-    a = np.arange(8).reshape((2, 4))
-    b = np.arange(30).reshape((2, 5, 3))
-    print(f"a:\n {a}")
-    print(f"b:\n {b}")
-
-    einstr = f"ac,adb->abcd"
-
-    d = np.einsum(einstr, a, b)
-    print(f"d:\n {d}")
-    print(f"d.shape:\n {d.shape}")
-    ma = ms.Tensor(a, dtype=ms.float32)
-    mb = ms.Tensor(b, dtype=ms.float32)
-
-    ncon = Ncon([[-1, -3], [-1, -4, -2]])
-    print(ncon)
-    md = ncon([ma, mb])
-    print(md.shape)
-    print(np.allclose(md.asnumpy(), d))
-
-
-def test_sum():
-    """test_other"""
-    ncon = Ncon([[1, 2]])
-    v1 = ops.ones((2, 3))
-    print(ncon)
-    out = ncon([v1])
-    print(out.shape)
-    print(out)
-
-
-if __name__ == '__main__':
-    import mindspore as ms
-
-    ms.set_context(device_target="GPU", device_id=4, mode=ms.GRAPH_MODE, save_graphs=False)
-    np.random.seed(123)
-
-    test_hadamard_outer_nosequential()
diff --git a/mindscience/e3nn/utils/radius.py b/mindscience/e3nn/utils/radius.py
index b6cf2cd5f..5b1b54a0c 100644
--- a/mindscience/e3nn/utils/radius.py
+++ b/mindscience/e3nn/utils/radius.py
@@ -44,20 +44,15 @@ def radius(x, y, r, batch_x=None, batch_y=None, max_num_neighbors=32):
         max_num_neighbors (int): The maximum number of neighbors to return for each element in `y`.
             Dufault: ``32``.

     Returns:
-        edge_index (numpy.ndarray) - including edges of source and destination.
-
-        batch_x (numpy.ndarray) - batch vector of x.
-
-        batch_y (numpy.ndarray) - batch vector of y.
+        edge_index (numpy.ndarray): source and destination indices of the edges.
+        batch_x (numpy.ndarray): batch vector of x.
+        batch_y (numpy.ndarray): batch vector of y.

     Raises:
         ValueError: If the last dimension of `x` and `y` do not match.

-    Supported Platforms:
-        ``Ascend``
-
     Examples:
-        >>> from mindchemistry.e3.utils import radius
+        >>> from mindscience.e3nn.utils import radius
         >>> import numpy as np
         >>> np.random.seed(1)
         >>> x = np.random.random((5, 12, 3))
@@ -108,18 +103,14 @@ def radius_graph(x, r, batch=None, loop=False, max_num_neighbors=32, flow='sourc
             message passing. Dufault: ``'source_to_target'``.

     Returns:
-        edge_index (ndarray) - including edges of source and destination.
-
-        batch (ndarray) - batch vector.
+        edge_index (ndarray): source and destination indices of the edges.
+        batch (ndarray): batch vector.

     Raises:
         ValueError: If `flow` is not in {'source_to_target', 'target_to_source'}.

-    Supported Platforms:
-        ``Ascend``
-
     Examples:
-        >>> from mindchemistry.e3.utils import radius_graph
+        >>> from mindscience.e3nn.utils import radius_graph
        >>> import numpy as np
         >>> np.random.seed(1)
         >>> x = np.random.random((5, 12, 3))
@@ -152,20 +143,16 @@ def radius_full(x, y, batch_x=None, batch_y=None):
         batch_y (ndarray): batch vector of y. If it is none, then calculate based on y and return. Default: ``None``.

     Returns:
-        edge_index (numpy.ndarray) - including edges of source and destination.
-
-        batch_x (numpy.ndarray) - batch vector of x.
-
-        batch_y (numpy.ndarray) - batch vector of y.
+        edge_index (numpy.ndarray): source and destination indices of the edges.
+        batch_x (numpy.ndarray): batch vector of x.
+        batch_y (numpy.ndarray): batch vector of y.

     Raises:
         ValueError: If the last dimension of `x` and `y` do not match.
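To complement the doctests in this file, a hedged sketch of the neighbor-graph helper. It relies only on the documented signature `radius_graph(x, r, ...)`; the exact edge-index layout is assumed from the examples above:

```python
import numpy as np
from mindscience.e3nn.utils import radius_graph

np.random.seed(1)
x = np.random.random((2, 6, 3))            # (batch, num_points, 3)
edge_index, batch = radius_graph(x, 0.4)   # neighbors within radius 0.4, no self-loops
print(edge_index.shape, batch.shape)       # e.g. (2, num_edges), (12,) -- assumed layout
```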
- Supported Platforms:
-        ``Ascend``

     Examples:
-        >>> from mindchemistry.e3.utils import radius_full
+        >>> from mindscience.e3nn.utils import radius_full
         >>> from mindspore import ops, Tensor
         >>> x = Tensor(ops.ones((5, 12, 3)))
         >>> edge_index, batch_x, batch_y = radius_full(x, x)
@@ -215,18 +202,14 @@ def radius_graph_full(x, batch=None, loop=False, flow='source_to_target'):
             message passing. Dufault: ``'source_to_target'``.

     Returns:
-        edge_index (ndarray) - including edges of source and destination.
-
-        batch (ndarray) - batch vector.
+        edge_index (ndarray): source and destination indices of the edges.
+        batch (ndarray): batch vector.

     Raises:
         ValueError: If `flow` is not in {'source_to_target', 'target_to_source'}.

-    Supported Platforms:
-        ``Ascend``
-
     Examples:
-        >>> from mindchemistry.e3.utils import radius_graph_full
+        >>> from mindscience.e3nn.utils import radius_graph_full
         >>> from mindspore import ops, Tensor
         >>> x = Tensor(ops.ones((5, 12, 3)))
         >>> edge_index, batch = radius_graph_full(x)
diff --git a/tests/e3nn/o3/test_sub.py b/tests/e3nn/o3/test_sub.py
index fd8d1aabb..cf8cb6564 100644
--- a/tests/e3nn/o3/test_sub.py
+++ b/tests/e3nn/o3/test_sub.py
@@ -39,10 +39,7 @@ from mindscience.e3nn.o3.sub import (
     Linear,
     LinearBias,
     TensorSquare,
-    prod,
-    _prod,
     _sum_tensors_withbias,
-    Instruction
 )


@@ -92,16 +89,6 @@ class TestTensorProductClasses:
 class TestUtilityFunctions:
     """Test utility functions."""

-    def test_prod_functions(self):
-        """Test product computation functions."""
-        # Test prod function
-        assert prod([2, 3, 4]) == 24
-        assert prod([]) == 1
-
-        # Test _prod function
-        assert _prod((2, 3, 4)) == 24
-        assert _prod(()) == 1
-
     def test_tensor_utilities(self):
         """Test tensor utility functions."""
         # Test _sum_tensors_withbias
@@ -112,10 +99,6 @@ class TestUtilityFunctions:
         expected = np.array([5, 7, 9])
         assert np.allclose(result.asnumpy(), expected)

-        # Test Instruction NamedTuple
-        instr = Instruction(i_in=0, i_out=1, path_shape=(2, 3), path_weight=1.5)
-        assert instr.i_in == 0 and instr.i_out == 1
-

 class TestEdgeCases:
     """Test edge cases and error handling."""
--
Gitee