From 534af02505c9ef80e365c68183e552beb94ecea7 Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Wed, 16 Apr 2025 16:09:55 -0600 Subject: [PATCH 01/17] checkpoint --- docs/source/conf.py | 1 - docs/source/tensor.rst | 8 ++------ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index dee16974..0c091042 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -199,4 +199,3 @@ # Autodoc settings autoclass_content = "class" autodoc_member_order = "bysource" -autodoc_class_signature = "separated" diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst index f27fe87e..b527ea56 100644 --- a/docs/source/tensor.rst +++ b/docs/source/tensor.rst @@ -1,19 +1,15 @@ Dense Tensor (:class:`tensor`) ------------------------------ -.. note:: - - Classes and functions defined in ``tensor.py`` have been promoted to the ``pyttb`` namespace. For *all* examples in this document, the following module imports are assumed:: >>> import pyttb as ttb >>> import numpy as np -.. automodule:: pyttb.tensor +.. autoclass:: pyttb.tensor :members: :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: + :exclude-members: __dict__, __weakref__, __slots__, __init__ .. autofunction:: pyttb.tenones .. autofunction:: pyttb.tenzeros From e1301b5a8513896ba4fbb06f8209a73f9ac8c5f6 Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Thu, 10 Jul 2025 11:29:17 -0600 Subject: [PATCH 02/17] First attempt at using autosummary in sphinx documentation --- docs/source/_templates/autosummary/class.rst | 30 ++++ docs/source/_templates/autosummary/module.rst | 23 +++ docs/source/_templates/base.rst | 5 + docs/source/conf.py | 7 +- docs/source/functionality.rst | 6 - docs/source/index.rst | 2 +- docs/source/ktensor.rst | 11 -- docs/source/pyttb_utils.rst | 8 -- docs/source/reference.rst | 24 +++- docs/source/sptenmat.rst | 11 -- docs/source/sptensor.rst | 14 -- docs/source/sumtensor.rst | 12 -- docs/source/tenmat.rst | 11 -- docs/source/tensor.rst | 18 --- docs/source/tensor_classes.rst | 16 +-- docs/source/ttensor.rst | 11 -- pyttb/tensor.py | 135 ++++++++---------- 17 files changed, 153 insertions(+), 191 deletions(-) create mode 100644 docs/source/_templates/autosummary/class.rst create mode 100644 docs/source/_templates/autosummary/module.rst create mode 100644 docs/source/_templates/base.rst delete mode 100644 docs/source/functionality.rst delete mode 100644 docs/source/ktensor.rst delete mode 100644 docs/source/pyttb_utils.rst delete mode 100644 docs/source/sptenmat.rst delete mode 100644 docs/source/sptensor.rst delete mode 100644 docs/source/sumtensor.rst delete mode 100644 docs/source/tenmat.rst delete mode 100644 docs/source/tensor.rst delete mode 100644 docs/source/ttensor.rst diff --git a/docs/source/_templates/autosummary/class.rst b/docs/source/_templates/autosummary/class.rst new file mode 100644 index 00000000..0bae7ba5 --- /dev/null +++ b/docs/source/_templates/autosummary/class.rst @@ -0,0 +1,30 @@ +{{ objname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + :members: + :special-members: + :exclude-members: __dict__, __weakref__, __slots__, __init__, __deepcopy__, __hash__ + + {% block attributes %} + {% if attributes %} + .. rubric:: Attributes + .. autosummary:: + :toctree: + {% for item in attributes %} + {{ name }}.{{ item }} + {% endfor %} + {% endif %} + {% endblock %} + + {% block methods %} + {% if methods %} + .. rubric:: Methods + .. 
autosummary:: + :toctree: + {% for item in methods %} + {{ name }}.{{ item }} + {% endfor %} + {% endif %} + {% endblock %} \ No newline at end of file diff --git a/docs/source/_templates/autosummary/module.rst b/docs/source/_templates/autosummary/module.rst new file mode 100644 index 00000000..a00d76e8 --- /dev/null +++ b/docs/source/_templates/autosummary/module.rst @@ -0,0 +1,23 @@ +{{ fullname | escape | underline }} + +.. rubric:: Description +.. automodule:: {{ fullname }} +.. currentmodule:: {{ fullname }} + +{% if classes %} +.. rubric:: Classes +.. autosummary:: + :toctree: + {% for class in classes %} + {{ class }} + {% endfor %} +{% endif %} + +{% if functions %} +.. rubric:: Functions +.. autosummary:: + :toctree: + {% for function in functions %} + {{ function }} + {% endfor %} +{% endif %} diff --git a/docs/source/_templates/base.rst b/docs/source/_templates/base.rst new file mode 100644 index 00000000..e4862661 --- /dev/null +++ b/docs/source/_templates/base.rst @@ -0,0 +1,5 @@ +{{ objname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. auto{{ objtype }}:: {{ objname }} \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 0c091042..99d95736 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -52,10 +52,11 @@ "sphinx.ext.mathjax", "sphinx.ext.viewcode", "sphinx.ext.napoleon", + "sphinx.ext.autosummary", "myst_nb", ] -autodoc_preserve_defaults = True +#autodoc_preserve_defaults = True myst_enable_extensions = [ "amsmath", @@ -197,5 +198,7 @@ # -- Extension configuration ------------------------------------------------- # Autodoc settings -autoclass_content = "class" +autoclass_content = "both" autodoc_member_order = "bysource" +autosummary_generate = True +autosummary_generate_overwrite = False \ No newline at end of file diff --git a/docs/source/functionality.rst b/docs/source/functionality.rst deleted file mode 100644 index a8e30003..00000000 --- a/docs/source/functionality.rst +++ /dev/null @@ -1,6 +0,0 @@ -:orphan: - -Functionality -************* - -In construction \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index 930010c1..4e87fb06 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -71,7 +71,7 @@ This is relevant for indexing. In the future we hope to extend support for both. getting_started.rst -Python API +Python Reference ================ .. toctree:: diff --git a/docs/source/ktensor.rst b/docs/source/ktensor.rst deleted file mode 100644 index 66e23b5c..00000000 --- a/docs/source/ktensor.rst +++ /dev/null @@ -1,11 +0,0 @@ -Kruskal Tensor (:class:`ktensor`) ---------------------------------- -.. note:: - - The ``ktensor`` class defined in ``ktensor.py`` has been promoted to the ``pyttb`` namespace. - -.. autoclass:: pyttb.ktensor - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file diff --git a/docs/source/pyttb_utils.rst b/docs/source/pyttb_utils.rst deleted file mode 100644 index 7cc02f4c..00000000 --- a/docs/source/pyttb_utils.rst +++ /dev/null @@ -1,8 +0,0 @@ -Helper Functions (:mod:`pyttb_utils`) -------------------------------------- - -.. 
automodule:: pyttb.pyttb_utils - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file diff --git a/docs/source/reference.rst b/docs/source/reference.rst index 46cab023..f634a618 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -1,8 +1,20 @@ -Reference (:mod:`pyttb`) -======================== +Reference (:class:`pyttb`) +========================== -.. toctree:: - :maxdepth: 2 +.. rubric:: Description +.. automodule:: pyttb +.. currentmodule:: pyttb - tensor_classes.rst - algorithms.rst + +.. rubric:: Classes +.. autosummary:: + :toctree: generated + + pyttb.tensor + pyttb.sptensor + pyttb.ktensor + pyttb.ttensor + pyttb.sumtensor + pyttb.tenmat + pyttb.sptenmat + pyttb.pyttb_utils diff --git a/docs/source/sptenmat.rst b/docs/source/sptenmat.rst deleted file mode 100644 index b9554a53..00000000 --- a/docs/source/sptenmat.rst +++ /dev/null @@ -1,11 +0,0 @@ -Sparse Tensor as Matrix (:class:`sptenmat`) -------------------------------------------- -.. note:: - - The ``sptenmat`` class defined in ``sptenmat.py`` has been promoted to the ``pyttb`` namespace. - -.. autoclass:: pyttb.sptenmat - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file diff --git a/docs/source/sptensor.rst b/docs/source/sptensor.rst deleted file mode 100644 index 638eb8c9..00000000 --- a/docs/source/sptensor.rst +++ /dev/null @@ -1,14 +0,0 @@ -Sparse Tensor (:class:`sptensor`) ---------------------------------- -.. note:: - - Classes and functions defined in ``sptensor.py`` have been promoted to the ``pyttb`` namespace. - -.. autoclass:: pyttb.sptensor - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: - -.. autofunction:: pyttb.sptenrand -.. autofunction:: pyttb.sptendiag \ No newline at end of file diff --git a/docs/source/sumtensor.rst b/docs/source/sumtensor.rst deleted file mode 100644 index d9f793e5..00000000 --- a/docs/source/sumtensor.rst +++ /dev/null @@ -1,12 +0,0 @@ -Sum Tensor (:class:`sumtensor`) -------------------------------- -.. note:: - - The ``sumtensor`` class defined in ``sumtensor.py`` has been promoted to the ``pyttb`` namespace. - - -.. autoclass:: pyttb.sumtensor - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file diff --git a/docs/source/tenmat.rst b/docs/source/tenmat.rst deleted file mode 100644 index b4ea5044..00000000 --- a/docs/source/tenmat.rst +++ /dev/null @@ -1,11 +0,0 @@ -Tensor as Matrix (:class:`tenmat`) ----------------------------------- -.. note:: - - The ``tenmat`` class defined in ``tenmat.py`` has been promoted to the ``pyttb`` namespace. - -.. autoclass:: pyttb.tenmat - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst deleted file mode 100644 index b527ea56..00000000 --- a/docs/source/tensor.rst +++ /dev/null @@ -1,18 +0,0 @@ -Dense Tensor (:class:`tensor`) ------------------------------- - -For *all* examples in this document, the following module imports are assumed:: - - >>> import pyttb as ttb - >>> import numpy as np - -.. autoclass:: pyttb.tensor - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__, __init__ - -.. autofunction:: pyttb.tenones -.. autofunction:: pyttb.tenzeros -.. 
autofunction:: pyttb.tenrand -.. autofunction:: pyttb.tendiag -.. autofunction:: pyttb.teneye diff --git a/docs/source/tensor_classes.rst b/docs/source/tensor_classes.rst index 56fb9f21..cba8faa1 100644 --- a/docs/source/tensor_classes.rst +++ b/docs/source/tensor_classes.rst @@ -4,12 +4,12 @@ Tensor Classes .. toctree:: :maxdepth: 1 - tensor.rst - sptensor.rst - ktensor.rst - ttensor.rst - sumtensor.rst - tenmat.rst - sptenmat.rst - pyttb_utils.rst + generated/pyttb.tensor.rst + generated/pyttb.sptensor.rst + generated/pyttb.ktensor.rst + generated/pyttb.ttensor.rst + generated/pyttb.sumtensor.rst + generated/pyttb.tenmat.rst + generated/pyttb.sptenmat.rst + generated/pyttb.pyttb_utils.rst diff --git a/docs/source/ttensor.rst b/docs/source/ttensor.rst deleted file mode 100644 index b803d318..00000000 --- a/docs/source/ttensor.rst +++ /dev/null @@ -1,11 +0,0 @@ -Tucker tensor (:class:`ttensor`) --------------------------------- -.. note:: - - The ``ttensor`` class defined in ``ttensor.py`` has been promoted to the ``pyttb`` namespace. - -.. autoclass:: pyttb.ttensor - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 1fe81dd3..81425a91 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -54,15 +54,71 @@ class tensor: """Class for dense tensors. - Attributes + Parameters ---------- - data : numpy.ndarray - Data of the tensor - shape : tuple of integers - Size of the tensor + data : optional + Source data as :class:`numpy.ndarray` + shape : optional + Shape of the tensor as a :class:`tuple` or any iterable array of integers. + A single integer means that the tensor should be a 1D array. + If no shape is given, defaults to :attr:`numpy.ndarray.shape` of ``data``. + Otherwise, the data is reshaped to the specified shape. + copy : optional + Whether to deep copy (versus reference) the data. + By default, the data is deep copied. - Instances of :class:`pyttb.tensor` can be created using :meth:`__init__` - or the following methods: + Examples + -------- + Create a :class:`pyttb.tensor` from a three-way :class:`numpy.ndarray`:: + + >>> data = np.array([[[1,13],[5,17],[9,21]], + ... [[2,14],[6,18],[10,22]], + ... [[3,15],[7,19],[11,23]], + ... [[4,16],[8,20],[12,24]]]) + >>> T = ttb.tensor(data) + >>> print(T) + tensor of shape (4, 3, 2) with order F + data[:, :, 0] = + [[ 1 5 9] + [ 2 6 10] + [ 3 7 11] + [ 4 8 12]] + data[:, :, 1] = + [[13 17 21] + [14 18 22] + [15 19 23] + [16 20 24]] + + Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray` vector and + reshape it:: + + >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ... 17, 18, 19, 20, 21, 22, 23, 24]) + >>> T = ttb.tensor(data, shape=(4, 3, 2)) + >>> print(T) + tensor of shape (4, 3, 2) with order F + data[:, :, 0] = + [[ 1 5 9] + [ 2 6 10] + [ 3 7 11] + [ 4 8 12]] + data[:, :, 1] = + [[13 17 21] + [14 18 22] + [15 19 23] + [16 20 24]] + + Create an empty :class:`pyttb.tensor`:: + + >>> T = ttb.tensor() + >>> print(T) + empty tensor of shape () + data = [] + + + Notes + -------- + Instances of :class:`pyttb.tensor` can also be created using the following methods: * :meth:`from_function` - Create a tensor from a function * :meth:`copy` - Make a deep copy of a tensor @@ -87,71 +143,6 @@ def __init__( shape: Optional[Shape] = None, copy: bool = True, ): - """ - Create a :class:`pyttb.tensor`. 
- - Parameters - ---------- - data : optional - Source data as :class:`numpy.ndarray` - shape : optional - Shape of the tensor as a :class:`tuple` or any iterable array of integers. - A single integer means that the tensor should be a 1D array. - If no shape is given, defaults to :attr:`numpy.ndarray.shape` of ``data``. - Otherwise, the data is reshaped to the specified shape. - copy : optional - Whether to deep copy (versus reference) the data. - By default, the data is deep copied. - - Examples - -------- - Create a :class:`pyttb.tensor` from a three-way :class:`numpy.ndarray`:: - - >>> data = np.array([[[1,13],[5,17],[9,21]], - ... [[2,14],[6,18],[10,22]], - ... [[3,15],[7,19],[11,23]], - ... [[4,16],[8,20],[12,24]]]) - >>> T = ttb.tensor(data) - >>> print(T) - tensor of shape (4, 3, 2) with order F - data[:, :, 0] = - [[ 1 5 9] - [ 2 6 10] - [ 3 7 11] - [ 4 8 12]] - data[:, :, 1] = - [[13 17 21] - [14 18 22] - [15 19 23] - [16 20 24]] - - Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray` vector and - reshape it:: - - >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - ... 17, 18, 19, 20, 21, 22, 23, 24]) - >>> T = ttb.tensor(data, shape=(4, 3, 2)) - >>> print(T) - tensor of shape (4, 3, 2) with order F - data[:, :, 0] = - [[ 1 5 9] - [ 2 6 10] - [ 3 7 11] - [ 4 8 12]] - data[:, :, 1] = - [[13 17 21] - [14 18 22] - [15 19 23] - [16 20 24]] - - Create an empty :class:`pyttb.tensor`:: - - >>> T = ttb.tensor() - >>> print(T) - empty tensor of shape () - data = [] - - """ if data is None: # EMPTY / DEFAULT CONSTRUCTOR self.data: np.ndarray = np.array([], order=self.order) From d4b989b070054d243df90e4abe198c665040f709 Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Thu, 10 Jul 2025 12:12:09 -0600 Subject: [PATCH 03/17] Fixing ruff issues --- docs/source/conf.py | 4 ++-- pyttb/tensor.py | 27 +++++++++++++-------------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 99d95736..8b203716 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -56,7 +56,7 @@ "myst_nb", ] -#autodoc_preserve_defaults = True +# autodoc_preserve_defaults = True myst_enable_extensions = [ "amsmath", @@ -201,4 +201,4 @@ autoclass_content = "both" autodoc_member_order = "bysource" autosummary_generate = True -autosummary_generate_overwrite = False \ No newline at end of file +autosummary_generate_overwrite = False diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 81425a91..b6b7e9a8 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -80,14 +80,14 @@ class tensor: tensor of shape (4, 3, 2) with order F data[:, :, 0] = [[ 1 5 9] - [ 2 6 10] - [ 3 7 11] - [ 4 8 12]] + [ 2 6 10] + [ 3 7 11] + [ 4 8 12]] data[:, :, 1] = [[13 17 21] - [14 18 22] - [15 19 23] - [16 20 24]] + [14 18 22] + [15 19 23] + [16 20 24]] Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray` vector and reshape it:: @@ -99,14 +99,14 @@ class tensor: tensor of shape (4, 3, 2) with order F data[:, :, 0] = [[ 1 5 9] - [ 2 6 10] - [ 3 7 11] - [ 4 8 12]] + [ 2 6 10] + [ 3 7 11] + [ 4 8 12]] data[:, :, 1] = [[13 17 21] - [14 18 22] - [15 19 23] - [16 20 24]] + [14 18 22] + [15 19 23] + [16 20 24]] Create an empty :class:`pyttb.tensor`:: @@ -114,10 +114,9 @@ class tensor: >>> print(T) empty tensor of shape () data = [] - Notes - -------- + ----- Instances of :class:`pyttb.tensor` can also be created using the following methods: * :meth:`from_function` - Create a tensor from a function From a90fed999d4fcf0172ea168c6effe31107283f0e Mon Sep 17 
00:00:00 2001 From: Danny Dunlavy Date: Thu, 10 Jul 2025 15:18:12 -0600 Subject: [PATCH 04/17] Trying to get the toctree populated. --- docs/source/_templates/{ => autosummary}/base.rst | 0 docs/source/conf.py | 4 +++- docs/source/index.rst | 2 +- docs/source/tensor_classes.rst | 15 --------------- 4 files changed, 4 insertions(+), 17 deletions(-) rename docs/source/_templates/{ => autosummary}/base.rst (100%) delete mode 100644 docs/source/tensor_classes.rst diff --git a/docs/source/_templates/base.rst b/docs/source/_templates/autosummary/base.rst similarity index 100% rename from docs/source/_templates/base.rst rename to docs/source/_templates/autosummary/base.rst diff --git a/docs/source/conf.py b/docs/source/conf.py index 8b203716..1a13b490 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -198,7 +198,9 @@ # -- Extension configuration ------------------------------------------------- # Autodoc settings -autoclass_content = "both" +#autoclass_content = "both" autodoc_member_order = "bysource" autosummary_generate = True autosummary_generate_overwrite = False +autosummary_ignore_module_all = False +autosummary_imported_members = True \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index 4e87fb06..850c7cf5 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -75,7 +75,7 @@ Python Reference ================ .. toctree:: - :maxdepth: 2 + :maxdepth: 3 reference.rst diff --git a/docs/source/tensor_classes.rst b/docs/source/tensor_classes.rst deleted file mode 100644 index cba8faa1..00000000 --- a/docs/source/tensor_classes.rst +++ /dev/null @@ -1,15 +0,0 @@ -Tensor Classes -============== - -.. toctree:: - :maxdepth: 1 - - generated/pyttb.tensor.rst - generated/pyttb.sptensor.rst - generated/pyttb.ktensor.rst - generated/pyttb.ttensor.rst - generated/pyttb.sumtensor.rst - generated/pyttb.tenmat.rst - generated/pyttb.sptenmat.rst - generated/pyttb.pyttb_utils.rst - From 292f4510b33d92fa61f4da83957753e313517d43 Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Thu, 10 Jul 2025 15:25:34 -0600 Subject: [PATCH 05/17] Ruff fixes --- docs/source/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 1a13b490..ee05732f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -198,9 +198,9 @@ # -- Extension configuration ------------------------------------------------- # Autodoc settings -#autoclass_content = "both" +# autoclass_content = "both" autodoc_member_order = "bysource" autosummary_generate = True autosummary_generate_overwrite = False autosummary_ignore_module_all = False -autosummary_imported_members = True \ No newline at end of file +autosummary_imported_members = True From 63bfe875955278ec4de0f94b33bfc04ef59ca468 Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Thu, 28 Aug 2025 08:33:49 -0600 Subject: [PATCH 06/17] Checkpoint --- docs/source/conf.py | 2 +- docs/source/functionality.rst | 6 -- docs/source/ktensor.rst | 11 --- docs/source/pyttb_utils.rst | 8 -- docs/source/sptenmat.rst | 11 --- docs/source/sptensor.rst | 14 ---- docs/source/sumtensor.rst | 12 --- docs/source/tenmat.rst | 11 --- docs/source/tensor.rst | 18 ----- docs/source/tensor_classes.rst | 15 ---- docs/source/ttensor.rst | 11 --- pyttb/tensor.py | 141 ++++++++++++++++----------------- 12 files changed, 70 insertions(+), 190 deletions(-) delete mode 100644 docs/source/functionality.rst delete mode 100644 docs/source/ktensor.rst delete mode 100644 docs/source/pyttb_utils.rst 
delete mode 100644 docs/source/sptenmat.rst delete mode 100644 docs/source/sptensor.rst delete mode 100644 docs/source/sumtensor.rst delete mode 100644 docs/source/tenmat.rst delete mode 100644 docs/source/tensor.rst delete mode 100644 docs/source/tensor_classes.rst delete mode 100644 docs/source/ttensor.rst diff --git a/docs/source/conf.py b/docs/source/conf.py index 0c091042..1a39989f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -55,7 +55,7 @@ "myst_nb", ] -autodoc_preserve_defaults = True +# autodoc_preserve_defaults = True myst_enable_extensions = [ "amsmath", diff --git a/docs/source/functionality.rst b/docs/source/functionality.rst deleted file mode 100644 index a8e30003..00000000 --- a/docs/source/functionality.rst +++ /dev/null @@ -1,6 +0,0 @@ -:orphan: - -Functionality -************* - -In construction \ No newline at end of file diff --git a/docs/source/ktensor.rst b/docs/source/ktensor.rst deleted file mode 100644 index 66e23b5c..00000000 --- a/docs/source/ktensor.rst +++ /dev/null @@ -1,11 +0,0 @@ -Kruskal Tensor (:class:`ktensor`) ---------------------------------- -.. note:: - - The ``ktensor`` class defined in ``ktensor.py`` has been promoted to the ``pyttb`` namespace. - -.. autoclass:: pyttb.ktensor - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file diff --git a/docs/source/pyttb_utils.rst b/docs/source/pyttb_utils.rst deleted file mode 100644 index 7cc02f4c..00000000 --- a/docs/source/pyttb_utils.rst +++ /dev/null @@ -1,8 +0,0 @@ -Helper Functions (:mod:`pyttb_utils`) -------------------------------------- - -.. automodule:: pyttb.pyttb_utils - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file diff --git a/docs/source/sptenmat.rst b/docs/source/sptenmat.rst deleted file mode 100644 index b9554a53..00000000 --- a/docs/source/sptenmat.rst +++ /dev/null @@ -1,11 +0,0 @@ -Sparse Tensor as Matrix (:class:`sptenmat`) -------------------------------------------- -.. note:: - - The ``sptenmat`` class defined in ``sptenmat.py`` has been promoted to the ``pyttb`` namespace. - -.. autoclass:: pyttb.sptenmat - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file diff --git a/docs/source/sptensor.rst b/docs/source/sptensor.rst deleted file mode 100644 index 638eb8c9..00000000 --- a/docs/source/sptensor.rst +++ /dev/null @@ -1,14 +0,0 @@ -Sparse Tensor (:class:`sptensor`) ---------------------------------- -.. note:: - - Classes and functions defined in ``sptensor.py`` have been promoted to the ``pyttb`` namespace. - -.. autoclass:: pyttb.sptensor - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: - -.. autofunction:: pyttb.sptenrand -.. autofunction:: pyttb.sptendiag \ No newline at end of file diff --git a/docs/source/sumtensor.rst b/docs/source/sumtensor.rst deleted file mode 100644 index d9f793e5..00000000 --- a/docs/source/sumtensor.rst +++ /dev/null @@ -1,12 +0,0 @@ -Sum Tensor (:class:`sumtensor`) -------------------------------- -.. note:: - - The ``sumtensor`` class defined in ``sumtensor.py`` has been promoted to the ``pyttb`` namespace. - - -.. 
autoclass:: pyttb.sumtensor - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file diff --git a/docs/source/tenmat.rst b/docs/source/tenmat.rst deleted file mode 100644 index b4ea5044..00000000 --- a/docs/source/tenmat.rst +++ /dev/null @@ -1,11 +0,0 @@ -Tensor as Matrix (:class:`tenmat`) ----------------------------------- -.. note:: - - The ``tenmat`` class defined in ``tenmat.py`` has been promoted to the ``pyttb`` namespace. - -.. autoclass:: pyttb.tenmat - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst deleted file mode 100644 index b527ea56..00000000 --- a/docs/source/tensor.rst +++ /dev/null @@ -1,18 +0,0 @@ -Dense Tensor (:class:`tensor`) ------------------------------- - -For *all* examples in this document, the following module imports are assumed:: - - >>> import pyttb as ttb - >>> import numpy as np - -.. autoclass:: pyttb.tensor - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__, __init__ - -.. autofunction:: pyttb.tenones -.. autofunction:: pyttb.tenzeros -.. autofunction:: pyttb.tenrand -.. autofunction:: pyttb.tendiag -.. autofunction:: pyttb.teneye diff --git a/docs/source/tensor_classes.rst b/docs/source/tensor_classes.rst deleted file mode 100644 index 56fb9f21..00000000 --- a/docs/source/tensor_classes.rst +++ /dev/null @@ -1,15 +0,0 @@ -Tensor Classes -============== - -.. toctree:: - :maxdepth: 1 - - tensor.rst - sptensor.rst - ktensor.rst - ttensor.rst - sumtensor.rst - tenmat.rst - sptenmat.rst - pyttb_utils.rst - diff --git a/docs/source/ttensor.rst b/docs/source/ttensor.rst deleted file mode 100644 index b803d318..00000000 --- a/docs/source/ttensor.rst +++ /dev/null @@ -1,11 +0,0 @@ -Tucker tensor (:class:`ttensor`) --------------------------------- -.. note:: - - The ``ttensor`` class defined in ``ttensor.py`` has been promoted to the ``pyttb`` namespace. - -.. autoclass:: pyttb.ttensor - :members: - :special-members: - :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 1fe81dd3..460892b1 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -54,15 +54,77 @@ class tensor: """Class for dense tensors. - Attributes + Parameters ---------- - data : numpy.ndarray - Data of the tensor - shape : tuple of integers - Size of the tensor + data : optional + Source data as :class:`numpy.ndarray` + shape : optional + Shape of the tensor as a :class:`tuple` or any iterable array of integers. + A single integer means that the tensor should be a 1D array. + If no shape is given, defaults to :attr:`numpy.ndarray.shape` of ``data``. + Otherwise, the data is reshaped to the specified shape. + copy : optional + Whether to deep copy (versus reference) the data. + By default, the data is deep copied. + + + **Attributes** + + - **data** (:class:`numpy.ndarray`) : Data of the tensor + - **shape** (:class:`tuple`) : Size of the tensor + + + Examples + -------- + Create a :class:`pyttb.tensor` from a three-way :class:`numpy.ndarray`:: + + >>> data = np.array([[[1,13],[5,17],[9,21]], + ... [[2,14],[6,18],[10,22]], + ... [[3,15],[7,19],[11,23]], + ... 
[[4,16],[8,20],[12,24]]]) + >>> T = ttb.tensor(data) + >>> print(T) + tensor of shape (4, 3, 2) with order F + data[:, :, 0] = + [[ 1 5 9] + [ 2 6 10] + [ 3 7 11] + [ 4 8 12]] + data[:, :, 1] = + [[13 17 21] + [14 18 22] + [15 19 23] + [16 20 24]] + + Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray` vector and + reshape it:: + + >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ... 17, 18, 19, 20, 21, 22, 23, 24]) + >>> T = ttb.tensor(data, shape=(4, 3, 2)) + >>> print(T) + tensor of shape (4, 3, 2) with order F + data[:, :, 0] = + [[ 1 5 9] + [ 2 6 10] + [ 3 7 11] + [ 4 8 12]] + data[:, :, 1] = + [[13 17 21] + [14 18 22] + [15 19 23] + [16 20 24]] - Instances of :class:`pyttb.tensor` can be created using :meth:`__init__` - or the following methods: + Create an empty :class:`pyttb.tensor`:: + + >>> T = ttb.tensor() + >>> print(T) + empty tensor of shape () + data = [] + + Notes + ----- + Instances of :class:`pyttb.tensor` can also be created using the following methods: * :meth:`from_function` - Create a tensor from a function * :meth:`copy` - Make a deep copy of a tensor @@ -87,71 +149,6 @@ def __init__( shape: Optional[Shape] = None, copy: bool = True, ): - """ - Create a :class:`pyttb.tensor`. - - Parameters - ---------- - data : optional - Source data as :class:`numpy.ndarray` - shape : optional - Shape of the tensor as a :class:`tuple` or any iterable array of integers. - A single integer means that the tensor should be a 1D array. - If no shape is given, defaults to :attr:`numpy.ndarray.shape` of ``data``. - Otherwise, the data is reshaped to the specified shape. - copy : optional - Whether to deep copy (versus reference) the data. - By default, the data is deep copied. - - Examples - -------- - Create a :class:`pyttb.tensor` from a three-way :class:`numpy.ndarray`:: - - >>> data = np.array([[[1,13],[5,17],[9,21]], - ... [[2,14],[6,18],[10,22]], - ... [[3,15],[7,19],[11,23]], - ... [[4,16],[8,20],[12,24]]]) - >>> T = ttb.tensor(data) - >>> print(T) - tensor of shape (4, 3, 2) with order F - data[:, :, 0] = - [[ 1 5 9] - [ 2 6 10] - [ 3 7 11] - [ 4 8 12]] - data[:, :, 1] = - [[13 17 21] - [14 18 22] - [15 19 23] - [16 20 24]] - - Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray` vector and - reshape it:: - - >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - ... 
17, 18, 19, 20, 21, 22, 23, 24]) - >>> T = ttb.tensor(data, shape=(4, 3, 2)) - >>> print(T) - tensor of shape (4, 3, 2) with order F - data[:, :, 0] = - [[ 1 5 9] - [ 2 6 10] - [ 3 7 11] - [ 4 8 12]] - data[:, :, 1] = - [[13 17 21] - [14 18 22] - [15 19 23] - [16 20 24]] - - Create an empty :class:`pyttb.tensor`:: - - >>> T = ttb.tensor() - >>> print(T) - empty tensor of shape () - data = [] - - """ if data is None: # EMPTY / DEFAULT CONSTRUCTOR self.data: np.ndarray = np.array([], order=self.order) From 9d99181192efb2d63d1658588ac28b1a5a9b33d1 Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Sat, 30 Aug 2025 18:12:56 -0600 Subject: [PATCH 07/17] Fixing doc problems with recent merges --- docs/source/ktensor.rst | 16 ++++++++++++++++ docs/source/pyttb_utils.rst | 12 ++++++------ docs/source/sptenmat.rst | 16 ++++++++++++++++ docs/source/sptensor.rst | 19 +++++++++++++++++++ docs/source/sumtensor.rst | 16 ++++++++++++++++ docs/source/tenmat.rst | 16 ++++++++++++++++ docs/source/tensor.rst | 4 ++-- docs/source/ttensor.rst | 2 +- 8 files changed, 92 insertions(+), 9 deletions(-) create mode 100644 docs/source/ktensor.rst create mode 100644 docs/source/sptenmat.rst create mode 100644 docs/source/sptensor.rst create mode 100644 docs/source/sumtensor.rst create mode 100644 docs/source/tenmat.rst diff --git a/docs/source/ktensor.rst b/docs/source/ktensor.rst new file mode 100644 index 00000000..3171f8d8 --- /dev/null +++ b/docs/source/ktensor.rst @@ -0,0 +1,16 @@ +Kruskal Tensor (:class:`pyttb.ktensor`) +======================================= +.. note:: + + Classes and functions defined in ``ktensor.py`` have been promoted to the ``pyttb`` namespace. + +For *all* examples in this document, the following module imports are assumed:: + + >>> import pyttb as ttb + >>> import numpy as np + +.. autoclass:: pyttb.ktensor + :members: + :special-members: + :exclude-members: __dict__, __weakref__, __slots__ + :show-inheritance: \ No newline at end of file diff --git a/docs/source/pyttb_utils.rst b/docs/source/pyttb_utils.rst index 0627df89..7e9e4f85 100644 --- a/docs/source/pyttb_utils.rst +++ b/docs/source/pyttb_utils.rst @@ -1,10 +1,10 @@ -Helper Functions (:mod:`pyttb_utils`, :mod:`khatrirao`) --------------------------------------------------------- +Helper Functions (:mod:`pyttb.pyttb_utils`, :mod:`pyttb.khatrirao`) +=================================================================== -.. autofunction:: pyttb.khatrirao.khatrirao - -.. automodule:: pyttb.pyttb_utils +.. autoclass:: pyttb.pyttb_utils :members: :special-members: :exclude-members: __dict__, __weakref__, __slots__ - :show-inheritance: \ No newline at end of file + :show-inheritance: + +.. autofunction:: pyttb.khatrirao \ No newline at end of file diff --git a/docs/source/sptenmat.rst b/docs/source/sptenmat.rst new file mode 100644 index 00000000..41e34d28 --- /dev/null +++ b/docs/source/sptenmat.rst @@ -0,0 +1,16 @@ +Sparse Tensor as Matrix (:class:`pyttb.sptenmat`) +================================================= +.. note:: + + Classes and functions defined in ``sptenmat.py`` have been promoted to the ``pyttb`` namespace. + +For *all* examples in this document, the following module imports are assumed:: + + >>> import pyttb as ttb + >>> import numpy as np + +.. 
autoclass:: pyttb.sptenmat
+   :members:
+   :special-members:
+   :exclude-members: __dict__, __weakref__, __slots__
+   :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/sptensor.rst b/docs/source/sptensor.rst
new file mode 100644
index 00000000..2b432233
--- /dev/null
+++ b/docs/source/sptensor.rst
@@ -0,0 +1,19 @@
+Sparse Tensor (:class:`pyttb.sptensor`)
+=======================================
+.. note::
+
+    Classes and functions defined in ``sptensor.py`` have been promoted to the ``pyttb`` namespace.
+
+For *all* examples in this document, the following module imports are assumed::
+
+    >>> import pyttb as ttb
+    >>> import numpy as np
+
+.. autoclass:: pyttb.sptensor
+   :members:
+   :special-members:
+   :exclude-members: __dict__, __weakref__, __slots__, __deepcopy__
+   :show-inheritance:
+
+.. autofunction:: pyttb.sptenrand
+.. autofunction:: pyttb.sptendiag
\ No newline at end of file
diff --git a/docs/source/sumtensor.rst b/docs/source/sumtensor.rst
new file mode 100644
index 00000000..2be30dad
--- /dev/null
+++ b/docs/source/sumtensor.rst
@@ -0,0 +1,16 @@
+Sum Tensor (:class:`pyttb.sumtensor`)
+=====================================
+.. note::
+
+    Classes and functions defined in ``sumtensor.py`` have been promoted to the ``pyttb`` namespace.
+
+For *all* examples in this document, the following module imports are assumed::
+
+    >>> import pyttb as ttb
+    >>> import numpy as np
+
+.. autoclass:: pyttb.sumtensor
+   :members:
+   :special-members:
+   :exclude-members: __dict__, __weakref__, __slots__
+   :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/tenmat.rst b/docs/source/tenmat.rst
new file mode 100644
index 00000000..107a734d
--- /dev/null
+++ b/docs/source/tenmat.rst
@@ -0,0 +1,16 @@
+Tensor as Matrix (:class:`pyttb.tenmat`)
+========================================
+.. note::
+
+    Classes and functions defined in ``tenmat.py`` have been promoted to the ``pyttb`` namespace.
+
+For *all* examples in this document, the following module imports are assumed::
+
+    >>> import pyttb as ttb
+    >>> import numpy as np
+
+.. autoclass:: pyttb.tenmat
+   :members:
+   :special-members:
+   :exclude-members: __dict__, __weakref__, __slots__
+   :show-inheritance:
\ No newline at end of file
diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst
index 697980b4..f81506d5 100644
--- a/docs/source/tensor.rst
+++ b/docs/source/tensor.rst
@@ -1,5 +1,5 @@
 Dense Tensor (:class:`pyttb.tensor`)
-------------------------------------
+====================================
 .. note::
 
     Classes and functions defined in ``tensor.py`` have been promoted to the ``pyttb`` namespace.
@@ -9,7 +9,7 @@ For *all* examples in this document, the following module imports are assumed::
 
     >>> import pyttb as ttb
     >>> import numpy as np
 
-.. automodule:: pyttb.tensor
+.. autoclass:: pyttb.tensor
    :members:
    :special-members:
    :exclude-members: __dict__, __weakref__, __slots__, __deepcopy__
    :show-inheritance:
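The phrase "promoted to the ``pyttb`` namespace" used on each of these pages means the class is importable directly from the package root rather than from its defining module. A minimal sketch of the convention, assuming only the public names documented above (the shape round-trip mirrors the ``tensor`` examples elsewhere in this series)::

    >>> import pyttb as ttb
    >>> import numpy as np
    >>> T = ttb.tensor(np.ones((4, 3)))  # promoted name; no pyttb.tensor.tensor needed
    >>> T.shape
    (4, 3)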
diff --git a/docs/source/ttensor.rst b/docs/source/ttensor.rst
index 31a698f2..27dbf371 100644
--- a/docs/source/ttensor.rst
+++ b/docs/source/ttensor.rst
@@ -2,7 +2,7 @@ Tucker tensor (:class:`pyttb.ttensor`)
 --------------------------------------
 .. note::
 
-    The ``ttensor`` class defined in ``ttensor.py`` has been promoted to the ``pyttb`` namespace.
+    Classes and functions defined in ``ttensor.py`` have been promoted to the ``pyttb`` namespace.
 
 .. autoclass:: pyttb.ttensor
    :members:

From f785ff46bfa298a57ba93dda2c232cba6c052d6a Mon Sep 17 00:00:00 2001
From: Danny Dunlavy
Date: Sat, 30 Aug 2025 19:13:21 -0600
Subject: [PATCH 08/17] Another round of updates, still getting warnings/errors
 with intersphinx

---
 docs/source/cpals.rst       | 11 ++++++++---
 docs/source/cpapr.rst       | 11 ++++++++---
 docs/source/gcpopt.rst      |  7 ++++++-
 docs/source/hosvd.rst       | 10 +++++++---
 docs/source/pyttb_utils.rst |  2 +-
 docs/source/tensor.rst      |  2 +-
 docs/source/tuckerals.rst   |  7 ++++++-
 7 files changed, 37 insertions(+), 13 deletions(-)

diff --git a/docs/source/cpals.rst b/docs/source/cpals.rst
index e8551484..fda3399c 100644
--- a/docs/source/cpals.rst
+++ b/docs/source/cpals.rst
@@ -1,8 +1,13 @@
-CP Alternating Least Squares (:obj:`cp_als`)
-============================================
+CP Alternating Least Squares (:obj:`pyttb.cp_als`)
+==================================================
 .. note::
 
-    The ``cp_als`` function defined in ``cp_als.py`` has been promoted to the ``pyttb`` namespace.
+    Functions defined in ``cp_als.py`` have been promoted to the ``pyttb`` namespace.
+
+For *all* examples in this document, the following module imports are assumed::
+
+    >>> import pyttb as ttb
+    >>> import numpy as np
 
 .. autofunction:: pyttb.cp_als
\ No newline at end of file
diff --git a/docs/source/cpapr.rst b/docs/source/cpapr.rst
index d06dc158..01f87813 100644
--- a/docs/source/cpapr.rst
+++ b/docs/source/cpapr.rst
@@ -1,8 +1,13 @@
-CP Alternating Poisson Regression (:obj:`cp_apr`)
-=================================================
+CP Alternating Poisson Regression (:obj:`pyttb.cp_apr`)
+=======================================================
 .. note::
 
-    The ``cp_apr`` function defined in ``cp_apr.py`` has been promoted to the ``pyttb`` namespace.
+    Functions defined in ``cp_apr.py`` have been promoted to the ``pyttb`` namespace.
+
+For *all* examples in this document, the following module imports are assumed::
+
+    >>> import pyttb as ttb
+    >>> import numpy as np
 
 .. autofunction:: pyttb.cp_apr
\ No newline at end of file
diff --git a/docs/source/gcpopt.rst b/docs/source/gcpopt.rst
index 39415cf0..ccc64792 100644
--- a/docs/source/gcpopt.rst
+++ b/docs/source/gcpopt.rst
@@ -2,7 +2,12 @@ Generalized CP Optimization (:obj:`pyttb.gcp_opt`)
 ==================================================
 .. note::
 
-    The ``gcp_opt`` function defined in ``gcp_opt.py`` has been promoted to the ``pyttb`` namespace.
+    Functions defined in ``gcp_opt.py`` have been promoted to the ``pyttb`` namespace.
+
+For *all* examples in this document, the following module imports are assumed::
+
+    >>> import pyttb as ttb
+    >>> import numpy as np
 
 .. autofunction:: pyttb.gcp_opt
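All of the algorithm pages above now share the same import preamble, so calls to the CP routines can be sketched uniformly. A minimal sketch, assuming the documented ``cp_als`` interface (it returns the fitted model, the initial guess, and a diagnostics dictionary; the factor values depend on the random initialization, so no output is shown)::

    >>> import pyttb as ttb
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> T = ttb.tenrand((3, 4, 2))  # random dense test tensor
    >>> M, M_init, info = ttb.cp_als(T, 2)  # rank-2 CP decomposition  # doctest: +SKIP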
diff --git a/docs/source/hosvd.rst b/docs/source/hosvd.rst
index c6e5e035..05969cd6 100644
--- a/docs/source/hosvd.rst
+++ b/docs/source/hosvd.rst
@@ -1,8 +1,12 @@
-Tucker Higher-Order SVD (:obj:`hosvd`)
-======================================
+Tucker Higher-Order SVD (:obj:`pyttb.hosvd`)
+============================================
 .. note::
 
-    The ``hosvd`` function defined in ``hosvd.py`` has been promoted to the ``pyttb`` namespace.
+    Functions defined in ``hosvd.py`` have been promoted to the ``pyttb`` namespace.
 
+For *all* examples in this document, the following module imports are assumed::
+
+    >>> import pyttb as ttb
+    >>> import numpy as np
 
 .. autofunction:: pyttb.hosvd
diff --git a/docs/source/pyttb_utils.rst b/docs/source/pyttb_utils.rst
index 7e9e4f85..f7d9a8b8 100644
--- a/docs/source/pyttb_utils.rst
+++ b/docs/source/pyttb_utils.rst
@@ -7,4 +7,4 @@ Helper Functions (:mod:`pyttb.pyttb_utils`, :mod:`pyttb.khatrirao`)
    :exclude-members: __dict__, __weakref__, __slots__
    :show-inheritance:
 
-.. autofunction:: pyttb.khatrirao
+.. autofunction:: pyttb.khatrirao.khatrirao
\ No newline at end of file
diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst
index f81506d5..67e01b72 100644
--- a/docs/source/tensor.rst
+++ b/docs/source/tensor.rst
@@ -12,7 +12,7 @@ For *all* examples in this document, the following module imports are assumed::
 .. autoclass:: pyttb.tensor
    :members:
    :special-members:
-   :exclude-members: __dict__, __weakref__, __slots__, __deepcopy__
+   :exclude-members: __init__, __dict__, __weakref__, __slots__, __deepcopy__
    :show-inheritance:
diff --git a/docs/source/tuckerals.rst b/docs/source/tuckerals.rst
index 6859e775..81698d15 100644
--- a/docs/source/tuckerals.rst
+++ b/docs/source/tuckerals.rst
@@ -2,6 +2,11 @@ Tucker Alternating Least Squares (:obj:`pyttb.tucker_als`)
 ==========================================================
 .. note::
 
-    The ``tucker_als`` function defined in ``tucker_als.py`` has been promoted to the ``pyttb`` namespace.
+    Functions defined in ``tucker_als.py`` have been promoted to the ``pyttb`` namespace.
+
+For *all* examples in this document, the following module imports are assumed::
+
+    >>> import pyttb as ttb
+    >>> import numpy as np
 
 .. autofunction:: pyttb.tucker_als

From 279d3af6eb6c1967e7b8be75a0c0d84c3f4c47ef Mon Sep 17 00:00:00 2001
From: Danny Dunlavy
Date: Sat, 30 Aug 2025 19:24:14 -0600
Subject: [PATCH 09/17] More attempts at fixes

---
 docs/source/conf.py         | 3 ++-
 docs/source/pyttb_utils.rst | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 51e2900a..5d376631 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -57,7 +57,7 @@
     "myst_nb",
 ]
 
-# autodoc_preserve_defaults = True
+autodoc_preserve_defaults = True
 
 myst_enable_extensions = [
     "amsmath",
@@ -202,3 +202,4 @@
 # Autodoc settings
 autoclass_content = "class"
 autodoc_member_order = "bysource"
+autodoc_class_signature = "separated"
\ No newline at end of file
diff --git a/docs/source/pyttb_utils.rst b/docs/source/pyttb_utils.rst
index f7d9a8b8..a3cefd20 100644
--- a/docs/source/pyttb_utils.rst
+++ b/docs/source/pyttb_utils.rst
@@ -1,7 +1,7 @@
 Helper Functions (:mod:`pyttb.pyttb_utils`, :mod:`pyttb.khatrirao`)
 ===================================================================
 
-.. autoclass:: pyttb.pyttb_utils
+.. 
automodule:: pyttb.pyttb_utils :members: :special-members: :exclude-members: __dict__, __weakref__, __slots__ From 683cbaaac7a33ada924c667a3d2ca52d62e770a3 Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Sat, 30 Aug 2025 19:25:53 -0600 Subject: [PATCH 10/17] Ruff fix --- docs/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 5d376631..d9198ec1 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -202,4 +202,4 @@ # Autodoc settings autoclass_content = "class" autodoc_member_order = "bysource" -autodoc_class_signature = "separated" \ No newline at end of file +autodoc_class_signature = "separated" From 2aebab4108ecedf12a2f2fcc590b9036bdaf51d0 Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Mon, 1 Sep 2025 10:01:05 -0600 Subject: [PATCH 11/17] Updating sphinx build flags to better align to RTD. --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2d36e338..2ba1abc4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -76,7 +76,7 @@ current or filing a new [issue](https://github.com/sandialabs/pyttb/issues). ``` 1. For the CI version which is more strict ```commandline - sphinx-build ./docs/source ./docs/build -W -n --keep-going + sphinx-build ./docs/source ./docs/build -E -W --keep-going ``` 2. If not on Windows optionally add `-j auto` for parallelization 2. Clear notebook outputs if run locally see `nbstripout` in our [pre-commit configuration](.pre-commit-config.yaml) From 5e97fd5f2b520c6a4d265b1e6c01aebccbe10cab Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Fri, 19 Sep 2025 17:37:00 -0600 Subject: [PATCH 12/17] Checkpoint on tensor class --- Atemp.mtx | 23 ++ Ktemp.tns | 26 ++ Ttemps.tns | 63 ++++ docs/source/tensor.rst | 7 +- docs/source/tutorial/class_tensor.ipynb | 448 ++++++++++++++++++++---- docs/source/tutorials.rst | 20 +- ndarraytemp.txt | 5 + pyttb/import_data.py | 1 + pyttb/tensor.py | 22 +- 9 files changed, 526 insertions(+), 89 deletions(-) create mode 100644 Atemp.mtx create mode 100644 Ktemp.tns create mode 100644 Ttemps.tns create mode 100644 ndarraytemp.txt diff --git a/Atemp.mtx b/Atemp.mtx new file mode 100644 index 00000000..8610dcd6 --- /dev/null +++ b/Atemp.mtx @@ -0,0 +1,23 @@ +matrix +2 +5 4 +1.0000000000000000e+00 +6.0000000000000000e+00 +1.1000000000000000e+01 +1.6000000000000000e+01 +2.0000000000000000e+00 +7.0000000000000000e+00 +1.2000000000000000e+01 +1.7000000000000000e+01 +3.0000000000000000e+00 +8.0000000000000000e+00 +1.3000000000000000e+01 +1.8000000000000000e+01 +4.0000000000000000e+00 +9.0000000000000000e+00 +1.4000000000000000e+01 +1.9000000000000000e+01 +5.0000000000000000e+00 +1.0000000000000000e+01 +1.5000000000000000e+01 +2.0000000000000000e+01 diff --git a/Ktemp.tns b/Ktemp.tns new file mode 100644 index 00000000..008b2bea --- /dev/null +++ b/Ktemp.tns @@ -0,0 +1,26 @@ +ktensor +3 +5 4 3 +2 +1.0000000000000000e+00 1.0000000000000000e+00 +matrix +2 +5 2 +1.0000000000000000e+00 6.0000000000000000e+00 +2.0000000000000000e+00 7.0000000000000000e+00 +3.0000000000000000e+00 8.0000000000000000e+00 +4.0000000000000000e+00 9.0000000000000000e+00 +5.0000000000000000e+00 1.0000000000000000e+01 +matrix +2 +4 2 +1.0000000000000000e+00 5.0000000000000000e+00 +2.0000000000000000e+00 6.0000000000000000e+00 +3.0000000000000000e+00 7.0000000000000000e+00 +4.0000000000000000e+00 8.0000000000000000e+00 +matrix +2 +3 2 +1.0000000000000000e+00 4.0000000000000000e+00 +2.0000000000000000e+00 
5.0000000000000000e+00 +3.0000000000000000e+00 6.0000000000000000e+00 diff --git a/Ttemps.tns b/Ttemps.tns new file mode 100644 index 00000000..272a629f --- /dev/null +++ b/Ttemps.tns @@ -0,0 +1,63 @@ +tensor +3 +3 4 5 +2.4313567236808464e-01 +8.5461744740975198e-01 +4.7075829812164127e-01 +4.2353756119620134e-01 +1.0809034164312792e-01 +2.1343236316591530e-01 +8.5903831396461416e-01 +7.1766748043247763e-01 +5.4911648502816746e-01 +4.1056038109268744e-01 +1.4811092697982420e-01 +8.3249584750236127e-01 +1.0121943083605089e-01 +9.7508795425127137e-01 +3.2216902851253792e-01 +9.3173977031640864e-01 +1.0383386811897577e-01 +9.2861648393714524e-01 +8.1416258911849548e-01 +7.0798320371456391e-02 +5.1434957568763429e-01 +7.5637552873427727e-01 +6.4449482686655946e-01 +9.4240027822025030e-01 +6.1353965709566283e-01 +7.2064416484485094e-01 +2.1790465309932883e-01 +2.3804789082290456e-01 +9.4523655339031210e-01 +4.7692482053634855e-02 +6.6796875897810104e-01 +4.4208307334537400e-02 +5.4493817437709080e-01 +2.2222558358093836e-01 +6.5748454188029049e-01 +3.8686062626499940e-01 +4.3879610723962370e-01 +7.4589963733585618e-03 +6.3797267253995427e-01 +6.9602079511880577e-02 +5.7579815120471067e-01 +8.9216605249320491e-01 +1.6293235001713857e-01 +7.7003190987734860e-01 +7.2973922571523331e-01 +4.4292734840430104e-01 +5.4163473219816638e-01 +9.5545208091364964e-01 +1.7967933562544913e-01 +1.3367078192317039e-01 +4.8051761768530332e-01 +4.2048793075932145e-01 +4.6862170808474413e-01 +7.1053768046252053e-01 +2.0714109593530705e-02 +4.5485897754916493e-01 +8.4330193211606341e-01 +6.8253074577361383e-01 +2.2654173766977470e-01 +8.1469028482006889e-01 diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst index 67e01b72..abdfb6bf 100644 --- a/docs/source/tensor.rst +++ b/docs/source/tensor.rst @@ -1,10 +1,7 @@ Dense Tensor (:class:`pyttb.tensor`) ==================================== -.. note:: - - Classes and functions defined in ``tensor.py`` have been promoted to the ``pyttb`` namespace. -For *all* examples in this document, the following module imports are assumed:: +For *all* examples in this document, the following imports are assumed:: >>> import pyttb as ttb >>> import numpy as np @@ -12,7 +9,7 @@ For *all* examples in this document, the following module imports are assumed:: .. autoclass:: pyttb.tensor :members: :special-members: - :exclude-members: __init__, __dict__, __weakref__, __slots__, __deepcopy__ + :exclude-members: __init__, __dict__, __weakref__, __deepcopy__ :show-inheritance: .. autofunction:: pyttb.tenones diff --git a/docs/source/tutorial/class_tensor.ipynb b/docs/source/tutorial/class_tensor.ipynb index 491564e2..cfcc6930 100644 --- a/docs/source/tutorial/class_tensor.ipynb +++ b/docs/source/tutorial/class_tensor.ipynb @@ -4,12 +4,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Tensors\n", + "# Dense Tensors\n", "```\n", "Copyright 2025 National Technology & Engineering Solutions of Sandia,\n", "LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the\n", "U.S. 
Government retains certain rights in this software.\n", - "```\n" + "```" ] }, { @@ -21,33 +21,51 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ - "from __future__ import annotations\n", - "\n", - "import sys\n", - "\n", + "import pyttb as ttb\n", "import numpy as np\n", "\n", - "import pyttb as ttb" + "from __future__ import annotations\n", + "import sys" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating a `tensor` from an array" + "## Creating a `tensor` from an `numpy` multidimensional array" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 4, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1. 1. 1.]\n", + " [1. 1. 1. 1.]]\n", + "data[:, :, 1] =\n", + "[[1. 1. 1. 1.]\n", + " [1. 1. 1. 1.]]\n", + "data[:, :, 2] =\n", + "[[1. 1. 1. 1.]\n", + " [1. 1. 1. 1.]]" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "M = np.ones((2, 4, 3)) # A 2x4x3 array.\n", + "M = np.ones((2, 4, 3)) # A 2x4x3 array of ones.\n", "X = ttb.tensor(M) # Convert to a tensor object\n", "X" ] @@ -61,11 +79,39 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "X = X.reshape((4, 2, 3))\n", + "execution_count": 70, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (3, 2, 4) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1.]\n", + " [1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 1] =\n", + "[[1. 1.]\n", + " [1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 2] =\n", + "[[1. 1.]\n", + " [1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 3] =\n", + "[[1. 1.]\n", + " [1. 1.]\n", + " [1. 1.]]" + ] + }, + "execution_count": 70, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "M = np.ones((2, 4, 3)) # A 2x4x3 array of ones.\n", + "X = ttb.tensor(M, shape=(3, 2, 4)) # Convert to a tensor object with compatible shape\n", "X" ] }, @@ -73,130 +119,331 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating a one-dimensional `tensor`\n", - "`np.random.rand(m,n)` creates a two-dimensional tensor with `m` rows and `n` columns." + "## Specifying singleton dimensions in a `tensor`\n", + "If you need to explicitly include a singleton dimensions -- e.g., when matching the number of dimensions between tensors -- you can use the `shape` parameter when creating a `tensor` to do this." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 71, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (4, 3) with order F\n", + "data[:, :] =\n", + "[[1. 1. 1.]\n", + " [1. 1. 1.]\n", + " [1. 1. 1.]\n", + " [1. 1. 1.]]" + ] + }, + "execution_count": 71, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tensor(np.random.rand(5, 1)) # Creates a 2-way tensor.\n", - "X" + "Y = ttb.tensor(np.ones((4, 3))) # Creates a 2-way tensor.\n", + "Y" + ] + }, + { + "cell_type": "code", + "execution_count": 72, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (1, 4, 1, 3, 1) with order F\n", + "data[:, :, 0, 0, 0] =\n", + "[[1. 1. 1. 1.]]\n", + "data[:, :, 0, 1, 0] =\n", + "[[1. 1. 1. 1.]]\n", + "data[:, :, 0, 2, 0] =\n", + "[[1. 1. 1. 
1.]]" + ] + }, + "execution_count": 72, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.random.seed(0)\n", + "Y = ttb.tensor(np.ones((4, 3)), shape=(1, 4, 1, 3, 1)) # Creates a 5-way tensor with singleton dimensions from a two-way array.\n", + "Y" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To specify a 1-way `tensor`, use `(m,)` syntax, signifying a vector with `m` elements." + "## Viewing the individual attributes of a `tensor`" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X.data:\n", + "[[[0.5488135 0.96366276 0.0202184 ]\n", + " [0.60276338 0.79172504 0.77815675]\n", + " [0.4236548 0.56804456 0.97861834]\n", + " [0.43758721 0.07103606 0.46147936]]\n", + "\n", + " [[0.71518937 0.38344152 0.83261985]\n", + " [0.54488318 0.52889492 0.87001215]\n", + " [0.64589411 0.92559664 0.79915856]\n", + " [0.891773 0.0871293 0.78052918]]]\n", + "X.shape:\n", + "(2, 4, 3)\n" + ] + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tensor(np.random.rand(5), shape=(5,)) # Creates a 1-way tensor.\n", - "X" + "X = ttb.tenrand((2, 4, 3)) # Create data.\n", + "print(f\"X.data:\\n{X.data}\") # The data array.\n", + "print(f\"X.shape:\\n{X.shape}\") # The shape." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Specifying trailing singleton dimensions in a `tensor`\n", - "Likewise, trailing singleton dimensions must be explicitly specified." + "## Creating a `tensor` as a copy of another `tensor`" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 79, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X[0]: 2.0\n", + "X2[0]: 1.0\n" + ] + } + ], "source": [ - "np.random.seed(0)\n", - "Y = ttb.tensor(np.random.rand(4, 3)) # Creates a 2-way tensor.\n", - "Y" + "M = np.ones((2, 4, 3)) # A 2x4x3 array of ones.\n", + "X = ttb.tensor(M) # Convert to a tensor object with compatible shape\n", + "X2 = X.copy() # This creates a copy of X and stores it in X2\n", + "X[0, 0, 0] = 2 # Change a value from 1 to 2 \n", + "print(f\"X[0]: {X[0]}\")\n", + "print(f\"X2[0]: {X2[0]}\") # Will be different from X[0, 0, 0] since X2 was created as a copy of X" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "np.random.seed(0)\n", - "Y = ttb.tensor(np.random.rand(3, 4, 1), (3, 4, 1)) # Creates a 3-way tensor.\n", - "Y" + "Note that in Python setting one variable equal to another results in a reference of one variable to another, not a copy. This is the case with `pyttb` data class instances as well, as shown below." 
] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X[0]: 2.0\n", + "X2[0]: 2.0\n" + ] + } + ], "source": [ - "## The constituent parts of a `tensor`" + "M = np.ones((2, 4, 3)) # A 2x4x3 array of ones.\n", + "X = ttb.tensor(M) # Convert to a tensor object with compatible shape\n", + "X2 = X # This creates a reference of X but not a copy\n", + "X[0, 0, 0] = 2 # Change a value from 1 to 2 \n", + "print(f\"X[0]: {X[0]}\")\n", + "print(f\"X2[0]: {X2[0]}\") # Will be the same as X[0, 0, 0] since X2 is a reference to X" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "np.random.seed(0)\n", - "X = ttb.tenrand((2, 4, 3)) # Create data.\n", - "X.data # The array." + "## Creating a one-dimensional `tensor`" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 82, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (5,) with order F\n", + "data[:] =\n", + "[1. 1. 1. 1. 1.]" + ] + }, + "execution_count": 82, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X.shape # The shape." + "np.random.seed(0)\n", + "X = ttb.tensor(np.ones(5)) # Creates a 1-way tensor from a 1-way array.\n", + "X" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating a `tensor` from its constituent parts" + "Nota that `np.ones((m,n))` creates a two-dimensional array with `m` rows and `n` columns, and a `tensor` will inherit that shape, even if `m` and/or `n` is eaual to 1." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 83, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (5, 1) with order F\n", + "data[:, :] =\n", + "[[1.]\n", + " [1.]\n", + " [1.]\n", + " [1.]\n", + " [1.]]" + ] + }, + "execution_count": 83, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tenrand((2, 4, 3)) # Create data.\n", - "Y = X.copy() # Copies X.\n", - "Y" + "X = ttb.tensor(np.ones(shape=(5, 1))) # Creates a 2-way tensor from a two-way array (even though is represents a vector).\n", + "X" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating an empty `tensor`\n", - "An empty constructor exists." 
+ "## Using `tenrand` to create a random `tensor`" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 37, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 4, 3) with order F\n", + "data[:, :, 0] =\n", + "[[0.5488135 0.60276338 0.4236548 0.43758721]\n", + " [0.71518937 0.54488318 0.64589411 0.891773 ]]\n", + "data[:, :, 1] =\n", + "[[0.96366276 0.79172504 0.56804456 0.07103606]\n", + " [0.38344152 0.52889492 0.92559664 0.0871293 ]]\n", + "data[:, :, 2] =\n", + "[[0.0202184 0.77815675 0.97861834 0.46147936]\n", + " [0.83261985 0.87001215 0.79915856 0.78052918]]" + ] + }, + "execution_count": 37, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X = ttb.tensor() # Creates an empty tensor\n", + "# Create tensor with values sampled uniformly from [0,1]\n", + "np.random.seed(0)\n", + "X = ttb.tenrand((2, 4, 3))\n", + "X" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 4, 3) with order F\n", + "data[:, :, 0] =\n", + "[[23.35161788 3.04728791 6.5822303 34.85704155]\n", + " [36.4628495 -0.55145313 14.56407579 36.52093157]]\n", + "data[:, :, 1] =\n", + "[[24.00553622 27.86468466 35.50697393 41.83147913]\n", + " [ 6.27534301 35.73075019 28.24209331 18.1769294 ]]\n", + "data[:, :, 2] =\n", + "[[ 7.99313321 16.08437592 37.62368276 10.16048464]\n", + " [32.78160802 13.79306013 3.40955674 30.59496089]]" + ] + }, + "execution_count": 50, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Create tensor with values sampled uniformly from [a,b] where a < b\n", + "a = -1\n", + "b = 42\n", + "#np.random.seed(0)\n", + "X = ttb.tenrand((2, 4, 3)) * (b-a) + a\n", + "X" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 4, 3) with order F\n", + "data[:, :, 0] =\n", + "[[0.17644502 0.03723909 0.37952756 0.0768091 ]\n", + " [0.13100464 0.07707799 0.01109036 0.24496075]]\n", + "data[:, :, 1] =\n", + "[[0.00058489 0.11416032 0.01117244 0.03309492]\n", + " [0.02161545 0.04421351 0.07349822 0.2187989 ]]\n", + "data[:, :, 2] =\n", + "[[8.24610548e-03 4.67157204e-02 1.63106699e-01 2.98181706e-03]\n", + " [4.79103816e-03 1.54792458e-01 7.41521559e-02 1.27479709e-04]]" + ] + }, + "execution_count": 52, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Create random tensors whose values are drawn from other distributions using numpy.random and converting to a tensor \n", + "A = np.random.beta(0.5,2.5,size=(2,4,3))\n", + "X = ttb.tensor(A)\n", "X" ] }, @@ -204,7 +451,29 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `tenones` to create a `tensor` of all ones" + "## Creating an empty `tensor`" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "empty tensor of shape ()\n", + "data = []" + ] + }, + "execution_count": 53, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "X = ttb.tensor() # Creates an empty tensor\n", + "X" ] }, { @@ -221,14 +490,37 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `tenzeros` to create a `tensor` of all zeros" + "## Using `tenzeros` to create a `tensor` of all zeros" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 54, "metadata": {}, - "outputs": [], + "outputs": [ + 
{ + "data": { + "text/plain": [ + "tensor of shape (2, 1, 4) with order F\n", + "data[:, :, 0] =\n", + "[[0.]\n", + " [0.]]\n", + "data[:, :, 1] =\n", + "[[0.]\n", + " [0.]]\n", + "data[:, :, 2] =\n", + "[[0.]\n", + " [0.]]\n", + "data[:, :, 3] =\n", + "[[0.]\n", + " [0.]]" + ] + }, + "execution_count": 54, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X = ttb.tenzeros((2, 1, 4)) # Creates a 2x1x4 tensor of zeroes.\n", "X" @@ -238,7 +530,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `tenrand` to create a random `tensor`" + "## Using `tenrand` to create a random `tensor`" ] }, { @@ -1035,7 +1327,25 @@ ] } ], - "metadata": {}, + "metadata": { + "kernelspec": { + "display_name": "pyttb_312", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, "nbformat": 4, "nbformat_minor": 1 } diff --git a/docs/source/tutorials.rst b/docs/source/tutorials.rst index 4b1ffbc8..11f1c363 100644 --- a/docs/source/tutorials.rst +++ b/docs/source/tutorials.rst @@ -4,11 +4,11 @@ Tensor Types .. toctree:: :maxdepth: 1 - Dense Tensors - Sparse Tensors - Tucker Tensors - Kruskal Tensors - Sum of Structured Tensors + Dense Tensors (tensor) + Sparse Tensors (sptensor) + Tucker Tensors (sptensor) + Kruskal Tensors (ktensor) + Sum of Structured Tensors (sumtensor) CP Decompositions ================= @@ -16,9 +16,9 @@ CP Decompositions .. toctree:: :maxdepth: 1 - Alternating Least Squares (CP-ALS) - Alternating Poisson Regression (CP-APR) - Generalized CP (GCP-OPT) + Alternating Least Squares (cp_als) + Alternating Poisson Regression (cp_apr) + Generalized CP (gcp_opt) Tucker Decompositions ===================== @@ -26,8 +26,8 @@ Tucker Decompositions .. toctree:: :maxdepth: 1 - Higher-order SVD (HOSVD) - Alternating Least Squares (ALS) + Higher-order SVD (hosvd) + Alternating Least Squares (tucker_als) Working with Tensors ==================== diff --git a/ndarraytemp.txt b/ndarraytemp.txt new file mode 100644 index 00000000..050474a6 --- /dev/null +++ b/ndarraytemp.txt @@ -0,0 +1,5 @@ +1.0000000000000000e+00 6.0000000000000000e+00 +2.0000000000000000e+00 7.0000000000000000e+00 +3.0000000000000000e+00 8.0000000000000000e+00 +4.0000000000000000e+00 9.0000000000000000e+00 +5.0000000000000000e+00 1.0000000000000000e+01 diff --git a/pyttb/import_data.py b/pyttb/import_data.py index 7c73c26d..49e1aa94 100644 --- a/pyttb/import_data.py +++ b/pyttb/import_data.py @@ -66,6 +66,7 @@ def import_data( fac_shape = import_shape(fp) fac = import_array(fp, np.prod(fac_shape)) fac = np.reshape(fac, np.array(fac_shape)) + #fac = np.asfortranarray(np.reshape(fac, np.array(fac_shape))) factor_matrices.append(fac) return ttb.ktensor(factor_matrices, weights, copy=False) raise ValueError("Failed to load tensor data") # pragma: no cover diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 42651752..55c4db19 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -62,10 +62,8 @@ class tensor: # noqa: PLW1641 Whether to deep copy (versus reference) the data. By default, the data is deep copied. 
- **Attributes** - - **data** (:class:`numpy.ndarray`) : Data of the tensor - - **shape** (:class:`tuple`) : Size of the tensor + ----- Examples -------- @@ -115,6 +113,8 @@ class tensor: # noqa: PLW1641 empty tensor of shape () data = [] + ----- + Notes ----- Instances of :class:`pyttb.tensor` can also be created using the following methods: @@ -131,10 +131,22 @@ class tensor: # noqa: PLW1641 * :meth:`pyttb.ttensor.to_tensor` - Convert a Tucker tensor to a dense tensor * :meth:`pyttb.tenmat.to_tensor` - Convert a tenmat to a dense tensor - See :doc:`/tutorial/class_tensor` for getting started with the tensor class. + ----- + + **Tutorial** + + See the :doc:`/tutorial/class_tensor` tutorial for getting started with + the tensor class. + + ----- + + **Attributes and Methods** """ - __slots__ = ("data", "shape") + __slots__ = { + "data": "Source data as :class:`numpy.ndarray`", + "shape": "Shape of the tensor as a :class:`tuple` or any iterable array of integers." + } def __init__( self, From a022ab92614743d21250553f27b503c76f4746b2 Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Fri, 19 Sep 2025 17:39:59 -0600 Subject: [PATCH 13/17] Fix formatting --- docs/source/tutorial/class_tensor.ipynb | 364 ++++-------------------- pyttb/import_data.py | 2 +- pyttb/tensor.py | 7 +- 3 files changed, 59 insertions(+), 314 deletions(-) diff --git a/docs/source/tutorial/class_tensor.ipynb b/docs/source/tutorial/class_tensor.ipynb index cfcc6930..864e5749 100644 --- a/docs/source/tutorial/class_tensor.ipynb +++ b/docs/source/tutorial/class_tensor.ipynb @@ -21,15 +21,17 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "import pyttb as ttb\n", + "from __future__ import annotations\n", + "\n", + "import sys\n", + "\n", "import numpy as np\n", "\n", - "from __future__ import annotations\n", - "import sys" + "import pyttb as ttb" ] }, { @@ -41,29 +43,9 @@ }, { "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor of shape (2, 4, 3) with order F\n", - "data[:, :, 0] =\n", - "[[1. 1. 1. 1.]\n", - " [1. 1. 1. 1.]]\n", - "data[:, :, 1] =\n", - "[[1. 1. 1. 1.]\n", - " [1. 1. 1. 1.]]\n", - "data[:, :, 2] =\n", - "[[1. 1. 1. 1.]\n", - " [1. 1. 1. 1.]]" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "M = np.ones((2, 4, 3)) # A 2x4x3 array of ones.\n", "X = ttb.tensor(M) # Convert to a tensor object\n", @@ -79,36 +61,9 @@ }, { "cell_type": "code", - "execution_count": 70, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor of shape (3, 2, 4) with order F\n", - "data[:, :, 0] =\n", - "[[1. 1.]\n", - " [1. 1.]\n", - " [1. 1.]]\n", - "data[:, :, 1] =\n", - "[[1. 1.]\n", - " [1. 1.]\n", - " [1. 1.]]\n", - "data[:, :, 2] =\n", - "[[1. 1.]\n", - " [1. 1.]\n", - " [1. 1.]]\n", - "data[:, :, 3] =\n", - "[[1. 1.]\n", - " [1. 1.]\n", - " [1. 
1.]]" - ] - }, - "execution_count": 70, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "M = np.ones((2, 4, 3)) # A 2x4x3 array of ones.\n", "X = ttb.tensor(M, shape=(3, 2, 4)) # Convert to a tensor object with compatible shape\n", @@ -125,25 +80,9 @@ }, { "cell_type": "code", - "execution_count": 71, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor of shape (4, 3) with order F\n", - "data[:, :] =\n", - "[[1. 1. 1.]\n", - " [1. 1. 1.]\n", - " [1. 1. 1.]\n", - " [1. 1. 1.]]" - ] - }, - "execution_count": 71, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "np.random.seed(0)\n", "Y = ttb.tensor(np.ones((4, 3))) # Creates a 2-way tensor.\n", @@ -152,29 +91,14 @@ }, { "cell_type": "code", - "execution_count": 72, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor of shape (1, 4, 1, 3, 1) with order F\n", - "data[:, :, 0, 0, 0] =\n", - "[[1. 1. 1. 1.]]\n", - "data[:, :, 0, 1, 0] =\n", - "[[1. 1. 1. 1.]]\n", - "data[:, :, 0, 2, 0] =\n", - "[[1. 1. 1. 1.]]" - ] - }, - "execution_count": 72, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "np.random.seed(0)\n", - "Y = ttb.tensor(np.ones((4, 3)), shape=(1, 4, 1, 3, 1)) # Creates a 5-way tensor with singleton dimensions from a two-way array.\n", + "Y = ttb.tensor(\n", + " np.ones((4, 3)), shape=(1, 4, 1, 3, 1)\n", + ") # Creates a 5-way tensor with singleton dimensions from a two-way array.\n", "Y" ] }, @@ -187,28 +111,9 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "X.data:\n", - "[[[0.5488135 0.96366276 0.0202184 ]\n", - " [0.60276338 0.79172504 0.77815675]\n", - " [0.4236548 0.56804456 0.97861834]\n", - " [0.43758721 0.07103606 0.46147936]]\n", - "\n", - " [[0.71518937 0.38344152 0.83261985]\n", - " [0.54488318 0.52889492 0.87001215]\n", - " [0.64589411 0.92559664 0.79915856]\n", - " [0.891773 0.0871293 0.78052918]]]\n", - "X.shape:\n", - "(2, 4, 3)\n" - ] - } - ], + "outputs": [], "source": [ "np.random.seed(0)\n", "X = ttb.tenrand((2, 4, 3)) # Create data.\n", @@ -225,25 +130,18 @@ }, { "cell_type": "code", - "execution_count": 79, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "X[0]: 2.0\n", - "X2[0]: 1.0\n" - ] - } - ], + "outputs": [], "source": [ "M = np.ones((2, 4, 3)) # A 2x4x3 array of ones.\n", "X = ttb.tensor(M) # Convert to a tensor object with compatible shape\n", "X2 = X.copy() # This creates a copy of X and stores it in X2\n", - "X[0, 0, 0] = 2 # Change a value from 1 to 2 \n", + "X[0, 0, 0] = 2 # Change a value from 1 to 2\n", "print(f\"X[0]: {X[0]}\")\n", - "print(f\"X2[0]: {X2[0]}\") # Will be different from X[0, 0, 0] since X2 was created as a copy of X" + "print(\n", + " f\"X2[0]: {X2[0]}\"\n", + ") # Will be different from X[0, 0, 0] since X2 was created as a copy of X" ] }, { @@ -257,21 +155,12 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "X[0]: 2.0\n", - "X2[0]: 2.0\n" - ] - } - ], + "outputs": [], "source": [ "M = np.ones((2, 4, 3)) # A 2x4x3 array of ones.\n", "X = ttb.tensor(M) # Convert to a tensor object with compatible shape\n", "X2 = X # This 
creates a reference of X but not a copy\n", - "X[0, 0, 0] = 2 # Change a value from 1 to 2 \n", + "X[0, 0, 0] = 2 # Change a value from 1 to 2\n", "print(f\"X[0]: {X[0]}\")\n", "print(f\"X2[0]: {X2[0]}\") # Will be the same as X[0, 0, 0] since X2 is a reference to X" ] }, { "cell_type": "code", - "execution_count": 82, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor of shape (5,) with order F\n", - "data[:] =\n", - "[1. 1. 1. 1. 1.]" - ] - }, - "execution_count": 82, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "np.random.seed(0)\n", "X = ttb.tensor(np.ones(5)) # Creates a 1-way tensor from a 1-way array.\n", "X" ] }, { "cell_type": "code", - "execution_count": 83, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor of shape (5, 1) with order F\n", - "data[:, :] =\n", - "[[1.]\n", - " [1.]\n", - " [1.]\n", - " [1.]\n", - " [1.]]" - ] - }, - "execution_count": 83, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "np.random.seed(0)\n", - "X = ttb.tensor(np.ones(shape=(5, 1))) # Creates a 2-way tensor from a two-way array (even though it represents a vector).\n", + "X = ttb.tensor(\n", + " np.ones(shape=(5, 1))\n", + ") # Creates a 2-way tensor from a two-way array (even though it represents a vector).\n", "X" ] }, { "cell_type": "code", - "execution_count": 37, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor of shape (2, 4, 3) with order F\n", - "data[:, :, 0] =\n", - "[[0.5488135 0.60276338 0.4236548 0.43758721]\n", - " [0.71518937 0.54488318 0.64589411 0.891773 ]]\n", - "data[:, :, 1] =\n", - "[[0.96366276 0.79172504 0.56804456 0.07103606]\n", - " [0.38344152 0.52889492 0.92559664 0.0871293 ]]\n", - "data[:, :, 2] =\n", - "[[0.0202184 0.77815675 0.97861834 0.46147936]\n", - " [0.83261985 0.87001215 0.79915856 0.78052918]]" - ] - }, - "execution_count": 37, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Create tensor with values sampled uniformly from [0,1]\n", "np.random.seed(0)\n", "X = ttb.tenrand((2, 4, 3))\n", "X" ] }, { "cell_type": "code", - "execution_count": 50, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor of shape (2, 4, 3) with order F\n", - "data[:, :, 0] =\n", - "[[23.35161788 3.04728791 6.5822303 34.85704155]\n", - " [36.4628495 -0.55145313 14.56407579 36.52093157]]\n", - "data[:, :, 1] =\n", - "[[24.00553622 27.86468466 35.50697393 41.83147913]\n", - " [ 6.27534301 35.73075019 28.24209331 18.1769294 ]]\n", - "data[:, :, 2] =\n", - "[[ 7.99313321 16.08437592 37.62368276 10.16048464]\n", - " [32.78160802 13.79306013 3.40955674 30.59496089]]" - ] - }, - "execution_count": 50, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Create tensor with values sampled uniformly from [a,b] where a < b\n", "a = -1\n", "b = 42\n", - "#np.random.seed(0)\n", - "X = ttb.tenrand((2, 4, 3)) * (b-a) + a\n", + "# np.random.seed(0)\n", + "X = ttb.tenrand((2, 4, 3)) * (b - a) + a\n", "X" ] }, { "cell_type": "code", - "execution_count": 52, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor of shape (2, 4, 3) with order F\n", - "data[:, :, 0] =\n", - "[[0.17644502 0.03723909 0.37952756 0.0768091 ]\n", - " 
[0.13100464 0.07707799 0.01109036 0.24496075]]\n", - "data[:, :, 1] =\n", - "[[0.00058489 0.11416032 0.01117244 0.03309492]\n", - " [0.02161545 0.04421351 0.07349822 0.2187989 ]]\n", - "data[:, :, 2] =\n", - "[[8.24610548e-03 4.67157204e-02 1.63106699e-01 2.98181706e-03]\n", - " [4.79103816e-03 1.54792458e-01 7.41521559e-02 1.27479709e-04]]" - ] - }, - "execution_count": 52, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Create random tensors whose values are drawn from other distributions using numpy.random and converting to a tensor \n", - "A = np.random.beta(0.5,2.5,size=(2,4,3))\n", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create random tensors whose values are drawn from other distributions using numpy.random and converting to a tensor\n", + "A = np.random.beta(0.5, 2.5, size=(2, 4, 3))\n", "X = ttb.tensor(A)\n", "X" ] @@ -456,21 +257,9 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "empty tensor of shape ()\n", - "data = []" - ] - }, - "execution_count": 53, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "X = ttb.tensor() # Creates an empty tensor\n", "X" @@ -495,32 +284,9 @@ }, { "cell_type": "code", - "execution_count": 54, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor of shape (2, 1, 4) with order F\n", - "data[:, :, 0] =\n", - "[[0.]\n", - " [0.]]\n", - "data[:, :, 1] =\n", - "[[0.]\n", - " [0.]]\n", - "data[:, :, 2] =\n", - "[[0.]\n", - " [0.]]\n", - "data[:, :, 3] =\n", - "[[0.]\n", - " [0.]]" - ] - }, - "execution_count": 54, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "X = ttb.tenzeros((2, 1, 4)) # Creates a 2x1x4 tensor of zeroes.\n", "X" @@ -1327,25 +1093,7 @@ ] } ], - "metadata": { - "kernelspec": { - "display_name": "pyttb_312", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 1 } diff --git a/pyttb/import_data.py b/pyttb/import_data.py index 49e1aa94..eaba92de 100644 --- a/pyttb/import_data.py +++ b/pyttb/import_data.py @@ -66,7 +66,7 @@ def import_data( fac_shape = import_shape(fp) fac = import_array(fp, np.prod(fac_shape)) fac = np.reshape(fac, np.array(fac_shape)) - #fac = np.asfortranarray(np.reshape(fac, np.array(fac_shape))) + # fac = np.asfortranarray(np.reshape(fac, np.array(fac_shape))) factor_matrices.append(fac) return ttb.ktensor(factor_matrices, weights, copy=False) raise ValueError("Failed to load tensor data") # pragma: no cover diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 55c4db19..8e7eb430 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -134,7 +134,7 @@ class tensor: # noqa: PLW1641 ----- **Tutorial** - + See the :doc:`/tutorial/class_tensor` tutorial for getting started with the tensor class. @@ -143,10 +143,7 @@ class tensor: # noqa: PLW1641 **Attributes and Methods** """ - __slots__ = { - "data": "Source data as :class:`numpy.ndarray`", - "shape": "Shape of the tensor as a :class:`tuple` or any iterable array of integers." 
- } + __slots__ = {"data": "Source data", "shape": "Shape of the tensor"} def __init__( self, From 012b01a2c3fd750a77cbe40bd231e52718b1fc6e Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Sat, 4 Oct 2025 20:54:06 -0600 Subject: [PATCH 14/17] Updating tensor docstrings. --- docs/source/tensor.rst | 6 +- pyttb/tensor.py | 2121 ++++++++++++++++++++++------------------ 2 files changed, 1183 insertions(+), 944 deletions(-) diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst index abdfb6bf..4788d760 100644 --- a/docs/source/tensor.rst +++ b/docs/source/tensor.rst @@ -12,8 +12,8 @@ For *all* examples in this document, the following imports are assumed:: :exclude-members: __init__, __dict__, __weakref__, __deepcopy__ :show-inheritance: -.. autofunction:: pyttb.tenones -.. autofunction:: pyttb.tenzeros -.. autofunction:: pyttb.tenrand .. autofunction:: pyttb.tendiag .. autofunction:: pyttb.teneye +.. autofunction:: pyttb.tenones +.. autofunction:: pyttb.tenrand +.. autofunction:: pyttb.tenzeros diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 8e7eb430..f87a5fa5 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -52,15 +52,16 @@ class tensor: # noqa: PLW1641 Parameters ---------- data : optional - Source data as :class:`numpy.ndarray` + Source data as :class:`numpy.ndarray`. shape : optional Shape of the tensor as a :class:`tuple` or any iterable array of integers. A single integer means that the tensor should be a 1D array. - If no shape is given, defaults to :attr:`numpy.ndarray.shape` of ``data``. - Otherwise, the data is reshaped to the specified shape. + If :attr:`shape` is not given, defaults to :attr:`numpy.ndarray.shape` + of :attr:`data`. Otherwise, :attr:`data` is reshaped to the specified + :attr:`shape`. copy : optional - Whether to deep copy (versus reference) the data. - By default, the data is deep copied. + Whether to deep copy (versus reference) :attr:`data`. + By default, :attr:`data` is deep copied. ----- @@ -69,10 +70,12 @@ class tensor: # noqa: PLW1641 -------- Create a :class:`pyttb.tensor` from a three-way :class:`numpy.ndarray`:: - >>> data = np.array([[[1,13],[5,17],[9,21]], - ... [[2,14],[6,18],[10,22]], - ... [[3,15],[7,19],[11,23]], - ... [[4,16],[8,20],[12,24]]]) + >>> data = np.array( + ... [[[1,13],[5,17],[ 9,21]], + ... [[2,14],[6,18],[10,22]], + ... [[3,15],[7,19],[11,23]], + ... [[4,16],[8,20],[12,24]]] + ... ) >>> T = ttb.tensor(data) >>> print(T) tensor of shape (4, 3, 2) with order F @@ -90,8 +93,7 @@ class tensor: # noqa: PLW1641 Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray` vector and reshape it:: - >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - ... 17, 18, 19, 20, 21, 22, 23, 24]) + >>> data = np.arange(1,25) >>> T = ttb.tensor(data, shape=(4, 3, 2)) >>> print(T) tensor of shape (4, 3, 2) with order F @@ -143,7 +145,7 @@ class tensor: # noqa: PLW1641 **Attributes and Methods** """ - __slots__ = {"data": "Source data", "shape": "Shape of the tensor"} + __slots__ = {"data": "Tensor data array.", "shape": "Shape of the tensor."} def __init__( self, @@ -196,6 +198,34 @@ def __init__( self.shape = shape return + @property + def ndims(self) -> int: + """ + Number of dimensions of the tensor (i.e., length of the tensor shape). + + Examples + -------- + >>> T = ttb.tenones((2, 2, 2)) + >>> T.ndims + 3 + """ + if self.shape == (0,): + return 0 + return len(self.shape) + + @property + def nnz(self) -> int: + """ + Number of non-zero elements in the tensor (even though this is a dense tensor). 
+ + Examples + -------- + >>> T = ttb.tenones((2, 2, 2)) + >>> T.nnz + 8 + """ + return np.count_nonzero(self.data) + @property def order(self) -> Literal["F"]: """Return the data layout of the underlying storage.""" @@ -215,7 +245,8 @@ function_handle: Callable[[tuple[int, ...]], np.ndarray], shape: Shape, ) -> tensor: - """Construct a :class:`pyttb.tensor` with data from a function. + """ + Construct a :class:`pyttb.tensor` with data from a function. Parameters ---------- @@ -258,7 +289,7 @@ Create a :class:`pyttb.tensor` with all entries equal to 1 using :func:`numpy.ones`:: - >>> T = ttb.tensor.from_function(lambda s: np.ones(s,order='F'), (2, 3, 4)) + >>> T = ttb.tensor.from_function(lambda s: np.ones(s), (2, 3, 4)) >>> print(T) tensor of shape (2, 3, 4) with order F data[:, :, 0] = @@ -283,33 +314,6 @@ # Create the tensor return cls(data, shape, copy=False) - def copy(self) -> tensor: - """Make a deep copy of a :class:`pyttb.tensor`. - - Returns - ------- - Deep copy of original tensor. - - Examples - -------- - Observing the difference between a shallow copy and a deep copy. When the - original tensor changes, so does the shallow copy, but the deep copy does not:: - - >>> T = ttb.tensor(np.ones(8), (2, 2, 2)) - >>> T_shallow = T - >>> T_deep = T.copy() - >>> T[0, 0, 0] = 3 - >>> T[0, 0, 0] == T_shallow[0, 0, 0] - True - >>> T[0, 0, 0] == T_deep[0, 0, 0] - False - """ - return ttb.tensor(self.data, self.shape, copy=True) - - def __deepcopy__(self, memo): - """Return deep copy of this tensor.""" - return self.copy() - @overload def collapse( self, @@ -345,9 +349,9 @@ Examples -------- - Sum all elements of tensor:: + Sum all elements of tensor: - >>> T = ttb.tensor(np.ones((4,3,2),order='F')) + >>> T = ttb.tenones((4, 3, 2)) >>> T.collapse() 24.0 @@ -369,15 +373,15 @@ Compute the max entry in each mode-2 slice (output is a tensor):: - >>> T.collapse([0, 1], np.max) + >>> T.collapse([0, 1], fun=np.max) tensor of shape (2,) with order F data[:] = [1. 1.] Find the maximum and minimum values in a tensor:: - >>> randn = lambda s : np.random.randn(np.prod(s)) - >>> np.random.seed(0) # reproducibility + >>> randn = lambda s: np.random.randn(np.prod(s)) + >>> np.random.seed(0) # reproducibility >>> T = ttb.tensor.from_function(randn, (2, 2, 2)) >>> print(T) tensor of shape (2, 2, 2) with order F @@ -434,12 +438,15 @@ def contract(self, i1: int, i2: int) -> np.ndarray | tensor: """ Contract tensor along two dimensions (array trace). + Note that the dimensions used in the contraction must have the same + size. + Parameters ---------- i1: - First dimension + First dimension. i2: - Second dimension + Second dimension. Returns ------- @@ -450,7 +457,7 @@ Contract a three-way 2 x 2 x 2 tensor along two dimensions in three possible ways:: - >>> T = ttb.tensor(np.ones(8), (2, 2, 2)) # All-ones 2 x 2 x 2 tensor + >>> T = ttb.tenones((2, 2, 2)) # All-ones 2 x 2 x 2 tensor >>> T.contract(0, 1) tensor of shape (2,) with order F data[:] = @@ -516,14 +523,44 @@ return ttb.tensor(newdata, newsize, copy=False) + def copy(self) -> tensor: + """ + Make a deep copy of a :class:`pyttb.tensor`. + + Returns + ------- + Deep copy of original tensor. + + Examples + -------- + Observing the difference between a shallow copy and a deep copy. 
When + the original tensor changes, so does the shallow copy, but the deep copy + does not:: + + >>> T = ttb.tensor(np.ones(8), (2, 2, 2)) + >>> T_shallow = T + >>> T_deep = T.copy() + >>> T[0, 0, 0] = 3 + >>> T[0, 0, 0] == T_shallow[0, 0, 0] + True + >>> T[0, 0, 0] == T_deep[0, 0, 0] + False + """ + return ttb.tensor(self.data, self.shape, copy=True) + + def __deepcopy__(self, memo): + """Return deep copy of this tensor.""" + return self.copy() + def double(self, immutable: bool = False) -> np.ndarray: """ Convert `:class:pyttb.tensor` to an `:class:numpy.ndarray` of doubles. Parameters ---------- - immutable: Whether or not the returned data cam be mutated. May enable - additional optimizations. + immutable: + Whether or not the returned data can be mutated. May enable additional + optimizations. Returns ------- @@ -531,13 +568,13 @@ Examples -------- - >>> T = ttb.tensor(np.ones(8), (2, 2, 2)) # All-ones 2 x 2 x 2 tensor - >>> T.double() - array([[[1., 1.], - [1., 1.]], - - [[1., 1.], - [1., 1.]]]) + >>> T = ttb.tensor(np.ones(8), (2, 2, 2)) # All-ones 2 x 2 x 2 tensor + >>> T.double() + array([[[1., 1.], + [1., 1.]], + + [[1., 1.], + [1., 1.]]]) """ double = self.data.astype(np.float64, order=self.order, copy=not immutable) if immutable: @@ -550,28 +587,27 @@ Returns ------- - Copy of tensor data with the exponential function applied to data\ - element-wise. + Copy of tensor data with the exponential function applied to data element-wise. Examples -------- - >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) # Tensor with entries 0 to 7 - >>> print(T) - tensor of shape (2, 2, 2) with order F - data[:, :, 0] = - [[0 2] - [1 3]] - data[:, :, 1] = - [[4 6] - [5 7]] - >>> print(T.exp()) - tensor of shape (2, 2, 2) with order F - data[:, :, 0] = - [[ 1. 7.3890561 ] - [ 2.71828183 20.08553692]] - data[:, :, 1] = - [[ 54.59815003 403.42879349] - [ 148.4131591 1096.63315843]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) # Tensor with entries 0 to 7 + >>> print(T) + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 2] + [1 3]] + data[:, :, 1] = + [[4 6] + [5 7]] + >>> print(T.exp()) + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[ 1. 7.3890561 ] + [ 2.71828183 20.08553692]] + data[:, :, 1] = + [[ 54.59815003 403.42879349] + [ 148.4131591 1096.63315843]] """ return ttb.tensor(np.exp(self.data), copy=False) @@ -581,8 +617,8 @@ Returns ------- - Array of subscripts of the nonzero values in the tensor and a column\ - vector of the corresponding values. + Array of subscripts of the nonzero values in the tensor and a column vector of + the corresponding values. Examples -------- @@ -621,193 +657,37 @@ vals = self.data[tuple(subs.T)][:, None] return subs, vals - def to_sptensor(self) -> ttb.sptensor: - """Construct a :class:`pyttb.sptensor` from `:class:pyttb.tensor`. - - Returns - ------- - Generated Sparse Tensor - - Examples - -------- - Construct a 2x2x2 tensor with some nonzero entries:: - - >>> np.random.seed(3) # reproducibility - >>> sprandint = lambda s: np.random.randint(0, 4, size=np.prod(s)) / 4; - >>> T = ttb.tensor.from_function(sprandint, (2,2,2)) - >>> print(T) - tensor of shape (2, 2, 2) with order F - data[:, :, 0] = - [[0.5 0.25] - [0. 0.75]] - data[:, :, 1] = - [[0. 0. ] - [0. 
0.25]] - - Convert to a sparse tensor:: - - >>> S = T.to_sptensor() - >>> print(S) - sparse tensor of shape (2, 2, 2) with 4 nonzeros and order F - [0, 0, 0] = 0.5 - [0, 1, 0] = 0.25 - [1, 1, 0] = 0.75 - [1, 1, 1] = 0.25 - """ - subs, vals = self.find() - return ttb.sptensor(subs, vals, self.shape, copy=False) - def full(self) -> tensor: """ Create a dense tensor from dense tensor. - Convenience method to maintain common interface with other - tensor types. + Convenience method to maintain common interface with other tensor types. Returns ------- - Shallow copy + Shallow copy of original tensor. """ return self - def to_tenmat( - self, - rdims: np.ndarray | None = None, - cdims: np.ndarray | None = None, - cdims_cyclic: Literal["fc"] | Literal["bc"] | Literal["t"] | None = None, - copy: bool = True, - ) -> ttb.tenmat: - """Construct a :class:`pyttb.tenmat` from a :class:`pyttb.tensor`. - - Parameters - ---------- - rdims: - Mapping of row indices. - cdims: - Mapping of column indices. - cdims_cyclic: - When only rdims is specified maps a single rdim to the rows and - the remaining dimensions span the columns. _fc_ (forward cyclic) - in the order range(rdims,self.ndims()) followed by range(0, rdims). - _bc_ (backward cyclic) range(rdims-1, -1, -1) then - range(self.ndims(), rdims, -1). - copy: - Whether to make a copy of provided data or just reference it. - - Notes - ----- - Forward cyclic is defined by Kiers [1]_ and backward cyclic is defined by - De Lathauwer, De Moor, and Vandewalle [2]_. - - References - ---------- - .. [1] KIERS, H. A. L. 2000. Towards a standardized notation and terminology - in multiway analysis. J. Chemometrics 14, 105-122. - .. [2] DE LATHAUWER, L., DE MOOR, B., AND VANDEWALLE, J. 2000b. On the best - rank-1 and rank-(R1, R2, ... , RN ) approximation of higher-order - tensors. SIAM J. Matrix Anal. Appl. 21, 4, 1324-1342. - - Examples - -------- - Create a :class:`pyttb.tensor`. - - >>> tshape = (2, 2, 2) - >>> data = np.reshape(np.arange(prod(tshape)), tshape) - >>> T = ttb.tensor(data) - >>> T # doctest: +NORMALIZE_WHITESPACE - tensor of shape (2, 2, 2) with order F - data[:, :, 0] = - [[0 2] - [4 6]] - data[:, :, 1] = - [[1 3] - [5 7]] - - Convert to a :class:`pyttb.tenmat` unwrapping around the first dimension. - Either allow for implicit column or explicit column dimension - specification. - - >>> TM1 = T.to_tenmat(rdims=np.array([0])) - >>> TM2 = T.to_tenmat(rdims=np.array([0]), cdims=np.array([1, 2])) - >>> TM1.isequal(TM2) - True - - Convert using cyclic column ordering. For the three mode case _fc_ is the same - result. - - >>> TM3 = T.to_tenmat(rdims=np.array([0]), cdims_cyclic="fc") - >>> TM3 # doctest: +NORMALIZE_WHITESPACE - matrix corresponding to a tensor of shape (2, 2, 2) with order F - rindices = [ 0 ] (modes of tensor corresponding to rows) - cindices = [ 1, 2 ] (modes of tensor corresponding to columns) - data[:, :] = - [[0 2 1 3] - [4 6 5 7]] - - Backwards cyclic reverses the order. - - >>> TM4 = T.to_tenmat(rdims=np.array([0]), cdims_cyclic="bc") - >>> TM4 # doctest: +NORMALIZE_WHITESPACE - matrix corresponding to a tensor of shape (2, 2, 2) with order F - rindices = [ 0 ] (modes of tensor corresponding to rows) - cindices = [ 2, 1 ] (modes of tensor corresponding to columns) - data[:, :] = - [[0 1 2 3] - [4 5 6 7]] - """ - n = self.ndims - alldims = np.array([range(n)]) - tshape = self.shape - - # Verify inputs - if rdims is None and cdims is None: - assert False, "Either rdims or cdims or both must be specified." 
- if rdims is not None and not sum(np.isin(rdims, alldims)) == len(rdims): - assert False, "Values in rdims must be in [0, source.ndims]." - if cdims is not None and not sum(np.isin(cdims, alldims)) == len(cdims): - assert False, "Values in cdims must be in [0, source.ndims]." - - rdims, cdims = gather_wrap_dims(n, rdims, cdims, cdims_cyclic) - # if rdims or cdims is empty, hstack will output an array of float not int - if rdims.size == 0: - dims = cdims.copy() - elif cdims.size == 0: - dims = rdims.copy() - else: - dims = np.hstack([rdims, cdims]) - if not len(dims) == n or not (alldims == np.sort(dims)).all(): - assert False, ( - "Incorrect specification of dimensions, the sorted concatenation " - "of rdims and cdims must be range(source.ndims)." - ) - rprod = 1 if rdims.size == 0 else np.prod(np.array(tshape)[rdims]) - cprod = 1 if cdims.size == 0 else np.prod(np.array(tshape)[cdims]) - data = np.reshape( - self.permute(dims).data, - (rprod, cprod), - order=self.order, - ) - assert data.flags["F_CONTIGUOUS"] - return ttb.tenmat(data, rdims, cdims, tshape=tshape, copy=copy) - def innerprod( self, other: tensor | ttb.sptensor | ttb.ktensor | ttb.ttensor ) -> float: - """Efficient inner product between a tensor and other `pyttb` tensors. + """ + Efficient inner product between a tensor and other :py:mod:`pyttb` tensors. Parameters ---------- other: - Tensor to take an innerproduct with. + Tensor to take an inner product with. Examples -------- - >>> T = ttb.tensor(np.array([[1.0, 0.0], [0.0, 4.0]])) - >>> T.innerprod(T) - 17.0 - >>> S = T.to_sptensor() - >>> T.innerprod(S) - 17.0 + >>> T = ttb.tensor(np.array([[1.0, 0.0], [0.0, 4.0]])) + >>> T.innerprod(T) + 17.0 + >>> S = T.to_sptensor() + >>> T.innerprod(S) + 17.0 """ if isinstance(other, ttb.tensor): if self.shape != other.shape: @@ -831,13 +711,13 @@ def isequal(self, other: tensor | ttb.sptensor) -> bool: Examples -------- - >>> T1 = ttb.tensor(2 * np.ones((2, 2, 2))) - >>> T2 = 2 * ttb.tensor(np.ones((2, 2, 2))) - >>> T1.isequal(T2) - True - >>> T2[1, 0, 1] = 1 - >>> T1.isequal(T2) - False + >>> T1 = ttb.tensor(2 * np.ones((2, 2, 2))) + >>> T2 = 2 * ttb.tensor(np.ones((2, 2, 2))) + >>> T1.isequal(T2) + True + >>> T2[1, 0, 1] = 1 + >>> T1.isequal(T2) + False """ if isinstance(other, ttb.tensor): return bool(np.all(self.data == other.data)) @@ -876,33 +756,42 @@ def issymmetric( # noqa: PLR0912 Parameters ---------- grps: - Modes to check for symmetry + Modes to check for symmetry. version: - Any non-None value will call the non-default old version + Any non-None value will call the non-default old version. return_details: - Flag to return symmetry details in addition to bool + Flag to return symmetry details in addition to bool. Returns ------- - If symmetric in modes, optionally all differences and permutations + If symmetric in modes; optionally all differences and permutations. Examples -------- - >>> T = ttb.tensor(np.ones((2,2))) - >>> T.issymmetric() - True - >>> T.issymmetric(grps=np.arange(T.ndims)) - True - >>> is_sym, diffs, perms = \ - T.issymmetric(grps=np.arange(T.ndims), version=1, return_details=True) - >>> print(f"Tensor is symmetric: {is_sym}") - Tensor is symmetric: True - >>> print(f"Differences in modes: {diffs}") - Differences in modes: [[0.] - [0.]] - >>> print(f"Permutations: {perms}") - Permutations: [[0. 1.] - [1. 0.]] + >>> T = ttb.tenones((2, 2, 2)) + >>> T.issymmetric() + True + >>> T.issymmetric(grps=np.arange(T.ndims)) + True + >>> is_sym, diffs, perms = T.issymmetric( + ... 
grps=np.arange(T.ndims), version=1, return_details=True + ... ) + >>> print(f"Tensor is symmetric: {is_sym}") + Tensor is symmetric: True + >>> print(f"Differences in modes: {diffs}") + Differences in modes: [[0.] + [0.] + [0.] + [0.] + [0.] + [0.]] + >>> print(f"Permutations: {perms}") + Permutations: [[0. 1. 2.] + [0. 2. 1.] + [1. 0. 2.] + [1. 2. 0.] + [2. 0. 1.] + [2. 1. 0.]] """ n = self.ndims sz = np.array(self.shape) @@ -981,9 +870,9 @@ def logical_and(self, other: float | tensor) -> tensor: Examples -------- - >>> T = ttb.tenones((2, 2)) - >>> T.logical_and(T).collapse() # All true - 4.0 + >>> T = ttb.tenones((2, 2, 2)) + >>> T.logical_and(T).collapse() # All true + 8.0 """ def logical_and(x, y): @@ -997,9 +886,9 @@ def logical_not(self) -> tensor: Examples -------- - >>> T = ttb.tenones((2, 2)) - >>> T.logical_not().collapse() # All false - 0.0 + >>> T = ttb.tenones((2, 2, 2)) + >>> T.logical_not().collapse() # All false + 0.0 """ # Np logical not dtype argument seems to not work here return ttb.tensor(np.logical_not(self.data).astype(self.data.dtype), copy=False) @@ -1015,9 +904,9 @@ def logical_or(self, other: float | tensor) -> tensor: Examples -------- - >>> T = ttb.tenones((2, 2)) - >>> T.logical_or(T.logical_not()).collapse() # All true - 4.0 + >>> T = ttb.tenones((2, 2, 2)) + >>> T.logical_or(T.logical_not()).collapse() # All true + 8.0 """ def tensor_or(x, y): @@ -1036,9 +925,9 @@ def logical_xor(self, other: float | tensor) -> tensor: Examples -------- - >>> T = ttb.tenones((2, 2)) - >>> T.logical_xor(T.logical_not()).collapse() # All true - 4.0 + >>> T = ttb.tenones((2, 2, 2)) + >>> T.logical_xor(T.logical_not()).collapse() # All true + 8.0 """ def tensor_xor(x, y): @@ -1061,10 +950,21 @@ def mask(self, W: tensor) -> np.ndarray: Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> W = ttb.tenones((2, 2)) - >>> T.mask(W) - array([1, 3, 2, 4]) + Create a 2 x 2 x 2 tensor with values 0-7, then extract the values along the + diagonal of the tensor:: + + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> print(T) + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 2] + [1 3]] + data[:, :, 1] = + [[4 6] + [5 7]] + >>> W = ttb.tendiag(np.ones(T.shape[0]), T.shape) + >>> T.mask(W) + array([0, 7]) """ # Error checking if np.any(np.array(W.shape) > np.array(self.shape)): @@ -1079,11 +979,12 @@ def mask(self, W: tensor) -> np.ndarray: def mttkrp( self, U: ttb.ktensor | Sequence[np.ndarray], n: int | np.integer ) -> np.ndarray: - """Matricized tensor times Khatri-Rao product. + """ + Matricized tensor times Khatri-Rao product. - The matrices used in the - Khatri-Rao product are passed as a :class:`pyttb.ktensor` (where the - factor matrices are used) or as a list of :class:`numpy.ndarray` objects. + The matrices used in the Khatri-Rao product are passed as a + :class:`pyttb.ktensor` (where the factor matrices are used) or as a list of + :class:`numpy.ndarray` objects. 
Parameters ---------- @@ -1098,11 +999,11 @@ def mttkrp( Examples -------- - >>> T = ttb.tenones((2, 2, 2)) - >>> U = [np.ones((2, 2))] * 3 - >>> T.mttkrp(U, 2) - array([[4., 4.], - [4., 4.]]) + >>> T = ttb.tenones((2, 2, 2)) + >>> U = [np.ones((2, 2))] * 3 + >>> T.mttkrp(U, 2) + array([[4., 4.], + [4., 4.]]) """ # check that we have a tensor that can perform mttkrp if self.ndims < 2: @@ -1164,13 +1065,13 @@ def mttkrps(self, U: ttb.ktensor | Sequence[np.ndarray]) -> list[np.ndarray]: Examples -------- - >>> T = ttb.tenones((2, 2, 2)) - >>> U = [np.ones((2, 2))] * 3 - >>> T.mttkrps(U) - [array([[4., 4.], - [4., 4.]]), array([[4., 4.], - [4., 4.]]), array([[4., 4.], - [4., 4.]])] + >>> T = ttb.tenones((2, 2, 2)) + >>> U = [np.ones((2, 2))] * 3 + >>> T.mttkrps(U) + [array([[4., 4.], + [4., 4.]]), array([[4., 4.], + [4., 4.]]), array([[4., 4.], + [4., 4.]])] """ if isinstance(U, ttb.ktensor): U = U.factor_matrices @@ -1192,45 +1093,18 @@ def mttkrps(self, U: ttb.ktensor | Sequence[np.ndarray]) -> list[np.ndarray]: V[-1] = W return V - @property - def ndims(self) -> int: - """ - Number of dimensions of the tensor. - - Examples - -------- - >>> T = ttb.tenones((2, 2)) - >>> T.ndims - 2 - """ - if self.shape == (0,): - return 0 - return len(self.shape) - - @property - def nnz(self) -> int: - """ - Number of non-zero elements in the tensor. - - Examples - -------- - >>> T = ttb.tenones((2, 2, 2)) - >>> T.nnz - 8 - """ - return np.count_nonzero(self.data) - def norm(self) -> float: - """Frobenius norm of the tensor. + """ + Frobenius norm of the tensor. - Defined as the square root of the sum of the - squares of the elements of the tensor. + Defined as the square root of the sum of the squares of the elements of the + tensor. Examples -------- - >>> T = ttb.tenones((2, 2, 2, 2)) - >>> T.norm() - 4.0 + >>> T = ttb.tenones((2, 2, 2, 2)) + >>> T.norm() + 4.0 """ # default of np.linalg.norm is to vectorize the data and compute the vector # norm, which is equivalent to the Frobenius norm for multidimensional arrays. @@ -1241,13 +1115,12 @@ def nvecs(self, n: int, r: int, flipsign: bool = True) -> np.ndarray: """ Compute the leading mode-n vectors of the tensor. - Computes the `r` leading eigenvectors of Tn*Tn.T (where Tn is the - mode-`n` matricization/unfolding of self), which provides information - about the mode-n fibers. In two-dimensions, the `r` leading mode-1 - vectors are the same as the `r` left singular vectors and the `r` - leading mode-2 vectors are the same as the `r` right singular - vectors. By default, this method computes the top `r` eigenvectors - of Tn*Tn.T. + Computes the `r` leading eigenvectors of Tn*Tn.T (where Tn is the mode-`n` + matricization/unfolding of self), which provides information about the mode-n + fibers. In two-dimensions, the `r` leading mode-1 vectors are the same as the + `r` left singular vectors and the `r` leading mode-2 vectors are the same as the + `r` right singular vectors. By default, this method computes the top `r` + eigenvectors of Tn*Tn.T. 
Parameters ---------- @@ -1264,13 +1137,13 @@ def nvecs(self, n: int, r: int, flipsign: bool = True) -> np.ndarray: Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T.nvecs(0, 1) # doctest: +ELLIPSIS - array([[0.4045...], - [0.9145...]]) - >>> T.nvecs(0, 2) # doctest: +ELLIPSIS - array([[ 0.4045..., 0.9145...], - [ 0.9145..., -0.4045...]]) + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> T.nvecs(0, 1) # doctest: +ELLIPSIS + array([[0.6318...], + [0.7751...]]) + >>> T.nvecs(0, 2) # doctest: +ELLIPSIS + array([[ 0.6318..., 0.7751...], + [ 0.7751..., -0.6318...]]) """ Xn = self.to_tenmat(rdims=np.array([n])).double() y = Xn @ Xn.T @@ -1298,9 +1171,8 @@ def nvecs(self, n: int, r: int, flipsign: bool = True) -> np.ndarray: def permute(self, order: OneDArray) -> tensor: """Permute tensor dimensions. - The result is a tensor that has the - same values, but the order of the subscripts needed to access - any particular element are rearranged as specified by `order`. + The result is a tensor that has the same values, but the order of the subscripts + needed to access any particular element are rearranged as specified by `order`. Parameters ---------- @@ -1313,17 +1185,23 @@ def permute(self, order: OneDArray) -> tensor: Examples -------- - >>> T1 = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T1 - tensor of shape (2, 2) with order F - data[:, :] = - [[1 2] - [3 4]] - >>> T1.permute(np.array((1, 0))) - tensor of shape (2, 2) with order F - data[:, :] = - [[1 3] - [2 4]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> print(T) + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 2] + [1 3]] + data[:, :, 1] = + [[4 6] + [5 7]] + >>> T.permute(np.array((2, 1, 0))) + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 2] + [4 6]] + data[:, :, 1] = + [[1 3] + [5 7]] """ order = parse_one_d(order) if self.ndims != order.size: @@ -1350,16 +1228,19 @@ def reshape(self, shape: Shape) -> tensor: Parameters ---------- shape: - New shape + New shape. Examples -------- - >>> T1 = ttb.tenones((2, 2)) - >>> T1.shape - (2, 2) - >>> T2 = T1.reshape((4, 1)) - >>> T2.shape - (4, 1) + >>> T = ttb.tenones((2, 2, 2)) + >>> T.shape + (2, 2, 2) + >>> T.reshape((1, 4, 2)) + tensor of shape (1, 4, 2) with order F + data[:, :, 0] = + [[1. 1. 1. 1.]] + data[:, :, 1] = + [[1. 1. 1. 1.]] """ shape = parse_shape(shape) if prod(self.shape) != prod(shape): @@ -1377,30 +1258,42 @@ def scale( Parameters ---------- - factor: Scaling factor - dims: Dimensions to scale + factor: + Scaling factor. + dims: + Dimensions to scale. Returns ------- - Scaled Tensor. + Scaled tensor. 
Examples -------- - >>> T = ttb.tenones((3, 4, 5)) - >>> S = np.arange(5) - >>> Y = T.scale(S, 2) - >>> Y.data[0, 0, :] - array([0., 1., 2., 3., 4.]) - >>> S = ttb.tensor(np.arange(5)) - >>> Y = T.scale(S, 2) - >>> Y.data[0, 0, :] - array([0., 1., 2., 3., 4.]) - >>> S = ttb.tensor(np.arange(12), shape=(3, 4)) - >>> Y = T.scale(S, [0, 1]) - >>> Y.data[:, :, 0] - array([[ 0., 3., 6., 9.], - [ 1., 4., 7., 10.], - [ 2., 5., 8., 11.]]) + Create tensor of ones and scale dimension 2 using a :class:`numpy.ndarray`:: + + >>> T = ttb.tenones((3, 4, 5)) + >>> S = np.arange(5) + >>> Y = T.scale(S, 2) + >>> T.data[0, 0, :] + array([1., 1., 1., 1., 1.]) + >>> Y.data[0, 0, :] + array([0., 1., 2., 3., 4.]) + + Scale in the same way using a tensor:: + + >>> S = ttb.tensor(np.arange(5)) + >>> Y = T.scale(S, 2) + >>> Y.data[0, 0, :] + array([0., 1., 2., 3., 4.]) + + Scale along multiple dimensions:: + + >>> S = ttb.tensor(np.arange(12), shape=(3, 4)) + >>> Y = T.scale(S, [0, 1]) + >>> Y.data[:, :, 0] + array([[ 0., 3., 6., 9.], + [ 1., 4., 7., 10.], + [ 2., 5., 8., 11.]]) """ if isinstance(dims, list): dims = np.array(dims) @@ -1430,7 +1323,8 @@ def scale( return ttb.tenmat(result, dims, remdims, self.shape, copy=False).to_tensor() def squeeze(self) -> tensor | float: - """Remove singleton dimensions from the tensor. + """ + Remove singleton dimensions from the tensor. Returns ------- @@ -1438,16 +1332,25 @@ def squeeze(self) -> tensor | float: Examples -------- - >>> T = ttb.tensor(np.array([[[4]]])) - >>> T.squeeze() - 4 - >>> T = ttb.tensor(np.array([[1, 2, 3]])) - >>> T.squeeze().data - array([1, 2, 3]) - """ - shapeArray = np.array(self.shape) - if np.all(shapeArray > 1): - return self.copy() + Create a 3-way tensor with two singleton dimensions and then apply squeeze:: + + >>> T = ttb.tensor(np.array([[[1, 2, 3]]])) + >>> print(T) + tensor of shape (1, 1, 3) with order F + data[:, :, 0] = + [[1]] + data[:, :, 1] = + [[2]] + data[:, :, 2] = + [[3]] + >>> T.squeeze() + tensor of shape (3,) with order F + data[:] = + [1 2 3] + """ + shapeArray = np.array(self.shape) + if np.all(shapeArray > 1): + return self.copy() else: idx = np.where(shapeArray > 1) if idx[0].size == 0: @@ -1478,15 +1381,30 @@ def symmetrize( # noqa: PLR0912,PLR0915 Examples -------- - >>> T = ttb.tenones((2, 2, 2)) - >>> T.symmetrize(np.array([0, 2])) - tensor of shape (2, 2, 2) with order F - data[:, :, 0] = - [[1. 1.] - [1. 1.]] - data[:, :, 1] = - [[1. 1.] - [1. 1.]] + Create a 2 x 2 x 2 tensor with values 0-7 and check if it is symmetric:: + + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 2] + [1 3]] + data[:, :, 1] = + [[4 6] + [5 7]] + >>> T.issymmetric() + False + + Symmetrize the tensor:: + + >>> T.symmetrize() # doctest: +ELLIPSIS + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0... 2.3333...] + [2.3333... 4.6666...]] + data[:, :, 1] = + [[2.3333... 4.6666...] + [4.6666... 7... ]] """ n = self.ndims sz = np.array(self.shape) @@ -1611,19 +1529,18 @@ def ttm( Tensor times matrix. Computes the n-mode product of `self` with the matrix `matrix`; i.e., - `self x_n matrix`. The integer `n` specifies the dimension (or mode) - along which the matrix should be multiplied. If `matrix.shape = (J,I)`, - then the tensor must have `self.shape[n] = I`. The result will be the - same order and shape as `self` except that the size of dimension `n` - will be `J`. + `self x_n matrix`. 
The integer `n` specifies the dimension (or mode) along which + the matrix should be multiplied. If `matrix.shape = (J,I)`, then the tensor must + have `self.shape[n] = I`. The result will be the same order and shape as `self` + except that the size of dimension `n` will be `J`. - Multiplication with more than one matrix is provided using a list of - matrices and corresponding dimensions in the tensor to use. Multiplication - using the transpose of the matrix (or matrices) is also provided. + Multiplication with more than one matrix is provided using a list of matrices + and corresponding dimensions in the tensor to use. Multiplication using the + transpose of the matrix (or matrices) is also provided. - The dimensions of the tensor with which to multiply can be provided as - `dims`, or the dimensions to exclude from `[0, ..., self.ndims]` can be - specified using `exclude_dims`. + The dimensions of the tensor with which to multiply can be provided as `dims`, + or the dimensions to exclude from `[0, ..., self.ndims]` can be specified using + `exclude_dims`. Parameters ---------- @@ -1632,7 +1549,7 @@ dims: Dimensions to multiply against. exclude_dims: - Use all dimensions but these. + Multiply using all dimensions but these. transpose: Transpose matrices during multiplication. @@ -1642,23 +1559,29 @@ Examples -------- - >>> T = ttb.tenones((2, 2, 2, 2)) - >>> A = 2 * np.ones((2, 1)) - >>> T.ttm([A, A], dims=[0, 1], transpose=True) - tensor of shape (1, 1, 2, 2) with order F - data[:, :, 0, 0] = - [[16.]] - data[:, :, 1, 0] = - [[16.]] - data[:, :, 0, 1] = - [[16.]] - data[:, :, 1, 1] = - [[16.]] - >>> T.ttm([A, A], exclude_dims=[0, 1], transpose=True) - tensor of shape (2, 2, 1, 1) with order F - data[:, :, 0, 0] = - [[16. 16.] - [16. 16.]] + Create a 4-way tensor of ones and multiply along two modes by matrices + given as a list (each is a 2 x 1 column of twos):: + + >>> T = ttb.tenones((2, 2, 2, 2)) + >>> A = 2 * np.ones((2, 1)) + >>> T.ttm([A, A], dims=[0, 1], transpose=True) + tensor of shape (1, 1, 2, 2) with order F + data[:, :, 0, 0] = + [[16.]] + data[:, :, 1, 0] = + [[16.]] + data[:, :, 0, 1] = + [[16.]] + data[:, :, 1, 1] = + [[16.]] + + Repeat by specifying which dimensions to exclude rather than to include:: + + >>> T.ttm([A, A], exclude_dims=[0, 1], transpose=True) + tensor of shape (2, 2, 1, 1) with order F + data[:, :, 0, 0] = + [[16. 16.] + [16. 16.]]
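+ + A single matrix can also be passed on its own rather than in a list (a brief added sketch, assuming the single-matrix case implied by the `Sequence` check in the implementation below; with `transpose=True` the transposed 1 x 2 matrix contracts mode 0 to size 1, and every entry is 2 + 2 = 4):: + + >>> T.ttm(A, 0, transpose=True) + tensor of shape (1, 2, 2, 2) with order F + data[:, :, 0, 0] = + [[4. 4.]] + data[:, :, 1, 0] = + [[4. 4.]] + data[:, :, 0, 1] = + [[4. 4.]] + data[:, :, 1, 1] = + [[4. 4.]]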
 """ if isinstance(matrix, Sequence): # Check that the dimensions are valid @@ -1709,14 +1632,14 @@ otherdims: int | np.ndarray | None = None, ) -> tensor: """ - Tensor multiplication (tensor times tensor). + Tensor times tensor. - Computes the contracted product of tensors, self and other, in the - dimensions specified by the `selfdims` and `otherdims`. The sizes of - the dimensions specified by `selfdims` and `otherdims` must match; - that is, `self.shape(selfdims)` must equal `other.shape(otherdims)`. - If only `selfdims` is provided as input, it is used to specify the - dimensions for both `self` and `other`. + Computes the contracted product of tensors, self and other, in the dimensions + specified by the `selfdims` and `otherdims`. The sizes of the dimensions + specified by `selfdims` and `otherdims` must match; that is, + `self.shape(selfdims)` must equal `other.shape(otherdims)`. If only `selfdims` + is provided as input, it is used to specify the dimensions for both `self` and + `other`. Parameters ---------- other: Tensor to multiply by. selfdims: Dimensions to contract this tensor by for multiplication. otherdims: Dimensions to contract other tensor by for multiplication. Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T.ttt(T) - tensor of shape (2, 2, 2, 2) with order F - data[:, :, 0, 0] = - [[1 2] - [3 4]] - data[:, :, 1, 0] = - [[ 3 6] - [ 9 12]] - data[:, :, 0, 1] = - [[2 4] - [6 8]] - data[:, :, 1, 1] = - [[ 4 8] - [12 16]] - >>> T.ttt(T, 0) - tensor of shape (2, 2) with order F - data[:, :] = - [[10 14] - [14 20]] - >>> T.ttt(T, selfdims=0, otherdims=1) - tensor of shape (2, 2) with order F - data[:, :] = - [[ 7 15] - [10 22]] + Create a 4-way tensor with values 0-15 and a 3-way tensor of ones:: + + >>> T1 = ttb.tensor(np.arange(16), (2, 2, 2, 2)) + >>> T2 = ttb.tenones((2, 2, 2)) + + Compute tensor times tensor using dimensions 0 and 1 of both tensors:: + + >>> T1.ttt(T2, np.array([0, 1])) + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[ 6. 38.] + [22. 54.]] + data[:, :, 1] = + [[ 6. 38.] + [22. 54.]] + + Compute tensor times tensor using different (but compatible) dimensions of the + two tensors involved:: + + >>> T1.ttt(T2, selfdims=np.array([1, 3]), otherdims=np.array([0, 1])) + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[20. 36.] + [24. 40.]] + data[:, :, 1] = + [[20. 36.] + [24. 40.]] """ if not isinstance(other, tensor): assert False, "other must be of type tensor" @@ -1791,94 +1716,6 @@ return cmatrix.to_tensor() return cmatrix - def ttv( - self, - vector: np.ndarray | Sequence[np.ndarray], - dims: OneDArray | None = None, - exclude_dims: OneDArray | None = None, - ) -> float | tensor: - """ - Tensor times vector. - - Computes the n-mode product of `self` with the vector `vector`; i.e., - `self x_n vector`. The integer `n` specifies the dimension (or mode) - along which the vector should be multiplied. If `vector.shape = (I,)`, - then the tensor must have `self.shape[n] = I`. The result will be the - same order and shape as `self` except that the size of dimension `n` - will be `J`. The resulting tensor has one less dimension, as dimension - `n` is removed in the multiplication. - - Multiplication with more than one vector is provided using a list of - vectors and corresponding dimensions in the tensor to use. - - The dimensions of the tensor with which to multiply can be provided as - `dims`, or the dimensions to exclude from `[0, ..., self.ndims]` can be - specified using `exclude_dims`. - - Parameters - ---------- - vector: - Vector or vectors to multiply by. - dims: - Dimensions to multiply against. - exclude_dims: - Use all dimensions but these. - - Returns - ------- - Tensor product. - - Examples - -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T.ttv(np.ones(2), 0) - tensor of shape (2,) with order F - data[:] = - [4. 6.] - >>> T.ttv(np.ones(2), 1) - tensor of shape (2,) with order F - data[:] = - [3. 7.] - >>> T.ttv([np.ones(2), np.ones(2)]) - 10.0 - """ - # Check that vector is a list of vectors, if not place single vector as element - # in list - if len(vector) > 0 and isinstance(vector[0], (int, float, np.int_, np.float64)): - return self.ttv(np.array([vector]), dims, exclude_dims) - - # Get sorted dims and index for multiplicands - dims, vidx = tt_dimscheck(self.ndims, len(vector), dims, exclude_dims) - - # Check that each multiplicand is the right size. 
- for i in range(dims.size): - if vector[vidx[i]].shape != (self.shape[dims[i]],): - assert False, "Multiplicand is wrong size" - - # Extract the data - c = self.data.copy() - - # Permute it so that the dimensions we're working with come last - remdims = np.setdiff1d(np.arange(0, self.ndims), dims) - if self.ndims > 1: - c = np.transpose(c, np.concatenate((remdims, dims))) - - # Do each multiply in sequence, doing the highest index first, which is - # important for vector multiplies. - n = self.ndims - sz = np.array(self.shape)[np.concatenate((remdims, dims))] - - for i in range(dims.size - 1, -1, -1): - c = np.reshape( - c, tuple([np.prod(sz[0 : n - 1]), sz[n - 1]]), order=self.order - ) - c = c.dot(vector[vidx[i]]) - n -= 1 - # If needed, convert the final result back to tensor - if n > 0: - return ttb.tensor(c, tuple(sz[0:n]), copy=False) - return c[0].item() - def ttsv( self, vector: OneDArray, @@ -1888,9 +1725,9 @@ """ Tensor times same vector in multiple modes. - See :meth:`ttv` for details on multiplication of a tensor with a - vector. When `skip_dim` is provided, multiply the vector by all but - dimensions except `[0, ..., skip_dim]`. + See :meth:`ttv` for details on multiplication of a tensor with a vector. When + `skip_dim` is provided, multiply the vector in all dimensions except + `[0, ..., skip_dim]`. Parameters ---------- @@ -1959,6 +1796,106 @@ return y assert False, "Invalid value for version; should be None, 1, or 2" + def ttv( + self, + vector: np.ndarray | Sequence[np.ndarray], + dims: OneDArray | None = None, + exclude_dims: OneDArray | None = None, + ) -> float | tensor: + """ + Tensor times vector. + + Computes the n-mode product of `self` with the vector `vector`; i.e., + `self x_n vector`. The integer `n` specifies the dimension (or mode) along which + the vector should be multiplied. If `vector.shape = (I,)`, then the tensor must + have `self.shape[n] = I`. The resulting tensor has one less dimension than + `self`, as dimension `n` is removed in the multiplication. + + Multiplication with more than one vector is provided using a list of vectors and + corresponding dimensions in the tensor to use. + + The dimensions of the tensor with which to multiply can be provided as `dims`, + or the dimensions to exclude from `[0, ..., self.ndims]` can be specified using + `exclude_dims`. + + Parameters + ---------- + vector: + Vector or vectors to multiply by. + dims: + Dimensions to multiply against. + exclude_dims: + Multiply using all dimensions but these. + + Returns + ------- + Tensor product. + + Examples + -------- + Create a 3-way tensor with values 0-7 and multiply by a vector of ones along + dimension 0:: + + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> print(T) + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 2] + [1 3]] + data[:, :, 1] = + [[4 6] + [5 7]] + >>> T.ttv(np.ones(2), 0) + tensor of shape (2, 2) with order F + data[:, :] = + [[ 1. 9.] + [ 5. 13.]] + + Multiply along all but dimension 1 using two vectors of ones:: + + >>> T.ttv([np.ones(2), np.ones(2)], exclude_dims=np.array([1])) + tensor of shape (2,) with order F + data[:] = + [10. 18.]
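+ + Multiplying by vectors in all dimensions at once contracts the tensor to a scalar; with vectors of ones this is just the sum of all entries (0 through 7 here), matching the scalar return at the end of the implementation below:: + + >>> T.ttv([np.ones(2), np.ones(2), np.ones(2)]) + 28.0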
+ """ + # Check that vector is a list of vectors, if not place single vector as element + # in list + if len(vector) > 0 and isinstance(vector[0], (int, float, np.int_, np.float64)): + return self.ttv(np.array([vector]), dims, exclude_dims) + + # Get sorted dims and index for multiplicands + dims, vidx = tt_dimscheck(self.ndims, len(vector), dims, exclude_dims) + + # Check that each multiplicand is the right size. + for i in range(dims.size): + if vector[vidx[i]].shape != (self.shape[dims[i]],): + assert False, "Multiplicand is wrong size" + + # Extract the data + c = self.data.copy() + + # Permute it so that the dimensions we're working with come last + remdims = np.setdiff1d(np.arange(0, self.ndims), dims) + if self.ndims > 1: + c = np.transpose(c, np.concatenate((remdims, dims))) + + # Do each multiply in sequence, doing the highest index first, which is + # important for vector multiplies. + n = self.ndims + sz = np.array(self.shape)[np.concatenate((remdims, dims))] + + for i in range(dims.size - 1, -1, -1): + c = np.reshape( + c, tuple([np.prod(sz[0 : n - 1]), sz[n - 1]]), order=self.order + ) + c = c.dot(vector[vidx[i]]) + n -= 1 + # If needed, convert the final result back to tensor + if n > 0: + return ttb.tensor(c, tuple(sz[0:n]), copy=False) + return c[0].item() + def tenfun( self, function_handle: Callable[[np.ndarray, np.ndarray], np.ndarray] @@ -1972,11 +1909,11 @@ def tenfun( | ttb.sptensor | ttb.sumtensor, ) -> ttb.tensor: - """Apply a function to each element in a tensor or tensors. + """ + Apply a function to each element in a tensor or tensors. See :meth:`pyttb.tensor.tensor.tenfun_binary` and - :meth:`pyttb.tensor.tensor.tenfun_unary` for supported - options. + :meth:`pyttb.tensor.tensor.tenfun_unary` for supported options. """ assert callable(function_handle), "function_handle must be callable" @@ -2032,24 +1969,33 @@ def tenfun_binary( other: ttb.tensor | int | float, first: bool = True, ) -> ttb.tensor: - """Apply a binary operation to two tensors or a tensor and a scalar. + """ + Apply a binary operation to two tensors or a tensor and a scalar. Parameters ---------- - function_handle: Function to apply. - other: Other input to the binary function. - first: Whether the tensor comes first in the method call (if ordering matters). + function_handle: + Function to apply. + other: + Other input to the binary function. + first: + Whether the tensor comes first in the method call (if ordering matters). - Example - ------- - >>> add = lambda x, y: x + y - >>> t0 = ttb.tenones((2, 2)) - >>> t1 = t0.tenfun_binary(add, t0) - >>> t1.isequal(t0 * 2) - True - >>> t2 = t0.tenfun_binary(add, 1) - >>> t2.isequal(t1) - True + Examples + -------- + Create a tensor of ones and add it to itself using an `add` function:: + + >>> add = lambda x, y: x + y + >>> T0 = ttb.tenones((2, 2, 2)) + >>> T1 = T0.tenfun_binary(add, T0) + >>> T1.isequal(T0 * 2) + True + + Use the same add function to add a scalar to the original tensor:: + + >>> T2 = T0.tenfun_binary(add, 1) + >>> T2.isequal(T1) + True """ X = self.data if not isinstance(other, (float, int)): @@ -2074,17 +2020,44 @@ def tenfun_binary( def tenfun_unary( self, function_handle: Callable[[np.ndarray], np.ndarray], *inputs: ttb.tensor ) -> ttb.tensor: - """Apply a unary operation to multiple tensors columnwise. + """ + Apply a unary operation to multiple tensors columnwise. 
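+
+ The `tensor_max` example below suggests the inputs are stacked so that
+ `function_handle` reduces over axis 0; under that assumption, other columnwise
+ reductions follow the same pattern, e.g. an elementwise mean of two tensors::
+
+ >>> tensor_mean = lambda x: np.mean(x, axis=0)
+ >>> TA = ttb.tenones((2, 2))
+ >>> TB = 3 * ttb.tenones((2, 2))
+ >>> TA.tenfun_unary(tensor_mean, TB).isequal(2 * ttb.tenones((2, 2)))
+ True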
-
- Example
- -------
- >>> tensor_max = lambda x: np.max(x, axis=0)
- >>> data = np.array([[1, 2, 3], [4, 5, 6]])
- >>> t0 = ttb.tensor(data)
- >>> t1 = ttb.tensor(data)
- >>> t2 = t0.tenfun_unary(tensor_max, t1)
- >>> t2.isequal(t1)
- True
+ Examples
+ --------
+ Create two tensors with values 0-7 and 7-0 (reversed)::
+
+ >>> T1 = ttb.tensor(np.arange(8), (2, 2, 2))
+ >>> print(T1)
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[0 2]
+ [1 3]]
+ data[:, :, 1] =
+ [[4 6]
+ [5 7]]
+ >>> T2 = ttb.tensor(np.arange(8)[::-1], (2, 2, 2))
+ >>> print(T2)
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[7 5]
+ [6 4]]
+ data[:, :, 1] =
+ [[3 1]
+ [2 0]]
+
+ Apply an operation columnwise across the tensors::
+
+ >>> tensor_max = lambda x: np.max(x, axis=0)
+ >>> T3 = T1.tenfun_unary(tensor_max, T2)
+ >>> print(T3)
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[7. 5.]
+ [6. 4.]]
+ data[:, :, 1] =
+ [[4. 6.]
+ [5. 7.]]
 """
 sz = self.shape
 for i, an_input in enumerate(inputs):
@@ -2123,56 +2096,206 @@ def _tt_to_tensor(
 return some_tensor
 return some_tensor.to_tensor()

- def __setitem__(self, key, value):
+ def to_sptensor(self) -> ttb.sptensor:
 """
- Subscripted assignment for a tensor.
-
- We can assign elements to a tensor in three ways.
-
- Case 1: `T[R1,R2,...,Rn] = Y`, in which case we replace the
- rectangular subtensor (or single element) specified by the ranges
- `R1`,...,`Rn` with `Y`. The right-hand-side can be a scalar, a tensor,
- or a :class:`numpy.ndarray`.
-
- Case 2a: `T[S] = V`, where `S` is a `p` x `n` array of subscripts and `V` is
- a scalar or a vector containing `p` values.
-
- Case 2b: `T[I] = V`, where `I` is a set of `p` linear indices and `V` is a
- scalar or a vector containing p values. Resizing is not allowed in this
- case.
+ Construct a :class:`pyttb.sptensor` from a :class:`pyttb.tensor`.

 Examples
 --------
- >>> T = tenones((3, 4, 2))
- >>> # replaces subtensor
- >>> T[0:2, 0:2, 0] = np.ones((2, 2))
- >>> # replaces two elements
- >>> T[np.array([[1, 1, 1], [1, 1, 2]])] = [5, 7]
- >>> # replaces two elements with linear indices
- >>> T[np.array([1, 13])] = [5, 7]
- >>> # grows tensor to accept new element
- >>> T[1, 1, 2:3] = 1
- >>> T[1, 1, 4] = 1
- """
- access_type = get_index_variant(key)
-
- # Case 1: Rectangular Subtensor
- if access_type == IndexVariant.SUBTENSOR:
- return self._set_subtensor(key, value)
-
- # Case 2a: Subscript indexing
- if access_type == IndexVariant.SUBSCRIPTS:
- return self._set_subscripts(key, value)
+ Construct a 2x2x2 tensor with some nonzero entries::

- # Case 2b: Linear Indexing
- if access_type == IndexVariant.LINEAR:
- if isinstance(key, list):
- key = np.array(key)
- return self._set_linear(key, value)
+ >>> np.random.seed(3) # reproducibility
+ >>> sprandint = lambda s: np.random.randint(0, 4, size=np.prod(s)) / 4
+ >>> T = ttb.tensor.from_function(sprandint, (2, 2, 2))
+ >>> print(T)
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[0.5 0.25]
+ [0. 0.75]]
+ data[:, :, 1] =
+ [[0. 0. ]
+ [0. 
0.25]]
+
+ Convert to a sparse tensor::
+
+ >>> S = T.to_sptensor()
+ >>> print(S)
+ sparse tensor of shape (2, 2, 2) with 4 nonzeros and order F
+ [0, 0, 0] = 0.5
+ [0, 1, 0] = 0.25
+ [1, 1, 0] = 0.75
+ [1, 1, 1] = 0.25
+ """
+ subs, vals = self.find()
+ return ttb.sptensor(subs, vals, self.shape, copy=False)
+
+ def to_tenmat(
+ self,
+ rdims: np.ndarray | None = None,
+ cdims: np.ndarray | None = None,
+ cdims_cyclic: Literal["fc"] | Literal["bc"] | Literal["t"] | None = None,
+ copy: bool = True,
+ ) -> ttb.tenmat:
+ """
+ Construct a :class:`pyttb.tenmat` from a :class:`pyttb.tensor`.
+
+ Parameters
+ ----------
+ rdims:
+ Mapping of row indices.
+ cdims:
+ Mapping of column indices.
+ cdims_cyclic:
+ When only rdims is specified, maps a single rdim to the rows and the
+ remaining dimensions span the columns. _fc_ (forward cyclic) orders the
+ columns as range(rdims+1, self.ndims) followed by range(0, rdims). _bc_
+ (backward cyclic) orders them as range(rdims-1, -1, -1) followed by
+ range(self.ndims-1, rdims, -1).
+ copy:
+ Whether to make a copy of provided data or just reference it.
+
+ Notes
+ -----
+ Forward cyclic is defined by Kiers [1]_ and backward cyclic is defined by
+ De Lathauwer, De Moor, and Vandewalle [2]_.
+
+ References
+ ----------
+ .. [1] KIERS, H. A. L. 2000. Towards a standardized notation and terminology in
+ multiway analysis. J. Chemometrics 14, 105-122.
+ .. [2] DE LATHAUWER, L., DE MOOR, B., AND VANDEWALLE, J. 2000b. On the best
+ rank-1 and rank-(R1, R2, ... , RN ) approximation of higher-order tensors.
+ SIAM J. Matrix Anal. Appl. 21, 4, 1324-1342.
+
+ Examples
+ --------
+ Create a :class:`pyttb.tensor`::
+
+ >>> tshape = (2, 2, 2)
+ >>> data = np.arange(np.prod(tshape))
+ >>> T = ttb.tensor(data, tshape)
+ >>> print(T)
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[0 2]
+ [1 3]]
+ data[:, :, 1] =
+ [[4 6]
+ [5 7]]
+
+ Convert to a :class:`pyttb.tenmat`, unwrapping around the first dimension. The
+ column dimensions may be given implicitly or explicitly::
+
+ >>> TM1 = T.to_tenmat(rdims=np.array([0]))
+ >>> TM2 = T.to_tenmat(rdims=np.array([0]), cdims=np.array([1, 2]))
+ >>> TM1.isequal(TM2)
+ True
+
+ Convert using cyclic column ordering. For the three-mode case, _fc_ gives the
+ same result::
+
+ >>> TM3 = T.to_tenmat(rdims=np.array([0]), cdims_cyclic="fc")
+ >>> TM3 # doctest: +NORMALIZE_WHITESPACE
+ matrix corresponding to a tensor of shape (2, 2, 2) with order F
+ rindices = [ 0 ] (modes of tensor corresponding to rows)
+ cindices = [ 1, 2 ] (modes of tensor corresponding to columns)
+ data[:, :] =
+ [[0 2 4 6]
+ [1 3 5 7]]
+
+ Backwards cyclic reverses the order::
+
+ >>> TM4 = T.to_tenmat(rdims=np.array([0]), cdims_cyclic="bc")
+ >>> TM4 # doctest: +NORMALIZE_WHITESPACE
+ matrix corresponding to a tensor of shape (2, 2, 2) with order F
+ rindices = [ 0 ] (modes of tensor corresponding to rows)
+ cindices = [ 2, 1 ] (modes of tensor corresponding to columns)
+ data[:, :] =
+ [[0 4 2 6]
+ [1 5 3 7]]
+ """
+ n = self.ndims
+ alldims = np.array([range(n)])
+ tshape = self.shape
+
+ # Verify inputs
+ if rdims is None and cdims is None:
+ assert False, "Either rdims or cdims or both must be specified."
+ if rdims is not None and not sum(np.isin(rdims, alldims)) == len(rdims):
+ assert False, "Values in rdims must be in [0, source.ndims]."
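+ # cdims gets the same validity check: every entry must name a mode of the
+ # source tensor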
+ if cdims is not None and not sum(np.isin(cdims, alldims)) == len(cdims): + assert False, "Values in cdims must be in [0, source.ndims]." + + rdims, cdims = gather_wrap_dims(n, rdims, cdims, cdims_cyclic) + # if rdims or cdims is empty, hstack will output an array of float not int + if rdims.size == 0: + dims = cdims.copy() + elif cdims.size == 0: + dims = rdims.copy() + else: + dims = np.hstack([rdims, cdims]) + if not len(dims) == n or not (alldims == np.sort(dims)).all(): + assert False, ( + "Incorrect specification of dimensions, the sorted concatenation " + "of rdims and cdims must be range(source.ndims)." + ) + rprod = 1 if rdims.size == 0 else np.prod(np.array(tshape)[rdims]) + cprod = 1 if cdims.size == 0 else np.prod(np.array(tshape)[cdims]) + data = np.reshape( + self.permute(dims).data, + (rprod, cprod), + order=self.order, + ) + assert data.flags["F_CONTIGUOUS"] + return ttb.tenmat(data, rdims, cdims, tshape=tshape, copy=copy) + + def __setitem__(self, key, value): + """ + Subscripted assignment for a tensor. + + We can assign elements to a tensor in three ways. + + Case 1: `T[R1,R2,...,Rn] = Y`, in which case we replace the rectangular + subtensor (or single element) specified by the ranges `R1`,...,`Rn` with `Y`. + The right-hand-side can be a scalar, a tensor, or a :class:`numpy.ndarray`. + + Case 2a: `T[S] = V`, where `S` is a `p` x `n` array of subscripts and `V` is a + scalar or a vector containing `p` values. + + Case 2b: `T[I] = V`, where `I` is a set of `p` linear indices and `V` is a + scalar or a vector containing p values. Resizing is not allowed in this case. + + Examples + -------- + >>> T = tenones((3, 4, 2)) + >>> # replaces subtensor + >>> T[0:2, 0:2, 0] = np.ones((2, 2)) + >>> # replaces two elements + >>> T[np.array([[1, 1, 1], [1, 1, 2]])] = [5, 7] + >>> # replaces two elements with linear indices + >>> T[np.array([1, 13])] = [5, 7] + >>> # grows tensor to accept new element + >>> T[1, 1, 2:3] = 1 + >>> T[1, 1, 4] = 1 + """ + access_type = get_index_variant(key) + + # Case 1: Rectangular Subtensor + if access_type == IndexVariant.SUBTENSOR: + return self._set_subtensor(key, value) + + # Case 2a: Subscript indexing + if access_type == IndexVariant.SUBSCRIPTS: + return self._set_subscripts(key, value) + + # Case 2b: Linear Indexing + if access_type == IndexVariant.LINEAR: + if isinstance(key, list): + key = np.array(key) + return self._set_linear(key, value) + + assert False, "Invalid use of tensor setitem" + + def _set_linear(self, key, value): idx = key if not isinstance(idx, slice) and (idx > np.prod(self.shape)).any(): assert False, ( @@ -2268,46 +2391,45 @@ def __getitem__(self, item): # noqa: PLR0912 """ Subscripted reference for tensors. - We can extract elements or subtensors from a tensor in the - following ways. + We can extract elements or subtensors from a tensor in the following ways. - Case 1a: `y = T[I1,I2,...,In]`, where each `I` is an index, returns a - scalar. + Case 1a: `y = T[I1,I2,...,In]`, where each `I` is an index, returns a scalar. - Case 1b: `Y = T[R1,R2,...,Rn]`, where one or more `R` is a range and - the rest are indices, returns a tensor. + Case 1b: `Y = T[R1,R2,...,Rn]`, where one or more `R` is a range and the rest + are indices, returns a tensor. - Case 2a: `V = T[S]` where `S` is a `p` x `n` array - of subscripts, returns a vector of `p` values. + Case 2a: `V = T[S]` where `S` is a `p` x `n` array of subscripts, returns a + vector of `p` values. 
- Case 2b: `V = T[I]` where `I` is a set of `p`
- linear indices, returns a vector of `p` values.
+ Case 2b: `V = T[I]` where `I` is a set of `p` linear indices, returns a vector
+ of `p` values.

- Any ambiguity results in executing the first valid case. This
- is particularly an issue if `self.ndims == 1`.
+ Any ambiguity results in executing the first valid case. This is particularly an
+ issue if `self.ndims == 1`.

 Examples
 --------
- >>> T = tenones((3, 4, 2, 1))
- >>> T[0, 0, 0, 0] # produces a scalar
- 1.0
- >>> # produces a tensor of order 1 and size 1
- >>> T[1, 1, 1, :] # doctest: +NORMALIZE_WHITESPACE
- tensor of shape (1,) with order F
- data[:] =
- [1.]
- >>> # produces a tensor of size 2 x 2 x 1
- >>> T[0:2, [2, 3], 1, :] # doctest: +NORMALIZE_WHITESPACE
- tensor of shape (2, 2, 1) with order F
- data[:, :, 0] =
- [[1. 1.]
- [1. 1.]]
- >>> # returns a vector of length 2
- >>> # Equivalent to selecting [0,0,0,0] and [1,1,1,0] separately
- >>> T[np.array([[0, 0, 0, 0], [1, 1, 1, 0]])]
- array([1., 1.])
- >>> T[[0, 1, 2]] # extracts the first three linearized indices
- array([1., 1., 1.])
+ >>> T = tenones((3, 4, 2, 1))
+ >>> T[0, 0, 0, 0] # produces a scalar
+ 1.0
+ >>> # produces a tensor of order 1 and size 1
+ >>> T[1, 1, 1, :]
+ tensor of shape (1,) with order F
+ data[:] =
+ [1.]
+ >>> # produces a tensor of size 2 x 2 x 1
+ >>> T[0:2, [2, 3], 1, :]
+ tensor of shape (2, 2, 1) with order F
+ data[:, :, 0] =
+ [[1. 1.]
+ [1. 1.]]
+ >>> # returns a vector of length 2
+ >>> # Equivalent to selecting [0, 0, 0, 0] and [1, 1, 1, 0] separately
+ >>> T[np.array([[0, 0, 0, 0], [1, 1, 1, 0]])]
+ array([1., 1.])
+ >>> # extracts the first three linearized indices
+ >>> T[[0, 1, 2]]
+ array([1., 1., 1.])
 """
 # Case 0: Single Index Linear
 if isinstance(item, (int, float, np.generic, slice)):
@@ -2397,17 +2519,23 @@ def __eq__(self, other):

 Examples
 --------
- >>> T = ttb.tensor(np.array([[1, 2], [3, 4]]))
- >>> T == T
- tensor of shape (2, 2) with order F
- data[:, :] =
- [[ True True]
- [ True True]]
- >>> T == 1
- tensor of shape (2, 2) with order F
- data[:, :] =
- [[ True False]
- [False False]]
+ >>> T = ttb.tensor(np.arange(8), (2, 2, 2))
+ >>> T == T
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[ True True]
+ [ True True]]
+ data[:, :, 1] =
+ [[ True True]
+ [ True True]]
+ >>> T == 1
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[False False]
+ [ True False]]
+ data[:, :, 1] =
+ [[False False]
+ [False False]]
 """

 def tensor_equality(x, y):
@@ -2429,17 +2557,23 @@ def __ne__(self, other):

 Examples
 --------
- >>> T = ttb.tensor(np.array([[1, 2], [3, 4]]))
- >>> T != T
- tensor of shape (2, 2) with order F
- data[:, :] =
- [[False False]
- [False False]]
- >>> T != 1
- tensor of shape (2, 2) with order F
- data[:, :] =
- [[False True]
- [ True True]]
+ >>> T = ttb.tensor(np.arange(8), (2, 2, 2))
+ >>> T != T
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[False False]
+ [False False]]
+ data[:, :, 1] =
+ [[False False]
+ [False False]]
+ >>> T != 1
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[ True True]
+ [False True]]
+ data[:, :, 1] =
+ [[ True True]
+ [ True True]]
 """

 def tensor_not_equal(x, y):
@@ -2461,17 +2595,23 @@ def __ge__(self, other):

 Examples
 --------
- >>> T = ttb.tensor(np.array([[1, 2], [3, 4]]))
- >>> T >= T
- tensor of shape (2, 2) with order F
- data[:, :] =
- [[ True True]
- [ True True]]
- >>> T >= 1
- tensor of shape (2, 2) with order F
- data[:, :] =
- [[ True True]
- [ True True]]
+ >>> T = 
ttb.tensor(np.arange(8), (2, 2, 2)) + >>> T >= T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[ True True] + [ True True]] + data[:, :, 1] = + [[ True True] + [ True True]] + >>> T >= 1 + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[False True] + [ True True]] + data[:, :, 1] = + [[ True True] + [ True True]] """ def greater_or_equal(x, y): @@ -2493,17 +2633,23 @@ def __le__(self, other): Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T <= T - tensor of shape (2, 2) with order F - data[:, :] = - [[ True True] - [ True True]] - >>> T <= 1 - tensor of shape (2, 2) with order F - data[:, :] = - [[ True False] - [False False]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> T <= T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[ True True] + [ True True]] + data[:, :, 1] = + [[ True True] + [ True True]] + >>> T <= 1 + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[ True False] + [ True False]] + data[:, :, 1] = + [[False False] + [False False]] """ def less_or_equal(x, y): @@ -2525,17 +2671,23 @@ def __gt__(self, other): Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T > T - tensor of shape (2, 2) with order F - data[:, :] = - [[False False] - [False False]] - >>> T > 1 - tensor of shape (2, 2) with order F - data[:, :] = - [[False True] - [ True True]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> T > T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[False False] + [False False]] + data[:, :, 1] = + [[False False] + [False False]] + >>> T > 1 + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[False True] + [False True]] + data[:, :, 1] = + [[ True True] + [ True True]] """ def greater(x, y): @@ -2557,17 +2709,23 @@ def __lt__(self, other): Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T < T - tensor of shape (2, 2) with order F - data[:, :] = - [[False False] - [False False]] - >>> T < 1 - tensor of shape (2, 2) with order F - data[:, :] = - [[False False] - [False False]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> T < T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[False False] + [False False]] + data[:, :, 1] = + [[False False] + [False False]] + >>> T < 1 + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[ True False] + [False False]] + data[:, :, 1] = + [[False False] + [False False]] """ def less(x, y): @@ -2589,17 +2747,23 @@ def __sub__(self, other): Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T - T - tensor of shape (2, 2) with order F - data[:, :] = - [[0 0] - [0 0]] - >>> T - 1 - tensor of shape (2, 2) with order F - data[:, :] = - [[0 1] - [2 3]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> T - T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 0] + [0 0]] + data[:, :, 1] = + [[0 0] + [0 0]] + >>> T - 1 + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[-1 1] + [ 0 2]] + data[:, :, 1] = + [[3 5] + [4 6]] """ def minus(x, y): @@ -2621,17 +2785,23 @@ def __add__(self, other): Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T + T - tensor of shape (2, 2) with order F - data[:, :] = - [[2 4] - [6 8]] - >>> T + 1 - tensor of shape (2, 2) with order F - data[:, :] = - [[2 3] - [4 5]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> T + T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 4] + [2 6]] + data[:, :, 1] = + [[ 8 12] + [10 14]] + >>> T + 1 + tensor of shape 
(2, 2, 2) with order F + data[:, :, 0] = + [[1 3] + [2 4]] + data[:, :, 1] = + [[5 7] + [6 8]] """ # If rhs is sumtensor, treat as such if isinstance(other, ttb.sumtensor): @@ -2643,7 +2813,8 @@ def tensor_add(x, y): return self.tenfun(tensor_add, other) def __radd__(self, other): - """Right binary addition (+) for tensors. + """ + Right binary addition (+) for tensors. Parameters ---------- @@ -2655,12 +2826,15 @@ def __radd__(self, other): Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> 1 + T - tensor of shape (2, 2) with order F - data[:, :] = - [[2 3] - [4 5]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> 1 + T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[1 3] + [2 4]] + data[:, :, 1] = + [[5 7] + [6 8]] """ return self.__add__(other) @@ -2678,12 +2852,15 @@ def __pow__(self, power): Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T**2 - tensor of shape (2, 2) with order F - data[:, :] = - [[ 1 4] - [ 9 16]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> T**2 + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 4] + [1 9]] + data[:, :, 1] = + [[16 36] + [25 49]] """ def tensor_pow(x, y): @@ -2692,7 +2869,8 @@ def tensor_pow(x, y): return self.tenfun(tensor_pow, power) def __mul__(self, other): - """Element-wise multiplication (*) for tensors, self*other. + """ + Element-wise multiplication (*) for tensors. Parameters ---------- @@ -2704,17 +2882,23 @@ def __mul__(self, other): Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T * T - tensor of shape (2, 2) with order F - data[:, :] = - [[ 1 4] - [ 9 16]] - >>> T * 2 - tensor of shape (2, 2) with order F - data[:, :] = - [[2 4] - [6 8]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> T * T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 4] + [1 9]] + data[:, :, 1] = + [[16 36] + [25 49]] + >>> T * 2 + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 4] + [2 6]] + data[:, :, 1] = + [[ 8 12] + [10 14]] """ def mul(x, y): @@ -2726,7 +2910,8 @@ def mul(x, y): return self.tenfun(mul, other) def __rmul__(self, other): - """Element wise right multiplication (*) for tensors, other*self. + """ + Elementwise right multiplication (*) for tensors. Parameters ---------- @@ -2738,17 +2923,21 @@ def __rmul__(self, other): Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> 2 * T - tensor of shape (2, 2) with order F - data[:, :] = - [[2 4] - [6 8]] + >>> T = ttb.tensor(np.arange(8), (2, 2, 2)) + >>> 2 * T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0 4] + [2 6]] + data[:, :, 1] = + [[ 8 12] + [10 14]] """ return self.__mul__(other) def __truediv__(self, other): - """Element-wise left division (/) for tensors, self/other. + """ + Element-wise left division (/) for tensors. Parameters ---------- @@ -2760,17 +2949,23 @@ def __truediv__(self, other): Examples -------- - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> T / T - tensor of shape (2, 2) with order F - data[:, :] = - [[1. 1.] - [1. 1.]] - >>> T / 2 - tensor of shape (2, 2) with order F - data[:, :] = - [[0.5 1. ] - [1.5 2. ]] + >>> T = ttb.tensor(np.arange(8) + 1, (2, 2, 2)) + >>> T / T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[1. 1.] + [1. 1.]] + data[:, :, 1] = + [[1. 1.] + [1. 1.]] + >>> T / 2 + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0.5 1.5] + [1. 2. ]] + data[:, :, 1] = + [[2.5 3.5] + [3. 4. 
]]
 """

 def div(x, y):
 return x / y

 if isinstance(other, ttb.sptensor):
 return NotImplemented
 return self.tenfun(div, other)

 def __rtruediv__(self, other):
- """Element wise right division (/) for tensors, other/self.
+ """
+ Element-wise right division (/) for tensors.

 Parameters
 ----------
@@ -2794,13 +2990,15 @@ def __rtruediv__(self, other):

 Examples
 --------
- >>> T = ttb.tensor(np.array([[1, 2], [3, 4]]))
- >>> np.set_printoptions(precision=8)
- >>> 2 / T # doctest: +ELLIPSIS
- tensor of shape (2, 2) with order F
- data[:, :] =
- [[2. 1. ]
- [0.66666... 0.5 ]]
+ >>> T = ttb.tensor(np.arange(8) + 11, (2, 2, 2))
+ >>> 1 / T # doctest: +ELLIPSIS
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[0.0909... 0.0769...]
+ [0.0833... 0.0714...]]
+ data[:, :, 1] =
+ [[0.0666... 0.0588...]
+ [0.0625... 0.0555...]]
 """

 def div(x, y):
@@ -2821,12 +3019,15 @@ def __pos__(self):

 Examples
 --------
- >>> T = ttb.tensor(np.array([[1, 2], [3, 4]]))
- >>> +T
- tensor of shape (2, 2) with order F
- data[:, :] =
- [[1 2]
- [3 4]]
+ >>> T = ttb.tensor(np.arange(8), (2, 2, 2))
+ >>> +T
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[0 2]
+ [1 3]]
+ data[:, :, 1] =
+ [[4 6]
+ [5 7]]
 """
 return self.copy()

@@ -2840,17 +3041,21 @@ def __neg__(self):

 Examples
 --------
- >>> T = ttb.tensor(np.array([[1, 2], [3, 4]]))
- >>> -T
- tensor of shape (2, 2) with order F
- data[:, :] =
- [[-1 -2]
- [-3 -4]]
+ >>> T = ttb.tensor(np.arange(8), (2, 2, 2))
+ >>> -T
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[ 0 -2]
+ [-1 -3]]
+ data[:, :, 1] =
+ [[-4 -6]
+ [-5 -7]]
 """
 return ttb.tensor(-1 * self.data)

 def __repr__(self):
- """Return string representation of the tensor.
+ """
+ Return string representation of the tensor.

 Returns
 -------
@@ -2858,12 +3063,15 @@ def __repr__(self):

 Examples
 --------
- >>> T = ttb.tensor(np.array([[1, 2], [3, 4]]))
- >>> T
- tensor of shape (2, 2) with order F
- data[:, :] =
- [[1 2]
- [3 4]]
+ >>> T = ttb.tensor(np.arange(8), (2, 2, 2))
+ >>> T
+ tensor of shape (2, 2, 2) with order F
+ data[:, :, 0] =
+ [[0 2]
+ [1 3]]
+ data[:, :, 1] =
+ [[4 6]
+ [5 7]]
 """
 if self.ndims == 0:
 s = ""
@@ -2923,11 +3131,20 @@ def _matlab_str(self, format: str | None = None, name: str | None = None) -> str

 return matlab_str + "\n" + textwrap.indent(array_str, "\t")


-def tenones(shape: Shape, order: MemoryLayout = "F") -> tensor:
- """Create a tensor of all ones.
+def tendiag(
+ elements: OneDArray,
+ shape: Shape | None = None,
+ order: MemoryLayout = "F",
+) -> tensor:
+ """
+ Create a tensor with elements along the superdiagonal.
+
+ If the provided shape is too small, the tensor will be enlarged to accommodate.

 Parameters
 ----------
+ elements:
+ Elements to set along the diagonal.
 shape:
 Shape of resulting tensor.
 order:
@@ -2939,28 +3156,104 @@

 Examples
 --------
- >>> T = ttb.tenones((3,))
- >>> T
- tensor of shape (3,) with order F
- data[:] =
- [1. 1. 1.]
- >>> T = ttb.tenones((3, 3))
- >>> T
- tensor of shape (3, 3) with order F
- data[:, :] =
- [[1. 1. 1.]
- [1. 1. 1.]
- [1. 1. 1.]]
+ >>> diagonal_values = np.ones(3)
+ >>> T = ttb.tendiag(diagonal_values)
+ >>> T
+ tensor of shape (3, 3, 3) with order F
+ data[:, :, 0] =
+ [[1. 0. 0.]
+ [0. 0. 0.]
+ [0. 0. 0.]]
+ data[:, :, 1] =
+ [[0. 0. 0.]
+ [0. 1. 0.]
+ [0. 0. 0.]]
+ data[:, :, 2] =
+ [[0. 0. 0.]
+ [0. 0. 0.]
+ [0. 0. 
1.]] + >>> # dimensions enlarged to match length of diagonal_values + >>> T2 = ttb.tendiag(diagonal_values, (1, 2, 3)) + >>> T.isequal(T2) + True + """ + # Flatten provided elements + elements = parse_one_d(elements) + N = len(elements) + if shape is None: + constructed_shape = (N,) * N + else: + shape = parse_shape(shape) + constructed_shape = tuple(max(N, dim) for dim in shape) + X = tenzeros(constructed_shape, order=order) + subs = np.tile(np.arange(0, N)[:, None], (len(constructed_shape),)) + X[subs] = elements + return X + + +def teneye(ndims: int, size: int, order: MemoryLayout = "F") -> tensor: """ + Create identity tensor of specified shape. - def ones(shape: tuple[int, ...]) -> np.ndarray: - return np.ones(shape, order=order) + T is an identity tensor if T.ttsv(x, skip_dim=0) = x for all x such that + norm(x) == 1. - return tensor.from_function(ones, shape) + An identity tensor only exists if :attr:`ndims` is even. This method is resource + intensive for even moderate orders or sizes (>=6). + Parameters + ---------- + ndims: + Number of dimensions of tensor. + size: + Number of elements in any dimension of the tensor. + order: + Memory layout for resulting tensor. -def tenzeros(shape: Shape, order: MemoryLayout = "F") -> tensor: - """Create a tensor of all zeros. + Returns + ------- + Identity tensor. + + Examples + -------- + >>> T = ttb.teneye(ndims=4, size=2) + >>> T + tensor of shape (2, 2, 2, 2) with order F + data[:, :, 0, 0] = + [[1. 0. ] + [0. 0.33333333]] + data[:, :, 1, 0] = + [[0. 0.33333333] + [0.33333333 0. ]] + data[:, :, 0, 1] = + [[0. 0.33333333] + [0.33333333 0. ]] + data[:, :, 1, 1] = + [[0.33333333 0. ] + [0. 1. ]] + >>> # check identity tensor using ttsv method and unit vector x + >>> x = np.ones(2) + >>> x /= np.linalg.norm(x) + >>> np.allclose(T.ttsv(x, 0), x) + True + """ + if ndims % 2 != 0: + raise ValueError(f"ndims must be even but received {ndims}") + idx_iterator = combinations_with_replacement(range(size), ndims) + A = tenzeros((size,) * ndims, order=order) + s = np.zeros((factorial(ndims), ndims // 2), order=order) + for _i, indices in enumerate(idx_iterator): + p = np.array(list(permutations(indices))) + for j in range(ndims // 2): + s[:, j] = p[:, 2 * j - 1] == p[:, 2 * j] + v = np.sum(np.sum(s, axis=1) == ndims // 2) + A[tuple(zip(*p))] = v / factorial(ndims) + return A + + +def tenones(shape: Shape, order: MemoryLayout = "F") -> tensor: + """ + Create a tensor of all ones. Parameters ---------- @@ -2975,28 +3268,29 @@ def tenzeros(shape: Shape, order: MemoryLayout = "F") -> tensor: Examples -------- - >>> T = ttb.tenzeros((3,)) + >>> T = ttb.tenones((3,)) >>> T tensor of shape (3,) with order F data[:] = - [0. 0. 0.] - >>> T = ttb.tenzeros((3, 3)) + [1. 1. 1.] + >>> T = ttb.tenones((3, 3)) >>> T tensor of shape (3, 3) with order F data[:, :] = - [[0. 0. 0.] - [0. 0. 0.] - [0. 0. 0.]] + [[1. 1. 1.] + [1. 1. 1.] + [1. 1. 1.]] """ - def zeros(shape: tuple[int, ...]) -> np.ndarray: - return np.zeros(shape, order=order) + def ones(shape: tuple[int, ...]) -> np.ndarray: + return np.ones(shape, order=order) - return tensor.from_function(zeros, shape) + return tensor.from_function(ones, shape) def tenrand(shape: Shape, order: MemoryLayout = "F") -> tensor: - """Create a tensor with entries drawn from a uniform distribution on [0, 1]. + """ + Create a tensor with entries drawn from a uniform distribution on [0, 1]. 
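+
+ Draws come from numpy's global random state, as the seeded example below
+ shows, so reseeding reproduces a tensor; a minimal sketch::
+
+ >>> np.random.seed(0)
+ >>> A = ttb.tenrand((2, 2))
+ >>> np.random.seed(0)
+ >>> B = ttb.tenrand((2, 2))
+ >>> A.isequal(B)
+ True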
Parameters ---------- @@ -3011,12 +3305,16 @@ def tenrand(shape: Shape, order: MemoryLayout = "F") -> tensor: Examples -------- - >>> np.random.seed(1) - >>> T = ttb.tenrand((3,)) - >>> T # doctest: +ELLIPSIS - tensor of shape (3,) with order F - data[:] = - [4.170...e-01 7.203...e-01 1.143...e-04] + >>> np.random.seed(1) + >>> T = ttb.tenrand((2, 2, 2)) + >>> T # doctest: +ELLIPSIS + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[4.1702...e-01 1.1437...e-04] + [7.2032...e-01 3.0233...e-01]] + data[:, :, 1] = + [[0.1467... 0.1862...] + [0.0923... 0.3455...]] """ # Typing doesn't play nice with partial @@ -3028,19 +3326,12 @@ def unit_uniform(pass_through_shape: tuple[int, ...]) -> np.ndarray: return tensor.from_function(unit_uniform, shape) -def tendiag( - elements: OneDArray, - shape: Shape | None = None, - order: MemoryLayout = "F", -) -> tensor: - """Create a tensor with elements along super diagonal. - - If provided shape is too small the tensor will be enlarged to accommodate. +def tenzeros(shape: Shape, order: MemoryLayout = "F") -> tensor: + """ + Create a tensor of all zeros. Parameters ---------- - elements: - Elements to set along the diagonal. shape: Shape of resulting tensor. order: @@ -3052,78 +3343,26 @@ def tendiag( Examples -------- - >>> shape = (3,) - >>> values = np.ones(shape) - >>> T1 = ttb.tendiag(values) - >>> T2 = ttb.tendiag(values, (3, 3, 3)) - >>> T1.isequal(T2) - True + >>> T = ttb.tenzeros((2, 2, 2)) + >>> T + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[0. 0.] + [0. 0.]] + data[:, :, 1] = + [[0. 0.] + [0. 0.]] """ - # Flatten provided elements - elements = parse_one_d(elements) - N = len(elements) - if shape is None: - constructed_shape = (N,) * N - else: - shape = parse_shape(shape) - constructed_shape = tuple(max(N, dim) for dim in shape) - X = tenzeros(constructed_shape, order=order) - subs = np.tile(np.arange(0, N)[:, None], (len(constructed_shape),)) - X[subs] = elements - return X - - -def teneye(ndims: int, size: int, order: MemoryLayout = "F") -> tensor: - """Create identity tensor of specified shape. - - T is an "identity tensor if T.ttsv(x, skip_dim=0) = x for all x such that - norm(x) == 1. - - An identity tensor only exists if order is even. - This method is resource intensive - for even moderate orders or sizes (>=6). - - Parameters - ---------- - ndims: Number of dimensions of tensor. - size: Number of elements in any dimension of the tensor. - order: - Memory layout for resulting tensor. - Examples - -------- - >>> ttb.teneye(2, 3) - tensor of shape (3, 3) with order F - data[:, :] = - [[1. 0. 0.] - [0. 1. 0.] - [0. 0. 1.]] - >>> x = np.ones((5,)) - >>> x /= np.linalg.norm(x) - >>> T = ttb.teneye(4, 5) - >>> np.allclose(T.ttsv(x, 0), x) - True + def zeros(shape: tuple[int, ...]) -> np.ndarray: + return np.zeros(shape, order=order) - Returns - ------- - Identity tensor. 
- """ - if ndims % 2 != 0: - raise ValueError(f"Order must be even but received {ndims}") - idx_iterator = combinations_with_replacement(range(size), ndims) - A = tenzeros((size,) * ndims, order=order) - s = np.zeros((factorial(ndims), ndims // 2), order=order) - for _i, indices in enumerate(idx_iterator): - p = np.array(list(permutations(indices))) - for j in range(ndims // 2): - s[:, j] = p[:, 2 * j - 1] == p[:, 2 * j] - v = np.sum(np.sum(s, axis=1) == ndims // 2) - A[tuple(zip(*p))] = v / factorial(ndims) - return A + return tensor.from_function(zeros, shape) def mttv_left(W_in: np.ndarray, U1: np.ndarray) -> np.ndarray: - """Contract leading mode in partial MTTKRP W_in using factor matrix U1. + """ + Contract leading mode in partial MTTKRP W_in using factor matrix U1. The leading mode is the mode for which consecutive increases in index address elements at consecutive increases in the memory offset. @@ -3138,7 +3377,7 @@ def mttv_left(W_in: np.ndarray, U1: np.ndarray) -> np.ndarray: Returns ------- - Matrix with modes (m2 x ... x mN, C). + Matrix with modes (m2 x ... x mN, C). """ r = U1.shape[1] W_in = np.reshape(W_in, (U1.shape[0], -1, r), order="F") @@ -3165,7 +3404,7 @@ def mttv_mid(W_in: np.ndarray, U_mid: Sequence[np.ndarray]) -> np.ndarray: Returns ------- - Matrix with modes (m1, C). + Matrix with modes (m1, C). """ if len(U_mid) == 0: return W_in @@ -3179,7 +3418,8 @@ def mttv_mid(W_in: np.ndarray, U_mid: Sequence[np.ndarray]) -> np.ndarray: def min_split(shape: Shape) -> int: - """Scan for optimal splitting with minimal memory footprint. + """ + Scan for optimal splitting with minimal memory footprint. Parameters ---------- @@ -3188,9 +3428,8 @@ def min_split(shape: Shape) -> int: Returns ------- - Optimal splitting to minimize partial MTTKRP memory footprint. - Modes 0:split will contract in left-partial computation and the - rest will contract in right-partial. + Optimal splitting to minimize partial MTTKRP memory footprint. Modes 0:split will + contract in left-partial computation and the rest will contract in right-partial. """ shape = parse_shape(shape) m_left = shape[0] From 84c36043ecaad9b667b4052de4abee1a23a9567b Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Sat, 4 Oct 2025 21:09:17 -0600 Subject: [PATCH 15/17] Updating doctest output for floats --- pyttb/tensor.py | 68 ++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index f87a5fa5..574eecea 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -273,18 +273,18 @@ def from_function( >>> randn = lambda s : np.random.randn(np.prod(s)) >>> np.random.seed(0) # reproducibility >>> T = ttb.tensor.from_function(randn, (4, 3, 2)) - >>> print(T) + >>> print(T) # doctest: +ELLIPSIS tensor of shape (4, 3, 2) with order F data[:, :, 0] = - [[ 1.76405235 1.86755799 -0.10321885] - [ 0.40015721 -0.97727788 0.4105985 ] - [ 0.97873798 0.95008842 0.14404357] - [ 2.2408932 -0.15135721 1.45427351]] + [[ 1.7640... 1.8675... -0.1032...] + [ 0.4001... -0.9772... 0.4105...] + [ 0.9787... 0.9500... 0.1440...] + [ 2.2408... -0.1513... 1.4542...]] data[:, :, 1] = - [[ 0.76103773 1.49407907 -2.55298982] - [ 0.12167502 -0.20515826 0.6536186 ] - [ 0.44386323 0.3130677 0.8644362 ] - [ 0.33367433 -0.85409574 -0.74216502]] + [[ 0.7610... 1.4940... -2.55298982] + [ 0.1216... -0.2051... 0.6536186 ] + [ 0.4438... 0.3130... 0.8644362 ] + [ 0.3336... -0.8540... 
-0.74216502]] Create a :class:`pyttb.tensor` with all entries equal to 1 using :func:`numpy.ones`. Observe that we specifically specify Fortran order:: @@ -383,20 +383,20 @@ def collapse( >>> randn = lambda s: np.random.randn(np.prod(s)) >>> np.random.seed(0) # reproducibility >>> T = ttb.tensor.from_function(randn, (2, 2, 2)) - >>> print(T) + >>> print(T) # doctest: +ELLIPSIS tensor of shape (2, 2, 2) with order F data[:, :, 0] = - [[1.76405235 0.97873798] - [0.40015721 2.2408932 ]] + [[1.7640... 0.9787...] + [0.4001... 2.2408...]] data[:, :, 1] = - [[ 1.86755799 0.95008842] - [-0.97727788 -0.15135721]] + [[ 1.8675... 0.9500...] + [-0.9772... -0.1513...]] >>> max_val = T.collapse(fun=np.max) >>> min_val = T.collapse(fun=np.min) - >>> print(f"Max value: {max_val}") - Max value: 2.240893199201458 - >>> print(f"Min value: {min_val}") - Min value: -0.977277879876411 + >>> print(f"Max value: {max_val:1.4f}") + Max value: 2.2409 + >>> print(f"Min value: {min_val:1.4f}") + Min value: -0.9773 """ if self.data.size == 0: # TODO verify this is the only thing that returns np array @@ -600,14 +600,14 @@ def exp(self) -> tensor: data[:, :, 1] = [[4 6] [5 7]] - >>> print(T.exp()) + >>> print(T.exp()) # doctest: +ELLIPSIS tensor of shape (2, 2, 2) with order F data[:, :, 0] = - [[ 1. 7.3890561 ] - [ 2.71828183 20.08553692]] + [[ 1. 7.3890...] + [ 2.7182... 20.0855...]] data[:, :, 1] = - [[ 54.59815003 403.42879349] - [ 148.4131591 1096.63315843]] + [[ 54.5981... 403.4287...] + [ 148.4131... 1096.6331...]] """ return ttb.tensor(np.exp(self.data), copy=False) @@ -628,14 +628,14 @@ def find(self) -> tuple[np.ndarray, np.ndarray]: >>> sprandint = lambda s: np.where(np.random.rand(np.prod(s)) < 0.5, ... 0.0, np.random.rand(np.prod(s))) >>> T = ttb.tensor.from_function(sprandint, (2,2,2)) - >>> print(T) + >>> print(T) # doctest: +ELLIPSIS tensor of shape (2, 2, 2) with order F data[:, :, 0] = - [[0.33540785 0.43814143] + [[0.3354... 0.4381...] [0. 0. ]] data[:, :, 1] = - [[0. 0.6453551] - [0.5788586 0. ]] + [[0. 0.6453...] + [0.5788... 0. ]] Find the nonzero entries in the tensor:: @@ -3217,19 +3217,19 @@ def teneye(ndims: int, size: int, order: MemoryLayout = "F") -> tensor: Examples -------- >>> T = ttb.teneye(ndims=4, size=2) - >>> T + >>> T # doctest: +ELLIPSIS tensor of shape (2, 2, 2, 2) with order F data[:, :, 0, 0] = [[1. 0. ] - [0. 0.33333333]] + [0. 0.3333...]] data[:, :, 1, 0] = - [[0. 0.33333333] - [0.33333333 0. ]] + [[0. 0.3333...] + [0.3333... 0. ]] data[:, :, 0, 1] = - [[0. 0.33333333] - [0.33333333 0. ]] + [[0. 0.3333...] + [0.3333... 0. ]] data[:, :, 1, 1] = - [[0.33333333 0. ] + [[0.3333... 0. ] [0. 1. ]] >>> # check identity tensor using ttsv method and unit vector x >>> x = np.ones(2) From d8a604d6b93306620a5665795aa2e23d77f46ae7 Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Sat, 4 Oct 2025 21:20:18 -0600 Subject: [PATCH 16/17] Adding normalize_whitespace directive to address mix of ints and floats in output --- pyttb/tensor.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 574eecea..fcf843cd 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -273,7 +273,7 @@ def from_function( >>> randn = lambda s : np.random.randn(np.prod(s)) >>> np.random.seed(0) # reproducibility >>> T = ttb.tensor.from_function(randn, (4, 3, 2)) - >>> print(T) # doctest: +ELLIPSIS + >>> print(T) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE tensor of shape (4, 3, 2) with order F data[:, :, 0] = [[ 1.7640... 1.8675... 
-0.1032...] @@ -383,7 +383,7 @@ def collapse( >>> randn = lambda s: np.random.randn(np.prod(s)) >>> np.random.seed(0) # reproducibility >>> T = ttb.tensor.from_function(randn, (2, 2, 2)) - >>> print(T) # doctest: +ELLIPSIS + >>> print(T) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE tensor of shape (2, 2, 2) with order F data[:, :, 0] = [[1.7640... 0.9787...] @@ -600,10 +600,10 @@ def exp(self) -> tensor: data[:, :, 1] = [[4 6] [5 7]] - >>> print(T.exp()) # doctest: +ELLIPSIS + >>> print(T.exp()) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE tensor of shape (2, 2, 2) with order F data[:, :, 0] = - [[ 1. 7.3890...] + [[ 1. 7.3890...] [ 2.7182... 20.0855...]] data[:, :, 1] = [[ 54.5981... 403.4287...] @@ -628,11 +628,11 @@ def find(self) -> tuple[np.ndarray, np.ndarray]: >>> sprandint = lambda s: np.where(np.random.rand(np.prod(s)) < 0.5, ... 0.0, np.random.rand(np.prod(s))) >>> T = ttb.tensor.from_function(sprandint, (2,2,2)) - >>> print(T) # doctest: +ELLIPSIS + >>> print(T) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE tensor of shape (2, 2, 2) with order F data[:, :, 0] = [[0.3354... 0.4381...] - [0. 0. ]] + [0. 0. ]] data[:, :, 1] = [[0. 0.6453...] [0.5788... 0. ]] @@ -1397,14 +1397,14 @@ def symmetrize( # noqa: PLR0912,PLR0915 Symmetrize the tensor:: - >>> T.symmetrize() # doctest: +ELLIPSIS + >>> T.symmetrize() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE tensor of shape (2, 2, 2) with order F data[:, :, 0] = - [[0... 2.3333...] + [[0. 2.3333...] [2.3333... 4.6666...]] data[:, :, 1] = [[2.3333... 4.6666...] - [4.6666... 7... ]] + [4.6666... 7. ]] """ n = self.ndims sz = np.array(self.shape) @@ -3217,20 +3217,20 @@ def teneye(ndims: int, size: int, order: MemoryLayout = "F") -> tensor: Examples -------- >>> T = ttb.teneye(ndims=4, size=2) - >>> T # doctest: +ELLIPSIS + >>> T # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE tensor of shape (2, 2, 2, 2) with order F data[:, :, 0, 0] = - [[1. 0. ] - [0. 0.3333...]] + [[1. 0. ] + [0. 0.3333...]] data[:, :, 1, 0] = - [[0. 0.3333...] + [[0. 0.3333...] [0.3333... 0. ]] data[:, :, 0, 1] = - [[0. 0.3333...] + [[0. 0.3333...] [0.3333... 0. ]] data[:, :, 1, 1] = [[0.3333... 0. ] - [0. 1. ]] + [0. 1. ]] >>> # check identity tensor using ttsv method and unit vector x >>> x = np.ones(2) >>> x /= np.linalg.norm(x) From c43e9398473e2a61e3da8718de4dcae0d15d7adb Mon Sep 17 00:00:00 2001 From: Danny Dunlavy Date: Sat, 4 Oct 2025 21:26:37 -0600 Subject: [PATCH 17/17] More doctest fixes --- pyttb/tensor.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index fcf843cd..2cf4abee 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -281,10 +281,10 @@ def from_function( [ 0.9787... 0.9500... 0.1440...] [ 2.2408... -0.1513... 1.4542...]] data[:, :, 1] = - [[ 0.7610... 1.4940... -2.55298982] - [ 0.1216... -0.2051... 0.6536186 ] - [ 0.4438... 0.3130... 0.8644362 ] - [ 0.3336... -0.8540... -0.74216502]] + [[ 0.7610... 1.4940... -2.5529...] + [ 0.1216... -0.2051... 0.6536...] + [ 0.4438... 0.3130... 0.8644...] + [ 0.3336... -0.8540... -0.7421...]] Create a :class:`pyttb.tensor` with all entries equal to 1 using :func:`numpy.ones`. Observe that we specifically specify Fortran order:: @@ -645,11 +645,11 @@ def find(self) -> tuple[np.ndarray, np.ndarray]: [0 1 0] [1 0 1] [0 1 1]] - >>> print(vals) - [[0.33540785] - [0.43814143] - [0.5788586 ] - [0.6453551 ]] + >>> print(vals) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE + [[0.3354...] + [0.4381...] + [0.5788...] 
+ [0.6453...]] """ idx = np.nonzero(np.ravel(self.data, order=self.order))[0]
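
A round-trip property of `find` worth noting (a minimal sketch, assuming the
`pyttb`/`numpy` imports from the examples above): the subscripts and column
vector of values it returns address exactly the entries that subscript
indexing retrieves::

    >>> np.random.seed(1)
    >>> T = ttb.tenrand((2, 2, 2))
    >>> subs, vals = T.find()
    >>> np.allclose(T[subs], vals[:, 0])
    True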