8 changes: 6 additions & 2 deletions magpy/__init__.py
@@ -1,9 +1,10 @@
from .core import PauliString, X, Y, Z, Id, FunctionProduct, HamiltonianOperator, kron, frobenius, timegrid
from .solver import System
from .solver import evolve
from torch import set_default_device

__all__ = [
'PauliString', 'X', 'Y', 'Z', 'Id', 'FunctionProduct', 'HamiltonianOperator', 'kron', 'frobenius', 'timegrid',
'System'
'evolve'
]


@@ -18,4 +19,7 @@ def set_device(device):
"""

from ._device import _DEVICE_CONTEXT
from .solver.gauss_legendre_quadrature import _update_device
_DEVICE_CONTEXT.device = device
set_default_device(_DEVICE_CONTEXT.device)
_update_device()
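With this change, `set_device` routes the chosen device through both `torch.set_default_device` and the solver's quadrature cache via `_update_device`, instead of relying on per-call `.to(...)` moves. A minimal usage sketch (the `"cuda:0"` string and CUDA availability are assumptions):

```python
import torch
import magpy as mp

# Pick a GPU when one is available, otherwise stay on the CPU.
mp.set_device("cuda:0" if torch.cuda.is_available() else "cpu")

# Tensors built by magpy helpers now land on that device by default.
print(mp.timegrid(0.0, 1.0, 0.25).device)
```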
42 changes: 28 additions & 14 deletions magpy/core/function_product.py
@@ -1,4 +1,6 @@
from numbers import Number
from copy import deepcopy
import torch
import magpy as mp


@@ -29,7 +31,6 @@ def __init__(self, *funcs):

self.funcs = {}
self.scale = 1
self.__name__ = "FP"

for f in funcs:
try:
@@ -42,7 +43,7 @@ def __init__(self, *funcs):
self.scale *= f
except TypeError:
# Other type of function.
self.funcs = self.__add_func(f)
self.funcs = FunctionProduct.__add_func(self, f)

def __eq__(self, other):
return self.funcs == other.funcs and self.scale == other.scale
@@ -52,17 +53,17 @@ def __mul__(self, other):
return other * self

out = FunctionProduct()
out.funcs = self.funcs.copy()
out.scale = self.scale
out.funcs = deepcopy(self.funcs)
out.scale = deepcopy(self.scale)

if isinstance(other, Number):
if isinstance(other, Number | torch.Tensor):
out.scale *= other
else:
try:
out.scale *= other.scale
out.funcs = self.__merge_funcs(other.funcs)
except AttributeError:
out.funcs = out.__add_func(other)
out.funcs = FunctionProduct.__add_func(out, other)

return out

@@ -74,24 +75,37 @@ def __neg__(self):
def __call__(self, arg):
out = 1
for f in self.funcs:
out *= f(arg)
try:
out *= f(arg.clone().detach())
except AttributeError:
out *= f(torch.tensor(arg))

return out * self.scale

def __repr__(self):
return f"{str(self.scale)}*{str(self.funcs)}"

def __str__(self):
return (str(self.scale) + "*" if isinstance(self.scale, torch.Tensor) or self.scale != 1 else "") \
+ '*'.join([f.__name__ + (f"^{str(n)}" if n > 1 else "") for f, n in self.funcs.items()])

def __hash__(self):
return hash(tuple(self.funcs)) + hash(self.scale)

def __repr__(self):
return str(self.scale) + "*" + str(self.funcs)
def is_empty(self):
"""Return true if function product contains no functions.
"""
return not self.funcs

def __merge_funcs(self, funcs):
# Combine funcs dict with own funcs dict, summing values with shared keys.
return {f: self.funcs.get(f, 0) + funcs.get(f, 0) for f in set(self.funcs) | set(funcs)}

def __add_func(self, f):
# Add function to own funcs dict, adding new key or incremented existing value accordingly.
@staticmethod
def __add_func(out, f):
# Add function to funcs dict, adding new key or incrementing existing value accordingly.
try:
self.funcs[f] += 1
out.funcs[f] += 1
except KeyError:
self.funcs[f] = 1
return self.funcs
out.funcs[f] = 1
return out.funcs
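The changes above let `FunctionProduct` carry `torch.Tensor` scales, deep-copy its state on multiplication, and coerce call arguments to tensors before evaluation. A short sketch of the resulting behaviour (using `torch.sin` and `torch.cos` as illustrative factor functions):

```python
import torch
import magpy as mp

# Product of two callables with a numeric scale: 2*sin*cos.
f = mp.FunctionProduct(torch.sin, torch.cos) * 2

print(f)             # __str__ renders the scale and the factor names, e.g. 2*sin*cos
print(f(0.5))        # plain floats are wrapped with torch.tensor before evaluation
print(f.is_empty())  # False; a FunctionProduct() with no factors returns True
```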
167 changes: 121 additions & 46 deletions magpy/core/hamiltonian_operator.py
@@ -1,4 +1,3 @@
from itertools import chain
from copy import deepcopy
from numbers import Number
import torch
@@ -46,7 +45,7 @@ def __init__(self, *pairs):
except AttributeError:
self.data[pair[0]] = [self.data[pair[0]], pair[1]]

HamiltonianOperator.__simplify(self.data)
self.data = HamiltonianOperator.__simplify_and_sort_data(self.data)

def __eq__(self, other):
return self.data == other.data
@@ -55,14 +54,7 @@ def __mul__(self, other):
out = HamiltonianOperator()

try:
self_data = HamiltonianOperator.__expand(self.data)
other_data = HamiltonianOperator.__expand(other.data)

for p in self_data:
for q in other_data:
out += mp.FunctionProduct() * p[0] * q[0] * p[1] * q[1]

return out
out = sum((p[0]*p[1]*q[0]*q[1] for q in other.unpack_data() for p in self.unpack_data()), out)

except AttributeError:
out.data = deepcopy(self.data)
@@ -80,7 +72,8 @@ def __mul__(self, other):
for coeff in list(out.data):
out.data[mp.FunctionProduct(coeff, other)] = out.data.pop(coeff)

return out
out.data = HamiltonianOperator.__simplify_and_sort_data(out.data)
return out

__rmul__ = __mul__

@@ -91,7 +84,7 @@ def __add__(self, other):
out.data = self.data | other.data
except AttributeError:
# other is PauliString; add it to constants.
out.data = self.data.copy()
out.data = deepcopy(self.data)

try:
out.data[1].append(other)
@@ -114,7 +107,7 @@ def __add__(self, other):
except TypeError:
out.data[coeff].append(other.data[coeff])

HamiltonianOperator.__simplify(out.data)
out.data = HamiltonianOperator.__simplify_and_sort_data(out.data)
return out

def __neg__(self):
@@ -127,42 +120,60 @@ def __sub__(self, other):
return out

def __repr__(self):
return '{' + ', '.join((str(f) if isinstance(f, Number)
else f.__name__) + ': ' + str(q) for f, q in self.data.items()) + '}'
return str(self.data)

def __str__(self):
out = ""
for f, p in self.data.items():
try:
p_str = str(p)
scale_pos = p_str.find('*')

if isinstance(p.scale, torch.Tensor) or p.scale != 1:
out += p_str[:scale_pos] + '*'

def __call__(self, t=None, n=None):
if n is None:
pauli_strings = chain.from_iterable(p if isinstance(p, list) else [p] for p in self.data.values())
n = max(max(p.qubits) for p in pauli_strings)
out = HamiltonianOperator.__add_coeff_to_str(out, f)
out += p_str[scale_pos + 1:] if scale_pos > 0 else p_str

except AttributeError:
out = HamiltonianOperator.__add_coeff_to_str(out, f)

out += '(' if f != 1 else ""
out += " + ".join([str(q) for q in p])
out += ')' if f != 1 else ""

out += " + "

return out[:-3]

def __call__(self, t=None, n_qubits=None):
if n_qubits is None:
n_qubits = max(max(p.qubits) if p.qubits else 0 for p in self.pauli_operators())

if self.is_constant():
try:
return sum(p(n).type(torch.complex128) for p in self.data[1])
except TypeError:
return self.data[1](n).type(torch.complex128)
return self.__call_time_independent(n_qubits)

if t is None:
raise ValueError(
"Hamiltonian is not constant. A value of t is required.")

out = 0
for coeff, ps in self.data.items():
try:
out += coeff(torch.tensor(t)) * ps(n).type(torch.complex128)
except TypeError:
if isinstance(coeff, Number):
out += coeff * ps(n).type(torch.complex128)
else:
for p in ps:
out += coeff(torch.tensor(t)) * p(n).type(torch.complex128)
# Convert input to tensor.
try:
t = t.clone().detach()
except AttributeError:
t = torch.tensor(t)

return out.to(_DEVICE_CONTEXT.device)
return self.__call_time_dependent(t, n_qubits)

def is_constant(self):
"Return true if the Hamiltonian is time-independent."
for coeff in self.data:
if not isinstance(coeff, Number):
return False
if not isinstance(coeff, Number | torch.Tensor):
try:
if not coeff.is_empty():
return False
except AttributeError:
return False
return True

def is_interacting(self):
Expand Down Expand Up @@ -190,20 +201,84 @@ def pauli_operators(self):
"""All Pauli operators in H."""
return [u[1] for u in self.unpack_data()]

def __call_time_independent(self, n_qubits):
# Return matrix representation when constant.
out = 0
for p in self.pauli_operators():
try:
p_val = p(n_qubits)

# If p is a batch, repeat the current value to agree with its shape.
if out.dim() == 2 and p_val.dim() == 3:
out = out.repeat(len(p_val), 1, 1)

except AttributeError:
pass

out += p(n_qubits)

return out

def __call_time_dependent(self, t, n_qubits):
out = 0
for coeff, p in self.unpack_data():
p_val = p(n_qubits).to(_DEVICE_CONTEXT.device)

# Evaluate coefficient if it's a function.
try:
coeff = coeff(t).to(_DEVICE_CONTEXT.device)
except TypeError:
pass

# Evaluate next term in data.
next_term = 0
try:
next_term = coeff.reshape(-1,1,1) * p_val
except AttributeError:
next_term = coeff * p_val

# If p is a batch, repeat the current value to agree with its shape.
try:
if out.dim() == 2 and next_term.dim() == 3:
out = out.repeat(len(next_term), 1, 1)
except AttributeError:
pass

out += next_term

return out

@staticmethod
def __simplify(arrs):
def __simplify_data(arrs):
# Collect all PauliStrings in all lists in arrs.
for coeff in arrs:
arrs[coeff] = mp.PauliString.collect(arrs[coeff])

@staticmethod
def __expand(data):
# Expand all functions and lists of qubits into pairs of functions with single qubits.
expanded_data = []
for pair in data.items():
def __add_coeff_to_str(out, f):
try:
out += f.__name__ + '*'
except AttributeError:
try:
for qubit in pair[1]:
expanded_data.append((pair[0], qubit))
except TypeError:
expanded_data.append(pair)
return expanded_data
if f != 1:
out += str(f) + '*'
except (RuntimeError, AttributeError):
out += str(f) + '*'

return out

@staticmethod
def __sort_data(data):
# Move all constant keys to the start of the dictionary.
const_keys = []
other_keys = []

for key in data:
(const_keys if isinstance(key, Number | torch.Tensor) else other_keys).append(key)

return dict((key, data[key]) for key in const_keys + other_keys)

@staticmethod
def __simplify_and_sort_data(data):
HamiltonianOperator.__simplify_data(data)
return HamiltonianOperator.__sort_data(data)
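The reworked `HamiltonianOperator.__call__` takes an explicit `n_qubits`, splits the constant and time-dependent paths, and broadcasts batched coefficients against the Pauli matrices. A hedged sketch of building and evaluating an operator (the `X(1)`/`Z(1)` single-qubit constructors are an assumption about the wider magpy API, not something this diff defines):

```python
import torch
import magpy as mp

# H(t) = 2*Z1 + sin(t)*X1, built from (coefficient, operator) pairs.
H = mp.HamiltonianOperator((2, mp.Z(1)), (torch.sin, mp.X(1)))

print(H.is_constant())       # False: one coefficient is a callable
print(H(t=0.5, n_qubits=1))  # dense 2x2 matrix at t = 0.5

# Passing a 1D tensor of times exercises the new batching branch.
print(H(t=torch.tensor([0.0, 0.5]), n_qubits=1).shape)  # (2, 2, 2)
```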
12 changes: 5 additions & 7 deletions magpy/core/linalg.py
@@ -1,6 +1,5 @@
import functools
import torch
from .._device import _DEVICE_CONTEXT

def kron(*args):
"""Compute the Kronecker product of the input arguments.
@@ -11,11 +10,11 @@ def kron(*args):
Resultant product
"""

return functools.reduce(torch.kron, args).to(_DEVICE_CONTEXT.device)
return functools.reduce(torch.kron, args)


def frobenius(a, b):
"""Compute the Frobenius inner product of `a` and `b`.
"""Compute the Frobenius inner product of `a` and `b`.

If `a` is a 3D tensor and `b` is a 2D tensor, then the inner product is
batched across `a`. Otherwise `a` and `b` must both be 2D tensors.
@@ -34,10 +33,9 @@ def frobenius(a, b):
"""

try:
return torch.vmap(torch.trace)(torch.matmul(torch.conj(torch.transpose(a, 1, -1)), b)) \
.to(_DEVICE_CONTEXT.device)
return torch.vmap(torch.trace)(torch.matmul(torch.conj(torch.transpose(a, 1, -1)), b))
except RuntimeError:
return torch.trace(torch.conj(torch.transpose(a, 0, 1)) @ b).to(_DEVICE_CONTEXT.device)
return torch.trace(torch.conj(torch.transpose(a, 0, 1)) @ b)


def timegrid(start, stop, step):
@@ -59,4 +57,4 @@ def timegrid(start, stop, step):
Grid of values
"""

return torch.arange(start, stop + step, step).to(_DEVICE_CONTEXT.device)
return torch.arange(start, stop + step, step)
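With the explicit `.to(_DEVICE_CONTEXT.device)` calls removed, these helpers simply return tensors on torch's default device, which `set_device` now controls globally. A quick sketch of the three utilities:

```python
import torch
import magpy as mp

sx = torch.tensor([[0, 1], [1, 0]], dtype=torch.complex128)  # Pauli X matrix
id2 = torch.eye(2, dtype=torch.complex128)

print(mp.kron(id2, sx).shape)       # torch.Size([4, 4])
print(mp.frobenius(sx, sx))         # trace(sx^H @ sx) = 2+0j
print(mp.timegrid(0.0, 1.0, 0.25))  # tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
```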