Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 29 additions & 5 deletions minitorch/module.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Any, Dict, Optional, Sequence, Tuple
from typing import Any, Dict, Optional, Sequence, Tuple, Iterable


class Module:
Expand Down Expand Up @@ -32,12 +32,20 @@ def modules(self) -> Sequence[Module]:
def train(self) -> None:
    """Set the mode of this module and all descendent modules to `train`."""

    def set_mode(cur: "Module") -> None:
        # Flip this module's flag, then recurse into every registered child.
        cur.training = True
        for child in cur.__dict__["_modules"].values():
            set_mode(child)

    set_mode(self)

def eval(self) -> None:
    """Set the mode of this module and all descendent modules to `eval`."""

    def set_mode(cur: "Module") -> None:
        # Flip this module's flag, then recurse into every registered child.
        cur.training = False
        for child in cur.__dict__["_modules"].values():
            set_mode(child)

    set_mode(self)

def named_parameters(self) -> Dict[str, "Parameter"]:
    """Collect all the parameters of this module and its descendents.

    Returns:
        Dict mapping each parameter's dot-separated name (e.g.
        ``"layer.weight"``) to the Parameter itself. Names of parameters
        owned directly by this module carry no prefix.
    """
    # NOTE: annotation corrected to Dict — a dict is returned, and callers
    # (project/interface/train.py) iterate it with .items().
    parameters: Dict[str, "Parameter"] = {}

    def collect(prefix: str, node: "Module") -> None:
        # Parameters owned directly by `node`, keyed under the dotted prefix.
        lead = prefix + "." if prefix else ""
        for key, param in node._parameters.items():
            parameters[lead + key] = param
        # Recurse into child modules, extending the dotted path.
        for key, child in node._modules.items():
            collect(lead + key, child)

    collect("", self)
    return parameters

def parameters(self) -> Sequence["Parameter"]:
    """Enumerate over all the parameters of this module and its descendents.

    Returns:
        List of Parameters: this module's own first, then each child's,
        depth-first in registration order.
    """
    found: list = []

    def walk(cur: "Module") -> None:
        # Own parameters first, then every descendant's, depth-first.
        found.extend(cur.__dict__["_parameters"].values())
        for module in cur.__dict__["_modules"].values():
            walk(module)

    walk(self)
    return found


def add_parameter(self, k: str, v: Any) -> Parameter:
"""Manually add a parameter. Useful helper for scalar parameters.
Expand Down
157 changes: 156 additions & 1 deletion minitorch/operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,71 @@
# $f(x) = |x - y| < 1e-2$


# TODO: Implement for Task 0.1.
def mul(x: float, y: float) -> float:
    """Return the product of `x` and `y`."""
    return x * y


def id(x: float) -> float:
    """Identity function: return `x` unchanged."""
    return x


def add(x: float, y: float) -> float:
    """Return the sum of `x` and `y`."""
    return x + y


def neg(x: float) -> float:
    """Return the negation of `x`."""
    return 0.0 - x


def lt(x: float, y: float) -> bool:
    """Return True when `x` is strictly less than `y`."""
    return x < y


def eq(x: float, y: float) -> bool:
    """Return True when `x` equals `y`."""
    return x == y


def max(x: float, y: float) -> float:
    """Return the larger of `x` and `y` (returns `y` on ties)."""
    if x > y:
        return x
    return y


def is_close(x: float, y: float) -> bool:
    """Return True when `x` and `y` differ by less than 1e-2."""
    return abs(x - y) < 1e-2


def sigmoid(x: float) -> float:
    """Logistic function, computed in a numerically stable form.

    For non-negative `x` use 1 / (1 + e^-x); for negative `x` use
    e^x / (1 + e^x), which avoids overflow of exp(-x) for very
    negative inputs.
    """
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    ex = math.exp(x)
    return ex / (1.0 + ex)


def relu(x: float) -> float:
    """Rectified linear unit: `x` for positive inputs, else 0."""
    if x > 0:
        return x
    return 0.0


def log(x: float) -> float:
    """Return the natural logarithm of `x`."""
    return math.log(x)


def exp(x: float) -> float:
    """Return e raised to the power `x`."""
    return math.exp(x)


def inv(x: float) -> float:
    """Return the reciprocal 1/x."""
    return 1.0 / x


def relu_back(x: float, y: float) -> float:
    """Gradient of relu at `x` times upstream gradient `y`: y where x > 0, else 0."""
    return y if x > 0 else 0.0


def inv_back(x: float, y: float) -> float:
    """Gradient of inv at `x` times upstream gradient `y`: -y / x^2."""
    return y * (-1.0 / (math.pow(x, 2)))


def log_back(x: float, y: float) -> float:
    """Gradient of log at `x` times upstream gradient `y`: y / x."""
    return y * (1.0 / x)


# ## Task 0.3
Expand All @@ -52,3 +116,94 @@


# TODO: Implement for Task 0.3.
def map(fn: Callable[[float], float]) -> Callable[[Iterable[float]], Iterable[float]]:
    """Higher-order map.

    See https://en.wikipedia.org/wiki/Map_(higher-order_function)

    Args:
        fn: Function from one value to one value.

    Returns:
        A function that takes a list, applies `fn` to each element, and returns a
        new list

    """

    def apply(values: Iterable[float]) -> Iterable[float]:
        # Apply `fn` elementwise, collecting the results into a new list.
        return [fn(v) for v in values]

    return apply


def zipWith(
    fn: Callable[[float, float], float]
) -> Callable[[Iterable[float], Iterable[float]], Iterable[float]]:
    """Higher-order zipwith (or map2).

    See https://en.wikipedia.org/wiki/Map_(higher-order_function)

    Args:
        fn: combine two values

    Returns:
        Function that takes two equally sized lists `ls1` and `ls2`, produce a new list by
        applying fn(x, y) on each pair of elements.

    """

    def combine(first: Iterable[float], second: Iterable[float]) -> Iterable[float]:
        # zip pairs elements positionally; fn merges each pair.
        return [fn(a, b) for a, b in zip(first, second)]

    return combine


def reduce(
    fn: Callable[[float, float], float], start: float
) -> Callable[[Iterable[float]], float]:
    r"""Higher-order reduce.

    Args:
        fn: combine two values
        start: start value $x_0$

    Returns:
        Function that takes a list `ls` of elements
        $x_1 \ldots x_n$ and computes the reduction :math:`fn(x_3, fn(x_2,
        fn(x_1, x_0)))`

    """

    def fold(ls: Iterable[float]) -> float:
        # Left-to-right fold: each element is combined with the running value,
        # with the element passed as fn's first argument.
        acc = start
        for item in ls:
            acc = fn(item, acc)
        return acc

    return fold

def negList(ls: Iterable[float]) -> Iterable[float]:
    """Use `map` and `neg` to negate each element in `ls`"""
    negate_all = map(neg)
    return negate_all(ls)

def addLists(ls1: Iterable[float], ls2: Iterable[float]) -> Iterable[float]:
    """Add the elements of `ls1` and `ls2` using `zipWith` and `add`"""
    pairwise_add = zipWith(add)
    return pairwise_add(ls1, ls2)

def sum(ls: Iterable[float]) -> float:
    """Sum up a list using `reduce` and `add`."""
    accumulate = reduce(add, 0.0)
    return accumulate(ls)


def prod(ls: Iterable[float]) -> float:
    """Product of a list using `reduce` and `mul`."""
    multiply_all = reduce(mul, 1.0)
    return multiply_all(ls)
2 changes: 1 addition & 1 deletion project/interface/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ def get_train(hidden_layers):

if parameter_control:
st.markdown("### Parameters")
for n, p in train.model.named_parameters():
for n, p in train.model.named_parameters().items():
value = st.slider(
f"Parameter: {n}", min_value=-10.0, max_value=10.0, value=p.value
)
Expand Down
2 changes: 1 addition & 1 deletion requirements.extra.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ pydot==1.4.1
python-mnist
streamlit==1.12.0
streamlit-ace
torch
torch == 2.2.1
watchdog==1.0.2
altair==4.2.2
networkx==3.3
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
colorama==0.4.3
hypothesis == 6.54
numba == 0.60
numpy == 2.0.0
numpy == 1.26.4
pre-commit == 2.20.0
pytest == 8.3.2
pytest-env
Expand Down
24 changes: 16 additions & 8 deletions tests/test_operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@
relu,
relu_back,
sigmoid,
is_close,
zipWith
)

from .strategies import assert_close, small_floats
Expand Down Expand Up @@ -108,40 +110,46 @@ def test_sigmoid(a: float) -> None:
* It is strictly increasing.
"""
# TODO: Implement for Task 0.2.
raise NotImplementedError("Need to implement for Task 0.2")
assert 0.0 <= sigmoid(a) <= 1.0
assert abs(1.0 - sigmoid(a)-sigmoid(-a)) < 1e-2
assert 0.5 == sigmoid(0.0)
assert sigmoid(a + 1.0) >= sigmoid(a)


@pytest.mark.task0_2
@given(small_floats, small_floats, small_floats)
def test_transitive(a: float, b: float, c: float) -> None:
    """Test the transitive property of less-than (a < b and b < c implies a < c)"""
    # Only the generated triples that satisfy the premise are checked.
    if a < b and b < c:
        assert lt(a, c)


@pytest.mark.task0_2
@given(small_floats, small_floats)
def test_symmetric(a: float, b: float) -> None:
    """Write a test that ensures that :func:`minitorch.operators.mul` is symmetric, i.e.
    gives the same value regardless of the order of its input.
    """
    assert mul(a, b) == mul(b, a)


@pytest.mark.task0_2
@given(small_floats, small_floats, small_floats)
def test_distribute(a: float, b: float, c: float) -> None:
    r"""Write a test that ensures that your operators distribute, i.e.
    :math:`z \times (x + y) = z \times x + z \times y`
    """
    # is_close tolerates float rounding between the two evaluation orders.
    assert is_close(mul(a, b + c), mul(a, b) + mul(a, c))


@pytest.mark.task0_2
@given(small_floats)
def test_other(a: float) -> None:
    """Multiplicative identity: `a * 1 == a` for all small floats."""
    assert is_close(mul(a, 1.0), a)


# ## Task 0.3 - Higher-order functions
Expand Down Expand Up @@ -169,7 +177,7 @@ def test_sum_distribute(ls1: List[float], ls2: List[float]) -> None:
is the same as the sum of each element of `ls1` plus each element of `ls2`.
"""
# TODO: Implement for Task 0.3.
raise NotImplementedError("Need to implement for Task 0.3")
assert_close(sum(ls1) + sum(ls2), sum(zipWith(add)(ls1, ls2)))


@pytest.mark.task0_3
Expand Down