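"""Train two small MLPs on XOR, export them to ONNX, and compare them with nneq.

The checks at the bottom ask whether the two trained networks are strictly
equivalent, equivalent within an epsilon of 0.1, and argmax-equivalent.
nneq is assumed here to build a solver-backed encoding of each ONNX graph
(hence the z3_net_* names below); see the nneq module in this repository.
"""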
import torch
import torch.nn as nn
import torch.onnx
import nneq

class xor_mlp(nn.Module):
    """Two-layer MLP: 2 inputs -> hidden_dim ReLU units -> 1 output."""

    def __init__(self, hidden_dim=8):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(2, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1)
        )

    def forward(self, x):
        return self.layers(x)

def train_model(name: str, dim: int) -> xor_mlp:
    """Train an MLP with `dim` hidden units on the XOR truth table."""
    # XOR truth table: all four input pairs and their expected outputs.
    X = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=torch.float32)
    Y = torch.tensor([[0], [1], [1], [0]], dtype=torch.float32)

    net = xor_mlp(hidden_dim=dim)
    loss_fn = nn.MSELoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.1)

    print(f"Training {name}...")
    for epoch in range(1000):
        optimizer.zero_grad()
        out = net(X)
        loss = loss_fn(out, Y)
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 100 == 0:
            print(f"  Epoch {epoch+1}, Loss: {loss.item():.4f}")
    return net

if __name__ == "__main__":
    # Train two networks on the same task with different hidden widths.
    torch_net_a = train_model("Network A", 8).eval()
    torch_net_b = train_model("Network B", 16).eval()

    # Export both networks to ONNX with the dynamo-based exporter; the dummy
    # (1, 2) input fixes the expected input shape.
    onnx_net_a = torch.onnx.export(torch_net_a, (torch.randn(1, 2),), verbose=False, dynamo=True).model_proto  # type: ignore
    onnx_net_b = torch.onnx.export(torch_net_b, (torch.randn(1, 2),), verbose=False, dynamo=True).model_proto  # type: ignore

    # Build nneq's symbolic representation of each ONNX graph.
    z3_net_a = nneq.net(onnx_net_a)
    z3_net_b = nneq.net(onnx_net_b)

    # Run the three equivalence checks: exact output equality, outputs within
    # epsilon = 0.1, and matching argmax.
    print("")
    nneq.strict_equivalence(z3_net_a, z3_net_b)
    print("")
    nneq.epsilon_equivalence(z3_net_a, z3_net_b, 0.1)
    print("")
    nneq.argmax_equivalence(z3_net_a, z3_net_b)