diff options
| author | ericmarin <maarin.eric@gmail.com> | 2026-03-26 20:28:38 +0100 |
|---|---|---|
| committer | ericmarin <maarin.eric@gmail.com> | 2026-03-26 21:27:10 +0100 |
| commit | 3e338c3be65638ef1898c32c707c50422acafb18 (patch) | |
| tree | 80a29ac6b7baee3bbfe4f161fc893fd5948d9409 /xor/xor.py | |
| parent | 689c34076d08e59b1382864f9efcd983c8665ae5 (diff) | |
| download | vein-3e338c3be65638ef1898c32c707c50422acafb18.tar.gz vein-3e338c3be65638ef1898c32c707c50422acafb18.zip | |
added LICENSE
Diffstat (limited to 'xor/xor.py')
| -rw-r--r-- | xor/xor.py | 40 |
1 file changed, 0 insertions, 40 deletions
# xor/xor.py — content of the file removed by this commit, reformatted.
import torch
import torch.nn as nn
import torch.onnx


class xor_mlp(nn.Module):
    """Minimal two-layer perceptron for learning the XOR truth table.

    Architecture: Linear(2 -> hidden_dim) -> ReLU -> Linear(hidden_dim -> 1).
    """

    def __init__(self, hidden_dim):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(2, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x):
        # x: (batch, 2) float tensor (first Linear expects 2 input features);
        # returns the raw (unbounded) (batch, 1) output of the last Linear.
        return self.layers(x)


def train_model(name: str, dim):
    """Fit an xor_mlp of hidden width *dim* on the four XOR examples.

    Runs 1000 Adam steps (lr=0.1) against an MSE objective over the full
    truth table, printing the loss every 100 epochs, and returns the
    trained network (still in training mode; callers switch to eval()).
    """
    inputs = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=torch.float32)
    targets = torch.tensor([[0], [1], [1], [0]], dtype=torch.float32)

    net = xor_mlp(hidden_dim=dim)
    loss_fn = nn.MSELoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.1)

    print(f"Training {name}...")
    for epoch in range(1000):
        # Full-batch step over all four XOR rows.
        optimizer.zero_grad()
        loss = loss_fn(net(inputs), targets)
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 100 == 0:
            print(f"  Epoch {epoch+1}, Loss: {loss.item():.4f}")
    return net


if __name__ == "__main__":
    # Train two differently-sized networks, then export each to ONNX
    # with a dummy (1, 2) input to trace the graph.
    torch_net_a = train_model("Network A", 8).eval()
    torch_net_b = train_model("Network B", 16).eval()

    torch.onnx.export(torch_net_a, (torch.randn(1, 2),), "xor_a.onnx")
    torch.onnx.export(torch_net_b, (torch.randn(1, 2),), "xor_b.onnx")
