| author | ericmarin <maarin.eric@gmail.com> | 2026-04-13 19:42:39 +0200 |
|---|---|---|
| committer | ericmarin <maarin.eric@gmail.com> | 2026-04-13 21:38:16 +0200 |
| commit | fcbbc960f43137aa170b78ba0be2d89aec3bc766 (patch) | |
| tree | 15e0249bf429888d9b64f19eb0c6e2d9af0901e4 | /examples/mnist |
| parent | 8f4f24523235965cfa2041ed00cc40fc0b4bd367 (diff) | |
| download | vein-fcbbc960f43137aa170b78ba0be2d89aec3bc766.tar.gz | vein-fcbbc960f43137aa170b78ba0be2d89aec3bc766.zip |
New ops: Slice, Squeeze, Unsqueeze
New tests based on papers:
- Wide-to-Deep, Deep-to-Wide Transformation
- Pruning of stably inactive (always negative) and stably active (always
  positive) ReLUs
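For orientation, here is a minimal sketch of the tensor semantics behind the three new ops, written with their PyTorch equivalents. This snippet is illustrative only: how vein itself names and implements these operators is not shown in this diff, and the semantics are assumed to match the ONNX Slice/Squeeze/Unsqueeze operators of the same names.

```python
# Illustrative only: PyTorch equivalents of the three ops added in this
# commit, assuming they follow ONNX Slice/Squeeze/Unsqueeze semantics.
import torch

t = torch.randn(8, 10)

# Slice: take a sub-range along one dimension.
s = t[:, 2:5]        # shape (8, 3)

# Unsqueeze: insert a new size-1 dimension.
u = t.unsqueeze(0)   # shape (1, 8, 10)

# Squeeze: remove a size-1 dimension (inverse of the Unsqueeze above).
q = u.squeeze(0)     # shape (8, 10)

assert q.shape == t.shape
```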
Diffstat (limited to '')
| -rw-r--r-- | examples/mnist/mnist.py | 5 |
| -rw-r--r-- | examples/mnist/mnist_deep_to_wide.py | 85 |
| -rw-r--r-- | examples/mnist/mnist_stably_active.py | 69 |
| -rw-r--r-- | examples/mnist/mnist_stably_inactive.py | 52 |
| -rw-r--r-- | examples/mnist/mnist_wide_to_deep.py | 82 |
5 files changed, 290 insertions, 3 deletions
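A note on the equivalence the wide/deep tests below exercise: a one-hidden-layer network computes

    W2 · relu(W1·x + b1) + b2 = Σ_j W2[:, j] · relu(W1[j]·x + b1[j]) + b2,

i.e. a sum of rank-one contributions, one per hidden neuron. Each Deep_MNIST_Block in the new files adds exactly one such term to a running score s while passing x through unchanged, so a chain of one block per hidden neuron plus the final bias reproduces the wide network exactly; the same identity read in reverse justifies the deep-to-wide direction.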
diff --git a/examples/mnist/mnist.py b/examples/mnist/mnist.py
index 0a81878..a1706be 100644
--- a/examples/mnist/mnist.py
+++ b/examples/mnist/mnist.py
@@ -24,7 +24,7 @@ def train_model(name: str, dim):
     optimizer = torch.optim.Adam(net.parameters(), lr=0.5e-4)
 
     print(f"Training {name} ({dim} neurons)...")
-    for epoch in range(100):
+    for epoch in range(10):
         global loss
         for data in trainloader:
             inputs, targets = data
@@ -33,8 +33,7 @@ def train_model(name: str, dim):
             loss = loss_fn(outputs, targets)
             loss.backward()
             optimizer.step()
-        if (epoch + 1) % 10 == 0:
-            print(f"  Epoch {epoch+1}, Loss: {loss.item():.4f}")
+        print(f"  Epoch {epoch+1}, Loss: {loss.item():.4f}")
     return net
 
 if __name__ == "__main__":
diff --git a/examples/mnist/mnist_deep_to_wide.py b/examples/mnist/mnist_deep_to_wide.py
new file mode 100644
index 0000000..6c4dde9
--- /dev/null
+++ b/examples/mnist/mnist_deep_to_wide.py
@@ -0,0 +1,85 @@
+import torch, torch.nn as nn
+from torchvision.datasets import MNIST
+from torch.utils.data import DataLoader
+from torchvision import transforms
+
+class Deep_MNIST_Block(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.fc1 = nn.Linear(784, 1)
+        self.fc2 = nn.Linear(1, 10, bias=False)
+
+    def forward(self, x_s):
+        x, s = x_s
+        z = torch.relu(self.fc1(x))
+        ds = self.fc2(z)
+        return (x, s + ds)
+
+class Deep_MNIST_MLP(nn.Module):
+    def __init__(self, num_blocks):
+        super().__init__()
+        self.flatten = nn.Flatten()
+        self.blocks = nn.Sequential(*[Deep_MNIST_Block() for _ in range(num_blocks)])
+        self.final_bias = nn.Parameter(torch.zeros(10))
+
+    def forward(self, x):
+        x = self.flatten(x)
+        s = torch.zeros(x.shape[0], 10, device=x.device)
+        _, final_s = self.blocks((x, s))
+        return final_s + self.final_bias
+
+class Wide_MNIST_MLP(nn.Module):
+    def __init__(self, deep_net):
+        super().__init__()
+        self.flatten = nn.Flatten()
+        num_neurons = len(deep_net.blocks)
+        self.layers = nn.Sequential(
+            nn.Linear(784, num_neurons),
+            nn.ReLU(),
+            nn.Linear(num_neurons, 10),
+        )
+        with torch.no_grad():
+            w1_all = []
+            b1_all = []
+            w2_all = []
+
+            for block in deep_net.blocks:
+                w1_all.append(block.fc1.weight.data)
+                b1_all.append(block.fc1.bias.data)
+                w2_all.append(block.fc2.weight.data)
+
+            self.layers[0].weight.copy_(torch.cat(w1_all, dim=0))  # pyright: ignore
+            self.layers[0].bias.copy_(torch.cat(b1_all, dim=0))  # pyright: ignore
+            self.layers[2].weight.copy_(torch.cat(w2_all, dim=1))  # pyright: ignore
+            self.layers[2].bias.copy_(deep_net.final_bias.data)  # pyright: ignore
+
+    def forward(self, x):
+        x = self.flatten(x)
+        return self.layers(x)
+
+train_dataset = MNIST('./', download=True, transform=transforms.ToTensor(), train=True)
+trainloader = DataLoader(train_dataset, batch_size=128, shuffle=True)
+
+def train_deep_model(name: str, num_blocks):
+    net = Deep_MNIST_MLP(num_blocks=num_blocks)
+    loss_fn = nn.CrossEntropyLoss()
+    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
+
+    print(f"Training {name} ({num_blocks} blocks)...")
+    for epoch in range(10):
+        global loss
+        for inputs, targets in trainloader:
+            optimizer.zero_grad()
+            outputs = net(inputs)
+            loss = loss_fn(outputs, targets)
+            loss.backward()
+            optimizer.step()
+        print(f"  Epoch {epoch+1}, Loss: {loss.item():.4f}")
+    return net
+
+if __name__ == "__main__":
+    torch_net_a = train_deep_model("Deep Network", 8).eval()
+    torch_net_b = Wide_MNIST_MLP(torch_net_a).eval()
+
+    torch.onnx.export(torch_net_a, (torch.randn(1, 28, 28),), "mnist_a.onnx")
+    torch.onnx.export(torch_net_b, (torch.randn(1, 28, 28),), "mnist_b.onnx")
diff --git a/examples/mnist/mnist_stably_active.py b/examples/mnist/mnist_stably_active.py
new file mode 100644
index 0000000..267d682
--- /dev/null
+++ b/examples/mnist/mnist_stably_active.py
@@ -0,0 +1,69 @@
+import torch, torch.nn as nn
+from torchvision.datasets import MNIST
+from torch.utils.data import DataLoader
+from torchvision import transforms
+
+class MNIST_MLP(nn.Module):
+    def __init__(self, hidden_dim):
+        super().__init__()
+        self.flatten = nn.Flatten()
+        self.layers = nn.Sequential(
+            nn.Linear(784, hidden_dim),
+            nn.ReLU(),
+            nn.Linear(hidden_dim, 10),
+        )
+    def forward(self, x):
+        x = self.flatten(x)
+        return self.layers(x)
+
+class MNIST_Linear(nn.Module):
+    def __init__(self, weight, bias):
+        super().__init__()
+        self.flatten = nn.Flatten()
+        self.fc = nn.Linear(784, 10)
+        self.fc.weight.data = weight
+        self.fc.bias.data = bias
+    def forward(self, x):
+        x = self.flatten(x)
+        return self.fc(x)
+
+train_dataset = MNIST('./', download=True, transform=transforms.ToTensor(), train=True)
+trainloader = DataLoader(train_dataset, batch_size=128, shuffle=True)
+
+def train_model(name: str, dim):
+    net = MNIST_MLP(hidden_dim=dim)
+    loss_fn = nn.CrossEntropyLoss()
+    optimizer = torch.optim.Adam(net.parameters(), lr=0.5e-4)
+
+    print(f"Training {name} ({dim} neurons)...")
+    for epoch in range(10):
+        global loss
+        for data in trainloader:
+            inputs, targets = data
+            optimizer.zero_grad()
+            outputs = net(inputs)
+            loss = loss_fn(outputs, targets)
+            loss.backward()
+            optimizer.step()
+        print(f"  Epoch {epoch+1}, Loss: {loss.item():.4f}")
+    return net
+
+if __name__ == "__main__":
+    torch_net_a = train_model("Base Network", 6).eval()
+
+    with torch.no_grad():
+        torch_net_a.layers[0].weight.fill_(0.01)  # pyright: ignore
+        torch_net_a.layers[0].bias.fill_(5.0)  # pyright: ignore
+
+    W1 = torch_net_a.layers[0].weight.data
+    b1 = torch_net_a.layers[0].bias.data
+    W2 = torch_net_a.layers[2].weight.data
+    b2 = torch_net_a.layers[2].bias.data
+
+    W_collapsed = torch.matmul(W2, W1)  # pyright: ignore
+    b_collapsed = torch.matmul(W2, b1) + b2  # pyright: ignore
+
+    torch_net_b = MNIST_Linear(W_collapsed, b_collapsed).eval()
+
+    torch.onnx.export(torch_net_a, (torch.randn(1, 28, 28),), "mnist_a.onnx")
+    torch.onnx.export(torch_net_b, (torch.randn(1, 28, 28),), "mnist_b.onnx")
diff --git a/examples/mnist/mnist_stably_inactive.py b/examples/mnist/mnist_stably_inactive.py
new file mode 100644
index 0000000..ad81461
--- /dev/null
+++ b/examples/mnist/mnist_stably_inactive.py
@@ -0,0 +1,52 @@
+import torch, torch.nn as nn
+from torchvision.datasets import MNIST
+from torch.utils.data import DataLoader
+from torchvision import transforms
+
+class MNIST_MLP(nn.Module):
+    def __init__(self, hidden_dim):
+        super().__init__()
+        self.layers = nn.Sequential(
+            nn.Flatten(),
+            nn.Linear(28 * 28, hidden_dim),
+            nn.ReLU(),
+            nn.Linear(hidden_dim, 10),
+        )
+    def forward(self, x):
+        return self.layers(x)
+
+train_dataset = MNIST('./', download=True, transform=transforms.ToTensor(), train=True)
+trainloader = DataLoader(train_dataset, batch_size=128, shuffle=True)
+
+def train_model(name: str, dim):
+    net = MNIST_MLP(hidden_dim=dim)
+    loss_fn = nn.CrossEntropyLoss()
+    optimizer = torch.optim.Adam(net.parameters(), lr=0.5e-4)
+
+    print(f"Training {name} ({dim} neurons)...")
+    for epoch in range(10):
+        global loss
+        for data in trainloader:
+            inputs, targets = data
+            optimizer.zero_grad()
+            outputs = net(inputs)
+            loss = loss_fn(outputs, targets)
+            loss.backward()
+            optimizer.step()
+        print(f"  Epoch {epoch+1}, Loss: {loss.item():.4f}")
+    return net
+
+if __name__ == "__main__":
+    torch_net_a = train_model("Base Network", 6).eval()
+
+    with torch.no_grad():
+        torch_net_a.layers[1].weight[5] = -1.0  # pyright: ignore
+        torch_net_a.layers[1].bias[5] = -1.0  # pyright: ignore
+
+        torch_net_b = MNIST_MLP(6).eval()
+        torch_net_b.load_state_dict(torch_net_a.state_dict())
+
+        torch_net_b.layers[3].weight[:, 5] = 0.0  # pyright: ignore
+
+    torch.onnx.export(torch_net_a, (torch.randn(1, 28, 28),), "mnist_a.onnx")
+    torch.onnx.export(torch_net_b, (torch.randn(1, 28, 28),), "mnist_b.onnx")
diff --git a/examples/mnist/mnist_wide_to_deep.py b/examples/mnist/mnist_wide_to_deep.py
new file mode 100644
index 0000000..64f4ec7
--- /dev/null
+++ b/examples/mnist/mnist_wide_to_deep.py
@@ -0,0 +1,82 @@
+import torch, torch.nn as nn
+from torchvision.datasets import MNIST
+from torch.utils.data import DataLoader
+from torchvision import transforms
+
+class Wide_MNIST_MLP(nn.Module):
+    def __init__(self, hidden_dim):
+        super().__init__()
+        self.flatten = nn.Flatten()
+        self.layers = nn.Sequential(
+            nn.Linear(28 * 28, hidden_dim),
+            nn.ReLU(),
+            nn.Linear(hidden_dim, 10),
+        )
+    def forward(self, x):
+        x = self.flatten(x)
+        return self.layers(x)
+
+class Deep_MNIST_Block(nn.Module):
+    def __init__(self, w1, b1, w2):
+        super().__init__()
+        self.fc1 = nn.Linear(784, 1)
+        self.fc1.weight.data = w1.clone().unsqueeze(0)
+        self.fc1.bias.data = b1.clone().unsqueeze(0)
+        self.fc2 = nn.Linear(1, 10, bias=False)
+        self.fc2.weight.data = w2.clone().unsqueeze(1)
+
+    def forward(self, x_s):
+        x, s = x_s
+        z = torch.relu(self.fc1(x))
+        ds = self.fc2(z)
+        return (x, s + ds)
+
+class Deep_MNIST_MLP(nn.Module):
+    def __init__(self, wide_net):
+        super().__init__()
+        self.flatten = nn.Flatten()
+
+        w1 = wide_net.layers[0].weight.data
+        b1 = wide_net.layers[0].bias.data
+        w2 = wide_net.layers[2].weight.data
+        b2 = wide_net.layers[2].bias.data
+        num_neurons = w1.shape[0]
+
+        self.blocks = nn.Sequential(*[
+            Deep_MNIST_Block(w1[j], b1[j], w2[:, j]) for j in range(num_neurons)
+        ])
+        self.final_bias = nn.Parameter(b2.clone())
+
+    def forward(self, x):
+        x = self.flatten(x)
+        s = torch.zeros(x.shape[0], 10, device=x.device)
+        _, final_s = self.blocks((x, s))
+        return final_s + self.final_bias
+
+train_dataset = MNIST('./', download=True, transform=transforms.ToTensor(), train=True)
+trainloader = DataLoader(train_dataset, batch_size=128, shuffle=True)
+
+def train_model(name: str, dim):
+    net = Wide_MNIST_MLP(hidden_dim=dim)
+    loss_fn = nn.CrossEntropyLoss()
+    optimizer = torch.optim.Adam(net.parameters(), lr=0.5e-4)
+
+    print(f"Training {name} ({dim} neurons)...")
+    for epoch in range(10):
+        global loss
+        for data in trainloader:
+            inputs, targets = data
+            optimizer.zero_grad()
+            outputs = net(inputs)
+            loss = loss_fn(outputs, targets)
+            loss.backward()
+            optimizer.step()
+        print(f"  Epoch {epoch+1}, Loss: {loss.item():.4f}")
+    return net
+
+if __name__ == "__main__":
+    torch_net_a = train_model("Wide Network", 8).eval()
+    torch_net_b = Deep_MNIST_MLP(torch_net_a).eval()
+
+    torch.onnx.export(torch_net_a, (torch.randn(1, 28, 28),), "mnist_a.onnx")
+    torch.onnx.export(torch_net_b, (torch.randn(1, 28, 28),), "mnist_b.onnx")
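Each script exports a pair mnist_a.onnx / mnist_b.onnx that is expected to agree: exactly for the wide-to-deep and deep-to-wide pairs, and on the MNIST input range [0, 1] for the two pruning pairs. In the stably-active test, weights of 0.01 and a bias of 5.0 make every hidden pre-activation positive, so the ReLU layer is affine there and collapses to a single Linear; in the stably-inactive test, neuron 5's pre-activation is −Σx − 1 < 0 for non-negative inputs, so zeroing its outgoing column changes nothing. A quick spot-check of a pair outside vein might look like the following sketch, assuming onnxruntime is installed (this harness is not part of the commit):

```python
# Hypothetical spot-check, not part of this commit: run an exported pair
# on random in-range inputs and report the largest output discrepancy.
import numpy as np
import onnxruntime as ort

sess_a = ort.InferenceSession("mnist_a.onnx", providers=["CPUExecutionProvider"])
sess_b = ort.InferenceSession("mnist_b.onnx", providers=["CPUExecutionProvider"])

# Inputs in [0, 1), matching normalized MNIST pixels (and the region on
# which the pruning pairs are claimed equivalent).
x = np.random.rand(1, 28, 28).astype(np.float32)

ya = sess_a.run(None, {sess_a.get_inputs()[0].name: x})[0]
yb = sess_b.run(None, {sess_b.get_inputs()[0].name: x})[0]

print("max |a - b| =", np.max(np.abs(ya - yb)))  # expect float-level noise
```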
