from __future__ import print_function

import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import torchvision.models as models

import matplotlib.pyplot as plt
import numpy as np
from visdom import Visdom

# viz = Visdom()
# viz.delete_env('main')

batchsize = 128

DATA_FOLDER = os.path.split(os.path.realpath(__file__))[0] + '/Dataset/'
print("Dataset path: " + DATA_FOLDER)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

        # Spatial transformer localization network
        self.localization = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=7),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True),
            nn.Conv2d(8, 10, kernel_size=5),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True)
        )

        # Regressor for the 3 * 2 affine matrix
        self.fc_loc = nn.Sequential(
            nn.Linear(10 * 3 * 3, 32),
            nn.ReLU(True),
            nn.Linear(32, 3 * 2)
        )

        # Initialize the weights/bias with the identity transformation
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))

    # Spatial transformer network forward function
    def stn(self, x):
        xs = self.localization(x)
        xs = xs.view(-1, 10 * 3 * 3)
        theta = self.fc_loc(xs)
        theta = theta.view(-1, 2, 3)

        grid = F.affine_grid(theta, x.size())
        x = F.grid_sample(x, grid)

        return x

    def forward(self, x):
        # Transform the input
        x = self.stn(x)

        # Perform the usual forward pass
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
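
# A minimal sanity-check sketch (assumes the 1x28x28 MNIST input used below):
# the localization branch maps 28x28 -> 22x22 -> 11x11 -> 7x7 -> 3x3 with 10
# channels, which is where the 10 * 3 * 3 flatten in stn() comes from. This
# helper is illustrative only and is not called anywhere in this script.
def _check_stn_localization_shape():
    net = Net()
    with torch.no_grad():
        feats = net.localization(torch.zeros(1, 1, 28, 28))
    print(feats.shape)  # expected: torch.Size([1, 10, 3, 3])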

# accuracy: 99.01
class NetMnist(nn.Module):
    def __init__(self):
        super(NetMnist, self).__init__()
        channels = 32
        self.conv1_1 = nn.Conv2d(1, channels, kernel_size=5, padding=0)

        self.conv2_1 = nn.Conv2d(channels, channels, kernel_size=3, padding=0)
        self.conv2_2 = nn.Conv2d(channels, channels, kernel_size=3, padding=0)
        # self.conv2_3 = nn.Conv2d(channels, channels, kernel_size=5, padding=0)
        # self.conv2_4 = nn.Conv2d(channels, channels, kernel_size=5, padding=0)

        self.conv3_1 = nn.Conv2d(channels, channels, kernel_size=5, padding=0)
        self.conv3_2 = nn.Conv2d(channels, channels, kernel_size=5, padding=0)
        self.conv3_3 = nn.Conv2d(channels, channels, kernel_size=5, padding=0)
        self.conv3_4 = nn.Conv2d(channels, channels, kernel_size=5, padding=0)

        self.conv3_5 = nn.Conv2d(channels, channels, kernel_size=5, padding=0)
        self.conv3_6 = nn.Conv2d(channels, channels, kernel_size=5, padding=0)
        self.conv3_7 = nn.Conv2d(channels, channels, kernel_size=5, padding=0)
        self.conv3_8 = nn.Conv2d(channels, channels, kernel_size=5, padding=0)

        self.fc1 = nn.Linear(256, 10)

    def forward(self, x):
        con1 = F.relu(F.max_pool2d(self.conv1_1(x), 2))

        con2_1 = F.relu(F.max_pool2d(self.conv2_1(con1), 2))
        con2_2 = F.relu(F.max_pool2d(self.conv2_2(con1), 2))
        # con2_3 = F.relu(F.max_pool2d(self.conv2_3(con1), 2))
        # con2_4 = F.relu(F.max_pool2d(self.conv2_4(con1), 2))

        # Note: conv3_1..conv3_4 are applied to both branches here, so the
        # conv3_5..conv3_8 layers defined above are never actually used.
        con3_1 = F.relu(self.conv3_1(con2_1))
        con3_2 = F.relu(self.conv3_2(con2_1))
        con3_3 = F.relu(self.conv3_3(con2_1))
        con3_4 = F.relu(self.conv3_4(con2_1))

        con3_5 = F.relu(self.conv3_1(con2_2))
        con3_6 = F.relu(self.conv3_2(con2_2))
        con3_7 = F.relu(self.conv3_3(con2_2))
        con3_8 = F.relu(self.conv3_4(con2_2))

        cc = [con3_1, con3_2, con3_3, con3_4, con3_5, con3_6, con3_7, con3_8]
        # cc = [con4_1, con4_2, con4_3, con4_4, con4_5, con4_6, con4_7, con4_8]
        ccs = torch.cat(cc, 1)
        ccs = ccs.view(-1, 256)
        x = self.fc1(ccs)
        return x
        # return F.log_softmax(x, dim=1)
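
# A small shape sketch (assumption: 1x28x28 input, as in the MNIST loaders
# below): each branch ends as 32 channels at 1x1 resolution, so concatenating
# 8 branches gives 8 * 32 = 256 features, matching nn.Linear(256, 10).
# Illustrative only; not called anywhere in this script.
def _check_netmnist_output_shape():
    with torch.no_grad():
        out = NetMnist()(torch.zeros(2, 1, 28, 28))
    print(out.shape)  # expected: torch.Size([2, 10])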

class NetMnistRatio(nn.Module):
    def __init__(self):
        super(NetMnistRatio, self).__init__()
        channels = 32

        # 28x28 -> 24x24 -> 12x12 -> 10x10 -> 5x5 -> 1x1, ending at 128 channels
        self.conv1 = nn.Conv2d(1, channels, kernel_size=5)
        self.conv2 = nn.Conv2d(channels, channels * 2, kernel_size=3)
        self.conv3 = nn.Conv2d(channels * 2, channels * 4, kernel_size=5)

        self.fc1 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = F.relu(self.conv3(x))

        x = x.view(-1, 128)
        x = self.fc1(x)
        return x

# accuracy: 96.4, trains slowly
class NetMnistNormal(nn.Module):
    def __init__(self):
        super(NetMnistNormal, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return x

# accuracy: 98.9
class NetMnistFCNN(nn.Module):
    def __init__(self):
        super(NetMnistFCNN, self).__init__()
        channels = 128
        # Fully convolutional: 28x28 -> 14x14 -> 6x6 -> 3x3 -> 1x1 with 10 output channels
        self.conv1 = nn.Conv2d(1, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=0)
        self.conv3 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(channels, 10, kernel_size=3, padding=0)

    def forward(self, x):
        con1 = F.relu(F.max_pool2d(self.conv1(x), 2))
        con2 = F.relu(F.max_pool2d(self.conv2(con1), 2))
        con3 = F.relu(F.max_pool2d(self.conv3(con2), 2))
        con4 = self.conv4(con3)
        x = con4.view(-1, 10)
        return x

# region load dataset
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root=DATA_FOLDER, train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])), batch_size=batchsize, shuffle=True, num_workers=8)

# MNIST test split, used here as the validation set
val_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root=DATA_FOLDER, train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])), batch_size=batchsize, shuffle=True, num_workers=8)
# endregion load dataset
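
# Sketch of where the Normalize constants come from (assumption: they are the
# mean/std of the MNIST training pixels scaled to [0, 1]). Recomputing them with
# the helper below should give values close to 0.1307 and 0.3081. Illustrative
# only; not called anywhere in this script.
def _compute_mnist_mean_std():
    plain = datasets.MNIST(root=DATA_FOLDER, train=True, download=True,
                           transform=transforms.ToTensor())
    loader = torch.utils.data.DataLoader(plain, batch_size=1000)
    total, total_sq, count = 0.0, 0.0, 0
    for data, _ in loader:
        total += data.sum().item()
        total_sq += (data ** 2).sum().item()
        count += data.numel()
    mean = total / count
    std = (total_sq / count - mean ** 2) ** 0.5
    return mean, std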

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = NetMnistRatio().to(device)
#########################################################
# Note: Adadelta's default learning rate in PyTorch is 1.0, so lr=0.000001 is a
# very small step size and the model will learn slowly.
optimizer = optim.Adadelta(model.parameters(), lr=0.000001)
lossfunc = torch.nn.CrossEntropyLoss().to(device)
# lossfunc = torch.nn.NLLLoss()

# gpu_ids = [0, 1, 2, 3]
# model = torch.nn.DataParallel(model, device_ids=gpu_ids)
# optimizer = torch.nn.DataParallel(optimizer, device_ids=gpu_ids)
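
# Why the loss choice matters: CrossEntropyLoss applies log_softmax internally,
# so it expects raw logits (which NetMnistRatio, NetMnist, NetMnistNormal and
# NetMnistFCNN return), whereas NLLLoss expects log-probabilities (which
# Net.forward returns via F.log_softmax). The sketch below only demonstrates
# that equivalence and is not called anywhere in this script.
def _loss_equivalence_demo():
    logits = torch.randn(4, 10)
    target = torch.randint(0, 10, (4,))
    a = torch.nn.CrossEntropyLoss()(logits, target)
    b = torch.nn.NLLLoss()(F.log_softmax(logits, dim=1), target)
    print(torch.allclose(a, b))  # expected: True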

def train(epoch):
    model.train()
    correct = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        optimizer.zero_grad()  # clear gradients from the previous batch
        output = model(data)
        loss = lossfunc(output, target)
        loss.backward()
        optimizer.step()

        # Count correct predictions (index of the max logit per sample)
        pred = output.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).sum().item()

        # Print progress once near the end of each epoch
        if batch_idx == (len(train_loader) - 2):
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    lens = len(train_loader.dataset)
    correct = float(correct) / lens
    testtxt.write(str(correct) + '\n')
    print('Train set accuracy: {:.6f}'.format(correct))

def val():
    with torch.no_grad():
        model.eval()
        correct = 0
        for data, target in val_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Get the index of the max logit / log-probability
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
        lens = len(val_loader.dataset)
        correct = float(correct) / lens
        valtxt.write(str(correct) + '\n')
        print('Val set accuracy: {:.6f}'.format(correct))


# Per-epoch accuracies are appended to these files (training accuracy goes to
# TestPrecision.txt, validation accuracy to ValPrecision.txt)
testtxt = open('TestPrecision.txt', 'w')
valtxt = open('ValPrecision.txt', 'w')

# Train for up to 999 epochs, validating after each epoch
for epoch in range(1, 1000):
    train(epoch)
    val()

testtxt.close()
valtxt.close()

# Visdom visualization snippets (commented out):
# mm = model.conv1(ad)
# datodis = mm * 256 + 128
# datodis = datodis.view(64, 1, 24 * 10, 24)
# imaglis.append(datodis.detach().cpu().numpy()[0:8, :, :, :])

# cdsa = np.reshape(np.array(imaglis), newshape=[-1, 1, 240, 24])
# viz.images(cdsa, win=imagewin, opts=dict(title='Random!', caption='How random.'), nrow=8, padding=2)

# Visualize the STN transformation on some input batch
# visualize_stn()

# plt.ioff()
# plt.show()