witnn/Other/UnSuppervise.py

from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import torchvision.models as models
import matplotlib.pyplot as plt
import numpy as np
from visdom import Visdom
import cv2
# viz=Visdom()
# viz.delete_env('main')
DATA_FOLDER = os.path.split(os.path.realpath(__file__))[0]+'/Dataset/'
print("Dataset Path :" + DATA_FOLDER)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Training dataset
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root=DATA_FOLDER, train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       # transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=1, shuffle=True, num_workers=1)
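
# NetMnist below is a single 1-channel-in / 1-channel-out 3x3 convolution. On
# every forward pass the kernel is re-normalized in place (zero mean, unit L1
# norm) before it is applied, and the layer returns the absolute filter response.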
class NetMnist(nn.Module):
    def __init__(self):
        super(NetMnist, self).__init__()
        channels = 1
        self.conv1 = nn.Conv2d(1, channels, kernel_size=3, padding=0)

    def forward(self, x):
        da = self.conv1.weight.data
        da = da.view(9)
        damean = da.mean()
        da = da - damean
        daabssum = da.abs().sum()
        da = da / daabssum
        da = da.view(1, 1, 3, 3)
        self.conv1.weight.data = da
        con1 = self.conv1(x)
        con1 = con1.abs()
        # con1 = F.sigmoid(F.max_pool2d(self.conv1(x), 2))
        # con2 = F.sigmoid(F.max_pool2d(self.conv2(con1), 2))
        # con3 = F.sigmoid(F.max_pool2d(self.conv3(con2), 2))
        # con4 = F.sigmoid(self.conv4(con3))
        # x = con4.view(-1, 10)
        return con1
model = NetMnist().to(device)
#########################################################
optimizer = optim.SGD(model.parameters(), lr=0.1)
#lossfunc=torch.nn.CrossEntropyLoss()
lossfunc=torch.nn.MSELoss()
gpu_ids=[0,1,2,3]
#model = torch.nn.DataParallel(model, device_ids = gpu_ids)
#optimizer = torch.nn.DataParallel(optimizer, device_ids = gpu_ids)
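
# Unsupervised objective used in train(): the target is the model's own output
# shifted up by 0.1 and detached, so minimizing the MSE keeps pushing the
# absolute filter response higher under the zero-mean / unit-L1 kernel constraint.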
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        # Self-generated target: the current output plus 0.1, detached so no
        # gradient flows through the target.
        target = output + 0.1
        var_no_grad = target.detach()
        loss = lossfunc(output, var_no_grad)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0 and batch_idx > 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            # Print the current kernel, normalized the same way as in forward().
            da = model.conv1.weight.data
            da = da.view(9)
            damean = da.mean()
            da = da - damean
            daabssum = da.abs().sum()
            da = da / daabssum
            da = da.view(1, 1, 3, 3)
            print(da)
for epoch in range(1, 3000):
    train(epoch)
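
# A minimal sketch (not part of the original script) of how the learned 3x3
# kernel could be inspected with the matplotlib import above once training
# finishes; the variable name `kernel` is illustrative.
kernel = model.conv1.weight.data.view(3, 3).cpu().numpy()
plt.imshow(kernel, cmap='gray')
plt.colorbar()
plt.title('Learned 3x3 kernel')
plt.show()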