Update unsuper learning.
parent 22464e7724
commit f3690fd47f
@@ -105,7 +105,7 @@ model = ConvNet().to(device)
 model.train()
 
 # Train the model unsuper
-epochs = 10
+epochs = 20
 model.conv1.weight.requires_grad = True
 model.conv2.weight.requires_grad = False
 model.fc1.weight.requires_grad = False
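The hunk above sets up the unsupervised phase: the epoch count is raised to 20 and only conv1 is left trainable while conv2 and fc1 are frozen. A minimal sketch of that freezing pattern, using a stand-in module since the ConvNet definition is outside this diff (layer names mirror the script, shapes are assumptions):

import torch.nn as nn

# Stand-in for the script's ConvNet; only the layer names matter here.
class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 8, 5)    # assumed shapes, illustration only
        self.conv2 = nn.Conv2d(8, 16, 5)
        self.fc1 = nn.Linear(16, 10)

model = TinyNet()

# Unsupervised phase: only conv1 receives weight gradients in backward().
model.conv1.weight.requires_grad = True
model.conv2.weight.requires_grad = False
model.fc1.weight.requires_grad = False

trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(trainable)  # conv1.weight plus the bias terms, which the diff leaves untouched

Note that the diff freezes only the weight tensors; the biases keep their default requires_grad=True.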
@@ -117,16 +117,24 @@ for epoch in range(epochs):
 
         outputs = outputs.permute(0, 2, 3, 1) # 64 8 24 24 -> 64 24 24 8
         sample = outputs.reshape(-1, outputs.shape[3]) # -> 36864 8
-        abs = torch.abs(sample)
+        abs = torch.abs(sample).detach()
         max, max_index = torch.max(abs, dim=1)
-        label = sample * 0.9
-        all = range(0, label.shape[0])
-        label[all, max_index] = label[all, max_index] * 1.1
+        mean = torch.mean(abs, dim=1)
+        mean = torch.expand_copy(mean.reshape(-1, 1), sample.shape)
+        max = torch.expand_copy(max.reshape(-1, 1), sample.shape)
+
+        all = range(0, sample.shape[0])
+        ratio_max = abs / mean
+        ratio_nor = (max - abs) / max
+        ratio_nor[all, max_index] = ratio_max[all, max_index].clone()
+        ratio_nor = torch.where(torch.isnan(ratio_nor), 1.0, ratio_nor)
+        label = sample * ratio_nor
+
         loss = F.l1_loss(sample, label)
         model.conv1.weight.grad = None
         loss.backward()
 
-        model.conv1.weight.data = model.conv1.weight.data - model.conv1.weight.grad * 100
+        model.conv1.weight.data = model.conv1.weight.data - model.conv1.weight.grad * 10
 
         if (i + 1) % 100 == 0:
            print(f"Epoch [{epoch+1}/{epochs}], Step [{i+1}/{n_total_steps}], Loss: {loss.item():.8f}")
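The new label construction replaces the fixed 0.9/1.1 scaling with a per-position ratio, and the hand-rolled update on conv1.weight now uses a step factor of 10. A minimal, self-contained sketch of the ratio computation on a toy 2x3 tensor (the values, torch.arange indexing, and expand_as in place of torch.expand_copy are illustrative assumptions):

import torch

# Toy stand-in for `sample` (rows = positions, columns = channels).
sample = torch.tensor([[0.2, -1.0, 0.5],
                       [0.0,  0.3, -0.1]])

abs_ = torch.abs(sample).detach()
max_, max_index = torch.max(abs_, dim=1)
mean_ = torch.mean(abs_, dim=1)

# Broadcast the per-row statistics back to the full shape.
mean_full = mean_.reshape(-1, 1).expand_as(sample)
max_full = max_.reshape(-1, 1).expand_as(sample)

rows = torch.arange(sample.shape[0])
ratio_max = abs_ / mean_full               # >= 1 on each row's dominant channel
ratio_nor = (max_full - abs_) / max_full   # in [0, 1): shrinks the other channels
ratio_nor[rows, max_index] = ratio_max[rows, max_index].clone()
ratio_nor = torch.where(torch.isnan(ratio_nor), torch.ones_like(ratio_nor), ratio_nor)

label = sample * ratio_nor   # dominant channel amplified, the rest suppressed
print(label)

With F.l1_loss(sample, label) and the manual gradient step on conv1.weight.data, this appears to push each spatial position toward a sharper, winner-take-all channel response; the isnan guard covers all-zero rows, where both ratios divide by zero.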
@@ -136,6 +144,10 @@ show.DumpTensorToImage(w.view(-1, w.shape[2], w.shape[3]).cpu(), "conv1_weight_g
 w = model.conv1.weight.data
 show.DumpTensorToImage(w.view(-1, w.shape[2], w.shape[3]), "conv1_weight_update.png", Contrast=[-1.0, 1.0])
 
+# loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False)
+# images, labels = next(iter(loader))
+# images = images.to(device)
+
 # Train the model
 model.conv1.weight.requires_grad = False
 model.conv2.weight.requires_grad = True
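The end of this hunk flips the flags for the supervised phase: conv1 is now frozen and conv2 is trainable again. The supervised loop itself sits outside the diff; one common way to respect these flags there is to hand the optimizer only the parameters that still require gradients. A hedged sketch, reusing the TinyNet stand-in from the earlier note (optimizer type and learning rate are assumptions):

import torch

model = TinyNet()  # stand-in from the earlier sketch; the script uses its own ConvNet
model.conv1.weight.requires_grad = False   # frozen after the unsupervised phase
model.conv2.weight.requires_grad = True    # trained with labels from here on

# Give the optimizer only the parameters that still require gradients, so the
# unsupervised conv1 filters stay exactly as visualised above.
trainable_params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(trainable_params, lr=0.01)   # lr is an assumption

# Typical loop body: optimizer.zero_grad(); loss.backward(); optimizer.step()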
@@ -155,7 +167,7 @@ for epoch in range(num_epochs):
         if (i + 1) % 100 == 0:
             print(f"Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{n_total_steps}], Loss: {loss.item():.4f}")
 
-# print("Finished Training")
+print("Finished Training")
 
 test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False)
 test_loader = iter(test_loader)
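The test DataLoader is rebuilt with batch_size=1 and wrapped in an iterator, which suggests single test samples are pulled on demand further down the script, outside this diff. A minimal sketch of that usage pattern with a stand-in dataset, since test_dataset and the input shape are defined elsewhere:

import torch

# Stand-in for test_dataset from the script (tensor shapes are assumptions).
test_dataset = torch.utils.data.TensorDataset(
    torch.randn(16, 1, 28, 28), torch.randint(0, 10, (16,))
)

test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False)
test_loader = iter(test_loader)

# Each next() call yields one (image, label) pair as batch-of-one tensors.
images, labels = next(test_loader)
print(images.shape, labels.shape)   # torch.Size([1, 1, 28, 28]) torch.Size([1])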