Revert "Refine kernal Random to w*h*16 kinds possiable."
This reverts commit 675cd8567e
.
This commit is contained in:
parent
675cd8567e
commit
e0de909d62
|
@@ -16,6 +16,7 @@ import random
 import cv2
+
 
 
 CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
 print("Current Path :" + CurrentPath)
 
@@ -16,7 +16,6 @@ import random
 import cv2
 from tqdm import tqdm
 import threading
-from itertools import combinations
 
 
 CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
@@ -27,19 +26,6 @@ import Model as Model
 from tools import utils, Train, Loader
 
 
-
-
-
-
-# iterable = combinations(range(72), 4)
-# count = iterable.count()
-# pool = tuple(iterable)
-# n = len(pool)
-# indices = sorted(random.sample(range(n), 2))
-# fdafda = tuple(pool[i] for i in indices)
-
-
 def GetScore(netmodel,layer,SearchLayer,DataSet):
     netmodel.eval()
     sample = utils.SetDevice(torch.empty((SearchLayer.out_channels,0)))
@@ -198,12 +184,10 @@ def UnsuperLearnFindBestWeight(netmodel, layer, weight, dataloader, databatchs=1
 
 
+
 # search best weight from random data
-# Test total sample size = stepsize * stepsize * interation
-# Random kernel number = in_channels * kernel_size[0] * kernel_size[1] * 16
-def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, stepsize=20, interation=1000):
+def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, stepsize=1000, interation=1000):
     interationbar = tqdm(total=interation)
     forwardlayer = layer -1
-    samplesize = stepsize*stepsize
     netmodel.eval()
     tl = netmodel.features[layer]
     outchannels = tl.out_channels
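For context on the signature change above: the two comments the revert removes describe the refined strategy this commit undoes. That version generated a single fixed pool of in_channels * kernel_size[0] * kernel_size[1] * 16 random kernels (hence the reverted commit title "w*h*16 kinds possiable") and defaulted stepsize to 20; the restored version goes back to stepsize=1000 and regenerates stepsize fresh random kernels every iteration, scoring stepsize*stepsize random channel combinations per iteration. A rough back-of-the-envelope sketch of the counts involved; the layer dimensions are assumed example values, not taken from this repository:

# Illustration only: candidate counts implied by the two versions of
# UnsuperLearnSearchBestWeight. Layer dimensions are assumed example values.
in_channels, kernel_h, kernel_w = 8, 5, 5

# Reverted refinement: one fixed pool of kernels, sampled with stepsize=20.
samplekernelsize = in_channels * kernel_h * kernel_w * 16   # 3200 candidate kernels
refined_per_iter = 20 * 20                                  # 400 combinations per iteration

# Restored version: stepsize fresh random kernels regenerated every iteration.
restored_per_iter = 1000 * 1000                             # 1,000,000 combinations per iteration

interation = 1000
# "Test total sample size = stepsize * stepsize * interation" from the removed comment:
print(samplekernelsize, refined_per_iter * interation, restored_per_iter * interation)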
@@ -227,55 +211,93 @@ def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, st
     for i in range(outchannels):
         shift.append(1<<i)
     shift = utils.SetDevice(torch.from_numpy(np.array(shift).astype("uint8")))
-    newweightshape = list(newlayer.weight.data.shape)
-    samplekernelsize = newlayer.in_channels*newlayer.kernel_size[0]*newlayer.kernel_size[1]*16
-    newweightshape[0] = samplekernelsize
-    newweight = np.random.uniform(-1.0,1.0,newweightshape).astype("float32")
-    newweight = newweight.reshape((-1,newweightshape[-1]*newweightshape[-2]))
-    newweight = np.swapaxes(newweight,0,1)-np.mean(newweight,-1)
-    newweight = np.swapaxes(newweight,0,1).reshape(newweightshape)
-    newlayer.weight.data=utils.SetDevice(torch.from_numpy(newweight))
-    outputs = newlayer(datasnetout).transpose(0,1)
+    # shift = torch.from_numpy(np.array(shift).astype("uint8"))
 
-    indexs = np.random.randint(0, samplekernelsize, samplesize*interation*outchannels).reshape(interation,samplesize, -1)
 
-    # da = np.random.randint(0,5,2*9).reshape(9,2)
-    # te = np.sort(da,1)
-    # tg = np.unique(te ,axis=0)
-    # list(combinations(range(newweightshape[0]), 8))
 
     bestweight = []
     bestentropy = [100.0]
+    bittedset = []
+    bittedLock = threading.Lock()
+    bestLock = threading.Lock()
 
+    class CPU (threading.Thread):
+        def __init__(self):
+            threading.Thread.__init__(self)
+        def run(self):
+            hasdata = 0
+            bittedLock.acquire()
+            if len(bittedset)>0:
+                bitted = bittedset.pop()
+                hasdata = 1
+            bittedLock.release()
+            if hasdata > 0:
+                entropys = []
+                for i in range(len(indexs)):
+                    histced = bitted[:,i].histc(256,0,255).type(torch.float32)
+                    histced = histced[histced>0]
+                    histced = histced/histced.sum()
+                    entropy = (histced.log2()*histced).sum()
+                    entropys.append(entropy.numpy())
+                argmin = np.argmin(entropys)
+
+                bestLock.acquire()
+                if entropys[argmin] < bestentropy[0]:
+                    bestweight = newweight[indexs[argmin]]
+                    bestentropy[0] = entropys[argmin]
+                    print("finded better entropy")
+                bestLock.release()
+
     for j in range(interation):
-        # 400 8 4096 5 5
-        output = outputs[indexs[j]]
-        # 102400 400 8
-        reshaped = output.reshape(samplesize, outchannels, -1).permute(2, 0, 1)
-        # 400 8
-        meaned = reshaped.mean(0)
-        # 102400 400
-        # bitted = ((reshaped > meaned)* shift).sum(2).type(torch.float32).detach().cpu()
-        bitted = ((reshaped > meaned)* shift).sum(2).type(torch.float32)
-        entropys = []
-        for i in range(samplesize):
-            histced = bitted[:,i].histc(256,0,255).type(torch.float32)
-            histced = histced[histced>0]
-            histced = histced/histced.sum()
-            entropy = (histced.log2()*histced).sum()
-            entropys.append(entropy.detach().cpu().numpy())
-        argmin = np.argmin(entropys)
-        if entropys[argmin] < bestentropy:
-            bestweight = newweight[indexs[j][argmin]]
-            bestentropy = entropys[argmin]
+        newweightshape = list(newlayer.weight.data.shape)
+        newweightshape[0] = stepsize
+        newweight = np.random.uniform(-1.0,1.0,newweightshape).astype("float32")
+        newweight = newweight.reshape((-1,newweightshape[-1]*newweightshape[-2]))
+        newweight = np.swapaxes(newweight,0,1)-np.mean(newweight,-1)
+        newweight = np.swapaxes(newweight,0,1).reshape(newweightshape)
+
+        newlayer.weight.data=utils.SetDevice(torch.from_numpy(newweight))
+        outputs = newlayer(datasnetout).transpose(0,1)
+        samplesize = stepsize*stepsize
+        indexs = np.random.randint(0,stepsize,samplesize*outchannels).reshape(samplesize,-1)
+        # 1000 8 4096 5 5
+        outputs = outputs[indexs]
+        # 102400 1000 8
+        reshaped = outputs.reshape(samplesize, outchannels, -1).permute(2, 0, 1)
+        # 1000 8
+        meaned = reshaped.mean(0)
+        # 102400 1000
+        bitted = ((reshaped > meaned)* shift).sum(2).type(torch.float32).detach().cpu()
+
+        bittedLock.acquire()
+        bittedset.append(bitted)
+        bittedLock.release()
+        threadcpu = CPU()
+        threadcpu.start()
+
+        # entropys = []
+        # for i in range(len(indexs)):
+        # histced = bitted[:,i].histc(256,0,255).type(torch.float32)
+        # histced = histced[histced>0]
+        # histced = histced/histced.sum()
+        # entropy = (histced.log2()*histced).sum()
+        # entropys.append(entropy.detach().cpu().numpy())
+
+        # argmin = np.argmin(entropys)
+        # if entropys[argmin] < bestentropy:
+        # bestweight = newweight[indexs[argmin]]
+        # bestentropy = entropys[argmin]
+
         interationbar.update(1)
-        interationbar.set_description("entropy:"+str(bestentropy))
+        interationbar.set_description("left:"+str(len(bittedset)))
 
+    while bittedset:
+        time.sleep(100)
 
     interationbar.close()
     return bestweight
 
 
 def SetModelConvWeight(model, layer, weight):
     w = utils.SetDevice(torch.from_numpy(weight))
     model.features[layer].weight.data = w
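The body restored above regenerates stepsize random kernels per iteration on the GPU, binarizes each output channel against its mean response, packs the outchannels bits into one byte code per position using the shift table, and hands the resulting bitted tensor to a CPU thread that scores every sampled channel combination by the sum of p*log2(p) over its 256-bin code histogram, keeping the combination with the smallest such value. A minimal self-contained sketch of that scoring step; the tensor sizes and the random stand-in for the layer responses are assumptions for illustration, only the binarize/pack/histogram sequence mirrors the restored lines:

import numpy as np
import torch

# Sketch of the restored scoring step: binarize responses against their mean,
# pack channel bits into byte codes, histogram the codes, and score each
# candidate combination by sum(p * log2(p)). Sizes below are assumed examples.
out_channels = 8        # one bit per output channel, as in the shift table (1 << i)
samplesize = 16         # candidate channel combinations (assumed)
n_positions = 1024      # flattened batch * spatial positions (assumed)

shift = torch.from_numpy((1 << np.arange(out_channels)).astype("float32"))

# Stand-in for `reshaped`: (positions, combinations, channels).
responses = torch.randn(n_positions, samplesize, out_channels)

meaned = responses.mean(0)                      # (samplesize, out_channels)
bitted = ((responses > meaned) * shift).sum(2)  # (n_positions, samplesize) codes in 0..255

entropys = []
for i in range(samplesize):
    histced = bitted[:, i].histc(256, 0, 255)   # 256-bin histogram of the codes
    histced = histced[histced > 0]
    histced = histced / histced.sum()
    entropys.append((histced.log2() * histced).sum().item())  # sum p*log2(p), <= 0

argmin = int(np.argmin(entropys))
print("selected combination:", argmin, "score:", entropys[argmin])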
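The restored version also moves this scoring off the main loop: each iteration appends its bitted tensor to bittedset under bittedLock and starts a fresh CPU thread to consume it, and after the loop a drain loop waits while the list is non-empty (note that time.sleep(100) waits 100 seconds between checks). A stripped-down sketch of that hand-rolled producer/consumer pattern, with hypothetical names and a trivial stand-in for the scoring work:

import threading
import time

# A plain list guarded by a Lock plays the role of bittedset/bittedLock, and one
# short-lived worker thread is started per produced item, as in the restored loop.
workset = []
worklock = threading.Lock()
results = []

class Worker(threading.Thread):
    def run(self):
        item = None
        with worklock:
            if workset:
                item = workset.pop()
        if item is not None:
            results.append(sum(item))   # stand-in for the histogram/entropy scoring

threads = []
for j in range(3):
    with worklock:
        workset.append([j, j + 1, j + 2])
    t = Worker()
    t.start()                           # one thread per batch
    threads.append(t)

# Drain loop analogous to `while bittedset: time.sleep(100)`, with a shorter sleep.
while workset:
    time.sleep(0.1)
for t in threads:
    t.join()
print(results)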
Binary file not shown.
Binary file not shown.