Refine random kernel generation to w*h*16 possible kinds.
Do not use the CPU thread any more.
This commit is contained in:
parent 8404d93880
commit 675cd8567e
@@ -16,7 +16,6 @@ import random
import cv2



CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
print("Current Path :" + CurrentPath)

@@ -16,6 +16,7 @@ import random
import cv2
from tqdm import tqdm
import threading
from itertools import combinations


CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
@@ -26,6 +27,19 @@ import Model as Model
from tools import utils, Train, Loader






# iterable = combinations(range(72), 4)
# count = iterable.count()
# pool = tuple(iterable)
# n = len(pool)
# indices = sorted(random.sample(range(n), 2))
# fdafda = tuple(pool[i] for i in indices)



def GetScore(netmodel,layer,SearchLayer,DataSet):
    netmodel.eval()
    sample = utils.SetDevice(torch.empty((SearchLayer.out_channels,0)))
@@ -184,10 +198,12 @@ def UnsuperLearnFindBestWeight(netmodel, layer, weight, dataloader, databatchs=1


# search best weight from random data
def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, stepsize=1000, interation=1000):
# Test total sample size = stepsize * stepsize * interation
# Random kernel number = in_channels * kernel_size[0] * kernel_size[1] * 16
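# e.g. (hypothetical sizes) in_channels = 8, kernel_size = (5, 5): random kernel pool = 8*5*5*16 = 3200 kernels;
# with stepsize = 20 and interation = 1000, 20*20*1000 = 400000 channel combinations are scored.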
def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, stepsize=20, interation=1000):
    interationbar = tqdm(total=interation)
    forwardlayer = layer -1

    samplesize = stepsize*stepsize
    netmodel.eval()
    tl = netmodel.features[layer]
    outchannels = tl.out_channels
@@ -211,93 +227,55 @@ def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, st
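    # One bit per output channel: shift = [1, 2, 4, ..., 2**(outchannels-1)] is later used to
    # pack the per-channel threshold results into a single integer code per pixel.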
    for i in range(outchannels):
        shift.append(1<<i)
    shift = utils.SetDevice(torch.from_numpy(np.array(shift).astype("uint8")))
    # shift = torch.from_numpy(np.array(shift).astype("uint8"))


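    # Build the whole random kernel pool (in_channels*kernel_h*kernel_w*16 kernels), make each
    # kernel slice zero-mean over its spatial window, and load it into the search layer so a
    # single forward pass evaluates every candidate kernel.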
    newweightshape = list(newlayer.weight.data.shape)
    samplekernelsize = newlayer.in_channels*newlayer.kernel_size[0]*newlayer.kernel_size[1]*16
    newweightshape[0] = samplekernelsize
    newweight = np.random.uniform(-1.0,1.0,newweightshape).astype("float32")
    newweight = newweight.reshape((-1,newweightshape[-1]*newweightshape[-2]))
    newweight = np.swapaxes(newweight,0,1)-np.mean(newweight,-1)
    newweight = np.swapaxes(newweight,0,1).reshape(newweightshape)
    newlayer.weight.data=utils.SetDevice(torch.from_numpy(newweight))
    outputs = newlayer(datasnetout).transpose(0,1)

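    # Pre-draw, for every iteration, 'samplesize' random combinations of 'outchannels' kernels
    # taken from the pool above (shape: interation x samplesize x outchannels).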
    indexs = np.random.randint(0, samplekernelsize, samplesize*interation*outchannels).reshape(interation,samplesize, -1)

    # da = np.random.randint(0,5,2*9).reshape(9,2)
    # te = np.sort(da,1)
    # tg = np.unique(te ,axis=0)
    # list(combinations(range(newweightshape[0]), 8))

    bestweight = []
    bestentropy = [100.0]
    bittedset = []
    bittedLock = threading.Lock()
    bestLock = threading.Lock()

    class CPU (threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
        def run(self):
            hasdata = 0
            bittedLock.acquire()
            if len(bittedset)>0:
                bitted = bittedset.pop()
                hasdata = 1
            bittedLock.release()
            if hasdata > 0:
                entropys = []
                for i in range(len(indexs)):
                    histced = bitted[:,i].histc(256,0,255).type(torch.float32)
                    histced = histced[histced>0]
                    histced = histced/histced.sum()
                    entropy = (histced.log2()*histced).sum()
                    entropys.append(entropy.numpy())
                argmin = np.argmin(entropys)

                bestLock.acquire()
                if entropys[argmin] < bestentropy[0]:
                    bestweight = newweight[indexs[argmin]]
                    bestentropy[0] = entropys[argmin]
                    print("finded better entropy")
                bestLock.release()

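    # Each iteration scores its sampled kernel combinations by the entropy of their packed
    # binary output codes and keeps the best combination seen so far.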
    for j in range(interation):
        newweightshape = list(newlayer.weight.data.shape)
        newweightshape[0] = stepsize
        newweight = np.random.uniform(-1.0,1.0,newweightshape).astype("float32")
        newweight = newweight.reshape((-1,newweightshape[-1]*newweightshape[-2]))
        newweight = np.swapaxes(newweight,0,1)-np.mean(newweight,-1)
        newweight = np.swapaxes(newweight,0,1).reshape(newweightshape)

        newlayer.weight.data=utils.SetDevice(torch.from_numpy(newweight))
        outputs = newlayer(datasnetout).transpose(0,1)
        samplesize = stepsize*stepsize
        indexs = np.random.randint(0,stepsize,samplesize*outchannels).reshape(samplesize,-1)
        # 1000 8 4096 5 5
        outputs = outputs[indexs]
        # 102400 1000 8
        reshaped = outputs.reshape(samplesize, outchannels, -1).permute(2, 0, 1)
        # 1000 8
        # 400 8 4096 5 5
        output = outputs[indexs[j]]
        # 102400 400 8
        reshaped = output.reshape(samplesize, outchannels, -1).permute(2, 0, 1)
        # 400 8
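        # Threshold each channel at its per-sample spatial mean, then pack the resulting 0/1
        # values across channels into one byte code per pixel using the 'shift' bit masks.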
        meaned = reshaped.mean(0)
        # 102400 1000
        bitted = ((reshaped > meaned)* shift).sum(2).type(torch.float32).detach().cpu()
        # 102400 400
        # bitted = ((reshaped > meaned)* shift).sum(2).type(torch.float32).detach().cpu()
        bitted = ((reshaped > meaned)* shift).sum(2).type(torch.float32)

        bittedLock.acquire()
        bittedset.append(bitted)
        bittedLock.release()
        threadcpu = CPU()
        threadcpu.start()

        # entropys = []
        # for i in range(len(indexs)):
        #     histced = bitted[:,i].histc(256,0,255).type(torch.float32)
        #     histced = histced[histced>0]
        #     histced = histced/histced.sum()
        #     entropy = (histced.log2()*histced).sum()
        #     entropys.append(entropy.detach().cpu().numpy())

        # argmin = np.argmin(entropys)
        # if entropys[argmin] < bestentropy:
        #     bestweight = newweight[indexs[argmin]]
        #     bestentropy = entropys[argmin]

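        # Note: 'entropy' below is the sum of p*log2(p) over the 256-bin code histogram, i.e. the
        # negative Shannon entropy, so argmin selects the most uniformly distributed code pattern.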
        entropys = []
        for i in range(samplesize):
            histced = bitted[:,i].histc(256,0,255).type(torch.float32)
            histced = histced[histced>0]
            histced = histced/histced.sum()
            entropy = (histced.log2()*histced).sum()
            entropys.append(entropy.detach().cpu().numpy())
        argmin = np.argmin(entropys)
        if entropys[argmin] < bestentropy:
            bestweight = newweight[indexs[j][argmin]]
            bestentropy = entropys[argmin]
        interationbar.update(1)
        interationbar.set_description("left:"+str(len(bittedset)))

    while bittedset:
        time.sleep(100)

        interationbar.set_description("entropy:"+str(bestentropy))
    interationbar.close()
    return bestweight




def SetModelConvWeight(model, layer, weight):
    w = utils.SetDevice(torch.from_numpy(weight))
    model.features[layer].weight.data = w
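# Usage sketch (assumption: called from the surrounding training script, names as in this file):
#   weight = UnsuperLearnSearchBestWeight(model, layer, dataloader)
#   SetModelConvWeight(model, layer, weight)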
Binary file not shown.
Binary file not shown.