Refine train,

commit 17de117bda
parent ef08359a94

wit/train.py (22 changed lines)
@@ -17,7 +17,7 @@ pretrain_model_name = None  # "qwen/Qwen-1_8B-Chat"
 learning_rate = 0.0001
 use_tril_attention_mask = None
 precision = "32-true"  # "precision:bf16-mixed,16-mixed,32-true"
-train_batch_size = 2
+train_batch_size = 1
 val_batch_size = 1
 num_proc = 8
 max_epochs = 1000
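The comment on `precision` lists PyTorch Lightning 2.x precision strings ("bf16-mixed", "16-mixed", "32-true"). As a hedged sketch of how such settings are typically consumed — assuming a `pl.Trainer` call, which this hunk does not itself show:

import pytorch_lightning as pl

# Assumed wiring, not visible in this diff: Lightning 2.x Trainer
# accepts these precision strings and an epoch cap directly.
precision = "32-true"  # or "bf16-mixed", "16-mixed"
max_epochs = 1000

trainer = pl.Trainer(precision=precision, max_epochs=max_epochs)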
				
			
@@ -28,23 +28,21 @@ seed = 42
 vocab_size = 256
 level_ratio = 6
 level = 4
-dataset_level = 1
+dataset_level = 1.5
+min_subitem = 2
 
 hidden_size = 1024  # 128 1024 2048  32
 num_attention_heads = 16  # 8 8 16
-num_hidden_layers = 3  # 6 12 24  3
+num_hidden_layers = 6  # 6 12 24  3
 
-mask_level = [0, 1]
-mask_idx = [0, 0]
-
 # mask_level = [0, 1]
 # mask_idx = [0, -1]
 mask_level = [0]
 mask_idx = [-1]
 
 # name = "vocab_ratio_level_data_hidden_head_layer"
 # name = "mask_level_idx"
-name = "single_token"
+name = "small"
 
-ver = f"{vocab_size}" + "_" + f"{level_ratio}" + "_" + f"{level}" + "_" + f"{dataset_level}"
+ver = f"{vocab_size}" + "_" + f"{level_ratio}" + "_" + f"{level}" + "_" + f"{min_subitem}" + "_" + f"{dataset_level}"
 ver = ver + "_" + f"{hidden_size}" + "_" + f"{num_attention_heads}" + "_" + f"{num_hidden_layers}"
 ver = ver + "_" + f"{mask_level}" + "_" + f"{mask_idx}"
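The new config folds every knob it changes into the run version string. A minimal sketch of the resulting `ver` value under this commit's settings (plain Python, no project imports needed):

# Reproduce the `ver` string built in wit/train.py after this commit,
# using the values the diff sets above.
vocab_size = 256
level_ratio = 6
level = 4
min_subitem = 2
dataset_level = 1.5
hidden_size = 1024
num_attention_heads = 16
num_hidden_layers = 6
mask_level = [0]
mask_idx = [-1]

ver = f"{vocab_size}" + "_" + f"{level_ratio}" + "_" + f"{level}" + "_" + f"{min_subitem}" + "_" + f"{dataset_level}"
ver = ver + "_" + f"{hidden_size}" + "_" + f"{num_attention_heads}" + "_" + f"{num_hidden_layers}"
ver = ver + "_" + f"{mask_level}" + "_" + f"{mask_idx}"
print(ver)  # 256_6_4_2_1.5_1024_16_6_[0]_[-1]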
				
			
@@ -61,8 +59,8 @@ if __name__ == "__main__":
     tokenizer = QWenTokenizer("./wit_b64.tiktoken", "./wit_char.tiktoken")
 
     start = vocab_size * (level_ratio**level)
-    size = vocab_size * (level_ratio**dataset_level)
-    raw_dataset = MeaningDataset(start, start + size, size, vocab_size, level_ratio)
+    size = vocab_size * int((level_ratio**dataset_level))
+    raw_dataset = MeaningDataset(start, start + size, size, vocab_size, level_ratio, min_subitem)
     raw_dataset.set_mask(mask_level, mask_idx)
     train_dataset, val_dataset = raw_dataset.split(0.9)
     train_dataloader = BatchGroupMeaningDataloader(train_dataset, train_batch_size)
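The int() cast is what makes the dataset_level = 1 -> 1.5 change above workable: level_ratio ** 1.5 is no longer an integer, so size would otherwise be a float where a count is needed. A quick check of the arithmetic, using only values visible in this diff:

vocab_size = 256
level_ratio = 6
level = 4
dataset_level = 1.5

start = vocab_size * (level_ratio**level)               # 256 * 1296 = 331776
size = vocab_size * int((level_ratio**dataset_level))   # 256 * int(14.696...) = 3584
print(start, size)  # 331776 3584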
				
			
			