Witllm/wit/train.py

import pytorch_lightning as pl
import torch
from lit_module import LitModule
from tokenization_qwen import QWenTokenizer
from logger import TBLogger
from meaning_dataset import MeaningDataset, BatchGroupMeaningDataloader
from wit.configuration import ModelConfig

pretrain_model_name = None  # e.g. "qwen/Qwen-1_8B-Chat"
learning_rate = 0.0001
use_tril_attention_mask = None
precision = "32-true"  # one of: "bf16-mixed", "16-mixed", "32-true"
train_batch_size = 1
val_batch_size = 1
num_proc = 8
max_epochs = 1000
strategy = "auto"
resume_from_ckpt_path = None
seed = 42
dataloader_workers = 2
vocab_size = 256
level_ratio = 5
level = 5
dataset_level = 3
min_subitem = 2
hidden_size = 128  # alternative settings: 1024, 2048, 32
num_attention_heads = 16  # alternative settings: 8
num_hidden_layers = 6  # alternative settings: 12, 24, 3
mask_level = [0, 1, 2]
mask_idx = [0, 0, -1]
# name = "vocab_ratio_level_data_hidden_head_layer"
# name = "mask_level_idx"
name = "bigger"
ver = f"{vocab_size}_{level_ratio}_{level}_{min_subitem}_{dataset_level}"
ver += f"_{hidden_size}_{num_attention_heads}_{num_hidden_layers}"
ver += f"_{mask_level}_{mask_idx}"
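# With the defaults above, ver resolves to "256_5_5_2_3_128_16_6_[0, 1, 2]_[0, 0, -1]".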

if __name__ == "__main__":
    torch.manual_seed(seed)

    config = ModelConfig()
    config.vocab_size = vocab_size
    config.hidden_size = hidden_size
    config.num_hidden_layers = num_hidden_layers
    config.num_attention_heads = num_attention_heads

    lit_module = LitModule(pretrain_model_name, learning_rate, config, use_tril_attention_mask)
    tokenizer = QWenTokenizer("./wit_b64.tiktoken", "./wit_char.tiktoken")

    start = vocab_size * (level_ratio**level)
    size = vocab_size * int(level_ratio**dataset_level)
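    # With the defaults above: start = 256 * 5**5 = 800000 and size = 256 * 5**3 = 32000,
    # so the dataset below is built over the id range [800000, 832000).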
    raw_dataset = MeaningDataset(start, start + size, vocab_size, None, level_ratio, min_subitem)
    # print(raw_dataset.token_frequency())
    raw_dataset.set_mask(mask_level, mask_idx)
    train_dataset, val_dataset = raw_dataset.split(0.9)
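    # Optional sanity check (a sketch; assumes the split datasets expose __len__):
    # print(f"train samples: {len(train_dataset)}  val samples: {len(val_dataset)}")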
    train_dataloader = BatchGroupMeaningDataloader(train_dataset, train_batch_size).dataloader(dataloader_workers)
    val_dataloader = BatchGroupMeaningDataloader(val_dataset, val_batch_size).dataloader(dataloader_workers)
    # for i in range(len(train_dataloader)):
    #     print(train_dataloader.print_mapping(i))
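    # Trade float32 matmul precision for speed on GPUs that support lower-precision tensor cores.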
    torch.set_float32_matmul_precision("medium")
    lit_trainer = pl.Trainer(
        accelerator="cuda",
        precision=precision,
        logger=TBLogger("./log/", name=name, version=ver, default_hp_metric=False),
        strategy=strategy,
        max_epochs=max_epochs,
    )
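    # Checkpointing sketch (not in the original script, shown as an assumption): a
    # pytorch_lightning ModelCheckpoint callback could be passed to the Trainer above via
    # callbacks=[checkpoint_cb]; the monitored metric name must match whatever LitModule logs.
    # checkpoint_cb = pl.callbacks.ModelCheckpoint(monitor="val_loss", save_top_k=1, mode="min")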
    lit_trainer.fit(
        lit_module,
        train_dataloaders=train_dataloader,
        val_dataloaders=val_dataloader,
        ckpt_path=resume_from_ckpt_path,
    )