# Witllm/wit/train.py

import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, random_split
from transformers import (
    DefaultDataCollator,
    set_seed,
)
from modelscope import snapshot_download
from lit_module import LitModule
from tokenization_qwen import QWenTokenizer
from logger import TBLogger
from special_dataset import SpecialDataset
from meaning_dataset import MeaningDataset
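
# Training configuration: all hyperparameters are hard-coded below; edit them
# in place before launching.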
model_name = "qwen/Qwen-1_8B-Chat"
learning_rate = 0.0001
use_tril_attention_mask = None
precision = "32-true"  # options: "bf16-mixed", "16-mixed", "32-true"
tokenizer_name_or_path = None
train_batch_size = 16
val_batch_size = 16
num_proc = 8
max_epochs = 1000
strategy = "auto"
resume_from_ckpt_path = None
seed = 42
vocab_size = 4096

if __name__ == "__main__":
    if tokenizer_name_or_path is None:
        tokenizer_name_or_path = model_name
    set_seed(seed)

    model_dir = snapshot_download(model_name)
    lit_module = LitModule(model_dir, learning_rate, use_tril_attention_mask)
    tokenizer = QWenTokenizer("./wit_b64.tiktoken", "./wit_char.tiktoken")
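
    # Build the synthetic training corpus; the commented-out SpecialDataset is
    # an alternative dataset kept for reference.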
    # raw_dataset = SpecialDataset()
    raw_dataset = MeaningDataset(start=131072, end=1048576, size=32768)
    # Hold out 5% of the samples for validation.
    train_dataset, val_dataset = random_split(raw_dataset, [0.95, 0.05])
    # daf = next(iter(train_dataset))["input_ids"].numpy().tolist()

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=train_batch_size,
        num_workers=num_proc,
        collate_fn=DefaultDataCollator(),
        persistent_workers=True,
        shuffle=True,
    )
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=val_batch_size,
        num_workers=num_proc,
        collate_fn=DefaultDataCollator(),
        persistent_workers=True,
    )
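
    # Allow lower-precision internals for float32 matmuls in exchange for speed.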
    torch.set_float32_matmul_precision("medium")

    lit_trainer = pl.Trainer(
        accelerator="gpu",
        precision=precision,
        logger=TBLogger("./", default_hp_metric=False),
        strategy=strategy,
        max_epochs=max_epochs,
    )
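
    # Kick off training; ckpt_path=None starts from scratch, while a path
    # resumes from that checkpoint.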
    lit_trainer.fit(
        lit_module,
        train_dataloaders=train_dataloader,
        val_dataloaders=val_dataloader,
        ckpt_path=resume_from_ckpt_path,
    )
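
# To resume a previous run, set resume_from_ckpt_path above to a Lightning
# checkpoint file, e.g. "lightning_logs/version_0/checkpoints/last.ckpt"
# (hypothetical path; the actual location depends on the logger configuration).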