diff --git a/lit_train.py b/lit_train.py
index 9a9b3bd..f0b0fe2 100644
--- a/lit_train.py
+++ b/lit_train.py
@@ -22,11 +22,11 @@ from utils import load_tokenizer
 def split_raw_dataset(
     raw_dataset: datasets.DatasetDict,
 ) -> Tuple[datasets.Dataset, datasets.Dataset]:
-    if 'validation' in raw_dataset:
-        train_dataset, val_dataset = raw_dataset['train'], raw_dataset['validation']
+    if "validation" in raw_dataset:
+        train_dataset, val_dataset = raw_dataset["train"], raw_dataset["validation"]
     else:
-        raw_dataset = raw_dataset['train'].train_test_split(test_size=0.05, seed=args.seed)
-        train_dataset, val_dataset = raw_dataset['train'], raw_dataset['test']
+        raw_dataset = raw_dataset["train"].train_test_split(test_size=0.05, seed=args.seed)
+        train_dataset, val_dataset = raw_dataset["train"], raw_dataset["test"]
     return train_dataset, val_dataset
 
 
@@ -39,32 +39,44 @@ def process_dataset(dataset: datasets.Dataset, tokenizer: PreTrainedTokenizer) -
             k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
             for k, t in concatenated_examples.items()
         }
-        result['labels'] = result['input_ids'].copy()
+        result["labels"] = result["input_ids"].copy()
         result = BatchEncoding(result)
         return result
 
+    def format_inputs(examples):
+        p = examples["段落"]  # "段落" = the record's list of paragraphs
+        merged_line = ""
+        for line in p:
+            merged_line += line["内容"] + "\n"  # "内容" = paragraph text
+        return {"text": merged_line}
+
     def tokenize_inputs(
         examples: Dict[str, list],
         tokenizer: PreTrainedTokenizer,
-        column_name: str = 'text',
+        column_name: str = "text",
     ) -> BatchEncoding:
         return tokenizer(examples[column_name], return_attention_mask=False)
 
     dataset_column_names = list(dataset.features)
     dataset = dataset.map(
-        partial(
-            tokenize_inputs,
-            tokenizer=tokenizer,
-            column_name=dataset_column_names[0],
-        ),
+        format_inputs,
+        batched=False,
+        num_proc=args.num_proc,
+        remove_columns=dataset_column_names,
+    )
+    dataset_column_names = list(dataset.features)
+    dataset = dataset.map(
+        partial(tokenize_inputs, tokenizer=tokenizer),
         batched=True,
         num_proc=args.num_proc,
         remove_columns=dataset_column_names,
-    ).map(
+    )
+    dataset = dataset.map(
         partial(group_texts, block_size=tokenizer.model_max_length),
         batched=True,
         num_proc=args.num_proc,
     )
+
     return dataset
 
 
@@ -74,7 +86,7 @@ def parse_args():
         "--model_name",
         type=str,
         help="Name of or path to model",
-        default='gpt2',
+        default="gpt2",
     )
     parser.add_argument(
         "--learning_rate",
@@ -87,8 +99,12 @@ def parse_args():
         help="Use tril attention mask during training",
         action="store_true",
     )
-    parser.add_argument("--fp16", help="Enable fp16", action="store_true")
-    parser.add_argument("--bf16", help="Enable bf16", action="store_true")
+    parser.add_argument(
+        "--precision",
+        type=str,
+        help="Training precision: bf16-mixed, 16-mixed, or 32-true",
+        default="16-mixed",
+    )
     parser.add_argument(
         "--tokenizer_name_or_path",
         type=str,
@@ -97,10 +113,10 @@ def parse_args():
     )
     parser.add_argument(
         "--dataset_name",
-        nargs='+',
+        nargs="+",
         type=str,
         help="Name(s) of dataset. To specify a config, pass a :",
-        default=["wikitext:wikitext-2-v1"],
+        default=["/home/colin/develop/dataset/liwu/MNBVC/wiki"],
     )
     parser.add_argument(
         "--train_batch_size",
@@ -124,7 +140,7 @@ def parse_args():
         "--num_proc",
         type=str,
         help="Number of data processes",
-        default=1,
+        default=12,
     )
     parser.add_argument(
         "--max_epochs",
@@ -136,7 +152,7 @@ def parse_args():
         "--strategy",
         type=str,
         help="Name of pytorch lightning distribution strategy",
-        default='ddp',
+        default="fsdp",
     )
     parser.add_argument(
         "--resume_from_ckpt_path",
@@ -154,7 +170,7 @@ def parse_args():
     return args
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     args = parse_args()
 
     if args.tokenizer_name_or_path is None:
@@ -170,8 +186,11 @@ if __name__ == '__main__':
     train_dataset_list = []
     val_dataset_list = []
     for dataset_name in args.dataset_name:
-        dataset_args = dataset_name.split(':')
-        raw_dataset = datasets.load_dataset(*dataset_args)
+        dataset_args = dataset_name.split(":")
+        raw_dataset = datasets.load_dataset(
+            "json", data_files="/home/colin/develop/dataset/liwu/MNBVC/wiki/20230197/0.jsonl.gz"
+        )
+        # raw_dataset = datasets.load_dataset(*dataset_args)
         train_dataset, val_dataset = split_raw_dataset(raw_dataset)
         train_dataset = process_dataset(train_dataset, tokenizer)
         val_dataset = process_dataset(val_dataset, tokenizer)
@@ -198,19 +217,13 @@ if __name__ == '__main__':
     )
 
     ne = next(train_dataloader._get_iterator())
-    print((ne["input_ids"]-ne["labels"]).numpy().tolist())
 
     # trainer
     # apply_all_patches()
-    torch.set_float32_matmul_precision('medium')
-    if args.bf16:
-        precision = 'bf16-mixed'
-    elif args.fp16:
-        precision = '16-mixed'
-    else:
-        precision = "32-true"
+    torch.set_float32_matmul_precision("medium")
+    precision = args.precision
     lit_trainer = pl.Trainer(
-        accelerator='gpu',
+        accelerator="gpu",
         precision=precision,
         log_every_n_steps=5,
         accumulate_grad_batches=args.accumulate_grad_batches,
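
Note on format_inputs: it assumes each raw MNBVC record stores its paragraphs under a "段落" ("paragraphs") key, with each paragraph's text under "内容" ("content"). A minimal standalone sketch of that transformation, using a made-up record (the real MNBVC schema carries additional fields):

    # Hypothetical MNBVC-style record; only the keys that format_inputs reads
    # are taken from the diff, the values are illustrative.
    example = {"段落": [{"内容": "第一段。"}, {"内容": "第二段。"}]}

    def format_inputs(examples):
        merged_line = ""
        for line in examples["段落"]:
            merged_line += line["内容"] + "\n"
        return {"text": merged_line}

    print(format_inputs(example))  # {'text': '第一段。\n第二段。\n'}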
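Note on --precision: the option is a plain string that flows straight into pl.Trainer(precision=...); PyTorch Lightning 2.x accepts "bf16-mixed", "16-mixed", and "32-true" as precision strings. A minimal standalone check under those assumptions:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--precision",
        type=str,
        help="Training precision: bf16-mixed, 16-mixed, or 32-true",
        default="16-mixed",
    )

    # The value parses as an ordinary string, ready to pass to the Trainer.
    args = parser.parse_args(["--precision", "bf16-mixed"])
    assert args.precision == "bf16-mixed"

An invocation such as python lit_train.py --precision bf16-mixed --strategy fsdp --num_proc 12 then selects bf16 mixed precision under the FSDP strategy.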