In this article we will see how to run transformer model training in an environment with multiple GPUs, for example on Kaggle.com, where we can use two T4 GPUs with 16 GB of VRAM each.

With multiple GPUs we can parallelize the training process and so reduce the total training time.
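
Before anything else, it is worth confirming that the notebook actually sees both accelerators (a quick check, assuming PyTorch is available in the Kaggle image):

import torch

# Should report 2 devices when the 2 x T4 accelerator is enabled
print(torch.cuda.device_count())
for i in range(torch.cuda.device_count()):
    print(torch.cuda.get_device_name(i))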

In this example we use the accelerate library and its notebook_launcher function to run multi-GPU training in the Kaggle.com environment, but the same approach works in any Jupyter Notebook environment (a minimal sketch of the launcher pattern is shown after the imports below).

First, we need to install the necessary libraries in our Jupyter notebook:

!pip install transformers==4.35.2
!pip install datasets==2.15.0
!pip install accelerate==0.25.0
!pip install "numpy<2.0"

Then we add all the necessary imports:

import pandas as pd
import numpy as np
from datasets import load_dataset
from transformers import Trainer, TrainingArguments, DataCollatorForLanguageModeling, AutoTokenizer, XLMRobertaForMaskedLM, TrainerCallback, EarlyStoppingCallback, get_scheduler
from sklearn.model_selection import train_test_split
import math
import torch
import os
from accelerate import notebook_launcher
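
Before writing the real training code, it helps to see what notebook_launcher actually does: it spawns one process per GPU and runs the function we give it in every process. A minimal sketch (the function hello_fn is just for illustration):

from accelerate import Accelerator, notebook_launcher

def hello_fn():
    accelerator = Accelerator()
    # Each spawned process reports its own index and the GPU it was assigned
    print(f"process {accelerator.process_index} on {accelerator.device}")

notebook_launcher(hello_fn, num_processes=2)

On the two-T4 Kaggle setup this should print one line per process.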

Now we need to wrap our training code in a function so we can pass it to notebook_launcher.

I have created a custom training routine for an MLM (masked language modeling) model, so the tokenizer output contains no labels; for other kinds of tasks you need to add the labels in the preprocess_function, as shown in the sketch below.
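
For reference, a preprocessing function for a task that does need labels could look roughly like this, assuming a hypothetical dataset with a 'label' column (this variant is not used in the MLM training below):

def preprocess_function_with_labels(examples):
    # Hypothetical sketch for a dataset with a 'label' column
    inputs = tokenizer(examples['text'], truncation=True, padding='max_length', max_length=512)
    inputs['labels'] = examples['label']
    return inputs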

I load the dataset from a local file on Kaggle.com, but you can also load it from the Hugging Face Hub or elsewhere (see the sketch below). When loading the model, the most important parameter is device_map='auto', so that the model is loaded across all the GPUs.
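
For example, loading a dataset from the Hugging Face Hub instead of a local file only changes the load_dataset call (wikitext is used here purely as an illustration):

from datasets import load_dataset

# Any text dataset from the Hub works; wikitext is only an example
dataset = load_dataset("wikitext", "wikitext-2-raw-v1")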

MODEL_NAME = "xlm-roberta-base"  # placeholder checkpoint: replace with the model you want to fine-tune

def notebook_train_launch():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, do_lower_case=True)
    model = XLMRobertaForMaskedLM.from_pretrained(MODEL_NAME, device_map='auto')
    model.gradient_checkpointing_enable()


    def preprocess_function(examples):
        # MLM: the data collator will create the labels, so we only keep input_ids and attention_mask
        inputs = tokenizer(examples['text'], truncation=True, padding='max_length', max_length=512)
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"]
        }
    
    dataset = load_dataset("text", data_files="/kaggle/input/datasetfile.txt")
   
    # Note: as written, train_test_split keeps 20% of the data for training and 80% for evaluation
    dataset_split = dataset['train'].train_test_split(test_size=0.8, train_size=0.2, shuffle=True)

    tokenized_datasets_test = dataset_split['test'].map(preprocess_function, batched=True, num_proc=4)
    tokenized_datasets_train = dataset_split['train'].map(preprocess_function, batched=True, num_proc=4)
    
    tokenized_datasets_test=tokenized_datasets_test.remove_columns(["text"])
    tokenized_datasets_test.set_format("torch")
    
    tokenized_datasets_train=tokenized_datasets_train.remove_columns(["text"])
    tokenized_datasets_train.set_format("torch")
    
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer,
        mlm=True,                   
        mlm_probability=0.15        
    )
    
    training_args = TrainingArguments(
        output_dir="/kaggle/working/TrainingLog",
        run_name="latinxlm",
        save_strategy="steps",
        evaluation_strategy="steps",
        save_steps=600,
        eval_steps=600,
        learning_rate=2e-5,
        auto_find_batch_size=True,
        num_train_epochs=3,
        weight_decay=0.05,
        max_grad_norm=1.0,
        logging_dir="/kaggle/working/TrainingLog/logs",
        fp16=True,
        gradient_checkpointing=True,
        gradient_accumulation_steps=8,
        eval_accumulation_steps=16,
        logging_steps=300,
        warmup_steps=1000,
        save_total_limit=2,
        greater_is_better=False,
        load_best_model_at_end=True,
        metric_for_best_model="eval_loss",
        overwrite_output_dir=True,
        report_to="wandb",
        optim="adamw_torch",
        lr_scheduler_type="linear",
        save_safetensors=False
    )
    
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets_train,
        eval_dataset=tokenized_datasets_test,
        data_collator=data_collator,
        callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
        tokenizer=tokenizer
    )
    
    trainer.train()
    
    model.save_pretrained("/kaggle/working/modelname")
    tokenizer.save_pretrained("/kaggle/working/modelname")

notebook_launcher(notebook_train_launch, num_processes=2)
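
Once training finishes, the model and tokenizer saved under /kaggle/working/modelname can be reloaded for a quick smoke test, for example with the fill-mask pipeline (a minimal sketch; XLM-RoBERTa uses <mask> as its mask token):

from transformers import pipeline

fill_mask = pipeline(
    "fill-mask",
    model="/kaggle/working/modelname",
    tokenizer="/kaggle/working/modelname"
)
print(fill_mask("The capital of France is <mask>."))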
