
5/23/2023

Create a custom tokenizer: simple code.

In the sample code, the vocabulary is the digits "0" through "4" (plus a few special tokens) and the max length is 20.


.

from typing import List, Union

class CustomTokenizer:
    def __init__(self, vocab: Union[str, List[str]], pad_token="<PAD>", cls_token="<BOS>", sep_token="<SEP>", max_len=20):
        # vocab can be a path to a text file (one token per line) or a list of tokens
        if isinstance(vocab, str):
            with open(vocab, 'r') as f:
                self.vocab = {word.strip(): i for i, word in enumerate(f.readlines())}
        elif isinstance(vocab, list):
            self.vocab = {word: i for i, word in enumerate(vocab)}
        else:
            raise ValueError("vocab must be either a filepath (str) or a list of words")
        print('vocab: ', self.vocab)
        self.pad_token = pad_token
        self.cls_token = cls_token
        self.sep_token = sep_token
        self.max_len = max_len
        # Inverse mapping used to decode ids back to tokens
        self.inv_vocab = {v: k for k, v in self.vocab.items()}

    def tokenize(self, text: str):
        # Character-level tokenization: keep only characters present in the vocabulary
        tokens = [c for c in text if c in self.vocab]
        tokens = tokens[:self.max_len]
        padding_length = self.max_len - len(tokens)
        # Output is always max_len + 2 tokens long (BOS and SEP are added on top)
        return [self.cls_token] + tokens + [self.sep_token] + [self.pad_token] * padding_length

    def convert_tokens_to_ids(self, tokens):
        # Unknown tokens fall back to the pad token's id
        return [self.vocab.get(token, self.vocab.get(self.pad_token)) for token in tokens]

    def convert_ids_to_tokens(self, ids):
        # Unknown ids fall back to the pad token
        return [self.inv_vocab.get(id, self.pad_token) for id in ids]



vocab = ["<PAD>", "<BOS>", "<SEP>", "0", "1", "2", "3", "4"]
with open('vocab.txt', 'w') as f:
    for token in vocab:
        f.write(token + '\n')

# Initialize your custom tokenizer
tokenizer = CustomTokenizer(vocab='vocab.txt')

# Now you can use this tokenizer to tokenize your data, study.marearts.com
tokenized_text = tokenizer.tokenize('22342')
print("tokenized_text: ", tokenized_text)

# Convert tokens to ids
token_ids = tokenizer.convert_tokens_to_ids(tokenized_text)
print("token_ids: ", token_ids)

# Convert ids back to tokens, marearts.com
tokens = tokenizer.convert_ids_to_tokens(token_ids)
print("tokens: ", tokens)

..
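
If you plan to feed the ids into a PyTorch model, a small helper can wrap the tokenizer output into tensors with an attention mask, similar to what Hugging Face tokenizers return. The encode helper below is a sketch of my own on top of the class above (it assumes torch is installed); adjust it to your model's expected inputs.

.

import torch

def encode(tokenizer: CustomTokenizer, text: str) -> dict:
    # Tokenize, map tokens to ids, and mark real tokens (1) vs padding (0)
    tokens = tokenizer.tokenize(text)
    ids = tokenizer.convert_tokens_to_ids(tokens)
    mask = [0 if t == tokenizer.pad_token else 1 for t in tokens]
    return {
        "input_ids": torch.tensor([ids]),        # shape: (1, max_len + 2)
        "attention_mask": torch.tensor([mask]),  # shape: (1, max_len + 2)
    }

batch = encode(tokenizer, '22342')
print(batch["input_ids"])       # tensor([[1, 5, 5, 6, 7, 5, 2, 0, 0, ...]])
print(batch["attention_mask"])  # tensor([[1, 1, 1, 1, 1, 1, 1, 0, 0, ...]])

..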


Thank you.

πŸ™‡πŸ»‍♂️


4/25/2023

ViT encoder + Transformer decoder model - ONNX export example

Refer to this code:

.



# If you want to combine a Vision Transformer (ViT) as an encoder with a Transformer-based decoder,
# you can follow the steps below.
# We will use the Hugging Face Transformers library and PyTorch.

# Install the required libraries:
# pip install torch torchvision transformers onnx

# Define the combined model:
# -----------------------------------------
import torch
import torch.nn as nn
from transformers import ViTModel, ViTConfig, AutoModelForSeq2SeqLM

class ViTTransformer(nn.Module):
    def __init__(self, vit_model, transformer_decoder):
        super(ViTTransformer, self).__init__()
        self.vit = vit_model
        self.transformer_decoder = transformer_decoder

    def forward(self, x, decoder_input_ids, **kwargs):
        # Encode the image; the ViT output's last_hidden_state serves as the encoder memory
        encoder_outputs = self.vit(x)
        # Pass decoder_input_ids by keyword: the first positional argument of a
        # seq2seq model's forward is input_ids (the encoder side), not decoder_input_ids
        outputs = self.transformer_decoder(decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, **kwargs)
        return outputs
# -----------------------------------------

# Load the ViT and Transformer decoder models:
# Note: ViTModel(ViTConfig()) below builds a randomly initialized ViT;
# use ViTModel.from_pretrained(...) instead if you have a pre-trained checkpoint.

# -----------------------------------------
vit_config = ViTConfig()
vit_model = ViTModel(vit_config)
transformer_decoder = AutoModelForSeq2SeqLM.from_pretrained("your-pretrained-transformer-decoder")
# -----------------------------------------

# Create the combined model and load the checkpoint if you have one:
# -----------------------------------------
combined_model = ViTTransformer(vit_model, transformer_decoder)
# -----------------------------------------

# If you have a checkpoint, load it as follows:
# checkpoint = torch.load('path/to/checkpoint.pth')
# combined_model.load_state_dict(checkpoint['model_state_dict'])
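
# Optional sanity check (a minimal sketch; 100 below is only a stand-in for the
# decoder's real vocab size): one forward pass with dummy tensors catches shape
# or keyword errors before the ONNX export.
with torch.no_grad():
    _ = combined_model(torch.randn(1, 3, 224, 224), torch.randint(0, 100, (1, 5)))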

# Export the combined model to ONNX format:
# Exporting the combined model to ONNX is more involved than usual because of the
# dynamic nature of the Transformer-based decoder, so you may need to adapt the
# export code to your specific use case. A general example:

# -----------------------------------------
# Set the combined model to evaluation mode
combined_model.eval()
# Create dummy input tensors with the correct dimensions
# (B x C x H x W) for image input and (B x seq_len) for decoder input
dummy_image_input = torch.randn(1, 3, 224, 224)
dummy_decoder_input = torch.randint(0, transformer_decoder.config.vocab_size, (1, 5))

# Export the combined model to ONNX format
torch.onnx.export(
    combined_model,
    (dummy_image_input, dummy_decoder_input),
    "vit_transformer.onnx",
    input_names=["image_input", "decoder_input"],
    output_names=["output"],
    dynamic_axes={
        "image_input": {0: "batch_size"},
        "decoder_input": {0: "batch_size", 1: "sequence_length"},
        "output": {0: "batch_size", 1: "sequence_length"},
    },
    opset_version=12,
)
# -----------------------------------------

# This code will create an ONNX file (vit_transformer.onnx) containing the combined ViT and Transformer decoder model.
# Note that you might need to adjust the code according to the specific needs of your application.

..
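
To sanity-check the exported file, you can validate the graph with onnx and run it once with onnxruntime. This is a minimal sketch of my own (assuming pip install onnx onnxruntime); the input names match the ones passed to torch.onnx.export above, and 100 is again only a stand-in for the decoder's vocab size.

.

import numpy as np
import onnx
import onnxruntime as ort

# Check that the exported graph is structurally valid
onnx.checker.check_model(onnx.load("vit_transformer.onnx"))

# Run the model once through onnxruntime with dummy inputs
session = ort.InferenceSession("vit_transformer.onnx")
outputs = session.run(
    None,  # None returns all outputs
    {
        "image_input": np.random.randn(1, 3, 224, 224).astype(np.float32),
        "decoder_input": np.random.randint(0, 100, size=(1, 5)).astype(np.int64),
    },
)
print("output shape:", outputs[0].shape)

..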

Thank you. 🙇🏻‍♂️