
5/18/2023

swin transformer v2 - model forward and export onnx


1. load pre-trained model

2. export onnx

3. load onnx


refer to code:


.

import warnings
from torch.jit import TracerWarning
warnings.filterwarnings("ignore", category=TracerWarning)

#------------------
#swin-transformer v2 pretrained model
#------------------

from transformers import AutoImageProcessor, Swinv2Model
import torch
from datasets import load_dataset

dataset = load_dataset("huggingface/cats-image")
image = dataset["test"]["image"][0]

image_processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
model = Swinv2Model.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

inputs = image_processor(image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

last_hidden_states = outputs.last_hidden_state

# print( list(last_hidden_states.shape) )
# Convert last_hidden_states to numpy
last_hidden_states_numpy = last_hidden_states.detach().numpy()
print(f"Shape of last_hidden_states: {last_hidden_states_numpy.shape}")
print(last_hidden_states)



#----------------
#onnx export
#------------------
import torch

# ensure the model is in evaluation mode
model.eval()

# create a dummy input with the same size as the real input
# for this model the expected input size is [1, 3, 256, 256]
dummy_input = torch.randn(1, 3, 256, 256)

# specify the file path
file_path = "./swinv2_tiny.onnx"

# export the model
torch.onnx.export(model, dummy_input, file_path)
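
# optional variant (a sketch, not required for the inference below): name the
# graph inputs/outputs and mark the batch dimension as dynamic so the exported
# model accepts batch sizes other than 1; the names used here are just examples
# torch.onnx.export(
#     model,
#     dummy_input,
#     file_path,
#     input_names=["pixel_values"],
#     output_names=["last_hidden_state"],
#     dynamic_axes={"pixel_values": {0: "batch_size"},
#                   "last_hidden_state": {0: "batch_size"}},
#     opset_version=13,
# )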

#------------------
#onnx inference
#------------------
import onnxruntime as ort

# load the ONNX model
ort_session = ort.InferenceSession(file_path)

# convert the PyTorch tensor to numpy array for onnxruntime
print(inputs.keys())
inputs_numpy = inputs["pixel_values"].numpy()
# inputs_numpy = inputs["input_ids"].numpy()

# create a dictionary from model input name to the actual input data
ort_inputs = {ort_session.get_inputs()[0].name: inputs_numpy}

# forward
ort_outs = ort_session.run(None, ort_inputs)
print(f"Shape of ort_outs: {ort_outs[0].shape}")
print(ort_outs)
# print(type(ort_outs))
# print( list(ort_outs.shape) )
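
# quick sanity check (a sketch): the first ONNX output should match the
# PyTorch last_hidden_state computed above, up to small numerical error
import numpy as np
print("max abs diff:", np.abs(ort_outs[0] - last_hidden_states_numpy).max())
print("allclose:", np.allclose(ort_outs[0], last_hidden_states_numpy, atol=1e-4))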

..


Thank you.

www.marearts.com

🙇🏻‍♂️

4/25/2023

vit encoder + transformer decoder model - export onnx example

refer to this code:

.



# If you want to combine a Vision Transformer (ViT) as an encoder with a Transformer-based decoder,
# you can follow the steps below.
# We will use the Hugging Face Transformers library and PyTorch.

# Install the required libraries:
# pip install torch torchvision transformers onnx

# Define the combined model:
# -----------------------------------------
import torch
import torch.nn as nn
from transformers import ViTModel, ViTConfig, AutoModelForSeq2SeqLM

class ViTTransformer(nn.Module):
    def __init__(self, vit_model, transformer_decoder):
        super(ViTTransformer, self).__init__()
        self.vit = vit_model
        self.transformer_decoder = transformer_decoder

    def forward(self, x, decoder_input_ids, **kwargs):
        encoder_outputs = self.vit(x)
        outputs = self.transformer_decoder(decoder_input_ids, encoder_outputs=encoder_outputs, **kwargs)
        return outputs
# -----------------------------------------

# Load the ViT and Transformer decoder models:
# Assuming you have a pre-trained ViT model and a pre-trained Transformer decoder model, load them as follows:

# -----------------------------------------
vit_config = ViTConfig()
vit_model = ViTModel(vit_config)  # note: this creates a randomly initialized ViT; use ViTModel.from_pretrained(...) for pre-trained weights
transformer_decoder = AutoModelForSeq2SeqLM.from_pretrained("your-pretrained-transformer-decoder")
# -----------------------------------------

# Create the combined model and load the checkpoint if you have one:
# -----------------------------------------
combined_model = ViTTransformer(vit_model, transformer_decoder)
# -----------------------------------------

# # If you have a checkpoint, load it as follows:
# # checkpoint = torch.load('path/to/checkpoint.pth')
# # combined_model.load_state_dict(checkpoint['model_state_dict'])
# Export the combined model to ONNX format:
# The process of exporting the combined model to ONNX is more complicated due to the dynamic nature of the Transformer-based decoder.
# You might need to modify the export code depending on your specific use case.
# However, here is a general example:

# -----------------------------------------
# Set the combined model to evaluation mode
combined_model.eval()
# Create dummy input tensors with the correct dimensions
# (B x C x H x W) for image input and (B x seq_len) for decoder input
dummy_image_input = torch.randn(1, 3, 224, 224)
dummy_decoder_input = torch.randint(0, transformer_decoder.config.vocab_size, (1, 5))

# Export the combined model to ONNX format
torch.onnx.export(
    combined_model,
    (dummy_image_input, dummy_decoder_input),
    "vit_transformer.onnx",
    input_names=["image_input", "decoder_input"],
    output_names=["output"],
    dynamic_axes={
        "image_input": {0: "batch_size"},
        "decoder_input": {0: "batch_size", 1: "sequence_length"},
        "output": {0: "batch_size", 1: "sequence_length"},
    },
    opset_version=12,
)
# -----------------------------------------

# This code will create an ONNX file (vit_transformer.onnx) containing the combined ViT and Transformer decoder model.
# Note that you might need to adjust the code according to the specific needs of your application.
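
# A rough sketch of loading and running the exported file with onnxruntime
# (assumes the export above succeeded and that onnxruntime is installed):
# -----------------------------------------
import onnxruntime as ort
import numpy as np

ort_session = ort.InferenceSession("vit_transformer.onnx")
ort_inputs = {
    "image_input": np.random.randn(1, 3, 224, 224).astype(np.float32),
    "decoder_input": np.random.randint(0, transformer_decoder.config.vocab_size, size=(1, 5)).astype(np.int64),
}
ort_outs = ort_session.run(None, ort_inputs)
print(ort_outs[0].shape)
# -----------------------------------------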

..

Thank you.🙇🏻‍♂️

2/09/2023

Exporting our checkpoint to ONNX with torch.onnx.export (AutoModelForSequenceClassification)

refer to code:

..

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# load model and tokenizer
model_id = "distilbert-base-uncased-finetuned-sst-2-english"
model = AutoModelForSequenceClassification.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
dummy_model_input = tokenizer("This is a sample", return_tensors="pt")

# export
torch.onnx.export(
    model,
    tuple(dummy_model_input.values()),
    f="torch-model.onnx",
    input_names=['input_ids', 'attention_mask'],
    output_names=['logits'],
    dynamic_axes={'input_ids': {0: 'batch_size', 1: 'sequence'},
                  'attention_mask': {0: 'batch_size', 1: 'sequence'},
                  'logits': {0: 'batch_size', 1: 'sequence'}},
    do_constant_folding=True,
    opset_version=13,
)

..
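
To sanity-check the exported file, here is a minimal sketch (assuming onnxruntime is installed) that feeds the same tokenized sample through torch-model.onnx:

.

import onnxruntime as ort

ort_session = ort.InferenceSession("torch-model.onnx")
ort_inputs = {
    "input_ids": dummy_model_input["input_ids"].numpy(),
    "attention_mask": dummy_model_input["attention_mask"].numpy(),
}
ort_outs = ort_session.run(None, ort_inputs)
print("logits:", ort_outs[0])

..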


Thank you.

5/13/2022

convert simple transformer ner model to onnx

 

..

!python -m transformers.onnx --model=./checkpoint-21-epoch-11 --feature=token-classification onnx/

..
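
The command above writes the exported graph (model.onnx) into the onnx/ directory. A minimal sketch of running it with onnxruntime (assuming the checkpoint directory also contains the tokenizer files):

.

import onnxruntime as ort
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint-21-epoch-11")
encoded = tokenizer("My name is John and I live in Berlin", return_tensors="np")

ort_session = ort.InferenceSession("onnx/model.onnx")
# feed only the inputs the exported graph actually expects
ort_inputs = {inp.name: encoded[inp.name].astype("int64") for inp in ort_session.get_inputs()}
ort_outs = ort_session.run(None, ort_inputs)
print(ort_outs[0].shape)  # typically (batch, sequence_length, num_labels)

..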