Hugging Face ONNX export and model quantization method

Refer to the example code:


from functools import partial
from transformers import AutoTokenizer
from optimum.onnxruntime import ORTQuantizer, ORTModelForSequenceClassification
from optimum.onnxruntime.configuration import AutoQuantizationConfig, AutoCalibrationConfig

model_id = "distilbert-base-uncased-finetuned-sst-2-english"

onnx_model = ORTModelForSequenceClassification.from_pretrained(model_id, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
quantizer = ORTQuantizer.from_pretrained(onnx_model)
qconfig = AutoQuantizationConfig.arm64(is_static=True, per_channel=False)

def preprocess_fn(ex, tokenizer):
    return tokenizer(ex["sentence"])

calibration_dataset = quantizer.get_calibration_dataset(
    "glue",                      # calibrate on GLUE/SST-2, the dataset this model was fine-tuned on
    dataset_config_name="sst2",
    preprocess_function=partial(preprocess_fn, tokenizer=tokenizer),
    num_samples=50,
    dataset_split="train",
)

calibration_config = AutoCalibrationConfig.minmax(calibration_dataset)

ranges = quantizer.fit(
    dataset=calibration_dataset,
    calibration_config=calibration_config,
    operators_to_quantize=qconfig.operators_to_quantize,
)

model_quantized_path = quantizer.quantize(
    save_dir="distilbert_sst2_quantized",  # placeholder output directory
    calibration_tensors_range=ranges,
    quantization_config=qconfig,
)


Options for several instruction sets:


optimum-cli onnxruntime quantize --help
usage: optimum-cli <command> [<args>] onnxruntime quantize [-h] --onnx_model ONNX_MODEL -o OUTPUT [--per_channel] (--arm64 | --avx2 | --avx512 | --avx512_vnni | --tensorrt | -c CONFIG)

optional arguments:
  -h, --help            show this help message and exit
  --arm64               Quantization for the ARM64 architecture.
  --avx2                Quantization with AVX-2 instructions.
  --avx512              Quantization with AVX-512 instructions.
  --avx512_vnni         Quantization with AVX-512 and VNNI instructions.
  --tensorrt            Quantization for NVIDIA TensorRT optimizer.
  -c CONFIG, --config CONFIG
                        `ORTConfig` file to use to optimize the model.

Required arguments:
  --onnx_model ONNX_MODEL
                        Path to the repository where the ONNX models to quantize are located.
  -o OUTPUT, --output OUTPUT
                        Path to the directory where to store generated ONNX model.

Optional arguments:
  --per_channel         Compute the quantization parameters on a per-channel basis.
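
For example, to quantize an exported ONNX model for AVX-2 (both paths below are placeholders):

optimum-cli onnxruntime quantize --onnx_model ./onnx_model_dir -o ./quantized_model_dir --avx2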


Refer to the Optimum documentation for details.


Refer to this code as well; it may give you an idea of how to quantize a seq2seq (encoder/decoder) model.

# Export to ONNX
model = ORTModelForSeq2SeqLM.from_pretrained(model_path, export=True, provider='CUDAExecutionProvider').to(device)  # export=True replaces the deprecated from_transformers=True
model.save_pretrained(onnx_path)  # (added) save the exported ONNX files so the quantizers below can load them

# quantization code
encoder_quantizer = ORTQuantizer.from_pretrained(onnx_path, file_name='encoder_model.onnx')
decoder_quantizer = ORTQuantizer.from_pretrained(onnx_path, file_name='decoder_model.onnx')
decoder_wp_quantizer = ORTQuantizer.from_pretrained(onnx_path, file_name='decoder_with_past_model.onnx')
quantizer = [encoder_quantizer, decoder_quantizer, decoder_wp_quantizer]
dqconfig = AutoQuantizationConfig.avx512_vnni(is_static=False, per_channel=False)
for q in quantizer:
    q.quantize(save_dir=output_path, quantization_config=dqconfig)

#inference code
model = ORTModelForSeq2SeqLM.from_pretrained(output_path, provider='CUDAExecutionProvider')
tokenizer = AutoTokenizer.from_pretrained('google/flan-t5-large')


dataset = self.dataset(input_dict)
dataset.set_format(type='torch', device=self.device, columns=['input_ids', 'attention_mask'])
data_loader = DataLoader(dataset, batch_size=self.batch_size, collate_fn=self.data_collator)
generated_outputs: List[OUTPUT_TYPE] = []
for i, batch in enumerate(data_loader):
    _batch = {key: val.to(self.device) for key, val in batch.items()}
    outputs = self.model.generate(**_batch, generation_config=self.generation_config)
    decoded_outputs = self.tokenizer.batch_decode(outputs.cpu().tolist(), skip_special_tokens=True)
    generated_outputs.extend(decoded_outputs)  # (added) collect results so the list is actually filled


Thank you.

Note: quantization and optimization are different things.
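
If what you want is graph optimization rather than quantization, Optimum has a separate ORTOptimizer class. A minimal sketch, reusing the onnx_model from above (the save_dir is a placeholder):

from optimum.onnxruntime import ORTOptimizer
from optimum.onnxruntime.configuration import OptimizationConfig

# Graph optimization (operator fusion etc.) -- this does NOT quantize weights
optimizer = ORTOptimizer.from_pretrained(onnx_model)
optimization_config = OptimizationConfig(optimization_level=2)
optimizer.optimize(save_dir="optimized_model", optimization_config=optimization_config)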


Comparing a custom custom_vit_image_processor vs the ViT image processor of transformers

Check that the custom image processing matches the original internal processing function in transformers.


pixel_values1 = self.feature_extractor(images=image, return_tensors="pt").pixel_values

# Convert numpy array to PyTorch tensor
pixel_values2 = self.custom_vit_image_processor(image)
pixel_values2 = torch.tensor(pixel_values2, dtype=torch.float32).unsqueeze(0) # Add batch dimension and ensure float32 type

# 1. Shape Check
assert pixel_values1.shape == pixel_values2.shape, "The tensors have different shapes"
# 2. Absolute Difference
diff = torch.abs(pixel_values1 - pixel_values2)

# 3. Summarize Discrepancies www.marearts.com
mean_diff = torch.mean(diff).item()
max_diff = torch.max(diff).item()
min_diff = torch.min(diff).item()
print(f"Mean Absolute Difference: {mean_diff}")
print(f"Maximum Absolute Difference: {max_diff}")
print(f"Minimum Absolute Difference: {min_diff}")

# Additionally, if you want to see where the maximum difference occurs:
max_diff_position = torch.where(diff == max_diff)
print(f"Position of Maximum Difference: {max_diff_position}")


Thank you.

Hope this is helpful.


nerfstudio installation error


. VanillaPipeline.get_train_loss_dict: 12.6875
Traceback (most recent call last):
  File "/home/mare/anaconda3/envs/nerfstudio/bin/ns-train", line 8, in <module>
    sys.exit(entrypoint())
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/scripts/train.py", line 262, in entrypoint
    main(
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/scripts/train.py", line 247, in main
    launch(
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/scripts/train.py", line 189, in launch
    main_func(local_rank=0, world_size=world_size, config=config)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/scripts/train.py", line 100, in train_loop
    trainer.train()
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/engine/trainer.py", line 259, in train
    loss, loss_dict, metrics_dict = self.train_iteration(step)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/utils/profiler.py", line 127, in inner
    out = func(*args, **kwargs)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/engine/trainer.py", line 479, in train_iteration
    _, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/utils/profiler.py", line 127, in inner
    out = func(*args, **kwargs)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/pipelines/base_pipeline.py", line 298, in get_train_loss_dict
    ray_bundle, batch = self.datamanager.next_train(step)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/data/datamanagers/base_datamanager.py", line 542, in next_train
    ray_bundle = self.train_ray_generator(ray_indices)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/model_components/ray_generators.py", line 56, in forward
    ray_bundle = self.cameras.generate_rays(
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/cameras/cameras.py", line 463, in generate_rays
    raybundle = cameras._generate_rays_from_coords(
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/cameras/cameras.py", line 646, in _generate_rays_from_coords
    coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/eval_frame.py", line 328, in _fn
    return fn(*args, **kwargs)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/eval_frame.py", line 490, in catch_errors
    return callback(frame, cache_entry, hooks, frame_state)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/convert_frame.py", line 641, in _convert_frame
    result = inner_convert(frame, cache_size, hooks, frame_state)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/convert_frame.py", line 133, in _fn
    return fn(*args, **kwargs)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/convert_frame.py", line 389, in _convert_frame_assert
    return _compile(
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/convert_frame.py", line 569, in _compile
    guarded_code = compile_inner(code, one_graph, hooks, transform)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/utils.py", line 189, in time_wrapper
    r = func(*args, **kwargs)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/convert_frame.py", line 491, in compile_inner
    out_code = transform_code_object(code, transform)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/bytecode_transformation.py", line 1028, in transform_code_object
    transformations(instructions, code_options)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/convert_frame.py", line 458, in transform
    tracer.run()
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/symbolic_convert.py", line 2074, in run
    super().run()
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/symbolic_convert.py", line 724, in run
    and self.step()
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/symbolic_convert.py", line 688, in step
    getattr(self, inst.opname)(inst)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/symbolic_convert.py", line 2162, in RETURN_VALUE
    self.output.compile_subgraph(
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/output_graph.py", line 833, in compile_subgraph
    self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/contextlib.py", line 75, in inner
    return func(*args, **kwds)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/output_graph.py", line 957, in compile_and_call_fx_graph
    compiled_fn = self.call_user_compiler(gm)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/utils.py", line 189, in time_wrapper
    r = func(*args, **kwargs)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/output_graph.py", line 1024, in call_user_compiler
    raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/output_graph.py", line 1009, in call_user_compiler
    compiled_fn = compiler_fn(gm, self.example_inputs())
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/_dynamo/repro/after_dynamo.py", line 117, in debug_wrapper
    compiled_gm = compiler_fn(gm, example_inputs)
  File "/home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/torch/__init__.py", line 1607, in __call__
    return self.compiler_fn(model_, inputs_, **self.kwargs)
torch._dynamo.exc.BackendCompilerFailed: backend='eager' raised:
TypeError: eager() got an unexpected keyword argument 'mode'

Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information

You can suppress this exception and fall back to eager by setting:
    import torch._dynamo
    torch._dynamo.config.suppress_errors = True


* find where train.py is installed
(nerfstudio) mare@mare:~/NerfStudio$ find /home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/ -name 'train.py'

In my case it is installed here: /home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/scripts/train.py

* edit train.py code 
nano /home/mare/anaconda3/envs/nerfstudio/lib/python3.8/site-packages/nerfstudio/scripts/train.py

Add these two lines at the top of the file:
import torch._dynamo
torch._dynamo.config.suppress_errors = True

OK, that's my solution.

Thank you.


round to first significant decimal

The code finds the first significant digit and replaces it with 1, keeping its decimal position:


Input: 0.02323, Output: 0.01
Input: 0.0023, Output: 0.001
Input: 23, Output: 1
Input: 0.0042323, Output: 0.001


Refer to this code:

def custom_marearts_round(number):
    # Ensure number is a float
    float_number = float(number)

    # If number is zero, return it as is
    if number == 0:
        return 0
    # If the number is an integer, return 1
    elif float_number.is_integer():
        return 1
    # Convert the number to exponential notation
    exponential_notation = f'{float_number:.1e}'
    # Extract the exponent part and convert to int
    exponent = int(exponential_notation.split('e')[-1])
    # Convert to desired output format
    result = float(f'1e{exponent}')
    return result

# Examples:
input1 = 0.02323
output1 = custom_marearts_round(input1)
print(f"Input: {input1}, Output: {output1}")

input2 = 0.0023
output2 = custom_marearts_round(input2)
print(f"Input: {input2}, Output: {output2}")

input3 = 23
output3 = custom_marearts_round(input3)
print(f"Input: {input3}, Output: {output3}")

input4 = 0.0042323
output4 = custom_marearts_round(input4)
print(f"Input: {input4}, Output: {output4}")


๐Ÿ™‡๐Ÿป‍♂️ Thank you




QR detector Python code

Refer to the code:


import cv2
#pip install pyzbar
from pyzbar.pyzbar import decode

def scan_qr_code():
    # Start the webcam
    cap = cv2.VideoCapture(0)
    cap.set(3, 640)  # Set the width of the window
    cap.set(4, 480)  # Set the height of the window
    while True:
        success, img = cap.read()
        if not success:
            print("Failed to grab frame")
            break
        # Decode the QR Code
        for barcode in decode(img):
            # Get the QR Code position
            my_data = barcode.data.decode('utf-8')
            pts = barcode.polygon
            if len(pts) == 4:  # If we have 4 points, then we have a QR code
                pts2 = barcode.rect
                cv2.putText(img, my_data, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 255), 2)
                for point in pts:
                    cv2.circle(img, (point[0], point[1]), 5, (0, 255, 0), cv2.FILLED)
        # Display the frame
        cv2.imshow('QR Code Scanner', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # Press 'q' to quit
            break
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    scan_qr_code()


You need to install pyzbar (pip install pyzbar).
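
If you only need to decode a QR code from a still image instead of the webcam, a minimal sketch (the file name is a placeholder):

import cv2
from pyzbar.pyzbar import decode

# Decode every barcode/QR code found in a single image
img = cv2.imread('qr_sample.png')  # placeholder path
for barcode in decode(img):
    print(barcode.type, barcode.data.decode('utf-8'))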

Thank you.




Python code to convert HEIC (Mac image format) to JPEG

Refer to the code:


# brew install libheif  (provides the heif-convert command used below;
# the script shells out to it, so no Python HEIC package is needed)

import os
import subprocess

input_folder = "./input/path"
output_folder = "./output/path"

if not os.path.exists(output_folder):
    os.makedirs(output_folder)

for filename in os.listdir(input_folder):
if filename.endswith(".heic") or filename.endswith(".HEIC"):
heic_path = os.path.join(input_folder, filename)
jpeg_path = os.path.join(output_folder, os.path.splitext(filename)[0] + ".jpg")

# Run heif-convert to convert the HEIC file to JPEG
subprocess.run(["heif-convert", heic_path, jpeg_path])

print("Conversion complete!")


Thank you.


Download Youtube Video, url to local video file.

The code needs two YouTube downloader packages: pytube and youtube_dl.

It tries one and falls back to the other, because each library fails on some videos.

Enjoy the code:


import ssl
from pytube import YouTube
import youtube_dl

#pip install pytube
#pip install youtube_dl

def configure_ssl():
    try:
        _create_unverified_https_context = ssl._create_unverified_context
    except AttributeError:
        pass
    else:
        ssl._create_default_https_context = _create_unverified_https_context

def pytube_download(video_url):
    try:
        yt = YouTube(video_url)
        video = yt.streams.get_highest_resolution()
        print(f"Downloading: {video.title}")
        video.download()
        print("Video downloaded successfully using pytube.")
        return True
    except Exception as e:
        print(f"An error occurred while downloading the video with pytube: {e}")
        return False

def download_youtube_video(video_url):
    download_options = {
        'format': 'best',
        'outtmpl': '%(title)s-%(id)s.%(ext)s',  # Set the output filename format
        'progress_hooks': [hook],
    }
    try:
        with youtube_dl.YoutubeDL(download_options) as ydl:
            ydl.download([video_url])
        print("Video downloaded successfully using youtube-dl.")
    except Exception as e:
        print(f"An error occurred while downloading the video with youtube-dl: {e}")

def hook(d):
    if d['status'] == 'downloading':
        print(d['_percent_str'], end="\r")

def main(video_url):
    configure_ssl()
    # Try pytube first, fall back to youtube-dl on failure
    if not pytube_download(video_url):
        download_youtube_video(video_url)

# Example usage:
if __name__ == "__main__":
    video_url = 'https://youtu.be/MareArts'  # Add your video url here
    main(video_url)


Thank you.




3D mesh generation from a 3D point cloud, using Open3D

Refer to the code:


import open3d as o3d
import numpy as np
import os

def load_point_cloud(file_path):
    print("Loading point cloud...")
    return o3d.io.read_point_cloud(file_path)

def estimate_normals(pcd):
    print("Estimating normals...")
    pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
    return pcd

def remove_invalid_normals(pcd):
    print("Removing invalid normals...")
    valid_indices = np.where(np.linalg.norm(np.asarray(pcd.normals), axis=1) != 0)[0]
    return pcd.select_by_index(valid_indices)

def poisson_reconstruction(pcd):
    print("Performing Poisson surface reconstruction...")
    mesh, _ = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=8)
    return mesh

def simplify_mesh(mesh):
    print("Simplifying mesh...")
    return mesh.simplify_quadric_decimation(target_number_of_triangles=10000)

def save_mesh(mesh, file_path):
    o3d.io.write_triangle_mesh(file_path, mesh)
    print(f"Mesh saved to '{file_path}'")

def main():
    file_path = "/path/3d_cloud.ply"
    pcd = load_point_cloud(file_path)
    pcd = estimate_normals(pcd)
    pcd = remove_invalid_normals(pcd)
    mesh = poisson_reconstruction(pcd)
    mesh = simplify_mesh(mesh)
    mesh_file = os.path.join(os.path.dirname(file_path), 'mesh.ply')
    save_mesh(mesh, mesh_file)

if __name__ == "__main__":
    main()
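
One common improvement: Poisson reconstruction also returns per-vertex densities, which you can use to trim the low-support geometry it tends to hallucinate far from the input points. A minimal sketch (the 0.01 quantile is an arbitrary choice):

def poisson_reconstruction_trimmed(pcd):
    # Keep the densities instead of discarding them
    mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=8)
    densities = np.asarray(densities)
    # Drop the 1% of vertices with the weakest point support
    mesh.remove_vertices_by_mask(densities < np.quantile(densities, 0.01))
    return mesh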


Install Open3D with: pip install open3d

Thank you.



Print image EXIF Python code

Refer to the code:


from PIL import Image, ExifTags
import os

def print_exif_data(directory_path):
    """
    Print the EXIF data of every JPEG image in the given directory.

    directory_path (str): Path to the directory containing JPEG images.
    """
    # Loop through all files in the directory
    for filename in os.listdir(directory_path):
        if filename.lower().endswith('.jpg') or filename.lower().endswith('.jpeg'):
            filepath = os.path.join(directory_path, filename)
            # Open image file
            image = Image.open(filepath)
            # Extract EXIF data
            exif_data = image._getexif()
            if exif_data is not None:
                # Print EXIF data
                print(f"EXIF data for {filename}:")
                for tag_id, value in exif_data.items():
                    tag_name = ExifTags.TAGS.get(tag_id, tag_id)
                    print(f"{tag_name} ({tag_id}): {value}")
                print('-' * 50)
            else:
                print(f"No EXIF data found for {filename}")
                print('-' * 50)

if __name__ == "__main__":
    # Define the directory path
    directory_path = "/your_path/images"
    # Print EXIF data
    print_exif_data(directory_path)


This is an example result:


EXIF data for IMG_5602.JPG:
ResolutionUnit (296): 2
ExifOffset (34665): 224
Make (271): Apple
Model (272): iPhone 12 Pro
Software (305): 16.6.1
Orientation (274): 6
DateTime (306): 2023:09:18 16:32:55
YCbCrPositioning (531): 1
XResolution (282): 72.0
YResolution (283): 72.0
HostComputer (316): iPhone 12 Pro
ExifVersion (36864): b'0232'
ComponentsConfiguration (37121): b'\x01\x02\x03\x00'
ShutterSpeedValue (37377): 5.915630897377497
DateTimeOriginal (36867): 2023:09:18 16:32:55
DateTimeDigitized (36868): 2023:09:18 16:32:55
ApertureValue (37378): 1.3561438092556088
BrightnessValue (37379): 2.0295000055765606
ExposureBiasValue (37380): 0.0
MeteringMode (37383): 5
Flash (37385): 16
FocalLength (37386): 4.2
ColorSpace (40961): 65535
ExifImageWidth (40962): 4032
FocalLengthIn35mmFilm (41989): 26
SceneCaptureType (41990): 0
OffsetTime (36880): +03:00
OffsetTimeOriginal (36881): +03:00
OffsetTimeDigitized (36882): +03:00
SubsecTimeOriginal (37521): 447
SubsecTimeDigitized (37522): 447
ExifImageHeight (40963): 3024
SensingMethod (41495): 2
ExposureTime (33434): 0.016666666666666666
FNumber (33437): 1.6
SceneType (41729): b'\x01'
ExposureProgram (34850): 2
ISOSpeedRatings (34855): 160
ExposureMode (41986): 0
FlashPixVersion (40960): b'0100'
WhiteBalance (41987): 0
LensSpecification (42034): (4.2, 4.2, 1.6, 1.6)
LensMake (42035): Apple
LensModel (42036): iPhone 12 Pro back camera 4.2mm f/1.6
MakerNote (37500): b"Apple iOS\x00\x00\x01MM\x00,\x00\x01\x00\t\x00\x00\x00\x0....


This is code to update the EXIF:


from PIL import Image
from PIL.ExifTags import TAGS
# pip install piexif -- used below to write the EXIF back. Note: PIL.ExifTags
# has no TAGS_V2, and plain Pillow has no convenient way to write Exif
# sub-IFD tags such as ExifImageWidth/Height, hence piexif.
import piexif

def update_exif_dimensions(image_path):
    # Open the image
    img = Image.open(image_path)
    # Get EXIF data
    exif_data = img._getexif()
    # Convert it to a dictionary for easier inspection
    exif_dict = {TAGS.get(k, k): v for k, v in exif_data.items()}
    # Update ExifImageWidth and ExifImageHeight with actual dimensions
    exif_dict['ExifImageWidth'] = img.width
    exif_dict['ExifImageHeight'] = img.height
    # Update the EXIF data in the image (completion via piexif; the original
    # post left this step as a comment)
    piexif_dict = piexif.load(image_path)
    piexif_dict["Exif"][piexif.ExifIFD.PixelXDimension] = img.width
    piexif_dict["Exif"][piexif.ExifIFD.PixelYDimension] = img.height
    # Save the image with updated EXIF data
    img.save(image_path, exif=piexif.dump(piexif_dict))

# Update for a specific image
image_path = "path/to/your/image.jpg"  # Replace with the actual path to your image
update_exif_dimensions(image_path)


Thank you.




Convert your OpenCV camera calibration YAML file to an OpenSfM camera.json file

Refer to the code:


from PIL import Image
import json
import yaml
import os
import argparse

def get_image_dimensions(image_path):
    with Image.open(image_path) as img:
        return img.size

def convert_yaml_to_opensfm_json(yaml_file, json_file, image_path):
    image_width, image_height = get_image_dimensions(image_path)

    with open(yaml_file, 'r') as f:
        calibration_data = yaml.safe_load(f)

    # Extract the camera matrix and distortion coefficients
    camera_matrix = calibration_data['camera_matrix']
    dist_coeff = calibration_data['dist_coeff']

    # Compute the normalized focal length
    focal_normalized = camera_matrix[0][0] / image_width

    # Prepare the JSON data
    json_data = {
        f"custom_camera {image_width} {image_height} perspective 0.0": {
            "projection_type": "perspective",
            "width": image_width,
            "height": image_height,
            "focal": focal_normalized,
            "k1": dist_coeff[0][0],
            "k2": dist_coeff[0][1],
        }
    }

    # Write the JSON data to file
    with open(json_file, 'w') as f:
        json.dump(json_data, f, indent=4)

def main():
    # Completion: the original post omitted the argument parsing
    parser = argparse.ArgumentParser(description='Convert an OpenCV calibration YAML to an OpenSfM camera.json')
    parser.add_argument('yaml_file')
    parser.add_argument('json_file')
    parser.add_argument('image_path', help='an image from the calibrated camera, used for width/height')
    args = parser.parse_args()
    convert_yaml_to_opensfm_json(args.yaml_file, args.json_file, args.image_path)

if __name__ == '__main__':
    main()

Reference:



Thank you.



Python print exponential notation

Refer to the code:


x = 0.003
formatted_x = "{:.1e}".format(x)
print(formatted_x) # Output will be "3.0e-03"
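
The same formatting works inline with an f-string, and the precision is adjustable:

x = 0.003
print(f"{x:.1e}")   # 3.0e-03
print(f"{x:.3e}")   # 3.000e-03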