9/27/2023

9/26/2023

QR detector python code

 refer to code:


.

import cv2
#pip install pyzbar
from pyzbar.pyzbar import decode

def scan_qr_code():
    """Continuously scan webcam frames for QR codes and preview the result.

    Each detected code is annotated in the frame: the decoded text is drawn
    at the top-left of its bounding rectangle and the four polygon corners
    are marked with filled green dots. Press 'q' in the window to quit.
    """
    cap = cv2.VideoCapture(0)  # default camera
    cap.set(3, 640)            # frame width
    cap.set(4, 480)            # frame height

    while True:
        success, img = cap.read()
        if not success:
            print("Failed to grab frame")
            break

        # Decode every barcode/QR code visible in this frame.
        for barcode in decode(img):
            text = barcode.data.decode('utf-8')
            corners = barcode.polygon
            # Exactly four polygon points -> treat it as a QR code.
            if len(corners) == 4:
                rect = barcode.rect
                cv2.putText(img, text, (rect[0], rect[1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 255), 2)
                for corner in corners:
                    cv2.circle(img, (corner[0], corner[1]), 5,
                               (0, 255, 0), cv2.FILLED)

        cv2.imshow('QR Code Scanner', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # 'q' quits the loop
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    scan_qr_code()

..


you need to install pyzbar


Thank you.

www.marearts.com

๐Ÿ™‡๐Ÿป‍♂️




9/23/2023

python code to convert heic (Mac image) to jpeg

 refer to code:


.

# pip install pyheif pillow
# brew install libheif



import os
import subprocess

# Batch-convert every HEIC image in `input_folder` to JPEG in `output_folder`
# by shelling out to the `heif-convert` tool that ships with libheif.
input_folder = "./input/path"
output_folder = "./output/path"

# exist_ok avoids the check-then-create race of `if not exists: makedirs`.
os.makedirs(output_folder, exist_ok=True)

for filename in os.listdir(input_folder):
    # Case-insensitive match so ".heic", ".HEIC", ".Heic", ... are all handled
    # (the original only matched the two exact spellings).
    if filename.lower().endswith(".heic"):
        heic_path = os.path.join(input_folder, filename)
        jpeg_path = os.path.join(output_folder, os.path.splitext(filename)[0] + ".jpg")

        # Run heif-convert to convert the HEIC file to JPEG (best-effort:
        # a failed conversion is reported by the tool but does not abort the loop).
        subprocess.run(["heif-convert", heic_path, jpeg_path])

print("Conversion complete!")


..


Thank you.



9/22/2023

Download Youtube Video, url to local video file.

The code needs two YouTube downloader packages installed.

Those are pytube and youtube_dl.

The code tries one of them and falls back to the other, because each library occasionally fails on certain videos.


Enjoy code:

.

import ssl
from pytube import YouTube
import youtube_dl

#pip install pytube
#pip install youtube_dl


def configure_ssl():
    """Globally disable HTTPS certificate verification.

    Some environments fail YouTube downloads on certificate errors; when the
    private `_create_unverified_context` hook exists, install it as the
    default HTTPS context. Does nothing on builds without that hook.
    """
    unverified = getattr(ssl, "_create_unverified_context", None)
    if unverified is not None:
        ssl._create_default_https_context = unverified

def pytube_download(video_url):
    """Download *video_url* at the highest available resolution via pytube.

    Returns:
        bool: True on success, False on any failure. Errors are printed,
        never raised, so callers can fall back to another downloader.
    """
    try:
        stream = YouTube(video_url).streams.get_highest_resolution()
        print(f"Downloading: {stream.title}")
        stream.download()
        print("Video downloaded successfully using pytube.")
        return True
    except Exception as e:
        print(f"An error occurred while downloading the video with pytube: {e}")
        return False

def download_youtube_video(video_url):
    """Download *video_url* with youtube-dl at the best available quality.

    The output filename follows '%(title)s-%(id)s.%(ext)s'; progress is
    reported through the module-level `hook`. Errors are printed, not raised.
    """
    options = {
        'format': 'best',
        'outtmpl': '%(title)s-%(id)s.%(ext)s',  # Set the output filename format
        'progress_hooks': [hook],
    }

    try:
        with youtube_dl.YoutubeDL(options) as downloader:
            downloader.download([video_url])
    except Exception as e:
        print(f"An error occurred while downloading the video with youtube-dl: {e}")
    else:
        print("Video downloaded successfully using youtube-dl.")

def hook(d):
    """youtube-dl progress hook: rewrite one line with the percent downloaded."""
    if d['status'] == 'downloading':
        # '\r' keeps the progress on a single, self-updating console line.
        print(d['_percent_str'], end="\r")

def main(video_url):
    """Download *video_url*: try pytube first, fall back to youtube-dl."""
    configure_ssl()
    if not pytube_download(video_url):
        download_youtube_video(video_url)


# Example usage:
if __name__ == "__main__":
    video_url = 'https://youtu.be/MareArts'  # Add your video url here
    main(video_url)

..


Thank you.

www.marearts.com

๐Ÿ™‡๐Ÿป‍♂️

9/19/2023

3D mesh generation from 3D point clouds, using Open3D

 refer to code


.

import open3d as o3d
import numpy as np
import os

def load_point_cloud(file_path):
    """Read and return the point cloud stored at *file_path* (e.g. a .ply)."""
    print("Loading point cloud...")
    return o3d.io.read_point_cloud(file_path)

def estimate_normals(pcd):
    """Estimate per-point normals in place and return the same cloud.

    Uses a hybrid KD-tree search: neighbours within radius 0.1, capped at 30.
    """
    print("Estimating normals...")
    search = o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30)
    pcd.estimate_normals(search_param=search)
    return pcd

def remove_invalid_normals(pcd):
    """Return a new cloud keeping only points whose normal vector is non-zero.

    Zero-length normals (points where estimation failed) would break the
    Poisson reconstruction step downstream.
    """
    print("Removing invalid normals...")
    lengths = np.linalg.norm(np.asarray(pcd.normals), axis=1)
    keep = np.flatnonzero(lengths != 0)
    return pcd.select_by_index(keep)

def poisson_reconstruction(pcd):
    """Build a triangle mesh from *pcd* via Poisson reconstruction (depth 8).

    The per-vertex density array returned by Open3D is discarded.
    """
    print("Performing Poisson surface reconstruction...")
    mesh, _densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
        pcd, depth=8)
    return mesh

def simplify_mesh(mesh):
    """Return *mesh* decimated down to roughly 10,000 triangles."""
    print("Simplifying mesh...")
    return mesh.simplify_quadric_decimation(target_number_of_triangles=10000)

def save_mesh(mesh, file_path):
    """Write *mesh* to *file_path*.

    Fix: the original printed the "saved" confirmation *before* calling the
    writer, so the message appeared even when the write had not happened yet
    (or was about to fail). Print only after the write call returns.
    """
    o3d.io.write_triangle_mesh(file_path, mesh)
    print(f"Mesh saved to '{file_path}'")

def main():
    """Pipeline: load cloud -> normals -> Poisson mesh -> simplify -> save."""
    file_path = "/path/3d_cloud.ply"
    cloud = load_point_cloud(file_path)
    cloud = estimate_normals(cloud)
    cloud = remove_invalid_normals(cloud)
    mesh = simplify_mesh(poisson_reconstruction(cloud))
    # The mesh is written next to the input cloud as 'mesh.ply'.
    mesh_file = os.path.join(os.path.dirname(file_path), 'mesh.ply')
    save_mesh(mesh, mesh_file)


if __name__ == "__main__":
    main()

..


install open3d using pip install open3d


Thank you.

www.marearts.com

๐Ÿ™‡๐Ÿป‍♂️

print image exif python code

 refer to code:


.

from PIL import Image, ExifTags
import os

def print_exif_data(directory_path):
    """
    Print the EXIF data of every JPEG image in the given directory.

    Args:
        directory_path (str): Path to the directory containing JPEG images.
    """
    # Loop through all files in the directory (JPEGs matched case-insensitively).
    for filename in os.listdir(directory_path):
        if filename.lower().endswith(('.jpg', '.jpeg')):
            filepath = os.path.join(directory_path, filename)
            image = Image.open(filepath)
            # _getexif() yields {numeric tag id: value}, or None when absent.
            exif_data = image._getexif()
            if exif_data is not None:
                # Fix: the posted code printed the literal "(unknown)" here;
                # the example output shows the filename, which is what we print.
                print(f"EXIF data for {filename}:")
                for tag_id, value in exif_data.items():
                    # Map the numeric tag id to a readable name when known.
                    tag_name = ExifTags.TAGS.get(tag_id, tag_id)
                    print(f"{tag_name} ({tag_id}): {value}")
                print('-' * 50)
            else:
                print(f"No EXIF data found for {filename}")
                print('-' * 50)

if __name__ == "__main__":
    # Point this at a folder of JPEGs and dump their EXIF tags.
    directory_path = "/your_path/images"
    print_exif_data(directory_path)

..


this is example result 

.




EXIF data for IMG_5602.JPG:
ResolutionUnit (296): 2
ExifOffset (34665): 224
Make (271): Apple
Model (272): iPhone 12 Pro
Software (305): 16.6.1
Orientation (274): 6
DateTime (306): 2023:09:18 16:32:55
YCbCrPositioning (531): 1
XResolution (282): 72.0
YResolution (283): 72.0
HostComputer (316): iPhone 12 Pro
ExifVersion (36864): b'0232'
ComponentsConfiguration (37121): b'\x01\x02\x03\x00'
ShutterSpeedValue (37377): 5.915630897377497
DateTimeOriginal (36867): 2023:09:18 16:32:55
DateTimeDigitized (36868): 2023:09:18 16:32:55
ApertureValue (37378): 1.3561438092556088
BrightnessValue (37379): 2.0295000055765606
ExposureBiasValue (37380): 0.0
MeteringMode (37383): 5
Flash (37385): 16
FocalLength (37386): 4.2
ColorSpace (40961): 65535
ExifImageWidth (40962): 4032
FocalLengthIn35mmFilm (41989): 26
SceneCaptureType (41990): 0
OffsetTime (36880): +03:00
OffsetTimeOriginal (36881): +03:00
OffsetTimeDigitized (36882): +03:00
SubsecTimeOriginal (37521): 447
SubsecTimeDigitized (37522): 447
ExifImageHeight (40963): 3024
SensingMethod (41495): 2
ExposureTime (33434): 0.016666666666666666
FNumber (33437): 1.6
SceneType (41729): b'\x01'
ExposureProgram (34850): 2
ISOSpeedRatings (34855): 160
ExposureMode (41986): 0
FlashPixVersion (40960): b'0100'
WhiteBalance (41987): 0
LensSpecification (42034): (4.2, 4.2, 1.6, 1.6)
LensMake (42035): Apple
LensModel (42036): iPhone 12 Pro back camera 4.2mm f/1.6
MakerNote (37500): b"Apple iOS\x00\x00\x01MM\x00,\x00\x01\x00\t\x00\x00\x00\x0....

..


this is code to update exif

.

from PIL import Image

def update_exif_dimensions(image_path):
    """Rewrite ExifImageWidth/ExifImageHeight to the image's real pixel size.

    Fixes two defects in the posted snippet: it imported ``TAGS_V2``, which
    does not exist in ``PIL.ExifTags`` (ImportError on load), and it mutated
    the plain dict returned by the private ``_getexif()``, which Pillow never
    writes back on save. This version uses the public ``getexif()`` mapping
    and passes it to ``save(exif=...)`` so the change is actually persisted.

    Args:
        image_path (str): Path of the image; it is re-saved in place.
    """
    img = Image.open(image_path)
    exif = img.getexif()  # mutable Exif mapping keyed by numeric tag id
    # 40962 = ExifImageWidth, 40963 = ExifImageHeight.
    # NOTE(review): strictly these tags belong in the Exif sub-IFD; most
    # readers accept them here too — confirm against your consumers.
    exif[40962] = img.width
    exif[40963] = img.height
    # Re-saving a JPEG re-encodes the pixel data (minor generational loss).
    img.save(image_path, exif=exif)

# Update for a specific image
image_path = "path/to/your/image.jpg"  # Replace with the actual path to your image
update_exif_dimensions(image_path)


..


Thank you.

๐Ÿ™‡๐Ÿป‍♂️

www.marearts.com

9/14/2023

Convert your OpenCV camera calibration YAML file to an OpenSfM camera.json file

 refer to code:


.

from PIL import Image
import json
import yaml
import os
import argparse

def get_image_dimensions(image_path):
    """Return (width, height) in pixels of the image at *image_path*."""
    with Image.open(image_path) as image:
        return image.size

def convert_yaml_to_opensfm_json(yaml_file, json_file, image_path):
    """Translate an OpenCV calibration YAML into an OpenSfM camera JSON.

    Reads `camera_matrix` and `dist_coeff` from *yaml_file*, normalizes the
    focal length by the pixel width of *image_path*, and writes a single
    perspective-camera entry to *json_file*.
    """
    width, height = get_image_dimensions(image_path)

    with open(yaml_file, 'r') as f:
        calibration = yaml.safe_load(f)

    intrinsics = calibration['camera_matrix']  # 3x3 camera matrix
    distortion = calibration['dist_coeff']     # distortion coefficient row

    # OpenSfM expects the focal length normalized by the image width.
    focal_normalized = intrinsics[0][0] / width

    camera_key = f"custom_camera {width} {height} perspective 0.0"
    payload = {
        camera_key: {
            "projection_type": "perspective",
            "width": width,
            "height": height,
            "focal": focal_normalized,
            # First two radial distortion terms only (k1, k2).
            "k1": distortion[0][0],
            "k2": distortion[0][1],
        }
    }

    with open(json_file, 'w') as f:
        json.dump(payload, f, indent=4)

def main():
    """Example: convert calibration.yaml plus a sample image to camera JSON."""
    convert_yaml_to_opensfm_json(
        yaml_file="calibration.yaml",
        json_file="./camera_models.json",
        image_path="IMG_5306.JPG",
    )

if __name__ == '__main__':
    main()

..


reference :

https://github.com/mapillary/OpenSfM/issues/95

https://opensfm.org/docs/geometry.html#camera-models


Thank you.

www.marearts.com

๐Ÿ™‡๐Ÿป‍♂️


python print exponential notation

 refer to code:


-

# Format a float in scientific notation with one digit after the point.
x = 0.003
formatted_x = f"{x:.1e}"
print(formatted_x)  # Output will be "3.0e-03"

--




9/13/2023

print docker memory usage size and image size on command line

Print docker container memory usage

.

docker stats --no-stream --format "table {{.Container}}\t{{.MemUsage}}"
CONTAINER MEM USAGE / LIMIT
df1db3352f5c 62.85MiB / 7.581GiB
e225c0866cef 778.8MiB / 7.581GiB
8e40a961b59d 1.121GiB / 7.581GiB
f66e33681593 173MiB / 7.581GiB

..

Print image size

.

docker ps --format '{{.Image}}' | uniq | xargs -I {} docker image ls --format "table {{.Repository}}\t{{.Size}}" {}
REPOSITORY SIZE
5978.com/fast-api 643MB
REPOSITORY SIZE
5978.com/recognition 2.52GB
REPOSITORY SIZE
5978.com/detector 746MB

..


.

All together 

#!/bin/bash
# Report the resource footprint of the currently running containers.

# 1) One-shot snapshot (--no-stream) of live memory usage per container.
echo "Memory Usage of Running Containers:"
docker stats --no-stream --format "table {{.Container}}\t{{.MemUsage}}"
echo ""

# 2) On-disk image size for each distinct image backing a running container.
echo "Image Size of Running Containers:"
docker ps --format '{{.Image}}' | uniq | xargs -I {} docker image ls --format "table {{.Repository}}\t{{.Size}}" {}

..


Thank you.

www.marearts.com

๐Ÿ™‡๐Ÿป‍♂️

OpenCV Camera Calibration source code

refer to code:

.

import numpy as np
import cv2
import glob
import yaml
from icecream import ic
import os

def calibrate_camera(images, chess_box_scale_mm):
    """Calibrate a camera from photos of a chessboard with 6x9 inner corners.

    For every image where the pattern is found, the refined corners are drawn
    and the annotated frame is saved next to the source as a .png, then shown
    for 500 ms.

    Args:
        images: list of image file paths.
        chess_box_scale_mm: edge length of one chessboard square, in mm.

    Returns:
        (ret, mtx, dist, rvecs, tvecs) as produced by cv2.calibrateCamera.

    Raises:
        Exception: if *images* is empty or no chessboard was ever detected.
    """
    # Termination criteria for the sub-pixel corner refinement.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # Object points for one view: the 6x9 corner grid on the Z=0 plane,
    # scaled from grid units to millimetres.
    objp = np.zeros((9 * 6, 3), np.float32)
    objp[:, :2] = np.mgrid[0:6, 0:9].T.reshape(-1, 2) * chess_box_scale_mm

    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in the image plane

    if not images:
        raise Exception("No images found in the calibration directory.")

    for fname in images:
        ic(fname)
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        ret, corners = cv2.findChessboardCorners(gray, (6, 9), None)
        ic(ret)

        if ret == True:
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners2)

            # Visualize the detection and save it alongside the source as .png.
            cv2.drawChessboardCorners(img, (6, 9), corners2, ret)
            file_root, _file_ext = os.path.splitext(os.path.basename(fname))
            save_path = os.path.join(os.path.dirname(fname), f"{file_root}.png")
            cv2.imwrite(save_path, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()

    if not objpoints or not imgpoints:
        raise Exception("Chessboard corners not found in any images.")

    # `gray` still holds the last image processed; its shape supplies the
    # sensor resolution expected by calibrateCamera.
    return cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)

def main():
    """Calibrate from ./path/to/images/*.JPG, save YAML, preview undistortion."""
    path = "./path/to/images/"
    images = glob.glob(f'{path}/*.JPG')

    # One chessboard square on this board measures 7.5 mm.
    ret, mtx, dist, rvecs, tvecs = calibrate_camera(images, 7.5)

    print("Camera matrix: \n", mtx)
    print("Distortion coefficients: \n", dist)

    # Persist the intrinsics next to the images as YAML.
    data = {
        'camera_matrix': np.asarray(mtx).tolist(),
        'dist_coeff': np.asarray(dist).tolist(),
    }
    with open(f"{path}/calibration.yaml", "w") as f:
        yaml.dump(data, f)

    # Undistort the first calibration image as a visual sanity check.
    img = cv2.imread(images[0])
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)

    # Crop to the valid region-of-interest reported by getOptimalNewCameraMatrix.
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    cv2.imshow('origin Image', img)
    cv2.imshow('Undistorted Image', dst)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()

.. 


Here is a chessboard which has 10x7 squares.


The .png files written to the same folder are the images for which pattern finding succeeded.

yaml file will be generated in image folder for camera intrinsic params.


Thank you.

www.marearts.com

๐Ÿ™‡๐Ÿป‍♂️


9/11/2023

docker Multi-Architecture Builds



Multi-Architecture Builds

Docker's Buildx extension allows you to build multi-architecture images. You can specify multiple target architectures and create a single image that works on both. Here's a simplified example:

This will create an image that can run on both Intel (amd64) and ARM (arm64) based systems.


# Initialize Buildx (one-time operation)
docker buildx create --use

# Build multi-architecture image
docker buildx build --platform linux/amd64,linux/arm64 -t your-image-name:tag .

comparing t4g.medium, t3a.medium, and t3.medium

 



the t4g.medium, t3a.medium, and t3.medium are all part of Amazon's EC2 T-series instances, which are designed to provide a baseline level of CPU performance with the ability to burst above the baseline when needed. However, they differ in the underlying processor architecture and some other characteristics. Below is a comparative table:

| Instance Type | CPU Type | vCPUs | Memory (GiB) | Processor | Network Bandwidth | EBS Bandwidth |
|---|---|---|---|---|---|---|
| t4g.medium | ARM-based | 2 | 4 | Graviton2 | Up to 5 Gbps | Up to 3.5 Gbps |
| t3a.medium | AMD-based | 2 | 4 | AMD EPYC 7000 series | Up to 5 Gbps | Up to 3.5 Gbps |
| t3.medium | Intel-based | 2 | 4 | Intel Xeon Scalable (Skylake and Broadwell options) | Up to 5 Gbps | Up to 3.5 Gbps |

Key Differences:

  1. Processor Architecture:

    • t4g.medium uses ARM-based Graviton2 processors.
    • t3a.medium uses AMD EPYC 7000 series processors.
    • t3.medium uses Intel Xeon Scalable processors.
  2. Price:

    • t3a.medium instances are generally cheaper than t3.medium instances but offer similar performance characteristics.
    • t4g.medium instances are also generally cost-effective due to the efficiency of the Graviton2 processor.
  3. Performance:

    • The ARM-based Graviton2 processors in t4g.medium instances are designed for better power efficiency.
    • Both AMD and Intel options in t3a and t3 are more traditional and have been in use for longer periods, and their performance characteristics are well understood.
  4. Compatibility:

    • Software that is dependent on specific instruction sets might not be compatible with ARM-based processors, so t3 and t3a could be a safer bet for those applications.

For the most current and accurate information, it's always best to consult the official AWS EC2 documentation or pricing pages.