2/26/2024

Dominant frequency extraction.

 



Suppose we have channel × length signal data, e.g., EEG (electroencephalogram) recordings or other time series.

We might wonder which frequencies (Hz) dominate the signal.

The code below answers this question by returning the top 5 dominant frequencies.

.

import numpy as np
from collections import Counter
from scipy.signal import welch

def identify_dominant_frequencies(signal, fs, top_n=5):
    # Estimate the power spectral density with Welch's method
    freqs, psd = welch(signal, fs)
    # Take the indices of the top_n largest PSD values
    peak_indices = np.argsort(psd)[-top_n:]
    dominant_freqs = freqs[peak_indices]
    return dominant_freqs

..
dominant_freqs = identify_dominant_frequencies(signal, fs, top_n)
dominant_freqs_summary[channel].extend(dominant_freqs) # Append the frequencies
..
median_dominant_freqs = {channel: np.median(freqs) if freqs else None for channel, freqs in dominant_freqs_summary.items()}
..
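For context, a minimal sketch of how these snippets fit together, assuming a (channels × samples) array; signal_data, fs, and the channel loop are assumptions, not the original pipeline:

.

# signal_data: assumed array of shape (n_channels, n_samples); fs: sampling rate in Hz
dominant_freqs_summary = {channel: [] for channel in range(signal_data.shape[0])}

for channel in range(signal_data.shape[0]):
    signal = signal_data[channel]
    dominant_freqs = identify_dominant_frequencies(signal, fs, top_n=5)
    dominant_freqs_summary[channel].extend(dominant_freqs)  # Append the frequencies

..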

def get_top_n_frequencies(freq_list, top_n=5, bin_width=1.0):
    # Bin frequencies into discrete intervals
    binned_freqs = np.round(np.array(freq_list) / bin_width) * bin_width
    # Count the occurrences of each binned frequency
    freq_counter = Counter(binned_freqs)
    # Find the top N most common binned frequencies
    top_freqs = freq_counter.most_common(top_n)
    # Extract just the frequencies from the (freq, count) tuples
    top_freqs = [freq for freq, count in top_freqs]
    return top_freqs

# Initialize a dictionary to store the top 5 frequencies for each channel
top_5_freqs_all_channels = {}
bin_width = 1.0

# Calculate the top 5 frequencies for each channel
for channel, freqs in dominant_freqs_summary.items():
    top_5_freqs = get_top_n_frequencies(freqs, top_n=5, bin_width=bin_width)
    top_5_freqs_all_channels[channel] = top_5_freqs
    print(f"{channel}: Top 5 Frequencies = {top_5_freqs}")

..


2/18/2024

GroupShuffleSplit, sklearn

 

The data contains repeated eeg_id values, but we can split it into train and validation sets based on that id using GroupShuffleSplit, so that all rows sharing an eeg_id stay in the same subset.

Refer to code:

.



import pandas as pd
from sklearn.model_selection import GroupShuffleSplit

# Load your dataset
train = pd.read_csv('./train.csv')

# Display the shape of the dataset
print("Dataset shape:", train.shape)

# Count unique eeg_id values
unique_eeg_id_count = train['eeg_id'].nunique()
print("Unique eeg_id count:", unique_eeg_id_count)

# Initialize the GroupShuffleSplit
gss = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=42)

# Split the dataset based on the 'eeg_id' to ensure group cohesion
for train_idx, val_idx in gss.split(train, groups=train['eeg_id']):
    train_set = train.iloc[train_idx]
    val_set = train.iloc[val_idx]

# Now, train_set and val_set are split according to unique eeg_ids,
# ensuring that all records of a single eeg_id are in the same subset
print("Training set shape:", train_set.shape)
print("Validation set shape:", val_set.shape)

..
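To confirm that no eeg_id leaks across the split, a quick sanity check (a sketch, reusing train_set and val_set from above):

.

# The intersection of eeg_ids between the two subsets should be empty
overlap = set(train_set['eeg_id']) & set(val_set['eeg_id'])
print("Overlapping eeg_ids:", len(overlap))  # Expect 0

..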

Thank you.

πŸ™‡πŸ»‍♂️

2/15/2024

Interpolating a 1-D data list, e.g., [1, 2, 3, 4] -> [1. , 1.5, 2. , 2.5, 3. , 3.5, 4. ]

 

Expanding and interpolating n × m data to n × (m+l).

.

import numpy as np
from scipy.interpolate import interp1d

data = np.array([[1, 2, 3, 4], [4, 3, 2, 1]])
data_len = 7
# Original and target sample positions on [0, 1]
x = np.linspace(0, 1, data.shape[-1])
x2 = np.linspace(0, 1, data_len)
# interp1d interpolates along the last axis, so each row is resampled independently
f = interp1d(x, data)
data = f(x2)

..

The required imports (numpy and scipy.interpolate) are included at the top of the snippet.

Thank you!!

This is the output:

array([[1. , 1.5, 2. , 2.5, 3. , 3.5, 4. ],
       [4. , 3.5, 3. , 2.5, 2. , 1.5, 1. ]])
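If a smoother curve is needed, interp1d also accepts a kind argument; a minimal sketch reusing x and x2 from above (cubic needs at least 4 sample points):

.

data = np.array([[1, 2, 3, 4], [4, 3, 2, 1]])
f_cubic = interp1d(x, data, kind='cubic')  # cubic spline instead of the default linear
data_smooth = f_cubic(x2)

..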

2/10/2024

PyTorch Lightning: save a .pth alongside the .ckpt for the top k checkpoints

 


It's a custom checkpoint callback that saves a plain .pth state_dict alongside each top-k .ckpt file.

.

import os
import torch
from pytorch_lightning.callbacks import ModelCheckpoint

class CustomModelCheckpoint(ModelCheckpoint):
    def __init__(self, save_top_k_pth=0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.save_top_k_pth = save_top_k_pth
        # Keep track of saved .pth files to manage the top K
        self.saved_pth_files = []

    def on_save_checkpoint(self, trainer, pl_module, checkpoint):
        # Construct the checkpoint path manually (simplified example)
        epoch = trainer.current_epoch
        metric_score = "{:.2f}".format(trainer.callback_metrics['val_loss'].item())
        filename = f"model-epoch={epoch}-val_loss={metric_score}.pth"
        dirpath = self.dirpath if self.dirpath else trainer.default_root_dir
        pth_path = os.path.join(dirpath, filename)

        torch.save(pl_module.state_dict(), pth_path)
        self.saved_pth_files.append(pth_path)

        # Manage the top K saved .pth files
        while len(self.saved_pth_files) > self.save_top_k_pth:
            oldest_pth = self.saved_pth_files.pop(0)
            if os.path.exists(oldest_pth):
                os.remove(oldest_pth)

        # Ensure the superclass hook still runs
        return super().on_save_checkpoint(trainer, pl_module, checkpoint)

..


Call it in the training process:

.

from pytorch_lightning import Trainer, loggers

logger = loggers.TensorBoardLogger(save_dir="lightning_logs", name=config.model_version)

# Define the checkpoint callback
checkpoint_callback = CustomModelCheckpoint(
    monitor='val_loss',
    dirpath=f"{logger.save_dir}/{logger.name}/version_{logger.version}",
    filename='model-{epoch:02d}-{val_loss:.2f}',
    save_top_k=2,       # Keep the top 2 .ckpt checkpoints
    save_top_k_pth=2,   # Also keep the top 2 .pth files
    mode='min'
)

trainer = Trainer(max_epochs=config.num_epochs, accelerator='gpu',
                  devices=1, callbacks=[checkpoint_callback],
                  logger=logger, log_every_n_steps=10)

..



The saved top-k files (.ckpt and .pth) show up in the output folder.
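To reuse a saved .pth later, load the state_dict back into the model; a sketch, where MyLightningModule and the filename are assumptions:

.

# Hypothetical example of restoring the weights from a saved .pth
model = MyLightningModule()
state_dict = torch.load('model-epoch=7-val_loss=0.42.pth')
model.load_state_dict(state_dict)
model.eval()

..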

Thank you.

πŸ™‡πŸ»‍♂️

2/08/2024

Git: find large files that were committed.

Find large files in a Git repository:

.

git rev-list --objects --all | \
git cat-file --batch-check='%(objecttype) %(objectname) %(objectsize) %(rest)' | \
awk '$3 > 100*1024*1024' | sort -k3nr


..

  • git rev-list --objects --all lists all objects in the repository.
  • git cat-file --batch-check='...' checks the type, size, and other details of these objects.
  • awk '$3 > 100*1024*1024' filters objects larger than 100 MB (note: 1024*1024 bytes = 1MB).
  • sort -k3nr sorts these objects by size in descending order.

πŸ™‡πŸ»‍♂️

2/06/2024

iOS swift dictionary example code

 .


// Existing dictionary of ages

var ages: [String: Int] = ["John": 30, "Emma": 25]


// Adding a new dictionary with String keys and String values

var occupations: [String: String] = ["John": "Engineer", "Emma": "Doctor"]


// Adding a new key-value pair to the occupations dictionary

occupations["Mike"] = "Teacher"


// Updating a value for a key in the occupations dictionary

occupations["Emma"] = "Senior Doctor" // Emma got a promotion!


// Accessing a value for a given key in the occupations dictionary

if let occupation = occupations["John"] {

    print("John's occupation is \(occupation).")

} else {

    print("John's occupation is not available.")

}


// Merging the ages and occupations dictionaries

// Assuming you want to create a summary for each person

for (name, age) in ages {

    if let occupation = occupations[name] {

        print("\(name) is \(age) years old and works as a \(occupation).")

    } else {

        print("\(name) is \(age) years old.")

    }

}


// Removing a key-value pair from the occupations dictionary

occupations["Mike"] = nil // Mike's occupation is removed


// Iterating over all key-value pairs in the occupations dictionary

for (name, occupation) in occupations {

    print("\(name) works as a \(occupation).")

}


// Checking the count of elements in both dictionaries

print("There are \(ages.count) people in the ages dictionary.")

print("There are \(occupations.count) occupations listed.")

..


Refer to the code above; I hope it gives you some useful ideas.

Thank you.

πŸ™‡πŸ»‍♂️

2/05/2024

Download all YouTube videos in playlist (python)

pip install pytube

Replace the playlist URL in the string below.

.

from pytube import Playlist, YouTube

def download_video(url, max_attempts=3):
    for attempt in range(1, max_attempts + 1):
        try:
            yt = YouTube(url)
            video = yt.streams.get_highest_resolution()
            video.download()
            print(f"Downloaded: {yt.title}")
            break
        except Exception as e:
            print(f"Error downloading video (attempt {attempt}): {url}\n{e}")
            if attempt == max_attempts:
                print(f"Failed to download video after {max_attempts} attempts: {url}")

# Replace with your playlist URL
playlist_url = 'https://www.youtube.com/playlist?list=xxx'

playlist = Playlist(playlist_url)

# Fetch video URLs
video_urls = playlist.video_urls

# Download each video
for url in video_urls:
    download_video(url)

..
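To save the files somewhere specific, Stream.download accepts an output_path argument; a small sketch (the URL and folder name are placeholders):

.

from pytube import YouTube

yt = YouTube('https://www.youtube.com/watch?v=xxx')  # placeholder URL
# Download into a chosen folder instead of the current working directory
yt.streams.get_highest_resolution().download(output_path='./downloads')

..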


Thank you.

πŸ™‡πŸ»‍♂️

Swift: Examples of Initializing and Force-Unwrapping Optionals

 String

var nonOptionalString: String = "Hello"
print(nonOptionalString) // Directly prints "Hello"

var optionalString: String? = "Hello"
print(optionalString!) // Forcefully unwraps and prints "Hello"

.

String optional

var optionalString: String? = "Hello"
if let unwrappedString = optionalString {
    print(unwrappedString) // Prints "Hello"
} else {
    print("optionalString was nil")
}

optionalString = nil
if let unwrappedString = optionalString {
    print(unwrappedString)
} else {
    print("optionalString was nil") // Prints "optionalString was nil" because it's now nil
}

.


Int

var nonOptionalInt: Int = 4
print(nonOptionalInt) // Directly prints 4

var optionalInt: Int? = 4
print(optionalInt!) // Forcefully unwraps and prints 4

.


Int optional

var optionalInt: Int? = 4
if let unwrappedInt = optionalInt {
    print(unwrappedInt) // Prints 4
} else {
    print("optionalInt was nil")
}

optionalInt = nil
if let unwrappedInt = optionalInt {
    print(unwrappedInt)
} else {
    print("optionalInt was nil") // Prints "optionalInt was nil" because it's now nil
}

.


Float

var nonOptionalFloat: Float = 3.14
print(nonOptionalFloat) // Directly prints 3.14

var optionalFloat: Float? = 3.14
print(optionalFloat!) // Forcefully unwraps and prints 3.14

.


Double

var nonOptionalDouble: Double = 3.14159
print(nonOptionalDouble) // Directly prints 3.14159

var optionalDouble: Double? = 3.14159
print(optionalDouble!) // Forcefully unwraps and prints 3.14159

.


Bool

var nonOptionalBool: Bool = true
print(nonOptionalBool) // Directly prints true

var optionalBool: Bool? = true
print(optionalBool!) // Forcefully unwraps and prints true

.


An optional type is written by adding '?' to the end of the underlying type, so the value can also hold nil.

Note that nil is not treated as false in an if condition; an optional must be unwrapped first, e.g., with optional binding (if let) or an explicit nil comparison.

Optionals exist because values, such as input from a UI control or the result of a failed conversion, may be absent; making the nil case explicit in the type system prevents runtime errors.


πŸ™‡πŸ»‍♂️



2/01/2024

Get the list of torch packages from a conda installation.


> conda list | grep torch
ffmpeg 4.3 hf484d3e_0 pytorch
libjpeg-turbo 2.0.0 h9bf148f_0 pytorch
pytorch 2.2.0 py3.8_cpu_0 pytorch
pytorch-mutex 1.0 cpu pytorch
torchaudio 2.2.0 py38_cpu pytorch
torchvision 0.17.0 py38_cpu pytorch

1/30/2024

Checking that torch + CUDA are installed correctly

 

 

Run this script:

.

 

import torch
from torch.utils.cpp_extension import CUDAExtension, BuildExtension

def check_cuda_setup():
    cuda_available = torch.cuda.is_available()
    print(f"CUDA available: {cuda_available}")

    if cuda_available:
        cuda_version = torch.version.cuda
        print(f"CUDA version (PyTorch): {cuda_version}")

    try:
        # Attempt to create a CUDA extension
        ext = CUDAExtension(
            name='test_ext',
            sources=[]
        )
        print("CUDAExtension can be created successfully.")
    except Exception as e:
        print(f"Error creating CUDAExtension: {e}")

    try:
        # Attempt to create a BuildExtension object
        build_ext = BuildExtension()
        print("BuildExtension can be created successfully.")
    except Exception as e:
        print(f"Error creating BuildExtension: {e}")

if __name__ == "__main__":
    check_cuda_setup()


..

If it prints 'CUDA available: False', then you need to fix your system's CUDA setup.
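For a quick extra check of the visible GPUs, a minimal sketch (only meaningful when CUDA is available):

.

import torch

if torch.cuda.is_available():
    print(torch.cuda.device_count())      # Number of visible GPUs
    print(torch.cuda.get_device_name(0))  # Name of the first GPU

..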

Thank you.


1/16/2024

Fixing: from pytorch3d.renderer import ... -> ModuleNotFoundError: No module named 'pytorch3d'

1. Requirements

conda create -n pytorch3d python=3.9
conda activate pytorch3d
conda install pytorch=1.13.0 torchvision pytorch-cuda=11.6 -c pytorch -c nvidia
conda install -c fvcore -c iopath -c conda-forge fvcore iopath

 

conda install -c bottler nvidiacub

Otherwise, build CUB from source:

curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
tar xzf 1.10.0.tar.gz
export CUB_HOME=$PWD/cub-1.10.0


 .

 

2. Installing prebuilt binaries for PyTorch3D

# Anaconda Cloud
conda install pytorch3d -c pytorch3d

 

or 

# Anaconda Cloud
conda install pytorch3d -c pytorch3d-nightly


Or install from a wheel:

pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html



Refer to this page: https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md
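After installation, a quick way to verify the module is importable (a minimal check):

.

import pytorch3d
print(pytorch3d.__version__)

..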


Thank you!



 

1/15/2024

Unreal Engine: create an asset with a Python widget and copy the asset into the game environment.

 refer to code:

.

import unreal
import os

def main_process(input_args):
    # Use a directory within your user's documents or another location you have write access to
    local_directory = "/Users/user/Documents/Unreal Projects/prj_name/Content/prj_name/Scripts"
    # Example usage
    filename = "chamfered_cube.obj"
    file_path = create_chamfered_cube_obj_file(filename, local_directory, 100.0, 0.1)
    imported_asset_path = import_obj_to_unreal(file_path, "/Game/prj_name")
    place_static_mesh_in_world('/Game/prj_name/chamfered_cube', (1000, 1000, 100))

def place_static_mesh_in_world(mesh_asset_path, location, rotation=(0, 0, 0), scale=(1, 1, 1)):
    # Load the Static Mesh asset
    static_mesh = unreal.load_asset(mesh_asset_path, unreal.StaticMesh)
    # Get the current editor world
    editor_world = unreal.EditorLevelLibrary.get_editor_world()
    # Spawn a new StaticMeshActor in the world
    static_mesh_actor = unreal.EditorLevelLibrary.spawn_actor_from_class(
        unreal.StaticMeshActor, location, rotation
    )
    if static_mesh_actor:
        # Access the StaticMeshComponent property and set the static mesh
        static_mesh_component = static_mesh_actor.get_component_by_class(unreal.StaticMeshComponent)
        if static_mesh_component:
            static_mesh_component.set_static_mesh(static_mesh)
            # Set the scale if necessary
            static_mesh_actor.set_actor_scale3d(unreal.Vector(*scale))
            print(f"Placed Static Mesh at location: {location}")
            return static_mesh_actor
        else:
            print("Failed to access StaticMeshComponent.")
            return None
    else:
        print("Failed to place Static Mesh in the world.")
        return None

def import_obj_to_unreal(obj_file_path, unreal_asset_path):
    # Set up the import task
    import_task = unreal.AssetImportTask()
    import_task.filename = obj_file_path               # The full path to the OBJ file on disk
    import_task.destination_path = unreal_asset_path   # The path in Unreal where to import the asset
    import_task.automated = True
    import_task.save = True

    # Set up the import options for Static Mesh
    options = unreal.FbxImportUI()
    # Set various options on the options object here...

    import_task.options = options

    # Execute the import task
    unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([import_task])

    # Return the imported asset path if successful, None otherwise
    return import_task.imported_object_paths[0] if import_task.imported_object_paths else None

def create_chamfered_cube_obj_file(filename, directory, scale=1.0, chamfer_ratio=0.1):
    # Calculate the chamfer size
    chamfer_size = scale * chamfer_ratio
    half_scale = scale / 2
    inner_size = half_scale - chamfer_size

    # Define the vertices for a chamfered cube
    vertices = [
        # Bottom vertices (4 corners)
        f"v {-inner_size} {-inner_size} {-half_scale}", f"v {inner_size} {-inner_size} {-half_scale}",
        f"v {inner_size} {inner_size} {-half_scale}", f"v {-inner_size} {inner_size} {-half_scale}",
        # Top vertices (4 corners)
        f"v {-inner_size} {-inner_size} {half_scale}", f"v {inner_size} {-inner_size} {half_scale}",
        f"v {inner_size} {inner_size} {half_scale}", f"v {-inner_size} {inner_size} {half_scale}",
        # Chamfer vertices on the bottom (4)
        f"v {-half_scale} {-half_scale} {-inner_size}", f"v {half_scale} {-half_scale} {-inner_size}",
        f"v {half_scale} {half_scale} {-inner_size}", f"v {-half_scale} {half_scale} {-inner_size}",
        # Chamfer vertices on the top (4)
        f"v {-half_scale} {-half_scale} {inner_size}", f"v {half_scale} {-half_scale} {inner_size}",
        f"v {half_scale} {half_scale} {inner_size}", f"v {-half_scale} {half_scale} {inner_size}",
    ]

    # Define the faces for a chamfered cube (using the vertex indices)
    faces = [
        # Bottom square
        "f 1 2 3 4",
        # Top square
        "f 5 6 7 8",
        # Side squares (4 sides)
        "f 1 2 6 5", "f 2 3 7 6",
        "f 3 4 8 7", "f 4 1 5 8",
        # Chamfer triangles (8 triangles)
        "f 1 9 2", "f 2 10 3",
        "f 3 11 4", "f 4 12 1",
        "f 5 13 6", "f 6 14 7",
        "f 7 15 8", "f 8 16 5",
        # Chamfer squares (connecting the triangles - 4 squares)
        "f 9 10 14 13", "f 10 11 15 14",
        "f 11 12 16 15", "f 12 9 13 16",
    ]

    # Ensure the directory exists
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Create a full system file path
    file_path = os.path.join(directory, filename)
    # Write vertices and faces to the OBJ file
    with open(file_path, 'w') as file:
        for v in vertices:
            file.write(f"{v}\n")
        for f in faces:
            file.write(f"{f}\n")
    print(f"Chamfered Cube OBJ file created at {file_path}")
    return file_path

def create_cube_obj_file(filename, directory):
    # Create a full system file path
    file_path = os.path.join(directory, filename)
    # Cube vertices and faces
    vertices = [
        "v -0.5 -0.5 -0.5", "v -0.5 -0.5 0.5", "v -0.5 0.5 -0.5", "v -0.5 0.5 0.5",
        "v 0.5 -0.5 -0.5", "v 0.5 -0.5 0.5", "v 0.5 0.5 -0.5", "v 0.5 0.5 0.5"
    ]
    faces = [
        "f 1 3 4 2", "f 5 7 8 6", "f 1 5 6 2", "f 3 7 8 4",
        "f 1 5 7 3", "f 2 6 8 4"
    ]
    # Ensure the directory exists
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Write vertices and faces to the OBJ file
    with open(file_path, 'w') as file:
        for v in vertices:
            file.write(f"{v}\n")
        for f in faces:
            file.write(f"{f}\n")
    print(f"Cube OBJ file created at {file_path}")
    return file_path


..
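A brief note on running it: from the Unreal Editor's Python console (or an Editor Utility Widget), the entry point can be invoked directly; the argument is unused in this sketch:

.

main_process(None)

..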