Added support for different drums on the noise channel

This commit is contained in:
iAmInActions 2024-01-04 22:15:18 +01:00
parent 341271a4b7
commit 80961a5d3f
13 changed files with 123 additions and 12 deletions

1
drums/drum1.txt Normal file

File diff suppressed because one or more lines are too long

BIN
drums/drum1.wav Normal file

Binary file not shown.

1
drums/drum2.txt Normal file

File diff suppressed because one or more lines are too long

BIN
drums/drum2.wav Normal file

Binary file not shown.

1
drums/drum3.txt Normal file
View File

@ -0,0 +1 @@
[0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 1.4296875, 1.421875, 1.421875, 1.421875, 1.421875, 1.4375, 0.9921875, 0.9921875, 0.9921875, 0.9921875, 0.984375, 0.984375, 0.984375, 0.984375, 0.9765625, 0.9765625, 0.9765625, 0.96875, 0.96875, 0.96875, 0.9609375, 0.9609375, 0.9609375, 0.9609375, 0.9609375, 0.9609375, 0.9609375, 0.9609375, 0.9609375, 0.9609375, 0.9609375, 0.9609375, 0.953125, 0.953125, 0.9453125, 0.9453125, 0.9453125, 0.9375, 0.9375, 0.9375, 0.9296875, 0.921875, 0.921875, 0.921875, 0.90625, 0.90625, 0.90625, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.8984375, 0.890625, 0.890625, 0.8828125, 0.8828125, 0.875, 0.875, 0.8671875, 0.8671875, 0.859375, 0.859375, 0.859375, 0.859375, 0.859375, 0.859375, 0.8515625, 0.8515625, 0.8515625, 0.8515625, 0.8515625, 0.8515625, 0.8515625, 0.8515625, 0.84375, 0.84375, 0.8359375, 0.828125, 0.8203125, 0.8203125, 0.8125, 0.8125, 0.8046875, 0.796875, 0.796875, 0.7890625, 0.78125, 0.7734375, 0.765625, 0.75, 0.7421875, 0.734375, 0.7265625, 0.71875, 0.7109375, 0.6953125, 0.6875, 0.671875, 0.6640625, 0.65625, 0.640625, 0.6328125, 0.6171875, 0.609375, 0.59375, 0.578125, 0.5625, 0.546875, 0.5390625, 0.53125, 0.5078125, 0.4921875, 0.484375, 0.4765625, 0.46875, 0.453125, 0.4375, 0.421875, 0.40625, 0.390625, 0.3828125, 0.375, 0.3671875, 0.3515625, 0.34375, 0.328125, 0.3203125, 0.3125, 0.296875, 0.2890625, 0.2734375, 0.265625, 0.25, 
0.2421875, 0.2265625, 0.21875, 0.203125, 0.1953125, 0.1875, 0.171875, 0.1640625, 0.1484375, 0.140625, 0.1328125, 0.125, 0.1015625, 0.0859375, 0.078125, 0.078125, 0.0703125, 0.0546875, 0.046875, 0.03125, 0.015625, 0.015625, 0.0078125, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875, 0.0, 1.9921875, 0.0, 0.0, 1.9921875]

BIN
drums/drum3.wav Normal file

Binary file not shown.

1
drums/drum4.txt Normal file

File diff suppressed because one or more lines are too long

BIN
drums/drum4.wav Normal file

Binary file not shown.

1
drums/drum5.txt Normal file

File diff suppressed because one or more lines are too long

BIN
drums/drum5.wav Normal file

Binary file not shown.

64
drums/wav2drumtxt.py Normal file
View File

@ -0,0 +1,64 @@
import os
import wave
import numpy as np
def save_noise_data(file_path, noise_data):
    """Serialize *noise_data* (a NumPy array) to *file_path* as a list literal.

    Writes ``str(noise_data.tolist())`` — one line of text that the tracker
    later parses back.  Prints a status line on success and an error line on
    failure; never raises.
    """
    try:
        as_text = str(noise_data.tolist())
        with open(file_path, 'w') as out:
            out.write(as_text)
    except Exception as e:
        print(f"Error saving noise data to {file_path}: {e}")
    else:
        print(f"Noise data saved to {file_path}")
def extract_noise_data(file_path, duration_ms=100, sample_rate=44100):
    """Read up to *duration_ms* milliseconds of samples from a mono WAV file.

    Parameters
    ----------
    file_path : str
        Path to the WAV file to read.
    duration_ms : float
        Maximum duration to extract, in milliseconds; clipped to the file's
        actual length.
    sample_rate : int
        Unused (the file's own frame rate is used); kept for backward
        compatibility with existing callers.

    Returns
    -------
    numpy.ndarray or None
        8-bit audio is normalized to floats in [-1, 1); 16-bit audio is
        returned as raw int16 samples (unchanged historical behavior).
        None on error, stereo input, or unsupported sample width.
    """
    try:
        with wave.open(file_path, 'rb') as wav_file:
            num_channels = wav_file.getnchannels()
            sample_width = wav_file.getsampwidth()
            frame_rate = wav_file.getframerate()
            total_frames = wav_file.getnframes()
            if num_channels > 1:
                print("Input file must be mono (single channel).")
                return None
            # Adjust for 8-bit audio (unsigned) vs 16-bit (signed).
            if sample_width == 1:
                dtype = np.uint8
            elif sample_width == 2:
                dtype = np.int16
            else:
                print("Unsupported sample width.")
                return None
            # Use the minimum of the specified duration and the file's length.
            duration_secs = min(duration_ms / 1000.0, total_frames / frame_rate)
            num_frames = int(duration_secs * frame_rate)
            signal = np.frombuffer(wav_file.readframes(num_frames), dtype=dtype)
            if sample_width == 1:
                # Bug fix: center in a float dtype BEFORE dividing.  Doing
                # `signal - 128` directly on a uint8 array wraps modulo 256
                # under NumPy 2.x casting rules (0 -> +1.0, 127 -> +1.9921875
                # instead of -1.0 / -0.0078125).
                signal = (signal.astype(np.float64) - 128.0) / 128.0
            return signal
    except Exception as e:
        print(f"Error: {e}")
        return None
def main():
    """Convert every .wav file in a user-chosen directory to a drum .txt file.

    Prompts for a directory (Enter = current directory) and a maximum
    duration in milliseconds, then extracts the noise data of each WAV file
    and writes it to a .txt file of the same stem.
    """
    directory_path = input("Enter the path to the directory containing WAV files (press Enter for current directory): ").strip() or '.'
    duration_ms = float(input("Enter the maximum duration in milliseconds: "))
    for file_name in os.listdir(directory_path):
        if file_name.endswith(".wav"):
            input_wave_file = os.path.join(directory_path, file_name)
            output_noise_data = extract_noise_data(input_wave_file, duration_ms)
            if output_noise_data is not None:
                # Bug fix: write the .txt next to its source WAV instead of
                # into the current working directory (the original omitted
                # the os.path.join, so any non-default directory scattered
                # output files into the CWD).
                output_file_name = os.path.join(
                    directory_path, os.path.splitext(file_name)[0] + ".txt")
                save_noise_data(output_file_name, output_noise_data)
if __name__ == "__main__":
    main()

View File

@ -1,3 +1,7 @@
Frequency1,Effect1,Frequency2,Effect2,Frequency3,Effect3,Noise,Duration
440,50,247,10,311,70,0,500
440,25,247,10,311,40,5,500
Frequency1,Effect1,Noise,Duration
0,0,0,500
0,0,1,500
0,0,2,500
0,0,3,500
0,0,4,500
0,0,5,500

1 Frequency1 Effect1 Frequency2 Noise Effect2 Duration Frequency3 Effect3
2 440 0 50 0 247 0 10 500 311 70
3 440 0 25 0 247 5 1 10 500 311 40
4 0 0 2 500
5 0 0 3 500
6 0 0 4 500
7 0 0 5 500

View File

@ -2,6 +2,7 @@ import sys
import csv
import numpy as np
import sounddevice as sd
import os
# OffiTracker, the tracker that no one asked for but I made it anyways :3
# Usage: Make a CSV table in Excel or LibreOffice with the following format:
@ -9,13 +10,42 @@ import sounddevice as sd
# You can make as many channels as you want.
# Effect = pulse width from 0 to 100
# Frequency = tone in Hz.
# Noise = noise amplitude from 0 to 10
# Noise:
# - 0 = No extra sound
# - 1 = Bass drum
# - 2 = Kick drum
# - 3 = Click
# - 4 = Snare
# - 5 = Hihat
# Duration = tone duration in ms
# (c) 2024 mueller_minki, Feel free to modify or share.
stop_signal = False
noise_data_cache = {} # Cache to store loaded noise data
def load_noise_data(noise_type, sample_rate):
    """Load pre-rendered drum samples for *noise_type* from drums/drum<N>.txt.

    Each drum file holds a single line containing a list literal of floats
    (as written by wav2drumtxt.py).  Returns the samples scaled by a fixed
    amplitude factor, or None when the file is missing or unparseable.
    *sample_rate* is accepted for interface compatibility but not used.
    """
    import ast  # local import: keeps the fix self-contained

    amplitude_factor = 0.5  # Adjust the amplitude factor as needed
    noise_file_path = os.path.join('drums', f'drum{noise_type}.txt')
    try:
        with open(noise_file_path, 'r') as file:
            # Security fix: the drum files are plain data, so parse them with
            # ast.literal_eval instead of eval() — eval() would execute
            # arbitrary code if a drum file were ever tampered with.
            noise_data = np.array(ast.literal_eval(file.readline()))
        return amplitude_factor * noise_data
    except Exception as e:
        print(f"Error loading noise data from {noise_file_path}: {e}")
        return None
def load_all_noise_data():
    """Fill the module-level cache with drum samples for noise types 1-5."""
    global noise_data_cache
    noise_data_cache.update(
        {drum: load_noise_data(drum, 44100) for drum in range(1, 6)}
    )
def generate_noise(noise_type):
    """Return the cached noise samples for *noise_type*, or None if unknown."""
    if noise_type in noise_data_cache:
        return noise_data_cache[noise_type]
    return None
def play_square_waves(output_stream, frequencies, effects, duration, amplitude=1, noise_amplitude=0, sample_rate=44100):
if stop_signal == True:
global stop_signal
if stop_signal:
output_stream.stop()
else:
num_waves = len(frequencies)
@ -24,19 +54,27 @@ def play_square_waves(output_stream, frequencies, effects, duration, amplitude=1
# Generate and sum square waves for each frequency with corresponding effects
waves = [amplitude * (effect / 100) * np.sign(np.sin(2 * np.pi * freq * t)) for freq, effect in zip(frequencies, effects)]
# Add optional noise channel
# Add optional noise channel based on the noise column values
if noise_amplitude > 0:
noise = noise_amplitude * np.random.uniform(-1, 1, len(t))
waves.append(noise)
noise_type = int(noise_amplitude)
noise = generate_noise(noise_type)
if noise is not None:
# Pad the noise with zeros to match the duration of the other waves
noise = np.concatenate((noise, np.zeros(len(t) - len(noise))))
waves.append(noise)
combined_wave = np.sum(waves, axis=0)
combined_wave = combined_wave.astype(np.float32)
output_stream.write(combined_wave)
def play_csv_file(file_path):
stop_signal = False
global stop_signal
global noise_data_cache
# Load all noise data into the cache
load_all_noise_data()
with open(file_path, 'r') as csv_file:
csv_reader = csv.DictReader(csv_file)
@ -54,7 +92,7 @@ def play_csv_file(file_path):
noise_amplitude = float(row.get('Noise', 0))
if stop_signal == False:
play_square_waves(output_stream, frequencies, effects, duration, noise_amplitude=noise_amplitude)
if __name__ == "__main__":
print(' ')
print(' Mueller\'s Software Domain proudly presents:')
@ -64,7 +102,7 @@ if __name__ == "__main__":
print('/ | \ | | | | | | | | | \// __ \\\\ \___| <\ ___/| | \/')
print('\_______ /__| |__| |__| |____| |__| (____ /\___ >__|_ \\\\___ >__| ')
print(' \/ \/ \/ \/ \/ ')
print(' Version 1.2')
print(' Version 1.3')
if len(sys.argv) > 1:
csv_file_path = sys.argv[1]
else: