Format nitpick; bump version
maxrmorrison committed Jul 15, 2024
1 parent 9b366a3 commit 6fe69eb
Showing 3 changed files with 17 additions and 8 deletions.
3 changes: 3 additions & 0 deletions penn/convert.py
@@ -45,6 +45,7 @@ def frequency_to_samples(frequency, sample_rate=penn.SAMPLE_RATE):
    """Convert frequency in Hz to number of samples per period"""
    return sample_rate / frequency


def frequency_to_midi(frequency):
"""
Convert frequency to MIDI note number(s)
Expand All @@ -53,6 +54,7 @@ def frequency_to_midi(frequency):
"""
return 12 * (torch.log2(frequency) - torch.log2(torch.tensor(440.0))) + 69


def midi_to_frequency(midi):
"""
Convert MIDI note number to frequency
Expand All @@ -61,6 +63,7 @@ def midi_to_frequency(midi):
"""
return 440.0 * (2.0 ** ((midi - 69.0) / 12.0))


###############################################################################
# Time conversions
###############################################################################
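Not part of the diff, but as a quick illustration of the conversions above, a minimal sketch assuming penn is importable and exposes penn.convert as shown in this commit:

# Minimal sketch (not part of this commit); assumes penn is importable
# and exposes the conversion helpers shown above.
import torch
import penn

frequency = torch.tensor([110.0, 440.0, 880.0])

# Hz -> MIDI: 440 Hz is concert A, i.e. MIDI note 69
midi = penn.convert.frequency_to_midi(frequency)
assert torch.allclose(midi, torch.tensor([45.0, 69.0, 81.0]))

# MIDI -> Hz round trip recovers the original frequencies
assert torch.allclose(penn.convert.midi_to_frequency(midi), frequency)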
3 changes: 2 additions & 1 deletion setup.py
@@ -8,7 +8,7 @@
setup(
    name='penn',
    description='Pitch Estimating Neural Networks (PENN)',
    version='0.0.13',
    version='0.0.14',
    author='Max Morrison, Caedon Hsieh, Nathan Pruyne, and Bryan Pardo',
    author_email='interactiveaudiolab@gmail.com',
    url='https://github.com/interactiveaudiolab/penn',
@@ -22,6 +22,7 @@
        ],
        'test': [
            'librosa', # 0.9.1
            'pytest', # 8.2.2
        ]
    },
    install_requires=[
19 changes: 12 additions & 7 deletions test/test_convert.py
@@ -11,22 +11,27 @@

def test_convert_frequency_to_midi():
    """Test that conversion from Hz to MIDI matches librosa implementation"""

    sample_data = torch.tensor([110.0, 220.0, 440.0, 500.0, 880.0])

    # Convert
    penn_midi = penn.convert.frequency_to_midi(sample_data)

    librosa_midi = librosa.hz_to_midi(sample_data.numpy())

    assert torch.allclose(penn_midi, torch.tensor(librosa_midi, dtype=torch.float32))
    # Compare
    assert torch.allclose(
        penn_midi,
        torch.tensor(librosa_midi, dtype=torch.float32))


def test_convert_midi_to_frequency():
    """Test that conversion from MIDI to Hz matches librosa implementation"""
    sample_data = torch.tensor([45.0, 57.0, 69.0, 71.2131, 81.0])

    sample_data = torch.tensor([45.0, 57.0, 69.0, 71.2131, 81.0])

    # Convert
    penn_frequency = penn.convert.midi_to_frequency(sample_data)

    librosa_frequency = librosa.midi_to_hz(sample_data.numpy())

    assert torch.allclose(penn_frequency, torch.tensor(librosa_frequency, dtype=torch.float32))
    # Compare
    assert torch.allclose(
        penn_frequency,
        torch.tensor(librosa_frequency, dtype=torch.float32))
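Not part of the commit, but a minimal sketch of invoking this test module from Python, assuming the 'test' extras from setup.py (librosa, pytest) are installed:

# Minimal sketch (not part of this commit): run the conversion tests,
# assuming the 'test' extras (librosa, pytest) are installed and the
# working directory is the repository root.
import pytest

exit_code = pytest.main(['test/test_convert.py', '-q'])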
