-
Notifications
You must be signed in to change notification settings - Fork 0
/
MusicXMLprocessor.py
229 lines (220 loc) · 10.8 KB
/
MusicXMLprocessor.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
from music21 import *
import xml.etree.ElementTree as ET
import numpy as np
# Audio analysis parameters: RATE is the assumed sample rate in Hz and
# CHUNK is the frame size in samples, so each chroma frame spans
# CHUNK / RATE seconds (~93 ms at 44.1 kHz) — see the frame loop in
# MusicXMLprocessor.musicXMLtoChroma.
RATE = 44100
CHUNK = 4096
class MusicXMLprocessor:
'''
MusicXMLprocessor: accepts musicxml file as input, calculates chroma
vector representation of piece of music. intended as pre-processor
before actual comparison happens...
'''
def __init__(self, filename):
self.file = filename
# self.chroma[0] = C, ..., self.chroma[11] = B
self.chroma = np.array([[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[]])
self.eventTriggerOnset = []
#key = note name
#value = chroma index for chroma matrix
self._chromaToIndex = {'C':0,
'C#':1,
'D-':1,
'D':2,
'D#':3,
'E-':3,
'E':4,
'F':5,
'F#':6,
'G-':6,
'G':7,
'G#':8,
'A-':8,
'A':9,
'A#':10,
'B-':10,
'B':11}
#key = chroma index for a particular note (see chroma to index)
#value = for each chroma index, amplitude of fund. freq. and partials
#see https://en.wikipedia.org/wiki/Harmonic_series_(music)
self._cOvertones = np.array([1.825,0,0,0,.2,0,0,.5,0,0,.143,0])
self._harmonics = {}
for i in range(0,12):
self._harmonics[i] = np.roll(self._cOvertones, i)
def musicXMLtoChroma(self):
importer = musicxml.xmlToM21.MusicXMLImporter()
#because tempo is impossible to get with music21 for some reason
#i'm extracting it directly from the xml using a parser
#it's very dinky but works...
root = ET.parse(self.file).getroot()
beatUnit = ""
perMinute = ""
for attribute in root.iter('beat-unit'):
beatUnit = attribute.text
for attribute in root.iter('per-minute'):
perMinute = attribute.text
#load file
score = importer.scoreFromFile(self.file)
#must make tempo a metronome mark object to attach to music21 streams
scoreTempo = tempo.MetronomeMark(None, int(perMinute), beatUnit)
score.insert(scoreTempo)
#print("Debug: score.seconds",score.seconds)
parts_extracted = score.parts
parts_and_voices = []
for part in parts_extracted:
part.insert(scoreTempo)
#print(part.partName, part.seconds)
if part.hasVoices():
#voices could get lost if not extracted separately...
for voice in part.voices:
parts_and_voices.append(voice)
else:
parts_and_voices.append(part)
#duration for calculations later
length_score_seconds = score.seconds
################################################
#extracting note names and indexes by part
#a chroma node is a tuple which contains:
# the pitch(es) of a particular frame
# (if there's only one, type(pitch) == str
# else, type(pitches) == list
# the start time of those pitches
# (which is 0 if the pitch is first
# or the end of the previous note if
# i > 0)
# the end time of those pitches
# (duration of pitch in seconds + start time)
# these nodes are stored in a dictionary, chroma_nodes_per_part
# where the key == a part in the piece
# and value == list of chroma nodes in that part.
################################################
chroma_nodes_per_part= {}
chroma_vector_per_part = {}
for i in range(len(parts_and_voices)):
# cover all parts
chroma_nodes_per_part[parts_and_voices[i].partName] = []
#each part is key in dict, list of notes in each part
for note in parts_and_voices[i].flat.notesAndRests:
#cover all notes in specific part
if not (chroma_nodes_per_part[parts_and_voices[i].partName] or
note.isChord):
start = 0
end = note.seconds + start
if parts_and_voices[i].partName != "GO":
chroma_node = (note.name, start, end)
chroma_nodes_per_part[parts_and_voices[i].partName].append(chroma_node)
else:
if note.name is not "rest":
trigger_node = (note.lyric, start, end)
chroma_nodes_per_part[parts_and_voices[i].partName].append(trigger_node)
else:
chroma_node = (note.name, start, end)
chroma_nodes_per_part[parts_and_voices[i].partName].append(chroma_node)
elif chroma_nodes_per_part[parts_and_voices[i].partName] and not note.isChord:
start = chroma_nodes_per_part[parts_and_voices[i].partName][-1][2]
end = note.seconds + start
if parts_and_voices[i].partName != "GO":
chroma_node = (note.name, start, end)
chroma_nodes_per_part[parts_and_voices[i].partName].append(chroma_node)
else:
if note.name is not "rest":
trigger_node = (note.lyric, start, end)
chroma_nodes_per_part[parts_and_voices[i].partName].append(trigger_node)
else:
chroma_node = (note.name, start, end)
chroma_nodes_per_part[parts_and_voices[i].partName].append(chroma_node)
elif not chroma_nodes_per_part[parts_and_voices[i].partName] and note.isChord:
note_temp = []
for pitch in note.pitches:
note_temp.append(pitch.name)
start = 0
end = note.seconds + start
chroma_node = (note_temp, start, end)
chroma_nodes_per_part[parts_and_voices[i].partName].append(chroma_node)
elif chroma_nodes_per_part[parts_and_voices[i].partName] and note.isChord:
note_temp = []
for pitch in note.pitches:
note_temp.append(pitch.name)
start = chroma_nodes_per_part[parts_and_voices[i].partName][-1][2]
end = note.seconds + start
chroma_node = (note_temp,start,end)
chroma_nodes_per_part[parts_and_voices[i].partName].append(chroma_node)
#############################################################
## generating chroma vectors for each part ##################
num_of_frames = 0
counter = 0
seen_triggers = []
for part in chroma_nodes_per_part:
#print("part:", part)
if part != "GO":
chroma_vector_per_part[part] = [[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[]]
for i in np.arange(0, length_score_seconds, ((CHUNK)/RATE)):
counter += 1
notes_in_frame = []
chroma_in_frame = np.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
for node in chroma_nodes_per_part[part]:
if node[1] < i and i <= node[2] and type(node[0]) == str:
#if in current frame and only one note
#print(node)
if part != "GO":
if node[0] in self._chromaToIndex:
notes_in_frame.append(self._chromaToIndex[node[0]])
else:
notes_in_frame.append("R")
else:
if node[0] != 'rest' and node[0] not in seen_triggers:
self.eventTriggerOnset.append((counter, node[0]))
seen_triggers.append(node[0])
elif node[1] < i and i<= node[2] and type(node[0]) == list:
#if in current frame and multiple notes
#print(node[0])
note_index = []
for note in node[0]:
if note in self._chromaToIndex:
note_index = self._chromaToIndex[note]
elif note == "R":
pass
if note_index not in notes_in_frame:
notes_in_frame.append(note_index)
elif node[2] > i:
break
#print(notes_in_frame)
for note in notes_in_frame:
if type(note) == int:
chroma_in_frame += self._harmonics[note]
elif note == "R":
pass
if part != "GO":
for j in range(len(chroma_vector_per_part[part])):
num_of_frames = len(chroma_vector_per_part[part][j])+1
chroma_vector_per_part[part][j].append(chroma_in_frame[j])
full_chroma = np.zeros((12, num_of_frames))
for part in chroma_vector_per_part:
chroma_vector_per_part[part] = np.array(chroma_vector_per_part[part])
for i in range(len(chroma_vector_per_part[part])):
full_chroma[i] += chroma_vector_per_part[part][i]
chroma_normed = full_chroma / full_chroma.max(axis=0)
np.place(chroma_normed, np.isnan(chroma_normed), [0])
self.chroma = chroma_normed
return self.chroma