I recently coded my first neural network. It's an LSTM trained on data produced by some old algorithms I designed about a year ago for generating musical sequences. For some reason in my prediction function, any heterogeneous input pattern I feed it comes back as a completely homogeneous output. Thanks for any feedback!
For example:
network input:
[[ 7]
[ 1]
[17]
[11]
[ 5]
[21]
[15]
[ 9]
[ 3]
[19]
[13]
[ 7]]
network output:
D2
D2
D2
D2
... (the output is just "D2" repeated like this for every note the network generates)
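In case it helps to narrow this down, here is a small sketch of how I could inspect the raw softmax output for several different seed windows instead of only the argmax (inspect_predictions is just an illustrative helper name; model, network_input, int_to_note, and n_vocab are the objects built in Forecast.py below):

import numpy

def inspect_predictions(model, network_input, int_to_note, n_vocab, n_samples=5):
    """Print the top softmax probabilities for a few different seed windows,
    to see whether the model itself collapses onto a single class."""
    for start in numpy.random.randint(0, len(network_input), size=n_samples):
        pattern = network_input[start]
        prediction_input = numpy.reshape(pattern, (1, len(pattern), 1)) / float(n_vocab)
        prediction = model.predict(prediction_input, verbose=0)[0]
        top = numpy.argsort(prediction)[::-1][:3]
        print([(int_to_note[i], round(float(prediction[i]), 3)) for i in top])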
Here is Forecast.py:
import numpy
from music21 import instrument, note, stream, chord
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import Activation
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
from data.vedic import Vedic

def generate(notes):
    # load the notes used to train the model
    pitchnames = sorted(set(item for item in notes))
    # Get all pitch names
    n_vocab = len(set(notes))
    network_input, normalized_input = prepare_sequences(notes, pitchnames, n_vocab)
    model = create_network(normalized_input, n_vocab)
    prediction_output = generate_notes(model, network_input, pitchnames, n_vocab)
    create_midi(prediction_output)

def prepare_sequences(notes, pitchnames, n_vocab):
    pitchnames = sorted(set(item for item in notes))
    note_to_int = dict((note, number) for number, note in enumerate(pitchnames))
    sequence_length = 12
    network_input = []
    output = []
    for i in range(0, len(notes) - sequence_length, 1):
        sequence_in = notes[i:i + sequence_length]
        sequence_out = notes[i + sequence_length]
        network_input.append([note_to_int[char] for char in sequence_in])
        output.append(note_to_int[sequence_out])
    n_patterns = len(network_input)
    network_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))
    normalized_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))
    normalized_input = normalized_input / float(n_vocab)
    return (network_input, normalized_input)

def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(LSTM(
        512,
        input_shape=(network_input.shape[1], network_input.shape[2]),
        return_sequences=True
    ))
    model.add(Dropout(0.3))
    model.add(LSTM(512, return_sequences=True))
    model.add(Dropout(0.3))
    model.add(LSTM(512))
    model.add(Dense(256))
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    # Load the weights to each node
    model.load_weights('/home/bloom510/weights-improvement-06-0.0939-bigger.hdf5')
    return model

def generate_notes(model, network_input, pitchnames, n_vocab):
    """ Generate notes from the neural network based on a sequence of notes """
    # pick a random sequence from the input as a starting point for the prediction
    start = numpy.random.randint(0, len(network_input)-1)
    int_to_note = dict((number, note) for number, note in enumerate(pitchnames))
    pattern = network_input[start]
    print('pattern', pattern)
    prediction_output = []
    # generate 500 notes
    for note_index in range(500):
        prediction_input = numpy.reshape(pattern, (1, len(pattern), 1))
        prediction_input = prediction_input / float(n_vocab)
        prediction = model.predict(prediction_input, verbose=0)
        index = numpy.argmax(prediction)
        result = int_to_note[index]
        print(result)
        prediction_output.append(result)
        numpy.append(pattern, index)
        pattern = pattern[0:len(pattern)]
    return prediction_output

def create_midi(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
    from the notes """
    offset = 0
    output_notes = []
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        new_note = note.Note(pattern)
        new_note.offset = offset
        new_note.storedInstrument = instrument.Piano()
        output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        offset += 0.5
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='test_output.mid')

def clean_data():
    nine_square = Vedic(500, 'C', [2, 5])
    data = []
    for i in range(0, len(nine_square.table)):
        for j in range(0, len(nine_square.table)):
            data.append(nine_square.table[i][j])
    return data

data = clean_data()
generate(data)
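For context, this is roughly what the prepare_sequences step produces on a toy input (a standalone sketch with made-up notes, not my real Vedic data):

import numpy

notes = ['C4', 'D2', 'E4', 'C4', 'D2', 'G4', 'C4', 'D2']   # toy data, not the real table
pitchnames = sorted(set(notes))                              # ['C4', 'D2', 'E4', 'G4']
note_to_int = dict((n, i) for i, n in enumerate(pitchnames))
sequence_length = 3

network_input, output = [], []
for i in range(0, len(notes) - sequence_length):
    network_input.append([note_to_int[n] for n in notes[i:i + sequence_length]])
    output.append(note_to_int[notes[i + sequence_length]])

network_input = numpy.reshape(network_input, (len(network_input), sequence_length, 1))
print(network_input.shape)   # (5, 3, 1): one column vector of ints per window, like the example input above
print(output)                # [0, 1, 3, 0, 1]: the integer class to predict for each window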
Here is my model, lstm.py:
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import Activation
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
from data.vedic import Vedic

class VedicNN:
    def __init__(self, sequence_length, notes):
        self.network_input = []
        self.network_output = []
        self.sequence_length = sequence_length
        self.n_vocab = len(set(notes))
        self.pitchnames = sorted(set(item for item in notes))
        self.note_to_int = dict((note, number) for number, note in enumerate(self.pitchnames))
        self.train_network(notes)

    def train_network(self, notes):
        self.prepare_sequences(notes)
        model = self.create_network(self.network_input)
        self.train(model, self.network_input, self.network_output)

    def prepare_sequences(self, notes):
        for i in range(0, len(notes) - self.sequence_length, 1):
            sequence_in = notes[i:i + self.sequence_length]
            sequence_out = notes[i + self.sequence_length]
            self.network_input.append([self.note_to_int[char] for char in sequence_in])
            self.network_output.append(self.note_to_int[sequence_out])
        n_patterns = len(self.network_input)
        # reshape the input into a format compatible with LSTM layers
        self.network_input = numpy.reshape(self.network_input, (n_patterns, self.sequence_length, 1))
        # normalize input
        self.network_input = self.network_input / float(self.n_vocab)
        self.network_output = np_utils.to_categorical(self.network_output)

    def create_network(self, network_input):
        """Defines network structure"""
        model = Sequential()
        model.add(LSTM(
            512,
            input_shape=(self.network_input.shape[1], self.network_input.shape[2]),
            return_sequences=True
        ))
        model.add(Dropout(0.3))
        model.add(LSTM(512, return_sequences=True))
        model.add(Dropout(0.3))
        model.add(LSTM(512))
        model.add(Dense(256))
        model.add(Dropout(0.3))
        model.add(Dense(self.n_vocab))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        return model

    def train(self, model, network_input, network_output):
        """ train the neural network """
        filepath = "weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
        checkpoint = ModelCheckpoint(
            filepath,
            monitor='loss',
            verbose=0,
            save_best_only=True,
            mode='min'
        )
        callbacks_list = [checkpoint]
        model.fit(network_input, network_output, epochs=200, batch_size=64, callbacks=callbacks_list)
        model.save_weights('my_model_weights.h5')

# Will need to batch process table data into one large list. For now this works.
def clean_data():
    nine_square = Vedic(500, 'C', [2, 5])
    data = []
    for i in range(0, len(nine_square.table)):
        for j in range(0, len(nine_square.table)):
            data.append(nine_square.table[i][j])
    return data

data = clean_data()
testNN = VedicNN(12, data)
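And, for completeness, a tiny sketch of what np_utils.to_categorical does to network_output here, since the Dense(self.n_vocab) softmax head is trained against these one-hot rows (toy class indices, not the real data):

from keras.utils import np_utils

# toy class indices standing in for network_output before one-hot encoding
network_output = [0, 1, 3, 0, 1]
one_hot = np_utils.to_categorical(network_output)
print(one_hot.shape)   # (5, 4): one row per training window, one column per pitch class
print(one_hot[2])      # [0. 0. 0. 1.]: class 3 as a one-hot vector, matching the softmax output size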