我有一个作业,要求在不借助库函数自动完成的情况下对PCA进行编程。我目前正在编写这段代码;虽然程序能执行某些操作,但我不确定结果是否正确。因此,请您帮我检查一下代码。
def input_to_output(self, x):
    """Project an input pattern onto the learned principal axis.

    Computes the output-layer activation y = w . x, i.e. the coordinate
    of ``x`` in the new PCA coordinate system.

    Args:
        x: input pattern of shape (dim,), or a batch of patterns of
           shape (dim, count); the dot product broadcasts over columns.

    Returns:
        A scalar projection for a single pattern, or an array of length
        ``count`` for a batch.
    """
    # The output neuron is linear: its activation is the projection of
    # the input onto the weight (principal-direction) vector.
    return np.dot(self.w, x)
def output_to_input(self, y):
    """Map output-layer coordinates back to the original input space.

    Reconstructs x_hat = w * y, the point on the principal axis whose
    projection is ``y``.  Generalizes the original 2-D, scalar-only
    version to any dimension and to batches of projections.

    Args:
        y: scalar projection, or an array of projections of length
           ``count``.

    Returns:
        Array of shape (dim,) for a scalar ``y``, or of shape
        (dim, count) for a batch.
    """
    y = np.asarray(y)
    if y.ndim == 0:
        # Single pattern: reconstruction is simply w scaled by y.
        return self.w * y
    # Batch: one reconstructed column per projection value.
    return np.outer(self.w, y)
def denoise_data(self, data):
    """Denoise data by projecting onto the principal axis and back.

    Transforms all patterns into the PCA coordinate system and then
    applies the inverse transformation; components orthogonal to the
    principal direction are discarded, which removes noise.

    Args:
        data: array of shape (dim, count) — one pattern per column.

    Returns:
        Array of shape [data_dimension x data_count], e.g. [2 x 100].
    """
    # Forward projection onto w, then reconstruction from w.
    projections = self.input_to_output(data)
    return self.output_to_input(projections)
def train_iterative(self, data, num_epochs, alpha=0.1):
    """Train the single-output PCA network iteratively with Oja's rule.

    Each epoch presents the patterns in a fresh random order and applies
    the Oja update  w <- w + alpha * y * (x - y * w),  renormalizing the
    weight vector to unit length after every step so it converges to the
    first principal direction.

    Args:
        data: array of shape (dim, count) — one pattern per column.
        num_epochs: number of passes over the whole data set.
        alpha: learning rate.
    """
    count = data.shape[1]  # number of data points

    def normalize(v):
        # Scale v to unit Euclidean length; leave the zero vector as-is.
        norm = np.linalg.norm(v)
        return v if norm == 0 else v / norm

    for _ in range(num_epochs):
        for p in np.random.permutation(count):
            x = data[:, p]
            # Output-layer activation: projection of x onto w.
            y = np.dot(self.w, x)
            # Oja's rule, vectorized over all weight components
            # (replaces the per-component loop with the unused `znak`).
            self.w = normalize(self.w + alpha * y * (x - y * self.w))
def train_analytic(self, data):
    """Train the PCA network analytically via eigen-decomposition.

    Builds the dim x dim correlation matrix Q = (1/count) * data @ data.T
    and sets the weight vector to the eigenvector belonging to the
    largest-magnitude eigenvalue — the first principal direction.

    NOTE(review): data is not mean-centered here (same as the original
    code) — confirm the assignment supplies centered data.

    Args:
        data: array of shape (dim, count) — one pattern per column.
    """
    count = data.shape[1]  # number of data points
    # BUG FIX: np.outer flattens both operands and would build a
    # (dim*count) x (dim*count) matrix; the correlation matrix needs
    # the matrix product instead.
    Q = np.dot(data, data.T) / count
    # Q is symmetric, so eigh is the right routine: it is numerically
    # stable and returns real eigenvalues/eigenvectors.
    lambdas, vectors = np.linalg.eigh(Q)
    # First principal direction = eigenvector of the dominant
    # eigenvalue.  (The eigenvector's sign is arbitrary, exactly as
    # with np.linalg.eig in the original.)
    self.w = vectors[:, np.argmax(np.abs(lambdas))]
图形如下: