@rishiraj
Here's a simple neural network I wrote in Python using pure NumPy. What is its Mojo equivalent?
%%python
import numpy as np
class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # Initialize weights randomly and biases to zero
        self.W1 = np.random.randn(self.input_size, self.hidden_size)
        self.b1 = np.zeros((1, self.hidden_size))
        self.W2 = np.random.randn(self.hidden_size, self.output_size)
        self.b2 = np.zeros((1, self.output_size))
    def forward(self, X):
        # Perform forward propagation
        self.z1 = np.dot(X, self.W1) + self.b1
        self.a1 = np.tanh(self.z1)
        self.z2 = np.dot(self.a1, self.W2) + self.b2
        self.a2 = self.sigmoid(self.z2)
        return self.a2
    def backward(self, X, y, learning_rate):
        m = X.shape[0]

        # Compute gradients; dZ2 = a2 - y is the gradient of the
        # binary cross-entropy loss taken through the sigmoid output
        dZ2 = self.a2 - y
        dW2 = (1 / m) * np.dot(self.a1.T, dZ2)
        db2 = (1 / m) * np.sum(dZ2, axis=0)
        dZ1 = np.dot(dZ2, self.W2.T) * (1 - np.power(self.a1, 2))  # tanh'(z1) = 1 - a1^2
        dW1 = (1 / m) * np.dot(X.T, dZ1)
        db1 = (1 / m) * np.sum(dZ1, axis=0)

        # Update weights and biases
        self.W2 -= learning_rate * dW2
        self.b2 -= learning_rate * db2
        self.W1 -= learning_rate * dW1
        self.b1 -= learning_rate * db1
    def train(self, X, y, num_epochs, learning_rate):
        for epoch in range(num_epochs):
            # Forward propagation
            output = self.forward(X)
            # Backward propagation
            self.backward(X, y, learning_rate)
            # Print the loss every 100 epochs
            if epoch % 100 == 0:
                loss = self.compute_loss(y, output)
                print(f"Epoch {epoch}, Loss: {loss}")
    @staticmethod
    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def compute_loss(self, y, output):
        # Binary cross-entropy (no clipping, so log(0) can occur if outputs saturate)
        return np.mean(-y * np.log(output) - (1 - y) * np.log(1 - output))
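
# Example usage, a minimal sketch for context: the XOR data, layer sizes,
# and hyperparameters below are placeholders chosen for illustration, not
# part of the network itself.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

nn = NeuralNetwork(input_size=2, hidden_size=4, output_size=1)
nn.train(X, y, num_epochs=1000, learning_rate=0.5)
print(nn.forward(X))  # predictions should move toward [0, 1, 1, 0]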