Building a Simple Neural Network with TensorFlow and with NumPy
NumPy implementation
'''
Implement a neural network with raw NumPy.
The code follows a PyTorch-style structure: each layer defines forward and backward.
'''
import numpy as np
import matplotlib.pyplot as plt

'''
Base Layer class: every layer implements forward and backward
'''
class Layer:
    def __init__(self):
        pass

    def forward(self, input):
        # Identity by default; subclasses override this
        return input

    def backward(self, input, grad_output):
        pass

'''
Basic activation functions for the network
'''
# ReLU activation function
class ReLU(Layer):
    def __init__(self):
        pass

    def forward(self, input):
        return np.maximum(0, input)

    def backward(self, input, grad_output):
        # The gradient is 1 where the input is positive and 0 elsewhere
        relu_grad = input > 0
        return grad_output * relu_grad
# Sigmoid activation function
class Sigmoid(Layer):
    def __init__(self):
        pass

    def _sigmoid(self, x):
        return 1.0 / (1 + np.exp(-x))

    def forward(self, input):
        return self._sigmoid(input)

    def backward(self, input, grad_output):
        # Uses the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
        sigmoid_grad = self._sigmoid(input) * (1 - self._sigmoid(input))
        return grad_output * sigmoid_grad

# Tanh activation function
class Tanh(Layer):
    def __init__(self):
        pass

    def _tanh(self, x):
        return np.tanh(x)

    def forward(self, input):
        return self._tanh(input)

    def backward(self, input, grad_output):
        # tanh'(x) = 1 - tanh(x)^2
        tanh_grad = 1 - (self._tanh(input)) ** 2
        return grad_output * tanh_grad
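The backward methods above hard-code the analytic derivatives, and a quick way to convince yourself they are correct is a finite-difference check. The snippet below is a sketch added for illustration (the numeric_grad helper is not part of the original post); it compares each smooth activation's backward output against a central-difference estimate. ReLU is left out because it is not differentiable at 0.

# Sanity check (not in the original post): compare backward() against a
# central-difference estimate of d(sum(forward(x)))/dx.
def numeric_grad(layer, x, eps=1e-6):
    grad = np.zeros_like(x)
    for i in np.ndindex(*x.shape):
        x_plus, x_minus = x.copy(), x.copy()
        x_plus[i] += eps
        x_minus[i] -= eps
        grad[i] = (layer.forward(x_plus).sum() - layer.forward(x_minus).sum()) / (2 * eps)
    return grad

x_check = np.random.randn(4, 3)
ones = np.ones_like(x_check)  # upstream gradient of sum() is all ones
for act in (Sigmoid(), Tanh()):
    assert np.allclose(act.backward(x_check, ones), numeric_grad(act, x_check), atol=1e-4)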
'''
Fully connected (dense) layer
'''
class Dense(Layer):
    def __init__(self, input_units, output_units, learning_rate=0.01):
        self.learning_rate = learning_rate
        # Small random weights, zero biases
        self.weights = np.random.rand(input_units, output_units) * 0.01
        self.biases = np.zeros(output_units)

    def forward(self, input):
        return np.dot(input, self.weights) + self.biases

    def backward(self, input, grad_output):
        # Gradient w.r.t. the layer input, passed further back
        grad_input = np.dot(grad_output, self.weights.T)
        # Gradients w.r.t. the parameters, averaged over the batch
        grad_weights = np.dot(input.T, grad_output) / input.shape[0]
        grad_biases = grad_output.mean(axis=0)
        # Plain SGD update
        self.weights = self.weights - self.learning_rate * grad_weights
        self.biases = self.biases - self.learning_rate * grad_biases
        return grad_input

class NNet(Layer):
    def __init__(self):
        # 1 -> 94 -> Tanh -> 1
        self.network = []
        self.network.append(Dense(1, 94))
        self.network.append(Tanh())
        self.network.append(Dense(94, 1))

    def forward(self, X):
        # Collect every layer's activation; each one is the next layer's input
        activations = []
        input = X
        for layer in self.network:
            activations.append(layer.forward(input))
            input = activations[-1]
        return activations

    def predict(self, X):
        logits = self.forward(X)[-1]
        return logits

    def train(self, X, y):
        layer_activations = self.forward(X)
        layer_inputs = [X] + layer_activations
        logits = layer_activations[-1]
        # Sum-of-squares loss and its gradient w.r.t. the output
        loss = np.square(logits - y).sum()
        loss_grad = 2.0 * (logits - y)
        # Backpropagate through the layers in reverse order
        for layer_i in reversed(range(len(self.network))):
            layer = self.network[layer_i]
            loss_grad = layer.backward(layer_inputs[layer_i], loss_grad)
        return np.mean(loss)

if __name__ == "__main__":
    # Fit sin(x) on [-pi, 0.7*pi]; hold out [0.7*pi, pi] as a test interval
    x_train = np.linspace(-np.pi, 0.7 * np.pi, 140).reshape(140, -1)
    y_train = np.sin(x_train)
    x_test = np.linspace(np.pi * 0.7, np.pi, 60).reshape(60, -1)
    y_test = np.sin(x_test)
    nnet = NNet()
    losses = []
    for epoch in range(100000):
        loss = nnet.train(x_train, y_train)
        losses.append(loss)
        print(loss)
    plt.plot(range(len(losses)), losses)
    plt.show()
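The script defines x_test and y_test but never evaluates on them. A minimal sketch of checking the trained network on that held-out interval, placed at the end of the same __main__ block (not in the original script):

    # Evaluate on the held-out interval
    y_pred = nnet.predict(x_test)
    print("test MSE:", np.mean(np.square(y_pred - y_test)))
    plt.plot(x_test, y_test, label="sin(x)")
    plt.plot(x_test, y_pred, label="prediction")
    plt.legend()
    plt.show()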
The NumPy version follows the template familiar from PyTorch: define the layers, run a forward pass, then backpropagate gradients layer by layer. For comparison, a sketch of the same model written in actual PyTorch follows.
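This is only a minimal sketch, assuming PyTorch is installed; the 1 -> 94 -> Tanh -> 1 architecture and the sum-of-squares loss mirror the NumPy network above.

import math
import torch

# Same architecture as the NumPy version
model = torch.nn.Sequential(
    torch.nn.Linear(1, 94),
    torch.nn.Tanh(),
    torch.nn.Linear(94, 1),
)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
loss_fn = torch.nn.MSELoss(reduction="sum")  # sum-of-squares, as above

x = torch.linspace(-math.pi, 0.7 * math.pi, 140).reshape(140, 1)
y = torch.sin(x)
for epoch in range(1000):
    optimizer.zero_grad()
    loss = loss_fn(model(x), y)  # forward pass
    loss.backward()              # backward pass (autograd)
    optimizer.step()             # SGD parameter update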
TensorFlow implementation

This version uses the TensorFlow 1.x graph API (tf.placeholder / tf.Session); under TensorFlow 2.x it would need tf.compat.v1 with eager execution disabled.
#!/usr/bin/python3
# coding=utf-8
import tensorflow as tf
import pandas as pd
import numpy as np
batch_size = 30
n_epochs = 10
n_batches = 85 // batch_size       # 85 training samples -> 2 full batches (the remainder is unused)
n_test_batches = 68 // batch_size  # 68 test samples -> 2 full batches
'''
Build the dataset (Iris, with labels one-hot encoded)
'''
def transform(mclass):
    # One-hot encode the class label (1, 2, or 3)
    if mclass == 1:
        return [1, 0, 0]
    elif mclass == 2:
        return [0, 1, 0]
    else:
        return [0, 0, 1]

# The test split is read from iris/train.txt (file naming as in the original data layout)
iris_test = np.loadtxt("iris/train.txt", encoding="UTF-8-sig")
np.random.shuffle(iris_test)  # random.shuffle corrupts 2-D NumPy arrays; use NumPy's shuffle
iris_x_test = iris_test[:, :4]
print(iris_x_test)
iris_y_test = iris_test[:, 4]
iris_y_test = np.array(list(map(transform, iris_y_test)))
print(iris_y_test)
print(iris_test.shape)

iris = np.loadtxt("iris/test.txt", encoding="UTF-8-sig")
np.random.shuffle(iris)
iris_x = iris[:, :4]
print(iris_x)
iris_y = iris[:, 4]
iris_y = np.array(list(map(transform, iris_y)))
print(iris_y)
print(iris.shape)
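As an aside (not in the original post), the per-element transform plus map can be replaced by a vectorized one-hot encoding; a sketch, assuming the labels are exactly the integers 1, 2, 3:

# Vectorized alternative to map(transform, ...): index into an identity matrix
labels = iris[:, 4].astype(int)        # values in {1, 2, 3}
iris_y_onehot = np.eye(3)[labels - 1]  # row k-1 of eye(3) is the one-hot vector for class k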
X = tf.placeholder(shape=[None, 4], dtype=tf.float32, name='X')
Y = tf.placeholder(shape=[None, 3], dtype=tf.float32, name='Y')

# Layer 1: 4 input features -> 8 hidden units
W_l1 = tf.Variable(tf.random_normal([4, 8]))
b_l1 = tf.Variable(tf.zeros([8]))
Wb_l1 = tf.matmul(X, W_l1) + b_l1
l1 = tf.nn.relu(Wb_l1)

# Layer 2 (output): 8 hidden units -> 3 classes (raw logits, no activation)
W_l2 = tf.Variable(tf.random_normal([8, 3]))
b_l2 = tf.Variable(tf.zeros([3]))
prediction = tf.matmul(l1, W_l2) + b_l2
# prediction = tf.nn.relu(Wb_l2)
# W_l3 = tf.Variable(tf.random_normal([5, 3]))
# b_l3 = tf.Variable(tf.zeros([3]))
# prediction = tf.matmul(l2, W_l3) + b_l3

# Softmax cross-entropy expects raw logits, so no softmax is applied above
loss = tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=prediction)
loss = tf.reduce_mean(loss)
train_step = tf.train.AdamOptimizer(0.1).minimize(loss)

# Accuracy: fraction of samples whose predicted class matches the label
correct = tf.equal(tf.argmax(Y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        train_acc = .0
        test_acc = .0
        # Training batches
        for batch in range(n_batches):
            X_batch = iris_x[batch * batch_size:(batch + 1) * batch_size]
            Y_batch = iris_y[batch * batch_size:(batch + 1) * batch_size]
            train_val, acc_val, y = sess.run([train_step, accuracy, prediction],
                                             feed_dict={X: X_batch, Y: Y_batch})
            print(acc_val)
            train_acc += acc_val
        train_acc /= n_batches
        print("train_acc:", train_acc)
        # Evaluation batches
        for batch in range(n_test_batches):
            X_batch = iris_x_test[batch * batch_size:(batch + 1) * batch_size]
            Y_batch = iris_y_test[batch * batch_size:(batch + 1) * batch_size]
            acc_val = sess.run(accuracy, feed_dict={X: X_batch, Y: Y_batch})
            test_acc += acc_val
        test_acc /= n_test_batches
        print("test_acc:", test_acc)
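For reference, a minimal sketch of the same classifier in TensorFlow 2.x Keras (not part of the original post; it assumes the iris_x / iris_y arrays built above and uses the same 4 -> 8 -> 3 architecture, Adam at 0.1, and softmax cross-entropy on logits):

import tensorflow as tf  # TensorFlow 2.x

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation="relu", input_shape=(4,)),
    tf.keras.layers.Dense(3),  # raw logits
])
model.compile(
    optimizer=tf.keras.optimizers.Adam(0.1),
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
model.fit(iris_x, iris_y, batch_size=30, epochs=10,
          validation_data=(iris_x_test, iris_y_test))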