Commit

Add files via upload
JoexRedding authored Mar 14, 2022
1 parent 2bb8489 commit 2783e8a
Showing 3 changed files with 191 additions and 0 deletions.
53 changes: 53 additions & 0 deletions 31-洪久远 北京/第九周/Keras2.py
@@ -0,0 +1,53 @@
# import tensorflow as tf
from tensorflow.python.keras.datasets import mnist
import matplotlib.pyplot as plt
# import cv2
from tensorflow.python.keras import models
from tensorflow.python.keras import layers
from tensorflow.python.keras.utils import to_categorical


# 1. Load the data and take a look

(trainImages,trainLables),(testImages,testLables)=mnist.load_data()
# print('trainImages.shape:',trainImages.shape)
# print('trainLables:',trainLables)
# print('testImages.shape:',testImages.shape)
# print('testLables:',testLables)
# View the first image in the test set
# test0=testImages[0]
# plt.imshow(test0,cmap=plt.cm.binary)
# plt.show()
#cv2.imshow('test0',test0)
# the cv2 window displays rather small




# 2. Build the neural network with Keras

net=models.Sequential()
net.add(layers.Dense(units=512,activation='relu',input_shape=(28*28,)))
net.add(layers.Dense(units=10,activation='softmax'))
net.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])

# Reshape and normalize the data
trainImages=trainImages.reshape((60000,28*28))
trainImages=trainImages.astype('float32')/255
testImages=testImages.reshape((10000,28*28))
testImages=testImages.astype('float32')/255
# One-hot encode the labels
testLablesOriginal=testLables
trainLables=to_categorical(trainLables)
testLables=to_categorical(testLables)

# 3. Train and evaluate

net.fit(trainImages,trainLables,epochs=7,batch_size=128)

testLoss,testAccuracy=net.evaluate(testImages,testLables,verbose=1)
print('\nTest Loss:',testLoss,'Test Accuracy:',testAccuracy)

result=net.predict(testImages)
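The script keeps a copy of the integer labels in testLablesOriginal but never uses it, and result holds one softmax vector per test image. A minimal sketch of how the two could be combined to check a single prediction; the use of np.argmax and index 0 here are illustrative assumptions, not part of the original commit:

# Hypothetical follow-up: convert the softmax vector for the first test image
# into a digit and compare it with the integer label kept in testLablesOriginal.
import numpy as np
predictedDigit = np.argmax(result[0])
print('Predicted:', predictedDigit, 'Actual:', testLablesOriginal[0])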


88 changes: 88 additions & 0 deletions 31-洪久远 北京/第九周/NeuralNetWork2.py
@@ -0,0 +1,88 @@
import numpy as np
import scipy.special


class NeuralNetWork:
    def __init__(self, inputNodes, hiddenNodes, outputNodes, learningRate):
        self.iNodes = inputNodes
        self.hNodes = hiddenNodes
        self.oNodes = outputNodes
        self.lr = learningRate
        # Basic parameters initialized above
        self.wih = np.random.normal(loc=0.0, scale=pow(self.hNodes, -0.5), size=(self.hNodes, self.iNodes))
        self.who = np.random.normal(loc=0.0, scale=pow(self.oNodes, -0.5), size=(self.oNodes, self.hNodes))
        # Weight matrix initialization: wih is the input-to-hidden matrix, who is the hidden-to-output matrix
        # loc is the mean, scale is the standard deviation
        self.activationFunction = lambda x: scipy.special.expit(x)
        # The activation function is the sigmoid

    def train(self, inputList, targetList):
        inputs = np.array(inputList, ndmin=2).T
        targets = np.array(targetList, ndmin=2).T
        hiddenInputs = np.dot(self.wih, inputs)
        # Input to the hidden layer
        hiddenOutputs = self.activationFunction(hiddenInputs)
        # Hidden-layer output after activation
        finalInputs = np.dot(self.who, hiddenOutputs)
        # Input to the output layer
        finalOutputs = self.activationFunction(finalInputs)
        # Final output
        outputErrors = targets - finalOutputs
        hiddenErrors = np.dot(self.who.T, outputErrors * finalOutputs * (1 - finalOutputs))
        self.who += self.lr * np.dot((outputErrors * finalOutputs * (1 - finalOutputs)), np.transpose(hiddenOutputs))
        self.wih += self.lr * np.dot((hiddenErrors * hiddenOutputs * (1 - hiddenOutputs)), np.transpose(inputs))

    def query(self, inputs):
        hiddenInputs = np.dot(self.wih, inputs)
        hiddenOutputs = self.activationFunction(hiddenInputs)
        finalInputs = np.dot(self.who, hiddenOutputs)
        finalOutputs = self.activationFunction(finalInputs)
        print(finalOutputs)
        return finalOutputs


inputNodes=784
hiddenNodes=200
outputNodes=10
learningRate=0.1
n=NeuralNetWork(inputNodes,hiddenNodes,outputNodes,learningRate)

# Read the training data below
trainDataFile=open("dataset/mnist_train.csv",'r')
trainDataList=trainDataFile.readlines()
trainDataFile.close()

# Add epochs, i.e. the number of passes over the training data

epochs = 5
for e in range(epochs):
    for record in trainDataList:
        allValues = record.split(',')
        inputs = (np.asfarray(allValues[1:])) / 255.0 * 0.99 + 0.01
        # Map the image pixel values into the 0.01-1.00 range used as network input
        targets = np.zeros(outputNodes) + 0.01
        targets[int(allValues[0])] = 0.99
        n.train(inputs, targets)

testDataFile=open("dataset/mnist_test.csv")
testDataList=testDataFile.readlines()
testDataFile.close()

scores = []
for record in testDataList:
    allValuesTest = record.split(',')
    correctNumber = int(allValuesTest[0])
    print("The correct digit for this image is:", correctNumber)
    inputs = (np.asfarray(allValuesTest[1:])) / 255.0 * 0.99 + 0.01
    outputs = n.query(inputs)
    label = np.argmax(outputs)
    print("The network's prediction is:", label)
    if label == correctNumber:
        scores.append(1)
    else:
        scores.append(0)
print(scores)

scoresArray=np.asarray(scores)
print('performance=',scoresArray.sum()/scoresArray.size)
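The script assumes dataset/mnist_train.csv and dataset/mnist_test.csv exist on disk. As a minimal sketch, and purely an assumption rather than part of the original commit, the same NeuralNetWork class could instead be fed from the Keras MNIST arrays already used in Keras2.py, flattening and rescaling each image into the 0.01-1.00 range that train() expects:

# Hypothetical alternative input pipeline, assuming the CSV files are unavailable.
from tensorflow.python.keras.datasets import mnist

(kTrainImages, kTrainLabels), _ = mnist.load_data()
net2 = NeuralNetWork(inputNodes, hiddenNodes, outputNodes, learningRate)
for image, label in zip(kTrainImages, kTrainLabels):
    kInputs = image.reshape(784).astype('float32') / 255.0 * 0.99 + 0.01
    kTargets = np.zeros(outputNodes) + 0.01
    kTargets[int(label)] = 0.99
    net2.train(kInputs, kTargets)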
50 changes: 50 additions & 0 deletions 31-洪久远 北京/第九周/tf2.py
@@ -0,0 +1,50 @@
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Use numpy to generate 200 evenly spaced sample points
xData=np.linspace(start=-0.5,stop=0.5,num=200)[:,np.newaxis]
# Reshape the row vector into a column vector
print(xData.shape)
noise=np.random.normal(loc=0,scale=0.02,size=xData.shape)
yData=np.square(xData)+noise
# Build y = x^2 and add noise

# Define two placeholders to hold the input data
# shape: the shape of the data. The default None means a 1-D value;
# it can also be multi-dimensional, e.g. [2, 3]; [None, 3] means 3 columns and an unspecified number of rows
x=tf.placeholder(dtype=tf.float32,shape=[None,1])
y=tf.placeholder(dtype=tf.float32,shape=[None,1])

# Define the hidden layer
# Weights are initialized from a normal distribution, shape 1 row by 20 columns
weightsL1=tf.Variable(tf.random_normal([1, 20]))
print(weightsL1)
biasesL1=tf.Variable(tf.zeros([1,20]))
outputL1= tf.matmul(x, weightsL1) + biasesL1
print(outputL1)
L1=tf.nn.tanh(outputL1)

# Define the output layer
weightsL2=tf.Variable(tf.random_normal([20,1]))
biasesL2=tf.Variable(tf.zeros([1,1]))
outputL2=tf.matmul(L1,weightsL2)+biasesL2
prediction=tf.nn.tanh(outputL2)

# Define the loss function as the mean squared error
loss=tf.reduce_mean(tf.square(y-prediction))
# Minimize the loss with gradient descent (backpropagation) at learning rate 0.1
trainStep=tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Initialize the global variables
    for i in range(1000):
        sess.run(trainStep, feed_dict={x: xData, y: yData})

    predictValues = sess.run(prediction, feed_dict={x: xData})

plt.figure()
plt.scatter(xData,yData)
plt.plot(xData,predictValues,'r-',lw=4)
plt.show()
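This script uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session), which is not available under TensorFlow 2.x by default. A minimal sketch of a compatibility shim, assuming TensorFlow 2.x is installed and the rest of the script stays unchanged, would be to import the v1 compatibility module and disable eager execution at the top of the file:

# Hypothetical TF2 compatibility header, assuming TensorFlow 2.x is installed.
# Place these lines before any other TensorFlow calls in the script.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
# With this import alias, tf.placeholder, tf.random_normal, tf.Session,
# tf.global_variables_initializer and tf.train.GradientDescentOptimizer
# keep the 1.x graph-mode behaviour the script relies on.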
