import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from matplotlib.colors import ListedColormap,ColorConverter
def generate(sample_size, mean, cov, diff, regression, num_classes):
    """Draw a Gaussian-mixture classification dataset.

    Args:
        sample_size: total number of samples; split evenly across classes.
        mean: mean vector of the base (class-0) Gaussian.
        cov: covariance matrix shared by all classes.
        diff: offsets added to `mean`, one per extra class
            (len(diff) == num_classes - 1).
        regression: if True, labels are integer class ids; if False,
            labels are one-hot encoded (rows of an identity-like matrix).
        num_classes: number of classes to generate.

    Returns:
        (X, Y): shuffled sample matrix and matching labels.
    """
    per_class = sample_size // num_classes
    X0 = np.random.multivariate_normal(mean, cov, per_class)
    Y0 = np.zeros(per_class)
    for ci, d in enumerate(diff):
        # Each extra class is the base Gaussian shifted by its offset d.
        Xtmp = np.random.multivariate_normal(
            np.asarray(mean) + np.asarray(d), cov, per_class)
        Ytmp = (ci + 1) * np.ones(per_class)
        X0 = np.concatenate((X0, Xtmp))
        Y0 = np.concatenate((Y0, Ytmp))
    if not regression:
        # One-hot encode: row i becomes the indicator vector of sample i's class.
        class_ind = [Y0 == class_number for class_number in range(num_classes)]
        Y0 = np.asarray(class_ind).T
    # BUG FIX: the original computed shuffled X, Y but returned the
    # UNSHUFFLED X0, Y0, silently discarding the shuffle. Return the
    # shuffled data (a numpy permutation replaces sklearn.utils.shuffle).
    perm = np.random.permutation(len(X0))
    return X0[perm], Y0[perm]
np.random.seed(10)
num_classes = 4
input_dim = 2      # each sample is a 2-D point
n_hidden = 20      # hidden-layer width of the MLP defined below
mean = np.random.randn(input_dim)
cov = np.eye(input_dim)

# Build a 4-class Gaussian-blob dataset, then fold the labels to binary
# (classes {0, 2} -> 0 and {1, 3} -> 1 via modulo), producing an
# XOR-like pattern that is not linearly separable.
X, Y = generate(320, mean, cov, [[3.0, 0], [3.0, 3.0], [0, 3.0]], True, num_classes)
Y = Y % 2                    # collapse 4 class ids into binary 0/1 labels
Y = np.reshape(Y, [-1, 1])

# Split the samples by label for plotting. Boolean-mask indexing replaces
# the original element-by-element Python loop with appends.
xr = X[Y[:, 0] == 0]         # label-0 points (plotted red '+')
xb = X[Y[:, 0] == 1]         # label-1 points (plotted blue 'o')
plt.scatter(xr[:, 0], xr[:, 1], c='r', marker='+')
plt.scatter(xb[:, 0], xb[:, 1], c='b', marker='o')
plt.show()
# Test data:
lab_dim = 1   # one sigmoid output unit for binary classification

# Placeholders for a minibatch of 2-D points and their 0/1 labels.
input_features = tf.placeholder(tf.float32, [None, input_dim])
input_labels = tf.placeholder(tf.float32, [None, lab_dim])

# Trainable parameters of a one-hidden-layer MLP.
weight = {
    'h1': tf.Variable(tf.random_normal([input_dim, n_hidden]), name='h1_weight'),
    'h2': tf.Variable(tf.random_normal([n_hidden, lab_dim]), name='h2_weight'),
}
bias = {
    'h1': tf.Variable(tf.zeros([n_hidden]), name='h1_bias'),
    'h2': tf.Variable(tf.zeros([lab_dim]), name='h2_bias'),
}

# Forward pass: ReLU hidden layer, then one raw logit per sample.
layer_1 = tf.nn.relu(tf.matmul(input_features, weight['h1']) + bias['h1'])
output = tf.matmul(layer_1, weight['h2']) + bias['h2']
# NOTE(review): despite the name, this is a sigmoid probability, not a softmax.
softmaxOutput = tf.nn.sigmoid(output)

# Loss: numerically stable sigmoid cross-entropy computed from the raw
# logits (not from softmaxOutput), averaged over the batch.
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=input_labels, logits=output)
loss = tf.reduce_mean(cross_entropy)
learning_rate = 0.0001
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# Training hyperparameters.
maxEpochs=2000      # number of passes over the training set
minbatch=25         # minibatch size (320 samples -> 12 full batches; remainder dropped)
prediction_out=[]   # will collect the network's output on the full training set
# Build a 50x50 evaluation grid over [-3, 8]^2 on which the decision
# surface will be sampled after training.
nb_of_xs=50
xs1=np.linspace(-3,8,num=nb_of_xs)
xs2=np.linspace(-3,8,num=nb_of_xs)
xx,yy=np.meshgrid(xs1,xs2)
classification_plane=np.zeros((nb_of_xs,nb_of_xs))  # filled with grid predictions below
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # ---- Training loop over fixed (unshuffled) minibatches ----
    # Integer division replaces np.int32(len(Y)/minbatch), and the batch
    # count is hoisted out of the epoch loop (it is loop-invariant).
    num_batches = len(Y) // minbatch
    for epoch in range(maxEpochs):
        for batch in range(num_batches):
            x1 = X[batch * minbatch:(batch + 1) * minbatch, :]
            y1 = Y[batch * minbatch:(batch + 1) * minbatch, :]
            _, lossval = sess.run([train, loss],
                                  feed_dict={input_features: x1, input_labels: y1})
        if (epoch + 1) % 1000 == 0:
            # lossval is the loss of the LAST minibatch of this epoch.
            print("Epoch=", "%04d" % (epoch + 1), "cost=", "{:9f}".format(lossval))
    print("Finish!")

    # ---- Sample the decision surface over the whole grid in ONE run ----
    # The original issued nb_of_xs**2 (= 2500) separate sess.run calls, one
    # per pixel. Feeding all grid points at once is equivalent: xx.ravel()
    # enumerates points in the same row-major order that reshape restores.
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    grid_probs = sess.run(softmaxOutput, feed_dict={input_features: grid_points})
    classification_plane = grid_probs.reshape(nb_of_xs, nb_of_xs)

    # Predictions on the full training set (kept wrapped in a list, as before).
    prediction_out.append(sess.run([softmaxOutput], feed_dict={input_features: X}))
# Visualize the trained model's decision boundary:
# Threshold the sigmoid probabilities at 0.5 to get hard 0/1 class labels
# (vectorized; replaces the original element-by-element double loop).
classification_plane = np.where(classification_plane > 0.5, 1.0, 0.0)

# Two semi-transparent colors: red for the 0 region, blue for the 1 region.
cmap = ListedColormap([
    ColorConverter.to_rgba('r', alpha=0.3),
    ColorConverter.to_rgba('b', alpha=0.3)])
# BUG FIX: the keyword argument is `cmap`, not `cmp` — with the original
# typo the custom colormap was never applied to the contour plot.
plt.contourf(xx, yy, classification_plane, cmap=cmap)
plt.scatter(xr[:, 0], xr[:, 1], c='b', marker='+')
plt.scatter(xb[:, 0], xb[:, 1], c='y', marker='o')
plt.show()
# Code adapted from the book "深度学习之TensorFlow入门、原理与进阶实战"
# (Deep Learning with TensorFlow: Introduction, Principles and Advanced Practice).