以下代码出现:AttributeError: 'tuple' object has no attribute 'ndims'
但是调试不出哪一行出现问题
import tensorflow as tf
import numpy as np
from random import choice
#1、产生数据,定义参数,输入层
#定义参数
reuse=tf.AUTO_REUSE
Im = 100
In = tf.random_uniform([1], minval=0, maxval=0.1, dtype=tf.float32)
Um = 220
W0 = 50
A_list = [46.70,28.01,20.01,15.56]
index = choice([0,1,2,3])
A = A_list[index]
Fai = np.random.random((180,180))
t = np.arange(0,100,0.0001)
T = np.arange(0,0.1,0.0001)
M = tf.reciprocal(np.multiply(2,T))
#基波电流信号
x0 = np.multiply(Um,np.sin(np.multiply(W0,t)))
def Harmonic_signal(t,W0,Fai,a):
if a==None:
#在电网基波信号上叠加正数次的谐波信号的稳态电网畸变信号
x1 = np.add(np.multiply(Im,np.sin(W0,t)),np.multiply(In,np.sin(np.add(np.multiply(A,W0,t),Fai))))
return x1
else:
#振荡谐波稳态电网畸变信号
x1 = np.add(np.multiply(Im,np.sin(W0,t)),np.multiply(In,np.sin(np.add(np.multiply(A,W0,t),Fai)),np.multiply(M,np.exp(np.divide(-t,T,dtype = tf.float32)))))
return x1
#x1 = Harmonic_signal(t,W0,Fai,None)
#瞬时电压暂升或暂降,q大于1,为暂升,q小于1,为暂降
def Voltage_flicker(t,x0,q):
t1 = tf.get_variable("t1",dtype=tf.float32,initializer=tf.random_uniform_initializer(minval=0,maxval=t,dtype=tf.float32))
x2 = np.select([t<t1, t>np.add(t1,6), True], [x0, x0,np.multiply(q,x0)])
return x2
#x2 = Voltage_flicker(t,x0,q)
#2,运行参数设定
training_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10
n_input = 28
n_steps = 28
n_hidden = 128
n_classes = 5
#中间隐藏单元
def Model(net,n_hidden,
data_format='channels_last',
kernel_regularizer=None,
bias_initializer=tf.zeros_initializer(),
activation = tf.nn.relu,
scope = None,
reuse = None):
for i in range(n_hidden):
with tf.variable_scope('block_1d',net,reuse = reuse):
#tf.layters.conv1d(inputs, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True,...)
with tf.variable_scope("BRANCH_0"):
branch_0 = tf.layers.conv1d(net,1,1,strides=1,padding="VALID",name = "Conv1d_0a_1x1")
branch_0 = tf.layers.conv1d(net,128,3,strides=1,padding="SAME",name = "Conv1d_1a_1x3")
with tf.variable_scope("BRANCH_1"):
branch_1 = tf.layers.conv1d(net,1,1,strides=1,padding="VALID",name = "Conv1d_0b_1x1")
branch_1 = tf.layers.conv1d(net,64,5,strides=1,padding="SAME",name = "Conv1d_1b_1x5")
with tf.variable_scope("BRANCH_2"):
branch_2 = tf.layers.conv1d(net,1,1,strides=1,padding="VALID",name = "Conv1d_0c_1x1")
branch_2 = tf.layers.conv1d(net,32,7,strides=1,padding="SAME",name = "Conv1d_1c_1x7")
link = tf.concat(1,[branch_0,branch_1,branch_2])
link = tf.multiply(link,link)
return Model(link)
x = tf.placeholder("float",[None,n_steps,n_input])
y = tf.placeholder("float",[None,n_classes])
out0 = Model(x0,n_hidden)
out1 = Model(Harmonic_signal(t,W0,Fai,None))
out2 = Model(Harmonic_signal(t,W0,Fai,1))
out3 = Model(Voltage_flicker(t,x0,0.5))
out4 = Model(Voltage_flicker(t,x0,1))
#3,输出层
out = [out0,out1,out2,out3,out4]
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = out,labels = y))
optimizer = tf.train.AdamOptimizer(learning_rate = training_rate).minimize(cost)
correct_pred = tf.equal(tf.argmax(out,1),tf.argmax(y,1))
accuaracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))
init = tf.glorot_normal_initializer()
#启动会话
with tf.Session() as sess:
sess = tf.Session(config=tf.ConfigProto(gpu_options = True))
sess.run(init)
step = 1
while step * batch_size < training_iters:
batch_x,batch_y = tf.train.next_batch(batch_size)
batch_x = batch_x.reshape(batch_size,n_steps,n_input)
sess.run(optimizer,feed_dict = {x:batch_x,y:batch_y})
if step % display_step == 0:
acc = sess.run(accuaracy,feed_dict = {x:batch_x,y:batch_y})
loss = sess.run(accuaracy,feed_dict = {x:batch_x,y:batch_y})
print("iter" + str(step * batch_size) + ", Minibatch Loss =" + "\{:.6f}".format(loss) + ",Training Accuracy=" + "\{:.5f}".format(acc))
step += 1
相似问题