
In Keras with a customized train_step(), how do I set 'training=False' when evaluating the model with a customized test_step()?

Stack Overflow user
Asked on 2020-11-06 03:26:05
1 answer · 333 views · 0 following · 0 votes

I am using tensorflow.keras and want to evaluate my model with a customized test_step(); the model is trained with a customized train_step(). I want to train and evaluate the following model:

import tensorflow as tf


class Whole_model(tf.keras.Model):
    def __init__(self, EEG_gen_model, emg_feature_extractor,
                 eeg_feature_extractor, seq2seq_model):
        super(Whole_model, self).__init__()
        self.EEG_gen_model = EEG_gen_model
        self.emg_feature_extractor = emg_feature_extractor
        self.eeg_feature_extractor = eeg_feature_extractor
        self.seq2seq_model = seq2seq_model

    def compile(self, EEG_gen_optimizer, emg_feature_optim, eeg_feature_optim,
                seq2seq_optim, EEG_gen_loss, seq2seq_loss_fn, gen_accuracy, accuracy):
        super(Whole_model, self).compile()
        self.EEG_gen_optimizer = EEG_gen_optimizer
        self.emg_feature_optim = emg_feature_optim
        self.eeg_feature_optim = eeg_feature_optim
        self.seq2seq_optim = seq2seq_optim
        self.EEG_gen_loss = EEG_gen_loss
        self.seq2seq_loss_fn = seq2seq_loss_fn
        self.gen_accuracy = gen_accuracy
        self.accuracy = accuracy

    # We can use a different optimizer for each sub-model.
    def train_step(self, data):
        x_train, [y_train_eeg, y] = data
        y = tf.reshape(y, [-1, no_Epochs, 5])  # no_Epochs is a globally defined constant
        n_samples_per_epoch = x_train.shape[1]
        print(n_samples_per_epoch)
        eeg_gen_input = tf.reshape(x_train, [-1, n_samples_per_epoch, 1])
        y_eeg_gen = tf.reshape(y_train_eeg, [-1, n_samples_per_epoch, 1])

        # Train the EEG generator
        with tf.GradientTape() as tape:
            EEG_Gen = self.EEG_gen_model(eeg_gen_input)
            gen_model_loss = self.EEG_gen_loss(y_train_eeg, EEG_Gen)
            gen_accuracy = self.gen_accuracy(y_train_eeg, EEG_Gen)
        grads = tape.gradient(gen_model_loss, self.EEG_gen_model.trainable_weights)
        self.EEG_gen_optimizer.apply_gradients(zip(grads, self.EEG_gen_model.trainable_weights))

        # Train the seq2seq model
        emg_inp = x_train
        eeg_inp = self.EEG_gen_model(emg_inp)
        emg_enc_seq = self.emg_feature_extractor(emg_inp)
        eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
        emg_eeg_attention_seq = tf.keras.layers.Attention()([emg_enc_seq, eeg_enc_seq])
        input_layer = tf.keras.layers.Concatenate()([emg_enc_seq, emg_eeg_attention_seq])
        len_epoch = input_layer.shape[1]
        inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])

        with tf.GradientTape() as tape:
            outputs = self.seq2seq_model(inputs)
            seq2seq_loss = self.seq2seq_loss_fn(y, outputs)
            accuracy = self.accuracy(y, tf.argmax(outputs, 1))
        grads = tape.gradient(seq2seq_loss, self.seq2seq_model.trainable_weights)
        self.seq2seq_optim.apply_gradients(zip(grads, self.seq2seq_model.trainable_weights))

        # Train the EEG feature extractor
        emg_inp = x_train
        eeg_inp = self.EEG_gen_model(emg_inp)
        emg_enc_seq = self.emg_feature_extractor(emg_inp)
        with tf.GradientTape() as tape:
            eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
            emg_eeg_attention_seq = tf.keras.layers.Attention()([emg_enc_seq, eeg_enc_seq])
            input_layer = tf.keras.layers.Concatenate()([emg_enc_seq, emg_eeg_attention_seq])
            len_epoch = input_layer.shape[1]
            inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
            outputs = self.seq2seq_model(inputs)
            seq2seq_loss = self.seq2seq_loss_fn(y, outputs)
        grads = tape.gradient(seq2seq_loss, self.eeg_feature_extractor.trainable_weights)
        self.eeg_feature_optim.apply_gradients(zip(grads, self.eeg_feature_extractor.trainable_weights))

        # Train the EMG feature extractor
        emg_inp = x_train
        eeg_inp = self.EEG_gen_model(emg_inp)
        with tf.GradientTape() as tape:
            emg_enc_seq = self.emg_feature_extractor(emg_inp)
            eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
            emg_eeg_attention_seq = tf.keras.layers.Attention()([emg_enc_seq, eeg_enc_seq])
            input_layer = tf.keras.layers.Concatenate()([emg_enc_seq, emg_eeg_attention_seq])
            len_epoch = input_layer.shape[1]
            inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
            outputs = self.seq2seq_model(inputs)
            seq2seq_loss = self.seq2seq_loss_fn(y, outputs)
            accuracy = self.accuracy(y, tf.argmax(outputs, 1))
        grads = tape.gradient(seq2seq_loss, self.emg_feature_extractor.trainable_weights)
        self.emg_feature_optim.apply_gradients(zip(grads, self.emg_feature_extractor.trainable_weights))

        return {"seq2seq_loss": seq2seq_loss, "gen_model_loss": gen_model_loss,
                "gen_accuracy": gen_accuracy, "accuracy": accuracy}

Here, EEG_gen_model, emg_feature_extractor, eeg_feature_extractor, and seq2seq_model are sub-models of the main model.
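
For reference, a minimal sketch of how such a model could be compiled and fitted (not part of the original question): the optimizer, loss, and metric choices and the dataset names below are placeholders for illustration; only the argument names follow the compile() signature above.

# Minimal usage sketch with placeholder choices; the four sub-models are assumed
# to be built elsewhere.
model = Whole_model(EEG_gen_model, emg_feature_extractor,
                    eeg_feature_extractor, seq2seq_model)
model.compile(
    EEG_gen_optimizer=tf.keras.optimizers.Adam(),
    emg_feature_optim=tf.keras.optimizers.Adam(),
    eeg_feature_optim=tf.keras.optimizers.Adam(),
    seq2seq_optim=tf.keras.optimizers.Adam(),
    EEG_gen_loss=tf.keras.losses.MeanSquaredError(),
    seq2seq_loss_fn=tf.keras.losses.CategoricalCrossentropy(),
    gen_accuracy=tf.keras.metrics.MeanAbsoluteError(),
    accuracy=tf.keras.metrics.CategoricalAccuracy(),
)
# train_dataset yields (x_train, [y_train_eeg, y]) batches, as train_step expects;
# val_dataset yields (x_emg, y) batches, as test_step expects.
model.fit(train_dataset, validation_data=val_dataset, epochs=10)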

Now I want to evaluate the model with a customized test_step(), something like the following:

    def test_step(self, data):
        x_emg, y = data
        no_Epochs = 3
        y = tf.reshape(y, [-1, no_Epochs, 5])

        with tf.GradientTape() as tape:
            emg_inp = x_emg
            eeg_inp = self.EEG_gen_model(emg_inp)
            emg_enc_seq = self.emg_feature_extractor(emg_inp)
            eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
            emg_eeg_attention_seq = tf.keras.layers.Attention()([emg_enc_seq, eeg_enc_seq])
            input_layer = tf.keras.layers.Concatenate()([emg_enc_seq, emg_eeg_attention_seq])
            len_epoch = input_layer.shape[1]
            inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
            outputs = self.seq2seq_model(inputs)  # Forward pass
            # Compute our own loss
            loss = self.compiled_loss(y, outputs, regularization_losses=self.losses)

        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)

        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(y, outputs)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}

My question is: how do I set 'training=False' in the test_step() part in this scenario?

Any suggestions are appreciated.


1 Answer

Stack Overflow user

Accepted answer

Posted on 2020-11-07 16:33:56

Try this:

def test_step(self, data):
    x_emg, y = data
    no_Epochs = 3
    y = tf.reshape(y, [-1, no_Epochs, 5])

    emg_inp = x_emg
    eeg_inp = self.EEG_gen_model(emg_inp)
    emg_enc_seq = self.emg_feature_extractor(emg_inp)
    eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
    emg_eeg_attention_seq = tf.keras.layers.Attention()([emg_enc_seq, eeg_enc_seq])
    input_layer = tf.keras.layers.Concatenate()([emg_enc_seq, emg_eeg_attention_seq])
    len_epoch = input_layer.shape[1]
    inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
    outputs = self.seq2seq_model(inputs)  # Forward pass
    # Compute our own loss
    loss = self.compiled_loss(y, outputs, regularization_losses=self.losses)

    self.compiled_metrics.update_state(y, outputs)
    # Return a dict mapping metric names to current value
    return {m.name: m.result() for m in self.metrics}
Votes: 0
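
This evaluation step drops the GradientTape and the weight update, which is the main correction. On the literal question of 'training=False': Keras models and layers accept a training argument when they are called, so the flag can be passed explicitly to every sub-model call. Below is a sketch based on the accepted answer's code (not part of the original answer) with training=False added; in train_step the analogous calls would receive training=True.

# Sketch: the same evaluation forward pass with training=False passed
# explicitly to each sub-model call, so layers such as Dropout and
# BatchNormalization run in inference mode.
def test_step(self, data):
    x_emg, y = data
    no_Epochs = 3
    y = tf.reshape(y, [-1, no_Epochs, 5])

    emg_inp = x_emg
    eeg_inp = self.EEG_gen_model(emg_inp, training=False)
    emg_enc_seq = self.emg_feature_extractor(emg_inp, training=False)
    eeg_enc_seq = self.eeg_feature_extractor(eeg_inp, training=False)
    emg_eeg_attention_seq = tf.keras.layers.Attention()([emg_enc_seq, eeg_enc_seq])
    input_layer = tf.keras.layers.Concatenate()([emg_enc_seq, emg_eeg_attention_seq])
    len_epoch = input_layer.shape[1]
    inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
    outputs = self.seq2seq_model(inputs, training=False)  # Inference forward pass

    loss = self.compiled_loss(y, outputs, regularization_losses=self.losses)
    self.compiled_metrics.update_state(y, outputs)
    return {m.name: m.result() for m in self.metrics}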
Original question: https://stackoverflow.com/questions/64704040
