Background
Sleep is essential to human health. Monitoring a person's sleep stages is therefore of real importance for both health tracking and clinical care.
Highlights
The architecture uses two CNNs with different filter sizes together with a bidirectional LSTM in its first layers. The CNNs can be trained to learn filters that extract time-invariant features from raw single-channel EEG, while the bidirectional LSTM can be trained to encode temporal information, such as sleep-stage transition rules, into the model.
A two-step training algorithm is used that trains the model end-to-end efficiently via backpropagation, while preventing it from suffering from the class-imbalance problem found in large sleep datasets (i.e., learning to classify only the majority sleep stages).
Without changing the model architecture or the training algorithm, the model automatically learns features for sleep stage scoring from the different raw single-channel EEGs of two datasets with different properties, such as sampling rate and scoring standard (AASM and R&K).
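To make the dual-branch CNN front end concrete, here is a minimal sketch of two parallel convolutional branches with small and large filters applied to a raw 30-s EEG epoch. The function name dual_branch_cnn, the layer sizes, and the assumption of 100 Hz input (3000 samples per epoch) are illustrative assumptions, not this repository's exact implementation; it uses the tf.compat.v1 API to match the environment listed below.

import tensorflow as tf

def dual_branch_cnn(x):
    """Sketch of a dual-branch CNN front end (illustrative, not the repo's code).

    x: raw single-channel EEG epochs, shape (batch, 3000, 1),
       i.e. 30 s at an assumed 100 Hz sampling rate.
    Returns one concatenated time-invariant feature vector per epoch.
    """
    # Branch with small filters: sensitive to fine-grained temporal patterns.
    with tf.compat.v1.variable_scope("small_branch"):
        s = tf.compat.v1.layers.conv1d(x, filters=64, kernel_size=50, strides=6,
                                       padding="same", activation=tf.nn.relu)
        s = tf.compat.v1.layers.max_pooling1d(s, pool_size=8, strides=8)
        s = tf.compat.v1.layers.flatten(s)

    # Branch with large filters: sensitive to slower frequency content.
    with tf.compat.v1.variable_scope("large_branch"):
        l = tf.compat.v1.layers.conv1d(x, filters=64, kernel_size=400, strides=50,
                                       padding="same", activation=tf.nn.relu)
        l = tf.compat.v1.layers.max_pooling1d(l, pool_size=4, strides=4)
        l = tf.compat.v1.layers.flatten(l)

    # Concatenate both branches into a single feature vector per epoch.
    return tf.concat([s, l], axis=1)

# Example usage in graph mode (TF 1.15):
eeg = tf.compat.v1.placeholder(tf.float32, shape=[None, 3000, 1], name="eeg_epoch")
features = dual_branch_cnn(eeg)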
Environment
Python 3.5.4, tensorflow-gpu 1.15.2
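A quick sanity check that the installed versions match the ones above (the expected values in the comments are assumptions based on this setup):

# Minimal environment check; the printed versions should match the list above.
import sys
import tensorflow as tf

print(sys.version)                  # expected: 3.5.4
print(tf.__version__)               # expected: 1.15.2
print(tf.test.is_gpu_available())   # True if the GPU build can see a CUDA device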
Data
Sleep-EDF
MASS

Method

Main model code
import tensorflow as tf

# DeepFeatureNet (the CNN feature extractor) and the helpers fc and batch_norm_new
# come from the accompanying project code and are assumed to be importable here.

class MyModel(DeepFeatureNet):

    def __init__(self, batch_size, input_dims, n_classes, seq_length,
                 n_rnn_layers, return_last, is_train, reuse_params,
                 use_dropout_feature, use_dropout_sequence,
                 name="deepsleepnet"):
        super(self.__class__, self).__init__(
            batch_size=batch_size,
            input_dims=input_dims,
            n_classes=n_classes,
            is_train=is_train,
            reuse_params=reuse_params,
            use_dropout=use_dropout_feature,
            name=name
        )

        self.seq_length = seq_length
        self.n_rnn_layers = n_rnn_layers
        self.return_last = return_last
        self.use_dropout_sequence = use_dropout_sequence

    def _build_placeholder(self):
        # Input
        name = "x_train" if self.is_train else "x_valid"
        self.input_var = tf.compat.v1.placeholder(
            tf.float32,
            shape=[self.batch_size * self.seq_length, self.input_dims, 1, 1],
            name=name + "_inputs"
        )
        # Target
        self.target_var = tf.compat.v1.placeholder(
            tf.int32,
            shape=[self.batch_size * self.seq_length, ],
            name=name + "_targets"
        )

    def build_model(self, input_var):
        # Create a network with superclass method
        network = super(self.__class__, self).build_model(input_var=self.input_var)

        # Residual (or shortcut) connection
        output_conns = []

        # Fully-connected to select some part of the output to add
        # with the output from bi-directional LSTM
        name = "l{}_fc".format(self.layer_idx)
        with tf.compat.v1.variable_scope(name) as scope:
            output_tmp = fc(name="fc", input_var=network, n_hiddens=1024, bias=None, wd=0)
            output_tmp = batch_norm_new(name="bn", input_var=output_tmp, is_train=self.is_train)
            # output_tmp = leaky_relu(name="leaky_relu", input_var=output_tmp)
            output_tmp = tf.nn.relu(output_tmp, name="relu")
        self.activations.append((name, output_tmp))
        self.layer_idx += 1
        output_conns.append(output_tmp)

        ######################################################################

        # Reshape the input from (batch_size * seq_length, input_dim) to
        # (batch_size, seq_length, input_dim)
        name = "l{}_reshape_seq".format(self.layer_idx)
        input_dim = network.get_shape()[-1].value
        seq_input = tf.reshape(network,
                               shape=[-1, self.seq_length, input_dim],
                               name=name)
        assert self.batch_size == seq_input.get_shape()[0].value
        self.activations.append((name, seq_input))
        self.layer_idx += 1

        # Bidirectional LSTM network
        name = "l{}_bi_lstm".format(self.layer_idx)
        hidden_size = 512   # will output 1024 (512 forward, 512 backward)
        with tf.compat.v1.variable_scope(name) as scope:

            def lstm_cell():
                cell = tf.compat.v1.nn.rnn_cell.LSTMCell(
                    hidden_size,
                    use_peepholes=True,
                    state_is_tuple=True,
                    reuse=tf.compat.v1.get_variable_scope().reuse
                )
                if self.use_dropout_sequence:
                    keep_prob = 0.5 if self.is_train else 1.0
                    cell = tf.compat.v1.nn.rnn_cell.DropoutWrapper(
                        cell,
                        output_keep_prob=keep_prob
                    )
                return cell

            fw_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
                [lstm_cell() for _ in range(self.n_rnn_layers)], state_is_tuple=True)
            bw_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
                [lstm_cell() for _ in range(self.n_rnn_layers)], state_is_tuple=True)

            # Initial state of RNN
            self.fw_initial_state = fw_cell.zero_state(self.batch_size, tf.float32)
            self.bw_initial_state = bw_cell.zero_state(self.batch_size, tf.float32)

            # Feedforward to MultiRNNCell
            list_rnn_inputs = tf.unstack(seq_input, axis=1)
            # outputs, fw_state, bw_state = tf.nn.bidirectional_rnn(
            outputs, fw_state, bw_state = tf.compat.v1.nn.static_bidirectional_rnn(
                cell_fw=fw_cell,
                cell_bw=bw_cell,
                inputs=list_rnn_inputs,
                initial_state_fw=self.fw_initial_state,
                initial_state_bw=self.bw_initial_state
            )

            if self.return_last:
                network = outputs[-1]
            else:
                network = tf.reshape(tf.concat(axis=1, values=outputs),
                                     [-1, hidden_size * 2],
                                     name=name)
            self.activations.append((name, network))
            self.layer_idx += 1

            self.fw_final_state = fw_state
            self.bw_final_state = bw_state

        # Append output
        output_conns.append(network)

        ######################################################################

        # Add
        name = "l{}_add".format(self.layer_idx)
        network = tf.add_n(output_conns, name=name)
        self.activations.append((name, network))
        self.layer_idx += 1

        # Dropout
        if self.use_dropout_sequence:
            name = "l{}_dropout".format(self.layer_idx)
            if self.is_train:
                network = tf.nn.dropout(network, keep_prob=0.5, name=name)
            else:
                network = tf.nn.dropout(network, keep_prob=1.0, name=name)
            self.activations.append((name, network))
        self.layer_idx += 1

        return network

    def init_ops(self):
        self._build_placeholder()

        # Get loss and prediction operations
        with tf.compat.v1.variable_scope(self.name) as scope:

            # Reuse variables for validation
            if self.reuse_params:
                scope.reuse_variables()

            # Build model
            network = self.build_model(input_var=self.input_var)

            # Softmax linear
            name = "l{}_softmax_linear".format(self.layer_idx)
            network = fc(name=name, input_var=network, n_hiddens=self.n_classes, bias=0.0, wd=0)
            self.activations.append((name, network))
            self.layer_idx += 1

            # Outputs of softmax linear are logits
            self.logits = network

            ######### Compute loss #########

            # Weighted cross-entropy loss for a sequence of logits (per example)
            loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
                [self.logits],
                [self.target_var],
                [tf.ones([self.batch_size * self.seq_length])],
                name="sequence_loss_by_example"
            )
            loss = tf.reduce_sum(loss) / self.batch_size

            # Regularization loss
            regular_loss = tf.add_n(
                tf.compat.v1.get_collection("losses", scope=scope.name + "\\/"),
                name="regular_loss"
            )

            # print("Params to compute regularization loss:")
            # for p in tf.compat.v1.get_collection("losses", scope=scope.name + "\\/"):
            #     print(p.name)

            # Total loss
            self.loss_op = tf.add(loss, regular_loss)

            # Predictions
            self.pred_op = tf.argmax(self.logits, 1)
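For orientation, here is a hedged sketch of how this class might be instantiated and its ops built. The hyperparameter values (input_dims=3000 for a 30-s epoch at 100 Hz, n_classes=5, seq_length=25, n_rnn_layers=2) follow the DeepSleepNet paper's defaults and are assumptions here, not necessarily this repository's configuration.

import tensorflow as tf

# Hypothetical usage sketch (graph mode, TF 1.15); assumes MyModel and its helper
# functions are importable from the project code.
with tf.Graph().as_default():
    train_net = MyModel(
        batch_size=10,
        input_dims=3000,          # 30-s epoch at an assumed 100 Hz sampling rate
        n_classes=5,              # W, N1, N2, N3, REM
        seq_length=25,            # consecutive epochs fed to the bi-LSTM
        n_rnn_layers=2,
        return_last=False,
        is_train=True,
        reuse_params=False,
        use_dropout_feature=True,
        use_dropout_sequence=True,
    )
    train_net.init_ops()          # builds placeholders, logits, loss_op and pred_op

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        # Feed train_net.input_var / train_net.target_var with mini-batches here.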
Results
Sleep staging results (figure omitted)
Classification table on the MASS dataset (table omitted)

Getting the code
Send me a private message with "1" to get the code.