Dataset splitting and model definition
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.optimizers import Adam

def data_split(datax, datay, val_size=0.1, test_size=0.05):
    '''Input: datax, datay.
    Output: trainx, valx, testx, trainy, valy, testy -- a positional (chronological) split
    into training, validation and test sets according to the given proportions.'''
    pos_test = int(len(datax) * (1 - test_size))
    pos_val = int(len(datax[:pos_test]) * (1 - val_size))
    trainx, valx, testx = datax[:pos_val], datax[pos_val:pos_test], datax[pos_test:]
    trainy, valy, testy = datay[:pos_val], datay[pos_val:pos_test], datay[pos_test:]
    return trainx, valx, testx, trainy, valy, testy

def lvdnnework(nenum, laynum=3, necoef=1, regcoef=0.01, drnum=1, ratcoef=0.3):
    '''Input: input dimension, number of hidden layers, per-layer width coefficient,
    L2 regularization coefficient, number of dropout layers and dropout rate.
    Output: the model (note: built with the Sequential API, not the functional API).'''
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(nenum, activation='relu'))
    for i in range(laynum):
        model.add(tf.keras.layers.Dense(int(nenum * necoef),
                                        kernel_regularizer=tf.keras.regularizers.l2(regcoef),
                                        activation='relu'))
        if i < drnum:
            model.add(tf.keras.layers.Dropout(rate=ratcoef))
    model.add(tf.keras.layers.Dense(nenum))
    return model

def pk_dp(lvnk):
    # Serialize the model topology (as JSON) and its weights separately.
    mdds_topy = pickle.dumps(lvnk.to_json())
    mdds_wts = pickle.dumps(lvnk.get_weights())
    return mdds_topy, mdds_wts

trainx, valx, testx, trainy, valy, testy = data_split(data_x_delt, data_y_delt)
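Note that data_split is purely positional: the last 5% of rows become the test set and the last 10% of the remaining rows become the validation set, with no shuffling, so any time ordering is preserved. A minimal sanity check on hypothetical toy arrays (not part of the original pipeline), assuming the definitions above are in scope:

# Sketch: verify the chronological split sizes on toy data (illustrative only).
toy_x = np.arange(200).reshape(100, 2)   # 100 samples, 2 features (hypothetical)
toy_y = np.arange(100)
tx, vx, sx, ty, vy, sy = data_split(toy_x, toy_y, val_size=0.1, test_size=0.05)
print(len(tx), len(vx), len(sx))         # expected: 85, 10, 5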
lvdnnk = lvdnnework(trainx.shape[1], laynum = 3, necoef = 1.0, regcoef = 0.002, drnum = 0, ratcoef = 0.2)
optimizer = Adam(learning_rate=0.004, beta_1=0.99, beta_2=0.999, epsilon=1e-7)
lvdnnk.compile(optimizer=optimizer, loss='mse')
# validation_data is passed as a tuple; validation_steps is dropped because it only applies to tf.data inputs.
trainr = lvdnnk.fit(trainx, trainy, epochs=200, batch_size=20, shuffle=True, validation_data=(valx, valy), verbose=0)
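The History object returned by fit records the per-epoch losses, so convergence can be checked before looking at predictions. A minimal sketch, reusing the plotting setup from this section (the output filename is hypothetical):

# Sketch: plot the training and validation loss curves recorded by fit (illustrative).
plt.figure()
plt.plot(trainr.history['loss'], label='train loss')
plt.plot(trainr.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('./picture/loss_curve.jpg')   # hypothetical filename, mirroring the ./picture convention below
plt.close()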
mdds_topy, mdds_wts = pk_dp(lvdnnk)
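Because pk_dp pickles the JSON topology and the weight list separately, the model can later be rebuilt from those two blobs. A minimal sketch of the inverse step, using the standard Keras model_from_json/set_weights calls (the optimizer settings are an assumption, only needed if the restored model is trained or evaluated further):

# Sketch: restore a model from the pickled topology and weights (inverse of pk_dp).
restored = tf.keras.models.model_from_json(pickle.loads(mdds_topy))
restored.set_weights(pickle.loads(mdds_wts))
restored.compile(optimizer='adam', loss='mse')   # recompile before further training/evaluation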
pre = lvdnnk.predict(trainx)
corr = np.corrcoef(trainy.reshape(1,-1),pre.reshape(1,-1))
pre2 = lvdnnk.predict(testx)
corr2 = np.corrcoef(testy.reshape(1,-1),pre2.reshape(1,-1))  # test-set correlation: compare against testy, not trainy
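The off-diagonal entry of each corrcoef matrix is the scalar correlation. A short sketch (illustrative variable names) that prints it alongside the test-set MSE as a quick sanity check:

# Sketch: report scalar correlations and test-set MSE (illustrative only).
train_corr = corr[0, 1]
test_corr = corr2[0, 1]
test_mse = np.mean((pre2.reshape(-1) - np.asarray(testy).reshape(-1)) ** 2)
print(f'train corr={train_corr:.3f}, test corr={test_corr:.3f}, test mse={test_mse:.4f}')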
Plotting the comparison of results
plt.figure(figsize=(15, 5))
plt.subplot(1, 1, 1)
plt.plot(pd.DataFrame(pre2.reshape(-1,1)), color="red", label="prediction")
plt.plot(pd.DataFrame(testy.reshape(-1,1)), color="blue", label="actual")  # compare against testy, matching pre2
plt.legend()
plt.title(str(tg_id)+ '_' +str(round(corr2[1,0], 2)))
plt.savefig(f'./picture/{tg_id}.jpg')
plt.close()