LSTM Fire Temperature Prediction

🍨 This post is a learning-record entry from the 🔗 365-day deep learning training camp. 🍖 Original author: K同学啊

Goal

Reuse the LSTM model to predict fire temperature.

Implementation

(1) Environment

Language: Python 3.10
IDE: PyCharm
Framework: PyTorch

(2) Steps

1. Code

```python
import torch.nn.functional as F
import numpy as np
import pandas as pd
import torch
from torch import nn
import matplotlib.pyplot as plt
import seaborn as sns

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

data = pd.read_csv('data/woodpine2.csv')
print(data)

plt.rcParams['savefig.dpi'] = 500
plt.rcParams['figure.dpi'] = 500

fig, ax = plt.subplots(1, 3, constrained_layout=True, figsize=(14, 3))
sns.lineplot(data=data['Tem1'], ax=ax[0])
sns.lineplot(data=data['CO 1'], ax=ax[1])
sns.lineplot(data=data['Soot 1'], ax=ax[2])
plt.show()

from sklearn.preprocessing import MinMaxScaler

# Drop the first column (the Time index), keep all rows and the remaining
# feature columns, and work on a copy of the data.
dataFrame = data.iloc[:, 1:].copy()

# Normalize each feature to the range 0-1.
sc = MinMaxScaler(feature_range=(0, 1))
for i in ['CO 1', 'Soot 1', 'Tem1']:
    dataFrame[i] = sc.fit_transform(dataFrame[i].values.reshape(-1, 1))
print(dataFrame)
print(dataFrame.shape)

width_X = 8   # input sequence length
width_y = 1   # prediction horizon

X = []
y = []

print(f'Original data length: {len(dataFrame)}')
print(f'Number of sequences that can be created: {len(dataFrame) - width_X}')

# Sliding-window split: each input sample X_ covers width_X consecutive
# timesteps of all three features, and its label y_ is the temperature
# (column 0) over the following width_y timesteps. The window start moves
# forward one step per iteration; windows that would run past the end of
# the data are skipped. (The strict `<` below yields one window fewer,
# 5939, than the estimate printed above.)
in_start = 0
for _, _ in data.iterrows():
    in_end = in_start + width_X
    out_end = in_end + width_y
    if out_end < len(dataFrame):
        X_ = np.array(dataFrame.iloc[in_start:in_end, ])
        y_ = np.array(dataFrame.iloc[in_end:out_end, 0])
        X.append(X_)
        y.append(y_)
    in_start += 1

X = np.array(X)
y = np.array(y).reshape(-1, 1)
print(X.shape)
print(y.shape)
print(np.any(np.isnan(X)))
print(np.any(np.isnan(y)))

max_samples = min(5000, len(X))
X_train = torch.tensor(np.array(X[:max_samples]), dtype=torch.float32).to(device)
y_train = torch.tensor(np.array(y[:max_samples]), dtype=torch.float32).to(device)
X_test = torch.tensor(np.array(X[max_samples:]), dtype=torch.float32).to(device)
y_test = torch.tensor(np.array(y[max_samples:]), dtype=torch.float32).to(device)
print('Train set size:', X_train.shape, y_train.shape)
print('Test set size:', X_test.shape, y_test.shape)

from torch.utils.data import TensorDataset, DataLoader
train_dl = DataLoader(TensorDataset(X_train, y_train), batch_size=64, shuffle=False)
test_dl = DataLoader(TensorDataset(X_test, y_test), batch_size=64, shuffle=False)
```
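As an aside, the Python-level windowing loop above can be vectorized. This is a minimal sketch, not part of the original code, using numpy's `sliding_window_view` on the `dataFrame`, `width_X`, and `width_y` defined above; it should produce the same `(5939, 8, 3)` inputs and `(5939, 1)` labels:

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

values = dataFrame.to_numpy()                        # (5948, 3)

# All windows of width_X consecutive rows: (5941, 3, 8) -> (5941, 8, 3).
windows = sliding_window_view(values, window_shape=width_X, axis=0)
windows = windows.transpose(0, 2, 1)

# Mirror the `out_end < len(dataFrame)` condition of the original loop.
n = len(values) - width_X - width_y                  # 5939 samples
X_fast = windows[:n]
y_fast = values[width_X:width_X + n, 0].reshape(-1, 1)

assert X_fast.shape == (n, width_X, 3)
assert y_fast.shape == (n, 1)
```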
The model definition and the training and evaluation routines:

```python
class model_lstm(nn.Module):
    """LSTM network with two stacked LSTM layers and one fully connected layer.

    Input:  (batch_size, seq_length, input_size), with input_size=3
            (temperature, CO, soot) and hidden_size=320.
    Output: (batch_size, 1) temperature prediction.
    """

    def __init__(self):
        super(model_lstm, self).__init__()
        self.lstm0 = nn.LSTM(input_size=3, hidden_size=320,
                             num_layers=1, batch_first=True)
        self.lstm1 = nn.LSTM(input_size=320, hidden_size=320,
                             num_layers=1, batch_first=True)
        self.fc0 = nn.Linear(320, 1)

    def forward(self, x):
        out, hidden1 = self.lstm0(x)
        out, _ = self.lstm1(out, hidden1)   # reuse lstm0's final state
        out = self.fc0(out)                 # (batch, seq_len, 1)
        return out[:, -1, :]                # keep the last timestep: (batch, 1)


model = model_lstm()
print(model)
print(model(torch.rand(30, 8, 3)).shape)

import copy

def train(train_dl, model, loss_fn, opt, lr_scheduler=None):
    """Train for one epoch and return the average training loss."""
    size = len(train_dl.dataset)
    num_batches = len(train_dl)
    train_loss = 0
    for x, y in train_dl:
        x, y = x.to(device), y.to(device)
        pred = model(x)
        loss = loss_fn(pred, y)
        opt.zero_grad()
        loss.backward()
        opt.step()
        train_loss += loss.item()
    if lr_scheduler is not None:
        lr_scheduler.step()
        print('Current learning rate:', lr_scheduler.get_last_lr())
    train_loss /= num_batches
    return train_loss


def test(dataloader, model, loss_fn):
    """Evaluate the model and return the average test loss."""
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss = 0
    with torch.no_grad():
        for x, y in dataloader:
            x, y = x.to(device), y.to(device)
            pred = model(x)
            loss = loss_fn(pred, y)
            test_loss += loss.item()
    test_loss /= num_batches
    return test_loss


# Training
model = model_lstm().to(device)
loss_fn = nn.MSELoss()
learn_rate = 1e-1
opt = torch.optim.SGD(model.parameters(), lr=learn_rate, weight_decay=1e-4)
epochs = 50
train_loss = []
test_loss = []
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=epochs, last_epoch=-1)

for epoch in range(epochs):
    model.train()
    epoch_train_loss = train(train_dl, model, loss_fn, opt, lr_scheduler)
    model.eval()
    epoch_test_loss = test(test_dl, model, loss_fn)
    train_loss.append(epoch_train_loss)
    test_loss.append(epoch_test_loss)
    template = 'Epoch:{:2d}, Train_loss:{:.3f}, Test_loss:{:.3f}'
    print(template.format(epoch + 1, epoch_train_loss, epoch_test_loss))
print('=' * 20, 'Done', '=' * 20)

from datetime import datetime
current_time = datetime.now()

plt.figure(figsize=(5, 3), dpi=120)
plt.plot(train_loss, label='LSTM Training Loss')
plt.plot(test_loss, label='LSTM Testing Loss')
plt.title('Training and Validation Loss')
plt.xlabel(current_time)
plt.legend()
plt.show()

# Predict with the trained model
model.eval()
with torch.no_grad():
    predicted_y_lstm = sc.inverse_transform(
        model(X_test).cpu().detach().numpy().reshape(-1, 1))
    y_test_1 = sc.inverse_transform(y_test.cpu().numpy().reshape(-1, 1))
    y_test_one = [i[0] for i in y_test_1]
    predicted_y_lstm_one = [i[0] for i in predicted_y_lstm]

plt.figure(figsize=(10, 5), dpi=120)
plt.plot(y_test_one[:2000], color='red', label='real_temp')
plt.plot(predicted_y_lstm_one[:2000], color='blue', label='predict')
plt.title('Title')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.show()
```
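One subtlety worth flagging: the normalization loop re-fits the single `MinMaxScaler` instance `sc` on each column, so after the loop `sc` holds the min/max of the last column fitted, `Tem1`. That is the only reason `sc.inverse_transform` on the predictions recovers temperatures. A more explicit variant, my suggestion rather than the original code, keeps one scaler per feature:

```python
from sklearn.preprocessing import MinMaxScaler

# One scaler per feature, so each column can be inverted independently.
scalers = {}
for col in ['CO 1', 'Soot 1', 'Tem1']:
    scalers[col] = MinMaxScaler(feature_range=(0, 1))
    dataFrame[col] = scalers[col].fit_transform(
        dataFrame[col].values.reshape(-1, 1))

# De-normalize predictions with the temperature scaler only, e.g.:
# pred_temp = scalers['Tem1'].inverse_transform(pred.reshape(-1, 1))
```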
2. Results

```
C:\Users\feng_\anaconda3\envs\Pytorch\python.exe E:\dev\AI\Pytorch\RNN\R4-LSTM火灾温度预测\LSTM-HUOZAI.py
cuda
         Time   Tem1      CO 1    Soot 1
0       0.000   25.0  0.000000  0.000000
1       0.228   25.0  0.000000  0.000000
2       0.456   25.0  0.000000  0.000000
3       0.685   25.0  0.000000  0.000000
4       0.913   25.0  0.000000  0.000000
...       ...    ...       ...       ...
5943  366.000  295.0  0.000077  0.000496
5944  366.000  294.0  0.000077  0.000494
5945  367.000  292.0  0.000077  0.000491
5946  367.000  291.0  0.000076  0.000489
5947  367.000  290.0  0.000076  0.000487

[5948 rows x 4 columns]
          Tem1      CO 1    Soot 1
0     0.000000  0.000000  0.000000
1     0.000000  0.000000  0.000000
2     0.000000  0.000000  0.000000
3     0.000000  0.000000  0.000000
4     0.000000  0.000000  0.000000
...        ...       ...       ...
5943  0.957447  0.968672  0.968750
5944  0.953901  0.963659  0.964844
5945  0.946809  0.958647  0.958984
5946  0.943262  0.954887  0.955078
5947  0.939716  0.951128  0.951172

[5948 rows x 3 columns]
(5948, 3)
Original data length: 5948
Number of sequences that can be created: 5940
(5939, 8, 3)
(5939, 1)
False
False
Train set size: torch.Size([5000, 8, 3]) torch.Size([5000, 1])
Test set size: torch.Size([939, 8, 3]) torch.Size([939, 1])
model_lstm(
  (lstm0): LSTM(3, 320, batch_first=True)
  (lstm1): LSTM(320, 320, batch_first=True)
  (fc0): Linear(in_features=320, out_features=1, bias=True)
)
torch.Size([30, 1])
Current learning rate: [0.09990133642141358]
Epoch: 1, Train_loss:0.001, Test_loss:0.012
Current learning rate: [0.0996057350657239]
Epoch: 2, Train_loss:0.014, Test_loss:0.012
Current learning rate: [0.09911436253643444]
Epoch: 3, Train_loss:0.014, Test_loss:0.011
Current learning rate: [0.09842915805643154]
Epoch: 4, Train_loss:0.013, Test_loss:0.011
Current learning rate: [0.09755282581475767]
Epoch: 5, Train_loss:0.013, Test_loss:0.010
Current learning rate: [0.09648882429441256]
Epoch: 6, Train_loss:0.012, Test_loss:0.010
Current learning rate: [0.09524135262330098]
Epoch: 7, Train_loss:0.012, Test_loss:0.009
Current learning rate: [0.09381533400219318]
Epoch: 8, Train_loss:0.011, Test_loss:0.009
Current learning rate: [0.09221639627510075]
Epoch: 9, Train_loss:0.010, Test_loss:0.008
Current learning rate: [0.09045084971874737]
Epoch:10, Train_loss:0.009, Test_loss:0.007
Current learning rate: [0.08852566213878946]
Epoch:11, Train_loss:0.008, Test_loss:0.006
Current learning rate: [0.08644843137107057]
Epoch:12, Train_loss:0.007, Test_loss:0.005
Current learning rate: [0.08422735529643442]
Epoch:13, Train_loss:0.006, Test_loss:0.005
Current learning rate: [0.08187119948743447]
Epoch:14, Train_loss:0.005, Test_loss:0.004
Current learning rate: [0.07938926261462366]
Epoch:15, Train_loss:0.004, Test_loss:0.003
Current learning rate: [0.07679133974894982]
Epoch:16, Train_loss:0.003, Test_loss:0.003
Current learning rate: [0.07408768370508576]
Epoch:17, Train_loss:0.002, Test_loss:0.002
Current learning rate: [0.07128896457825362]
Epoch:18, Train_loss:0.002, Test_loss:0.002
Current learning rate: [0.06840622763423389]
Epoch:19, Train_loss:0.001, Test_loss:0.001
Current learning rate: [0.06545084971874736]
Epoch:20, Train_loss:0.001, Test_loss:0.001
Current learning rate: [0.06243449435824272]
Epoch:21, Train_loss:0.001, Test_loss:0.001
Current learning rate: [0.05936906572928623]
Epoch:22, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.056266661678215216]
Epoch:23, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.053139525976465665]
Epoch:24, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.049999999999999996]
Epoch:25, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.046860474023534326]
Epoch:26, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.04373333832178478]
Epoch:27, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.040630934270713764]
Epoch:28, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.03756550564175726]
Epoch:29, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.03454915028125265]
Epoch:30, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.03159377236576612]
Epoch:31, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.028711035421746366]
Epoch:32, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.025912316294914226]
Epoch:33, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.023208660251050155]
Epoch:34, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.020610737385376346]
Epoch:35, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.01812880051256551]
Epoch:36, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.015772644703565562]
Epoch:37, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.013551568628929433]
Epoch:38, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.011474337861210542]
Epoch:39, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.009549150281252632]
Epoch:40, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.007783603724899257]
Epoch:41, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.0061846659978068205]
Epoch:42, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.004758647376699033]
Epoch:43, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.0035111757055874327]
Epoch:44, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.0024471741852423235]
Epoch:45, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.0015708419435684518]
Epoch:46, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.000885637463565564]
Epoch:47, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.00039426493427611173]
Epoch:48, Train_loss:0.000, Test_loss:0.001
Current learning rate: [9.866357858642205e-05]
Epoch:49, Train_loss:0.000, Test_loss:0.001
Current learning rate: [0.0]
Epoch:50, Train_loss:0.000, Test_loss:0.001
==================== Done ====================
```
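The loss curves and the prediction-vs-ground-truth plot are rendered by the `plt.show()` calls and are not reproduced here. Beyond eyeballing those curves, it can help to quantify the fit. A small sketch, my addition rather than part of the original post, using `sklearn.metrics` on the de-normalized `y_test_one` and `predicted_y_lstm_one` computed above:

```python
from sklearn import metrics

# RMSE in degrees, plus the coefficient of determination.
rmse = metrics.mean_squared_error(y_test_one, predicted_y_lstm_one) ** 0.5
r2 = metrics.r2_score(y_test_one, predicted_y_lstm_one)
print(f'RMSE: {rmse:.3f}')
print(f'R2:   {r2:.3f}')
```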
(3) Summary

- Data processing: a sliding window splits the time series into input sequences of length 8 paired with prediction targets of length 1.
- Multivariate input: three features are used together, temperature (Tem1), CO concentration (CO 1), and soot concentration (Soot 1).
- Two-layer LSTM: two stacked LSTM layers with 320 hidden units each strengthen the model's temporal modeling capacity.
- A single fully connected layer maps the last timestep's output directly to the prediction.
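Because the network only ever predicts one step ahead, a natural extension is a rolling forecast: predict the next temperature, slide it back into the input window, and repeat. The sketch below is my own illustration, not part of the original lesson; it assumes future CO/soot values are taken from the recorded data (only the temperature channel is replaced by the model's own predictions), and it relies on `sc` still holding the Tem1 min/max as noted earlier:

```python
import numpy as np
import torch

def rolling_forecast(model, first_window, exog, steps):
    """Roll the one-step model forward for `steps` steps.

    first_window: (width_X, 3) normalized array, the last observed window.
    exog:         (steps, 2) normalized future CO/soot rows, assumed known
                  here purely for illustration.
    """
    window = first_window.copy()
    preds = []
    model.eval()
    with torch.no_grad():
        for t in range(steps):
            x = torch.tensor(window[None, ...], dtype=torch.float32).to(device)
            temp_pred = model(x).item()        # normalized next temperature
            preds.append(temp_pred)
            next_row = np.array([temp_pred, exog[t, 0], exog[t, 1]])
            window = np.vstack([window[1:], next_row])   # slide the window
    return np.array(preds)

# Example: forecast 50 steps starting from the first test window.
start = max_samples + width_X
exog = dataFrame.iloc[start:start + 50, 1:3].to_numpy()
preds_norm = rolling_forecast(model, X[max_samples], exog, steps=50)
preds_temp = sc.inverse_transform(preds_norm.reshape(-1, 1))  # back to degrees
```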