Paste: z

Author: z
Mode: factor
Date: Wed, 19 Jul 2023 07:30:39
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split

# Load the dataset
data = pd.read_csv('dianli.csv')

# Extract the features and the target variable (the target is assumed to be the last column)
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

# Feature scaling (scale each feature to the [0, 1] range)
scaler = MinMaxScaler()
X = scaler.fit_transform(X)

# Scale the target with its own scaler so predictions can later be mapped back to the original units
y_scaler = MinMaxScaler()
y = y_scaler.fit_transform(y.reshape(-1, 1)).ravel()

# Number of time steps (consecutive rows are 15 minutes apart, so one row corresponds to one step)
time_steps = 1

# Build sequence data: each sample is `time_steps` consecutive rows of features,
# labelled with the target value at the following step
X_sequence = []
y_sequence = []
for i in range(len(X) - time_steps):
    X_sequence.append(X[i:i+time_steps])
    y_sequence.append(y[i+time_steps])

X_sequence = np.array(X_sequence)
y_sequence = np.array(y_sequence)

# Split into training and test sets (note: train_test_split shuffles by default, which mixes the time order)
X_train, X_test, y_train, y_test = train_test_split(X_sequence, y_sequence, test_size=0.2, random_state=42)

# Define the LSTM model
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(units=64, activation='relu', input_shape=(time_steps, X_train.shape[2])),
    tf.keras.layers.Dense(units=1)
])
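
# Optional sanity check (an addition, not part of the original paste): print the
# layer shapes and parameter counts to confirm the input shape matches the sequences
model.summary()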

# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')

# Train the model
model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.1)
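
# Optional variant (an assumption, not part of the original paste): stop training early
# when the validation loss stops improving, instead of always running 100 epochs.
# early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
# model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.1, callbacks=[early_stop])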

# Predict on the test set
y_pred = model.predict(X_test)

# Inverse-scale the target back to the original units (use the target scaler, not the feature scaler)
y_test = y_scaler.inverse_transform(y_test.reshape(-1, 1))
y_pred = y_scaler.inverse_transform(y_pred)

# Compute the mean squared error (MSE) in the original units
mse = np.mean((y_test - y_pred)**2)
print("Mean squared error (MSE):", mse)
