Paste: z

Author: z
Mode: factor
Date: Tue, 18 Jul 2023 08:28:00
Plain Text
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

# Load the dataset
data = np.loadtxt('dianli.csv', delimiter=',')
X = data[:, :-1]  # feature matrix
y = data[:, -1]   # target variable

# Preprocess the data: scale features to the [0, 1] range
scaler = MinMaxScaler(feature_range=(0, 1))
X_scaled = scaler.fit_transform(X)

# Split into training and test sets (shuffle=False keeps the row order, so the test set is the last 20% of rows)
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, shuffle=False)

# Define the model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(X_train.shape[1],)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(1)
])
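
# Optional check (not in the original paste): print layer shapes and parameter
# counts to confirm the architecture before training.
model.summary()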

# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')

# Train the model
model.fit(X_train, y_train, epochs=100, batch_size=32, verbose=0)
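
# Optional variant (not in the original paste): training with a validation split
# and early stopping is a common way to guard against overfitting. Shown
# commented out so the training run above is left unchanged; a sketch only.
# early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10,
#                                               restore_best_weights=True)
# model.fit(X_train, y_train, epochs=100, batch_size=32,
#           validation_split=0.1, callbacks=[early_stop], verbose=0)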

# Make predictions on the test set
y_pred = model.predict(X_test)

# Evaluate the model (MSE loss on the test set)
loss = model.evaluate(X_test, y_test, verbose=0)
print("Loss:", loss)

# Take the last 20% of the data as the test set (with shuffle=False above, this is the same split train_test_split already produced)
test_size = int(len(X_scaled) * 0.2)
X_test = X_scaled[-test_size:]
y_test = y[-test_size:]

# Predict on this test set
y_pred = model.predict(X_test)

# Print the predictions
print("Predictions:", y_pred)
