Here is the code:
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Input
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, f1_score, mean_absolute_error

DATADIR = "./Dataset"
TRAIN_TEST_CUTOFF = '2016-04-21'
TRAIN_VALID_RATIO = 0.75

# https://datascience.stackexchange.com/questions/45165/how-to-get-accuracy-f1-precision-and-recall-for-a-keras-model
# to implement F1 score for validation in a batch
def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))

def f1macro(y_true, y_pred):
    f_pos = f1_m(y_true, y_pred)
    # negative version of the data and prediction
    f_neg = f1_m(1-y_true, 1-K.clip(y_pred,0,1))
    return (f_pos + f_neg)/2

def cnnpred_2d(seq_len=60, n_features=82, n_filters=(8,8,8), droprate=0.1):
    "2D-CNNpred model according to the paper"
    model = Sequential([
        Input(shape=(seq_len, n_features, 1)),
        Conv2D(n_filters[0], kernel_size=(1, n_features), activation="relu"),
        Conv2D(n_filters[1], kernel_size=(3,1), activation="relu"),
        MaxPool2D(pool_size=(2,1)),
        Conv2D(n_filters[2], kernel_size=(3,1), activation="relu"),
        MaxPool2D(pool_size=(2,1)),
        Flatten(),
        Dropout(droprate),
        Dense(1, activation="sigmoid")
    ])
    return model

def datagen(data, seq_len, batch_size, targetcol, kind):
    "As a generator to produce samples for Keras model"
    batch = []
    while True:
        # Pick one dataframe from the pool
        key = random.choice(list(data.keys()))
        df = data[key]
        input_cols = [c for c in df.columns if c != targetcol]
        index = df.index[df.index < TRAIN_TEST_CUTOFF]
        split = int(len(index) * TRAIN_VALID_RATIO)
        assert split > seq_len, "Training data too small for sequence length {}".format(seq_len)
        if kind == 'train':
            index = index[:split]   # range for the training set
        elif kind == 'valid':
            index = index[split:]   # range for the validation set
        else:
            raise NotImplementedError
        # Pick one position, then clip a sequence length
        while True:
            t = random.choice(index)      # pick one time step
            n = (df.index == t).argmax()  # find its position in the dataframe
            if n-seq_len+1 < 0:
                continue  # this sample is not enough for one sequence length
            frame = df.iloc[n-seq_len+1:n+1]
            batch.append([frame[input_cols].values, df.loc[t, targetcol]])
            break
        # if we get enough for a batch, dispatch
        if len(batch) == batch_size:
            X, y = zip(*batch)
            X, y = np.expand_dims(np.array(X), 3), np.array(y)
            yield X, y
            batch = []

def testgen(data, seq_len, targetcol):
    "Return array of all test samples"
    batch = []
    for key, df in data.items():
        input_cols = [c for c in df.columns if c != targetcol]
        # find the start of test sample
        t = df.index[df.index >= TRAIN_TEST_CUTOFF][0]
        n = (df.index == t).argmax()
        # extract sample using a sliding window
        for i in range(n+1, len(df)+1):
            frame = df.iloc[i-seq_len:i]
            batch.append([frame[input_cols].values, frame[targetcol][-1]])
    X, y = zip(*batch)
    return np.expand_dims(np.array(X),3), np.array(y)

# Read data into pandas DataFrames
data = {}
for filename in os.listdir(DATADIR):
    if not filename.lower().endswith(".csv"):
        continue  # read only the CSV files
    filepath = os.path.join(DATADIR, filename)
    X = pd.read_csv(filepath, index_col="Date", parse_dates=True)
    # basic preprocessing: get the name, the classification
    # Save the target variable as a column in dataframe for easier dropna()
    name = X["Name"][0]
    del X["Name"]
    cols = X.columns
    X["Target"] = (X["Close"].pct_change().shift(-1) > 0).astype(int)
    X.dropna(inplace=True)
    # Fit the standard scaler using the training dataset
    index = X.index[X.index < TRAIN_TEST_CUTOFF]
    index = index[:int(len(index) * TRAIN_VALID_RATIO)]
    scaler = StandardScaler().fit(X.loc[index, cols])
    # Save scale transformed dataframe
    X[cols] = scaler.transform(X[cols])
    data[name] = X

seq_len = 60
batch_size = 128
n_epochs = 20
n_features = 82

# Produce CNNpred as a binary classification problem
model = cnnpred_2d(seq_len, n_features)
model.compile(optimizer="adam", loss="mae", metrics=["acc", f1macro])
model.summary()  # print model structure to console

# Set up callbacks and fit the model
# We use custom validation score f1macro() and hence monitor for "val_f1macro"
checkpoint_path = "./cp2d-{epoch}-{val_f1macro:.2f}.h5"
callbacks = [
    ModelCheckpoint(checkpoint_path,
                    monitor='val_f1macro', mode="max",
                    verbose=0, save_best_only=True, save_weights_only=False,
                    save_freq="epoch")
]
model.fit(datagen(data, seq_len, batch_size, "Target", "train"),
          validation_data=datagen(data, seq_len, batch_size, "Target", "valid"),
          epochs=n_epochs, steps_per_epoch=400, validation_steps=10, verbose=1,
          callbacks=callbacks)

# Prepare test data
test_data, test_target = testgen(data, seq_len, "Target")

# Test the model
test_out = model.predict(test_data)
test_pred = (test_out > 0.5).astype(int)
print("accuracy:", accuracy_score(test_pred, test_target))
print("MAE:", mean_absolute_error(test_pred, test_target))
print("F1:", f1_score(test_pred, test_target))
Here is the error:
File ~\OneDrive\Desktop\SAMIR\Nile University Undergraduate\Final Year Project\CNNpred-Keras-main\2DCNNPredOG.py:6 in <module>
    import tensorflow as tf
File ~\anaconda3\lib\site-packages\tensorflow\__init__.py:37 in <module>
    from tensorflow.python.tools import module_util as _module_util
File ~\anaconda3\lib\site-packages\tensorflow\python\__init__.py:42 in <module>
    from tensorflow.python import data
File ~\anaconda3\lib\site-packages\tensorflow\python\data\__init__.py:21 in <module>
    from tensorflow.python.data import experimental
File ~\anaconda3\lib\site-packages\tensorflow\python\data\experimental\__init__.py:95 in <module>
    from tensorflow.python.data.experimental import service
File ~\anaconda3\lib\site-packages\tensorflow\python\data\experimental\service\__init__.py:387 in <module>
    from tensorflow.python.data.experimental.ops.data_service_ops import distribute
File ~\anaconda3\lib\site-packages\tensorflow\python\data\experimental\ops\data_service_ops.py:23 in <module>
    from tensorflow.python.data.experimental.ops import compression_ops
File ~\anaconda3\lib\site-packages\tensorflow\python\data\experimental\ops\compression_ops.py:16 in <module>
    from tensorflow.python.data.util import structure
File ~\anaconda3\lib\site-packages\tensorflow\python\data\util\structure.py:22 in <module>
    from tensorflow.python.data.util import nest
File ~\anaconda3\lib\site-packages\tensorflow\python\data\util\nest.py:36 in <module>
    from tensorflow.python.framework import sparse_tensor as _sparse_tensor
File ~\anaconda3\lib\site-packages\tensorflow\python\framework\sparse_tensor.py:24 in <module>
    from tensorflow.python.framework import constant_op
File ~\anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py:25 in <module>
    from tensorflow.python.eager import execute
File ~\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py:23 in <module>
    from tensorflow.python.framework import dtypes
File ~\anaconda3\lib\site-packages\tensorflow\python\framework\dtypes.py:29 in <module>
    _np_bfloat16 = _pywrap_bfloat16.TF_bfloat16_type()
TypeError: Unable to convert function return value to a Python type! The signature was
    () -> handle
How can I fix this problem?
The error message "TypeError: Unable to convert function return value to a Python type! The signature was () -> handle" usually means that TensorFlow failed while converting the return value of a C++ function (a handle) into a Python type. This is typically caused by a version incompatibility (for example between TensorFlow and the installed numpy) or by a broken installation.
Here are some steps you can try to resolve it:
1. Update TensorFlow and related libraries:
   - Make sure your TensorFlow installation is up to date. You can upgrade it with:

     ```bash
     pip install --upgrade tensorflow
     ```

   - Also upgrade the related libraries, such as numpy and pandas:

     ```bash
     pip install --upgrade numpy pandas
     ```
2. Check your Anaconda environment:
   - Make sure you are using the correct Anaconda environment and that it is activated.
   - Try creating a fresh Anaconda environment and reinstalling TensorFlow and the other dependencies there (a sketch follows below).
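A minimal sketch of that clean-environment approach, run from the Anaconda Prompt; the environment name `tf-clean` and the Python version are only placeholder assumptions, adjust them to your project:

```bash
# create and activate a fresh environment (name and Python version are examples)
conda create -n tf-clean python=3.9
conda activate tf-clean

# reinstall TensorFlow and the other packages the script needs
pip install tensorflow numpy pandas scikit-learn

# quick sanity check that "import tensorflow" now works
python -c "import tensorflow as tf; print(tf.__version__)"
```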
3. Check GPU support:
   - If you are using the GPU build of TensorFlow, make sure the GPU driver and CUDA are installed and configured correctly.
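If you are unsure what driver and CUDA combination is installed, the following commands give a quick overview (assuming an NVIDIA GPU; `nvcc` is only available when the CUDA toolkit itself is installed):

```bash
# driver version and the GPUs the system can see
nvidia-smi

# CUDA toolkit version, if the toolkit is installed
nvcc --version
```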
4. Disable eager execution:
   - In TensorFlow 2.x, eager execution is enabled by default. Try disabling it at the top of your script:

     ```python
     import tensorflow as tf
     tf.compat.v1.disable_eager_execution()
     ```
5. Check the code for other errors:
   - Although the error message points at TensorFlow, another problem in the code could also be the cause. Review your code carefully, especially the parts dealing with data types and function return values.
If none of the steps above solves the problem, please provide the following information so I can help you better:
- Your TensorFlow version
- Your operating system
- Whether you are using a GPU
- The complete error message and code snippet
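Most of that can be gathered from the command line of the environment you run the script in, for example:

```bash
# Python version, OS description, and installed package versions in the active environment
python --version
python -c "import platform; print(platform.platform())"
pip show tensorflow numpy
```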
In addition, there may be a latent problem in your `datagen` function around `index = index[:split]` and `index = index[split:]`. Because you reassign `index` inside the loop, this could lead to index-out-of-range errors. Consider taking an explicit copy of the slice instead, for example `index = index[:split].copy()` and `index = index[split:].copy()`.
I hope this helps! If the problem persists, please share more details and I will do my best to assist further.
Tags: python, python-3.x, tensorflow, machine-learning, keras From: 72179285