
My AI model doesn't provide me with 'accuracy', it always says it's 0. Why is that?


My code:

(The data I use for training is just a list of a few thousand BTC high prices.)
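For reference, a minimal stand-in for candlesticks.csv (the column names are inferred from the drop() calls in the code below; the numbers are made-up placeholders, and the real file has a few thousand rows):

import pandas as pd

sample = pd.DataFrame({
    'closeTime': [1616968799999, 1616972399999, 1616975999999],
    'open':   [58800.0, 58950.0, 59100.0],
    'high':   [59000.0, 59200.0, 59350.0],
    'low':    [58700.0, 58900.0, 59000.0],
    'close':  [58950.0, 59100.0, 59300.0],
    'volume': [1234.5, 1100.2, 1050.8],
})
# the default index=True writes an unnamed index column, which shows up as
# 'Unnamed: 0' when the script reads the file back in
sample.to_csv('candlesticks.csv')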

import numpy as np
import pandas as pd
from tqdm import tqdm

# split a univariate sequence into samples
def split_sequence(sequence, n_steps):
    X, y = list(), list()
    for i in tqdm(range(len(sequence)), desc='Creating training array'):
        # find the end of this pattern
        end_ix = i + n_steps
        # check if we are beyond the sequence
        if end_ix > len(sequence) - 1:
            break
        # gather input and output parts of the pattern
        seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
        X.append(seq_x)
        y.append(seq_y)
    return np.array(X), np.array(y)
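# Quick sanity check of split_sequence (my own illustration, not part of the
# original run): split_sequence([1, 2, 3, 4, 5, 6], n_steps=3) returns
# X = [[1 2 3], [2 3 4], [3 4 5]] and y = [4 5 6].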
def create_training_sequence(price_type='high', n_steps=5):
    df = pd.read_csv('candlesticks.csv')
    df = df.drop(['Unnamed: 0', 'open', 'close', 'volume'], axis=1)
    if price_type == 'high':
        sequence = df.drop(['closeTime', 'low'], axis=1)
    if price_type == 'low':
        sequence = df.drop(['closeTime', 'high'], axis=1)
    print(sequence.head())

    X, Y = split_sequence(sequence[price_type], n_steps)
    print(X[1])
    # reshape from [samples, timesteps] into [samples, timesteps, features]
    n_features = 1
    return X, Y

import tensorflow as TheFuckImDoingHere
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
from sklearn.model_selection import train_test_split

use_CPU = False
epochs = 100
n_steps = 60
batch_size = n_steps
n_features = 1

x, y = create_training_sequence(n_steps=n_steps)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], n_features))
x_test = x_test.reshape((x_test.shape[0], x_train.shape[1], n_features))
print('-------------------------')
print('x_train shape: ' + str(x_train.shape) + ', x_test shape: ' + str(x_test.shape))
print('-------------------------')

if use_CPU == True:
    # limit RAM and CPU usage
    TheFuckImDoingHere.config.threading.set_intra_op_parallelism_threads(2)
    TheFuckImDoingHere.config.threading.set_inter_op_parallelism_threads(2)

# define model
model = Sequential()
model.add(LSTM(64, activation='relu', return_sequences=True,
               input_shape=(n_steps, n_features)))
model.add(LSTM(128, activation='relu', return_sequences=True))
model.add(LSTM(256, activation='relu', return_sequences=True))
model.add(LSTM(128, activation='relu', return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse', metrics=['acc', 'mse'])
model.summary()

# stop early once validation loss stops improving
callback = TheFuckImDoingHere.keras.callbacks.EarlyStopping(
    monitor='val_loss', min_delta=0, patience=15, verbose=1,
    mode='min', baseline=None, restore_best_weights=False)

# fit model
if use_CPU == True:
    # run training on the CPU
    with TheFuckImDoingHere.device('/CPU:0'):
        hist = model.fit(x_train, y_train, epochs=epochs,
                         batch_size=batch_size, validation_data=(x_test, y_test),
                         callbacks=[callback])
elif use_CPU == False:
    # run training on the GPU
    with TheFuckImDoingHere.device('/GPU:0'):
        hist = model.fit(x_train, y_train, epochs=epochs,
                         batch_size=batch_size, validation_data=(x_test, y_test),
                         callbacks=[callback])

prediction = model.predict(x_test[0].reshape((1, x_train.shape[1], n_features)), verbose=0)
print('Prediction is: ' + str(prediction))
print('Real value is: ' + str(y_test[0]))

# print evaluation
loss1, acc1, mse1 = model.evaluate(x_test, y_test)
print(f"Loss is {loss1:.20E},\nAccuracy is {float(acc1) * 100},\nMSE is {mse1:.8E}")
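
For what it's worth, a minimal sketch of the metric in isolation (my assumption is that the 'acc' string resolves to Keras's exact-match Accuracy metric here, given the mse loss and a single continuous output); it reproduces the constant 0:

import tensorflow as tf

m = tf.keras.metrics.Accuracy()  # counts only exact matches between y_true and y_pred
y_true = tf.constant([59000.0, 59200.0, 59350.0])
y_pred = tf.constant([58999.7, 59201.3, 59349.9])  # close, but never exactly equal
m.update_state(y_true, y_pred)
print(m.result().numpy())  # 0.0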

submitted by /u/Chris-hsr
