
python - How to improve LSTM training accuracy


I trained an LSTM for Quora question-pair (duplicate) detection, but the training accuracy is very low and keeps fluctuating while I train. I don't understand what mistake I made.

I tried changing the loss and the optimizer, and increasing the number of epochs.

import numpy as np
from numpy import array
from keras.callbacks import ModelCheckpoint
import keras
from keras.optimizers import SGD
import tensorflow as tf
from sklearn import preprocessing
import xgboost as xgb
from keras import backend as K
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from keras.preprocessing.text import Tokenizer , text_to_word_sequence
from keras.preprocessing.sequence import pad_sequences
from keras.layers.embeddings import Embedding
from keras.models import Sequential, model_from_json, load_model
from keras.layers import LSTM, Dense, Input, concatenate, Concatenate, Activation, Flatten
from keras.models import Model
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer
import nltk

from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import pandas as pd
import scipy
import matplotlib.pyplot as plt
import pickle

df = pd.read_csv("questions.csv")
df.drop(['id','qid1', 'qid2'], axis=1, inplace=True)

df2 = pd.read_csv("testmenew.csv")
## filter the dataset
SPECIAL_TOKENS = {
    'quoted': 'quoted_item',
    'non-ascii': 'non_ascii_word',
    'undefined': 'something'
}

def clean(text, stem_words=True):
    # Normalize contractions, abbreviations and common misspellings,
    # then strip punctuation and lowercase the text.
    import re
    from string import punctuation
    from nltk.stem import SnowballStemmer
    from nltk.corpus import stopwords

    def pad_str(s):
        return ' ' + s + ' '

    if pd.isnull(text):
        return ''

    if type(text) != str or text == '':
        return ''

    text = re.sub("\'s", " ", text)
    text = re.sub(" whats ", " what is ", text, flags=re.IGNORECASE)
    text = re.sub("\'ve", " have ", text)
    text = re.sub("can't", "can not", text)
    text = re.sub("n't", " not ", text)
    text = re.sub("i'm", "i am", text, flags=re.IGNORECASE)
    text = re.sub("\'re", " are ", text)
    text = re.sub("\'d", " would ", text)
    text = re.sub("\'ll", " will ", text)
    text = re.sub("e\.g\.", " eg ", text, flags=re.IGNORECASE)
    text = re.sub("b\.g\.", " bg ", text, flags=re.IGNORECASE)
    text = re.sub("(\d+)(kK)", " \g<1>000 ", text)
    text = re.sub("e-mail", " email ", text, flags=re.IGNORECASE)
    text = re.sub("(the[\s]+|The[\s]+)?U\.S\.A\.", " America ", text, flags=re.IGNORECASE)
    text = re.sub("(the[\s]+|The[\s]+)?United State(s)?", " America ", text, flags=re.IGNORECASE)
    text = re.sub("\(s\)", " ", text, flags=re.IGNORECASE)
    text = re.sub("[c-fC-F]\:\/", " disk ", text)

    text = re.sub('(?<=[0-9])\,(?=[0-9])', "", text)
    text = re.sub('\$', " dollar ", text)
    text = re.sub('\%', " percent ", text)
    text = re.sub('\&', " and ", text)
    text = re.sub('[^\x00-\x7F]+', pad_str(SPECIAL_TOKENS['non-ascii']), text)
    text = re.sub("(?<=[0-9])rs ", " rs ", text, flags=re.IGNORECASE)
    text = re.sub(" rs(?=[0-9])", " rs ", text, flags=re.IGNORECASE)
    text = re.sub(r" (the[\s]+|The[\s]+)?US(A)? ", " America ", text)
    text = re.sub(r" UK ", " England ", text, flags=re.IGNORECASE)
    text = re.sub(r" india ", " India ", text)
    text = re.sub(r" switzerland ", " Switzerland ", text)
    text = re.sub(r" china ", " China ", text)
    text = re.sub(r" chinese ", " Chinese ", text)
    text = re.sub(r" imrovement ", " improvement ", text, flags=re.IGNORECASE)
    text = re.sub(r" intially ", " initially ", text, flags=re.IGNORECASE)
    text = re.sub(r" quora ", " Quora ", text, flags=re.IGNORECASE)
    text = re.sub(r" dms ", " direct messages ", text, flags=re.IGNORECASE)
    text = re.sub(r" demonitization ", " demonetization ", text, flags=re.IGNORECASE)
    text = re.sub(r" actived ", " active ", text, flags=re.IGNORECASE)
    text = re.sub(r" kms ", " kilometers ", text, flags=re.IGNORECASE)
    text = re.sub(r" cs ", " computer science ", text, flags=re.IGNORECASE)
    text = re.sub(r" upvote", " up vote", text, flags=re.IGNORECASE)
    text = re.sub(r" iPhone ", " phone ", text, flags=re.IGNORECASE)
    text = re.sub(r" \0rs ", " rs ", text, flags=re.IGNORECASE)
    text = re.sub(r" calender ", " calendar ", text, flags=re.IGNORECASE)
    text = re.sub(r" ios ", " operating system ", text, flags=re.IGNORECASE)
    text = re.sub(r" gps ", " GPS ", text, flags=re.IGNORECASE)
    text = re.sub(r" gst ", " GST ", text, flags=re.IGNORECASE)
    text = re.sub(r" programing ", " programming ", text, flags=re.IGNORECASE)
    text = re.sub(r" bestfriend ", " best friend ", text, flags=re.IGNORECASE)
    text = re.sub(r" dna ", " DNA ", text, flags=re.IGNORECASE)
    text = re.sub(r" III ", " 3 ", text)
    text = re.sub(r" banglore ", " Banglore ", text, flags=re.IGNORECASE)
    text = re.sub(r" J K ", " JK ", text, flags=re.IGNORECASE)
    text = re.sub(r" J\.K\. ", " JK ", text, flags=re.IGNORECASE)
    text = re.sub('[0-9]+\.[0-9]+', " 87 ", text)
    text = ''.join([c for c in text if c not in punctuation]).lower()
    return text


df['question1'] = df['question1'].apply(clean)
df['question2'] = df['question2'].apply(clean)

df2['q1'] = df2['q1'].apply(clean)
df2['q2'] = df2['q2'].apply(clean)

main = df['is_duplicate'].values

main.shape  # (404351,)


vocabularySize = 20000
lstm_out = 200
embed_dim = 128

Rawdata=df['question1'].apply(word_tokenize)
Rawdata2=df['question2'].apply(word_tokenize)

testme = df2['q1'].apply(word_tokenize)
testme2=df2['q2'].apply(word_tokenize)

tokenizer2 = Tokenizer(num_words = vocabularySize )

tokenizer2.fit_on_texts(testme)
tokenizer2.fit_on_texts(testme2)

tokenizer = Tokenizer(num_words = vocabularySize )

tokenizer.fit_on_texts(Rawdata)
tokenizer.fit_on_texts(Rawdata2)

sequences = tokenizer.texts_to_sequences(Rawdata)
sequences2 = tokenizer.texts_to_sequences(Rawdata2)

sequences3 = tokenizer2.texts_to_sequences(testme)
sequences4 = tokenizer2.texts_to_sequences(testme2)

data = pad_sequences(sequences, maxlen=2)
data2 = pad_sequences(sequences2, maxlen=2)

data3 = pad_sequences(sequences3, maxlen=2)
data4 = pad_sequences(sequences4, maxlen=2)

TestInput = np.array([data3,data4])
TestInput = TestInput.reshape(1,2,2)
Input = np.array([data,data2])
Input = Input.reshape(404351,2,2)

#opt = SGD(lr = 0.001, momentum = 0.60)

model = Sequential()
#model.add(Embedding(1, 4,input_length = 2 , dropout = 0.4))
model.add(LSTM((1), input_shape = (2,2), return_sequences=False))
model.add(Activation ('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
X_train,X_test,y_train,y_test = train_test_split(Input,main,test_size = 0.2,random_state = 4)

Input.shape  # (404351, 2, 2)

history = model.fit(X_train,y_train,epochs = 10,validation_data= (X_test,y_test) )
model.save_weights('newoutput2.h5')

Train on 323480 samples, validate on 80871 samples
Epoch 1/10
323480/323480 [==============================] - 27s 83us/step - loss: 0.6931 - acc: 0.6304 - val_loss: 0.6931 - val_acc: 0.6323
Epoch 2/10
323480/323480 [==============================] - 24s 73us/step - loss: 0.6931 - acc: 0.6304 - val_loss: 0.6931 - val_acc: 0.6323
Epoch 3/10
323480/323480 [==============================] - 23s 71us/step - loss: 0.6931 - acc: 0.6304 - val_loss: 0.6931 - val_acc: 0.6323
Epoch 4/10
323480/323480 [==============================] - 23s 71us/step - loss: 0.6931 - acc: 0.6304 - val_loss: 0.6931 - val_acc: 0.6323
Epoch 5/10
323480/323480 [==============================] - 23s 72us/step - loss: 0.6931 - acc: 0.6304 - val_loss: 0.6931 - val_acc: 0.6323
Epoch 6/10
323480/323480 [==============================] - 23s 71us/step - loss: 0.6931 - acc: 0.6304 - val_loss: 0.6931 - val_acc: 0.6323
Epoch 7/10
323480/323480 [==============================] - 23s 71us/step - loss: 0.6931 - acc: 0.6304 - val_loss: 0.6931 - val_acc: 0.6323
Epoch 8/10
323480/323480 [==============================] - 25s 76us/step - loss: 0.6931 - acc: 0.6304 - val_loss: 0.6931 - val_acc: 0.6323
Epoch 9/10
323480/323480 [==============================] - 25s 78us/step - loss: 0.6931 - acc: 0.6304 - val_loss: 0.6931 - val_acc: 0.6323
Epoch 10/10
323480/323480 [==============================] - 25s 78us/step - loss: 0.6931 - acc: 0.6304 - val_loss: 0.6931 - val_acc: 0.6323

filename = 'newoutput2.h5'
model.load_weights(filename)
new = model.predict(TestInput)
if new > 0.6:
    print("Duplication detected")
else:
    print("No duplicate")
new

The prediction comes out around 0.6567, but the training accuracy is not increasing at all. Please help!

I need to improve the training accuracy.

Best Answer

There are four broad ways to improve the performance of a deep learning model:

  • Improve performance with data.
  • Improve performance with algorithms.
  • Improve performance with algorithm tuning.
  • Improve performance with ensembles.

Improve performance with data:

  1. Get more data.
  2. Invent more data.
  3. Rescale your data (a short sketch follows this list).
  4. Transform your data.
  5. Feature selection.
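
A minimal sketch of item 3 (rescaling), assuming engineered numeric features rather than raw token IDs; scikit-learn's MinMaxScaler maps each column into [0, 1], which tends to help gradient-based training:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

X = np.array([[3., 120.], [45., 7.], [9., 300.]])  # toy feature matrix
scaler = MinMaxScaler(feature_range=(0, 1))
X_scaled = scaler.fit_transform(X)  # in practice, fit on the training split only
print(X_scaled)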

Improve performance with algorithms:

  1. Spot-check algorithms: perhaps the algorithm you chose is not the best fit for your problem.
  2. Resampling methods: you need to know how good your model really is. Is your estimate of its performance reliable? (A cross-validation sketch follows this list.)
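
A minimal sketch of both points, using synthetic stand-in data: spot-check a simple baseline (logistic regression) with 5-fold stratified cross-validation, so the performance estimate does not depend on a single train/test split:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold, cross_val_score

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 4))     # stand-in for real features
y = rng.integers(0, 2, size=1000)  # stand-in for is_duplicate labels

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=4)
scores = cross_val_score(LogisticRegression(), X, y, cv=cv, scoring='accuracy')
print(scores.mean(), scores.std())  # mean and spread of the estimate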

Improve performance with algorithm tuning:

Some ideas for tuning your neural network to get the most out of it (a learning-rate and early-stopping sketch follows this list):

  1. Diagnostics.
  2. Weight initialization.
  3. Learning rate.
  4. Activation functions.
  5. Network topology.
  6. Batches and epochs.
  7. Regularization.
  8. Optimization and loss.
  9. Early stopping.
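
A minimal sketch of items 3, 8 and 9, assuming the question's Keras version (where SGD takes lr and the history key is 'val_acc') and the X_train/X_test split built above; it rebuilds the model for each candidate learning rate and stops training once validation loss stops improving:

from keras.callbacks import EarlyStopping
from keras.layers import LSTM, Activation
from keras.models import Sequential
from keras.optimizers import SGD

def build_model():  # fresh weights for every trial
    m = Sequential()
    m.add(LSTM(1, input_shape=(2, 2)))
    m.add(Activation('sigmoid'))
    return m

for lr in [0.1, 0.01, 0.001]:
    m = build_model()
    m.compile(loss='binary_crossentropy',
              optimizer=SGD(lr=lr, momentum=0.9),
              metrics=['accuracy'])
    stop = EarlyStopping(monitor='val_loss', patience=2)
    hist = m.fit(X_train, y_train, epochs=20,
                 validation_data=(X_test, y_test),
                 callbacks=[stop], verbose=0)
    print(lr, max(hist.history['val_acc']))  # best validation accuracy per rate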

Improve performance with ensembles:

Three general areas of ensembling you may want to consider (a model-averaging sketch follows this list):

  1. Combine models.
  2. Combine views.
  3. Stacking.
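
A minimal sketch of item 1 (combining models), assuming several already-trained Keras models with the same input shape (model_a, model_b, model_c are hypothetical names): average the predicted probabilities and threshold the mean, mirroring the 0.6 cutoff used in the question:

import numpy as np

def ensemble_predict(models, x, threshold=0.6):
    # average the per-model probabilities; the mean is usually more stable
    probs = np.mean([m.predict(x) for m in models], axis=0)
    return (probs > threshold).astype(int), probs

# usage: labels, probs = ensemble_predict([model_a, model_b, model_c], TestInput)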

See the following link for more details: https://machinelearningmastery.com/improve-deep-learning-performance/

Regarding "python - How to improve LSTM training accuracy", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/56575579/
