data loading
import tensorflow_datasets as tfds
import numpy as np
imdb, info = tfds.load("imdb_reviews", with_info=True, as_supervised=True)
train_data, test_data = imdb['train'], imdb['test']
training_sentences = []
training_labels = []
testing_sentences = []
testing_labels = []
for s, l in train_data:
    training_sentences.append(s.numpy().decode('utf8'))
    training_labels.append(l.numpy())
for s, l in test_data:
    testing_sentences.append(s.numpy().decode('utf8'))
    testing_labels.append(l.numpy())
training_labels = np.array(training_labels)
testing_labels = np.array(testing_labels)
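A quick sanity check (my addition, not in the original notes): imdb_reviews ships 25,000 reviews each for train and test, with labels 0 (negative) and 1 (positive).

print(len(training_sentences), len(testing_sentences))  # 25000 25000
print(training_sentences[0][:100])  # first 100 characters of the first review
print(training_labels[0])           # 0 = negative, 1 = positive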
tokenizing
- train while tuning vocab_size, embedding_dim, and max_length
# hyperparameter
vocab_size = 10000
embedding_dim = 16
max_length = 120
trunc_type = 'post'
oov_token = '<OOV>'
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_token)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, truncating=trunc_type)
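Worth checking what came out: every review is now a fixed-length integer vector. Note that pad_sequences pads at the front by default (padding='pre'), while truncation here is 'post'. The shapes below are what these settings should give.

print(training_padded.shape)  # (25000, 120)
print(training_padded[0])     # integer ids: 0 = padding, 1 = <OOV>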
model
import tensorflow as tf
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    # tf.keras.layers.Flatten(),  # GlobalAveragePooling1D keeps the model smaller than Flatten
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
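model.summary() makes the size concrete; with the hyperparameters above the parameter counts work out as follows.

model.summary()
# Embedding:              10000 * 16 = 160,000 params
# GlobalAveragePooling1D:              0
# Dense(6):               16 * 6 + 6 = 102
# Dense(1):               6 * 1 + 1  = 7
# Total:                               160,109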
compile
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['acc'])
fit
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Stop early once training accuracy passes 95%.
        if logs.get('acc', 0) > 0.95:
            self.model.stop_training = True
callbacks = myCallback()
history = model.fit(
    x=training_padded,
    y=training_labels,
    batch_size=32,
    epochs=5,
    validation_data=(testing_padded, testing_labels),
    callbacks=[callbacks])
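To eyeball overfitting, plot the History object. A minimal sketch, assuming matplotlib is installed:

import matplotlib.pyplot as plt

# Compare train vs. validation curves for each logged metric.
for metric in ['acc', 'loss']:
    plt.plot(history.history[metric], label='train_' + metric)
    plt.plot(history.history['val_' + metric], label='val_' + metric)
plt.xlabel('epoch')
plt.legend()
plt.show()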
# embedding layer
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # (vocab_size, embedding_dim)
index_word = dict([(value, key) for (key, value) in word_index.items()])
index_word[0] = '<PAD>'
def decode_review(text):
    return " ".join([index_word[i] for i in text])
print(decode_review(training_padded[3]))
print(training_sentences[3])
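A natural follow-up (as in the course notebook) is to dump the learned vectors for https://projector.tensorflow.org; the vecs.tsv / meta.tsv filenames are just conventions.

import io

# Write one embedding vector per word; index 0 is the padding slot, so skip it.
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
    out_m.write(index_word[word_num] + "\n")
    out_v.write("\t".join(str(x) for x in weights[word_num]) + "\n")
out_v.close()
out_m.close()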