#############################
# Tokenization problem
#############################
from __future__ import absolute_import, division, print_function, unicode_literals
import os

import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
import matplotlib.pyplot as plt  # needed for the training plots below
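# -- Assumed setup (not shown in the original snippet): `some_tokens` and
# `encode_example1` are referenced below without being defined. This is a
# minimal sketch using the tfds text tokenizer/encoder of this era
# (`tfds.deprecated.text` in newer tfds versions); the sample sentence is a
# stand-in for whatever corpus the original used.
example_text = ('the quick brown fox jumps over the lazy dog '
                'and the quick dog jumps over the lazy fox')
tokenizer = tfds.features.text.Tokenizer()
some_tokens = tokenizer.tokenize(example_text)            # list of word tokens
encoder = tfds.features.text.TokenTextEncoder(set(some_tokens))
encode_example1 = encoder.encode(example_text)            # list of token ids
vocab_size = encoder.vocab_size                           # includes padding/OOV ids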
# Sliding window over the encoded text: three consecutive token ids as the
# input, the following token id as the label.
k = len(some_tokens) - 3
train_data = []
train_labels = []
for i in range(k):  # range(k-1) in the original skipped the last window
    train_data.append(encode_example1[i:i + 3])
    train_labels.append(encode_example1[i + 3])

features, labels = (train_data, train_labels)
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.batch(32)  # model.fit on a tf.data.Dataset expects batched data
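# -- Assumed model (the original snippet calls model.compile without ever
# defining `model`): a small embedding + dense classifier over the
# vocabulary, sketched here so the rest of the script runs. The softmax
# output matches SparseCategoricalCrossentropy with its default
# from_logits=False.
model = keras.Sequential([
    keras.layers.Embedding(vocab_size, 64, input_length=3),
    keras.layers.Flatten(),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dense(vocab_size, activation='softmax'),
])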
model.compile(optimizer='adam',
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
history = model.fit(
    dataset,
    epochs=10,
)
history_dict = history.history
acc = history_dict['sparse_categorical_accuracy']  # key follows the metric's name
loss = history_dict['loss']
epochs = range(1, len(loss) + 1)  # x-axis for the plots; undefined in the original
plt.figure(figsize=(12,9))
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.figure(figsize=(12,9))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.ylim((0.5,1))
plt.show()
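# -- Quick sanity check (an addition, not in the original): feed the first
# trigram back through the trained model and decode the predicted next token
# with the assumed encoder from the setup above.
import numpy as np

seed = np.array([encode_example1[:3]])    # shape (1, 3)
probs = model.predict(seed)               # shape (1, vocab_size)
next_id = int(np.argmax(probs, axis=-1)[0])
print('predicted next token:', encoder.decode([next_id]))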