
Project 10

Advanced Deep Learning (ADL)

Submitted by:
Khawar Abbas Khan
MS CS Semester 1

Submitted to:
Dr M Shamim Baig

Muslim Youth University Islamabad


1: Binary Classification Problem using the Internet Movie Database (IMDB) Dataset
from keras.datasets import imdb

Using TensorFlow backend.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
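As a quick sanity check, num_words=10000 caps every word index below 10,000; a minimal sketch to confirm this, assuming the data loaded as above:

print(max(max(sequence) for sequence in train_data))  # highest index is 9999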
train_data[0][0]
1
# word_index is a dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# We reverse it, mapping integer indices to words
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# We decode the review; note that our indices were offset by 3
# because 0, 1 and 2 are reserved indices for "padding", "start of sequence", and "unknown"
decoded_review = ' '.join([reverse_word_index.get(i - 3, '.') for i in train_data[0]])
decoded_review
". this film was just brilliant casting location scenery story direction everyone's really suited the part they played and you could just imagine being there robert . is an amazing actor and now the same being director . father came from the same scottish island as myself so i loved the fact there was a real connection with this film the witty remarks throughout the film were great it was just brilliant so much that i bought the film as soon as it was released for . and would recommend it to everyone to watch and the fly fishing was amazing really cried at the end it was so sad and you know what they say if you cry at a film it must have been good and this definitely was also . to the two little boy's that played the . of norman and paul they were just brilliant children are often left out of the . list i think because the stars that play them all grown up are such a big profile for the whole film but these children are amazing and should be praised for what they have done don't you think the whole story was so lovely because it was true and was someone's life after all that was shared with us all"
len(train_data)
25000
len(test_data)
25000
len(train_labels)
25000
len(test_labels)
25000
enumerate(train_data)  # returns a lazy iterator object, not a list
<enumerate at 0x182f5da9c18>
import numpy as np

def vectorize_sequences(sequences, dimension=10000):
    # Create an all-zero matrix of shape (len(sequences), dimension)
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.  # set specific indices of results[i] to 1s
    return results

# Our vectorized training data
x_train = vectorize_sequences(train_data)
# Our vectorized test data
x_test = vectorize_sequences(test_data)
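To make the multi-hot encoding concrete, here is a minimal sketch with made-up toy sequences (the 5-word vocabulary is purely illustrative):

import numpy as np

toy = [[0, 3], [1, 1, 2]]        # two short "reviews" over a 5-word vocabulary
demo = np.zeros((len(toy), 5))   # one row per sequence, one column per word index
for i, seq in enumerate(toy):
    demo[i, seq] = 1.            # repeated indices still just set the cell to 1
print(demo)
# [[1. 0. 0. 1. 0.]
#  [0. 1. 1. 0. 0.]]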
x_train[0]
array([0., 1., 1., ..., 0., 0., 0.])
# Our vectorized labels
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
x_train.shape
(25000, 10000)
len(x_train)
25000
type(x_train)
numpy.ndarray
from keras import models
from keras import layers

model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
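For reference, model.summary() prints the layer shapes and parameter counts; each Dense layer has (inputs + 1 bias) x units parameters:

model.summary()
# Dense(16): 10000 * 16 + 16 = 160,016 parameters
# Dense(16):    16 * 16 + 16 =     272 parameters
# Dense(1) :    16 *  1 +  1 =      17 parameters
# total    :                   160,305 parameters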
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Set aside the first 12,000 reviews as a validation set
x_val = x_train[:12000]
partial_x_train = x_train[12000:]

y_val = y_train[:12000]
partial_y_train = y_train[12000:]
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))

Train on 13000 samples, validate on 12000 samples
Epoch 1/20
13000/13000 [==============================] - 4s 274us/step - loss: 0.0254 - acc: 0.9922 - val_loss: 0.0888 - val_acc: 0.9828
Epoch 2/20
13000/13000 [==============================] - 4s 270us/step - loss: 0.0027 - acc: 0.9995 - val_loss: 0.0950 - val_acc: 0.9809
Epoch 3/20
13000/13000 [==============================] - 4s 274us/step - loss: 8.4791e-04 - acc: 1.0000 - val_loss: 0.0954 - val_acc: 0.9807
Epoch 4/20
13000/13000 [==============================] - 4s 304us/step - loss: 0.0012 - acc: 0.9998 - val_loss: 0.3052 - val_acc: 0.9319
Epoch 5/20
13000/13000 [==============================] - 4s 273us/step - loss: 0.0021 - acc: 0.9995 - val_loss: 0.1074 - val_acc: 0.9778
Epoch 6/20
13000/13000 [==============================] - 4s 300us/step - loss: 1.7521e-04 - acc: 1.0000 - val_loss: 0.1129 - val_acc: 0.9761
Epoch 7/20
13000/13000 [==============================] - 5s 348us/step - loss: 1.2573e-04 - acc: 1.0000 - val_loss: 0.1225 - val_acc: 0.9742
Epoch 8/20
13000/13000 [==============================] - 4s 301us/step - loss: 8.3031e-05 - acc: 1.0000 - val_loss: 0.1355 - val_acc: 0.9708
Epoch 9/20
13000/13000 [==============================] - 4s 275us/step - loss: 0.0038 - acc: 0.9989 - val_loss: 0.1509 - val_acc: 0.9686
Epoch 10/20
13000/13000 [==============================] - 4s 277us/step - loss: 4.2788e-05 - acc: 1.0000 - val_loss: 0.1558 - val_acc: 0.9667
Epoch 11/20
13000/13000 [==============================] - 4s 271us/step - loss: 3.2614e-05 - acc: 1.0000 - val_loss: 0.1645 - val_acc: 0.9650
Epoch 12/20
13000/13000 [==============================] - 4s 285us/step - loss: 2.6642e-05 - acc: 1.0000 - val_loss: 0.1754 - val_acc: 0.9640
Epoch 13/20
13000/13000 [==============================] - 4s 280us/step - loss: 2.0782e-05 - acc: 1.0000 - val_loss: 0.1922 - val_acc: 0.9622
Epoch 14/20
13000/13000 [==============================] - 4s 285us/step - loss: 1.4104e-05 - acc: 1.0000 - val_loss: 0.2538 - val_acc: 0.9515
Epoch 15/20
13000/13000 [==============================] - 4s 283us/step - loss: 0.0031 - acc: 0.9990 - val_loss: 0.2164 - val_acc: 0.9586
Epoch 16/20
13000/13000 [==============================] - 4s 284us/step - loss: 9.0088e-06 - acc: 1.0000 - val_loss: 0.2214 - val_acc: 0.9582
Epoch 17/20
13000/13000 [==============================] - 4s 281us/step - loss: 7.8821e-06 - acc: 1.0000 - val_loss: 0.2270 - val_acc: 0.9567
Epoch 18/20
13000/13000 [==============================] - 4s 281us/step - loss: 6.9975e-06 - acc: 1.0000 - val_loss: 0.2338 - val_acc: 0.9562
Epoch 19/20
13000/13000 [==============================] - 4s 279us/step - loss: 5.9837e-06 - acc: 1.0000 - val_loss: 0.2433 - val_acc: 0.9545
Epoch 20/20
13000/13000 [==============================] - 4s 277us/step - loss: 4.5119e-06 - acc: 1.0000 - val_loss: 0.2547 - val_acc: 0.9527
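(Epoch 1 already shows 99% training accuracy because this fit call evidently reran an already-trained model; the notebook's execution counts jump from 19 to 29 at this point.) The returned history object records the per-epoch metrics under the keys shown in the log (loss, acc, val_loss, val_acc); a minimal plotting sketch, assuming matplotlib is installed:

import matplotlib.pyplot as plt

epochs = range(1, len(history.history['loss']) + 1)
plt.plot(epochs, history.history['loss'], 'bo-', label='Training loss')
plt.plot(epochs, history.history['val_loss'], 'r*-', label='Validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()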
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=4, batch_size=512)
results = model.evaluate(x_test, y_test)

Epoch 1/4
25000/25000 [==============================] - 17s 669us/step - loss: 0.4419 - acc: 0.8249
Epoch 2/4
25000/25000 [==============================] - 6s 243us/step - loss: 0.2528 - acc: 0.9104
Epoch 3/4
25000/25000 [==============================] - 11s 453us/step - loss: 0.1991 - acc: 0.9286
Epoch 4/4
25000/25000 [==============================] - 5s 195us/step - loss: 0.1669 - acc: 0.9410
25000/25000 [==============================] - 13s 522us/step
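Since the output layer is a single sigmoid unit, predict() returns one probability per review; a quick sketch for inspecting the outcome (results holds the pair returned by evaluate()):

predictions = model.predict(x_test)
print(predictions[:5])                           # probabilities in [0, 1]
print((predictions[:5] > 0.5).astype('int32'))   # thresholded class labels
print(results)                                   # [test loss, test accuracy]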

2: Multiclass Classification using the Reuters Dataset


library(keras)
reuters <- dataset_reuters(num_words = 10000)
c(c(train_data, train_labels), c(test_data, test_labels)) %<-% reuters
length(train_data)
[1] 8982
length(test_data)
[1] 2246
train_data[[1]]
 [1]    1    2    2    8   43   10  447    5   25  207  270    5 3095  111   16  369  186   90   67    7
[21]   89    5   19  102    6   19  124   15   90   67   84   22  482   26    7   48    4   49    8  864
[41]   39  209  154    6  151    6   83   11   15   22  155   11   15    7   48    9 4579 1005  504    6
[61]  258    6  272   11   15   22  134   44   11   15   16    8  197 1245   90   67   52   29  209   30
[81]   32  132    6  109   15   17   12
word_index <- dataset_reuters_word_index()
reverse_word_index <- names(word_index)
names(reverse_word_index) <- word_index
decoded_newswire <- sapply(train_data[[1]], function(index) {
  # Note that our indices were offset by 3 because 0, 1, and 2
  # are reserved indices for "padding", "start of sequence", and "unknown".
  word <- if (index >= 3) reverse_word_index[[as.character(index - 3)]]
  if (!is.null(word)) word else "?"
})
cat(decoded_newswire)
? ? ? said as a result of its december acquisition of space co it expects
earnings per share in 1987 of 1 15 to 1 30 dlrs per share up from 70 cts in
1986 the company said pretax net should rise to nine to 10 mln dlrs from six
mln dlrs in 1986 and rental operation revenues to 19 to 22 mln dlrs from 12 5
mln dlrs it said cash flow per share this year should be 2 50 to three dlrs
reuter 3
train_labels[[1]]
[1] 3
vectorize_sequences <- function(sequences, dimension = 10000) {
  results <- matrix(0, nrow = length(sequences), ncol = dimension)
  for (i in 1:length(sequences))
    results[i, sequences[[i]]] <- 1
  results
}
x_train <- vectorize_sequences(train_data)
x_test <- vectorize_sequences(test_data)
to_one_hot <- function(labels, dimension = 46) {
  results <- matrix(0, nrow = length(labels), ncol = dimension)
  for (i in 1:length(labels))
    results[i, labels[[i]] + 1] <- 1
  results
}
one_hot_train_labels <- to_one_hot(train_labels)
one_hot_test_labels <- to_one_hot(test_labels)
# Equivalently, with the built-in Keras helper:
one_hot_train_labels <- to_categorical(train_labels)
one_hot_test_labels <- to_categorical(test_labels)
model <- keras_model_sequential() %>%
  layer_dense(units = 64, activation = "relu", input_shape = c(10000)) %>%
  layer_dense(units = 64, activation = "relu") %>%
  layer_dense(units = 46, activation = "softmax")
model %>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)
val_indices <- 1:1000
x_val <- x_train[val_indices,]
partial_x_train <- x_train[-val_indices,]
y_val <- one_hot_train_labels[val_indices,]
partial_y_train <- one_hot_train_labels[-val_indices,]

# Now let's train the network for 20 epochs:

history <- model %>% fit(
  partial_x_train,
  partial_y_train,
  epochs = 20,
  batch_size = 512,
  validation_data = list(x_val, y_val)
)
plot(history)

model <- keras_model_sequential() %>%
  layer_dense(units = 64, activation = "relu", input_shape = c(10000)) %>%
  layer_dense(units = 64, activation = "relu") %>%
  layer_dense(units = 46, activation = "softmax")

model %>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)
history <- model %>% fit(
  partial_x_train,
  partial_y_train,
  epochs = 9,
  batch_size = 512,
  validation_data = list(x_val, y_val)
)
results <- model %>% evaluate(x_test, one_hot_test_labels)

# Compare against a chance baseline: shuffle the test labels and
# measure how often the shuffled copy matches the original.
test_labels_copy <- test_labels
test_labels_copy <- sample(test_labels_copy)
length(which(test_labels == test_labels_copy)) / length(test_labels)
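The same shuffled-copy baseline can be sketched in Python; the labels below are a hypothetical uniform stand-in, so the match rate comes out near 1/46, while the real Reuters labels are skewed toward a few frequent topics and give a noticeably higher chance level:

import numpy as np

rng = np.random.default_rng(0)
labels = rng.integers(0, 46, size=2246)  # hypothetical stand-in for the test labels
shuffled = rng.permutation(labels)       # a "classifier" that guesses at chance level
print(np.mean(labels == shuffled))       # ~1/46 for uniformly distributed labels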
# The importance of sufficiently large intermediate layers: the 4-unit
# middle layer below is an information bottleneck for 46 output classes.

model <- keras_model_sequential() %>%
  layer_dense(units = 64, activation = "relu", input_shape = c(10000)) %>%
  layer_dense(units = 4, activation = "relu") %>%
  layer_dense(units = 46, activation = "softmax")

model %>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)
model %>% fit(
  partial_x_train,
  partial_y_train,
  epochs = 20,
  batch_size = 128,
  validation_data = list(x_val, y_val)
)
3: Regression Problem of Predicting House Prices using the Boston Housing Price Dataset

import numpy as np

# housing.csv is assumed to be whitespace-delimited (the UCI Boston housing
# format); pass delimiter=',' to np.loadtxt for a comma-separated file.
dataset = np.loadtxt('housing.csv')

dataset_x = dataset[:, :-1]
dataset_y = dataset[:, -1]

# Let's split the loaded dataset into training and test sets.
from sklearn.model_selection import train_test_split

training_dataset_x, test_dataset_x, training_dataset_y, test_dataset_y = \
    train_test_split(dataset_x, dataset_y, test_size=0.20)

# Scale features to [0, 1], fitting the scaler on the training data only.
from sklearn.preprocessing import MinMaxScaler

mms = MinMaxScaler()
mms.fit(training_dataset_x)
training_dataset_x = mms.transform(training_dataset_x)
test_dataset_x = mms.transform(test_dataset_x)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = Sequential(name='BostonHousingPrices')
model.add(Dense(100, input_dim=13, activation='relu', name='Hidden-1'))
model.add(Dense(100, activation='relu', name='Hidden-2'))
model.add(Dense(1, activation='linear', name='Output'))

model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])

hist = model.fit(training_dataset_x, training_dataset_y, batch_size=32,
                 epochs=500, validation_split=0.2)

import matplotlib.pyplot as plt

figure = plt.gcf()
figure.set_size_inches((15, 5))
plt.title('Loss vs. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.plot(range(1, len(hist.history['loss']) + 1), hist.history['loss'])
plt.plot(range(1, len(hist.history['val_loss']) + 1), hist.history['val_loss'])
plt.legend(['Loss', 'Validation Loss'])
plt.show()

figure = plt.gcf()
figure.set_size_inches((15, 5))
plt.title('Mean Absolute Error vs. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Mean Absolute Error')
plt.plot(range(1, len(hist.history['mae']) + 1), hist.history['mae'])
plt.plot(range(1, len(hist.history['val_mae']) + 1), hist.history['val_mae'])
plt.legend(['Mean Absolute Error', 'Validation Mean Absolute Error'])
plt.show()
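The listing trains and plots but never scores the held-out 20% test split; a minimal closing sketch, assuming the variables above are still in scope:

# Evaluate on the test split (already scaled with the training-set MinMaxScaler)
test_loss, test_mae = model.evaluate(test_dataset_x, test_dataset_y)
print('Test MSE:', test_loss)
print('Test MAE:', test_mae)   # same unit as the target (median price in $1000s)

# Compare a few predictions with the true prices
predictions = model.predict(test_dataset_x[:5])
print(predictions.flatten())
print(test_dataset_y[:5])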
