# Image classification in R (k-NN / SVM on faculty photos)
# (extraction residue removed: "Professional Documents" / "Culture Documents"
#  were site-navigation text from the hosting page)
# Author: Dr Manohar Kapse
# Date: 1 April 2019
# Setup: load {imager} for image I/O and processing.
# NOTE(review): `rm(list = ls())` was removed — wiping the global environment
# from inside a script is an anti-pattern (it silently destroys the user's
# workspace and does not detach packages); restart R for a clean session.
library(imager)
##
## Attaching package: 'imager'
#--------------------------------------------#
# Preprocess the IMCU images in place:
#   1. convert to grayscale,
#   2. resize every image to a common 100 x 100 single-channel raster,
#   3. flatten each image to a plain numeric pixel vector.
# `seq_along()` replaces `1:length(...)`, which yields c(1, 0) on empty lists.
for (i in seq_along(IMCU.Images)) {
  IMCU.Images[[i]] <- grayscale(IMCU.Images[[i]])
}
#str(IMCU.Images)
class(IMCU.Images)
# plot(IMCU.Images[[1]])
#--------------------------------------------#
# Resize so every image contributes the same number of pixels (100 * 100).
for (i in seq_along(IMCU.Images)) {
  IMCU.Images[[i]] <- resize(IMCU.Images[[i]],
                             size_x = 100, size_y = 100,
                             size_z = 1, size_c = 1)
}
IMCU.Images
#str(IMCU.Images)
class(IMCU.Images)
## [1] "imlist" "list"
#plot(IMCU.Images[[1]])
#-------------------------------------------------#
# Flatten each cimg to a numeric vector of pixel intensities.
for (i in seq_along(IMCU.Images)) {
  IMCU.Images[[i]] <- as.numeric(IMCU.Images[[i]])
}
#str(IMCU.Images)
class(IMCU.Images)
#-------------------------------------------------#
# Stack the per-image pixel vectors into one long data frame
# (one row per pixel, with an `im` column naming the source image).
IMCU.Images.data <- as.data.frame(IMCU.Images)
str(IMCU.Images.data)
#---------------------------------------------#
# Add a within-image pixel id. Every image was resized to 100 x 100, so the
# ids 1..10000 recycle exactly once per image across the stacked rows.
# `seq_len(100 * 100)` replaces `seq(1:10000)` (seq() of a sequence is
# redundant and a known idiom trap).
IMCU.Images.data$id <- seq_len(100 * 100)
str(IMCU.Images.data)
## 'data.frame': 660000 obs. of 3 variables:
## $ im: chr "E1125.jpg" "E1125.jpg" "E1125.jpg" "E1125.jpg" ...
## $ v : num 1 1 1 1 1 1 1 1 1 1 ...
## $ id: int 1 2 3 4 5 6 7 8 9 10 ...
#---------------------------------#
# Label every IMCU row with its department (stored as a factor so the
# classifiers treat it as a class, not a character string).
IMCU.Images.data$group1 <- "imcu"
str(IMCU.Images.data)
IMCU.Images.data$group1 <- as.factor(IMCU.Images.data$group1)
#IMCU.Images.data
#----------------------------#
# Reshape long -> wide: one row per image, one column per pixel id
# (10000 predictor columns). tidyr::spread() is superseded; pivot_wider()
# is the current API and produces the same table here (ids already appear
# in sorted order 1..10000 within each image).
library(tidyr)
##
## Attaching package: 'tidyr'
IMCU.Images.data.1 <- pivot_wider(IMCU.Images.data,
                                  names_from = id, values_from = v)
#str(IMCU.Images.data.1)
# Now create a data set for the other department: Management Science (MS).
#--------------------------------------------#
# Preprocess the MS images with the SAME pipeline as the IMCU images:
# grayscale -> resize to 100 x 100 -> flatten to numeric.
for (i in seq_along(MS.Images)) {
  MS.Images[[i]] <- grayscale(MS.Images[[i]])
}
#str(MS.Images)
class(MS.Images)
# NOTE(review): the original script skipped the resize step for the MS
# images. Unless every MS image is already exactly 100 x 100, the pixel
# counts would not line up with the 10000-column IMCU data, so the step is
# added here for pipeline consistency (it is effectively a no-op for images
# already at the target size).
for (i in seq_along(MS.Images)) {
  MS.Images[[i]] <- resize(MS.Images[[i]],
                           size_x = 100, size_y = 100,
                           size_z = 1, size_c = 1)
}
MS.Images
#str(MS.Images)
class(MS.Images)
# plot(MS.Images[[1]])
#-------------------------------------------------#
# Flatten each cimg to a numeric vector of pixel intensities.
for (i in seq_along(MS.Images)) {
  MS.Images[[i]] <- as.numeric(MS.Images[[i]])
}
#str(MS.Images)
class(MS.Images)
# Stack the MS pixel vectors into one long data frame (one row per pixel).
MS.Images.data <- as.data.frame(MS.Images)
#---------------------------------------------#
# Within-image pixel id: 100 x 100 = 10000 pixels per image, recycled once
# per image. seq_len() replaces the redundant seq(1:10000) idiom.
MS.Images.data$id <- seq_len(100 * 100)
str(MS.Images.data)
#---------------------------------#
# Department label, stored as a factor for the classifiers.
MS.Images.data$group1 <- "MS"
str(MS.Images.data)
MS.Images.data$group1 <- as.factor(MS.Images.data$group1)
# MS.Images.data
#----------------------------#
# Reshape long -> wide (one row per image, one column per pixel id).
# spread() is superseded by pivot_wider(); the repeated library(tidyr)
# call is dropped because the package is already attached above.
MS.Images.data.1 <- pivot_wider(MS.Images.data,
                                names_from = id, values_from = v)
#str(MS.Images.data.1)
# Combine the two wide data sets (IMCU + MS) into one modelling table; both
# must have identical columns (the label column plus 10000 pixel columns)
# for rbind() to succeed.
CU.images.data<-rbind(IMCU.Images.data.1, MS.Images.data.1)
# str(CU.images.data)
# NOTE(review): `knnMod2` is not defined anywhere in this chunk — the
# model-training call (from the captured output below it looks like a caret
# train() k-NN fit with centering/scaling and 10-fold CV — TODO confirm)
# appears to have been lost when the script was extracted from the source
# document. Restore it before running this file end to end.
summary(knnMod2)
print(knnMod2)
## k-Nearest Neighbors
##
## 106 samples
## 10000 predictors
## 2 classes: 'imcu', 'MS'
##
## Pre-processing: centered (10000), scaled (10000)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 96, 95, 96, 95, 95, 96, ...
## Resampling results across tuning parameters:
##
## k Accuracy Kappa
## 5 0.6754545 0.3341953
## 7 0.6690909 0.3081981
## 9 0.6300000 0.2157474
## 11 0.6209091 0.1961218
## 13 0.6127273 0.1482331
##
## Accuracy was used to select the optimal model using the largest value.
## The final value used for the model was k = 5.
plot(knnMod2)
# Resubstitution check: predict on the same data the model was trained on.
# This is optimistic by construction — the cross-validated accuracy printed
# above is the honest performance estimate.
# `<-` replaces `=` for assignment, per R style conventions.
pred <- predict(knnMod2, newdata = CU.images.data)
# NOTE(review): this assumes column 1 of CU.images.data is the group1 label
# factor — verify the column layout of the wide table before relying on it.
confusionMatrix(pred, CU.images.data[,1])
## Confusion Matrix and Statistics
##
## Reference
## Prediction imcu MS
## imcu 56 10
## MS 10 30
##
## Accuracy : 0.8113
## 95% CI : (0.7238, 0.8808)
## No Information Rate : 0.6226
## P-Value [Acc > NIR] : 2.122e-05
##
## Kappa : 0.5985
## Mcnemar's Test P-Value : 1
##
## Sensitivity : 0.8485
## Specificity : 0.7500
## Pos Pred Value : 0.8485
## Neg Pred Value : 0.7500
## Prevalence : 0.6226
## Detection Rate : 0.5283
## Detection Prevalence : 0.6226
## Balanced Accuracy : 0.7992
##
## 'Positive' Class : imcu
##
#--------------------------------------------#
# Support Vector Machine
# {e1071} provides svm() for support vector machine classification.
library(e1071)
# NOTE(review): the svm() training call that would produce `dtm1` is not
# present in this chunk — it appears to have been lost when the script was
# extracted. The commented lines below inspect components of a fitted
# e1071::svm object (support vectors, kernel, coefficients, etc.).
#dtm1$SV
#dtm1$kernel
#dtm1$tot.nSV
#dtm1$decision.values
#dtm1$fitted
#dtm1$coefs
#--------------------------------------------#
# Predicting for images outside the training data set:
# build a test data set by importing images of IMCU faculty members
# from a directory.
# Load the held-out test images.
# NOTE: hard-coded absolute path — adjust for the local machine.
test.Images <- load.dir(path="C:/Users/LENOVO/Desktop/Image classification/test",
                        pattern=".jpg")
# NOTE(review): the training images were converted with grayscale() before
# resizing, but the test images originally were not; relying on
# resize(size_c = 1) alone to collapse the colour channels can give pixel
# values different from a proper luminance conversion, skewing predictions.
# The grayscale step is added so test data matches the training pipeline.
for (i in seq_along(test.Images)) {
  test.Images[[i]] <- grayscale(test.Images[[i]])
}
# Resize to the same 100 x 100 single-channel raster as the training images.
for (i in seq_along(test.Images)) {
  test.Images[[i]] <- resize(test.Images[[i]],
                             size_x = 100, size_y = 100,
                             size_z = 1, size_c = 1)
}
# Flatten each image to a numeric pixel vector.
for (i in seq_along(test.Images)) {
  test.Images[[i]] <- as.numeric(test.Images[[i]])
}
#-------------------------------------------------#
# Long data frame: one row per pixel, plus pixel id and a placeholder label.
test.Images.data <- as.data.frame(test.Images)
#---------------------------------------------#
# Within-image pixel id: 1..10000, recycled once per image.
test.Images.data$id <- seq_len(100 * 100)
#---------------------------------#
test.Images.data$group1 <- "test"
test.Images.data$group1 <- as.factor(test.Images.data$group1)
library(tidyr)
# Wide format: one row per image, one column per pixel id
# (pivot_wider() replaces the superseded spread()).
test.Images.data.1 <- pivot_wider(test.Images.data,
                                  names_from = id, values_from = v)
#str(test.Images.data.1)
# NOTE(review): this assumes column 2 of the wide table is the placeholder
# group1 column — verify the column layout before dropping it.
test.Images.data.1 <- test.Images.data.1[,-2]
## 1 2 3 4
## imcu imcu imcu imcu
## Levels: imcu MS
#--------------------------------------------#
# A random forest classifier was also tried, but it takes a lot of time
# on 10000 predictors and was not useful here.