Professional Documents
Culture Documents
Coding: Detection and Training
1. Detection
# Shared setup: ensure the dataset folder exists, then build the Haar-cascade
# face detector and the LBPH recognizer used by dataset() and training().
# NOTE: cv2.face requires the opencv-contrib-python package.
import os, cv2, numpy as np

if not os.path.exists('dataset'):
    os.makedirs('dataset')  # FIX: this line lost its indentation in the extract
face = os.listdir('dataset')  # existing per-person sub-folders
facedetect = cv2.CascadeClassifier(cv2.data.haarcascades +
                                   'haarcascade_frontalface_default.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
def dataset():
    """Capture face samples from the webcam and save them under dataset/<name>.

    NOTE(review): the extract references a global `nama` ("name") that is not
    defined in the visible code — presumably read from a Tk Entry widget in
    the lost GUI code; confirm against the full source.
    """
    if not nama:
        return  # no name entered — nothing to capture
    cam = cv2.VideoCapture(1)  # camera index 1 kept from the original; 0 is the usual default
    # BUG FIX: original tested os.path.exists('dataset/'+nama) but created
    # 'dataset/'+nama.lower() — a mixed-case name checks one path and creates
    # another. Use a single canonical lower-cased folder for both.
    folder = 'dataset/' + nama.lower()
    if not os.path.exists(folder):
        os.makedirs(folder)
    count = 0
    while True:
        ret, img = cam.read()  # reconstructed: the extract lost the frame grab
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for (x, y, w, h) in facedetect.detectMultiScale(gray):
            count += 1
            # save the numbered face crop for later training
            cv2.imwrite(folder + '/' + str(count) + '.jpg', gray[y:y + h, x:x + w])
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('img', img)
        k = cv2.waitKey(1) & 0xFF  # reconstructed: `k` was undefined in the extract
        if k == 27:  # ESC aborts capture
            break
        if count >= 30:  # reconstructed stop condition (extract had a bare `break`)
            break
    cam.release()
    cv2.destroyAllWindows()
    # The extract ends with the fragment `ditambahkan!")` ("added!") —
    # presumably a confirmation message; reconstructed as a print. TODO confirm.
    print("Data " + nama + " berhasil ditambahkan!")
def training():
    """Train the LBPH recognizer on all saved samples and write models.yml.

    NOTE(review): the extract lost the image-loading lines and the
    `facemodels.append(...)` call — without them recognizer.train() would
    receive an empty list. Reconstructed below; confirm against the source.
    """
    face = os.listdir('dataset')
    facemodels = []
    ids = []
    cnt = 0
    for v in face:  # one sub-folder per person; folder index is the label
        folder = 'dataset/' + v
        for fname in os.listdir(folder):
            # Grayscale: load each saved sample as a single-channel image
            img_numpy = cv2.imread(folder + '/' + fname, cv2.IMREAD_GRAYSCALE)
            if img_numpy is None:
                continue  # skip unreadable / non-image files
            for (x, y, w, h) in facedetect.detectMultiScale(img_numpy):
                facemodels.append(img_numpy[y:y + h, x:x + w])
                ids.append(cnt)
        cnt += 1
    recognizer.train(facemodels, np.array(ids))
    recognizer.write('models.yml')
# GUI bootstrap. FIX: `Tk` and `Label` were undefined in the extract — the
# tkinter import was lost; restore it so the script runs.
from tkinter import Tk, Label

root = Tk()
root.title("Pengambilan Data")  # "Data capture" window title
root.geometry('480x360')
root.eval('tk::PlaceWindow . center')  # center the window on screen
keterangan = Label(root, text="Prediksi")  # status label ("Prediction")
# NOTE(review): the extract never packs/places `keterangan` and the buttons
# that call dataset()/training() are missing — confirm against the full source.
root.mainloop()
2. Training Data
# Shared setup: ensure the dataset folder exists, then build the Haar-cascade
# face detector and the LBPH recognizer used by dataset() and training().
# NOTE: cv2.face requires the opencv-contrib-python package.
import os, cv2, numpy as np

if not os.path.exists('dataset'):
    os.makedirs('dataset')  # FIX: this line lost its indentation in the extract
face = os.listdir('dataset')  # existing per-person sub-folders
facedetect = cv2.CascadeClassifier(cv2.data.haarcascades +
                                   'haarcascade_frontalface_default.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
def dataset():
    """Capture face samples from the webcam and save them under dataset/<name>.

    NOTE(review): the extract references a global `nama` ("name") that is not
    defined in the visible code — presumably read from a Tk Entry widget in
    the lost GUI code; confirm against the full source.
    """
    if not nama:
        return  # no name entered — nothing to capture
    cam = cv2.VideoCapture(1)  # camera index 1 kept from the original; 0 is the usual default
    # BUG FIX: original tested os.path.exists('dataset/'+nama) but created
    # 'dataset/'+nama.lower() — a mixed-case name checks one path and creates
    # another. Use a single canonical lower-cased folder for both.
    folder = 'dataset/' + nama.lower()
    if not os.path.exists(folder):
        os.makedirs(folder)
    count = 0
    while True:
        ret, img = cam.read()  # reconstructed: the extract lost the frame grab
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for (x, y, w, h) in facedetect.detectMultiScale(gray):
            count += 1  # this increment is visible in the extract
            # save the numbered face crop for later training
            cv2.imwrite(folder + '/' + str(count) + '.jpg', gray[y:y + h, x:x + w])
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('img', img)
        k = cv2.waitKey(1) & 0xFF  # reconstructed: `k` was undefined in the extract
        if k == 27:  # ESC aborts capture
            break
        if count >= 30:  # reconstructed stop condition (extract had a bare `break`)
            break
    cam.release()
    cv2.destroyAllWindows()
    # The extract ends with the fragment `ditambahkan!")` ("added!") —
    # presumably a confirmation message; reconstructed as a print. TODO confirm.
    print("Data " + nama + " berhasil ditambahkan!")
def training():
    """Train the LBPH recognizer on all saved samples and write models.yml.

    NOTE(review): the extract lost the image-loading lines and the
    `facemodels.append(...)` call — without them recognizer.train() would
    receive an empty list. Reconstructed below; confirm against the source.
    """
    face = os.listdir('dataset')
    facemodels = []
    ids = []
    cnt = 0
    for v in face:  # one sub-folder per person; folder index is the label
        folder = 'dataset/' + v
        for fname in os.listdir(folder):
            # Grayscale: load each saved sample as a single-channel image
            img_numpy = cv2.imread(folder + '/' + fname, cv2.IMREAD_GRAYSCALE)
            if img_numpy is None:
                continue  # skip unreadable / non-image files
            # detectMultiScale on the grayscale array is visible in the extract
            for (x, y, w, h) in facedetect.detectMultiScale(img_numpy):
                facemodels.append(img_numpy[y:y + h, x:x + w])
                ids.append(cnt)
        cnt += 1
    recognizer.train(facemodels, np.array(ids))
    recognizer.write('models.yml')
# GUI bootstrap. FIX: `Tk` and `Label` were undefined in the extract — the
# tkinter import was lost; restore it so the script runs.
from tkinter import Tk, Label

root = Tk()
root.title("Pengambilan Data")  # "Data capture" window title
root.geometry('480x360')
root.eval('tk::PlaceWindow . center')  # center the window on screen
keterangan = Label(root, text="Prediksi")  # status label ("Prediction")
# NOTE(review): the extract never packs/places `keterangan` and the buttons
# that call dataset()/training() are missing — confirm against the full source.
root.mainloop()