Sunday 2 December 2018

Dynamic directory read and pickle example in Python

import os.path
import pickle
import glob

nameL = []       # folder names (class labels)
name_indx = []   # numeric index for each folder
i = 0
X = []           # image paths

dirName = glob.glob('data\\*\\')   # every sub-directory under data (Windows path separator)
for name in dirName:
    # print(glob.glob(name + '*.jpg'))
    v = os.path.dirname(name)      # strip the trailing slash to get the folder name
    print(v)
    nameL.append(v)
    name_indx.append(i)

    i = i + 1
    imgs = glob.glob(name + '*.jpg')   # all jpg images inside this folder
    for img_path in imgs:
        print(img_path)
        X.append(img_path)

print(nameL)
print(name_indx)
dictionary = dict(zip(name_indx, nameL))
print(dictionary)
# Pickling: write the dictionary to disk
pickle_out = open("dict.pickle", "wb")
pickle.dump(dictionary, pickle_out)
pickle_out.close()

# Unpickling: read it back
pickle_in = open("dict.pickle", "rb")
example_dict = pickle.load(pickle_in)
pickle_in.close()
print(example_dict)
print(example_dict[1])
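
The open/close pairs above can also be written with context managers, which close the file even if an error occurs. A minimal sketch of the same round trip, assuming the dictionary and file name from the code above:

with open("dict.pickle", "wb") as pickle_out:
    pickle.dump(dictionary, pickle_out)

with open("dict.pickle", "rb") as pickle_in:
    example_dict = pickle.load(pickle_in)
print(example_dict)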

Tuesday 6 November 2018

Extract text from an image using Pytesseract on Windows

On Windows we first need to install the Tesseract engine itself; a Windows installer is available at https://github.com/UB-Mannheim/tesseract/wiki. After installing, add a new environment variable named tesseract with the value C:\Program Files (x86)\Tesseract-OCR\tesseract.exe

Then we need to install the Python wrapper: pip install pytesseract

In some cases (if the environment variable is not set correctly) we also need the following line of code:

pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract.exe'


This line tells pytesseract where to find the Tesseract executable.
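
One quick way to confirm the configuration is to ask pytesseract for the engine version; a minimal sketch (it raises an error if the executable cannot be found at the given path):

import pytesseract

pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract.exe'
print(pytesseract.get_tesseract_version())   # prints the installed Tesseract version if the path is correct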



Full Code

       
'''
Download and install Tesseract from https://github.com/UB-Mannheim/tesseract/wiki
'''

import cv2
import pytesseract

# Point pytesseract at the Tesseract executable
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract.exe'

# Read the input image in grayscale
frame1 = cv2.imread('poc.jpg', 0)

# Keep a copy of the exact image that is fed to the OCR engine
cv2.imwrite('ocr.jpg', frame1)

# Run OCR on the image
text = pytesseract.image_to_string(frame1)
print(text)

# Show the image, using the recognized text as the window title
cv2.imshow(text, frame1)

cv2.waitKey(0)
cv2.destroyAllWindows()
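
Besides the plain text, pytesseract can also report where each character was found. A small sketch using image_to_boxes, assuming frame1 from the code above (Tesseract box coordinates use a bottom-left origin):

boxes = pytesseract.image_to_boxes(frame1)
for line in boxes.splitlines():
    parts = line.split(' ')
    print(parts[0], parts[1:5])   # the character and its box corners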



Input Image

Output






Saturday 3 November 2018

ORB feature matching example in OpenCV

       

import cv2

img1 = cv2.imread("face1.jpg", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("face2.jpg", cv2.IMREAD_GRAYSCALE)

# ORB detector
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# Brute-force matching with Hamming distance (ORB produces binary descriptors)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)   # best (smallest distance) matches first

# Draw the 50 best matches side by side
matching_result = cv2.drawMatches(img1, kp1, img2, kp2, matches[:50], None, flags=2)
 
cv2.imshow("Img1", img1)
cv2.imshow("Img2", img2)
cv2.imshow("Matching result", matching_result)
cv2.imwrite("Matching result.jpg", matching_result)
cv2.waitKey(0)
cv2.destroyAllWindows()
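
With crossCheck switched off, the matcher can instead return the two nearest candidates per descriptor so that only clearly better matches are kept (Lowe's ratio test). A minimal sketch, assuming des1 and des2 from the code above:

bf2 = cv2.BFMatcher(cv2.NORM_HAMMING)
pairs = bf2.knnMatch(des1, des2, k=2)
good = []
for pair in pairs:
    # keep a match only if it is clearly better than the second-best candidate
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good.append(pair[0])
print(len(good), "matches pass the ratio test")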


Output


Friday 2 November 2018

Feature descriptors like ORB, SIFT and SURF: implementation using OpenCV Python

       
# SIFT and SURF live in the xfeatures2d module, which ships with opencv-contrib-python
import cv2

img = cv2.imread("2.PNG", cv2.IMREAD_GRAYSCALE)

sift = cv2.xfeatures2d.SIFT_create()
surf = cv2.xfeatures2d.SURF_create()
orb = cv2.ORB_create(nfeatures=1500)

# The second argument (None) means no mask: detect over the whole image
keypoints1, descriptors1 = orb.detectAndCompute(img, None)
keypoints2, descriptors2 = sift.detectAndCompute(img, None)
keypoints3, descriptors3 = surf.detectAndCompute(img, None)

imgOrb  = cv2.drawKeypoints(img, keypoints1, None)
imgSift = cv2.drawKeypoints(img, keypoints2, None)
imgSurf = cv2.drawKeypoints(img, keypoints3, None)


cv2.imshow("Orb", cv2.resize(imgOrb,(700,500)) )
cv2.imshow("Sift", cv2.resize(imgSift,(700,500)) )
cv2.imshow("Surf", cv2.resize(imgSurf,(700,500)) )

cv2.waitKey(0)
cv2.destroyAllWindows()
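
A quick way to compare the three detectors is to print how many keypoints each one found and the size of its descriptors; a small sketch assuming the variables from the code above and that each detector found at least one keypoint:

print("ORB :", len(keypoints1), "keypoints, descriptor shape:", descriptors1.shape)
print("SIFT:", len(keypoints2), "keypoints, descriptor shape:", descriptors2.shape)
print("SURF:", len(keypoints3), "keypoints, descriptor shape:", descriptors3.shape)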




Output


Wednesday 2 May 2018

Simple implementation of SVM in Python

from sklearn import svm


# training set (two features per sample)
x = [[86,105], [109, 100], [94, 105], [106, 100], [100, 100], 
     [80, 90], [103, 80], [105, 80], [120, 85], [77, 83], [92, 75], 
     [98, 76], [106, 82],[106, 77], [105, 77], [119, 80], [115, 70], 
     [110, 66], [105, 65], [90, 67], [80, 60], [90, 57], [105, 55],
     [115, 55], [110, 50], [109, 49], [95, 45], [100, 42], [105, 40],
     [110, 42], [115, 42], [115,35], [105, 35], [85, 35], [95, 35], [109, 35],
     [115, 35], [120, 30], [105, 29], [109, 25]]

# training set labels
y = [130, 130, 130, 130, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120,
     120, 110, 110, 110, 110, 100, 100, 100, 100, 90, 90, 80, 80, 80, 80, 80, 70, 
     70, 60, 60, 60, 60, 50, 50, 40]
clf = svm.SVC(gamma=0.001, C=100)

clf.fit(x,y)

# persist the trained model (in newer scikit-learn versions use "import joblib" instead)
from sklearn.externals import joblib
joblib.dump(clf, 'C:/Users/kiit1/Desktop/svm.pkl')

test_set = [[86,105], [109, 100], [80, 90], [103, 80], [98, 76], [106, 82],
 [105, 77], [119, 80], [90, 57], [105, 55], [119, 27]]
y_test = [130, 130, 120, 123, 120, 120, 120, 120, 100, 100, 40]

# model accuracy on the test set
accuracy = clf.score(test_set, y_test)
print(clf.predict(test_set))
print(accuracy)

# creating a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, clf.predict(test_set))
print(cm)

# load the pickled model back and reuse it
clf1 = joblib.load('C:/Users/kiit1/Desktop/svm.pkl')
print(clf1.predict([[86,105]]))
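
Since each sample has only two features, a quick scatter plot makes the training set easier to read; a minimal sketch, assuming x and y from the code above:

import numpy as np
import matplotlib.pyplot as plt

pts = np.array(x)
plt.scatter(pts[:, 0], pts[:, 1], c=y, cmap='viridis')   # colour each point by its label
plt.colorbar(label='label')
plt.xlabel('feature 1')
plt.ylabel('feature 2')
plt.show()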

Output

Sunday 18 March 2018

Text separation using OpenCV Python

       

import numpy as np
import time
import cv2

# Load the image in grayscale
img = cv2.imread('1.PNG', 0)
cv2.imshow('Realimage', img)
Img_height = np.size(img, 0)
Img_width = np.size(img, 1)
print(Img_height)
print("-----------------------------------------")

imgray = img
# Adaptive threshold copes better with unevenly lit text than a global threshold
thresh = cv2.adaptiveThreshold(imgray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

# OpenCV 3.x returns (image, contours, hierarchy); OpenCV 4.x returns only (contours, hierarchy)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0, 255, 0), 3)

for cnt in contours:
    # cv2.drawContours(img, [cnt], 0, (128,255,0), 2)
    x, y, w, h = cv2.boundingRect(cnt)
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    crop_img = img[y:y + h, x:x + w]
    height_sub = np.size(crop_img, 0)
    width_sub = np.size(crop_img, 1)
    print(height_sub)
    # keep only regions taller than half the image and narrower than half its width
    if Img_height / 2 < height_sub and Img_width / 2 > width_sub:
        cv2.imwrite('./dataIMG/' + str(time.time()) + '.png', crop_img)   # the dataIMG folder must already exist

cv2.imshow('image', imgray)

cv2.waitKey(0)
cv2.destroyAllWindows()
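
The findContours call above follows the OpenCV 3.x signature. If the same script has to run under OpenCV 4.x as well, a version-agnostic unpack is possible; a small sketch assuming thresh from the code above:

ret = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = ret[-2]    # contours are the second-to-last element in both 3.x and 4.x
hierarchy = ret[-1]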




Input

Output