Python OpenCV implementation of answer card (answer sheet) recognition and grading

Time: 2022-08-05

This article shares the specific Python OpenCV code for recognizing and grading an answer card (bubble answer sheet), for your reference. The details are as follows.

Full code:

# import the required packages
import numpy as np
import argparse
import imutils
import cv2
 
# parse the command-line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", default="./images/test_03.png",
 help="path to the input image")
args = vars(ap.parse_args())
 
# correct answer
ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}
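# keys are question indices from top to bottom; values are the zero-based indices
# of the correct option in each row, counted from the left (so question 0's
# correct answer is the second bubble from the left)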
 
def order_points(pts):
 # A total of 4 coordinate points
 rect = np.zeros((4, 2), dtype = "float32")
 
 # order the 4 points as 0, 1, 2, 3: top-left, top-right, bottom-right, bottom-left
 # Calculate top left, bottom right
 s = pts.sum(axis = 1)
 rect[0] = pts[np.argmin(s)]
 rect[2] = pts[np.argmax(s)]
 
 # Calculate top right and bottom left
 diff = np.diff(pts, axis = 1)
 rect[1] = pts[np.argmin(diff)]
 rect[3] = pts[np.argmax(diff)]
 
 return rect
 
def four_point_transform(image, pts):
 # Get input coordinates
 rect = order_points(pts)
 (tl, tr, br, bl) = rect
 
 # compute the width and height of the output image
 widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
 widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
 maxWidth = max(int(widthA), int(widthB))
 
 heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
 heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
 maxHeight = max(int(heightA), int(heightB))
 
 # The corresponding coordinate position after transformation
 dst = np.array([
  [0, 0],
  [maxWidth - 1, 0],
  [maxWidth - 1, maxHeight - 1],
  [0, maxHeight - 1]], dtype = "float32")
 
 # Calculate the transformation matrix
 M = cv2.getPerspectiveTransform(rect, dst)
 warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
 
 # return the transformed result
 return warped
def sort_contours(cnts, method="left-to-right"):
    reverse = False
    i = 0
    if method == "right-to-left" or method == "bottom-to-top":
        reverse = True
    if method == "top-to-bottom" or method == "bottom-to-top":
        i = 1
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))
    return cnts, boundingBoxes
def cv_show(name,img):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
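
# Optional sanity check for the helpers above, using hypothetical corner points:
# order_points(np.array([[100, 10], [10, 10], [10, 100], [100, 100]], dtype="float32"))
# returns the corners ordered as [10, 10], [100, 10], [100, 100], [10, 100]
# (top-left, top-right, bottom-right, bottom-left); four_point_transform would
# then warp that square region into a 90x90 top-down view.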
 
# preprocessing
image = cv2.imread(args["image"])
contours_img = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
cv_show('blurred',blurred)
edged = cv2.Canny(blurred, 75, 200)
cv_show('edged',edged)
 
# contour detection
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
 cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts) # compatible with both OpenCV 3.x and 4.x return values
cv2.drawContours(contours_img,cnts,-1,(0,0,255),3) 
cv_show('contours_img',contours_img)
docCnt = None
 
# make sure at least one contour was found
if len(cnts) > 0:
 # sort the contours by area, largest first
 cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
 
 # loop through each contour
 for c in cnts:
  # approximate
  peri = cv2.arcLength(c, True)
  approx = cv2.approxPolyDP(c, 0.02 * peri, True)
 
  # Prepare for perspective transformation
  if len(approx) == 4:
   docCnt = approx
   break
 
# apply the perspective transform to both the color image (for drawing the results) and the grayscale image (for thresholding)

paper = four_point_transform(image, docCnt.reshape(4, 2))
warped = four_point_transform(gray, docCnt.reshape(4, 2))
cv_show('warped',warped)
# Otsu's thresholding
thresh = cv2.threshold(warped, 0, 255,
 cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1] 
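# THRESH_OTSU picks the threshold automatically; combined with THRESH_BINARY_INV,
# the dark (filled-in) marks become white, non-zero pixels, which is what the
# per-bubble pixel counting below relies on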
cv_show('thresh',thresh)
# convert the thresholded image to BGR so the contours can be drawn in color
thresh_Contours = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
# find the outline of each bubble
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
 cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts) # compatible with both OpenCV 3.x and 4.x return values
cv2.drawContours(thresh_Contours,cnts,-1,(0,0,255),3)
cv_show('thresh_Contours',thresh_Contours)
questionCnts = []
 
# loop over the contours
for c in cnts:
 # compute the bounding box and aspect ratio of each contour
 (x, y, w, h) = cv2.boundingRect(c)
 ar = w / float(h)

 # keep contours that are large enough and roughly square (tune these thresholds to the actual sheet)
 if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
  questionCnts.append(c)
 
# sort from top to bottom
questionCnts = sort_contours(questionCnts,
 method="top-to-bottom")[0]
correct = 0
 
# Each row has 5 options
for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
 # sort the bubbles in the current row from left to right
 cnts = sort_contours(questionCnts[i:i + 5])[0]
 bubbled = None
 
 # loop over each bubble in the row
 for (j, c) in enumerate(cnts):
  # use a mask to isolate the current bubble
  mask = np.zeros(thresh.shape, dtype="uint8")
  cv2.drawContours(mask, [c], -1, 255, -1) # a thickness of -1 fills the contour
  cv_show('mask',mask)
  # Determine whether to choose this answer by counting the number of non-zero points
  mask = cv2.bitwise_and(thresh, thresh, mask=mask)
  total = cv2.countNonZero(mask)
 
  # keep the bubble with the largest non-zero count as the marked answer
  if bubbled is None or total > bubbled[0]:
   bubbled = (total, j)
 
 # compare with the correct answer
 color = (0, 0, 255)
 k = ANSWER_KEY[q]
 
 # the marked answer is correct
 if k == bubbled[1]:
  color = (0, 255, 0)
  correct += 1
 
 # outline the correct answer's bubble on the color sheet
 cv2.drawContours(paper, [cnts[k]], -1, color, 3)
 
 
score = (correct / 5.0) * 100
print("[INFO] score: {:.2f}%".format(score))
cv2.putText(paper, "{:.2f}%".format(score), (10, 30),
 cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
cv2.imshow("Original", image)
cv2.imshow("Exam", paper)
cv2.waitKey(0)
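
The score computation above hard-codes five questions (the 5.0 divisor). As a rough sketch of the same grading idea, generalized to however many questions ANSWER_KEY contains, the per-row loop could be wrapped in a small helper like the one below. The function name grade_rows and its parameters are illustrative assumptions, not part of the original code; it reuses np, cv2 and sort_contours from the listing above.

def grade_rows(question_cnts, thresh, answer_key, choices_per_row=5):
    # question_cnts must already be sorted top-to-bottom, as in the script above
    correct = 0
    for (q, i) in enumerate(np.arange(0, len(question_cnts), choices_per_row)):
        # sort the bubbles of this row from left to right
        row = sort_contours(question_cnts[i:i + choices_per_row])[0]
        bubbled = None
        for (j, c) in enumerate(row):
            # count the thresholded (white) pixels inside each bubble
            mask = np.zeros(thresh.shape, dtype="uint8")
            cv2.drawContours(mask, [c], -1, 255, -1)
            total = cv2.countNonZero(cv2.bitwise_and(thresh, thresh, mask=mask))
            if bubbled is None or total > bubbled[0]:
                bubbled = (total, j)
        if answer_key[q] == bubbled[1]:
            correct += 1
    # express the score as a percentage of the number of questions in the answer key
    return (correct / float(len(answer_key))) * 100

For the five-question sheet used here, grade_rows(questionCnts, thresh, ANSWER_KEY) should reproduce the same score as the loop above (without drawing the colored outlines).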

Input image: test_03.png

Running result: [screenshot of the graded sheet, with correct answers outlined in green, incorrect ones in red, and the score overlaid in the top-left corner]
The above is the entire content of this article. I hope it is helpful to your study, and I hope you will continue to support developpaer.
