## >>> 星瞳科技-OpenMV中文教程网 <<<

# Freak Example
#
# This script shows off keypoint tracking by itself. Put an object in front of
# your OpenMV Cam without anything else in the image (i.e. camera should be
# facing a smooth wall) and the camera will learn the keypoints and track
# whatever object is in the image. You can save keypoints to disk either via
# the OpenMV IDE or from in your script.
#
# Matching keypoints works by first extracting keypoints from an ROI. Once those
# are extracted then the OpenMV Cam compares the extracted keypoints against all
# the keypoints in an image. It tries to find the center matching point between
# the two sets of keypoints.
#
# Keep in mind that keypoint matching with just one training example isn't very
# robust. If you want professional quality results then stick with gathering
# keypoints from multiple angles and scales of the object.
# That said, if you're in a very controlled environment then keypoint tracking
# allows your OpenMV Cam to learn objects on the fly.
#
# If... you want really good keypoint matching results we suggest you gather
# keypoints from all faces of an object and with multiple rotations and scales.
# Comparing against all these sets of keypoints helps versus just one.
#
# NOTE: LOTS OF KEYPOINTS MAY CAUSE THE SYSTEM TO RUN OUT OF MEMORY!

import sensor, time, image

# Normalized keypoints are not rotation invariant...
# Leaving this False keeps the descriptors rotation invariant, so the
# object can still be matched when it appears rotated in the frame.
NORMALIZED = False

# Keypoint extractor threshold, range from 0 to any number.
# This threshold is used when extracting keypoints: the lower the
# threshold, the higher the number of keypoints extracted (and the
# more memory used — too many keypoints can exhaust RAM).
KEYPOINTS_THRESH = 30

# Keypoint-level matching threshold, range from 0 to 100.
# This threshold is used when matching two keypoint descriptors: it's the
# percentage of the distance between two descriptors to the max distance.
# In other words, the minimum matching percentage between 2 keypoints.
# Higher values demand closer matches but yield fewer successful matches.
MATCHING_THRESH = 80

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Skip a few frames to allow the sensor to settle down.
# Note: This takes more time when exec from the IDE.
for i in range(30):
    img = sensor.snapshot()

# kpts1 holds the target object's descriptor, learned from the first
# frame captured below; it stays None until then.
kpts1 = None
# Uncomment to load keypoints from file
clock = time.clock()

while True:
    clock.tick()
    img = sensor.snapshot()
    # Extract FREAK keypoints from the whole current frame.
    # find_keypoints(roi=Auto, threshold=32, normalized=False):
    #   roi=(x, y, w, h) limits extraction to a rectangle (default: full image);
    #   lower threshold -> more keypoints (and more memory used);
    #   normalized=False keeps descriptors rotation invariant.
    kpts2 = img.find_keypoints(threshold=KEYPOINTS_THRESH, normalized=NORMALIZED)

    if kpts1 is None:
        # First pass: adopt whatever is in front of the camera as the
        # target object's descriptor; every later frame is matched
        # against this reference set.
        kpts1 = kpts2
        print(kpts1)
    elif kpts2:
        # Compare the current frame's keypoints against the target's.
        # match_descriptor(type, descriptor0, descriptor1, threshold=60):
        # type is image.LBP or image.FREAK; threshold is the per-keypoint
        # matching threshold — higher means stricter, fewer matches.
        c = image.match_descriptor(image.FREAK, kpts1, kpts2, threshold=MATCHING_THRESH)
        # c[2] contains the percentage of matching keypoints.
        # If more than 25% of the keypoints match, draw stuff.
        if c[2] > 25:
            # c[0], c[1] is the center of the matched object;
            # mark it with a cross of size 15.
            img.draw_cross(c[0], c[1], size=15)
            img.draw_string(0, 10, "Match %d%%" % (c[2]))

    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f" % (clock.fps()))