OpenMV Servo Gimbal: Color Recognition and Face Tracking Code

fanfan   ·   Posted 5 months ago   ·   OpenMV Electronic Blocks

Servo gimbal color recognition and tracking



# Blob Detection Example
#
# This example shows off how to use the find_blobs function to find color
# blobs in the image. This example in particular looks for dark green objects.

import sensor, image, time
from pyb import Servo
from pyb import LED

step = 0
step_y = 0
step_flag = 0
x = 0
y = 0

s1 = Servo(1) # P7, servo 1
s2 = Servo(2) # P8, servo 2

# The LEDs can be used as visual debugging aids.
red_led = LED(1)
green_led = LED(2)
blue_led = LED(3)

# Set the color threshold. The six values are the min/max of the L, A and B
# channels: (minL, maxL, minA, maxA, minB, maxB). Pick the LAB values from the
# three histogram plots to the left of the framebuffer in the IDE. For a
# grayscale image only two numbers, (min, max), are needed.
red_threshold = (0, 125, -35, -20, 5, 10)

sensor.reset() # Initialize the camera.
sensor.set_pixformat(sensor.RGB565) # Use RGB565.
sensor.set_framesize(sensor.QQVGA) # Use QQVGA (160x120).
sensor.set_auto_whitebal(False) # White balance is on by default; it must be off for color tracking.
sensor.set_hmirror(True) # Mirror horizontally, which makes debugging easier.
sensor.set_auto_gain(False) # Turn off auto gain; re-enable it if the image is too dark.
sensor.skip_frames(10) # Let new settings take effect.

clock = time.clock() # Tracks FPS.

s1.pulse_width(1500) # Center servo 1.
s2.pulse_width(1500) # Center servo 2.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
    red_blobs = img.find_blobs([red_threshold])
    if red_blobs:
        for b in red_blobs:
            if b[2]>5 and b[3]>5: # Filter out blobs that are too small; adjust for your setup.
                img.draw_rectangle(b[0:4], color=(0,0,0)) # Black rectangle around the target color region.
                img.draw_cross(b[5], b[6]) # Cross at the blob center (cx, cy).
                x = b[5] # Blob center x (cx).
                y = b[6] # Blob center y (cy).
                # Below is a simple servo-tracking algorithm; tune the parameters
                # on real hardware, and feel free to improve it.
                if x > 82:
                    step=step-int((x-80)*1)
                    if -800 > step:
                        step=step+int((x-80)*1)
                if x < 78:
                    step=step+int((80-x)*1)
                    if step > 900:
                        step=step-int((80-x)*1)
                if y > 65:
                    step_y=step_y-int((y-60)*0.5)
                    if -800 > step_y:
                        step_y=step_y+int((y-60)*0.5)
                if y < 55:
                    step_y=step_y+int((60-y)*0.5)
                    if step_y > 900:
                        step_y=step_y-int((60-y)*0.5)
                s1.pulse_width(1500-step) # Update servo 1 (x axis).
                s2.pulse_width(1500-step_y) # Update servo 2 (y axis).
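
Both axes above follow the same pattern: accumulate an offset `step` away from the 1500 µs center pulse, ignore errors inside a small dead band around the image center (x in 78..82, y in 55..65) to suppress jitter, and undo any update that would push the offset past the travel limits (-800..900). A minimal sketch that factors this into one hypothetical helper (`track_axis` is not part of the original script):

def track_axis(step, pos, center, dead, k, lo=-800, hi=900):
    # Accumulate a pulse-width offset toward the target, with a
    # +/-dead pixel dead band and hard travel limits; this mirrors
    # the inline if-chains in the loop above.
    err = center - pos
    if abs(err) > dead:
        new_step = step + int(k * err)
        if lo <= new_step <= hi:
            step = new_step
    return step

# Equivalent per-frame updates:
# step   = track_axis(step,   x, 80, 2, 1.0)
# step_y = track_axis(step_y, y, 60, 5, 0.5)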

Face tracking with servos

# Face Tracking Example
#
# This example shows off using the keypoints feature of your OpenMV Cam to track
# a face after it has been detected by a Haar Cascade. The first part of this
# script finds a face in the image using the frontalface Haar Cascade.
# After which the script uses the keypoints feature to automatically learn your
# face and track it. Keypoints can be used to automatically track anything.
#
# NOTE: LOTS OF KEYPOINTS MAY CAUSE THE SYSTEM TO RUN OUT OF MEMORY!

import sensor, time, image

from pyb import Servo
# Normalized keypoints are not rotation invariant...
NORMALIZED=False
# Keypoint extractor threshold, range from 0 to any number.
# This threshold is used when extracting keypoints, the lower
# the threshold the higher the number of keypoints extracted.
KEYPOINTS_THRESH=32
# Keypoint-level threshold, range from 0 to 100.
# This threshold is used when matching two keypoint descriptors, it's the
# percentage of the distance between two descriptors to the max distance.
# In other words, the minimum matching percentage between 2 keypoints.
MATCHING_THRESH=80

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

x_pos = 1500 # default
y_pos = 800 # default

x_min = 800
x_max = 2100
y_max = 2000
y_min = 10

x_gain = +1.00 # You have to tweak this value to stabilize the control loop.
               # You also may need to invert the value if the system goes
               # in the wrong direction.
y_gain = +1.00 # You have to tweak this value to stabilize the control loop.
               # You also may need to invert the value if the system goes
               # in the wrong direction.
xServo = Servo(1)
yServo = Servo(2)
xServo.pulse_width(x_pos)
yServo.pulse_width(y_pos)

# Skip a few frames to allow the sensor to settle down.
# Note: This takes more time when executed from the IDE.
for i in range(0, 10):
    img = sensor.snapshot()
    img.draw_string(0, 0, "Please wait...")

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# First set of keypoints
kpts1 = None

# Find a face!
while (kpts1 is None):
    img = sensor.snapshot()
    img.draw_string(0, 0, "Looking for a face...")
    # Find faces
    objects = img.find_features(face_cascade, threshold=0.5, scale=1.5)
    if objects:
        # Expand the ROI by 22 pixels in each direction (the pattern scale)
        face = (objects[0][0]-22, objects[0][1]-22,objects[0][2]+22*2, objects[0][3]+22*2)
        # Extract keypoints using the detect face size as the ROI
        kpts1 = img.find_keypoints(threshold=KEYPOINTS_THRESH, normalized=NORMALIZED, roi=face)
        # Draw a rectangle around the first face
        img.draw_rectangle(objects[0])
        img.draw_cross((objects[0][0]+20), objects[0][1]+40)


        x = objects[0][0]+20
        y = objects[0][1]+40

        x_error = 60-x
        y_error = 60-y

        x_pos += 2*x_error
        y_pos -= 1*y_error
        print(y)

        # Clamp output between min and max
        if (x_pos > x_max):
            x_pos = x_max
        if (x_pos< x_min):
            x_pos = x_min

        # Clamp output between min and max
        if (y_pos > y_max):
            y_pos = y_max
        if (y_pos < y_min):
            y_pos = y_min
        xServo.pulse_width(int(x_pos))
        yServo.pulse_width(int(y_pos))

# Draw keypoints

img.draw_keypoints(kpts1, size=12)
time.sleep(1000)

# FPS clock
clock = time.clock()
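
The script above ends right after learning the face keypoints: the matching loop that actually tracks the face is missing. A minimal sketch of that loop, assuming the legacy `image.match_descriptor(kpts1, kpts2, threshold=...)` API shipped by firmware of that era, and reusing the same proportional update and clamping as the detection phase:

while (True):
    clock.tick()
    img = sensor.snapshot()
    # Extract keypoints from the new frame and match them against the
    # descriptor learned from the detected face.
    kpts2 = img.find_keypoints(threshold=KEYPOINTS_THRESH, normalized=NORMALIZED)
    if kpts2:
        match = image.match_descriptor(kpts1, kpts2, threshold=MATCHING_THRESH)
        if match.count() > 10:
            img.draw_rectangle(match.rect())
            img.draw_cross(match.cx(), match.cy(), size=10)
            # Same proportional update and clamp as in the detection phase.
            x_pos += 2 * (60 - match.cx())
            y_pos -= 1 * (60 - match.cy())
            x_pos = max(x_min, min(x_max, x_pos))
            y_pos = max(y_min, min(y_max, y_pos))
            xServo.pulse_width(int(x_pos))
            yServo.pulse_width(int(y_pos))
    print(clock.fps())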


Blob detection with servos

# Blob Detection Example
#
# This example shows off how to use the find_blobs function to find color
# blobs in the image. This example in particular looks for dark green objects.

import sensor, image, time
from pyb import Servo
from pyb import LED
# For color tracking to work really well you should ideally be in a very, very,
# very controlled environment where the lighting is constant...
green_threshold   = (   0,   80,  -70,   -10,   -0,   30)
# You may need to tweak the above settings for tracking green things...
# Select an area in the framebuffer to copy the color settings from.

green_led = LED(3)
x_pos = 1500 # default
y_pos = 500 # default

x_min = 800
x_max = 2100
y_max = 1300
y_min = 1
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # use RGB565.
sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
sensor.skip_frames(10) # Let new settings take effect.
#sensor.set_whitebal(False) # turn this off.
clock = time.clock() # Tracks FPS.



x_gain = +1.00 # You have to tweak this value to stabilize the control loop.
               # You also may need to invert the value if the system goes
               # in the wrong direction.
y_gain = +1.00 # You have to tweak this value to stabilize the control loop.
               # You also may need to invert the value if the system goes
               # in the wrong direction.
xServo = Servo(1)
yServo = Servo(2)
xServo.pulse_width(x_pos)
yServo.pulse_width(y_pos)

while (True):
    #clock.tick()
    green_led.off()
    img = sensor.snapshot()
    blobs = img.find_blobs([green_threshold])
    if blobs:
        for b in blobs[0:1]:
            # Draw a rect around the blob.
            if(b[4]>100):
                img.draw_rectangle(b[0:4]) # rect
                img.draw_cross(b[5], b[6]) # cx, cy
                #img.draw_string(0, 10, "Match %d%%"%(b[4]))
                #green_led.on()
                x = b[5]
                y = b[6]

                x_error = 80-x
                y_error = y - 60

                x_pos += 2*x_error
                y_pos += 2*y_error

                if (x_pos > x_max):
                    x_pos = x_max
                if (x_pos < x_min):
                    x_pos = x_min
                # Clamp output between min and max
                if (y_pos > y_max):
                    y_pos = y_max
                if (y_pos < y_min):
                    y_pos = y_min
                #print (y_pos)
                xServo.pulse_width(int(x_pos))
                yServo.pulse_width(int(y_pos))

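One thing worth noticing in both servo scripts: `x_gain` and `y_gain` are declared with instructions to tune them, but the update hard-codes a gain of 2 and never applies them. A minimal sketch, with a hypothetical `clamp` helper, showing how the loop body could fold the gains in so that tuning them actually has an effect:

def clamp(v, lo, hi):
    # Keep the pulse width inside the servo's safe range.
    return max(lo, min(hi, v))

# Inside the loop, the updates and the four clamping 'if's would become:
# x_pos = clamp(x_pos + x_gain * 2 * (80 - b[5]), x_min, x_max)
# y_pos = clamp(y_pos + y_gain * 2 * (b[6] - 60), y_min, y_max)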

Attachment List

舵机云台颜色识别跟踪.zip   File Size: 0.001M (Downloads: 15)

中北大学Wut有色差的代码.zip   File Size: 0.003M (Downloads: 1)

2 Replies   |  Last reply 5 months ago  |  355 Views

fanfan
Posted 2 months ago

North University of China "Wut" code with color cast

import sensor, image, time, math, lcd
from pyb import UART
# Tracks a black line. Use [(128, 255)] for tracking a white line.
#GRAYSCALE_THRESHOLD = [(0, 77)]
# Set the threshold: for a black line use GRAYSCALE_THRESHOLD = [(0, 64)];
# for a white line use GRAYSCALE_THRESHOLD = [(128, 255)].
red_threshold = [(39, 87, -46, -4, -18, 14)] # L A B; alternative: (19, 97, -47, 30, -22, 6)



# Each roi is (x, y, w, h). The line detection algorithm will try to find the
# centroid of the largest blob in each roi. The x position of the centroids
# will then be averaged with different weights where the most weight is assigned
# to the roi near the bottom of the image and less to the next roi and so on.
ROIS = [ # [ROI, weight]
        (0, 60, 160, 20, 0.7), # You'll need to tweak the weights for your app
        #(0, 50, 160, 20, 0.3), # depending on how your robot is set up.
        (0, 30, 160, 20, 0.1)
       ]
# Each ROI is (x, y, w, h, weight): a rectangle with top-left corner (x, y),
# width w, height h, and a weight. This example uses QQVGA (160x120), so the
# ROIs slice the image into horizontal bands. Tune them for your setup: the
# band nearest the robot's view gets the largest weight, here (0, 60, 160, 20, 0.7).

# Compute the weight divisor (we're computing this so you don't have to make weights add to 1).
weight_sum = 0 # Initialize the weight sum.
for r in ROIS: weight_sum += r[4] # r[4] is each rectangle's weight.

# Camera setup...
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # Use RGB565.
sensor.set_framesize(sensor.QQVGA) # Use QQVGA for speed.
#sensor.skip_frames(500) # Let new settings take effect.
sensor.set_auto_gain(False) # Must be turned off for color tracking.
sensor.set_auto_whitebal(False) # Must be turned off for color tracking.
uart = UART(3, 9600)
clock = time.clock() # Tracks FPS.
lcd.init()
Left = 'L'
Right = 'R'
Go = 'Q'


#def find_max(blobs):
    #max_size=0
    #for blob in blobs:
        #if blob[2]*blob[3] > max_size:
            #max_blob=blob
            #max_size = blob[2]*blob[3]
    #return max_blob


while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    centroid_sum = 0
    # Use color tracking to find the line segment inside each of the three ROIs.
    for r in ROIS:
        blobs = img.find_blobs(red_threshold, roi=r[0:4], merge=True)
        # r[0:4] is roi tuple.
        # Find the line in view; merge=True merges the detected regions into one blob.

        # A line was found in this target region:
        if blobs:
            # Find the index of the blob with the most pixels.
            most_pixels = 0
            largest_blob = 0
            for i in range(len(blobs)):
            # This ROI may contain more than one blob (line segment); pick the largest as the target line.
                if blobs[i].pixels() > most_pixels:
                    most_pixels = blobs[i].pixels()
                    # blobs[i].pixels() is this blob's pixel count; if it exceeds
                    # most_pixels, record this blob as the largest so far.
                    largest_blob = i

            # Draw a rect around the blob.
            img.draw_rectangle(blobs[largest_blob].rect())
            # Mark the ROI's largest blob with a rectangle and a cross.
            img.draw_cross(blobs[largest_blob].cx(),
                           blobs[largest_blob].cy())

            centroid_sum += blobs[largest_blob].cx() * r[4] # r[4] is the roi weight.
            # centroid_sum accumulates each ROI's largest-blob center x, weighted by that ROI's weight.

    center_pos = (centroid_sum / weight_sum) # Determine center of line.
    # Weighted average: the x position of the line's center.

    # Convert the center_pos to a deflection angle. We're using a non-linear
    # operation so that the response gets stronger the farther off the line we
    # are. Non-linear operations are good to use on the output of algorithms
    # like this to cause a response "trigger".
    deflection_angle = 0
    # The angle the robot should turn by.

    # The 80 is from half the X res, the 60 is from half the Y res. The
    # equation below is just computing the angle of a triangle where the
    # opposite side of the triangle is the deviation of the center position
    # from the center and the adjacent side is half the Y res. This limits
    # the angle output to around -45 to 45. (It's not quite -45 and 45).
    deflection_angle = -math.atan((center_pos-80)/60)
    # 80 and 60 are half the image width and height (QQVGA, 160x120).
    # Note that the result is in radians.

    # Convert angle in radians to degrees.
    deflection_angle = math.degrees(deflection_angle)

    img.draw_string(20, 10, "%.2f"%deflection_angle)
    lcd.display(img)
    if uart.any():
        if (uart.readchar() == ord('K')):
            #uart.write("A")

            if (deflection_angle > 30 and deflection_angle < 53.130102):
                uart.write(Left)
                #print("Left:%f" % deflection_angle)

            elif (deflection_angle > -30 and deflection_angle < 30):
                uart.write(Go)
                #print("Go:%f" % deflection_angle)

            elif deflection_angle < -30:
                uart.write(Right)
                #print("Right:%f" % deflection_angle)

            # 53.130102 deg is atan(80/60), the maximum deflection, which is
            # exactly what center_pos == 0 (no line found) produces. A float
            # equality test would never fire, so compare with >= instead.
            if deflection_angle >= 53.130102:
                uart.write(Right)


    # Now you have an angle telling you how much to turn the robot by which
    # incorporates the part of the line nearest to the robot and parts of
    # the line farther away from the robot for a better prediction.
    print("Turn Angle: %f" % deflection_angle)


    # Print the result in the terminal.

    print(clock.fps())

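As a quick check on the 53.130102 constant used above: center_pos spans 0..160 on a QQVGA frame, so the deflection angle peaks at atan(80/60), i.e. the angle of a 3-4-5 triangle:

import math
# Maximum deflection on a QQVGA (160x120) frame: atan(80/60) = atan(4/3).
print(math.degrees(math.atan(80 / 60)))  # 53.13010235415598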


fanfan
Posted 2 months ago

OpenMV servo gimbal assembly video

