Hardware used:
OpenMV4 H7 R2
Self-designed OpenMV LCD expansion board, fabricated at JLC (立创)
1.8-inch TFT SPI display with the ST7735S driver
DX-BT24 Bluetooth module (大夏龙雀)
Wiring: P5 goes to the BT24's TX pin and receives the data sent over Bluetooth.
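Before loading the full script, a quick receive test helps confirm that bytes from the phone's serial app actually reach P5. This is a minimal sketch, assuming the BT24 is still at 9600 baud (the same rate the main script below uses on UART 3):

import time
from machine import UART

uart = UART(3, 9600, timeout_char=1000)  # UART 3 on the OpenMV H7: P4 = TX, P5 = RX

while True:
    data = uart.read(1)
    if data:
        print("received:", data)  # 'a'/'b'/'c'/'d' sent from the app should show up here
    time.sleep_ms(20)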
Results:
Demo video (OpenMV offline threshold tuning): https://live.csdn.net/v/embed/410763
Code:
Principle: over the Bluetooth link, sending 'a' or 'b' raises NUMBER1 or NUMBER2 by 5, and sending 'c' or 'd' lowers them (a compact version of this command handling is sketched after the full listing).
import sensor, image, time
from machine import UART
import display

# Thresholds used for img.binary()
NUMBER1 = 51
NUMBER2 = 255
# These two variables set the grayscale threshold. The goal is to separate black from white
# cleanly under whatever lighting the venue has; adjusting them on-site (here via Bluetooth)
# completes the field calibration.
turnGRAYSCALE_THRESHOLD = [(NUMBER1, NUMBER2)]
GRAYSCALE_THRESHOLD = [(0, 0)]
# Each ROI is (x, y, w, h). The line-following algorithm finds the centroid of the largest
# blob in each ROI, then averages the centroid x positions with different weights: the ROI
# nearest the bottom of the image gets the largest weight, the next ROI a smaller one, and so on.
rois = [(0, 100, 160, 20), (0, 50, 160, 20), (0, 0, 160, 20)]
# Each ROI can also carry a weight, (x, y, w, h, weight), for a rectangle with top-left corner
# (x, y), width w and height h. This example uses QQVGA (160x120), so the ROIs split the image
# into three horizontal strips. Tune them to the actual track; the strip closest to the robot
# (the bottom one) should get the largest weight, e.g. (0, 100, 160, 20, 0.7).

# Initialize the sensor.
sensor.reset()
# Pixel format: RGB565 color or GRAYSCALE.
sensor.set_pixformat(sensor.GRAYSCALE)  # use grayscale.
# Frame size.
sensor.set_framesize(sensor.QQVGA)  # use QQVGA for speed.
sensor.skip_frames(time=2000)  # Let new settings take effect.
sensor.set_auto_gain(False)  # must be turned off for color tracking
sensor.set_auto_whitebal(False)  # must be turned off for color tracking
lcd = display.SPIDisplay()  # Initialize the LCD screen.
sensor.set_vflip(1)
sensor.set_hmirror(1)
clock = time.clock()  # Tracks FPS.
largest2_blob = 0
deflection_angle = 0  # Initialize deflection_angle outside of the loop.
uart = UART(3, 9600, timeout_char=1000)  # UART 3: P4 = TX, P5 = RX on the OpenMV H7
while True:
    turnGRAYSCALE_THRESHOLD = [(NUMBER1, NUMBER2)]
    clock.tick()  # Track elapsed milliseconds between snapshots.
    img = sensor.snapshot()  # Capture an image.
    img.binary(turnGRAYSCALE_THRESHOLD)  # Binarize with the current threshold.
    largest_blob = None
    #largest2_blob = None
    #largest3_blob = None
    # Track the line in each defined ROI (only the middle ROI is used in this demo).
    #blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=rois[0], merge=True)
    #if blobs:
    #    largest_blob = max(blobs, key=lambda b: b.pixels())
    blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=rois[1], merge=True)
    if blobs:
        largest2_blob = max(blobs, key=lambda b: b.pixels())
    #blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=rois[2], merge=True)
    #if blobs:
    #    largest3_blob = max(blobs, key=lambda b: b.pixels())
    # Offset = blob centroid x minus the image center x (79 for a 160-pixel-wide frame).
    pianyi = 0
    if largest2_blob:
        pianyi = largest2_blob.cx() - 79
        img.draw_rectangle(largest2_blob.rect(), color=0)
        # Map the offset to a steering angle (computed for line following,
        # not transmitted anywhere in this threshold-tuning demo).
        if -5 <= pianyi < 5:
            deflection_angle = 0
        elif -15 <= pianyi < -5:
            deflection_angle = -2
        elif 5 <= pianyi < 15:
            deflection_angle = 2
        elif -30 <= pianyi < -15:
            deflection_angle = -3
        elif 15 <= pianyi < 30:
            deflection_angle = 3
        elif -50 <= pianyi < -30:
            deflection_angle = -5
        elif 30 <= pianyi < 50:
            deflection_angle = 5
    # Clamp the thresholds to the valid 0-255 grayscale range.
    if NUMBER1 > 255:
        NUMBER1 = 255
    if NUMBER2 > 255:
        NUMBER2 = 255
    if NUMBER1 < 0:
        NUMBER1 = 0
    if NUMBER2 < 0:
        NUMBER2 = 0
    lcd.write(img)  # Show the binarized image on the LCD.
    byte = uart.read(1)  # Read one command byte from the BT24.
    if byte:
        print(byte)
        if byte == b'a':
            NUMBER1 = NUMBER1 + 5
        if byte == b'b':
            NUMBER2 = NUMBER2 + 5
        if byte == b'c':
            NUMBER1 = NUMBER1 - 5
        if byte == b'd':
            NUMBER2 = NUMBER2 - 5
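The four if checks at the end of the loop can also be collapsed into a lookup table. The following is just a compact, behavior-equivalent sketch of that command handling (the names STEP, ADJUST and apply_command are mine, not part of the original script); the thresholds are clamped to 0-255 exactly as above:

STEP = 5
# command byte -> (index, delta); index 0 adjusts NUMBER1, index 1 adjusts NUMBER2
ADJUST = {b'a': (0, +STEP), b'b': (1, +STEP), b'c': (0, -STEP), b'd': (1, -STEP)}

def apply_command(byte, thresholds):
    # thresholds is [NUMBER1, NUMBER2]; unknown bytes are ignored
    if byte in ADJUST:
        idx, delta = ADJUST[byte]
        thresholds[idx] = min(255, max(0, thresholds[idx] + delta))
    return thresholds

# Example: apply_command(b'a', [51, 255]) returns [56, 255]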
Principle: with push buttons, pressing KEY1 or KEY2 raises NUMBER1 or NUMBER2 by 5, and KEY3 or KEY4 lowers them; holding a key keeps adjusting the value every frame (an edge-triggered, one-step-per-press variant is sketched after the listing).
P3, P4, P5 and P6 connect to KEY1, KEY2, KEY3 and KEY4 on the button module, and plain if checks in the loop read whether each pin is pulled low. Note that UART 3 on the OpenMV H7 also uses P4/P5, so the Bluetooth commands and the buttons on those two pins should not be relied on at the same time.
Demo video (OpenMV offline threshold tuning with buttons): https://live.csdn.net/v/embed/411091
import sensor, image, time
from machine import UART
from pyb import Pin
import display

# Thresholds used for img.binary()
NUMBER1 = 51
NUMBER2 = 255
# These two variables set the grayscale threshold. The goal is to separate black from white
# cleanly under whatever lighting the venue has; adjusting them on-site (here with push
# buttons) completes the field calibration.
turnGRAYSCALE_THRESHOLD = [(NUMBER1, NUMBER2)]
GRAYSCALE_THRESHOLD = [(0, 0)]
# Each ROI is (x, y, w, h). The line-following algorithm finds the centroid of the largest
# blob in each ROI, then averages the centroid x positions with different weights: the ROI
# nearest the bottom of the image gets the largest weight, the next ROI a smaller one, and so on.
rois = [(0, 100, 160, 20), (0, 50, 160, 20), (0, 0, 160, 20)]
# Each ROI can also carry a weight, (x, y, w, h, weight). This example uses QQVGA (160x120),
# so the ROIs split the image into three horizontal strips; the strip closest to the robot
# (the bottom one) should get the largest weight, e.g. (0, 100, 160, 20, 0.7).

# Initialize the sensor.
sensor.reset()
# Pixel format: RGB565 color or GRAYSCALE.
sensor.set_pixformat(sensor.GRAYSCALE)  # use grayscale.
# Frame size.
sensor.set_framesize(sensor.QQVGA)  # use QQVGA for speed.
sensor.skip_frames(time=2000)  # Let new settings take effect.
sensor.set_auto_gain(False)  # must be turned off for color tracking
sensor.set_auto_whitebal(False)  # must be turned off for color tracking
lcd = display.SPIDisplay()  # Initialize the LCD screen.
# Button inputs with internal pull-ups: a pressed key pulls its pin to 0.
p_in3 = Pin('P3', Pin.IN, Pin.PULL_UP)  # KEY1: NUMBER1 += 5
p_in4 = Pin('P4', Pin.IN, Pin.PULL_UP)  # KEY2: NUMBER2 += 5
p_in5 = Pin('P5', Pin.IN, Pin.PULL_UP)  # KEY3: NUMBER1 -= 5
p_in6 = Pin('P6', Pin.IN, Pin.PULL_UP)  # KEY4: NUMBER2 -= 5
sensor.set_vflip(1)
sensor.set_hmirror(1)
clock = time.clock()  # Tracks FPS.
largest2_blob = 0
deflection_angle = 0  # Initialize deflection_angle outside of the loop.
uart = UART(3, 9600, timeout_char=1000)  # UART 3 shares P4/P5 with two of the buttons, see the note above
while True:
    turnGRAYSCALE_THRESHOLD = [(NUMBER1, NUMBER2)]
    clock.tick()  # Track elapsed milliseconds between snapshots.
    img = sensor.snapshot()  # Capture an image.
    img.binary(turnGRAYSCALE_THRESHOLD)  # Binarize with the current threshold.
    largest_blob = None
    #largest2_blob = None
    #largest3_blob = None
    # Track the line in each defined ROI (only the middle ROI is used in this demo).
    #blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=rois[0], merge=True)
    #if blobs:
    #    largest_blob = max(blobs, key=lambda b: b.pixels())
    blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=rois[1], merge=True)
    if blobs:
        largest2_blob = max(blobs, key=lambda b: b.pixels())
    #blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=rois[2], merge=True)
    #if blobs:
    #    largest3_blob = max(blobs, key=lambda b: b.pixels())
    # Offset = blob centroid x minus the image center x (79 for a 160-pixel-wide frame).
    pianyi = 0
    if largest2_blob:
        pianyi = largest2_blob.cx() - 79
        img.draw_rectangle(largest2_blob.rect(), color=0)
        # Map the offset to a steering angle (computed for line following,
        # not transmitted anywhere in this threshold-tuning demo).
        if -5 <= pianyi < 5:
            deflection_angle = 0
        elif -15 <= pianyi < -5:
            deflection_angle = -2
        elif 5 <= pianyi < 15:
            deflection_angle = 2
        elif -30 <= pianyi < -15:
            deflection_angle = -3
        elif 15 <= pianyi < 30:
            deflection_angle = 3
        elif -50 <= pianyi < -30:
            deflection_angle = -5
        elif 30 <= pianyi < 50:
            deflection_angle = 5
    # Buttons: a pressed key reads 0 because of the pull-ups. While a key is held,
    # the value keeps changing by 5 on every frame.
    if p_in3.value() == 0:
        NUMBER1 = NUMBER1 + 5
    if p_in4.value() == 0:
        NUMBER2 = NUMBER2 + 5
    if p_in5.value() == 0:
        NUMBER1 = NUMBER1 - 5
    if p_in6.value() == 0:
        NUMBER2 = NUMBER2 - 5
    # Clamp the thresholds to the valid 0-255 grayscale range.
    if NUMBER1 > 255:
        NUMBER1 = 255
    if NUMBER2 > 255:
        NUMBER2 = 255
    if NUMBER1 < 0:
        NUMBER1 = 0
    if NUMBER2 < 0:
        NUMBER2 = 0
    lcd.write(img)  # Show the binarized image on the LCD.
    byte = uart.read(1)  # Optional: the Bluetooth commands still work as well.
    if byte:
        print(byte)
        if byte == b'a':
            NUMBER1 = NUMBER1 + 5
        if byte == b'b':
            NUMBER2 = NUMBER2 + 5
        if byte == b'c':
            NUMBER1 = NUMBER1 - 5
        if byte == b'd':
            NUMBER2 = NUMBER2 - 5
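Because the loop adds or subtracts 5 on every frame while a key is held, the value sweeps quickly at camera frame rates. If one step per press is preferred instead, an edge-triggered variant only reacts to the released-to-pressed transition. This is a sketch using the same pin assignment as the script above (the helper name pressed_once is mine):

from pyb import Pin

keys = {
    'P3': Pin('P3', Pin.IN, Pin.PULL_UP),  # KEY1: NUMBER1 += 5
    'P4': Pin('P4', Pin.IN, Pin.PULL_UP),  # KEY2: NUMBER2 += 5
    'P5': Pin('P5', Pin.IN, Pin.PULL_UP),  # KEY3: NUMBER1 -= 5
    'P6': Pin('P6', Pin.IN, Pin.PULL_UP),  # KEY4: NUMBER2 -= 5
}
prev = {name: 1 for name in keys}  # pulled up, so 1 means released

def pressed_once(name):
    # True only on the 1 -> 0 transition, i.e. the frame the key goes down.
    now = keys[name].value()
    edge = (prev[name] == 1 and now == 0)
    prev[name] = now
    return edge

# Inside the main loop, replace the level checks with:
#     if pressed_once('P3'): NUMBER1 += 5
#     if pressed_once('P4'): NUMBER2 += 5
#     if pressed_once('P5'): NUMBER1 -= 5
#     if pressed_once('P6'): NUMBER2 -= 5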
Tags: roi, blobs, Pin, offline, blob, THRESHOLD, openmv, sensor, line-following
From: https://blog.csdn.net/2301_80317247/article/details/140547101