Skip to content
This repository has been archived by the owner on Jun 19, 2020. It is now read-only.

Commit

Permalink
1. Fixed animations
Browse files Browse the repository at this point in the history
2. Added target sorting
3. Made object tracking and calibration consistent
  • Loading branch information
Your Name committed Feb 1, 2020
1 parent 7595f2c commit e9b8d94
Show file tree
Hide file tree
Showing 12 changed files with 135 additions and 120 deletions.
62 changes: 33 additions & 29 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@
from cameras import Camera
from cameras import image_converter

from profiles import color_profiles
from processing import bay_tracker
from processing import port_tracker
from processing import ball_tracker2
Expand Down Expand Up @@ -105,10 +104,10 @@ def main():

time.sleep(5)

tracking_ws = create_connection("ws://localhost:8080/tracking/ws")
camera_ws = create_connection("ws://localhost:8080/camera/ws")
processed_ws = create_connection("ws://localhost:8080/processed/ws")
calibration_ws = create_connection("ws://localhost:8080/calibration/ws")
tracking_ws = create_connection("ws://localhost:8080/tracking/ws")

controller_listener.start("ws://localhost:8080/dashboard/ws")

Expand All @@ -124,13 +123,33 @@ def main():
print('opening camera')
cap.open(config.video_source_number)

_, raw_frame = cap.read()
_, bgr_frame = cap.read()

resized_frame = cv2.resize(bgr_frame, ((int)(640), (int)(480)), 0, 0, cv2.INTER_CUBIC)
rgb_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB)


if main_controller.enable_camera_feed:

jpg=image_converter.convert_to_jpg(rgb_frame)
camera_ws.send_binary(jpg)

if main_controller.enable_calibration_feed:

calibration_frame = rgb_frame.copy()

calibration_frame = color_calibrate.process(calibration_frame,
camera_mode = main_controller.calibration.get('camera_mode', 'RAW'),
color_mode = main_controller.calibration.get('color_mode'),
apply_mask = main_controller.calibration.get('apply_mask', False))

jpg=image_converter.convert_to_jpg(calibration_frame)
calibration_ws.send_binary(jpg)

rgb_frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2RGB)

if main_controller.camera_mode == CAMERA_MODE_RAW:

frame = frame
processed_frame = rgb_frame

elif main_controller.camera_mode == CAMERA_MODE_LOADING_BAY:

Expand All @@ -140,19 +159,19 @@ def main():
camera,
frame_cnt,
color_profile)

# print(tracking_data)
dashboard.putStringArray(networktables.keys.vision_target_data, tracking_data)
tracking_ws.send(json.dumps(dict(targets=tracking_data)))

elif main_controller.camera_mode == CAMERA_MODE_BALL:

color_profile=main_controller.color_profiles[CAMERA_MODE_BALL]
# print("ball")

processed_frame, tracking_data = ball_tracker2.process(rgb_frame,
camera,
frame_cnt,
color_profile)

tracking_ws.send(json.dumps(dict(targets=tracking_data)))

elif main_controller.camera_mode == CAMERA_MODE_HEXAGON:
Expand All @@ -165,22 +184,7 @@ def main():
color_profile)


if main_controller.enable_camera_feed:

jpg=image_converter.convert_to_jpg(rgb_frame)
camera_ws.send_binary(jpg)

if main_controller.enable_calibration_feed:

calibration_frame = raw_frame.copy()

calibration_frame = color_calibrate.process(calibration_frame,
camera_mode = main_controller.calibration.get('camera_mode', 'RAW'),
color_mode = main_controller.calibration.get('color_mode'),
apply_mask = main_controller.calibration.get('apply_mask', False))

jpg=image_converter.convert_to_jpg(calibration_frame)
calibration_ws.send_binary(jpg)

if main_controller.enable_processing_feed:

Expand All @@ -199,8 +203,8 @@ def main():
# if out is not None:
# out.write(frame)

cv2.imshow('frame', processed_frame )
#cv2.waitKey(1)
# cv2.imshow('frame', processed_frame )
# cv2.waitKey(0)

else:
logger.info('waiting for control socket')
Expand All @@ -210,13 +214,13 @@ def main():
cap.release()
time.sleep(.3)

if cv2.waitKey(1) & 0xFF == ord('q'):
break
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break



if __name__ == '__main__':
p = Process(target=start_web.main)
p.start()
#p = Process(target=start_web.main)
#p.start()
main()
p.join()
#p.join()
53 changes: 30 additions & 23 deletions processing/ball_tracker2.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,35 +14,41 @@
from processing import shape_util
import time

from profiles import color_profiles
from controls import CAMERA_MODE_RAW, CAMERA_MODE_LOADING_BAY, CAMERA_MODE_BALL, CAMERA_MODE_HEXAGON
import network

MIN_AREA = 30
MIN_AREA = 15
BALL_RADIUS = 3.5


debug = True
debug = False

def process(img, camera, frame_cnt, color_profile):
global rgb_window_active, hsv_window_active

FRAME_WIDTH = camera.FRAME_WIDTH
FRAME_HEIGHT = camera.FRAME_HEIGHT
red = color_profile.red
green = color_profile.green
blue = color_profile.blue
hue = color_profile.hsv_hue
sat = color_profile.hsv_sat
val = color_profile.hsv_val

tracking_data = []
original_img = img

img = cv2.GaussianBlur(img, (13, 13), 0)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv_mask = cvfilters.hsv_threshold(img, color_profile)
img = cv2.bitwise_and(img, img, hsv_mask)
#cv2.imshow('img', img)
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
mask_hsv = cv2.inRange(hsv, (hue.min, sat.min, val.min), (hue.max, sat.max, val.max))
mask_rgb = cv2.inRange(img, (red.min, green.min, blue.min), (red.max, green.max, blue.max))
img = cvfilters.apply_mask(img, mask_hsv)
img = cvfilters.apply_mask(img, mask_rgb)
img = cv2.erode(img, None, iterations=2)
img = cv2.dilate(img, None, iterations=2)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

if debug:
cv2.imshow('ball tracker hsv', hsv_mask)
cv2.imshow('ball tracker img', img)

_, contours, hierarchy = cv2.findContours(img,
Expand All @@ -57,24 +63,25 @@ def process(img, camera, frame_cnt, color_profile):
peri = cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, 0.04 * peri, True)
area = cv2.contourArea(approx)
x, y, w, h = cv2.boundingRect(approx)
((x, y), radius) = cv2.minEnclosingCircle(contour)
# limit the number of contours to process
#

#print('%s area:%s' %(index, area) )
if area > MIN_AREA:
contour_list.append(contour)
x, y, w, h = cv2.boundingRect(approx)
center_mass_x = x + w / 2
center_mass_y = y + h / 2
((x,y), radius) = cv2.minEnclosingCircle(contour)
contour_list.append(contour)

#
# tests for if its width is around its height which should be true

# print('x: %s y:%s ratio:%s' % (w, h, w/h))

if True :
#convert distance to inches
distance = shape_util.get_distance(w, 2 * radius, camera.FOCAL_LENGTH)
distance = shape_util.distance_in_inches(w)
angle = shape_util.get_angle(camera, center_mass_x, center_mass_y)
font = cv2.FONT_HERSHEY_DUPLEX

Expand All @@ -85,21 +92,20 @@ def process(img, camera, frame_cnt, color_profile):

data = dict(shape='BALL',
radius=radius,
index=index,
dist=distance,
angle=angle,
xpos=center_mass_x,
ypos=center_mass_y)

if(not tracking_data):
tracking_data.append(data)
else:
for target in tracking_data:
if(data["dist"] < target["dist"]):
tracking_data.insert(tracking_data.index(target), data)

tracking_data.append(data)
# sorter goes here

# if len(tracking_data) == 0:
# tracking_data.append(data)
# else:
# for target in tracking_data:
# if distance < target["dist"]:
# tracking_data.insert(tracking_data.index(target), data)
# break
#labels image
radius_text = 'radius:%s' % (radius)
coordinate_text = 'x:%s y:%s ' % (center_mass_x, center_mass_y)
Expand All @@ -111,9 +117,8 @@ def process(img, camera, frame_cnt, color_profile):
cv2.putText(original_img, angle_text, (int(x), int(y) - 5), font, .4, colors.WHITE, 1, cv2.LINE_AA)
cv2.putText(original_img, radius_text, (int(x), int(y) - 50), font, .4, colors.WHITE, 1, cv2.LINE_AA)

cv2.circle(original_img, (int(center_mass_x), int(center_mass_y)), 5, colors.GREEN, -1)
cv2.drawContours(original_img, contours, index, colors.GREEN, 2)
cv2.line(original_img, (FRAME_WIDTH // 2, FRAME_HEIGHT), (int(center_mass_x), int(center_mass_y)), colors.GREEN, 2)
cv2.drawContours(original_img, contours, index, colors.GREEN, 2)

elif debug:

Expand All @@ -128,4 +133,6 @@ def process(img, camera, frame_cnt, color_profile):
top_center = (FRAME_WIDTH // 2, FRAME_HEIGHT)
bottom_center = (FRAME_WIDTH // 2, 0)
cv2.line(original_img, top_center, bottom_center, colors.WHITE, 4)

tracking_data = sorted(tracking_data, key = lambda i: i['dist'])
return original_img, tracking_data
34 changes: 17 additions & 17 deletions processing/bay_tracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@

from profiles import color_profiles

MIN_AREA = 10
MIN_AREA = 50
BAY_LENGTH = 7

#
Expand All @@ -29,29 +29,28 @@

def process(img, camera, frame_cnt, color_profile):
global rgb_window_active, hsv_window_active

FRAME_WIDTH = camera.FRAME_WIDTH
FRAME_HEIGHT = camera.FRAME_HEIGHT
hue = color_profile.hsv_hue
sat = color_profile.hsv_sat
val = color_profile.hsv_val

tracking_data = []
original_img = img

img = cv2.GaussianBlur(img, (13, 13), 0)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv_mask = cvfilters.hsv_threshold(img, color_profile)
img = cv2.bitwise_and(img, img, hsv_mask)
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(hsv, (hue.min, sat.min, val.min), (hue.max, sat.max, val.max))
img = cvfilters.apply_mask(img, mask)
img = cv2.erode(img, None, iterations=2)
img = cv2.dilate(img, None, iterations=2)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# if debug:
# cv2.imshow('hsv', img)


_, contours, hierarchy = cv2.findContours(img,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)

contour_list = []
# algorithm for detecting rectangular object (loading bay)
for (index, contour) in enumerate(contours):
Expand All @@ -63,35 +62,36 @@ def process(img, camera, frame_cnt, color_profile):
# limit the number of contours to process
#
num_vertices = shape_util.find_vertices(contour)

if area > MIN_AREA:
contour_list.append(contour)
center_mass_x = x + w / 2
center_mass_y = y + h / 2
#
if shape_util.dimensions_match(contour, 4, WIDTH_TO_HEIGHT_RATIO):
if shape_util.dimensions_match(contour, 4, 2, WIDTH_TO_HEIGHT_RATIO):
# print 'x:%s, y:%s angle:%s ' % ( center_mass_x, center_mass_y, angle )
distance = shape_util.distance_in_inches(w)
angle = shape_util.get_angle(camera, center_mass_x, center_mass_y)
font = cv2.FONT_HERSHEY_DUPLEX

# set tracking_data
data = dict(shape='BAY',
w=w,
h=h,
index=index,
width=w,
height=h,
dist=distance,
angle=angle,
xpos=center_mass_x,
ypos=center_mass_y)

if(not tracking_data):
if len(tracking_data) == 0:
tracking_data.append(data)
else:
for target in tracking_data:
if(data["dist"] < target["dist"]):
if distance < target["dist"]:
tracking_data.insert(tracking_data.index(target), data)
break



vertices_text = 'vertices:%s' % (num_vertices)
coordinate_text = 'x:%s y:%s ' % (center_mass_x, center_mass_y)
area_text = 'area:%s width:%s height:%s' % (area, w, h)
Expand All @@ -106,9 +106,9 @@ def process(img, camera, frame_cnt, color_profile):
cv2.drawContours(original_img, contours, index, colors.random(), 2)
#cv2.circle(original_img, (int(center_mass_x), int(center_mass_y)), 5, colors.GREEN, -1)
cv2.line(original_img, (FRAME_WIDTH // 2, FRAME_HEIGHT), (int(center_mass_x), int(center_mass_y)), colors.GREEN, 2)
elif debug:
# elif debug:

cv2.drawContours(original_img, contours, index, colors.random(), 2)
# cv2.drawContours(original_img, contours, index, colors.random(), 2)
#cv2.rectangle(original_img, (x, y), (x + w, y + h), colors.WHITE, 2)

# print the rectangle that did not match
Expand Down
2 changes: 1 addition & 1 deletion processing/color_calibrate.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ def process(image,
color_mode='rgb',
apply_mask=False):

image = cv2.resize(image, ((int)(640), (int)(400)), 0, 0, cv2.INTER_CUBIC)
image = cv2.resize(image, ((int)(640), (int)(480)), 0, 0, cv2.INTER_CUBIC)


if camera_mode != 'RAW':
Expand Down
Loading

0 comments on commit e9b8d94

Please sign in to comment.