Skip to content
This repository has been archived by the owner on Jun 19, 2020. It is now read-only.

Commit

Permalink
use tornado for object tracking
Browse files Browse the repository at this point in the history
  • Loading branch information
Weijian Zeng committed Jan 17, 2020
1 parent 6bc8a31 commit 3150ccf
Show file tree
Hide file tree
Showing 15 changed files with 740 additions and 116 deletions.
6 changes: 4 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,10 @@ Vision processing code written in python3 for 2020 FRC Competition

### dependencies
- pynetworktables
- cv2
- cv2

### notes
if running on the nano, no virtualenv is required. If virtualenv is used, create with global site-packages enabled to pull in openCV2
if running on the nano, no virtualenv is required. If virtualenv is used, create with global site-packages enabled to pull in openCV2
```
virtualenv -p python3 --system-site-packages 2020Vision
```
Expand All @@ -16,3 +16,5 @@ Vision processing code written in python3 for 2020 FRC Competition
```
python main.py
```

out = cv2.VideoWriter( gst_utils.get_udp_sender(host='192.168.1.10', port='5000'), 0,25.0,(640,480))
1 change: 1 addition & 0 deletions cameras/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .camera import Camera
9 changes: 9 additions & 0 deletions cameras/camera.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@


class Camera():
    """Value object holding capture properties for a video source.

    Width, height and FPS are coerced to int; the focal length is stored
    as given (defaults to 0 when unknown).
    """

    def __init__(self, width, height, fps, flength=0):
        # Frame geometry and rate as whole pixels / whole frames-per-second.
        self.FRAME_WIDTH, self.FRAME_HEIGHT = int(width), int(height)
        self.FPS = int(fps)
        # Kept un-coerced: may be a float, or 0 when not calibrated.
        self.FOCAL_LENGTH = flength
130 changes: 95 additions & 35 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,8 @@
from processing import port_tracker
from processing import ball_tracker
from controls import main_controller

import _thread as thread
import time

from processing import filters

Expand All @@ -22,6 +23,11 @@
import logging

import start_web
import websocket
from websocket import create_connection
import ujson as json

from cameras import Camera

# initiate the top level logger
logging.basicConfig(
Expand All @@ -34,79 +40,133 @@

logger = logging.getLogger('app')


def main():

networktables.init(client=False)

dashboard = networktables.get()

dashboard.putBoolean(networktables.keys.vision_initialized, True)

main_controller.connect()

cap = cv2.VideoCapture(config.video_source_number)

out_pipeline = gst_utils.get_udp_streamer_pipeline2(config.gstreamer_client_ip,
config.gstreamer_client_port,
config.gstreamer_bitrate)
# out_pipeline = gst_utils.get_udp_streamer_pipeline2(config.gstreamer_client_ip,
# config.gstreamer_client_port,
# config.gstreamer_bitrate)

out_pipeline = gst_utils.get_udp_sender(config.gstreamer_client_ip,
config.gstreamer_client_port)

out = cv2.VideoWriter(out_pipeline, 0, generic.FPS, (generic.FRAME_WIDTH, generic.FRAME_HEIGHT), True)

# Set camera properties
cap.set(cv2.CAP_PROP_FRAME_WIDTH, generic.FRAME_WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, generic.FRAME_HEIGHT)
cap.set(cv2.CAP_PROP_FPS, generic.FPS)
camera = Camera(cap.get(cv2.CAP_PROP_FRAME_WIDTH),
cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
cap.get(cv2.CAP_PROP_FPS))

# print([camera.FRAME_WIDTH])
# print([camera.FRAME_HEIGHT])
# print([camera.FPS])

out = cv2.VideoWriter(out_pipeline, 0,
camera.FPS,
(camera.FRAME_WIDTH, camera.FRAME_HEIGHT),
True)

#TODO: if no camera, exit and msg no camera
logger.info('starting main loop')
time.sleep(5)


#websocket.enableTrace(True)

def update_controls(ws, message):
logger.info(message)

def ws_closed(ws):
logger.info('closed socket')

def on_error(ws, error):
print(error)

# tracking_ws = create_connection("wss://localhost:8080/tracking/ws/")
#

def on_open(ws):
def run(*args):
for i in range(3):
time.sleep(1)
ws.send("Hello %d" % i)
time.sleep(1)
ws.close()
print("thread terminating...")
thread.start_new_thread(run, ())

tracking_ws = create_connection("ws://localhost:8080/tracking/ws")

def start_dashboard_socket(*args):
dashboard_ws = websocket.WebSocketApp("ws://localhost:8080/dashboard/ws",
on_message = update_controls,
on_close=ws_closed,
on_error = on_error)
dashboard.on_open = on_open
dashboard_ws.run_forever()

thread.start_new_thread(start_dashboard_socket, ())

logger.info('starting main loop ')

frame_cnt = 0
while(True):

if True or main_controller.enable_camera:
frame_cnt += 1

if True or main_controller.enable_camera:

if not cap.isOpened():
if not cap.isOpened():
print('opening camera')
cap.open(config.video_source_number)

_, frame = cap.read()

#frame = filters.resize(frame, camera.FRAME_WIDTH, camera.FRAME_HEIGHT)

if main_controller.camera_mode == CAMERA_MODE_RAW:

frame = frame

elif main_controller.camera_mode == CAMERA_MODE_LOADING_BAY:

frame = bay_tracker.process(frame,
generic,
color_profiles.ReflectiveProfile())
# elif main_controller.camera_mode == CAMERA_MODE_LOADING_BAY:
#
# frame = bay_tracker.process(frame,
# generic,
# color_profiles.ReflectiveProfile())

elif main_controller.camera_mode == CAMERA_MODE_BALL:

frame, tracking_data = ball_tracker.process(frame, generic, color_profiles.BallProfile())
dashboard.send_tracking(tracking_data)
frame, tracking_data = ball_tracker.process(frame,
camera,
frame_cnt)

tracking_ws.send(json.dumps(dict(targets=tracking_data)))

elif main_controller.camera_mode == CAMERA_MODE_HEXAGON:

frame = port_tracker.process(frame, generic, color_profiles.ReflectiveProfile())


if main_controller.enable_streaming:
# always output to 640x480
frame = filters.resize(frame, 640, 480)

cv2.putText(frame,
'Tracking Mode %s' % main_controller.camera_mode,
(10,10),
cv2.FONT_HERSHEY_DUPLEX,
.4,
colors.BLUE,
1,
cv2.putText(frame,
'Tracking Mode %s' % main_controller.camera_mode,
(10,10),
cv2.FONT_HERSHEY_DUPLEX,
.4,
colors.BLUE,
1,
cv2.LINE_AA)


out.write(frame)
cv2.imshow('frame', frame )
cv2.waitKey(1)

#cv2.imshow('frame', frame )
#v2.waitKey(1)

else:
# IDLE mode
Expand All @@ -133,6 +193,6 @@ def single_frame(debug=False):

if __name__ == '__main__':
p = Process(target=start_web.main)
p.start()
p.start()
main()
p.join()
147 changes: 147 additions & 0 deletions processing/ball_tracker2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
"""
2020 Frc Infinite Recharge
Ball Intake Detection
uses contour lines, rough area calculations
width/height ratios, and radius of contours found
in masked image to find ball
"""

import math
import cv2
from processing import colors
from processing import cvfilters
from processing import shape_util
import time

from profiles import color_profile

import network

MIN_AREA = 1000
BALL_RADIUS = 3.5


debug = False

#
BALL_COLOR_PROFILE = color_profile.ColorProfile()
#
# BALL_COLOR_PROFILE.hsv_hue.min = 10
# BALL_COLOR_PROFILE.hsv_hue.max = 30
# BALL_COLOR_PROFILE.hsv_sat.min = 124
# BALL_COLOR_PROFILE.hsv_sat.max = 255
# BALL_COLOR_PROFILE.hsv_val.min = 138
# BALL_COLOR_PROFILE.hsv_val.max = 255
#

# yellow_ball.mp4
BALL_COLOR_PROFILE.hsv_hue.min = 19
BALL_COLOR_PROFILE.hsv_hue.max = 134
BALL_COLOR_PROFILE.hsv_sat.min = 115
BALL_COLOR_PROFILE.hsv_sat.max = 255
BALL_COLOR_PROFILE.hsv_val.min = 105
BALL_COLOR_PROFILE.hsv_val.max = 255


def process(img, camera, frame_cnt):
    """Detect balls in a BGR frame; return (annotated_frame, tracking_data).

    Parameters:
        img       -- BGR frame from the capture device (annotated in place).
        camera    -- Camera object providing FRAME_WIDTH/FRAME_HEIGHT/FOCAL_LENGTH.
        frame_cnt -- running frame counter, echoed into each tracking record.

    Returns:
        original_img  -- the input frame with contours/labels drawn on it.
        tracking_data -- list of dicts with keys shape/radius/index/dist/
                         angle/frame/xpos/ypos, one per accepted contour.
    """
    tracking_data = []
    original_img = img

    # Blur, HSV-threshold, then a morphological open (erode + dilate)
    # to isolate ball-colored blobs and drop speckle noise.
    img = cv2.GaussianBlur(img, (13, 13), 0)
    img = cvfilters.hsv_threshold(img, BALL_COLOR_PROFILE)
    img = cv2.erode(img, None, iterations=2)
    img = cv2.dilate(img, None, iterations=2)

    if debug:
        cv2.imshow('hsv', img)

    contours, hierarchy = cv2.findContours(img,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

    contour_list = []

    for (index, contour) in enumerate(contours):

        peri = cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, 0.04 * peri, True)
        area = cv2.contourArea(approx)
        x, y, w, h = cv2.boundingRect(approx)

        # Skip small blobs outright; they are noise, not balls.
        if area > MIN_AREA:
            contour_list.append(contour)
            center_mass_x = x + w / 2
            center_mass_y = y + h / 2

            # Enclosing circle of the raw contour. NOTE: this rebinds
            # x, y to the circle center (used for label placement) and
            # w, h to the raw contour's bounding box (used for distance).
            ((x, y), radius) = cv2.minEnclosingCircle(contour)
            (x, y) = (int(x), int(y))
            _, _, w, h = cv2.boundingRect(contour)

            # Empirical pixel-width -> inches fit. This supersedes the
            # old shape_util.get_distance(w, 2 * radius, camera.FOCAL_LENGTH)
            # call, whose result was immediately overwritten (and which
            # could divide by a zero FOCAL_LENGTH, Camera's default).
            distance = 6520 * (w ** -1.02)

            # Accept the contour only when the enclosing-circle radius is
            # within 10% of the expected ball radius.
            # NOTE(review): radius is in pixels while BALL_RADIUS (3.5) is
            # in inches -- as written this gate only matches blobs whose
            # pixel radius is ~3.2..3.9; confirm the intended units.
            if BALL_RADIUS * 0.9 <= radius <= BALL_RADIUS * 1.10:
                cv2.circle(original_img, (x, y), int(radius),
                           colors.GREEN, 2)
                angle = shape_util.get_angle(camera, center_mass_x, center_mass_y)
                font = cv2.FONT_HERSHEY_DUPLEX

                # Build the overlay labels before truncating the values.
                radius_text = 'radius:%s' % (radius)
                coordinate_text = 'x:%s y:%s ' % (center_mass_x, center_mass_y)
                area_text = 'area:%s width:%s height:%s' % (area, w, h)
                angle_text = 'angle:%.2f distance:%s' % (angle, distance)

                distance = int(distance)
                angle = int(angle)
                radius = int(radius)

                # Record one tracking entry per accepted ball.
                tracking_data.append(dict(shape='BALL',
                                          radius=radius,
                                          index=index,
                                          dist=int(distance),
                                          angle=angle,
                                          frame=frame_cnt,
                                          xpos=center_mass_x,
                                          ypos=center_mass_y))

                cv2.putText(original_img, coordinate_text, (x, y - 35), font, .4, colors.WHITE, 1, cv2.LINE_AA)
                cv2.putText(original_img, area_text, (x, y - 20), font, .4, colors.WHITE, 1, cv2.LINE_AA)
                cv2.putText(original_img, angle_text, (x, y - 5), font, .4, colors.WHITE, 1, cv2.LINE_AA)
                cv2.putText(original_img, radius_text, (x, y - 50), font, .4, colors.WHITE, 1, cv2.LINE_AA)

                cv2.drawContours(original_img, contours, index, colors.GREEN, 2)
                cv2.circle(original_img, (int(center_mass_x), int(center_mass_y)), 5, colors.GREEN, -1)

    return original_img, tracking_data
5 changes: 5 additions & 0 deletions processing/shape_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,15 @@ def find_vertices(contour):
approx = cv2.approxPolyDP(contour, 0.04 * peri, True)
return len(approx)


def get_angle(camera, x, y):
    """Return the angle in degrees from the frame's bottom-center
    vertical axis to the point (x, y)."""
    horiz = float(abs(camera.FRAME_WIDTH / 2 - x))
    vert = float(camera.FRAME_HEIGHT - y)

    # Point on the bottom edge: the vertical leg is zero, so define the
    # angle as 0 rather than dividing by zero.
    if vert == 0:
        return 0

    return math.degrees(math.atan(horiz / vert))
Expand Down
Loading

0 comments on commit 3150ccf

Please sign in to comment.