This repository has been archived by the owner on Jun 19, 2020. It is now read-only.

Commit

added dual camera support
Your Name committed Feb 1, 2020
1 parent 5fdd819 commit 33534b0
Showing 12 changed files with 174 additions and 47 deletions.
Binary file added .config.py.swp
22 changes: 22 additions & 0 deletions calibrate.py
@@ -0,0 +1,22 @@
import network as networktables
from web import tornado_server

import logging
# initiate the top level logger
# logging.basicConfig(
# level=logging.INFO,
# format="%(asctime)s [%(name)s] [%(levelname)-5.5s] %(message)s",
# handlers=[
# logging.StreamHandler()
# ]
# )

logger = logging.getLogger('app')


def main():

tornado_server.start()  # enable_calibrate=True

if __name__ == '__main__':
main()
Binary file added cameras/.generic.py.swp
28 changes: 19 additions & 9 deletions cameras/camera.py
@@ -12,25 +12,35 @@ def __init__(self, source=0):
# self.HEIGHT = self.get(cv2.CAP_PROP_FRAME_HEIGHT),
# self.FPS = self.get(cv2.CAP_PROP_FPS)
# Set camera properties
self.cam = cv2.VideoCapture(source)
self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
self.cam.set(cv2.CAP_PROP_FPS, 120)
self.cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
self.cam.set(cv2.CAP_PROP_EXPOSURE, 0.02)
self.cam.set(cv2.CAP_PROP_CONTRAST, 0.0)
# self.cam = cv2.VideoCapture(source)
# self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
# self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
# self.cam.set(cv2.CAP_PROP_FPS, 120)
# self.cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
# self.cam.set(cv2.CAP_PROP_EXPOSURE, 0.02)
# self.cam.set(cv2.CAP_PROP_CONTRAST, 0.0)


def open(self, source):
self.cam = cv2.VideoCapture(source)
self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
self.cam.set(cv2.CAP_PROP_FPS, 120)
self.cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
self.cam.set(cv2.CAP_PROP_EXPOSURE, 0.02)
self.cam.set(cv2.CAP_PROP_CONTRAST, 0.0)


def read(self):
"""TBW."""
return super().cam.read()
return self.cam.read()

def read_image(self):
"""TBW."""
ok, raw = self.read()
return convert_raw_image(raw)

def getCam(self):
return self.cam

class Camera():

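For context, here is a minimal usage sketch of the reworked USBCam API above (not part of the commit; it assumes the cameras package and the config module from this repository, with device setup now happening in open() rather than in the constructor):

import cv2
import config
from cameras.camera import USBCam

cam = USBCam()                          # constructing no longer touches the device
cam.open(config.video_source_number)    # open() now owns the VideoCapture setup (640x480 @ 120 FPS)
ok, frame = cam.read()                  # delegates to cv2.VideoCapture.read()
if ok:
    cv2.imwrite('frame.jpg', frame)     # hypothetical sink, just to show the frame is usable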
3 changes: 2 additions & 1 deletion config.py
@@ -5,7 +5,8 @@
# when using static ip for robot
#config.networktables_server_ip = '10.26.1.11'

config.video_source_number = 0
config.video_source_number = 1
config.long_video_source_number = 0
config.networktables_table = 'SmartDashboard'

config.gstreamer_bitrate = '3500000'
2 changes: 1 addition & 1 deletion controls.py
@@ -14,7 +14,7 @@ def __init__(self):
self.enable_camera_feed = False
self.enable_calibration_feed = False
self.enable_processing_feed = True

self.enable_dual_camera = True
self.send_tracking_data = True

self.camera_mode = CAMERA_MODE_BALL
74 changes: 39 additions & 35 deletions main.py
@@ -5,10 +5,10 @@
from multiprocessing import Process

from processing import colors
import network as networktables
# import network as networktables

from cameras import logitech_c270, generic
from cameras import Camera
from cameras.camera import USBCam, Camera
from cameras import image_converter

from processing import bay_tracker
@@ -49,44 +49,37 @@
)

logger = logging.getLogger('app')
# module-level logger for the vision app


def main():
def main():  # vision pipeline entry point

cv2.destroyAllWindows()

networktables.init(client=False)
# networktables.init(client=False)

dashboard = networktables.get()
dashboard.putBoolean(networktables.keys.vision_initialized, True)
# dashboard = networktables.get()
# dashboard.putBoolean(networktables.keys.vision_initialized, True)

cv2.destroyAllWindows()

cap = cv2.VideoCapture(config.video_source_number)

enable_gstreamer_pipeline = False
# cap = cv2.VideoCapture(config.video_source_number)
# the raw VideoCapture is now created inside USBCam.open()
mainCam = USBCam()
mainCam.open(config.video_source_number)

out = None
if enable_gstreamer_pipeline:
out_pipeline = gst_utils.get_udp_sender(config.gstreamer_client_ip, config.gstreamer_client_port)

# out_pipeline = gst_utils.get_udp_streamer_pipeline2(config.gstreamer_client_ip,
# config.gstreamer_client_port,
# config.gstreamer_bitrate)
out = cv2.VideoWriter(out_pipeline, 0,
camera.FPS,
(camera.FRAME_WIDTH, camera.FRAME_HEIGHT),
True)
if main_controller.enable_dual_camera:
longCam = USBCam()
longCam.open(config.long_video_source_number)

# Set camera properties
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
cap.set(cv2.CAP_PROP_FPS, 120)
cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
cap.set(cv2.CAP_PROP_EXPOSURE, 0.02)
cap.set(cv2.CAP_PROP_CONTRAST, 0.0)
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
# cap.set(cv2.CAP_PROP_FPS, 120)
# cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
# cap.set(cv2.CAP_PROP_EXPOSURE, 0.02)
# cap.set(cv2.CAP_PROP_CONTRAST, 0.0)


cap = mainCam.getCam()
# Set camera properties
camera = Camera(cap.get(cv2.CAP_PROP_FRAME_WIDTH),
cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
@@ -122,10 +115,16 @@ def main():

if not cap.isOpened():
print('opening camera')
cap.open(config.video_source_number)

_, bgr_frame = cap.read()

if main_controller.enable_dual_camera:
longCam.open(config.long_video_source_number)
mainCam.open(config.video_source_number)
# reopen both cameras if the main capture was lost

if main_controller.camera_mode == CAMERA_MODE_HEXAGON and main_controller.enable_dual_camera:
_, bgr_frame = longCam.read()
else:
_, bgr_frame = mainCam.read()

resized_frame = cv2.resize(bgr_frame, ((int)(640), (int)(480)), 0, 0, cv2.INTER_CUBIC)
rgb_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB)

@@ -134,6 +133,7 @@ def main():

jpg=image_converter.convert_to_jpg(rgb_frame)
camera_ws.send_binary(jpg)
# encode the RGB frame as JPEG and send it over the camera websocket

if main_controller.enable_calibration_feed:

@@ -151,19 +151,22 @@
if main_controller.camera_mode == CAMERA_MODE_RAW:

processed_frame = rgb_frame
# raw mode: pass the RGB frame through unprocessed

elif main_controller.camera_mode == CAMERA_MODE_LOADING_BAY:

color_profile=main_controller.color_profiles[CAMERA_MODE_LOADING_BAY]
# use the loading-bay color profile

processed_frame, tracking_data = bay_tracker.process(rgb_frame,
camera,
frame_cnt,
color_profile)
# run loading-bay tracking and annotate the frame

elif main_controller.camera_mode == CAMERA_MODE_BALL:

color_profile=main_controller.color_profiles[CAMERA_MODE_BALL]
color_profile=main_controller.color_profiles[CAMERA_MODE_BALL]  # use the ball color profile
# print("ball")

processed_frame, tracking_data = ball_tracker2.process(rgb_frame,
@@ -183,7 +186,7 @@ def main():



if main_controller.enable_processing_feed:
if main_controller.enable_processing_feed:  # overlay status text on the processed feed before streaming

cv2.putText(processed_frame,
'Tracking Mode %s' % main_controller.camera_mode,
@@ -201,10 +204,11 @@ def main():
# out.write(frame)
if tracking_data is not None and main_controller.send_tracking_data:
# sort tracking data by closest object
logger.info(tracking_data)
tracking_data = sorted(tracking_data, key = lambda i: i['dist'])
tracking_ws.send(json.dumps(dict(targets=tracking_data)))
# put into networktables
dashboard.putStringArray(networktables.keys.vision_target_data, tracking_data)
# dashboard.putStringArray(networktables.keys.vision_target_data, tracking_data)

# cv2.imshow('frame', processed_frame )
# cv2.waitKey(0)
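The dual-camera handling above boils down to a small selection rule; a sketch of that rule as a standalone helper (illustrative only, meant to sit in main.py where CAMERA_MODE_HEXAGON, main_controller, mainCam and longCam are already defined; the helper name select_frame is hypothetical):

def select_frame(controller, main_cam, long_cam):
    """Read from the long camera in hexagon mode, otherwise from the main (wide) camera."""
    if controller.camera_mode == CAMERA_MODE_HEXAGON and controller.enable_dual_camera:
        return long_cam.read()
    return main_cam.read()

# e.g. inside the frame loop:
#   ok, bgr_frame = select_frame(main_controller, mainCam, longCam)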
2 changes: 1 addition & 1 deletion network/connection.py
@@ -26,7 +26,7 @@ def connectionListener(connected, info):
notified[0] = True
cond.notify()

if client is True:
if client is True:  # in client mode, connect NetworkTables to the configured server IP
NetworkTables.initialize(server=config.networktables_server_ip)
NetworkTables.addConnectionListener(connectionListener, immediateNotify=True)
with cond:
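For reference, a minimal client-side sketch of the pynetworktables pattern this module wraps (an illustration, not part of the diff; the server IP, table name, and key mirror values seen in config.py and main.py):

from networktables import NetworkTables

NetworkTables.initialize(server='10.26.1.11')      # static robot IP, commented out in config.py
table = NetworkTables.getTable('SmartDashboard')   # matches config.networktables_table
table.putBoolean('vision_initialized', True)       # hypothetical key name, mirroring networktables.keys usage in main.py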
Binary file added processing/.ball_tracker2.py.swp
45 changes: 45 additions & 0 deletions profiles/color_profile_BALL.json
@@ -0,0 +1,45 @@
{
"camera_mode": "BALL",
"rgb": {
"r": {
"min": "107",
"max": "152"
},
"b": {
"min": "21",
"max": "78"
},
"g": {
"min": "118",
"max": 156
}
},
"hsv": {
"h": {
"min": 0,
"max": 37
},
"s": {
"min": 155,
"max": 210
},
"v": {
"min": 108,
"max": 152
}
},
"hsl": {
"h": {
"min": 0,
"max": 255
},
"s": {
"min": 0,
"max": 255
},
"l": {
"min": 0,
"max": 255
}
}
}
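These profile files store per-channel min/max bounds, with a mix of quoted and unquoted numbers; a short sketch of how such a profile might be loaded and applied with OpenCV (an illustration, not code from this commit; the input image is hypothetical):

import json

import cv2
import numpy as np

with open('profiles/color_profile_BALL.json') as f:
    profile = json.load(f)

# cast to int so quoted values like "107" and bare values like 156 both work
hsv = profile['hsv']
lower = np.array([int(hsv[c]['min']) for c in ('h', 's', 'v')], dtype=np.uint8)
upper = np.array([int(hsv[c]['max']) for c in ('h', 's', 'v')], dtype=np.uint8)

bgr = cv2.imread('sample.jpg')                     # hypothetical test frame
mask = cv2.inRange(cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV), lower, upper)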
45 changes: 45 additions & 0 deletions profiles/color_profile_BAY.json
@@ -0,0 +1,45 @@
{
"camera_mode": "BAY",
"rgb": {
"r": {
"min": 0,
"max": 255
},
"b": {
"min": 0,
"max": 255
},
"g": {
"min": 0,
"max": 255
}
},
"hsv": {
"h": {
"min": "28",
"max": "199"
},
"s": {
"min": "80",
"max": "255"
},
"v": {
"min": "165",
"max": 255
}
},
"hsl": {
"h": {
"min": 0,
"max": 255
},
"s": {
"min": 0,
"max": 255
},
"l": {
"min": 0,
"max": 255
}
}
}
Binary file added web/.tornado_server.py.swp
