diff --git a/README.md b/README.md
index df226e7..5526fb2 100644
--- a/README.md
+++ b/README.md
@@ -4,10 +4,10 @@ Vision processing code written in python3 for 2020 FRC Competition
 ### dependencies
 - pynetworktables
-- cv2
+ - cv2
 ### notes
-if running on the nano, no virtualenv is required. If virtualenv is used, create with global site-packages enabled to pull in openCV2
+If running on the Nano, no virtualenv is required. If a virtualenv is used, create it with global site-packages enabled so it picks up the system OpenCV:
 ```
 virtualenv -p python3 --system-site-packages 2020Vision
 ```
@@ -16,3 +16,8 @@ Vision processing code written in python3 for 2020 FRC Competition
 ```
 python main.py
 ```
+
+To stream frames over UDP, open a `cv2.VideoWriter` on a GStreamer sender pipeline:
+```
+out = cv2.VideoWriter(gst_utils.get_udp_sender(host='192.168.1.10', port='5000'), 0, 25.0, (640, 480))
+```
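`gst_utils` itself is not part of this diff, so the exact pipeline `get_udp_sender` builds is an assumption here; judging from the hard-coded string in `test_stream.py` further down, it presumably returns an appsrc → x264 → RTP → udpsink description along these lines:

```python
# Hypothetical sketch of gst_utils.get_udp_sender -- the real helper is not
# shown in this diff; the pipeline mirrors the one hard-coded in test_stream.py.
def get_udp_sender(host, port):
    # appsrc: frames pushed in by cv2.VideoWriter; x264enc tuned for low
    # latency; rtph264pay packetizes to RTP; udpsink ships it to the receiver.
    return ('appsrc ! videoconvert ! '
            'x264enc tune=zerolatency bitrate=500 speed-preset=superfast ! '
            'rtph264pay ! udpsink host=%s port=%s' % (host, port))
```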
diff --git a/cameras/__init__.py b/cameras/__init__.py
index e69de29..0cbbbe7 100644
--- a/cameras/__init__.py
+++ b/cameras/__init__.py
@@ -0,0 +1 @@
+from .camera import Camera
diff --git a/cameras/camera.py b/cameras/camera.py
new file mode 100644
index 0000000..d2cbc42
--- /dev/null
+++ b/cameras/camera.py
@@ -0,0 +1,9 @@
+
+
+class Camera():
+
+    def __init__(self, width, height, fps, flength=0):
+        self.FRAME_WIDTH = int(width)
+        self.FRAME_HEIGHT = int(height)
+        self.FOCAL_LENGTH = flength
+        self.FPS = int(fps)
diff --git a/main.py b/main.py
index 9f28dcc..79b056d 100644
--- a/main.py
+++ b/main.py
@@ -12,7 +12,8 @@
 from processing import port_tracker
 from processing import ball_tracker
 from controls import main_controller
-
+import _thread as thread
+import time
 
 from processing import filters
 
@@ -22,6 +23,11 @@
 import logging
 
 import start_web
+import websocket
+from websocket import create_connection
+import ujson as json
+
+from cameras import Camera
 
 # initiate the top level logger
 logging.basicConfig(
@@ -34,55 +40,112 @@
 logger = logging.getLogger('app')
 
+
 def main():
 
     networktables.init(client=False)
 
     dashboard = networktables.get()
 
-    dashboard.putBoolean(networktables.keys.vision_initialized, True)
-
-    main_controller.connect()
-
     cap = cv2.VideoCapture(config.video_source_number)
 
-    out_pipeline = gst_utils.get_udp_streamer_pipeline2(config.gstreamer_client_ip,
-                                                        config.gstreamer_client_port,
-                                                        config.gstreamer_bitrate)
-
-    out = cv2.VideoWriter(out_pipeline, 0, generic.FPS, (generic.FRAME_WIDTH, generic.FRAME_HEIGHT), True)
+    # out_pipeline = gst_utils.get_udp_streamer_pipeline2(config.gstreamer_client_ip,
+    #                                                     config.gstreamer_client_port,
+    #                                                     config.gstreamer_bitrate)
+
+    out_pipeline = gst_utils.get_udp_sender(config.gstreamer_client_ip,
+                                            config.gstreamer_client_port)
 
     # Set camera properties
-    cap.set(cv2.CAP_PROP_FRAME_WIDTH, generic.FRAME_WIDTH)
-    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, generic.FRAME_HEIGHT)
-    cap.set(cv2.CAP_PROP_FPS, generic.FPS)
+    camera = Camera(cap.get(cv2.CAP_PROP_FRAME_WIDTH),
+                    cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
+                    cap.get(cv2.CAP_PROP_FPS))
+
+    # print([camera.FRAME_WIDTH])
+    # print([camera.FRAME_HEIGHT])
+    # print([camera.FPS])
+
+    out = cv2.VideoWriter(out_pipeline, 0,
+                          camera.FPS,
+                          (camera.FRAME_WIDTH, camera.FRAME_HEIGHT),
+                          True)
 
     #TODO: if no camera, exit and msg no camera
-    logger.info('starting main loop')
+    time.sleep(5)
+
+    #websocket.enableTrace(True)
+
+    def update_controls(ws, message):
+        logger.info(message)
+
+    def ws_closed(ws):
+        logger.info('closed socket')
+
+    def on_error(ws, error):
+        logger.error(error)
+
+    # tracking_ws = create_connection("wss://localhost:8080/tracking/ws/")
+    #
+
+    def on_open(ws):
+        # smoke-test the socket from a background thread once it opens
+        def run(*args):
+            for i in range(3):
+                time.sleep(1)
+                ws.send("Hello %d" % i)
+            time.sleep(1)
+            ws.close()
+            print("thread terminating...")
+        thread.start_new_thread(run, ())
+
+    tracking_ws = create_connection("ws://localhost:8080/tracking/ws")
+
+    def start_dashboard_socket(*args):
+        dashboard_ws = websocket.WebSocketApp("ws://localhost:8080/dashboard/ws",
+                                              on_message=update_controls,
+                                              on_close=ws_closed,
+                                              on_error=on_error)
+        dashboard_ws.on_open = on_open
+        dashboard_ws.run_forever()
+
+    thread.start_new_thread(start_dashboard_socket, ())
+
+    logger.info('starting main loop')
+
+    frame_cnt = 0
     while(True):
 
-        if True or main_controller.enable_camera:
+        frame_cnt += 1
+
+        if True or main_controller.enable_camera:
 
-            if not cap.isOpened():
+            if not cap.isOpened():
                 print('opening camera')
                 cap.open(config.video_source_number)
 
             _, frame = cap.read()
 
+            #frame = filters.resize(frame, camera.FRAME_WIDTH, camera.FRAME_HEIGHT)
+
             if main_controller.camera_mode == CAMERA_MODE_RAW:
 
                 frame = frame
 
-            elif main_controller.camera_mode == CAMERA_MODE_LOADING_BAY:
-
-                frame = bay_tracker.process(frame,
-                                            generic,
-                                            color_profiles.ReflectiveProfile())
+            # elif main_controller.camera_mode == CAMERA_MODE_LOADING_BAY:
+            #
+            #     frame = bay_tracker.process(frame,
+            #                                 generic,
+            #                                 color_profiles.ReflectiveProfile())
 
             elif main_controller.camera_mode == CAMERA_MODE_BALL:
 
-                frame, tracking_data = ball_tracker.process(frame, generic, color_profiles.BallProfile())
-                dashboard.send_tracking(tracking_data)
+                frame, tracking_data = ball_tracker.process(frame,
+                                                            camera,
+                                                            frame_cnt)
+
+                tracking_ws.send(json.dumps(dict(targets=tracking_data)))
 
             elif main_controller.camera_mode == CAMERA_MODE_HEXAGON:
 
@@ -90,23 +153,20 @@ def main():
 
             if main_controller.enable_streaming:
 
-                # always output to 640x480
-                frame = filters.resize(frame, 640, 480)
-
-                cv2.putText(frame,
-                            'Tracking Mode %s' % main_controller.camera_mode,
-                            (10,10),
-                            cv2.FONT_HERSHEY_DUPLEX,
-                            .4,
-                            colors.BLUE,
-                            1,
+                cv2.putText(frame,
+                            'Tracking Mode %s' % main_controller.camera_mode,
+                            (10,10),
+                            cv2.FONT_HERSHEY_DUPLEX,
+                            .4,
+                            colors.BLUE,
+                            1,
                             cv2.LINE_AA)
 
                 out.write(frame)
 
-                cv2.imshow('frame', frame )
-                cv2.waitKey(1)
+                #cv2.imshow('frame', frame)
+                #cv2.waitKey(1)
 
         else:
             # IDLE mode
 
@@ -133,6 +193,6 @@ def single_frame(debug=False):
 
 if __name__ == '__main__':
     p = Process(target=start_web.main)
-    p.start()
+    p.start()
     main()
     p.join()
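For reference, the `targets` message pushed to `/tracking/ws` above is just the list of per-contour dicts built in `ball_tracker2.py`, serialized with `ujson`. A single frame's payload looks roughly like this (the field names come from the tracker; the values here are invented for illustration):

```python
# Illustrative payload only -- keys match ball_tracker2.py's tracking_data
# entries, but these particular numbers are made up.
payload = {
    "targets": [
        {"shape": "BALL", "radius": 12, "index": 0, "dist": 48,
         "angle": 7, "frame": 3112, "xpos": 371.5, "ypos": 288.0},
    ]
}
```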
diff --git a/processing/ball_tracker2.py b/processing/ball_tracker2.py
new file mode 100644
index 0000000..f1042ae
--- /dev/null
+++ b/processing/ball_tracker2.py
@@ -0,0 +1,147 @@
+"""
+2020 FRC Infinite Recharge
+Ball intake detection: uses contours, rough area calculations,
+width/height ratios, and the radius of contours found
+in the masked image to find the ball.
+"""
+
+import math
+import cv2
+from processing import colors
+from processing import cvfilters
+from processing import shape_util
+import time
+
+from profiles import color_profile
+
+import network
+
+MIN_AREA = 1000
+BALL_RADIUS = 3.5
+
+
+debug = False
+
+
+BALL_COLOR_PROFILE = color_profile.ColorProfile()
+
+# BALL_COLOR_PROFILE.hsv_hue.min = 10
+# BALL_COLOR_PROFILE.hsv_hue.max = 30
+# BALL_COLOR_PROFILE.hsv_sat.min = 124
+# BALL_COLOR_PROFILE.hsv_sat.max = 255
+# BALL_COLOR_PROFILE.hsv_val.min = 138
+# BALL_COLOR_PROFILE.hsv_val.max = 255
+
+# thresholds tuned against yellow_ball.mp4
+BALL_COLOR_PROFILE.hsv_hue.min = 19
+BALL_COLOR_PROFILE.hsv_hue.max = 134
+BALL_COLOR_PROFILE.hsv_sat.min = 115
+BALL_COLOR_PROFILE.hsv_sat.max = 255
+BALL_COLOR_PROFILE.hsv_val.min = 105
+BALL_COLOR_PROFILE.hsv_val.max = 255
+
+
+def process(img, camera, frame_cnt):
+    global rgb_window_active, hsv_window_active
+
+    tracking_data = []
+    original_img = img
+
+    img = cv2.GaussianBlur(img, (13, 13), 0)
+
+    # mask to the ball color, then clean up speckle with erode/dilate
+    img = cvfilters.hsv_threshold(img, BALL_COLOR_PROFILE)
+    img = cv2.erode(img, None, iterations=2)
+    img = cv2.dilate(img, None, iterations=2)
+
+    if debug:
+        cv2.imshow('hsv', img)
+
+    contours, hierarchy = cv2.findContours(img,
+                                           cv2.RETR_EXTERNAL,
+                                           cv2.CHAIN_APPROX_SIMPLE)
+
+    contour_list = []
+
+    for (index, contour) in enumerate(contours):
+
+        peri = cv2.arcLength(contour, True)
+        approx = cv2.approxPolyDP(contour, 0.04 * peri, True)
+        area = cv2.contourArea(approx)
+        x, y, w, h = cv2.boundingRect(approx)
+
+        # limit the number of contours to process
+        #print('%s area:%s' % (index, area))
+        if area > MIN_AREA:
+            contour_list.append(contour)
+            center_mass_x = x + w / 2
+            center_mass_y = y + h / 2
+
+            ((x, y), radius) = cv2.minEnclosingCircle(contour)
+            (x, y) = (int(x), int(y))
+            _, _, w, h = cv2.boundingRect(contour)
+
+            # a ball's bounding box should be roughly as wide as it is tall;
+            # that ratio test is currently stubbed out
+            # print('x: %s y:%s ratio:%s' % (w, h, w/h))
+            if True:
+                distance = shape_util.get_distance(w, 2 * radius, camera.FOCAL_LENGTH)
+                # overwrite with an empirical fit that converts pixel width to inches
+                distance = 6520 * (w ** -1.02)
+                # print(distance * radius ** 2)
+
+                # checks if the enclosing-circle radius is near the expected radius
+                if BALL_RADIUS * 0.9 <= radius <= BALL_RADIUS * 1.10:
+                    cv2.circle(original_img, (x, y), int(radius),
+                               colors.GREEN, 2)
+
+                # print 'x:%s, y:%s angle:%s ' % (center_mass_x, center_mass_y, angle)
+                angle = shape_util.get_angle(camera, center_mass_x, center_mass_y)
+                font = cv2.FONT_HERSHEY_DUPLEX
+
+                # label the image
+                radius_text = 'radius:%s' % (radius)
+                coordinate_text = 'x:%s y:%s ' % (center_mass_x, center_mass_y)
+                area_text = 'area:%s width:%s height:%s' % (area, w, h)
+                angle_text = 'angle:%.2f distance:%s' % (angle, distance)
+
+                distance = int(distance)
+                angle = int(angle)
+                radius = int(radius)
+
+                # set tracking_data
+                tracking_data.append(dict(shape='BALL',
+                                          radius=radius,
+                                          index=index,
+                                          dist=int(distance),
+                                          angle=angle,
+                                          frame=frame_cnt,
+                                          xpos=center_mass_x,
+                                          ypos=center_mass_y))
+
+                cv2.putText(original_img, coordinate_text, (x, y - 35), font, .4, colors.WHITE, 1, cv2.LINE_AA)
+                cv2.putText(original_img, area_text, (x, y - 20), font, .4, colors.WHITE, 1, cv2.LINE_AA)
+                cv2.putText(original_img, angle_text, (x, y - 5), font, .4, colors.WHITE, 1, cv2.LINE_AA)
+                cv2.putText(original_img, radius_text, (x, y - 50), font, .4, colors.WHITE, 1, cv2.LINE_AA)
+
+                cv2.drawContours(original_img, contours, index, colors.GREEN, 2)
+                cv2.circle(original_img, (int(center_mass_x), int(center_mass_y)), 5, colors.GREEN, -1)
+                #cv2.line(original_img, (FRAME_WIDTH // 2, FRAME_HEIGHT), (int(center_mass_x), int(center_mass_y)), colors.GREEN, 2)
+
+        #if debug:
+        #    cv2.drawContours(original_img, contours, index, colors.random(), 2)
+        #    cv2.rectangle(original_img, (x, y), (x + w, y + h), colors.WHITE, 2)
+        #    print the rectangle that did not match
+        #    print 'square: %s,%s' % (w,h)
+        #    print w/h, h/w
+
+    #top_center = (FRAME_WIDTH // 2, FRAME_HEIGHT)
+    #bottom_center = (FRAME_WIDTH // 2, 0)
+    #cv2.line(original_img, top_center, bottom_center, colors.WHITE, 4)
+    return original_img, tracking_data
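Note the two distance estimates above: `shape_util.get_distance` is computed and then immediately overwritten by the empirical power-law fit `6520 * w ** -1.02`. The body of `get_distance` is not in this diff, so the sketch below shows the standard pinhole-camera relation such a helper usually implements; the actual signature and behavior may differ:

```python
# Sketch of the pinhole-camera distance relation. shape_util.get_distance is
# not shown in this diff, so treat this function and its signature as an
# assumption, not the repo's actual code.
def get_distance_pinhole(real_width, pixel_width, focal_length_px):
    # An object of real_width appears pixel_width pixels wide when it is
    # (real_width * focal_length) / pixel_width away from the lens.
    return (real_width * focal_length_px) / pixel_width

# The fitted curve used instead maps bounding-box width in pixels to inches:
#   distance = 6520 * w ** -1.02
# which is close to the 1/w falloff the pinhole model predicts.
```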
diff --git a/processing/shape_util.py b/processing/shape_util.py
index 14cfd60..4bf8b8e 100644
--- a/processing/shape_util.py
+++ b/processing/shape_util.py
@@ -35,10 +35,15 @@ def find_vertices(contour):
     approx = cv2.approxPolyDP(contour, 0.04 * peri, True)
     return len(approx)
 
+
 def get_angle(camera, x, y):
+
     a = float(abs(camera.FRAME_WIDTH / 2 - x))
     b = float(camera.FRAME_HEIGHT - y)
+    if b == 0:
+        return 0
+
     radians = math.atan(a / b)
     angle = radians * 180 / math.pi
     return angle
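`get_angle` measures the bearing from the bottom-center of the frame to the target: `a` is the horizontal offset from the image midline, `b` the vertical distance to the bottom edge, and `atan(a/b)` the angle between them (the new `b == 0` guard avoids a division by zero for targets on the bottom row). A quick worked example on an assumed 640x480 frame:

```python
import math

# Worked example of get_angle's geometry; the 640x480 frame is an assumption.
FRAME_WIDTH, FRAME_HEIGHT = 640, 480
x, y = 400, 240               # target center in pixels

a = abs(FRAME_WIDTH / 2 - x)  # 80 px right of the midline
b = FRAME_HEIGHT - y          # 240 px above the bottom edge
angle = math.degrees(math.atan(a / b))
print(round(angle, 1))        # -> 18.4 degrees off boresight
```

Because of the `abs()`, targets left and right of center produce the same positive angle; callers get magnitude only, not direction.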
diff --git a/test_stream.py b/test_stream.py
new file mode 100644
index 0000000..830e1b6
--- /dev/null
+++ b/test_stream.py
@@ -0,0 +1,42 @@
+import time
+import cv2
+
+# Cam properties
+fps = 30.
+frame_width = 640
+frame_height = 480
+
+# Create capture
+cap = cv2.VideoCapture(0)
+
+# Set camera properties
+cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
+cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
+cap.set(cv2.CAP_PROP_FPS, fps)
+
+# Define the gstreamer sink
+gst_str_rtp = "appsrc ! videoconvert ! x264enc tune=zerolatency bitrate=500 speed-preset=superfast ! rtph264pay ! udpsink host=127.0.0.1 port=5000"
+
+# Check if cap is open
+if not cap.isOpened():
+    print("Cannot open camera. Exiting.")
+    quit()
+
+# Create a VideoWriter with the GStreamer RTP/UDP pipeline as its sink
+out = cv2.VideoWriter(gst_str_rtp, 0, fps, (frame_width, frame_height), True)
+
+# Loop it
+while True:
+    # Get the frame
+    ret, frame = cap.read()
+    # Check
+    if ret:
+        # Flip frame
+        frame = cv2.flip(frame, 1)
+        # Write to the streaming pipeline
+        out.write(frame)
+    else:
+        print("Camera error.")
+        time.sleep(10)
+
+cap.release()
diff --git a/test_video.py b/test_video.py
new file mode 100644
index 0000000..c69d3bc
--- /dev/null
+++ b/test_video.py
@@ -0,0 +1,167 @@
+import cv2
+import config
+import time
+
+from multiprocessing import Process
+
+from processing import colors
+import network as networktables
+from cameras import logitech_c270, generic
+from profiles import color_profiles
+from processing import bay_tracker
+from processing import port_tracker
+from processing import ball_tracker
+from controls import main_controller
+
+from processing import filters
+
+from controls import CAMERA_MODE_RAW, CAMERA_MODE_LOADING_BAY, CAMERA_MODE_BALL, CAMERA_MODE_HEXAGON
+import gst_utils
+
+import logging
+
+import start_web
+from websocket import create_connection
+import json
+from cameras import Camera
+
+# initiate the top level logger
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s [%(name)s] [%(levelname)-5.5s] %(message)s",
+    handlers=[
+        logging.StreamHandler()
+    ]
+)
+
+logger = logging.getLogger('app')
+
+
+def main():
+
+    networktables.init(client=False)
+
+    dashboard = networktables.get()
+
+    dashboard.putBoolean(networktables.keys.vision_initialized, True)
+
+    main_controller.connect()
+
+    cap = cv2.VideoCapture('media/yellow_ball.mp4')
+
+    # out_pipeline = gst_utils.get_udp_streamer_pipeline2(config.gstreamer_client_ip,
+    #                                                     config.gstreamer_client_port,
+    #                                                     config.gstreamer_bitrate)
+
+    out_pipeline = gst_utils.get_udp_sender(config.gstreamer_client_ip,
+                                            config.gstreamer_client_port)
+
+    # Set camera properties
+    camera = Camera(cap.get(cv2.CAP_PROP_FRAME_WIDTH),
+                    cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
+                    cap.get(cv2.CAP_PROP_FPS))
+
+    print([camera.FRAME_WIDTH])
+    print([camera.FRAME_HEIGHT])
+    print([camera.FPS])
+
+    out = cv2.VideoWriter(out_pipeline, 0,
+                          camera.FPS,
+                          (camera.FRAME_WIDTH, camera.FRAME_HEIGHT),
+                          True)
+
+    #TODO: if no camera, exit and msg no camera
+    time.sleep(5)
+    logger.info('starting main loop')
+    ws = create_connection("ws://localhost:8080/tracking/ws")
+
+    frame_cnt = 0
+    while(True):
+
+        frame_cnt += 1
+
+        if True or main_controller.enable_camera:
+
+            if not cap.isOpened():
+                print('opening camera')
+                cap.open(config.video_source_number)
+
+            _, frame = cap.read()
+
+            # loop the test clip: rewind when the video runs out of frames
+            if frame is None:
+                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
+                frame_cnt = 0
+                continue
+
+            #frame = filters.resize(frame, camera.FRAME_WIDTH, camera.FRAME_HEIGHT)
+
+            if main_controller.camera_mode == CAMERA_MODE_RAW:
+
+                frame = frame
+
+            elif main_controller.camera_mode == CAMERA_MODE_LOADING_BAY:
+
+                frame = bay_tracker.process(frame,
+                                            generic,
+                                            color_profiles.ReflectiveProfile())
+
+            elif main_controller.camera_mode == CAMERA_MODE_BALL:
+
+                frame, tracking_data = ball_tracker.process(frame,
+                                                            camera,
+                                                            frame_cnt)
+
+                ws.send(json.dumps(dict(targets=tracking_data)))
+
+            elif main_controller.camera_mode == CAMERA_MODE_HEXAGON:
+
+                frame = port_tracker.process(frame, generic, color_profiles.ReflectiveProfile())
+
+            if main_controller.enable_streaming:
+
+                cv2.putText(frame,
+                            'Tracking Mode %s' % main_controller.camera_mode,
+                            (10,10),
+                            cv2.FONT_HERSHEY_DUPLEX,
+                            .4,
+                            colors.BLUE,
+                            1,
+                            cv2.LINE_AA)
+
+                #out.write(frame)
+
+                #cv2.imshow('frame', frame)
+                #cv2.waitKey(1)
+
+        else:
+            # IDLE mode
+            #if cap.isOpened():
+            #    print('closing camera')
+            #    cap.release()
+            time.sleep(.3)
+
+        # if cv2.waitKey(1) & 0xFF == ord('q'):
+        #     break
+
+
+def single_frame(debug=False):
+
+    img = cv2.imread("frc_cube.jpg")
+    img = cube_tracker.process(img,
+                               generic)
+
+    cv2.imshow('Objects Detected', img)
+    cv2.waitKey()
+
+
+if __name__ == '__main__':
+    p = Process(target=start_web.main)
+    p.start()
+    main()
+    p.join()
diff --git a/web/handlers.py b/web/handlers.py
index b477616..9cee864 100644
--- a/web/handlers.py
+++ b/web/handlers.py
@@ -8,21 +8,23 @@
 import logging
 import time
+import uuid
 
 import network as networktables
 from controls import main_controller
+from .nt_serial import NTSerial
 
 logger = logging.getLogger("handlers")
 
-class DashboardWebSocket(WebSocketHandler):
-    """
-    A tornado web handler that forwards values between NetworkTables
-    and a webpage via a websocket
-    """
+import ujson as json
+
+
+class ObjectTrackingWebSocket(WebSocketHandler):
+    """
+    Relays tracking messages to every connected client.
+    """
+    watchers = set()
 
     def open(self):
-        logger.info("websocket opened")
-        self.ioloop = IOLoop.current()
+        logger.info("ObjectTracking websocket opened")
+        ObjectTrackingWebSocket.watchers.add(self)
 
     def check_origin(self, origin):
         """
@@ -30,49 +32,89 @@ def check_origin(self, origin):
         """
         return True
 
     def on_message(self, message):
+        """
+        Broadcast to the other clients; assumes the message is target data.
+        """
+        for waiter in ObjectTrackingWebSocket.watchers:
+            if waiter == self:
+                continue
+            waiter.write_message(message)
+
+    def send_msg(self, msg):
+        try:
+            self.write_message(msg, False)
+        except WebSocketClosedError:
+            logger.warn("websocket closed when sending message")
+
+    def on_close(self):
+        logger.info("ObjectTracking websocket closed")
+        ObjectTrackingWebSocket.watchers.remove(self)
 
-        dashboard = networktables.get()
-        inputs = json.loads(message)
-        data = None
+
+class DashboardWebSocket(WebSocketHandler):
+    """
+    A tornado web handler that forwards values between NetworkTables
+    and a webpage via a websocket
+    """
+    watchers = set()
 
-        logger.info(inputs)
-        if 'controls' in inputs:
-            controls = inputs['controls']
-            dashboard.putBoolean(networktables.keys.vision_enable_camera, controls['enable_camera'])
-            dashboard.putValue(networktables.keys.vision_camera_mode, controls['camera_mode'])
-            #
-            # data = dict(enable_camera=main_controller.enable_camera,
-            #             camera_mode=main_controller.camera_mode,
-            #             enable_processing=main_controller.enable_processing,
-            #             )
+    def open(self):
+        self.uid = str(uuid.uuid4())
+        logger.info("Dashboard websocket opened")
 
-        elif 'rgb' in inputs:
-            rgb = inputs['rgb']
-            dashboard.putValue(networktables.keys.vision_color_profile, json.dumps(rgb))
+        DashboardWebSocket.watchers.add(self)
 
-        # print(json.dumps(data))
-        # self.send_msg_threadsafe(json.dumps(data))
+        self.ioloop = IOLoop.current()
+        dashboard = networktables.get()
 
-        # print(controls['enable_camera'])
-        # main_controller.enable_camera = controls['enable_camera']
+        ### add listener for NetworkTables updates, relayed back to this socket
+        self.ntserial = NTSerial(self.send_msg_threadsafe)
+        self.write_message(self.uid)
+        self.write_message(dashboard.getValue(networktables.keys.vision_color_profile, {}))
 
+    def check_origin(self, origin):
+        """
+        Allow CORS requests
+        """
+        return True
 
+    def on_message(self, message):
+        dashboard = networktables.get()
+        logger.info(message)
+        if message == 'status':
+            self.send_msg(dashboard.getValue(networktables.keys.vision_color_profile, {}))
+        else:
+            inputs = json.loads(message)
+
+            if 'controls' in inputs:
+                controls = inputs['controls']
+                dashboard.putBoolean(networktables.keys.vision_enable_camera, controls['enable_camera'])
+                dashboard.putValue(networktables.keys.vision_camera_mode, controls['camera_mode'])
+
+            elif 'color_profile' in inputs:
+                profile = inputs['color_profile']
+                dashboard.putValue(networktables.keys.vision_color_profile, json.dumps(profile))
+
+        logger.info('broadcasting to %s' % len(DashboardWebSocket.watchers))
+        for watcher in DashboardWebSocket.watchers:
+            watcher.write_message(message)
 
     def send_msg(self, msg):
         try:
             self.write_message(msg, False)
         except WebSocketClosedError:
-            logger.warn("websocket closed when sending message")
+            logger.warn("%s: websocket closed when sending message" % self.uid)
 
+    ## this is used by NTSerial to push updates to the web page
     def send_msg_threadsafe(self, data):
         self.ioloop.add_callback(self.send_msg, data)
 
     def on_close(self):
-        logger.info("NetworkTables websocket closed")
+        logger.info("Dashboard websocket closed %s" % self.uid)
+        DashboardWebSocket.watchers.remove(self)
 
 
 class NonCachingStaticFileHandler(StaticFileHandler):
diff --git a/web/nt_serial.py b/web/nt_serial.py
new file mode 100644
index 0000000..9cfc5d7
--- /dev/null
+++ b/web/nt_serial.py
@@ -0,0 +1,43 @@
+try:
+    import ujson as json
+except ImportError:
+    import json
+
+from networktables import NetworkTables
+
+__all__ = ["NTSerial"]
+
+
+class NTSerial(object):
+    """
+    A utility class for synchronizing NetworkTables over a serial connection.
+    """
+
+    def __init__(self, update_callback):
+        """
+        :param update_callback: A callable with signature ``callable(update)``
+                                for processing outgoing updates formatted as strings.
+        """
+        self.update_callback = update_callback
+        NetworkTables.addGlobalListener(self._nt_on_change, immediateNotify=True)
+
+    def process_update(self, update):
+        """Process an incoming update from a remote NetworkTables"""
+        data = json.loads(update)
+        NetworkTables.getEntry(data["k"]).setValue(data["v"])
+
+    def _send_update(self, data):
+        """Send a NetworkTables update via the stored update_callback"""
+        if isinstance(data, dict):
+            data = json.dumps(data)
+        self.update_callback(data)
+
+    def _nt_on_change(self, key, value, isNew):
+        """NetworkTables global listener callback"""
+        self._send_update({"k": key, "v": value, "n": isNew})
+
+    def close(self):
+        """
+        Clean up NetworkTables listeners
+        """
+        NetworkTables.removeGlobalListener(self._nt_on_change)
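Each NetworkTables change reaches the browser as a small JSON record: `_nt_on_change` packs it as `{"k": key, "v": value, "n": isNew}` and hands the serialized string to the callback, which for `DashboardWebSocket` is `send_msg_threadsafe`. A minimal sketch of wiring `NTSerial` up outside Tornado (the callback and key paths below are illustrative, not from the repo):

```python
# Minimal sketch: NTSerial pushes every NetworkTables change to a callback.
# print() stands in for DashboardWebSocket.send_msg_threadsafe; the key paths
# in the strings are invented for illustration.
from web.nt_serial import NTSerial

def on_update(update):
    # update is a JSON string like '{"k": "/vision/camera_mode", "v": "BALL", "n": false}'
    print(update)

ntserial = NTSerial(on_update)
# ...and an update coming back from the browser is applied with:
ntserial.process_update('{"k": "/vision/enable_camera", "v": true}')
```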
diff --git a/web/tornado_server.py b/web/tornado_server.py
index 8e4179e..973b97f 100644
--- a/web/tornado_server.py
+++ b/web/tornado_server.py
@@ -5,9 +5,9 @@
 import logging
 
 from tornado.web import StaticFileHandler
-from web.handlers import NonCachingStaticFileHandler, DashboardWebSocket
+from web.handlers import NonCachingStaticFileHandler, DashboardWebSocket, ObjectTrackingWebSocket
 
-logger = logging.getLogger("dashboard")
+logger = logging.getLogger("tornado")
 
 
 def start():
@@ -15,12 +15,11 @@ def start():
 
     www_dir = abspath(join(dirname(__file__), "www"))
     lib_dir = abspath(join(dirname(__file__), "www", "lib"))
-
-    print(lib_dir)
 
     index_html = join(www_dir, "index.html")
 
     app = tornado.web.Application([
         ("/dashboard/ws", DashboardWebSocket),
+        ("/tracking/ws", ObjectTrackingWebSocket),
         (r"/()", NonCachingStaticFileHandler, {"path": index_html}),
         (r'/lib/(.*)', StaticFileHandler, {"path": lib_dir}),
         (r"/(.*)", NonCachingStaticFileHandler, {"path": www_dir})
@@ -31,4 +30,3 @@ def start():
 
     app.listen(config.tornado_server_port)
     IOLoop.current().start()
-
diff --git a/web/www/app.js b/web/www/app.js
index 5bfa13e..5ad5549 100644
--- a/web/www/app.js
+++ b/web/www/app.js
@@ -9,32 +9,27 @@ new Vue({
             // enable_processing: false,
             camera_mode: 'R'
         },
-        rgb: {
-            red: {
-                min: 0,
-                max: 255
-            },
-            green: {
-                min: 0,
-                max: 255
-            },
-            blue: {
-                min: 0,
-                max: 255
-            }
-        }
+        targets: [],
+        color_profile: null
    },
    mounted: function () {
        console.log('mounted');
+       var self = this;
    },
    methods: {
-
+        onTargetUpdate: function(key, value, isNew) {
+            //console.log(value);
+            this.targets = value
+        },
        updateColors: function() {
            var self = this;
-            console.log(self.rgb)
+            console.log(self.color_profile)
            Socket.send({
-                'rgb': self.rgb
-            })
+                // key matches the 'color_profile' branch in web/handlers.py
+                'color_profile': self.color_profile
+            })
        },
        enableCamera: function () {
            var self = this;
@@ -49,9 +44,8 @@
        enableRaw: function() {
            var self = this;
            self.controls.camera_mode = 'RAW';
-            Socket.send({
-                'controls': self.controls
-            })
+            Socket.send({'controls': self.controls})
        },
        enableBall: function(){
            var self = this;
@@ -61,7 +55,7 @@
        enableHex: function(){
            var self = this;
            self.controls.camera_mode = 'HEXAGON'
-            Socket.send(self.controls)
+            Socket.send({'controls': self.controls})
        },
        enableBay: function(){
            var self = this;
@@ -78,6 +72,6 @@
        // }
        //     Socket.send(self.controls)
        // },
-
    }
-});
\ No newline at end of file
+});
diff --git a/web/www/index.html b/web/www/index.html
index 13ec205..49c1eec 100644
--- a/web/www/index.html
+++ b/web/www/index.html
@@ -10,7 +10,7 @@
@@ -84,48 +84,49 @@
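On the consuming side, any process can watch the tracking feed the same way the browser does, since `ObjectTrackingWebSocket` rebroadcasts each message to every other connected client. A minimal consumer sketch (URL from `main.py`, payload shape from `ball_tracker2.py`; assumes the Tornado server is running on localhost:8080):

```python
# Minimal sketch of a /tracking/ws consumer; assumes the server from
# web/tornado_server.py is up on localhost:8080.
import json
from websocket import create_connection

ws = create_connection("ws://localhost:8080/tracking/ws")
while True:
    msg = json.loads(ws.recv())
    for target in msg.get("targets", []):
        # fields produced by processing/ball_tracker2.py
        print("ball at x=%s y=%s dist=%sin angle=%sdeg" %
              (target["xpos"], target["ypos"], target["dist"], target["angle"]))
```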