diff --git a/.eslintrc.json b/.eslintrc.json index eded6bc..923e784 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -6,6 +6,7 @@ "extends": [ "airbnb-base" ], + "ignorePatterns": ["dist/**"], "parser": "babel-eslint", "parserOptions": { "ecmaVersion": 12, diff --git a/.gitignore b/.gitignore index 2dc23f4..fa02bef 100644 --- a/.gitignore +++ b/.gitignore @@ -82,7 +82,7 @@ out # Nuxt.js build / generate output .nuxt -dist +#dist # Gatsby files .cache/ diff --git a/.jsdoc.json b/.jsdoc.json index f7e1d7a..1f5fdda 100644 --- a/.jsdoc.json +++ b/.jsdoc.json @@ -4,9 +4,9 @@ "dictionaries": ["jsdoc"] }, "source": { - "include": ["lib", "package.json", "README.md"], + "include": ["src", "package.json", "README.md"], "includePattern": ".js$", - "excludePattern": "(node_modules/|docs)" + "excludePattern": "(node_modules/|docs|dist|__tests__)" }, "plugins": [ "plugins/markdown" diff --git a/MIGRATING.md b/MIGRATING.md index 020eca5..157551c 100644 --- a/MIGRATING.md +++ b/MIGRATING.md @@ -3,12 +3,14 @@ ## Migrating from 1.x to 2.x There have been several significant changes to the 2.x release of TJBotLib that break compatability with the 1.x release. Please use this guide, the new [TJBot API docs](https://ibmtjbot.github.io/docs/tjbot/2.0.1/), and the [updated recipes](https://github.com/ibmtjbot/tjbot/tree/master/recipes) in the `tjbot` repository, to help you migrate your recipes to the new 2.x API. -### ES6 Module -TJBot is now packaged as an ES6 module, meaning it can be imported as follows: +### ES6 / CommonJS Hybrid Module +TJBotLib is now packaged as hybrid ES6 / CommonJS module, meaning it can be imported as follows: - import TJBot from 'tjbot'; + import TJBot from 'tjbot'; // ES6 -Because of this new packaging, TJBotLib requires Node 15.x. + const TJBot = require('tjbot').default; // CommonJS + +Node 15.x+ is required to use TJBotLib as an ES6 module. ### `async`/`await` Semantics TJBot now uses `async`/`await` semantics rather than `Promise` semantics. 
This change has resulted in much cleaner, and easier to understand code. Thus, functions that used to return promises (e.g. `tj.analyzeTone()`) should now be called with `await`. diff --git a/README.md b/README.md index b3db8da..262b1c7 100644 --- a/README.md +++ b/README.md @@ -18,10 +18,27 @@ $ npm install --save tjbot > 💡 Note: The TJBot library was developed for use on Raspberry Pi. It may be possible to develop and test portions of this library on other Linux-based systems (e.g. Ubuntu), but this usage is not officially supported. -2. Instantiate the `TJBot` object. +2. Import the TJBot library. + +TJBot is packaged as both an ES6 and a CommonJS module (explained in [this guide](https://www.sensedeep.com/blog/posts/2021/how-to-create-single-source-npm-module.html)), which means you may import it using either the ES6 `import` statement or the CommonJS `require` method. + +For ES6, import TJBot as follows: ``` import TJBot from 'tjbot'; +``` + +For CommonJS, import TJBot as follows: + +``` +const TJBot = require('tjbot').default; +``` + +> 💡 Note: For CommonJS, the `TJBot` class is exported under a `.default` reference. + +3. Instantiate the `TJBot` object. + +``` const tj = new TJBot(); tj.initialize([TJBot.HARDWARE.LED_NEOPIXEL, TJBot.HARDWARE.SERVO, TJBot.HARDWARE.MICROPHONE, TJBot.HARDWARE.SPEAKER]); ``` @@ -123,7 +140,7 @@ The full list of capabilities can be accessed programatically via `TJBot.CAPABIL ## TJBot API -Please see [the API docs](https://ibmtjbot.github.io/docs/tjbot/2.0.1/) for documentation of the TJBot API. +Please see [the API docs](https://ibmtjbot.github.io/docs/tjbot/2.0.2/) for documentation of the TJBot API. > 💡 Please see the [Migration Guide](MIGRATING.md) for guidance on migrating your code to the latest version of the TJBot API. 
diff --git a/dist/cjs/package.json b/dist/cjs/package.json new file mode 100644 index 0000000..1cd945a --- /dev/null +++ b/dist/cjs/package.json @@ -0,0 +1,3 @@ +{ + "type": "commonjs" +} diff --git a/dist/cjs/tjbot.d.ts b/dist/cjs/tjbot.d.ts new file mode 100644 index 0000000..cdccd4f --- /dev/null +++ b/dist/cjs/tjbot.d.ts @@ -0,0 +1,666 @@ +export default TJBot; +/** +* Class representing a TJBot +*/ +declare class TJBot { + /** + * TJBot library version + * @readonly + */ + static readonly VERSION: "v2.0.2"; + /** + * TJBot capabilities + * @readonly + * @enum {string} + */ + static readonly CAPABILITIES: { + ANALYZE_TONE: string; + CONVERSE: string; + LISTEN: string; + SEE: string; + SHINE: string; + SPEAK: string; + TRANSLATE: string; + WAVE: string; + }; + /** + * TJBot hardware + * @readonly + * @enum {string} + */ + static readonly HARDWARE: { + CAMERA: string; + LED_NEOPIXEL: string; + LED_COMMON_ANODE: string; + MICROPHONE: string; + SERVO: string; + SPEAKER: string; + }; + /** + * TJBot Watson services + * @readonly + * @enum {string} + */ + static readonly SERVICES: { + ASSISTANT: string; + LANGUAGE_TRANSLATOR: string; + SPEECH_TO_TEXT: string; + TEXT_TO_SPEECH: string; + TONE_ANALYZER: string; + VISUAL_RECOGNITION: string; + }; + /** + * TJBot languages for listening, speaking, and seeing + * @readonly + * @enum {string} + */ + static readonly LANGUAGES: { + LISTEN: { + ARABIC: string; + CHINESE: string; + ENGLISH_UK: string; + ENGLISH_US: string; + FRENCH: string; + GERMAN: string; + ITALIAN: string; + JAPANESE: string; + KOREAN: string; + PORTUGUESE: string; + SPANISH: string; + }; + SPEAK: { + ARABIC: string; + CHINESE: string; + DUTCH: string; + ENGLISH_GB: string; + ENGLISH_US: string; + FRENCH: string; + GERMAN: string; + ITALIAN: string; + JAPANESE: string; + KOREAN: string; + PORTUGUESE: string; + SPANISH: string; + }; + SEE: { + CHINESE: string; + ENGLISH: string; + FRENCH: string; + GERMAN: string; + ITALIAN: string; + JAPANESE: string; + 
KOREAN: string; + PORTUGUESE: string; + SPANISH: string; + }; + }; + /** + * TJBot genders, used to pick a voice when speaking + * @readonly + * @enum {string} + */ + static readonly GENDERS: { + MALE: string; + FEMALE: string; + }; + /** + * TJBot servo motor stop positions + * @readonly + * @enum {int} + */ + static readonly SERVO: { + ARM_BACK: number; + ARM_UP: number; + ARM_DOWN: number; + }; + /** + * TJBot default configuration + * @readonly + */ + static readonly DEFAULT_CONFIG: { + log: { + level: string; + }; + robot: { + gender: string; + }; + converse: { + assistantId: undefined; + }; + listen: { + microphoneDeviceId: string; + inactivityTimeout: number; + backgroundAudioSuppression: number; + language: string; + }; + wave: { + servoPin: number; + }; + speak: { + language: string; + voice: undefined; + speakerDeviceId: string; + }; + see: { + confidenceThreshold: number; + camera: { + height: number; + width: number; + verticalFlip: boolean; + horizontalFlip: boolean; + }; + language: any; + }; + shine: { + neopixel: { + gpioPin: number; + grbFormat: boolean; + }; + commonAnode: { + redPin: number; + greenPin: number; + bluePin: number; + }; + }; + }; + /** ------------------------------------------------------------------------ */ + /** UTILITY METHODS */ + /** ------------------------------------------------------------------------ */ + /** + * Put TJBot to sleep. + * @param {int} msec Number of milliseconds to sleep for (1000 msec == 1 sec). + */ + static sleep(msec: any): void; + /** + * TJBot constructor. After constructing a TJBot instance, call initialize() to configure its hardware. + * @param {object} configuration Configuration for the TJBot. See TJBot.DEFAULT_CONFIG for all configuration options. + * @param {string=} credentialsFile (optional) Path to the 'ibm-credentials.env' file containing authentication credentials for IBM Watson services. 
+ * @return {TJBot} instance of the TJBot class + */ + constructor(configuration?: object, credentialsFile?: string | undefined); + configuration: { + log: { + level: string; + }; + robot: { + gender: string; + }; + converse: { + assistantId: undefined; + }; + listen: { + microphoneDeviceId: string; + inactivityTimeout: number; + backgroundAudioSuppression: number; + language: string; + }; + wave: { + servoPin: number; + }; + speak: { + language: string; + voice: undefined; + speakerDeviceId: string; + }; + see: { + confidenceThreshold: number; + camera: { + height: number; + width: number; + verticalFlip: boolean; + horizontalFlip: boolean; + }; + language: any; + }; + shine: { + neopixel: { + gpioPin: number; + grbFormat: boolean; + }; + commonAnode: { + redPin: number; + greenPin: number; + bluePin: number; + }; + }; + }; + /** + * @param {array} hardware List of hardware peripherals attached to TJBot. + * @see {@link #TJBot+HARDWARE} for a list of supported hardware. + * @async + */ + initialize(hardware: any): Promise; + /** ------------------------------------------------------------------------ */ + /** INTERNAL HARDWARE & WATSON SERVICE INITIALIZATION */ + /** ------------------------------------------------------------------------ */ + /** + * Configure the camera hardware. + * @private + */ + private _setupCamera; + _camera: Raspistill | undefined; + /** + * Configure the Neopixel LED hardware. + * @param {int} gpioPin The GPIO pin number to which the LED is connected. + * @private + */ + private _setupLEDNeopixel; + _neopixelLed: any; + /** + * Configure the common anode RGB LED hardware. + * @param {int} redPin The pin number to which the led red pin is connected. + * @param {int} greenPin The pin number to which the led green pin is connected. + * @param {int} bluePin The pin number to which the led blue pin is connected. 
+ * @private + */ + private _setupLEDCommonAnode; + _commonAnodeLed: { + redPin: Gpio; + greenPin: Gpio; + bluePin: Gpio; + } | undefined; + /** + * Configure the microphone for speech recognition. + * @private + */ + private _setupMicrophone; + _mic: any; + _micInputStream: any; + /** + * Configure the servo module for the given pin number. + * @param {int} pin The pin number to which the servo is connected. + * @private + */ + private _setupServo; + _motor: Gpio | undefined; + /** + * Configure the speaker. + * @private + */ + private _setupSpeaker; + _soundplayer: any; + /** + * Instantiate the specified Watson service. + * @param {string} service The name of the service. Valid names are defined in TJBot.services. + * @param {string} version The version of the service (e.g. "2018-09-20"). If null, the default version will be used. + * @private + */ + private _createServiceAPI; + _assistant: AssistantV2 | undefined; + _languageTranslator: LanguageTranslatorV3 | undefined; + _stt: SpeechToTextV1 | undefined; + _tts: TextToSpeechV1 | undefined; + _toneAnalyzer: ToneAnalyzerV3 | undefined; + _visualRecognition: VisualRecognitionV3 | undefined; + /** + * Assert that TJBot is able to perform a specified capability. Instantiates Watson + * services as needed. + * @param {string} capability The capability assert (see TJBot.prototype.capabilities). + * @private + */ + private _assertCapability; + /** ------------------------------------------------------------------------ */ + /** ANALYZE TONE */ + /** ------------------------------------------------------------------------ */ + /** + * Analyze the tone of the given text. + * @param {string} text The text to analyze. + * @return {object} Returns the response object from the Tone Analyzer service. 
+ * @example + * response = { + * "document_tone": { + * "tones": [{ + * "score": 0.6165, + * "tone_id": "sadness", + * "tone_name": "Sadness" + * }, + * { + * "score": 0.829888, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * } + * ] + * }, + * "sentences_tone": [{ + * "sentence_id": 0, + * "text": "Team, I know that times are tough!", + * "tones": [{ + * "score": 0.801827, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * }] + * }, + * { + * "sentence_id": 1, + * "text": "Product sales have been disappointing for the past three quarters.", + * "tones": [{ + * "score": 0.771241, + * "tone_id": "sadness", + * "tone_name": "Sadness" + * }, + * { + * "score": 0.687768, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * } + * ] + * }, + * { + * "sentence_id": 2, + * "text": "We have a competitive product, but we need to do a better job of selling it!", + * "tones": [{ + * "score": 0.506763, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * }] + * } + * ] + * } + * @see {@link https://cloud.ibm.com/apidocs/tone-analyzer?code=node#tone|Tone Analyzer} documentation provides details on the response object. + * @async + */ + analyzeTone(text: string): object; + /** ------------------------------------------------------------------------ */ + /** CONVERSE */ + /** ------------------------------------------------------------------------ */ + /** + * Take a conversational turn in the conversation. + * @param {string} message The message to send to the Assistant service. + * @return {object} Returns an object with two keys: `object` contains the full Assistant response object, and `description` contains the string response. + * @example + * response = { + * "object": {conversation response object}, + * "description": "hello, how are you" + * } + * @see {@link https://cloud.ibm.com/apidocs/assistant/assistant-v2?code=node#message|Assistant} documentation provides details on the response object. 
+ * @async + */ + converse(message: string): object; + _assistantSessionId: string | undefined; + /** ------------------------------------------------------------------------ */ + /** LISTEN */ + /** ------------------------------------------------------------------------ */ + /** + * Listen for a spoken utterance. + * @async + */ + listen(): Promise; + _recognizeStream: import("ibm-watson/lib/recognize-stream") | undefined; + _sttTextStream: any; + /** + * Internal method for pausing listening, used when + * we want to play a sound but we don't want to assert + * the 'listen' capability. + * @private + */ + private _pauseListening; + /** + * Internal method for resuming listening, used when + * we want to play a sound but we don't want to assert + * the 'listen' capability. + * @private + */ + private _resumeListening; + /** ------------------------------------------------------------------------ */ + /** SEE */ + /** ------------------------------------------------------------------------ */ + /** + * Take a picture and identify the objects present. + * @param {array=} classifierIds (optional) List of classifier IDs to use in the Visual Recognition service. + * @return {object} Returns a list of objects seen and their confidences. + * @example + * response = { + * "images": [{ + * "classifiers": [{ + * "classifier_id": "roundPlusBanana_1758279329", + * "name": "roundPlusBanana", + * "classes": [{ + * "class": "fruit", + * "score": 0.788 + * }, + * { + * "class": "olive color", + * "score": 0.973 + * }, + * { + * "class": "lemon yellow color", + * "score": 0.789 + * } + * ] + * }], + * "image": "fruitbowl.jpg" + * }], + * "images_processed": 1, + * "custom_classes": 6 + * } + * @see {@link https://cloud.ibm.com/apidocs/visual-recognition/visual-recognition-v3?code=node#classify|Visual Recognition} + * documentation provides details on the response object. 
The response object returned by + * `see()` corresponds to `response.images[0].classifiers[0].classes` from Visual Recognition. + * @async + */ + see(classifierIds?: any | undefined): object; + /** + * Recognize objects in a given photo. + * @param {string} filePath Path to the photo file. + * @param {array=} classifierIds (optional) List of classifier IDs to use in the Visual Recognition service. + * @return {object} Returns a list of objects seen and their confidences. + * @see {@link https://cloud.ibm.com/apidocs/visual-recognition/visual-recognition-v3?code=node#classify|Visual Recognition} + * documentation provides details on the response object. The response object returned by + * `see()` corresponds to `response.images[0].classifiers[0].classes` from Visual Recognition. + * @async + */ + recognizeObjectsInPhoto(filePath: string, classifierIds?: any | undefined): object; + /** + * Capture an image and save it in the given path. + * @param {string=} filePath (optional) Path at which to save the photo file. If not + * specified, photo will be saved in a temp location. + * @return {string} Path at which the photo was saved. + * @async + */ + takePhoto(filePath?: string | undefined): string; + /** + * Internal method to capture an image at the given path. Used to avoid triggering + * the check for an apikey for Watson Visual Recognition in _assertCapability() + * during testing. + * @param {string=} filePath (optional) Path at which to save the photo file. If not + * specified, photo will be saved in a temp location. + * @return {string} Path at which the photo was saved. + * @private + * @async + */ + private _takePhoto; + /** ------------------------------------------------------------------------ */ + /** SHINE */ + /** ------------------------------------------------------------------------ */ + /** + * Change the color of the LED. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. 
"0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @see {@link https://github.com/timoxley/colornames|Colornames} for a list of color names. + */ + shine(color: string): void; + /** + * Pulse the LED a single time. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @param {float=} duration The duration the pulse should last. The duration should be in + * the range [0.5, 2.0] seconds. + * @see {@link https://github.com/timoxley/colornames|Colornames} for a list of color names. + * @async + */ + pulse(color: string, duration?: any | undefined): Promise; + /** + * Get the list of all colors recognized by TJBot. + * @return {array} List of all named colors recognized by `shine()` and `pulse()`. + */ + shineColors(): any; + /** + * Get a random color. + * @return {string} Random named color. + */ + randomColor(): string; + /** + * Normalize the given color to #RRGGBB. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @return {string} Hex string corresponding to the given color (e.g. "#RRGGBB") + * @private + */ + private _normalizeColor; + /** + * Convert hex color code to RGB value. + * @param {string} hexColor Hex color code + * @return {array} RGB color (e.g. (255, 128, 128)) + * @private + */ + private _convertHexToRgbColor; + /** + * Render the given rgb color for the common anode led. 
+ * @param {string} hexColor Color in hex format + * @private + */ + private _renderCommonAnodeLed; + /** ------------------------------------------------------------------------ */ + /** SPEAK */ + /** ------------------------------------------------------------------------ */ + /** + * Speak a message. + * @param {string} message The message to speak. + * @async + */ + speak(message: string): Promise; + _ttsVoices: import("ibm-watson/text-to-speech/v1-generated").Voice[] | undefined; + /** + * Play a sound at the specified path. + * @param {string} soundFile The path to the sound file to be played. + * @async + */ + play(soundFile: string): Promise; + /** ------------------------------------------------------------------------ */ + /** TRANSLATE */ + /** ------------------------------------------------------------------------ */ + /** + * Translates the given text from the source language to the target language. + * + * @param {string} text The text to translate. + * @param {string} sourceLanguage The source language (e.g. "en" for English). + * @param {string} targetLanguage The target language (e.g. "es" for Spanish). + * @return {object} The response object from the Language Translator service. + * @example + * response = { + * "object": { + * "translations": [{ + * "translation": "Hola, mi nombre es TJBot!" + * }], + * "word_count": 7, + * "character_count": 25 + * }, + * "description": "Hola, mi nombre es TJBot!" + * } + * @see Use {@link #TJBot+isTranslatable} to determine whether lanuage can be translated from + * the `sourceLanguage` to `targetLanguage`. + * @see {@link https://cloud.ibm.com/apidocs/language-translator?code=node#translate|Language Translator} + * documentation provides details on the response object. + * @async + */ + translate(text: string, sourceLanguage: string, targetLanguage: string): object; + /** + * Identifies the language of the given text. + * @param {string} text The text to identify. 
+ * @return {object} Returns a response object from the Language Translator service. + * @example + * response = { + * "languages": [{ + * "language": "en", + * "confidence": 0.9804833843796723 + * }, + * { + * "language": "nn", + * "confidence": 0.005988721319786277 + * }, + * { + * "language": "sq", + * "confidence": 0.0036927759389060203 + * }, + * { + * "language": "nb", + * "confidence": 0.0035802051870239037 + * } + * ] + * } + * @see {@link https://cloud.ibm.com/apidocs/language-translator?code=node#identify|Language Translator} + * documentation provides details on the response object. + * @async + */ + identifyLanguage(text: string): object; + /** + * Determines if TJBot can translate from the source language to the target language. + * @param {string} sourceLanguage The source language (e.g. "en" for English). + * @param {string} targetLanguage The target language (e.g. "es" for Spanish). + * @return {bool} True if the `sourceLanguage` can be translated to the + * `targetLanguage`, false otherwise. + * @async + */ + isTranslatable(sourceLanguage: string, targetLanguage: string): any; + _translationModels: {} | undefined; + /** + * Returns a list of languages that can TJBot can translate to from the given language. + * @param {string} sourceLanguage The source language (e.g. "en" for English) + * @return {array} List of languages that TJBot can translate to from the source langauge + */ + translatableLanguages(sourceLanguage: string): any; + /** + * Returns the name of the given language code. + * @param {string} languageCode Two-character language code (e.g. "en") + * @return {string} Name of the language (e.g. "English"), or undefined if the language is unknown. + */ + languageForCode(languageCode: string): string; + /** + * Returns the two-letter code for the given language. + * @param {string} language Name of the language (e.g. "English") + * @return {string} Two-letter language code for the language (e.g. 
"en"), or undefined if the language code is unknown. + */ + codeForLanguage(language: string): string; + /** + * Loads the list of language models that can be used for translation. + * @private + * @async + */ + private _loadLanguageTranslationModels; + /** ------------------------------------------------------------------------ */ + /** WAVE */ + /** ------------------------------------------------------------------------ */ + /** + * Moves TJBot's arm all the way back. If this method doesn't move the arm all the way back, the servo motor stop point defined in TJBot.SERVO.ARM_BACK may need to be overridden. Valid servo values are in the range [500, 2300]. + * @example tj.armBack() + */ + armBack(): void; + /** + * Raises TJBot's arm. If this method doesn't move the arm all the way back, the servo motor stop point defined in TJBot.SERVO.ARM_UP may need to be overridden. Valid servo values are in the range [500, 2300]. + * @example tj.raiseArm() + */ + raiseArm(): void; + /** + * Lowers TJBot's arm. If this method doesn't move the arm all the way back, the servo motor stop point defined in TJBot.SERVO.ARM_DOWN may need to be overridden. Valid servo values are in the range [500, 2300]. + * @example tj.lowerArm() + */ + lowerArm(): void; + /** + * Waves TJBots's arm once. 
+ */ + wave(): Promise; +} +import { Raspistill } from "node-raspistill"; +import { Gpio } from "pigpio"; +import AssistantV2 from "ibm-watson/assistant/v2.js"; +import LanguageTranslatorV3 from "ibm-watson/language-translator/v3.js"; +import SpeechToTextV1 from "ibm-watson/speech-to-text/v1.js"; +import TextToSpeechV1 from "ibm-watson/text-to-speech/v1.js"; +import ToneAnalyzerV3 from "ibm-watson/tone-analyzer/v3.js"; +import VisualRecognitionV3 from "ibm-watson/visual-recognition/v3.js"; diff --git a/dist/cjs/tjbot.js b/dist/cjs/tjbot.js new file mode 100644 index 0000000..141d5a6 --- /dev/null +++ b/dist/cjs/tjbot.js @@ -0,0 +1,1558 @@ +"use strict"; +/* eslint-disable import/extensions */ +/** + * Copyright 2016-2020 IBM Corp. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +// node modules +const temp_1 = __importDefault(require("temp")); +const bluebird_1 = __importDefault(require("bluebird")); +const fs_1 = __importDefault(require("fs")); +const sleep_1 = __importDefault(require("sleep")); +const colornames_1 = __importDefault(require("colornames")); +const color_model_1 = __importDefault(require("color-model")); +const winston_1 = __importDefault(require("winston")); +const events_1 = require("events"); +// hardware modules +const mic_1 = __importDefault(require("mic")); +const node_raspistill_1 = require("node-raspistill"); +const rpi_ws281x_native_1 = __importDefault(require("rpi-ws281x-native")); +const pigpio_1 = require("pigpio"); +const sound_player_1 = __importDefault(require("sound-player")); +// watson modules +const v2_js_1 = __importDefault(require("ibm-watson/assistant/v2.js")); +const v3_js_1 = __importDefault(require("ibm-watson/language-translator/v3.js")); +const v1_js_1 = __importDefault(require("ibm-watson/speech-to-text/v1.js")); +const v1_js_2 = __importDefault(require("ibm-watson/text-to-speech/v1.js")); +const v3_js_2 = __importDefault(require("ibm-watson/tone-analyzer/v3.js")); +const v3_js_3 = __importDefault(require("ibm-watson/visual-recognition/v3.js")); +/** +* Class representing a TJBot +*/ +class TJBot { + /** + * TJBot constructor. After constructing a TJBot instance, call initialize() to configure its hardware. + * @param {object} configuration Configuration for the TJBot. See TJBot.DEFAULT_CONFIG for all configuration options. 
+ * @param {string=} credentialsFile (optional) Path to the 'ibm-credentials.env' file containing authentication credentials for IBM Watson services. + * @return {TJBot} instance of the TJBot class + */ + constructor(configuration = {}, credentialsFile = '') { + // import configuration params + this.configuration = Object.assign(Object.assign({}, TJBot.DEFAULT_CONFIG), configuration); + // set up logging + winston_1.default.configure({ + level: this.configuration.log.level || 'info', + format: winston_1.default.format.simple(), + transports: [ + new winston_1.default.transports.Console(), + ], + }); + // automatically track and clean up temporary files + temp_1.default.track(); + // keep track of IBM Cloud service credentials + if (credentialsFile !== '') { + process.env.IBM_CREDENTIALS_FILE = credentialsFile; + } + winston_1.default.info('Hello from TJBot!'); + winston_1.default.verbose(`TJBot library version ${TJBot.VERSION}`); + winston_1.default.silly(`TJBot configuration: ${JSON.stringify(this.configuration)}`); + } + /** + * @param {array} hardware List of hardware peripherals attached to TJBot. + * @see {@link #TJBot+HARDWARE} for a list of supported hardware. 
+ * @async + */ + initialize(hardware) { + return __awaiter(this, void 0, void 0, function* () { + // set up the hardware + if (hardware === undefined) { + throw new Error('must define a hardware configuration for TJBot'); + } + if (!Array.isArray(hardware)) { + throw new Error('hardware must be an array'); + } + winston_1.default.info(`Initializing TJBot with ${hardware}`); + hardware.forEach((device) => { + switch (device) { + case TJBot.HARDWARE.CAMERA: + this._setupCamera(); + break; + case TJBot.HARDWARE.LED_NEOPIXEL: + this._setupLEDNeopixel(this.configuration.shine.neopixel.gpioPin); + break; + case TJBot.HARDWARE.LED_COMMON_ANODE: + this._setupLEDCommonAnode(this.configuration.shine.commonAnode.redPin, this.configuration.shine.commonAnode.greenPin, this.configuration.shine.commonAnode.bluePin); + break; + case TJBot.HARDWARE.MICROPHONE: + this._setupMicrophone(); + break; + case TJBot.HARDWARE.SERVO: + this._setupServo(this.configuration.wave.servoPin); + break; + case TJBot.HARDWARE.SPEAKER: + this._setupSpeaker(); + break; + default: + break; + } + }, this); + }); + } + /** ------------------------------------------------------------------------ */ + /** INTERNAL HARDWARE & WATSON SERVICE INITIALIZATION */ + /** ------------------------------------------------------------------------ */ + /** + * Configure the camera hardware. + * @private + */ + _setupCamera() { + winston_1.default.verbose(`initializing ${TJBot.HARDWARE.CAMERA}`); + this._camera = new node_raspistill_1.Raspistill({ + width: this.configuration.see.camera.width, + height: this.configuration.see.camera.height, + noPreview: true, + encoding: 'jpg', + outputDir: './', + verticalFlip: this.configuration.see.camera.verticalFlip, + horizontalFlip: this.configuration.see.camera.horizontalFlip, + time: 1, + }); + } + /** + * Configure the Neopixel LED hardware. + * @param {int} gpioPin The GPIO pin number to which the LED is connected. 
+ * @private + */ + _setupLEDNeopixel(gpioPin) { + winston_1.default.verbose(`initializing ${TJBot.HARDWARE.LED_NEOPIXEL} on PIN ${gpioPin}`); + // init with 1 LED + this._neopixelLed = rpi_ws281x_native_1.default; + this._neopixelLed.init(1, { + gpioPin, + }); + // capture 'this' context + const self = this; + // reset the LED before the program exits + process.on('SIGINT', () => { + self._neopixelLed.reset(); + process.nextTick(() => { + process.exit(0); + }); + }); + } + /** + * Configure the common anode RGB LED hardware. + * @param {int} redPin The pin number to which the led red pin is connected. + * @param {int} greenPin The pin number to which the led green pin is connected. + * @param {int} bluePin The pin number to which the led blue pin is connected. + * @private + */ + _setupLEDCommonAnode(redPin, greenPin, bluePin) { + winston_1.default.verbose(`initializing ${TJBot.HARDWARE.LED_COMMON_ANODE} on RED PIN ${redPin}, GREEN PIN ${greenPin}, and BLUE PIN ${bluePin}`); + this._commonAnodeLed = { + redPin: new pigpio_1.Gpio(redPin, { + mode: pigpio_1.Gpio.OUTPUT, + }), + greenPin: new pigpio_1.Gpio(greenPin, { + mode: pigpio_1.Gpio.OUTPUT, + }), + bluePin: new pigpio_1.Gpio(bluePin, { + mode: pigpio_1.Gpio.OUTPUT, + }), + }; + } + /** + * Configure the microphone for speech recognition. 
     * @private
     */
    _setupMicrophone() {
        winston_1.default.verbose(`initializing ${TJBot.HARDWARE.MICROPHONE}`);
        // 16 kHz mono matches the STT websocket content type used in listen()
        const micParams = {
            rate: '16000',
            channels: '1',
            debug: false,
            exitOnSilence: 6,
        };
        if (this.configuration.listen.microphoneDeviceId) {
            micParams.device = this.configuration.listen.microphoneDeviceId;
        }
        // create the microphone
        this._mic = (0, mic_1.default)(micParams);
        // (re-)create the mic audio stream and pipe it to STT
        this._micInputStream = this._mic.getAudioStream();
        this._micInputStream.on('startComplete', () => {
            winston_1.default.verbose('microphone started');
        });
        this._micInputStream.on('pauseComplete', () => {
            winston_1.default.verbose('microphone paused');
        });
        // log errors in the mic input stream
        this._micInputStream.on('error', (err) => {
            winston_1.default.error('the microphone input stream experienced an error', err);
        });
        this._micInputStream.on('processExitComplete', () => {
            winston_1.default.verbose('microphone exit');
        });
        // ignore silence
        this._micInputStream.on('silence', () => {
            winston_1.default.verbose('microphone silence');
        });
    }
    /**
     * Configure the servo module for the given pin number.
     * @param {int} pin The pin number to which the servo is connected.
     * @private
     */
    _setupServo(pin) {
        winston_1.default.verbose(`initializing ${TJBot.HARDWARE.SERVO} on PIN ${pin}`);
        this._motor = new pigpio_1.Gpio(pin, {
            mode: pigpio_1.Gpio.OUTPUT,
        });
    }
    /**
     * Configure the speaker.
     * @private
     */
    _setupSpeaker() {
        winston_1.default.verbose(`initializing ${TJBot.HARDWARE.SPEAKER}`);
        // store the player class itself; play() instantiates it per sound
        this._soundplayer = sound_player_1.default;
    }
    /**
     * Instantiate the specified Watson service.
     * @param {string} service The name of the service. Valid names are defined in TJBot.services.
     * @param {string} version The version of the service (e.g. "2018-09-20"). If null, the default version will be used.
     * @private
     */
    _createServiceAPI(service, version) {
        winston_1.default.verbose(`initializing ${service} service`);
        // each service client reads its credentials from the environment
        // (IBM Cloud SDK convention: <SERVICE_NAME>_APIKEY / _URL)
        switch (service) {
            case TJBot.SERVICES.ASSISTANT: {
                // https://cloud.ibm.com/apidocs/assistant-v2
                const defaultVersion = '2018-09-19';
                // there seems to be a bug in the AssistantV2 service where
                // the service name is 'conversation', so it expects the environment
                // variables for the credentials to be named CONVERSATION_*, but
                // when downloading the credentials files, they are named
                // ASSISTANT_*
                // AssistantV2.DEFAULT_SERVICE_NAME = 'assistant';
                this._assistant = new v2_js_1.default({
                    serviceName: 'assistant',
                    version: version || defaultVersion,
                });
                break;
            }
            case TJBot.SERVICES.LANGUAGE_TRANSLATOR: {
                // https://cloud.ibm.com/apidocs/language-translator
                const defaultVersion = '2018-05-01';
                this._languageTranslator = new v3_js_1.default({
                    version: version || defaultVersion,
                });
                break;
            }
            case TJBot.SERVICES.SPEECH_TO_TEXT: {
                // https://cloud.ibm.com/apidocs/speech-to-text
                this._stt = new v1_js_1.default({});
                break;
            }
            case TJBot.SERVICES.TEXT_TO_SPEECH: {
                // https://cloud.ibm.com/apidocs/text-to-speech
                this._tts = new v1_js_2.default({});
                break;
            }
            case TJBot.SERVICES.TONE_ANALYZER: {
                // https://cloud.ibm.com/apidocs/tone-analyzer
                const defaultVersion = '2017-09-21';
                this._toneAnalyzer = new v3_js_2.default({
                    version: version || defaultVersion,
                });
                break;
            }
            case TJBot.SERVICES.VISUAL_RECOGNITION: {
                // https://cloud.ibm.com/apidocs/visual-recognition/visual-recognition-v3
                const defaultVersion = '2018-03-19';
                this._visualRecognition = new v3_js_3.default({
                    serviceName: 'visual_recognition',
                    version: version || defaultVersion,
                });
                break;
            }
            default:
                break;
        }
    }
    /**
     * Assert that TJBot is able to perform a specified capability. Instantiates Watson
     * services as needed.
     * @param {string} capability The capability to assert (see TJBot.prototype.capabilities).
     * @private
     */
    _assertCapability(capability) {
        // hardware must have been set up in initialize(); missing hardware throws,
        // missing Watson service clients are lazily created here
        switch (capability) {
            case TJBot.CAPABILITIES.ANALYZE_TONE:
                if (!this._toneAnalyzer) {
                    this._createServiceAPI(TJBot.SERVICES.TONE_ANALYZER);
                }
                break;
            case TJBot.CAPABILITIES.CONVERSE:
                if (!this.configuration.converse.assistantId) {
                    throw new Error('TJBot is not configured to converse. ' +
                        'Please check that you defined an assistantId for the ' +
                        'converse.assistantId parameter in the TJBot initialize() method.');
                }
                if (!this._assistant) {
                    this._createServiceAPI(TJBot.SERVICES.ASSISTANT);
                }
                break;
            case TJBot.CAPABILITIES.LISTEN:
                if (!this._mic) {
                    throw new Error('TJBot is not configured to listen. ' +
                        'Please check that you included the ' +
                        `${TJBot.HARDWARE.MICROPHONE} hardware in the TJBot initialize() method.`);
                }
                if (!this._stt) {
                    this._createServiceAPI(TJBot.SERVICES.SPEECH_TO_TEXT);
                }
                break;
            case TJBot.CAPABILITIES.SEE:
                if (!this._camera) {
                    throw new Error('TJBot is not configured to see. ' +
                        'Please check that you included the ' +
                        `${TJBot.HARDWARE.CAMERA} hardware in the TJBot initialize() method.`);
                }
                if (!this._visualRecognition) {
                    this._createServiceAPI(TJBot.SERVICES.VISUAL_RECOGNITION);
                }
                break;
            case TJBot.CAPABILITIES.SHINE:
                // one LED should be defined
                if (!this._neopixelLed && !this._commonAnodeLed) {
                    throw new Error('TJBot is not configured with an LED. ' +
                        'Please check that you included the ' +
                        `${TJBot.HARDWARE.LED_NEOPIXEL} or ${TJBot.HARDWARE.LED_COMMON_ANODE} ` +
                        'hardware in the TJBot initialize() method.');
                }
                break;
            case TJBot.CAPABILITIES.SPEAK:
                if (!this._soundplayer) {
                    throw new Error('TJBot is not configured to speak. ' +
                        'Please check that you included the ' +
                        `${TJBot.HARDWARE.SPEAKER} hardware in the TJBot initialize() method.`);
                }
                if (!this._tts) {
                    this._createServiceAPI(TJBot.SERVICES.TEXT_TO_SPEECH);
                }
                break;
            case TJBot.CAPABILITIES.TRANSLATE:
                if (!this._languageTranslator) {
                    this._createServiceAPI(TJBot.SERVICES.LANGUAGE_TRANSLATOR);
                }
                break;
            case TJBot.CAPABILITIES.WAVE:
                if (!this._motor) {
                    throw new Error('TJBot is not configured with an arm. ' +
                        'Please check that you included the ' +
                        `${TJBot.HARDWARE.SERVO} hardware in the TJBot initialize() method.`);
                }
                break;
            default:
                break;
        }
    }
    /** ------------------------------------------------------------------------ */
    /** UTILITY METHODS */
    /** ------------------------------------------------------------------------ */
    /**
     * Put TJBot to sleep.
     * NOTE(review): this blocks the event loop for the whole duration (usleep),
     * it does not yield like an awaited timer.
     * @param {int} msec Number of milliseconds to sleep for (1000 msec == 1 sec).
     */
    static sleep(msec) {
        const usec = msec * 1000;
        sleep_1.default.usleep(usec);
    }
    /** ------------------------------------------------------------------------ */
    /** ANALYZE TONE */
    /** ------------------------------------------------------------------------ */
    /**
     * Analyze the tone of the given text.
     * @param {string} text The text to analyze.
     * @return {object} Returns the response object from the Tone Analyzer service.
     * @example
     * response = {
     *     "document_tone": {
     *         "tones": [{
     *             "score": 0.6165,
     *             "tone_id": "sadness",
     *             "tone_name": "Sadness"
     *         },
     *         {
     *             "score": 0.829888,
     *             "tone_id": "analytical",
     *             "tone_name": "Analytical"
     *         }
     *         ]
     *     },
     *     "sentences_tone": [{
     *         "sentence_id": 0,
     *         "text": "Team, I know that times are tough!",
     *         "tones": [{
     *             "score": 0.801827,
     *             "tone_id": "analytical",
     *             "tone_name": "Analytical"
     *         }]
     *     },
     *     {
     *         "sentence_id": 1,
     *         "text": "Product sales have been disappointing for the past three quarters.",
     *         "tones": [{
     *             "score": 0.771241,
     *             "tone_id": "sadness",
     *             "tone_name": "Sadness"
     *         },
     *         {
     *             "score": 0.687768,
     *             "tone_id": "analytical",
     *             "tone_name": "Analytical"
     *         }
     *         ]
     *     },
     *     {
     *         "sentence_id": 2,
     *         "text": "We have a competitive product, but we need to do a better job of selling it!",
     *         "tones": [{
     *             "score": 0.506763,
     *             "tone_id": "analytical",
     *             "tone_name": "Analytical"
     *         }]
     *     }
     *     ]
     * }
     * @see {@link https://cloud.ibm.com/apidocs/tone-analyzer?code=node#tone|Tone Analyzer} documentation provides details on the response object.
     * @async
     */
    analyzeTone(text) {
        return __awaiter(this, void 0, void 0, function* () {
            this._assertCapability(TJBot.CAPABILITIES.ANALYZE_TONE);
            const params = {
                toneInput: { text },
                contentType: 'application/json',
            };
            try {
                const body = yield this._toneAnalyzer.tone(params);
                winston_1.default.silly(`response from _toneAnalyzer.tone(): ${body}`);
                // unwrap the SDK envelope; callers get the service payload only
                return body.result;
            }
            catch (err) {
                winston_1.default.error(`the ${TJBot.SERVICES.TONE_ANALYZER} service returned an error.`, err);
                throw err;
            }
        });
    }
    /** ------------------------------------------------------------------------ */
    /** CONVERSE */
    /** ------------------------------------------------------------------------ */
    /**
     * Take a conversational turn in the conversation.
     * @param {string} message The message to send to the Assistant service.
     * @return {object} Returns an object with two keys: `object` contains the full Assistant response object, and `description` contains the string response.
     * @example
     * response = {
     *     "object": {conversation response object},
     *     "description": "hello, how are you"
     * }
     * @see {@link https://cloud.ibm.com/apidocs/assistant/assistant-v2?code=node#message|Assistant} documentation provides details on the response object.
     * @async
     */
    converse(message) {
        return __awaiter(this, void 0, void 0, function* () {
            this._assertCapability(TJBot.CAPABILITIES.CONVERSE);
            // set up the session if needed (one session is reused for the
            // lifetime of this TJBot instance)
            if (!this._assistantSessionId) {
                try {
                    winston_1.default.silly(`creating assistant session, sessionId: ${this.configuration.converse.assistantId}`);
                    const body = yield this._assistant.createSession({
                        assistantId: this.configuration.converse.assistantId,
                    });
                    winston_1.default.silly(`response from _assistant.createSession(): ${body}`);
                    this._assistantSessionId = body.result.session_id;
                }
                catch (err) {
                    winston_1.default.error(`error creating session for ${TJBot.SERVICES.ASSISTANT} service. please check that tj.configuration.converse.assistantId is defined.`);
                    throw err;
                }
            }
            // define the conversational turn
            const turn = {
                assistantId: this.configuration.converse.assistantId,
                sessionId: this._assistantSessionId,
                input: {
                    message_type: 'text',
                    text: message,
                },
            };
            // send to Assistant service
            try {
                const body = yield this._assistant.message(turn);
                winston_1.default.silly(`response from _assistant.message(): ${JSON.stringify(body)}`);
                const { result } = body;
                // this might not be necessary but in the past, conversational replies
                // came in through result.output.text, not result.output.generic
                let response;
                if (result.output.generic) {
                    response = result.output.generic;
                }
                else if (result.output.text) {
                    response = result.output.text;
                }
                // NOTE(review): if neither branch above matched, `response` is
                // undefined and `.length` below would throw — presumably the
                // service always returns one of the two; confirm against the API
                const responseText = response.length > 0 ? response[0].text : '';
                const assistantResponse = {
                    object: result.output,
                    description: responseText,
                };
                winston_1.default.verbose(`received response from assistant: ${JSON.stringify(responseText)}`);
                return assistantResponse;
            }
            catch (err) {
                winston_1.default.error(`the ${TJBot.SERVICES.ASSISTANT} service returned an error.`, err);
                throw err;
            }
        });
    }
    /** ------------------------------------------------------------------------ */
    /** LISTEN */
    /** ------------------------------------------------------------------------ */
    /**
     * Listen for a spoken utterance.
     * @async
     */
    listen() {
        return __awaiter(this, void 0, void 0, function* () {
            // make sure we can listen
            this._assertCapability(TJBot.CAPABILITIES.LISTEN);
            // lazy create the sttTextStream
            if (this._sttTextStream === undefined) {
                // initialize the microphone because if stopListening() was called, we don't seem to
                // be able to re-use the microphone twice
                this._setupMicrophone();
                // create the microphone -> STT recognizer stream
                // see this page for additional documentation on the STT configuration parameters:
                // https://cloud.ibm.com/apidocs/speech-to-text?code=node#recognize-audio-websockets-
                const params = {
                    objectMode: false,
                    contentType: 'audio/l16; rate=16000; channels=1',
                    model: `${this.configuration.listen.language}_BroadbandModel`,
                    inactivityTimeout: this.configuration.listen.inactivityTimeout || 60,
                    interimResults: true,
                    backgroundAudioSuppression: this.configuration.listen.backgroundAudioSuppression || 0.0,
                };
                winston_1.default.silly(`recognizeUsingWebSocket() params: ${JSON.stringify(params)}`);
                // Create the stream.
                this._recognizeStream = this._stt.recognizeUsingWebSocket(params);
                this._recognizeStream.setEncoding('utf8');
                // create the mic -> STT recognizer -> text stream
                this._sttTextStream = this._micInputStream.pipe(this._recognizeStream);
                this._sttTextStream.setEncoding('utf8');
                // start the microphone
                this._mic.start();
                // handle errors
                this._sttTextStream.on('error', (err) => {
                    winston_1.default.error('an error occurred in the STT text stream', err);
                });
            }
            // wait for exactly one transcript chunk from the STT text stream
            const fd = this._sttTextStream;
            const end = new bluebird_1.default((resolve) => {
                fd.once('data', resolve);
            });
            const transcript = yield end;
            winston_1.default.info(`TJBot heard: "${transcript.trim()}"`);
            return transcript.trim();
        });
    }
    /**
     * Internal method for pausing listening, used when
     * we want to play a sound but we don't want to assert
     * the 'listen' capability.
     * @private
     */
    _pauseListening() {
        // no-op when the microphone was never set up
        if (this._mic !== undefined) {
            winston_1.default.verbose('listening paused');
            this._mic.pause();
        }
    }
    /**
     * Internal method for resuming listening, used when
     * we want to play a sound but we don't want to assert
     * the 'listen' capability.
     * @private
     */
    _resumeListening() {
        // no-op when the microphone was never set up
        if (this._mic !== undefined) {
            winston_1.default.verbose('listening resumed');
            this._mic.resume();
        }
    }
    /** ------------------------------------------------------------------------ */
    /** SEE */
    /** ------------------------------------------------------------------------ */
    /**
     * Take a picture and identify the objects present.
     * @param {array=} classifierIds (optional) List of classifier IDs to use in the Visual Recognition service.
     * @return {object} Returns a list of objects seen and their confidences.
     * @example
     * response = {
     *     "images": [{
     *         "classifiers": [{
     *             "classifier_id": "roundPlusBanana_1758279329",
     *             "name": "roundPlusBanana",
     *             "classes": [{
     *                 "class": "fruit",
     *                 "score": 0.788
     *             },
     *             {
     *                 "class": "olive color",
     *                 "score": 0.973
     *             },
     *             {
     *                 "class": "lemon yellow color",
     *                 "score": 0.789
     *             }
     *             ]
     *         }],
     *         "image": "fruitbowl.jpg"
     *     }],
     *     "images_processed": 1,
     *     "custom_classes": 6
     * }
     * @see {@link https://cloud.ibm.com/apidocs/visual-recognition/visual-recognition-v3?code=node#classify|Visual Recognition}
     * documentation provides details on the response object. The response object returned by
     * `see()` corresponds to `response.images[0].classifiers[0].classes` from Visual Recognition.
+ * @async + */ + see(classifierIds = []) { + return __awaiter(this, void 0, void 0, function* () { + this._assertCapability(TJBot.CAPABILITIES.SEE); + let filePath; + let objects; + try { + winston_1.default.verbose('taking a photo with the camera'); + filePath = yield this.takePhoto(); + } + catch (err) { + winston_1.default.error('an error occured taking a photo', err); + throw err; + } + try { + objects = yield this.recognizeObjectsInPhoto(filePath, classifierIds); + } + catch (err) { + winston_1.default.error(`the ${TJBot.SERVICES.VISUAL_RECOGNITION} service returned an error`, err); + throw err; + } + return objects; + }); + } + /** + * Recognize objects in a given photo. + * @param {string} filePath Path to the photo file. + * @param {array=} classifierIds (optional) List of classifier IDs to use in the Visual Recognition service. + * @return {object} Returns a list of objects seen and their confidences. + * @see {@link https://cloud.ibm.com/apidocs/visual-recognition/visual-recognition-v3?code=node#classify|Visual Recognition} + * documentation provides details on the response object. The response object returned by + * `see()` corresponds to `response.images[0].classifiers[0].classes` from Visual Recognition. 
     * @async
     */
    recognizeObjectsInPhoto(filePath, classifierIds = []) {
        return __awaiter(this, void 0, void 0, function* () {
            this._assertCapability(TJBot.CAPABILITIES.SEE);
            winston_1.default.verbose(`sending image to the ${TJBot.SERVICES.VISUAL_RECOGNITION} service to recognize objects`);
            const params = {
                imagesFile: fs_1.default.createReadStream(filePath),
                threshold: this.configuration.see.confidenceThreshold || 0.6,
                acceptLanguage: this.configuration.see.language || 'en',
            };
            if (classifierIds !== undefined && classifierIds.length > 0) {
                params.classifierIds = classifierIds;
                // params.owners = ['me']; // the API docs say this is not necessary to set when specifying classifierIds
            }
            try {
                const body = yield this._visualRecognition.classify(params);
                winston_1.default.silly(`response from _visualRecognition.classify() ${JSON.stringify(body)}`);
                // unwrap to the list of recognized classes for the first image
                const result = body.result.images[0].classifiers[0].classes;
                return result;
            }
            catch (err) {
                winston_1.default.error(`the ${TJBot.SERVICES.VISUAL_RECOGNITION} service returned an error`, err);
                throw err;
            }
        });
    }
    /**
     * Capture an image and save it in the given path.
     * @param {string=} filePath (optional) Path at which to save the photo file. If not
     * specified, photo will be saved in a temp location.
     * @return {string} Path at which the photo was saved.
     * @async
     */
    takePhoto(filePath = '') {
        return __awaiter(this, void 0, void 0, function* () {
            this._assertCapability(TJBot.CAPABILITIES.SEE);
            // delegate to the internal helper, which skips the capability check
            return this._takePhoto(filePath);
        });
    }
    /**
     * Internal method to capture an image at the given path. Used to avoid triggering
     * the check for an apikey for Watson Visual Recognition in _assertCapability()
     * during testing.
     * @param {string=} filePath (optional) Path at which to save the photo file. If not
     * specified, photo will be saved in a temp location.
     * @return {string} Path at which the photo was saved.
     * @private
     * @async
     */
    _takePhoto(filePath = '') {
        return __awaiter(this, void 0, void 0, function* () {
            let fp = filePath;
            let path = '';
            let name = '';
            // if no file path provided, save to temp location
            if (fp === '') {
                fp = temp_1.default.path({
                    prefix: 'tjbot',
                    suffix: '.jpg',
                });
            }
            winston_1.default.verbose(`capturing image at path: ${fp}`);
            path = fp.lastIndexOf('/') > 0 ? fp.substring(0, fp.lastIndexOf('/')) : '.'; // save to current dir if no directory provided.
            name = fp.substring(fp.lastIndexOf('/') + 1);
            name = name.replace('.jpg', ''); // the node raspistill lib already adds encoding .jpg to file.
            winston_1.default.silly(`image path: ${path}, image filename: ${name}`);
            // set the configuration options, which may have changed since the camera was initialized
            this._camera.setOptions({
                outputDir: path,
                fileName: name,
                width: this.configuration.see.camera.width,
                height: this.configuration.see.camera.height,
                verticalFlip: this.configuration.see.camera.verticalFlip,
                horizontalFlip: this.configuration.see.camera.horizontalFlip,
            });
            winston_1.default.silly(`camera options: ${JSON.stringify(this._camera.getOptions())}`);
            try {
                yield this._camera.takePhoto();
                // return the originally-computed path (raspistill re-appends .jpg)
                return fp;
            }
            catch (err) {
                winston_1.default.error('error taking picture', err);
                throw err;
            }
        });
    }
    /** ------------------------------------------------------------------------ */
    /** SHINE */
    /** ------------------------------------------------------------------------ */
    /**
     * Change the color of the LED.
     * @param {string} color The color to shine the LED. May be specified in a number of
     * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off",
     * "random", or may be a named color in the `colornames` package. Hexadecimal colors
     * follow an #RRGGBB format.
     * @see {@link https://github.com/timoxley/colornames|Colornames} for a list of color names.
+ */ + shine(color) { + this._assertCapability(TJBot.CAPABILITIES.SHINE); + // normalize the color + const c = this._normalizeColor(color); + // shine! will shine on both LEDs if they are both set up + if (this._commonAnodeLed) { + this._renderCommonAnodeLed(c); + } + if (this._neopixelLed) { + const colors = new Uint32Array(1); + if (this.configuration.shine.neopixel.grbFormat) { + // convert to the 0xGGRRBB format for the LED + const grb = `0x${c[3]}${c[4]}${c[1]}${c[2]}${c[5]}${c[6]}`; + winston_1.default.verbose(`shining my LED to GRB color ${grb}`); + colors[0] = parseInt(grb, 16); + } + else { + // convert to the 0xRRGGBB format for the LED + const rgb = `0x${c[1]}${c[2]}${c[3]}${c[4]}${c[5]}${c[6]}`; + winston_1.default.verbose(`shining my LED to RGB color ${rgb}`); + colors[0] = parseInt(rgb, 16); + } + this._neopixelLed.render(colors); + } + } + /** + * Pulse the LED a single time. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @param {float=} duration The duration the pulse should last. The duration should be in + * the range [0.5, 2.0] seconds. + * @see {@link https://github.com/timoxley/colornames|Colornames} for a list of color names. 
     * @async
     */
    pulse(color, duration = 1.0) {
        return __awaiter(this, void 0, void 0, function* () {
            this._assertCapability(TJBot.CAPABILITIES.SHINE);
            if (duration < 0.5) {
                throw new Error('TJBot does not recommend pulsing for less than 0.5 seconds.');
            }
            if (duration > 2.0) {
                throw new Error('TJBot does not recommend pulsing for more than 2 seconds.');
            }
            // number of easing steps
            const numSteps = 20;
            // quadratic in-out easing
            const easeInOutQuad = (t, b, c, d) => {
                if ((t / d / 2) < 1) {
                    return (c / 2) * (t / d) * (t / d) + b;
                }
                return (-c / 2) * ((t - 1) * (t - 3) - 1) + b;
            };
            let ease = [];
            for (let i = 0; i < numSteps; i += 1) {
                ease.push(i);
            }
            ease = ease.map((x, i) => easeInOutQuad(i, 0, 1, ease.length));
            // normalize to 'duration' msec
            ease = ease.map((x) => Math.round(x * duration * 1000));
            // convert to deltas
            const easeDelays = [];
            for (let i = 0; i < ease.length - 1; i += 1) {
                easeDelays[i] = ease[i + 1] - ease[i];
            }
            // color ramp
            const rgb = this._normalizeColor(color).slice(1); // remove the #
            const hex = new color_model_1.default.HexRgb(rgb);
            const colorRamp = [];
            for (let i = 0; i < numSteps / 2; i += 1) {
                const l = 0.0 + (i / (numSteps / 2)) * 0.5;
                colorRamp[i] = hex.toHsl().lightness(l).toRgb().toHexString()
                    .replace('#', '0x');
            }
            // perform the ease: ramp up through colorRamp, then mirror back down
            for (let i = 0; i < easeDelays.length; i += 1) {
                const c = i < colorRamp.length
                    ? colorRamp[i]
                    : colorRamp[colorRamp.length - 1 - (i - colorRamp.length) - 1];
                this.shine(c);
                // NOTE(review): TJBot.sleep() is synchronous (blocks the event loop);
                // the eslint-disable below is therefore vestigial
                // eslint-disable-next-line no-await-in-loop
                TJBot.sleep(easeDelays[i]);
            }
        });
    }
    /**
     * Get the list of all colors recognized by TJBot.
     * @return {array} List of all named colors recognized by `shine()` and `pulse()`.
     */
    shineColors() {
        this._assertCapability(TJBot.CAPABILITIES.SHINE);
        return colornames_1.default.all().map((elt) => elt.name);
    }
    /**
     * Get a random color.
     * @return {string} Random named color.
+ */ + randomColor() { + this._assertCapability(TJBot.CAPABILITIES.SHINE); + const colors = this.shineColors(); + const randIdx = Math.floor(Math.random() * colors.length); + const randColor = colors[randIdx]; + return randColor; + } + /** + * Normalize the given color to #RRGGBB. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @return {string} Hex string corresponding to the given color (e.g. "#RRGGBB") + * @private + */ + _normalizeColor(color) { + let normColor = color; + // assume undefined == "off" + if (normColor === undefined) { + normColor = 'off'; + } + // is this "on" or "off"? + if (normColor === 'on') { + normColor = 'FFFFFF'; + } + else if (normColor === 'off') { + normColor = '000000'; + } + else if (normColor === 'random') { + normColor = this.randomColor(); + } + // strip prefixes if they are present + if (normColor.startsWith('0x')) { + normColor = normColor.slice(2); + } + if (normColor.startsWith('#')) { + normColor = normColor.slice(1); + } + // is this a hex number or a named color? + const isHex = /(^[0-9A-F]{6}$)|(^[0-9A-F]{3}$)/i; + let rgb; + if (!isHex.test(normColor)) { + rgb = (0, colornames_1.default)(normColor); + } + else { + rgb = normColor; + } + // did we get something back? + if (rgb === undefined) { + throw new Error(`TJBot did not understand the specified color "${color}"`); + } + // prefix rgb with # in case it's not + if (!rgb.startsWith('#')) { + rgb = `#${rgb}`; + } + // throw an error if we didn't understand this color + if (rgb.length !== 7) { + throw new Error(`TJBot did not understand the specified color "${color}"`); + } + return rgb; + } + /** + * Convert hex color code to RGB value. + * @param {string} hexColor Hex color code + * @return {array} RGB color (e.g. 
(255, 128, 128)) + * @private + */ + // eslint-disable-next-line class-methods-use-this + _convertHexToRgbColor(hexColor) { + return hexColor.replace(/^#?([a-f\d])([a-f\d])([a-f\d])$/i, (m, r, g, b) => `#${r}${r}${g}${g}${b}${b}`) + .substring(1).match(/.{2}/g) + .map((x) => parseInt(x, 16)); + } + /** + * Render the given rgb color for the common anode led. + * @param {string} hexColor Color in hex format + * @private + */ + _renderCommonAnodeLed(hexColor) { + const rgb = this._convertHexToRgbColor(hexColor); + this._commonAnodeLed.redPin.pwmWrite(rgb[0] == null ? 255 : 255 - rgb[0]); + this._commonAnodeLed.greenPin.pwmWrite(rgb[1] == null ? 255 : 255 - rgb[1]); + this._commonAnodeLed.bluePin.pwmWrite(rgb[2] == null ? 255 : 255 - rgb[2]); + } + /** ------------------------------------------------------------------------ */ + /** SPEAK */ + /** ------------------------------------------------------------------------ */ + /** + * Speak a message. + * @param {string} message The message to speak. + * @async + */ + speak(message) { + return __awaiter(this, void 0, void 0, function* () { + this._assertCapability(TJBot.CAPABILITIES.SPEAK); + // make sure we're trying to say something + if (message === undefined || message === '') { + winston_1.default.error('TJBot tried to speak an empty message.'); + return; // exit if there's nothing to say! 
+ } + // default voice + let voice = 'en-US_MichaelV3Voice'; + // check to see if the user has specified a voice + if (this.configuration.speak.voice !== undefined) { + winston_1.default.silly(`using voice specified in configuration: ${this.configuration.speak.voice}`); + voice = this.configuration.speak.voice; + } + else if (this.configuration.speak.language === TJBot.LANGUAGES.SPEAK.ENGLISH_US) { + // force MichaelV3 if the language is en-US + voice = 'en-US_MichaelV3Voice'; + winston_1.default.silly(`forcing ${voice} since the language is English`); + } + else { + winston_1.default.silly(`finding voice that matches gender ${this.configuration.robot.gender} and language ${this.configuration.speak.language}`); + // load voices if they haven't been loaded yet + if (!this._ttsVoices) { + winston_1.default.verbose('loading TTS voices…'); + const body = yield this._tts.listVoices(); + winston_1.default.silly(`response from _tts.listVoices(): ${JSON.stringify(body)}`); + this._ttsVoices = body.result.voices; + winston_1.default.verbose('TTS voices loaded'); + } + // first figure out which voices will work for speak.langauge + const { language } = this.configuration.speak; + const languageMatches = this._ttsVoices.filter((v) => v.language === language); + winston_1.default.silly(`candidate TTS voices from language match: ${JSON.stringify(languageMatches)}`); + // now use *at least* a voice in the correct language + // note that Watson TTS doesn't always return voices in the same order, so + // this won't always pick the same voice every time + if (languageMatches.length > 0) { + voice = languageMatches[0].name; + winston_1.default.silly(`provisionally selected TTS voice ${voice} to ensure language match`); + } + // finally, see if we have a gender match with robot.gender + const { gender } = this.configuration.robot; + const languageAndGenderMatches = languageMatches.sort((a, b) => a.name < b.name).filter((v) => v.gender === gender); + if 
(languageAndGenderMatches.length > 0) { + voice = languageAndGenderMatches[0].name; + winston_1.default.silly(`final selection of TTS voice ${voice} due to language and gender match`); + } + winston_1.default.silly(`selected ${voice} as the ${this.configuration.robot.gender} voice for ${this.configuration.speak.language} `); + } + winston_1.default.verbose(`TJBot speaking with voice ${voice}`); + const params = { + text: message, + voice, + accept: 'audio/wav', + }; + const info = temp_1.default.openSync('tjbot'); + const response = yield this._tts.synthesize(params); + // pipe the audio buffer to a file + winston_1.default.silly('writing audio buffer to temp file', info.path); + const fd = fs_1.default.createWriteStream(info.path); + response.result.pipe(fd); + // wait for the pipe to finish writing + const end = new bluebird_1.default((resolve, reject) => { + fd.on('close', resolve); + fd.on('error', reject); + }); + yield end; + // now play it + winston_1.default.info(`TJBot speaking: ${message}`); + yield this.play(info.path); + }); + } + /** + * Play a sound at the specified path. + * @param {string} soundFile The path to the sound file to be played. 
     * @async
     */
    play(soundFile) {
        return __awaiter(this, void 0, void 0, function* () {
            // pause listening while we play a sound -- using the internal
            // method to avoid a capability check (and potential fail if the TJBot
            // isn't configured to listen)
            this._pauseListening();
            // if we don't have a speaker, throw an error
            if (this._soundplayer === undefined) {
                throw new Error('unable to play audio, TJBot hardware doesn\'t include a "speaker"');
            }
            // initialize soundplayer lib
            const params = {
                filename: soundFile,
                gain: 100,
                debug: true,
                player: 'aplay',
                device: this.configuration.speak.speakerDeviceId,
            };
            const player = new this._soundplayer(params);
            winston_1.default.silly('playing audio with parameters: ', params);
            // capture 'this' context
            const self = this;
            player.on('complete', () => {
                winston_1.default.silly('audio playback finished');
                // resume listening
                self._resumeListening();
            });
            player.on('error', (err) => {
                winston_1.default.error('error occurred while playing audio', err);
            });
            // play the audio
            player.play(soundFile);
            // wait for the audio to finish playing, either by completing playback or by throwing an error
            yield bluebird_1.default.race([(0, events_1.once)(player, 'complete'), (0, events_1.once)(player, 'error')]);
        });
    }
    /** ------------------------------------------------------------------------ */
    /** TRANSLATE */
    /** ------------------------------------------------------------------------ */
    /**
     * Translates the given text from the source language to the target language.
     *
     * @param {string} text The text to translate.
     * @param {string} sourceLanguage The source language (e.g. "en" for English).
     * @param {string} targetLanguage The target language (e.g. "es" for Spanish).
     * @return {object} The response object from the Language Translator service.
+ * @example + * response = { + * "object": { + * "translations": [{ + * "translation": "Hola, mi nombre es TJBot!" + * }], + * "word_count": 7, + * "character_count": 25 + * }, + * "description": "Hola, mi nombre es TJBot!" + * } + * @see Use {@link #TJBot+isTranslatable} to determine whether lanuage can be translated from + * the `sourceLanguage` to `targetLanguage`. + * @see {@link https://cloud.ibm.com/apidocs/language-translator?code=node#translate|Language Translator} + * documentation provides details on the response object. + * @async + */ + translate(text, sourceLanguage, targetLanguage) { + return __awaiter(this, void 0, void 0, function* () { + this._assertCapability(TJBot.CAPABILITIES.TRANSLATE); + const params = { + text, + source: sourceLanguage, + target: targetLanguage, + }; + let translation; + try { + const body = yield this._languageTranslator.translate(params); + winston_1.default.silly(`response from _languageTranslator.translate(): ${JSON.stringify(body)}`); + translation = body.result; + } + catch (err) { + winston_1.default.error(`the ${TJBot.SERVICES.LANGUAGE_TRANSLATOR} service returned an error`, err); + throw err; + } + if (Object.prototype.hasOwnProperty.call(translation, 'translations')) { + if (translation.translations.length > 0 + && Object.prototype.hasOwnProperty.call(translation.translations[0], 'translation')) { + return { + object: translation, + description: translation.translations[0].translation, + }; + } + } + return { + object: translation, + description: '', + }; + }); + } + /** + * Identifies the language of the given text. + * @param {string} text The text to identify. + * @return {object} Returns a response object from the Language Translator service. 
+ * @example + * response = { + * "languages": [{ + * "language": "en", + * "confidence": 0.9804833843796723 + * }, + * { + * "language": "nn", + * "confidence": 0.005988721319786277 + * }, + * { + * "language": "sq", + * "confidence": 0.0036927759389060203 + * }, + * { + * "language": "nb", + * "confidence": 0.0035802051870239037 + * } + * ] + * } + * @see {@link https://cloud.ibm.com/apidocs/language-translator?code=node#identify|Language Translator} + * documentation provides details on the response object. + * @async + */ + identifyLanguage(text) { + return __awaiter(this, void 0, void 0, function* () { + this._assertCapability(TJBot.CAPABILITIES.TRANSLATE); + const params = { + text, + }; + let identifiedLanguages; + try { + const body = yield this._languageTranslator.identify(params); + winston_1.default.silly(`response from _langaugeTranslator.identify(): ${JSON.stringify(body)}`); + identifiedLanguages = body.result; + } + catch (err) { + winston_1.default.error(`the ${TJBot.SERVICES.LANGUAGE_TRANSLATOR} service returned an error`, err); + throw err; + } + return identifiedLanguages; + }); + } + /** + * Determines if TJBot can translate from the source language to the target language. + * @param {string} sourceLanguage The source language (e.g. "en" for English). + * @param {string} targetLanguage The target language (e.g. "es" for Spanish). + * @return {bool} True if the `sourceLanguage` can be translated to the + * `targetLanguage`, false otherwise. 
+ * @async + */ + isTranslatable(sourceLanguage, targetLanguage) { + return __awaiter(this, void 0, void 0, function* () { + this._assertCapability(TJBot.CAPABILITIES.TRANSLATE); + // lazy load of language translation models… + if (this._translationModels === undefined) { + winston_1.default.verbose('loading language models...'); + this._translationModels = yield this._loadLanguageTranslationModels(); + winston_1.default.verbose('language models loaded'); + } + if (this._translationModels[sourceLanguage] !== undefined) { + return this._translationModels[sourceLanguage].includes(targetLanguage); + } + return false; + }); + } + /** + * Returns a list of languages that can TJBot can translate to from the given language. + * @param {string} sourceLanguage The source language (e.g. "en" for English) + * @return {array} List of languages that TJBot can translate to from the source langauge + */ + translatableLanguages(sourceLanguage) { + return __awaiter(this, void 0, void 0, function* () { + this._assertCapability(TJBot.CAPABILITIES.TRANSLATE); + // lazy load of language translation models… + if (this._translationModels === undefined) { + winston_1.default.verbose('loading language models...'); + this._translationModels = yield this._loadLanguageTranslationModels(); + winston_1.default.verbose('language models loaded'); + } + if (this._translationModels[sourceLanguage] !== undefined) { + return this._translationModels[sourceLanguage]; + } + return []; + }); + } + /** + * Returns the name of the given language code. + * @param {string} languageCode Two-character language code (e.g. "en") + * @return {string} Name of the language (e.g. "English"), or undefined if the language is unknown. 
+ */ + // eslint-disable-next-line class-methods-use-this + languageForCode(languageCode) { + switch (languageCode.toLowerCase()) { + case 'ar': + return 'Arabic'; + case 'de': + return 'German'; + case 'en': + return 'English'; + case 'es': + return 'Spanish'; + case 'fr': + return 'French'; + case 'it': + return 'Italian'; + case 'ja': + return 'Japanese'; + case 'ko': + return 'Korean'; + case 'nl': + return 'Dutch'; + case 'pt': + return 'Portuguese'; + case 'zh': + return 'Chinese'; + default: + return undefined; + } + } + /** + * Returns the two-letter code for the given language. + * @param {string} language Name of the language (e.g. "English") + * @return {string} Two-letter language code for the language (e.g. "en"), or undefined if the language code is unknown. + */ + // eslint-disable-next-line class-methods-use-this + codeForLanguage(language) { + switch (language.toLowerCase()) { + case 'arabic': + return 'ar'; + case 'german': + return 'de'; + case 'english': + return 'en'; + case 'spanish': + return 'es'; + case 'french': + return 'fr'; + case 'italian': + return 'it'; + case 'japanese': + return 'ja'; + case 'korean': + return 'ko'; + case 'dutch': + return 'nl'; + case 'portuguese': + return 'pt'; + case 'chinese': + return 'zh'; + default: + return undefined; + } + } + /** + * Loads the list of language models that can be used for translation. 
+ * @private + * @async + */ + _loadLanguageTranslationModels() { + return __awaiter(this, void 0, void 0, function* () { + let models; + try { + const body = yield this._languageTranslator.listModels({}); + winston_1.default.silly(`response from _languageTranslator.listModels(): ${JSON.stringify(body)}`); + models = body.result; + } + catch (err) { + winston_1.default.error(`the ${TJBot.SERVICES.LANGUAGE_TRANSLATOR} service returned an error`, err); + throw err; + } + const translations = {}; + if (Object.prototype.hasOwnProperty.call(models, 'models')) { + models.models.forEach((model) => { + if (translations[model.source] === undefined) { + translations[model.source] = []; + } + if (!translations[model.source].includes(model.target)) { + translations[model.source].push(model.target); + } + }); + } + return translations; + }); + } + /** ------------------------------------------------------------------------ */ + /** WAVE */ + /** ------------------------------------------------------------------------ */ + /** + * Moves TJBot's arm all the way back. If this method doesn't move the arm all the way back, the servo motor stop point defined in TJBot.SERVO.ARM_BACK may need to be overridden. Valid servo values are in the range [500, 2300]. + * @example tj.armBack() + */ + armBack() { + // make sure we have an arm + this._assertCapability(TJBot.CAPABILITIES.WAVE); + winston_1.default.info("Moving TJBot's arm back"); + this._motor.servoWrite(TJBot.SERVO.ARM_BACK); + } + /** + * Raises TJBot's arm. If this method doesn't move the arm all the way back, the servo motor stop point defined in TJBot.SERVO.ARM_UP may need to be overridden. Valid servo values are in the range [500, 2300]. + * @example tj.raiseArm() + */ + raiseArm() { + // make sure we have an arm + this._assertCapability(TJBot.CAPABILITIES.WAVE); + winston_1.default.info("Raising TJBot's arm"); + this._motor.servoWrite(TJBot.SERVO.ARM_UP); + } + /** + * Lowers TJBot's arm. 
 If this method doesn't move the arm all the way down, the servo motor stop point defined in TJBot.SERVO.ARM_DOWN may need to be overridden. Valid servo values are in the range [500, 2300].
     * @example tj.lowerArm()
     */
    lowerArm() {
        // make sure we have an arm
        this._assertCapability(TJBot.CAPABILITIES.WAVE);
        winston_1.default.info("Lowering TJBot's arm");
        this._motor.servoWrite(TJBot.SERVO.ARM_DOWN);
    }
    /**
     * Waves TJBot's arm once (up, down, up again).
     * NOTE(review): TJBot.sleep() blocks between servo writes even though this
     * method is async -- the event loop stalls for ~600 msec during a wave.
     * @example tj.wave()
     * @async
     */
    wave() {
        return __awaiter(this, void 0, void 0, function* () {
            this._assertCapability(TJBot.CAPABILITIES.WAVE);
            winston_1.default.info("Waving TJBot's arm");
            const delay = 200; // msec to hold each servo position
            this._motor.servoWrite(TJBot.SERVO.ARM_UP);
            TJBot.sleep(delay);
            this._motor.servoWrite(TJBot.SERVO.ARM_DOWN);
            TJBot.sleep(delay);
            this._motor.servoWrite(TJBot.SERVO.ARM_UP);
            TJBot.sleep(delay);
        });
    }
}
/**
 * TJBot library version
 * @readonly
*/
TJBot.VERSION = 'v2.0.2';
/**
 * TJBot capabilities
 * @readonly
 * @enum {string}
 */
TJBot.CAPABILITIES = {
    ANALYZE_TONE: 'analyze_tone',
    CONVERSE: 'converse',
    LISTEN: 'listen',
    SEE: 'see',
    SHINE: 'shine',
    SPEAK: 'speak',
    TRANSLATE: 'translate',
    WAVE: 'wave',
};
/**
 * TJBot hardware
 * @readonly
 * @enum {string}
 */
TJBot.HARDWARE = {
    CAMERA: 'camera',
    LED_NEOPIXEL: 'led_neopixel',
    LED_COMMON_ANODE: 'led_common_anode',
    MICROPHONE: 'microphone',
    SERVO: 'servo',
    SPEAKER: 'speaker',
};
/**
 * TJBot Watson services
 * @readonly
 * @enum {string}
 */
TJBot.SERVICES = {
    ASSISTANT: 'assistant',
    LANGUAGE_TRANSLATOR: 'language_translator',
    SPEECH_TO_TEXT: 'speech_to_text',
    TEXT_TO_SPEECH: 'text_to_speech',
    TONE_ANALYZER: 'tone_analyzer',
    VISUAL_RECOGNITION: 'visual_recognition',
};
/**
 * TJBot languages for listening, speaking, and seeing
 * @readonly
 * @enum {string}
 */
TJBot.LANGUAGES = {
    // https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models
    LISTEN: {
        ARABIC: 'ar-AR',
        CHINESE: 'zh-CN',
        ENGLISH_UK: 'en-GB',
        ENGLISH_US: 'en-US',
        FRENCH: 'fr-FR',
        GERMAN: 'de-DE',
        ITALIAN: 'it-IT',
        JAPANESE: 'ja-JP',
        KOREAN: 'ko-KR',
        PORTUGUESE: 'pt-BR',
        SPANISH: 'es-ES',
    },
    // https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices
    SPEAK: {
        ARABIC: 'ar-AR',
        CHINESE: 'zh-CN',
        DUTCH: 'nl-NL',
        ENGLISH_GB: 'en-GB',
        ENGLISH_US: 'en-US',
        FRENCH: 'fr-FR',
        GERMAN: 'de-DE',
        ITALIAN: 'it-IT',
        JAPANESE: 'ja-JP',
        KOREAN: 'ko-KR',
        PORTUGUESE: 'pt-BR',
        SPANISH: 'es-ES',
    },
    // https://cloud.ibm.com/docs/visual-recognition?topic=visual-recognition-language-support-top
    SEE: {
        CHINESE: 'zh-cn',
        ENGLISH: 'en',
        FRENCH: 'fr',
        GERMAN: 'de',
        ITALIAN: 'it',
        JAPANESE: 'ja',
        KOREAN: 'ko',
        PORTUGUESE: 'pt-br',
        SPANISH: 'es',
    },
};
/**
 * TJBot genders, used to pick a voice when speaking
 * @readonly
 * @enum {string}
 */
TJBot.GENDERS = {
    MALE: 'male',
    FEMALE: 'female',
};
/**
 * TJBot servo motor stop positions
 * @readonly
 * @enum {int}
 */
TJBot.SERVO = {
    ARM_BACK: 500,
    ARM_UP: 1400,
    ARM_DOWN: 2300,
};
/**
 * TJBot default configuration
 * @readonly
 */
TJBot.DEFAULT_CONFIG = {
    log: {
        level: 'info', // valid levels are 'error', 'warn', 'info', 'verbose', 'debug', 'silly'
    },
    robot: {
        gender: TJBot.GENDERS.MALE, // see TJBot.GENDERS
    },
    converse: {
        assistantId: undefined, // placeholder for Watson Assistant's assistantId
    },
    listen: {
        microphoneDeviceId: 'plughw:1,0', // ALSA card 1, device 0; see 'arecord -l' for a list of recording devices
        inactivityTimeout: -1, // -1 means never time out due to inactivity
        backgroundAudioSuppression: 0.4,
        language: TJBot.LANGUAGES.LISTEN.ENGLISH_US, // see TJBot.LANGUAGES.LISTEN
    },
    wave: {
        servoPin: 7, // default pin is GPIO 7 (physical pin 26)
    },
    speak: {
        language: TJBot.LANGUAGES.SPEAK.ENGLISH_US,
        voice: undefined,
        speakerDeviceId: 'plughw:0,0', // ALSA card 0, device 0 (comment previously said card 1); see 'aplay -l' for a list of playback devices
    },
    see: {
        confidenceThreshold: 0.6,
        camera: {
            height: 720,
width: 960, + verticalFlip: false, + horizontalFlip: false, // flips the image horizontally, should not need to be overridden + }, + language: TJBot.LANGUAGES.SEE.ENGLISH_US, + }, + shine: { + // see https://pinout.xyz for a pin diagram + neopixel: { + gpioPin: 18, + grbFormat: false, // if false, the RGB color format will be used for the LED; if true, the GRB format will be used + }, + commonAnode: { + redPin: 19, + greenPin: 13, + bluePin: 12, // default blue pin is GPIO 12 (physical pin 32) + }, + }, +}; +/** ------------------------------------------------------------------------ */ +/** MODULE EXPORTS */ +/** ------------------------------------------------------------------------ */ +/** + * Export TJBot! + */ +exports.default = TJBot; diff --git a/dist/mjs/package.json b/dist/mjs/package.json new file mode 100644 index 0000000..4720025 --- /dev/null +++ b/dist/mjs/package.json @@ -0,0 +1,3 @@ +{ + "type": "module" +} diff --git a/dist/mjs/tjbot.d.ts b/dist/mjs/tjbot.d.ts new file mode 100644 index 0000000..cdccd4f --- /dev/null +++ b/dist/mjs/tjbot.d.ts @@ -0,0 +1,666 @@ +export default TJBot; +/** +* Class representing a TJBot +*/ +declare class TJBot { + /** + * TJBot library version + * @readonly + */ + static readonly VERSION: "v2.0.2"; + /** + * TJBot capabilities + * @readonly + * @enum {string} + */ + static readonly CAPABILITIES: { + ANALYZE_TONE: string; + CONVERSE: string; + LISTEN: string; + SEE: string; + SHINE: string; + SPEAK: string; + TRANSLATE: string; + WAVE: string; + }; + /** + * TJBot hardware + * @readonly + * @enum {string} + */ + static readonly HARDWARE: { + CAMERA: string; + LED_NEOPIXEL: string; + LED_COMMON_ANODE: string; + MICROPHONE: string; + SERVO: string; + SPEAKER: string; + }; + /** + * TJBot Watson services + * @readonly + * @enum {string} + */ + static readonly SERVICES: { + ASSISTANT: string; + LANGUAGE_TRANSLATOR: string; + SPEECH_TO_TEXT: string; + TEXT_TO_SPEECH: string; + TONE_ANALYZER: string; + VISUAL_RECOGNITION: 
string; + }; + /** + * TJBot languages for listening, speaking, and seeing + * @readonly + * @enum {string} + */ + static readonly LANGUAGES: { + LISTEN: { + ARABIC: string; + CHINESE: string; + ENGLISH_UK: string; + ENGLISH_US: string; + FRENCH: string; + GERMAN: string; + ITALIAN: string; + JAPANESE: string; + KOREAN: string; + PORTUGUESE: string; + SPANISH: string; + }; + SPEAK: { + ARABIC: string; + CHINESE: string; + DUTCH: string; + ENGLISH_GB: string; + ENGLISH_US: string; + FRENCH: string; + GERMAN: string; + ITALIAN: string; + JAPANESE: string; + KOREAN: string; + PORTUGUESE: string; + SPANISH: string; + }; + SEE: { + CHINESE: string; + ENGLISH: string; + FRENCH: string; + GERMAN: string; + ITALIAN: string; + JAPANESE: string; + KOREAN: string; + PORTUGUESE: string; + SPANISH: string; + }; + }; + /** + * TJBot genders, used to pick a voice when speaking + * @readonly + * @enum {string} + */ + static readonly GENDERS: { + MALE: string; + FEMALE: string; + }; + /** + * TJBot servo motor stop positions + * @readonly + * @enum {int} + */ + static readonly SERVO: { + ARM_BACK: number; + ARM_UP: number; + ARM_DOWN: number; + }; + /** + * TJBot default configuration + * @readonly + */ + static readonly DEFAULT_CONFIG: { + log: { + level: string; + }; + robot: { + gender: string; + }; + converse: { + assistantId: undefined; + }; + listen: { + microphoneDeviceId: string; + inactivityTimeout: number; + backgroundAudioSuppression: number; + language: string; + }; + wave: { + servoPin: number; + }; + speak: { + language: string; + voice: undefined; + speakerDeviceId: string; + }; + see: { + confidenceThreshold: number; + camera: { + height: number; + width: number; + verticalFlip: boolean; + horizontalFlip: boolean; + }; + language: any; + }; + shine: { + neopixel: { + gpioPin: number; + grbFormat: boolean; + }; + commonAnode: { + redPin: number; + greenPin: number; + bluePin: number; + }; + }; + }; + /** 
------------------------------------------------------------------------ */ + /** UTILITY METHODS */ + /** ------------------------------------------------------------------------ */ + /** + * Put TJBot to sleep. + * @param {int} msec Number of milliseconds to sleep for (1000 msec == 1 sec). + */ + static sleep(msec: any): void; + /** + * TJBot constructor. After constructing a TJBot instance, call initialize() to configure its hardware. + * @param {object} configuration Configuration for the TJBot. See TJBot.DEFAULT_CONFIG for all configuration options. + * @param {string=} credentialsFile (optional) Path to the 'ibm-credentials.env' file containing authentication credentials for IBM Watson services. + * @return {TJBot} instance of the TJBot class + */ + constructor(configuration?: object, credentialsFile?: string | undefined); + configuration: { + log: { + level: string; + }; + robot: { + gender: string; + }; + converse: { + assistantId: undefined; + }; + listen: { + microphoneDeviceId: string; + inactivityTimeout: number; + backgroundAudioSuppression: number; + language: string; + }; + wave: { + servoPin: number; + }; + speak: { + language: string; + voice: undefined; + speakerDeviceId: string; + }; + see: { + confidenceThreshold: number; + camera: { + height: number; + width: number; + verticalFlip: boolean; + horizontalFlip: boolean; + }; + language: any; + }; + shine: { + neopixel: { + gpioPin: number; + grbFormat: boolean; + }; + commonAnode: { + redPin: number; + greenPin: number; + bluePin: number; + }; + }; + }; + /** + * @param {array} hardware List of hardware peripherals attached to TJBot. + * @see {@link #TJBot+HARDWARE} for a list of supported hardware. 
+ * @async + */ + initialize(hardware: any): Promise; + /** ------------------------------------------------------------------------ */ + /** INTERNAL HARDWARE & WATSON SERVICE INITIALIZATION */ + /** ------------------------------------------------------------------------ */ + /** + * Configure the camera hardware. + * @private + */ + private _setupCamera; + _camera: Raspistill | undefined; + /** + * Configure the Neopixel LED hardware. + * @param {int} gpioPin The GPIO pin number to which the LED is connected. + * @private + */ + private _setupLEDNeopixel; + _neopixelLed: any; + /** + * Configure the common anode RGB LED hardware. + * @param {int} redPin The pin number to which the led red pin is connected. + * @param {int} greenPin The pin number to which the led green pin is connected. + * @param {int} bluePin The pin number to which the led blue pin is connected. + * @private + */ + private _setupLEDCommonAnode; + _commonAnodeLed: { + redPin: Gpio; + greenPin: Gpio; + bluePin: Gpio; + } | undefined; + /** + * Configure the microphone for speech recognition. + * @private + */ + private _setupMicrophone; + _mic: any; + _micInputStream: any; + /** + * Configure the servo module for the given pin number. + * @param {int} pin The pin number to which the servo is connected. + * @private + */ + private _setupServo; + _motor: Gpio | undefined; + /** + * Configure the speaker. + * @private + */ + private _setupSpeaker; + _soundplayer: any; + /** + * Instantiate the specified Watson service. + * @param {string} service The name of the service. Valid names are defined in TJBot.services. + * @param {string} version The version of the service (e.g. "2018-09-20"). If null, the default version will be used. 
+ * @private + */ + private _createServiceAPI; + _assistant: AssistantV2 | undefined; + _languageTranslator: LanguageTranslatorV3 | undefined; + _stt: SpeechToTextV1 | undefined; + _tts: TextToSpeechV1 | undefined; + _toneAnalyzer: ToneAnalyzerV3 | undefined; + _visualRecognition: VisualRecognitionV3 | undefined; + /** + * Assert that TJBot is able to perform a specified capability. Instantiates Watson + * services as needed. + * @param {string} capability The capability assert (see TJBot.prototype.capabilities). + * @private + */ + private _assertCapability; + /** ------------------------------------------------------------------------ */ + /** ANALYZE TONE */ + /** ------------------------------------------------------------------------ */ + /** + * Analyze the tone of the given text. + * @param {string} text The text to analyze. + * @return {object} Returns the response object from the Tone Analyzer service. + * @example + * response = { + * "document_tone": { + * "tones": [{ + * "score": 0.6165, + * "tone_id": "sadness", + * "tone_name": "Sadness" + * }, + * { + * "score": 0.829888, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * } + * ] + * }, + * "sentences_tone": [{ + * "sentence_id": 0, + * "text": "Team, I know that times are tough!", + * "tones": [{ + * "score": 0.801827, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * }] + * }, + * { + * "sentence_id": 1, + * "text": "Product sales have been disappointing for the past three quarters.", + * "tones": [{ + * "score": 0.771241, + * "tone_id": "sadness", + * "tone_name": "Sadness" + * }, + * { + * "score": 0.687768, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * } + * ] + * }, + * { + * "sentence_id": 2, + * "text": "We have a competitive product, but we need to do a better job of selling it!", + * "tones": [{ + * "score": 0.506763, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * }] + * } + * ] + * } + * @see {@link 
https://cloud.ibm.com/apidocs/tone-analyzer?code=node#tone|Tone Analyzer} documentation provides details on the response object. + * @async + */ + analyzeTone(text: string): object; + /** ------------------------------------------------------------------------ */ + /** CONVERSE */ + /** ------------------------------------------------------------------------ */ + /** + * Take a conversational turn in the conversation. + * @param {string} message The message to send to the Assistant service. + * @return {object} Returns an object with two keys: `object` contains the full Assistant response object, and `description` contains the string response. + * @example + * response = { + * "object": {conversation response object}, + * "description": "hello, how are you" + * } + * @see {@link https://cloud.ibm.com/apidocs/assistant/assistant-v2?code=node#message|Assistant} documentation provides details on the response object. + * @async + */ + converse(message: string): object; + _assistantSessionId: string | undefined; + /** ------------------------------------------------------------------------ */ + /** LISTEN */ + /** ------------------------------------------------------------------------ */ + /** + * Listen for a spoken utterance. + * @async + */ + listen(): Promise; + _recognizeStream: import("ibm-watson/lib/recognize-stream") | undefined; + _sttTextStream: any; + /** + * Internal method for pausing listening, used when + * we want to play a sound but we don't want to assert + * the 'listen' capability. + * @private + */ + private _pauseListening; + /** + * Internal method for resuming listening, used when + * we want to play a sound but we don't want to assert + * the 'listen' capability. + * @private + */ + private _resumeListening; + /** ------------------------------------------------------------------------ */ + /** SEE */ + /** ------------------------------------------------------------------------ */ + /** + * Take a picture and identify the objects present. 
+ * @param {array=} classifierIds (optional) List of classifier IDs to use in the Visual Recognition service. + * @return {object} Returns a list of objects seen and their confidences. + * @example + * response = { + * "images": [{ + * "classifiers": [{ + * "classifier_id": "roundPlusBanana_1758279329", + * "name": "roundPlusBanana", + * "classes": [{ + * "class": "fruit", + * "score": 0.788 + * }, + * { + * "class": "olive color", + * "score": 0.973 + * }, + * { + * "class": "lemon yellow color", + * "score": 0.789 + * } + * ] + * }], + * "image": "fruitbowl.jpg" + * }], + * "images_processed": 1, + * "custom_classes": 6 + * } + * @see {@link https://cloud.ibm.com/apidocs/visual-recognition/visual-recognition-v3?code=node#classify|Visual Recognition} + * documentation provides details on the response object. The response object returned by + * `see()` corresponds to `response.images[0].classifiers[0].classes` from Visual Recognition. + * @async + */ + see(classifierIds?: any | undefined): object; + /** + * Recognize objects in a given photo. + * @param {string} filePath Path to the photo file. + * @param {array=} classifierIds (optional) List of classifier IDs to use in the Visual Recognition service. + * @return {object} Returns a list of objects seen and their confidences. + * @see {@link https://cloud.ibm.com/apidocs/visual-recognition/visual-recognition-v3?code=node#classify|Visual Recognition} + * documentation provides details on the response object. The response object returned by + * `see()` corresponds to `response.images[0].classifiers[0].classes` from Visual Recognition. + * @async + */ + recognizeObjectsInPhoto(filePath: string, classifierIds?: any | undefined): object; + /** + * Capture an image and save it in the given path. + * @param {string=} filePath (optional) Path at which to save the photo file. If not + * specified, photo will be saved in a temp location. + * @return {string} Path at which the photo was saved. 
+ * @async + */ + takePhoto(filePath?: string | undefined): string; + /** + * Internal method to capture an image at the given path. Used to avoid triggering + * the check for an apikey for Watson Visual Recognition in _assertCapability() + * during testing. + * @param {string=} filePath (optional) Path at which to save the photo file. If not + * specified, photo will be saved in a temp location. + * @return {string} Path at which the photo was saved. + * @private + * @async + */ + private _takePhoto; + /** ------------------------------------------------------------------------ */ + /** SHINE */ + /** ------------------------------------------------------------------------ */ + /** + * Change the color of the LED. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @see {@link https://github.com/timoxley/colornames|Colornames} for a list of color names. + */ + shine(color: string): void; + /** + * Pulse the LED a single time. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @param {float=} duration The duration the pulse should last. The duration should be in + * the range [0.5, 2.0] seconds. + * @see {@link https://github.com/timoxley/colornames|Colornames} for a list of color names. + * @async + */ + pulse(color: string, duration?: any | undefined): Promise; + /** + * Get the list of all colors recognized by TJBot. + * @return {array} List of all named colors recognized by `shine()` and `pulse()`. + */ + shineColors(): any; + /** + * Get a random color. 
+ * @return {string} Random named color. + */ + randomColor(): string; + /** + * Normalize the given color to #RRGGBB. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @return {string} Hex string corresponding to the given color (e.g. "#RRGGBB") + * @private + */ + private _normalizeColor; + /** + * Convert hex color code to RGB value. + * @param {string} hexColor Hex color code + * @return {array} RGB color (e.g. (255, 128, 128)) + * @private + */ + private _convertHexToRgbColor; + /** + * Render the given rgb color for the common anode led. + * @param {string} hexColor Color in hex format + * @private + */ + private _renderCommonAnodeLed; + /** ------------------------------------------------------------------------ */ + /** SPEAK */ + /** ------------------------------------------------------------------------ */ + /** + * Speak a message. + * @param {string} message The message to speak. + * @async + */ + speak(message: string): Promise; + _ttsVoices: import("ibm-watson/text-to-speech/v1-generated").Voice[] | undefined; + /** + * Play a sound at the specified path. + * @param {string} soundFile The path to the sound file to be played. + * @async + */ + play(soundFile: string): Promise; + /** ------------------------------------------------------------------------ */ + /** TRANSLATE */ + /** ------------------------------------------------------------------------ */ + /** + * Translates the given text from the source language to the target language. + * + * @param {string} text The text to translate. + * @param {string} sourceLanguage The source language (e.g. "en" for English). + * @param {string} targetLanguage The target language (e.g. "es" for Spanish). 
+ * @return {object} The response object from the Language Translator service. + * @example + * response = { + * "object": { + * "translations": [{ + * "translation": "Hola, mi nombre es TJBot!" + * }], + * "word_count": 7, + * "character_count": 25 + * }, + * "description": "Hola, mi nombre es TJBot!" + * } + * @see Use {@link #TJBot+isTranslatable} to determine whether lanuage can be translated from + * the `sourceLanguage` to `targetLanguage`. + * @see {@link https://cloud.ibm.com/apidocs/language-translator?code=node#translate|Language Translator} + * documentation provides details on the response object. + * @async + */ + translate(text: string, sourceLanguage: string, targetLanguage: string): object; + /** + * Identifies the language of the given text. + * @param {string} text The text to identify. + * @return {object} Returns a response object from the Language Translator service. + * @example + * response = { + * "languages": [{ + * "language": "en", + * "confidence": 0.9804833843796723 + * }, + * { + * "language": "nn", + * "confidence": 0.005988721319786277 + * }, + * { + * "language": "sq", + * "confidence": 0.0036927759389060203 + * }, + * { + * "language": "nb", + * "confidence": 0.0035802051870239037 + * } + * ] + * } + * @see {@link https://cloud.ibm.com/apidocs/language-translator?code=node#identify|Language Translator} + * documentation provides details on the response object. + * @async + */ + identifyLanguage(text: string): object; + /** + * Determines if TJBot can translate from the source language to the target language. + * @param {string} sourceLanguage The source language (e.g. "en" for English). + * @param {string} targetLanguage The target language (e.g. "es" for Spanish). + * @return {bool} True if the `sourceLanguage` can be translated to the + * `targetLanguage`, false otherwise. 
+ * @async + */ + isTranslatable(sourceLanguage: string, targetLanguage: string): any; + _translationModels: {} | undefined; + /** + * Returns a list of languages that can TJBot can translate to from the given language. + * @param {string} sourceLanguage The source language (e.g. "en" for English) + * @return {array} List of languages that TJBot can translate to from the source langauge + */ + translatableLanguages(sourceLanguage: string): any; + /** + * Returns the name of the given language code. + * @param {string} languageCode Two-character language code (e.g. "en") + * @return {string} Name of the language (e.g. "English"), or undefined if the language is unknown. + */ + languageForCode(languageCode: string): string; + /** + * Returns the two-letter code for the given language. + * @param {string} language Name of the language (e.g. "English") + * @return {string} Two-letter language code for the language (e.g. "en"), or undefined if the language code is unknown. + */ + codeForLanguage(language: string): string; + /** + * Loads the list of language models that can be used for translation. + * @private + * @async + */ + private _loadLanguageTranslationModels; + /** ------------------------------------------------------------------------ */ + /** WAVE */ + /** ------------------------------------------------------------------------ */ + /** + * Moves TJBot's arm all the way back. If this method doesn't move the arm all the way back, the servo motor stop point defined in TJBot.SERVO.ARM_BACK may need to be overridden. Valid servo values are in the range [500, 2300]. + * @example tj.armBack() + */ + armBack(): void; + /** + * Raises TJBot's arm. If this method doesn't move the arm all the way back, the servo motor stop point defined in TJBot.SERVO.ARM_UP may need to be overridden. Valid servo values are in the range [500, 2300]. + * @example tj.raiseArm() + */ + raiseArm(): void; + /** + * Lowers TJBot's arm. 
If this method doesn't move the arm all the way back, the servo motor stop point defined in TJBot.SERVO.ARM_DOWN may need to be overridden. Valid servo values are in the range [500, 2300]. + * @example tj.lowerArm() + */ + lowerArm(): void; + /** + * Waves TJBots's arm once. + */ + wave(): Promise; +} +import { Raspistill } from "node-raspistill"; +import { Gpio } from "pigpio"; +import AssistantV2 from "ibm-watson/assistant/v2.js"; +import LanguageTranslatorV3 from "ibm-watson/language-translator/v3.js"; +import SpeechToTextV1 from "ibm-watson/speech-to-text/v1.js"; +import TextToSpeechV1 from "ibm-watson/text-to-speech/v1.js"; +import ToneAnalyzerV3 from "ibm-watson/tone-analyzer/v3.js"; +import VisualRecognitionV3 from "ibm-watson/visual-recognition/v3.js"; diff --git a/dist/mjs/tjbot.js b/dist/mjs/tjbot.js new file mode 100644 index 0000000..4adf784 --- /dev/null +++ b/dist/mjs/tjbot.js @@ -0,0 +1,1510 @@ +/* eslint-disable import/extensions */ +/** + * Copyright 2016-2020 IBM Corp. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// node modules +import temp from 'temp'; +import Promise from 'bluebird'; +import fs from 'fs'; +import sleep from 'sleep'; +import colorToHex from 'colornames'; +import cm from 'color-model'; +import winston from 'winston'; +import { once } from 'events'; +// hardware modules +import Mic from 'mic'; +import { Raspistill } from 'node-raspistill'; +import ws281x from 'rpi-ws281x-native'; +import { Gpio } from 'pigpio'; +import SoundPlayer from 'sound-player'; +// watson modules +import AssistantV2 from 'ibm-watson/assistant/v2.js'; +import LanguageTranslatorV3 from 'ibm-watson/language-translator/v3.js'; +import SpeechToTextV1 from 'ibm-watson/speech-to-text/v1.js'; +import TextToSpeechV1 from 'ibm-watson/text-to-speech/v1.js'; +import ToneAnalyzerV3 from 'ibm-watson/tone-analyzer/v3.js'; +import VisualRecognitionV3 from 'ibm-watson/visual-recognition/v3.js'; +/** +* Class representing a TJBot +*/ +class TJBot { + /** + * TJBot library version + * @readonly + */ + static VERSION = 'v2.0.2'; + /** + * TJBot capabilities + * @readonly + * @enum {string} + */ + static CAPABILITIES = { + ANALYZE_TONE: 'analyze_tone', + CONVERSE: 'converse', + LISTEN: 'listen', + SEE: 'see', + SHINE: 'shine', + SPEAK: 'speak', + TRANSLATE: 'translate', + WAVE: 'wave', + }; + /** + * TJBot hardware + * @readonly + * @enum {string} + */ + static HARDWARE = { + CAMERA: 'camera', + LED_NEOPIXEL: 'led_neopixel', + LED_COMMON_ANODE: 'led_common_anode', + MICROPHONE: 'microphone', + SERVO: 'servo', + SPEAKER: 'speaker', + }; + /** + * TJBot Watson services + * @readonly + * @enum {string} + */ + static SERVICES = { + ASSISTANT: 'assistant', + LANGUAGE_TRANSLATOR: 'language_translator', + SPEECH_TO_TEXT: 'speech_to_text', + TEXT_TO_SPEECH: 'text_to_speech', + TONE_ANALYZER: 'tone_analyzer', + VISUAL_RECOGNITION: 'visual_recognition', + }; + /** + * TJBot languages for listening, speaking, and seeing + * @readonly + * @enum {string} + */ + static LANGUAGES = { + // 
https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models + LISTEN: { + ARABIC: 'ar-AR', + CHINESE: 'zh-CN', + ENGLISH_UK: 'en-GB', + ENGLISH_US: 'en-US', + FRENCH: 'fr-FR', + GERMAN: 'de-DE', + ITALIAN: 'it-IT', + JAPANESE: 'ja-JP', + KOREAN: 'ko-KR', + PORTUGUESE: 'pt-BR', + SPANISH: 'es-ES', + }, + // https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices + SPEAK: { + ARABIC: 'ar-AR', + CHINESE: 'zh-CN', + DUTCH: 'nl-NL', + ENGLISH_GB: 'en-GB', + ENGLISH_US: 'en-US', + FRENCH: 'fr-FR', + GERMAN: 'de-DE', + ITALIAN: 'it-IT', + JAPANESE: 'ja-JP', + KOREAN: 'ko-KR', + PORTUGUESE: 'pt-BR', + SPANISH: 'es-ES', + }, + // https://cloud.ibm.com/docs/visual-recognition?topic=visual-recognition-language-support-top + SEE: { + CHINESE: 'zh-cn', + ENGLISH: 'en', + FRENCH: 'fr', + GERMAN: 'de', + ITALIAN: 'it', + JAPANESE: 'ja', + KOREAN: 'ko', + PORTUGUESE: 'pt-br', + SPANISH: 'es', + }, + }; + /** + * TJBot genders, used to pick a voice when speaking + * @readonly + * @enum {string} + */ + static GENDERS = { + MALE: 'male', + FEMALE: 'female', + }; + /** + * TJBot servo motor stop positions + * @readonly + * @enum {int} + */ + static SERVO = { + ARM_BACK: 500, + ARM_UP: 1400, + ARM_DOWN: 2300, + }; + /** + * TJBot default configuration + * @readonly + */ + static DEFAULT_CONFIG = { + log: { + level: 'info', // valid levels are 'error', 'warn', 'info', 'verbose', 'debug', 'silly' + }, + robot: { + gender: TJBot.GENDERS.MALE, // see TJBot.GENDERS + }, + converse: { + assistantId: undefined, // placeholder for Watson Assistant's assistantId + }, + listen: { + microphoneDeviceId: 'plughw:1,0', + inactivityTimeout: -1, + backgroundAudioSuppression: 0.4, + language: TJBot.LANGUAGES.LISTEN.ENGLISH_US, // see TJBot.LANGUAGES.LISTEN + }, + wave: { + servoPin: 7, // default pin is GPIO 7 (physical pin 26) + }, + speak: { + language: TJBot.LANGUAGES.SPEAK.ENGLISH_US, + voice: undefined, + speakerDeviceId: 'plughw:0,0', // plugged-in USB card 1, 
device 0; 'see aplay -l' for a list of playback devices
+ },
+ see: {
+ confidenceThreshold: 0.6,
+ camera: {
+ height: 720,
+ width: 960,
+ verticalFlip: false,
+ horizontalFlip: false, // flips the image horizontally, should not need to be overridden
+ },
+ language: TJBot.LANGUAGES.SEE.ENGLISH, // see TJBot.LANGUAGES.SEE; ENGLISH_US is not defined there and resolved to undefined
+ },
+ shine: {
+ // see https://pinout.xyz for a pin diagram
+ neopixel: {
+ gpioPin: 18,
+ grbFormat: false, // if false, the RGB color format will be used for the LED; if true, the GRB format will be used
+ },
+ commonAnode: {
+ redPin: 19,
+ greenPin: 13,
+ bluePin: 12, // default blue pin is GPIO 12 (physical pin 32)
+ },
+ },
+ };
+ /**
+ * TJBot constructor. After constructing a TJBot instance, call initialize() to configure its hardware.
+ * @param {object} configuration Configuration for the TJBot. See TJBot.DEFAULT_CONFIG for all configuration options.
+ * @param {string=} credentialsFile (optional) Path to the 'ibm-credentials.env' file containing authentication credentials for IBM Watson services.
+ * @return {TJBot} instance of the TJBot class
+ */
+ constructor(configuration = {}, credentialsFile = '') {
+ // import configuration params
+ this.configuration = { ...TJBot.DEFAULT_CONFIG, ...configuration };
+ // set up logging
+ winston.configure({
+ level: this.configuration.log.level || 'info',
+ format: winston.format.simple(),
+ transports: [
+ new winston.transports.Console(),
+ ],
+ });
+ // automatically track and clean up temporary files
+ temp.track();
+ // keep track of IBM Cloud service credentials
+ if (credentialsFile !== '') {
+ process.env.IBM_CREDENTIALS_FILE = credentialsFile;
+ }
+ winston.info('Hello from TJBot!');
+ winston.verbose(`TJBot library version ${TJBot.VERSION}`);
+ winston.silly(`TJBot configuration: ${JSON.stringify(this.configuration)}`);
+ }
+ /**
+ * @param {array} hardware List of hardware peripherals attached to TJBot.
+ * @see {@link #TJBot+HARDWARE} for a list of supported hardware.
+ * @async + */ + async initialize(hardware) { + // set up the hardware + if (hardware === undefined) { + throw new Error('must define a hardware configuration for TJBot'); + } + if (!Array.isArray(hardware)) { + throw new Error('hardware must be an array'); + } + winston.info(`Initializing TJBot with ${hardware}`); + hardware.forEach((device) => { + switch (device) { + case TJBot.HARDWARE.CAMERA: + this._setupCamera(); + break; + case TJBot.HARDWARE.LED_NEOPIXEL: + this._setupLEDNeopixel(this.configuration.shine.neopixel.gpioPin); + break; + case TJBot.HARDWARE.LED_COMMON_ANODE: + this._setupLEDCommonAnode(this.configuration.shine.commonAnode.redPin, this.configuration.shine.commonAnode.greenPin, this.configuration.shine.commonAnode.bluePin); + break; + case TJBot.HARDWARE.MICROPHONE: + this._setupMicrophone(); + break; + case TJBot.HARDWARE.SERVO: + this._setupServo(this.configuration.wave.servoPin); + break; + case TJBot.HARDWARE.SPEAKER: + this._setupSpeaker(); + break; + default: + break; + } + }, this); + } + /** ------------------------------------------------------------------------ */ + /** INTERNAL HARDWARE & WATSON SERVICE INITIALIZATION */ + /** ------------------------------------------------------------------------ */ + /** + * Configure the camera hardware. + * @private + */ + _setupCamera() { + winston.verbose(`initializing ${TJBot.HARDWARE.CAMERA}`); + this._camera = new Raspistill({ + width: this.configuration.see.camera.width, + height: this.configuration.see.camera.height, + noPreview: true, + encoding: 'jpg', + outputDir: './', + verticalFlip: this.configuration.see.camera.verticalFlip, + horizontalFlip: this.configuration.see.camera.horizontalFlip, + time: 1, + }); + } + /** + * Configure the Neopixel LED hardware. + * @param {int} gpioPin The GPIO pin number to which the LED is connected. 
+ * @private + */ + _setupLEDNeopixel(gpioPin) { + winston.verbose(`initializing ${TJBot.HARDWARE.LED_NEOPIXEL} on PIN ${gpioPin}`); + // init with 1 LED + this._neopixelLed = ws281x; + this._neopixelLed.init(1, { + gpioPin, + }); + // capture 'this' context + const self = this; + // reset the LED before the program exits + process.on('SIGINT', () => { + self._neopixelLed.reset(); + process.nextTick(() => { + process.exit(0); + }); + }); + } + /** + * Configure the common anode RGB LED hardware. + * @param {int} redPin The pin number to which the led red pin is connected. + * @param {int} greenPin The pin number to which the led green pin is connected. + * @param {int} bluePin The pin number to which the led blue pin is connected. + * @private + */ + _setupLEDCommonAnode(redPin, greenPin, bluePin) { + winston.verbose(`initializing ${TJBot.HARDWARE.LED_COMMON_ANODE} on RED PIN ${redPin}, GREEN PIN ${greenPin}, and BLUE PIN ${bluePin}`); + this._commonAnodeLed = { + redPin: new Gpio(redPin, { + mode: Gpio.OUTPUT, + }), + greenPin: new Gpio(greenPin, { + mode: Gpio.OUTPUT, + }), + bluePin: new Gpio(bluePin, { + mode: Gpio.OUTPUT, + }), + }; + } + /** + * Configure the microphone for speech recognition. 
+ * @private + */ + _setupMicrophone() { + winston.verbose(`initializing ${TJBot.HARDWARE.MICROPHONE}`); + const micParams = { + rate: '16000', + channels: '1', + debug: false, + exitOnSilence: 6, + }; + if (this.configuration.listen.microphoneDeviceId) { + micParams.device = this.configuration.listen.microphoneDeviceId; + } + // create the microphone + this._mic = Mic(micParams); + // (re-)create the mic audio stream and pipe it to STT + this._micInputStream = this._mic.getAudioStream(); + this._micInputStream.on('startComplete', () => { + winston.verbose('microphone started'); + }); + this._micInputStream.on('pauseComplete', () => { + winston.verbose('microphone paused'); + }); + // log errors in the mic input stream + this._micInputStream.on('error', (err) => { + winston.error('the microphone input stream experienced an error', err); + }); + this._micInputStream.on('processExitComplete', () => { + winston.verbose('microphone exit'); + }); + // ignore silence + this._micInputStream.on('silence', () => { + winston.verbose('microphone silence'); + }); + } + /** + * Configure the servo module for the given pin number. + * @param {int} pin The pin number to which the servo is connected. + * @private + */ + _setupServo(pin) { + winston.verbose(`initializing ${TJBot.HARDWARE.SERVO} on PIN ${pin}`); + this._motor = new Gpio(pin, { + mode: Gpio.OUTPUT, + }); + } + /** + * Configure the speaker. + * @private + */ + _setupSpeaker() { + winston.verbose(`initializing ${TJBot.HARDWARE.SPEAKER}`); + this._soundplayer = SoundPlayer; + } + /** + * Instantiate the specified Watson service. + * @param {string} service The name of the service. Valid names are defined in TJBot.services. + * @param {string} version The version of the service (e.g. "2018-09-20"). If null, the default version will be used. 
+ * @private
+ */
+ _createServiceAPI(service, version) {
+ winston.verbose(`initializing ${service} service`);
+ switch (service) {
+ case TJBot.SERVICES.ASSISTANT: {
+ // https://cloud.ibm.com/apidocs/assistant-v2
+ const defaultVersion = '2018-09-19';
+ // there seems to be a bug in the AssistantV2 service where
+ // the service name is 'conversation', so it expects the environment
+ // variables for the credentials to be named CONVERSATION_*, but
+ // when downloading the credentials files, they are named
+ // ASSISTANT_*
+ // AssistantV2.DEFAULT_SERVICE_NAME = 'assistant';
+ this._assistant = new AssistantV2({
+ serviceName: 'assistant',
+ version: version || defaultVersion,
+ });
+ break;
+ }
+ case TJBot.SERVICES.LANGUAGE_TRANSLATOR: {
+ // https://cloud.ibm.com/apidocs/language-translator
+ const defaultVersion = '2018-05-01';
+ this._languageTranslator = new LanguageTranslatorV3({
+ version: version || defaultVersion,
+ });
+ break;
+ }
+ case TJBot.SERVICES.SPEECH_TO_TEXT: {
+ // https://cloud.ibm.com/apidocs/speech-to-text
+ this._stt = new SpeechToTextV1({});
+ break;
+ }
+ case TJBot.SERVICES.TEXT_TO_SPEECH: {
+ // https://cloud.ibm.com/apidocs/text-to-speech
+ this._tts = new TextToSpeechV1({});
+ break;
+ }
+ case TJBot.SERVICES.TONE_ANALYZER: {
+ // https://cloud.ibm.com/apidocs/tone-analyzer
+ const defaultVersion = '2017-09-21';
+ this._toneAnalyzer = new ToneAnalyzerV3({
+ version: version || defaultVersion,
+ });
+ break;
+ }
+ case TJBot.SERVICES.VISUAL_RECOGNITION: {
+ // https://cloud.ibm.com/apidocs/visual-recognition/visual-recognition-v3
+ const defaultVersion = '2018-03-19';
+ this._visualRecognition = new VisualRecognitionV3({
+ serviceName: 'visual_recognition',
+ version: version || defaultVersion,
+ });
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ /**
+ * Assert that TJBot is able to perform a specified capability. Instantiates Watson
+ * services as needed.
+ * @param {string} capability The capability assert (see TJBot.prototype.capabilities). + * @private + */ + _assertCapability(capability) { + switch (capability) { + case TJBot.CAPABILITIES.ANALYZE_TONE: + if (!this._toneAnalyzer) { + this._createServiceAPI(TJBot.SERVICES.TONE_ANALYZER); + } + break; + case TJBot.CAPABILITIES.CONVERSE: + if (!this.configuration.converse.assistantId) { + throw new Error('TJBot is not configured to converse. ' + + 'Please check that you defined an assistantId for the ' + + 'converse.assistantId parameter in the TJBot initialize() method.'); + } + if (!this._assistant) { + this._createServiceAPI(TJBot.SERVICES.ASSISTANT); + } + break; + case TJBot.CAPABILITIES.LISTEN: + if (!this._mic) { + throw new Error('TJBot is not configured to listen. ' + + 'Please check that you included the ' + + `${TJBot.HARDWARE.MICROPHONE} hardware in the TJBot initialize() method.`); + } + if (!this._stt) { + this._createServiceAPI(TJBot.SERVICES.SPEECH_TO_TEXT); + } + break; + case TJBot.CAPABILITIES.SEE: + if (!this._camera) { + throw new Error('TJBot is not configured to see. ' + + 'Please check that you included the ' + + `${TJBot.HARDWARE.CAMERA} hardware in the TJBot initialize() method.`); + } + if (!this._visualRecognition) { + this._createServiceAPI(TJBot.SERVICES.VISUAL_RECOGNITION); + } + break; + case TJBot.CAPABILITIES.SHINE: + // one LED should be defined + if (!this._neopixelLed && !this._commonAnodeLed) { + throw new Error('TJBot is not configured with an LED. ' + + 'Please check that you included the ' + + `${TJBot.HARDWARE.LED_NEOPIXEL} or ${TJBot.HARDWARE.LED_COMMON_ANODE} ` + + 'hardware in the TJBot initialize() method.'); + } + break; + case TJBot.CAPABILITIES.SPEAK: + if (!this._soundplayer) { + throw new Error('TJBot is not configured to speak. 
' + + 'Please check that you included the ' + + `${TJBot.HARDWARE.SPEAKER} hardware in the TJBot initialize() method.`); + } + if (!this._tts) { + this._createServiceAPI(TJBot.SERVICES.TEXT_TO_SPEECH); + } + break; + case TJBot.CAPABILITIES.TRANSLATE: + if (!this._languageTranslator) { + this._createServiceAPI(TJBot.SERVICES.LANGUAGE_TRANSLATOR); + } + break; + case TJBot.CAPABILITIES.WAVE: + if (!this._motor) { + throw new Error('TJBot is not configured with an arm. ' + + 'Please check that you included the ' + + `${TJBot.HARDWARE.SERVO} hardware in the TJBot initialize() method.`); + } + break; + default: + break; + } + } + /** ------------------------------------------------------------------------ */ + /** UTILITY METHODS */ + /** ------------------------------------------------------------------------ */ + /** + * Put TJBot to sleep. + * @param {int} msec Number of milliseconds to sleep for (1000 msec == 1 sec). + */ + static sleep(msec) { + const usec = msec * 1000; + sleep.usleep(usec); + } + /** ------------------------------------------------------------------------ */ + /** ANALYZE TONE */ + /** ------------------------------------------------------------------------ */ + /** + * Analyze the tone of the given text. + * @param {string} text The text to analyze. + * @return {object} Returns the response object from the Tone Analyzer service. 
+ * @example + * response = { + * "document_tone": { + * "tones": [{ + * "score": 0.6165, + * "tone_id": "sadness", + * "tone_name": "Sadness" + * }, + * { + * "score": 0.829888, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * } + * ] + * }, + * "sentences_tone": [{ + * "sentence_id": 0, + * "text": "Team, I know that times are tough!", + * "tones": [{ + * "score": 0.801827, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * }] + * }, + * { + * "sentence_id": 1, + * "text": "Product sales have been disappointing for the past three quarters.", + * "tones": [{ + * "score": 0.771241, + * "tone_id": "sadness", + * "tone_name": "Sadness" + * }, + * { + * "score": 0.687768, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * } + * ] + * }, + * { + * "sentence_id": 2, + * "text": "We have a competitive product, but we need to do a better job of selling it!", + * "tones": [{ + * "score": 0.506763, + * "tone_id": "analytical", + * "tone_name": "Analytical" + * }] + * } + * ] + * } + * @see {@link https://cloud.ibm.com/apidocs/tone-analyzer?code=node#tone|Tone Analyzer} documentation provides details on the response object. + * @async + */ + async analyzeTone(text) { + this._assertCapability(TJBot.CAPABILITIES.ANALYZE_TONE); + const params = { + toneInput: { text }, + contentType: 'application/json', + }; + try { + const body = await this._toneAnalyzer.tone(params); + winston.silly(`response from _toneAnalyzer.tone(): ${body}`); + return body.result; + } + catch (err) { + winston.error(`the ${TJBot.SERVICES.TONE_ANALYZER} service returned an error.`, err); + throw err; + } + } + /** ------------------------------------------------------------------------ */ + /** CONVERSE */ + /** ------------------------------------------------------------------------ */ + /** + * Take a conversational turn in the conversation. + * @param {string} message The message to send to the Assistant service. 
+ * @return {object} Returns an object with two keys: `object` contains the full Assistant response object, and `description` contains the string response. + * @example + * response = { + * "object": {conversation response object}, + * "description": "hello, how are you" + * } + * @see {@link https://cloud.ibm.com/apidocs/assistant/assistant-v2?code=node#message|Assistant} documentation provides details on the response object. + * @async + */ + async converse(message) { + this._assertCapability(TJBot.CAPABILITIES.CONVERSE); + // set up the session if needed + if (!this._assistantSessionId) { + try { + winston.silly(`creating assistant session, sessionId: ${this.configuration.converse.assistantId}`); + const body = await this._assistant.createSession({ + assistantId: this.configuration.converse.assistantId, + }); + winston.silly(`response from _assistant.createSession(): ${body}`); + this._assistantSessionId = body.result.session_id; + } + catch (err) { + winston.error(`error creating session for ${TJBot.SERVICES.ASSISTANT} service. please check that tj.configuration.converse.assistantId is defined.`); + throw err; + } + } + // define the conversational turn + const turn = { + assistantId: this.configuration.converse.assistantId, + sessionId: this._assistantSessionId, + input: { + message_type: 'text', + text: message, + }, + }; + // send to Assistant service + try { + const body = await this._assistant.message(turn); + winston.silly(`response from _assistant.message(): ${JSON.stringify(body)}`); + const { result } = body; + // this might not be necessary but in the past, conversational replies + // came in through result.output.text, not result.output.generic + let response; + if (result.output.generic) { + response = result.output.generic; + } + else if (result.output.text) { + response = result.output.text; + } + const responseText = response.length > 0 ? 
response[0].text : ''; + const assistantResponse = { + object: result.output, + description: responseText, + }; + winston.verbose(`received response from assistant: ${JSON.stringify(responseText)}`); + return assistantResponse; + } + catch (err) { + winston.error(`the ${TJBot.SERVICES.ASSISTANT} service returned an error.`, err); + throw err; + } + } + /** ------------------------------------------------------------------------ */ + /** LISTEN */ + /** ------------------------------------------------------------------------ */ + /** + * Listen for a spoken utterance. + * @async + */ + async listen() { + // make sure we can listen + this._assertCapability(TJBot.CAPABILITIES.LISTEN); + // lazy create the sttTextStream + if (this._sttTextStream === undefined) { + // initialize the microphone because if stopListening() was called, we don't seem to + // be able to re-use the microphone twice + this._setupMicrophone(); + // create the microphone -> STT recognizer stream + // see this page for additional documentation on the STT configuration parameters: + // https://cloud.ibm.com/apidocs/speech-to-text?code=node#recognize-audio-websockets- + const params = { + objectMode: false, + contentType: 'audio/l16; rate=16000; channels=1', + model: `${this.configuration.listen.language}_BroadbandModel`, + inactivityTimeout: this.configuration.listen.inactivityTimeout || 60, + interimResults: true, + backgroundAudioSuppression: this.configuration.listen.backgroundAudioSuppression || 0.0, + }; + winston.silly(`recognizeUsingWebSocket() params: ${JSON.stringify(params)}`); + // Create the stream. 
+ this._recognizeStream = this._stt.recognizeUsingWebSocket(params); + this._recognizeStream.setEncoding('utf8'); + // create the mic -> STT recognizer -> text stream + this._sttTextStream = this._micInputStream.pipe(this._recognizeStream); + this._sttTextStream.setEncoding('utf8'); + // start the microphone + this._mic.start(); + // handle errors + this._sttTextStream.on('error', (err) => { + winston.error('an error occurred in the STT text stream', err); + }); + } + const fd = this._sttTextStream; + const end = new Promise((resolve) => { + fd.once('data', resolve); + }); + const transcript = await end; + winston.info(`TJBot heard: "${transcript.trim()}"`); + return transcript.trim(); + } + /** + * Internal method for pausing listening, used when + * we want to play a sound but we don't want to assert + * the 'listen' capability. + * @private + */ + _pauseListening() { + if (this._mic !== undefined) { + winston.verbose('listening paused'); + this._mic.pause(); + } + } + /** + * Internal method for resuming listening, used when + * we want to play a sound but we don't want to assert + * the 'listen' capability. + * @private + */ + _resumeListening() { + if (this._mic !== undefined) { + winston.verbose('listening resumed'); + this._mic.resume(); + } + } + /** ------------------------------------------------------------------------ */ + /** SEE */ + /** ------------------------------------------------------------------------ */ + /** + * Take a picture and identify the objects present. + * @param {array=} classifierIds (optional) List of classifier IDs to use in the Visual Recognition service. + * @return {object} Returns a list of objects seen and their confidences. 
+ * @example + * response = { + * "images": [{ + * "classifiers": [{ + * "classifier_id": "roundPlusBanana_1758279329", + * "name": "roundPlusBanana", + * "classes": [{ + * "class": "fruit", + * "score": 0.788 + * }, + * { + * "class": "olive color", + * "score": 0.973 + * }, + * { + * "class": "lemon yellow color", + * "score": 0.789 + * } + * ] + * }], + * "image": "fruitbowl.jpg" + * }], + * "images_processed": 1, + * "custom_classes": 6 + * } + * @see {@link https://cloud.ibm.com/apidocs/visual-recognition/visual-recognition-v3?code=node#classify|Visual Recognition} + * documentation provides details on the response object. The response object returned by + * `see()` corresponds to `response.images[0].classifiers[0].classes` from Visual Recognition. + * @async + */ + async see(classifierIds = []) { + this._assertCapability(TJBot.CAPABILITIES.SEE); + let filePath; + let objects; + try { + winston.verbose('taking a photo with the camera'); + filePath = await this.takePhoto(); + } + catch (err) { + winston.error('an error occured taking a photo', err); + throw err; + } + try { + objects = await this.recognizeObjectsInPhoto(filePath, classifierIds); + } + catch (err) { + winston.error(`the ${TJBot.SERVICES.VISUAL_RECOGNITION} service returned an error`, err); + throw err; + } + return objects; + } + /** + * Recognize objects in a given photo. + * @param {string} filePath Path to the photo file. + * @param {array=} classifierIds (optional) List of classifier IDs to use in the Visual Recognition service. + * @return {object} Returns a list of objects seen and their confidences. + * @see {@link https://cloud.ibm.com/apidocs/visual-recognition/visual-recognition-v3?code=node#classify|Visual Recognition} + * documentation provides details on the response object. The response object returned by + * `see()` corresponds to `response.images[0].classifiers[0].classes` from Visual Recognition. 
+ * @async + */ + async recognizeObjectsInPhoto(filePath, classifierIds = []) { + this._assertCapability(TJBot.CAPABILITIES.SEE); + winston.verbose(`sending image to the ${TJBot.SERVICES.VISUAL_RECOGNITION} service to recognize objects`); + const params = { + imagesFile: fs.createReadStream(filePath), + threshold: this.configuration.see.confidenceThreshold || 0.6, + acceptLanguage: this.configuration.see.language || 'en', + }; + if (classifierIds !== undefined && classifierIds.length > 0) { + params.classifierIds = classifierIds; + // params.owners = ['me']; // the API docs say this is not necessary to set when specifying classifierIds + } + try { + const body = await this._visualRecognition.classify(params); + winston.silly(`response from _visualRecognition.classify() ${JSON.stringify(body)}`); + const result = body.result.images[0].classifiers[0].classes; + return result; + } + catch (err) { + winston.error(`the ${TJBot.SERVICES.VISUAL_RECOGNITION} service returned an error`, err); + throw err; + } + } + /** + * Capture an image and save it in the given path. + * @param {string=} filePath (optional) Path at which to save the photo file. If not + * specified, photo will be saved in a temp location. + * @return {string} Path at which the photo was saved. + * @async + */ + async takePhoto(filePath = '') { + this._assertCapability(TJBot.CAPABILITIES.SEE); + return this._takePhoto(filePath); + } + /** + * Internal method to capture an image at the given path. Used to avoid triggering + * the check for an apikey for Watson Visual Recognition in _assertCapability() + * during testing. + * @param {string=} filePath (optional) Path at which to save the photo file. If not + * specified, photo will be saved in a temp location. + * @return {string} Path at which the photo was saved. 
+ * @private + * @async + */ + async _takePhoto(filePath = '') { + let fp = filePath; + let path = ''; + let name = ''; + // if no file path provided, save to temp location + if (fp === '') { + fp = temp.path({ + prefix: 'tjbot', + suffix: '.jpg', + }); + } + winston.verbose(`capturing image at path: ${fp}`); + path = fp.lastIndexOf('/') > 0 ? fp.substring(0, fp.lastIndexOf('/')) : '.'; // save to current dir if no directory provided. + name = fp.substring(fp.lastIndexOf('/') + 1); + name = name.replace('.jpg', ''); // the node raspistill lib already adds encoding .jpg to file. + winston.silly(`image path: ${path}, image filename: ${name}`); + // set the configuration options, which may have changed since the camera was initialized + this._camera.setOptions({ + outputDir: path, + fileName: name, + width: this.configuration.see.camera.width, + height: this.configuration.see.camera.height, + verticalFlip: this.configuration.see.camera.verticalFlip, + horizontalFlip: this.configuration.see.camera.horizontalFlip, + }); + winston.silly(`camera options: ${JSON.stringify(this._camera.getOptions())}`); + try { + await this._camera.takePhoto(); + return fp; + } + catch (err) { + winston.error('error taking picture', err); + throw err; + } + } + /** ------------------------------------------------------------------------ */ + /** SHINE */ + /** ------------------------------------------------------------------------ */ + /** + * Change the color of the LED. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @see {@link https://github.com/timoxley/colornames|Colornames} for a list of color names. 
+ */ + shine(color) { + this._assertCapability(TJBot.CAPABILITIES.SHINE); + // normalize the color + const c = this._normalizeColor(color); + // shine! will shine on both LEDs if they are both set up + if (this._commonAnodeLed) { + this._renderCommonAnodeLed(c); + } + if (this._neopixelLed) { + const colors = new Uint32Array(1); + if (this.configuration.shine.neopixel.grbFormat) { + // convert to the 0xGGRRBB format for the LED + const grb = `0x${c[3]}${c[4]}${c[1]}${c[2]}${c[5]}${c[6]}`; + winston.verbose(`shining my LED to GRB color ${grb}`); + colors[0] = parseInt(grb, 16); + } + else { + // convert to the 0xRRGGBB format for the LED + const rgb = `0x${c[1]}${c[2]}${c[3]}${c[4]}${c[5]}${c[6]}`; + winston.verbose(`shining my LED to RGB color ${rgb}`); + colors[0] = parseInt(rgb, 16); + } + this._neopixelLed.render(colors); + } + } + /** + * Pulse the LED a single time. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @param {float=} duration The duration the pulse should last. The duration should be in + * the range [0.5, 2.0] seconds. + * @see {@link https://github.com/timoxley/colornames|Colornames} for a list of color names. 
+ * @async + */ + async pulse(color, duration = 1.0) { + this._assertCapability(TJBot.CAPABILITIES.SHINE); + if (duration < 0.5) { + throw new Error('TJBot does not recommend pulsing for less than 0.5 seconds.'); + } + if (duration > 2.0) { + throw new Error('TJBot does not recommend pulsing for more than 2 seconds.'); + } + // number of easing steps + const numSteps = 20; + // quadratic in-out easing + const easeInOutQuad = (t, b, c, d) => { + if ((t / d / 2) < 1) { + return (c / 2) * (t / d) * (t / d) + b; + } + return (-c / 2) * ((t - 1) * (t - 3) - 1) + b; + }; + let ease = []; + for (let i = 0; i < numSteps; i += 1) { + ease.push(i); + } + ease = ease.map((x, i) => easeInOutQuad(i, 0, 1, ease.length)); + // normalize to 'duration' msec + ease = ease.map((x) => Math.round(x * duration * 1000)); + // convert to deltas + const easeDelays = []; + for (let i = 0; i < ease.length - 1; i += 1) { + easeDelays[i] = ease[i + 1] - ease[i]; + } + // color ramp + const rgb = this._normalizeColor(color).slice(1); // remove the # + const hex = new cm.HexRgb(rgb); + const colorRamp = []; + for (let i = 0; i < numSteps / 2; i += 1) { + const l = 0.0 + (i / (numSteps / 2)) * 0.5; + colorRamp[i] = hex.toHsl().lightness(l).toRgb().toHexString() + .replace('#', '0x'); + } + // perform the ease + for (let i = 0; i < easeDelays.length; i += 1) { + const c = i < colorRamp.length + ? colorRamp[i] + : colorRamp[colorRamp.length - 1 - (i - colorRamp.length) - 1]; + this.shine(c); + // eslint-disable-next-line no-await-in-loop + TJBot.sleep(easeDelays[i]); + } + } + /** + * Get the list of all colors recognized by TJBot. + * @return {array} List of all named colors recognized by `shine()` and `pulse()`. + */ + shineColors() { + this._assertCapability(TJBot.CAPABILITIES.SHINE); + return colorToHex.all().map((elt) => elt.name); + } + /** + * Get a random color. + * @return {string} Random named color. 
+ */ + randomColor() { + this._assertCapability(TJBot.CAPABILITIES.SHINE); + const colors = this.shineColors(); + const randIdx = Math.floor(Math.random() * colors.length); + const randColor = colors[randIdx]; + return randColor; + } + /** + * Normalize the given color to #RRGGBB. + * @param {string} color The color to shine the LED. May be specified in a number of + * formats, including: hexadecimal, (e.g. "0xF12AC4", "11FF22", "#AABB24"), "on", "off", + * "random", or may be a named color in the `colornames` package. Hexadecimal colors + * follow an #RRGGBB format. + * @return {string} Hex string corresponding to the given color (e.g. "#RRGGBB") + * @private + */ + _normalizeColor(color) { + let normColor = color; + // assume undefined == "off" + if (normColor === undefined) { + normColor = 'off'; + } + // is this "on" or "off"? + if (normColor === 'on') { + normColor = 'FFFFFF'; + } + else if (normColor === 'off') { + normColor = '000000'; + } + else if (normColor === 'random') { + normColor = this.randomColor(); + } + // strip prefixes if they are present + if (normColor.startsWith('0x')) { + normColor = normColor.slice(2); + } + if (normColor.startsWith('#')) { + normColor = normColor.slice(1); + } + // is this a hex number or a named color? + const isHex = /(^[0-9A-F]{6}$)|(^[0-9A-F]{3}$)/i; + let rgb; + if (!isHex.test(normColor)) { + rgb = colorToHex(normColor); + } + else { + rgb = normColor; + } + // did we get something back? + if (rgb === undefined) { + throw new Error(`TJBot did not understand the specified color "${color}"`); + } + // prefix rgb with # in case it's not + if (!rgb.startsWith('#')) { + rgb = `#${rgb}`; + } + // throw an error if we didn't understand this color + if (rgb.length !== 7) { + throw new Error(`TJBot did not understand the specified color "${color}"`); + } + return rgb; + } + /** + * Convert hex color code to RGB value. + * @param {string} hexColor Hex color code + * @return {array} RGB color (e.g. 
(255, 128, 128)) + * @private + */ + // eslint-disable-next-line class-methods-use-this + _convertHexToRgbColor(hexColor) { + return hexColor.replace(/^#?([a-f\d])([a-f\d])([a-f\d])$/i, (m, r, g, b) => `#${r}${r}${g}${g}${b}${b}`) + .substring(1).match(/.{2}/g) + .map((x) => parseInt(x, 16)); + } + /** + * Render the given rgb color for the common anode led. + * @param {string} hexColor Color in hex format + * @private + */ + _renderCommonAnodeLed(hexColor) { + const rgb = this._convertHexToRgbColor(hexColor); + this._commonAnodeLed.redPin.pwmWrite(rgb[0] == null ? 255 : 255 - rgb[0]); + this._commonAnodeLed.greenPin.pwmWrite(rgb[1] == null ? 255 : 255 - rgb[1]); + this._commonAnodeLed.bluePin.pwmWrite(rgb[2] == null ? 255 : 255 - rgb[2]); + } + /** ------------------------------------------------------------------------ */ + /** SPEAK */ + /** ------------------------------------------------------------------------ */ + /** + * Speak a message. + * @param {string} message The message to speak. + * @async + */ + async speak(message) { + this._assertCapability(TJBot.CAPABILITIES.SPEAK); + // make sure we're trying to say something + if (message === undefined || message === '') { + winston.error('TJBot tried to speak an empty message.'); + return; // exit if there's nothing to say! 
+ } + // default voice + let voice = 'en-US_MichaelV3Voice'; + // check to see if the user has specified a voice + if (this.configuration.speak.voice !== undefined) { + winston.silly(`using voice specified in configuration: ${this.configuration.speak.voice}`); + voice = this.configuration.speak.voice; + } + else if (this.configuration.speak.language === TJBot.LANGUAGES.SPEAK.ENGLISH_US) { + // force MichaelV3 if the language is en-US + voice = 'en-US_MichaelV3Voice'; + winston.silly(`forcing ${voice} since the language is English`); + } + else { + winston.silly(`finding voice that matches gender ${this.configuration.robot.gender} and language ${this.configuration.speak.language}`); + // load voices if they haven't been loaded yet + if (!this._ttsVoices) { + winston.verbose('loading TTS voices…'); + const body = await this._tts.listVoices(); + winston.silly(`response from _tts.listVoices(): ${JSON.stringify(body)}`); + this._ttsVoices = body.result.voices; + winston.verbose('TTS voices loaded'); + } + // first figure out which voices will work for speak.language + const { language } = this.configuration.speak; + const languageMatches = this._ttsVoices.filter((v) => v.language === language); + winston.silly(`candidate TTS voices from language match: ${JSON.stringify(languageMatches)}`); + // now use *at least* a voice in the correct language + // note that Watson TTS doesn't always return voices in the same order, so + // this won't always pick the same voice every time + if (languageMatches.length > 0) { + voice = languageMatches[0].name; + winston.silly(`provisionally selected TTS voice ${voice} to ensure language match`); + } + // finally, see if we have a gender match with robot.gender + const { gender } = this.configuration.robot; + const languageAndGenderMatches = languageMatches.sort((a, b) => a.name < b.name).filter((v) => v.gender === gender); + if (languageAndGenderMatches.length > 0) { + voice = languageAndGenderMatches[0].name; + winston.silly(`final
selection of TTS voice ${voice} due to language and gender match`); + } + winston.silly(`selected ${voice} as the ${this.configuration.robot.gender} voice for ${this.configuration.speak.language} `); + } + winston.verbose(`TJBot speaking with voice ${voice}`); + const params = { + text: message, + voice, + accept: 'audio/wav', + }; + const info = temp.openSync('tjbot'); + const response = await this._tts.synthesize(params); + // pipe the audio buffer to a file + winston.silly('writing audio buffer to temp file', info.path); + const fd = fs.createWriteStream(info.path); + response.result.pipe(fd); + // wait for the pipe to finish writing + const end = new Promise((resolve, reject) => { + fd.on('close', resolve); + fd.on('error', reject); + }); + await end; + // now play it + winston.info(`TJBot speaking: ${message}`); + await this.play(info.path); + } + /** + * Play a sound at the specified path. + * @param {string} soundFile The path to the sound file to be played. + * @async + */ + async play(soundFile) { + // pause listening while we play a sound -- using the internal + // method to avoid a capability check (and potential fail if the TJBot + // isn't configured to listen) + this._pauseListening(); + // if we don't have a speaker, throw an error + if (this._soundplayer === undefined) { + throw new Error('unable to play audio, TJBot hardware doesn\'t include a "speaker"'); + } + // initialize soundplayer lib + const params = { + filename: soundFile, + gain: 100, + debug: true, + player: 'aplay', + device: this.configuration.speak.speakerDeviceId, + }; + const player = new this._soundplayer(params); + winston.silly('playing audio with parameters: ', params); + // capture 'this' context + const self = this; + player.on('complete', () => { + winston.silly('audio playback finished'); + // resume listening + self._resumeListening(); + }); + player.on('error', (err) => { + winston.error('error occurred while playing audio', err); + }); + // play the audio + 
player.play(soundFile); + // wait for the audio to finish playing, either by completing playback or by throwing an error + await Promise.race([once(player, 'complete'), once(player, 'error')]); + } + /** ------------------------------------------------------------------------ */ + /** TRANSLATE */ + /** ------------------------------------------------------------------------ */ + /** + * Translates the given text from the source language to the target language. + * + * @param {string} text The text to translate. + * @param {string} sourceLanguage The source language (e.g. "en" for English). + * @param {string} targetLanguage The target language (e.g. "es" for Spanish). + * @return {object} The response object from the Language Translator service. + * @example + * response = { + * "object": { + * "translations": [{ + * "translation": "Hola, mi nombre es TJBot!" + * }], + * "word_count": 7, + * "character_count": 25 + * }, + * "description": "Hola, mi nombre es TJBot!" + * } + * @see Use {@link #TJBot+isTranslatable} to determine whether language can be translated from + * the `sourceLanguage` to `targetLanguage`. + * @see {@link https://cloud.ibm.com/apidocs/language-translator?code=node#translate|Language Translator} + * documentation provides details on the response object.
+ * @async + */ + async translate(text, sourceLanguage, targetLanguage) { + this._assertCapability(TJBot.CAPABILITIES.TRANSLATE); + const params = { + text, + source: sourceLanguage, + target: targetLanguage, + }; + let translation; + try { + const body = await this._languageTranslator.translate(params); + winston.silly(`response from _languageTranslator.translate(): ${JSON.stringify(body)}`); + translation = body.result; + } + catch (err) { + winston.error(`the ${TJBot.SERVICES.LANGUAGE_TRANSLATOR} service returned an error`, err); + throw err; + } + if (Object.prototype.hasOwnProperty.call(translation, 'translations')) { + if (translation.translations.length > 0 + && Object.prototype.hasOwnProperty.call(translation.translations[0], 'translation')) { + return { + object: translation, + description: translation.translations[0].translation, + }; + } + } + return { + object: translation, + description: '', + }; + } + /** + * Identifies the language of the given text. + * @param {string} text The text to identify. + * @return {object} Returns a response object from the Language Translator service. + * @example + * response = { + * "languages": [{ + * "language": "en", + * "confidence": 0.9804833843796723 + * }, + * { + * "language": "nn", + * "confidence": 0.005988721319786277 + * }, + * { + * "language": "sq", + * "confidence": 0.0036927759389060203 + * }, + * { + * "language": "nb", + * "confidence": 0.0035802051870239037 + * } + * ] + * } + * @see {@link https://cloud.ibm.com/apidocs/language-translator?code=node#identify|Language Translator} + * documentation provides details on the response object. 
+ * @async + */ + async identifyLanguage(text) { + this._assertCapability(TJBot.CAPABILITIES.TRANSLATE); + const params = { + text, + }; + let identifiedLanguages; + try { + const body = await this._languageTranslator.identify(params); + winston.silly(`response from _langaugeTranslator.identify(): ${JSON.stringify(body)}`); + identifiedLanguages = body.result; + } + catch (err) { + winston.error(`the ${TJBot.SERVICES.LANGUAGE_TRANSLATOR} service returned an error`, err); + throw err; + } + return identifiedLanguages; + } + /** + * Determines if TJBot can translate from the source language to the target language. + * @param {string} sourceLanguage The source language (e.g. "en" for English). + * @param {string} targetLanguage The target language (e.g. "es" for Spanish). + * @return {bool} True if the `sourceLanguage` can be translated to the + * `targetLanguage`, false otherwise. + * @async + */ + async isTranslatable(sourceLanguage, targetLanguage) { + this._assertCapability(TJBot.CAPABILITIES.TRANSLATE); + // lazy load of language translation models… + if (this._translationModels === undefined) { + winston.verbose('loading language models...'); + this._translationModels = await this._loadLanguageTranslationModels(); + winston.verbose('language models loaded'); + } + if (this._translationModels[sourceLanguage] !== undefined) { + return this._translationModels[sourceLanguage].includes(targetLanguage); + } + return false; + } + /** + * Returns a list of languages that TJBot can translate to from the given language. + * @param {string} sourceLanguage The source language (e.g.
"en" for English) + * @return {array} List of languages that TJBot can translate to from the source language + */ + async translatableLanguages(sourceLanguage) { + this._assertCapability(TJBot.CAPABILITIES.TRANSLATE); + // lazy load of language translation models… + if (this._translationModels === undefined) { + winston.verbose('loading language models...'); + this._translationModels = await this._loadLanguageTranslationModels(); + winston.verbose('language models loaded'); + } + if (this._translationModels[sourceLanguage] !== undefined) { + return this._translationModels[sourceLanguage]; + } + return []; + } + /** + * Returns the name of the given language code. + * @param {string} languageCode Two-character language code (e.g. "en") + * @return {string} Name of the language (e.g. "English"), or undefined if the language is unknown. + */ + // eslint-disable-next-line class-methods-use-this + languageForCode(languageCode) { + switch (languageCode.toLowerCase()) { + case 'ar': + return 'Arabic'; + case 'de': + return 'German'; + case 'en': + return 'English'; + case 'es': + return 'Spanish'; + case 'fr': + return 'French'; + case 'it': + return 'Italian'; + case 'ja': + return 'Japanese'; + case 'ko': + return 'Korean'; + case 'nl': + return 'Dutch'; + case 'pt': + return 'Portuguese'; + case 'zh': + return 'Chinese'; + default: + return undefined; + } + } + /** + * Returns the two-letter code for the given language. + * @param {string} language Name of the language (e.g. "English") + * @return {string} Two-letter language code for the language (e.g. "en"), or undefined if the language code is unknown.
+ */ + // eslint-disable-next-line class-methods-use-this + codeForLanguage(language) { + switch (language.toLowerCase()) { + case 'arabic': + return 'ar'; + case 'german': + return 'de'; + case 'english': + return 'en'; + case 'spanish': + return 'es'; + case 'french': + return 'fr'; + case 'italian': + return 'it'; + case 'japanese': + return 'ja'; + case 'korean': + return 'ko'; + case 'dutch': + return 'nl'; + case 'portuguese': + return 'pt'; + case 'chinese': + return 'zh'; + default: + return undefined; + } + } + /** + * Loads the list of language models that can be used for translation. + * @private + * @async + */ + async _loadLanguageTranslationModels() { + let models; + try { + const body = await this._languageTranslator.listModels({}); + winston.silly(`response from _languageTranslator.listModels(): ${JSON.stringify(body)}`); + models = body.result; + } + catch (err) { + winston.error(`the ${TJBot.SERVICES.LANGUAGE_TRANSLATOR} service returned an error`, err); + throw err; + } + const translations = {}; + if (Object.prototype.hasOwnProperty.call(models, 'models')) { + models.models.forEach((model) => { + if (translations[model.source] === undefined) { + translations[model.source] = []; + } + if (!translations[model.source].includes(model.target)) { + translations[model.source].push(model.target); + } + }); + } + return translations; + } + /** ------------------------------------------------------------------------ */ + /** WAVE */ + /** ------------------------------------------------------------------------ */ + /** + * Moves TJBot's arm all the way back. If this method doesn't move the arm all the way back, the servo motor stop point defined in TJBot.SERVO.ARM_BACK may need to be overridden. Valid servo values are in the range [500, 2300]. 
+ * @example tj.armBack() + */ + armBack() { + // make sure we have an arm + this._assertCapability(TJBot.CAPABILITIES.WAVE); + winston.info("Moving TJBot's arm back"); + this._motor.servoWrite(TJBot.SERVO.ARM_BACK); + } + /** + * Raises TJBot's arm. If this method doesn't move the arm all the way up, the servo motor stop point defined in TJBot.SERVO.ARM_UP may need to be overridden. Valid servo values are in the range [500, 2300]. + * @example tj.raiseArm() + */ + raiseArm() { + // make sure we have an arm + this._assertCapability(TJBot.CAPABILITIES.WAVE); + winston.info("Raising TJBot's arm"); + this._motor.servoWrite(TJBot.SERVO.ARM_UP); + } + /** + * Lowers TJBot's arm. If this method doesn't move the arm all the way down, the servo motor stop point defined in TJBot.SERVO.ARM_DOWN may need to be overridden. Valid servo values are in the range [500, 2300]. + * @example tj.lowerArm() + */ + lowerArm() { + // make sure we have an arm + this._assertCapability(TJBot.CAPABILITIES.WAVE); + winston.info("Lowering TJBot's arm"); + this._motor.servoWrite(TJBot.SERVO.ARM_DOWN); + } + /** + * Waves TJBot's arm once. + */ + async wave() { + this._assertCapability(TJBot.CAPABILITIES.WAVE); + winston.info("Waving TJBot's arm"); + const delay = 200; + this._motor.servoWrite(TJBot.SERVO.ARM_UP); + TJBot.sleep(delay); + this._motor.servoWrite(TJBot.SERVO.ARM_DOWN); + TJBot.sleep(delay); + this._motor.servoWrite(TJBot.SERVO.ARM_UP); + TJBot.sleep(delay); + } +} +/** ------------------------------------------------------------------------ */ +/** MODULE EXPORTS */ +/** ------------------------------------------------------------------------ */ +/** + * Export TJBot!
+ */ +export default TJBot; diff --git a/fixup.sh b/fixup.sh new file mode 100644 index 0000000..00d32c0 --- /dev/null +++ b/fixup.sh @@ -0,0 +1,12 @@ +#!/bin/sh +cat >dist/cjs/package.json <dist/mjs/package.json <=4.2.0" + } + }, "node_modules/uc.micro": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", @@ -16351,6 +16362,12 @@ "is-typedarray": "^1.0.0" } }, + "typescript": { + "version": "4.4.4", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.4.4.tgz", + "integrity": "sha512-DqGhF5IKoBl8WNf8C1gu8q0xZSInh9j1kJJMqT3a94w1JzVaBU4EXOSMrz9yDqMT0xt3selp83fuFMQ0uzv6qA==", + "dev": true + }, "uc.micro": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", diff --git a/package.json b/package.json index 2f4ec3c..544d3af 100644 --- a/package.json +++ b/package.json @@ -1,15 +1,18 @@ { "name": "tjbot", - "version": "2.0.1", + "version": "2.0.2", "description": "Node.js library for writing TJBot recipes", - "main": "index.js", - "type": "module", + "main": "dist/cjs/tjbot.js", + "module": "dist/mjs/tjbot.js", "directories": { "lib": "lib", "test": "__tests__" }, "exports": { - ".": "./index.js" + ".": { + "import": "./dist/mjs/tjbot.js", + "require": "./dist/cjs/tjbot.js" + } }, "dependencies": { "bluebird": "^3.7.2", @@ -40,11 +43,15 @@ "eslint-plugin-promise": "^4.2.1", "eslint-plugin-standard": "^4.0.1", "jest": "^26.4.2", - "jsdoc": "^3.6.5" + "jsdoc": "^3.6.5", + "typescript": "^4.4.4" }, "scripts": { - "test": "node_modules/.bin/jest test.tjbotlib.js", - "generate-docs": "node_modules/.bin/jsdoc --configure .jsdoc.json --verbose" + "prebuild": "rm -rf dist/*", + "build": "tsc -p tsconfig-mjs.json && tsc -p tsconfig-cjs.json", + "postbuild": "sh fixup.sh", + "test": "jest test.tjbotlib.js", + "generate-docs": "jsdoc --configure .jsdoc.json --verbose" }, "repository": { "type": "git", diff --git a/lib/tjbot.js b/src/tjbot.js similarity index 99% rename from 
lib/tjbot.js rename to src/tjbot.js index 4d3c0d4..0d51da8 100644 --- a/lib/tjbot.js +++ b/src/tjbot.js @@ -48,7 +48,7 @@ class TJBot { * TJBot library version * @readonly */ - static VERSION = 'v2.0.1'; + static VERSION = 'v2.0.2'; /** * TJBot capabilities diff --git a/tsconfig-base.json b/tsconfig-base.json new file mode 100644 index 0000000..2c6b0b0 --- /dev/null +++ b/tsconfig-base.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "allowJs": true, + "allowSyntheticDefaultImports": true, + "baseUrl": "src", + "declaration": true, + "esModuleInterop": true, + "inlineSourceMap": false, + "lib": ["esnext"], + "listEmittedFiles": false, + "listFiles": false, + "moduleResolution": "node", + "noFallthroughCasesInSwitch": true, + "pretty": true, + "resolveJsonModule": true, + "rootDir": "src", + "skipLibCheck": true, + "strict": true, + "traceResolution": false, + "types": ["node", "jest"] + }, + "compileOnSave": false, + "exclude": ["node_modules", "dist"], + "include": ["src"] +} \ No newline at end of file diff --git a/tsconfig-cjs.json b/tsconfig-cjs.json new file mode 100644 index 0000000..13a7594 --- /dev/null +++ b/tsconfig-cjs.json @@ -0,0 +1,8 @@ +{ + "extends": "./tsconfig-base.json", + "compilerOptions": { + "module": "commonjs", + "outDir": "dist/cjs", + "target": "es2015" + } +} \ No newline at end of file diff --git a/tsconfig-mjs.json b/tsconfig-mjs.json new file mode 100644 index 0000000..382b9c4 --- /dev/null +++ b/tsconfig-mjs.json @@ -0,0 +1,8 @@ +{ + "extends": "./tsconfig-base.json", + "compilerOptions": { + "module": "esnext", + "outDir": "dist/mjs", + "target": "esnext" + } +} \ No newline at end of file