From 673c84ac1a19e89d95433afb198e269aaa4be482 Mon Sep 17 00:00:00 2001 From: JarbasAI <33701864+JarbasAl@users.noreply.github.com> Date: Wed, 16 Oct 2024 23:13:47 +0100 Subject: [PATCH 1/3] drop threading (#19) * fix:standardize_lang * fix:standardize_lang * fix:drop_multithreading * typing --- ovos_padatious/intent_container.py | 233 +++++++++++++---------------- ovos_padatious/opm.py | 31 ++-- ovos_padatious/training_manager.py | 140 ++++++++++------- tests/test_container.py | 49 ------ 4 files changed, 198 insertions(+), 255 deletions(-) diff --git a/ovos_padatious/intent_container.py b/ovos_padatious/intent_container.py index 5714d55..ef5c288 100644 --- a/ovos_padatious/intent_container.py +++ b/ovos_padatious/intent_container.py @@ -12,23 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. import inspect -import json import os - -from ovos_padatious import padaos -import sys from functools import wraps -from subprocess import call, check_output -from threading import Thread +from typing import List, Dict, Any, Optional -from ovos_padatious.match_data import MatchData +from ovos_utils.log import LOG + +from ovos_padatious import padaos from ovos_padatious.entity import Entity from ovos_padatious.entity_manager import EntityManager from ovos_padatious.intent_manager import IntentManager +from ovos_padatious.match_data import MatchData from ovos_padatious.util import tokenize def _save_args(func): + """ + Decorator that saves the arguments passed to the function in the serialized_args attribute of the class. + + Args: + func (function): The function to be decorated. + """ + @wraps(func) def wrapper(*args, **kwargs): func(*args, **kwargs) @@ -41,41 +46,42 @@ def wrapper(*args, **kwargs): return wrapper -class IntentContainer(object): +class IntentContainer: """ Creates an IntentContainer object used to load and match intents Args: - cache_dir (str): Place to put all saved neural networks + cache_dir (str): Directory for caching the neural network models and intent/entity files. """ - def __init__(self, cache_dir): + def __init__(self, cache_dir: str) -> None: os.makedirs(cache_dir, exist_ok=True) - self.cache_dir = cache_dir - self.must_train = False - self.intents = IntentManager(cache_dir) - self.entities = EntityManager(cache_dir) - self.padaos = padaos.IntentContainer() - self.train_thread = None # type: Thread - self.serialized_args = [] # Arguments of all calls to register intents/entities - - def clear(self): + self.cache_dir: str = cache_dir + self.must_train: bool = False + self.intents: IntentManager = IntentManager(cache_dir) + self.entities: EntityManager = EntityManager(cache_dir) + self.padaos: padaos.IntentContainer = padaos.IntentContainer() + self.train_thread: Optional[Any] = None # deprecated + self.serialized_args: List[Dict[str, Any]] = [] # Serialized calls for training intents/entities + + def clear(self) -> None: + """ + Clears the current intent and entity managers and resets the container. + """ os.makedirs(self.cache_dir, exist_ok=True) self.must_train = False self.intents = IntentManager(self.cache_dir) self.entities = EntityManager(self.cache_dir) self.padaos = padaos.IntentContainer() - self.train_thread = None self.serialized_args = [] - def instantiate_from_disk(self): + def instantiate_from_disk(self) -> None: """ Instantiates the necessary (internal) data structures when loading persisted model from disk. This is done via injecting entities and intents back from cached file versions. 
""" - - entity_traindata = {} - intent_traindata = {} + entity_traindata: Dict[str, List[str]] = {} + intent_traindata: Dict[str, List[str]] = {} # workaround: load training data for both entities and intents since # padaos regex needs it for (re)compilation until TODO is cleared @@ -95,7 +101,6 @@ def instantiate_from_disk(self): # TODO: padaos.compile (regex compilation) is redone when loading: find # a way to persist regex, as well! for f in os.listdir(self.cache_dir): - if f.startswith('{') and f.endswith('}.hash'): entity_name = f[1:f.find('}.hash')] if entity_name in entity_traindata: @@ -103,7 +108,8 @@ def instantiate_from_disk(self): name=entity_name, lines=entity_traindata[entity_name], reload_cache=False, - must_train=False) + must_train=False + ) elif not f.startswith('{') and f.endswith('.hash'): intent_name = f[0:f.find('.hash')] if intent_name in intent_traindata: @@ -111,24 +117,26 @@ def instantiate_from_disk(self): name=intent_name, lines=intent_traindata[intent_name], reload_cache=False, - must_train=False) + must_train=False + ) @_save_args - def add_intent(self, name, lines, reload_cache=False, must_train=True): + def add_intent(self, name: str, lines: List[str], reload_cache: bool = False, must_train: bool = True) -> None: """ Creates a new intent, optionally checking the cache first Args: - name (str): The associated name of the intent - lines (list): All the sentences that should activate the intent - reload_cache: Whether to ignore cached intent if exists + name (str): Name of the intent. + lines (List[str]): Sentences that will activate the intent. + reload_cache (bool): Whether to ignore cached intent. + must_train (bool): Whether the model needs training after adding the intent. """ self.intents.add(name, lines, reload_cache, must_train) self.padaos.add_intent(name, lines) self.must_train = must_train @_save_args - def add_entity(self, name, lines, reload_cache=False, must_train=True): + def add_entity(self, name: str, lines: List[str], reload_cache: bool = False, must_train: bool = True) -> None: """ Adds an entity that matches the given lines. @@ -137,9 +145,10 @@ def add_entity(self, name, lines, reload_cache=False, must_train=True): self.add_entity('weekday', ['monday', 'tuesday', 'wednesday']) # ... Args: - name (str): The name of the entity - lines (list): Lines of example extracted entities - reload_cache (bool): Whether to refresh all of cache + name (str): Name of the entity. + lines (List[str]): Example extracted entities. + reload_cache (bool): Whether to refresh the cache. + must_train (bool): Whether the model needs training after adding the entity. """ Entity.verify_name(name) self.entities.add( @@ -151,12 +160,7 @@ def add_entity(self, name, lines, reload_cache=False, must_train=True): self.must_train = must_train @_save_args - def load_entity( - self, - name, - file_name, - reload_cache=False, - must_train=True): + def load_entity(self, name: str, file_name: str, reload_cache: bool = False, must_train: bool = True) -> None: """ Loads an entity, optionally checking the cache first @@ -164,7 +168,8 @@ def load_entity( name (str): The associated name of the entity file_name (str): The location of the entity file reload_cache (bool): Whether to refresh all of cache - """ + must_train (bool): Whether the model needs training after loading the entity. 
+ """ Entity.verify_name(name) self.entities.load(Entity.wrap_name(name), file_name, reload_cache) with open(file_name) as f: @@ -177,12 +182,7 @@ def load_file(self, *args, **kwargs): self.load_intent(*args, **kwargs) @_save_args - def load_intent( - self, - name, - file_name, - reload_cache=False, - must_train=True): + def load_intent(self, name: str, file_name: str, reload_cache: bool = False, must_train: bool = True) -> None: """ Loads an intent, optionally checking the cache first @@ -190,6 +190,7 @@ def load_intent( name (str): The associated name of the intent file_name (str): The location of the intent file reload_cache (bool): Whether to refresh all of cache + must_train (bool): Whether the model needs training after loading the intent. """ self.intents.load(name, file_name, reload_cache) with open(file_name) as f: @@ -197,36 +198,30 @@ def load_intent( self.must_train = must_train @_save_args - def remove_intent(self, name): - """Unload an intent""" + def remove_intent(self, name: str) -> None: + """ + Removes an intent by its name. + + Args: + name (str): Name of the intent to remove. + """ self.intents.remove(name) self.padaos.remove_intent(name) self.must_train = True @_save_args - def remove_entity(self, name): - """Unload an entity""" + def remove_entity(self, name: str) -> None: + """ + Removes an entity by its name. + + Args: + name (str): Name of the entity to remove. + """ self.entities.remove(name) self.padaos.remove_entity(name) - def _train(self, *args, **kwargs): - t1 = Thread( - target=self.intents.train, - args=args, - kwargs=kwargs, - daemon=True) - t2 = Thread( - target=self.entities.train, - args=args, - kwargs=kwargs, - daemon=True) - t1.start() - t2.start() - t1.join() - t2.join() - self.entities.calc_ent_dict() - - def train(self, debug=True, force=False, single_thread=False, timeout=20): + def train(self, debug: bool = True, force: bool = False, single_thread: Optional[bool] = None, + timeout: Optional[float] = None) -> bool: """ Trains all the loaded intents that need to be updated If a cache file exists with the same hash as the intent file, @@ -235,100 +230,72 @@ def train(self, debug=True, force=False, single_thread=False, timeout=20): Args: debug (bool): Whether to print a message to stdout each time a new intent is trained force (bool): Whether to force training if already finished - single_thread (bool): Whether to force running in a single thread - timeout (float): Seconds before cancelling training + single_thread (bool): DEPRECATED + timeout (float): DEPRECATED Returns: - bool: True if training succeeded without timeout + bool: True if training succeeded """ + if single_thread is not None: + LOG.warning("'single_thread' argument is deprecated and will be ignored") + if timeout is not None: + LOG.warning("'timeout' argument is deprecated and will be ignored") if not self.must_train and not force: - return + return True self.padaos.compile() - self.train_thread = Thread(target=self._train, kwargs=dict( - debug=debug, - single_thread=single_thread, - timeout=timeout - ), daemon=True) - self.train_thread.start() - self.train_thread.join(timeout) - self.must_train = False - return not self.train_thread.is_alive() + # Train intents and entities + self.intents.train(debug=debug) + self.entities.train(debug=debug) - def train_subprocess(self, *args, **kwargs): - """ - Trains in a subprocess which provides a timeout guarantees everything shuts down properly + self.entities.calc_ent_dict() - Args: - See - Returns: - bool: True for success, False if timed out 
- """ - ret = call([ - sys.executable, '-m', 'ovos_padatious', 'train', self.cache_dir, - '-d', json.dumps(self.serialized_args), - '-a', json.dumps(args), - '-k', json.dumps(kwargs), - ]) - if ret == 2: - raise TypeError( - 'Invalid train arguments: {} {}'.format( - args, kwargs)) - data = self.serialized_args - self.clear() - self.apply_training_args(data) - self.padaos.compile() - if ret == 0: - self.must_train = False - return True - elif ret == 10: # timeout - return False - else: - raise ValueError( - 'Training failed and returned code: {}'.format(ret)) + self.must_train = False + return True - def calc_intents(self, query): + def calc_intents(self, query: str) -> List[MatchData]: """ Tests all the intents against the query and returns data on how well each one matched against the query Args: - query (str): Input sentence to test against intents + query (str): Input sentence to test against intents. + Returns: - list: List of intent matches - See calc_intent() for a description of the returned MatchData + List[MatchData]: A list of all intent matches with confidence scores. """ if self.must_train: self.train() - intents = {} if self.train_thread and self.train_thread.is_alive() else { - i.name: i for i in self.intents.calc_intents(query, self.entities) - } + intents = {i.name: i for i in self.intents.calc_intents(query, self.entities)} sent = tokenize(query) for perfect_match in self.padaos.calc_intents(query): name = perfect_match['name'] - intents[name] = MatchData( - name, sent, matches=perfect_match['entities'], conf=1.0) + intents[name] = MatchData(name, sent, matches=perfect_match['entities'], conf=1.0) return list(intents.values()) - def calc_intent(self, query): + def calc_intent(self, query: str) -> MatchData: """ - Tests all the intents against the query and returns - match data of the best intent + Returns the best intent match for the given query. Args: - query (str): Input sentence to test against intents + query (str): Input sentence to test against intents. + Returns: - MatchData: Best intent match + MatchData: The best matching intent. """ matches = self.calc_intents(query) - if len(matches) == 0: + if not matches: return MatchData('', '') best_match = max(matches, key=lambda x: x.conf) - best_matches = ( - match for match in matches if match.conf == best_match.conf) - return min(best_matches, key=lambda x: sum( - map(len, x.matches.values()))) + best_matches = [match for match in matches if match.conf == best_match.conf] + return min(best_matches, key=lambda x: sum(map(len, x.matches.values()))) + + def get_training_args(self) -> List[Dict[str, Any]]: + """ + Returns all serialized arguments used for training intents and entities. - def get_training_args(self): + Returns: + List[Dict[str, Any]]: List of serialized arguments for training. 
+ """ return self.serialized_args def apply_training_args(self, data): diff --git a/ovos_padatious/opm.py b/ovos_padatious/opm.py index f26786d..1dd1d6e 100644 --- a/ovos_padatious/opm.py +++ b/ovos_padatious/opm.py @@ -105,7 +105,7 @@ def __init__(self, bus: Optional[Union[MessageBusClient, FakeBus]] = None, self.containers = {lang: PadatiousIntentContainer(f"{intent_cache}/{lang}") for lang in langs} - self.finished_training_event = Event() + self.finished_training_event = Event() # DEPRECATED self.finished_initial_train = False self.registered_intents = [] @@ -116,12 +116,11 @@ def __init__(self, bus: Optional[Union[MessageBusClient, FakeBus]] = None, self.bus.on('padatious:register_entity', self.register_entity) self.bus.on('detach_intent', self.handle_detach_intent) self.bus.on('detach_skill', self.handle_detach_skill) - self.bus.on('mycroft.skills.initialized', self.train) self.bus.on('intent.service.padatious.get', self.handle_get_padatious) self.bus.on('intent.service.padatious.manifest.get', self.handle_padatious_manifest) self.bus.on('intent.service.padatious.entities.manifest.get', self.handle_entity_manifest) - LOG.debug('Loaded Padatious intent parser.') + LOG.debug('Loaded Padatious intent pipeline') @property def padatious_config(self) -> Dict: @@ -188,26 +187,25 @@ def train(self, message=None): Args: message (Message): optional triggering message """ - self.finished_training_event.clear() - padatious_single_thread = self.config.get('single_thread', False) - if message is None: - single_thread = padatious_single_thread - else: - single_thread = message.data.get('single_thread', - padatious_single_thread) + name = message.data["name"] if message else "" + if not any(engine.must_train + for engine in self.containers.values()): + LOG.debug(f"Nothing new to train for '{name}'") + return + for lang in self.containers: - self.containers[lang].train(single_thread=single_thread) + if self.containers[lang].must_train: + LOG.debug(f"Training '{name}' for lang '{lang}'") + self.containers[lang].train() - LOG.debug('Training complete.') - self.finished_training_event.set() + LOG.debug(f"Training complete for '{name}'!") if not self.finished_initial_train: self.bus.emit(Message('mycroft.skills.trained')) self.finished_initial_train = True + @deprecated("'wait_and_train' has been deprecated, use 'train' directly", "2.0.0") def wait_and_train(self): """Wait for minimum time between training and start training.""" - if not self.finished_initial_train: - return self.train() def __detach_intent(self, intent_name): @@ -264,7 +262,7 @@ def _register_object(self, message, object_name, register_func): register_func(name, samples) - self.wait_and_train() + self.train(message) def register_intent(self, message): """Messagebus handler for registering intents. @@ -344,7 +342,6 @@ def shutdown(self): self.bus.remove('intent.service.padatious.entities.manifest.get', self.handle_entity_manifest) self.bus.remove('detach_intent', self.handle_detach_intent) self.bus.remove('detach_skill', self.handle_detach_skill) - self.bus.remove('mycroft.skills.initialized', self.train) def handle_get_padatious(self, message): """messagebus handler for perfoming padatious parsing. diff --git a/ovos_padatious/training_manager.py b/ovos_padatious/training_manager.py index af24048..833fdf2 100644 --- a/ovos_padatious/training_manager.py +++ b/ovos_padatious/training_manager.py @@ -11,52 +11,71 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -import multiprocessing as mp from functools import partial -from multiprocessing.context import TimeoutError -from os.path import join, isfile, isdir, splitext +from os.path import join, isfile, splitext +from typing import List, Type, Union + +from ovos_utils.log import LOG import ovos_padatious +from ovos_padatious.trainable import Trainable from ovos_padatious.train_data import TrainData from ovos_padatious.util import lines_hash -def _train_and_save(obj, cache, data, print_updates): - """Internal pickleable function used to train objects in another process""" +def _train_and_save(obj: Trainable, cache: str, data: TrainData, print_updates: bool) -> None: + """ + Internal function to train objects sequentially and save them. + + Args: + obj (Trainable): Object to train (Intent or Entity). + cache (str): Path to the cache directory. + data (TrainData): Training data. + print_updates (bool): Whether to print updates during training. + """ obj.train(data) if print_updates: - print('Regenerated ' + obj.name + '.') + LOG.debug('Regenerated ' + obj.name + '.') obj.save(cache) -class TrainingManager(object): +class TrainingManager: """ - Manages multithreaded training of either Intents or Entities + Manages sequential training of either Intents or Entities. Args: - cls (Type[Trainable]): Class to wrap - cache_dir (str): Place to store cache files + cls (Type[Trainable]): Class to wrap (Intent or Entity). + cache_dir (str): Path to the cache directory. """ - def __init__(self, cls, cache_dir): + def __init__(self, cls: Type[Trainable], cache_dir: str) -> None: + """ + Initializes the TrainingManager. + + Args: + cls (Type[Trainable]): Class to be managed (Intent or Entity). + cache_dir (str): Path where cache files are stored. + """ self.cls = cls self.cache = cache_dir - self.objects = [] - self.objects_to_train = [] - + self.objects: List[Trainable] = [] + self.objects_to_train: List[Trainable] = [] self.train_data = TrainData() - def add(self, name, lines, reload_cache=False, must_train=True): + def add(self, name: str, lines: List[str], reload_cache: bool = False, must_train: bool = True) -> None: + """ + Adds a new intent or entity for training or loading from cache. - # special case: load persisted (aka. cached) resource (i.e. - # entity or intent) from file into memory data structures + Args: + name (str): Name of the intent or entity. + lines (List[str]): Lines of training data. + reload_cache (bool): Whether to force reload of cache if it exists. + must_train (bool): Whether training is required for the new intent/entity. 
+ """ if not must_train: - self.objects.append( - self.cls.from_file( - name=name, - folder=self.cache)) - # general case: load resource (entity or intent) to training queue - # or if no change occurred to memory data structures + self.objects.append(self.cls.from_file(name=name, folder=self.cache)) + # general case: load resource (entity or intent) to training queue + # or if no change occurred to memory data structures else: hash_fn = join(self.cache, name + '.hash') old_hsh = None @@ -68,51 +87,60 @@ def add(self, name, lines, reload_cache=False, must_train=True): if reload_cache or old_hsh != new_hsh: self.objects_to_train.append(self.cls(name=name, hsh=new_hsh)) else: - self.objects.append( - self.cls.from_file( - name=name, folder=self.cache)) + self.objects.append(self.cls.from_file(name=name, folder=self.cache)) self.train_data.add_lines(name, lines) - def load(self, name, file_name, reload_cache=False): + def load(self, name: str, file_name: str, reload_cache: bool = False) -> None: + """ + Loads an entity or intent from a file and adds it for training or caching. + + Args: + name (str): Name of the intent or entity. + file_name (str): Path to the file containing the training data. + reload_cache (bool): Whether to reload the cache for this intent/entity. + """ with open(file_name) as f: self.add(name, f.read().split('\n'), reload_cache) - def remove(self, name): + def remove(self, name: str) -> None: + """ + Removes an intent or entity from the training and cache. + + Args: + name (str): Name of the intent or entity to remove. + """ self.objects = [i for i in self.objects if i.name != name] - self.objects_to_train = [ - i for i in self.objects_to_train if i.name != name] + self.objects_to_train = [i for i in self.objects_to_train if i.name != name] self.train_data.remove_lines(name) - def train(self, debug=True, single_thread=False, timeout=20): - train = partial( - _train_and_save, - cache=self.cache, - data=self.train_data, - print_updates=debug) + def train(self, debug: bool = True, single_thread: Union[None, bool] = None, + timeout: Union[None, int] = None) -> None: + """ + Trains all intents and entities sequentially. - if single_thread: - for i in self.objects_to_train: - train(i) - else: - # Train in multiple processes to disk - pool = mp.Pool() + Args: + debug (bool): Whether to print debug messages. 
+ single_thread (bool): DEPRECATED + timeout (float): DEPRECATED + """ + if single_thread is not None: + LOG.warning("'single_thread' argument is deprecated and will be ignored") + if timeout is not None: + LOG.warning("'timeout' argument is deprecated and will be ignored") + + train = partial(_train_and_save, cache=self.cache, data=self.train_data, print_updates=debug) + + # Train objects sequentially + for obj in self.objects_to_train: try: - pool.map_async(train, self.objects_to_train).get(timeout) - except TimeoutError: - if debug: - print('Some objects timed out while training') - finally: - pool.close() - pool.join() + train(obj) + except Exception as e: + LOG.error(f"Error training {obj.name}: {e}") # Load saved objects from disk for obj in self.objects_to_train: try: - self.objects.append( - self.cls.from_file( - name=obj.name, - folder=self.cache)) - except IOError: - if debug: - print('Took too long to train', obj.name) + self.objects.append(self.cls.from_file(name=obj.name, folder=self.cache)) + except Exception as e: + LOG.exception(f"Failed to load trained object {obj.name}") self.objects_to_train = [] diff --git a/tests/test_container.py b/tests/test_container.py index cd99eb5..ce3ea34 100644 --- a/tests/test_container.py +++ b/tests/test_container.py @@ -119,55 +119,6 @@ def _create_large_intent(self, depth): return '(a|b|)' return '{0} {0}'.format(self._create_large_intent(depth - 1)) - @pytest.mark.skipif( - not os.environ.get('RUN_LONG'), - reason="Takes a long time") - def test_train_timeout(self): - self.cont.add_intent('a', [ - ' '.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(5)) - for __ in range(300) - ]) - self.cont.add_intent('b', [ - ' '.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(5)) - for __ in range(300) - - ]) - a = monotonic() - self.cont.train(True, timeout=1) - b = monotonic() - assert b - a <= 2 - - a = monotonic() - self.cont.train(True, timeout=1) - b = monotonic() - assert b - a <= 0.1 - - def test_train_timeout_subprocess(self): - self.cont.add_intent('a', [ - ' '.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(5)) - for __ in range(300) - ]) - self.cont.add_intent('b', [ - ' '.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(5)) - for __ in range(300) - ]) - a = monotonic() - assert not self.cont.train_subprocess(timeout=0.1) - b = monotonic() - assert b - a <= 1 - - def test_train_subprocess(self): - self.cont.add_intent('timer', [ - 'set a timer for {time} minutes', - ]) - self.cont.add_entity('time', [ - '#', '##', '#:##', '##:##' - ]) - assert self.cont.train_subprocess(False, timeout=20) - intent = self.cont.calc_intent('set timer for 3 minutes') - assert intent.name == 'timer' - assert intent.matches == {'time': '3'} - def test_calc_intents(self): self._add_intent() self.cont.train(False) From 607128dad686b9b144d685faf40e8f5cb78e27a0 Mon Sep 17 00:00:00 2001 From: JarbasAl Date: Wed, 16 Oct 2024 22:14:01 +0000 Subject: [PATCH 2/3] Increment Version to 1.0.2a1 --- ovos_padatious/version.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ovos_padatious/version.py b/ovos_padatious/version.py index 4198cf6..93175e9 100644 --- a/ovos_padatious/version.py +++ b/ovos_padatious/version.py @@ -1,6 +1,6 @@ # START_VERSION_BLOCK VERSION_MAJOR = 1 VERSION_MINOR = 0 -VERSION_BUILD = 1 -VERSION_ALPHA = 0 +VERSION_BUILD = 2 +VERSION_ALPHA = 1 # END_VERSION_BLOCK From 8eae4b11ccd5d466f890009adc9975348105ceb1 Mon Sep 17 00:00:00 2001 From: JarbasAl Date: Wed, 16 Oct 2024 
22:14:24 +0000 Subject: [PATCH 3/3] Update Changelog --- CHANGELOG.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc7cf8f..11fee32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,12 +1,16 @@ # Changelog -## [1.0.1a1](https://github.com/OpenVoiceOS/ovos-padatious-pipeline-plugin/tree/1.0.1a1) (2024-10-16) +## [1.0.2a1](https://github.com/OpenVoiceOS/ovos-padatious-pipeline-plugin/tree/1.0.2a1) (2024-10-16) -[Full Changelog](https://github.com/OpenVoiceOS/ovos-padatious-pipeline-plugin/compare/1.0.0...1.0.1a1) +[Full Changelog](https://github.com/OpenVoiceOS/ovos-padatious-pipeline-plugin/compare/1.0.1...1.0.2a1) + +**Fixed bugs:** + +- random training failure [\#12](https://github.com/OpenVoiceOS/ovos-padatious-pipeline-plugin/issues/12) **Merged pull requests:** -- port tests from core [\#17](https://github.com/OpenVoiceOS/ovos-padatious-pipeline-plugin/pull/17) ([JarbasAl](https://github.com/JarbasAl)) +- drop threading [\#19](https://github.com/OpenVoiceOS/ovos-padatious-pipeline-plugin/pull/19) ([JarbasAl](https://github.com/JarbasAl))
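
Below is a minimal usage sketch of the now-synchronous training flow introduced by this patch, adapted from the subprocess test removed above; the cache directory name is illustrative.

```python
from ovos_padatious.intent_container import IntentContainer

# cache directory name is illustrative
container = IntentContainer('intent_cache')
container.add_intent('timer', ['set a timer for {time} minutes'])
container.add_entity('time', ['#', '##', '#:##', '##:##'])

# train() now runs in the calling thread and returns True on success;
# the old single_thread/timeout arguments are deprecated and ignored.
assert container.train(debug=False)

intent = container.calc_intent('set timer for 3 minutes')
print(intent.name)     # 'timer'
print(intent.matches)  # {'time': '3'}
```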