From ba55c72d286f7831609c9d1c115f96a3d7408855 Mon Sep 17 00:00:00 2001
From: Kevin te Raa
Date: Fri, 3 Feb 2023 11:04:21 +0100
Subject: [PATCH 01/15] Started integrating the improved signal processing
 into the Parrot codebase

---
 lib/default_config.py    |   2 +-
 lib/signal_processing.py | 107 ++++++++++++++
 lib/srt.py               | 304 +++++++++++++++++++++++++++++++++++++++
 lib/stream_processing.py |  54 +++++++
 lib/typing.py            |  54 +++++++
 5 files changed, 520 insertions(+), 1 deletion(-)
 create mode 100644 lib/signal_processing.py
 create mode 100644 lib/srt.py
 create mode 100644 lib/stream_processing.py
 create mode 100644 lib/typing.py

diff --git a/lib/default_config.py b/lib/default_config.py
index 53de3267..7d129596 100644
--- a/lib/default_config.py
+++ b/lib/default_config.py
@@ -59,4 +59,4 @@ if( SPEECHREC_ENABLED == True ):
     SPEECHREC_ENABLED = dragonfly_spec is not None
 
-
\ No newline at end of file
+BACKGROUND_LABEL = "silence"
\ No newline at end of file
diff --git a/lib/signal_processing.py b/lib/signal_processing.py
new file mode 100644
index 00000000..040a109f
--- /dev/null
+++ b/lib/signal_processing.py
@@ -0,0 +1,107 @@
+import math
+import numpy as np
+from scipy.fftpack import fft, rfft, fft2, dct
+import audioop
+from python_speech_features import mfcc
+from .mfsc import Mfsc
+from typing import List, Tuple
+
+_mfscs = []
+
+# Determine the decibel based on full scale of 16 bit ints ( same as Audacity )
+def determine_dBFS(waveData: np.array) -> float:
+    return 20 * math.log10(determine_power(waveData) / math.pow(32767, 2))
+
+def determine_power(waveData: np.array) -> float:
+    return audioop.rms(waveData, 4)
+
+# This power measurement is the old representation for human readability
+def determine_legacy_power(waveData: np.array) -> float:
+    return determine_power( waveData ) / 1000
+
+# Old fundamental frequency finder - this one doesn't show frequency in Hz
+def determine_legacy_frequency(waveData: np.array, recordLength: float = 0.1) -> float:
+    fft_result = fft( waveData )
+    positiveFreqs = np.abs( fft_result[ 0:round( len(fft_result) / 2 ) ] )
+    highestFreq = 0
+    loudestPeak = 500
+    frequencies = [0]
+    for freq in range( 0, len( positiveFreqs ) ):
+        if( positiveFreqs[ freq ] > loudestPeak ):
+            loudestPeak = positiveFreqs[ freq ]
+            highestFreq = freq
+
+    if( loudestPeak > 500 ):
+        frequencies.append( highestFreq )
+
+    if( recordLength < 1 ):
+        # Considering our sound sample is, for example, 100 ms, our lowest frequency we can find is 10Hz ( I think )
+        # So add that as a base to our found frequency to get Hz - This is probably wrong
+        freqInHz = ( 1 / recordLength ) + np.amax( frequencies )
+    else:
+        # I have no clue how to even pretend to know how to calculate Hz for fft frames longer than a second
+        freqInHz = np.amax( frequencies )
+
+    return freqInHz
+
+# Approximate vocal formants F1 and F2 using weighted average
+# Goal is to have a lightweight, smooth pair of values that can be properly controlled by the user
+# Heuristics taken based on https://home.cc.umanitoba.ca/~krussll/phonetics/acoustic/formants.html
+# 241 taken from assumption 15ms * 16khz + 1
+def determine_formant_frequencies(waveData: np.array, bin_size: float = 241) -> Tuple[float, float]:
+    bin_range = 8000 / bin_size
+
+    # Check what the loudest frequency is in the 1000Hz range first
+    f1_range = int(bin_size / 8)
+    # Initially start F2 range from 1100Hz
+    f2_range = int(bin_size / 8 + 3)
+
+    fft_bins = np.fft.rfft(waveData)
+
+    f1_bins = fft_bins[:f1_range]
+    f1_n_loudest_bins = 5
+    loudest_f1_fft_bins = np.argpartition(f1_bins, -f1_n_loudest_bins)[-f1_n_loudest_bins:]
+    loudest_f1_bin_values = np.take(fft_bins, loudest_f1_fft_bins)
+    f1_bin_sum = np.sum(loudest_f1_bin_values)
+    f1_weighted_avg = np.real(np.average(loudest_f1_fft_bins, weights=(loudest_f1_bin_values / f1_bin_sum)))
+    f1 = max(0, f1_weighted_avg) * bin_range
+
+    # In case the F1 is lower than 550Hz, lower the F2 range start to find low sounding vowels' F2
+    if (f1 < 550):
+        f2_range = int(bin_size / 8 * 0.8)
+
+    f2_bins = fft_bins[f2_range:]
+    f2_n_loudest_bins = 20
+    loudest_f2_fft_bins = np.argpartition(f2_bins, -f2_n_loudest_bins)[-f2_n_loudest_bins:]
+    loudest_f2_bin_values = np.take(f2_bins, loudest_f2_fft_bins)
+    f2_bin_sum = np.sum(loudest_f2_bin_values)
+
+    # Append the offset of the indexes of f2 to make sure the bins line up with the original fft bins
+    f2_weighted_avg = np.real(np.average([f2_bin + f2_range for f2_bin in loudest_f2_fft_bins], weights=(loudest_f2_bin_values / f2_bin_sum)))
+    f2 = f2_weighted_avg * bin_range
+
+    return f1, f2
+
+def determine_mfcc_type1(waveData: np.array, sampleRate: int = 16000) -> List[float]:
+    return mfcc( wavData, samplerate=sampleRate, nfft=1103, numcep=13, appendEnergy=True )
+
+def determine_mfcc_type2(waveData: np.array, sampleRate: int = 16000) -> List[float]:
+    return mfcc( wavData, samplerate=sampleRate, nfft=1103, numcep=30, nfilt=40, preemph=0.5, winstep=0.005, winlen=0.015, appendEnergy=False )
+
+def determine_mfsc(waveData: np.array, sampleRate:int = 16000) -> List[float]:
+    global _mfscs
+    if ( sampleRate not in _mfscs ):
+        _mfscs[sampleRate] = Mfsc(sr=sampleRate, n_mel=40, preem_coeff=0.5, frame_stride_ms=5, frame_size_ms=15)
+    _mfsc = _mfscs[sampleRate]
+    mfsc_result = _mfsc.apply( wavData )
+
+# Get a feeling of how much the signal changes based on the total distance between mel frames
+def determine_euclidean_dist(mfscData: np.array) -> float:
+    filter_number = 40
+    mel_frame_amount = int(len(mfscData) / filter_number)
+    mfscData = np.reshape(mfscData, (mel_frame_amount, filter_number) )
+    distance = 0
+    for i in range(0, mel_frame_amount):
+        if i > 0:
+            distance += np.linalg.norm(mfscData[i-1] - mfscData[i])
+    return distance
\ No newline at end of file
diff --git a/lib/srt.py b/lib/srt.py
new file mode 100644
index 00000000..ce373b6d
--- /dev/null
+++ b/lib/srt.py
@@ -0,0 +1,304 @@
+import time
+import numpy as np
+from config.config import BACKGROUND_LABEL
+from .typing import TransitionEvent, DetectionEvent, DetectionFrame
+from typing import List
+
+def ms_to_srt_timestring( ms: int, include_hours=True):
+    if ms <= 0:
+        return "00:00:00,000" if include_hours else "00:00,000"
+
+    if include_hours:
+        hours = math.floor(ms / (60 * 60 * 1000))
+        ms -= hours * 60 * 60 * 1000
+    minutes = math.floor(ms / (60 * 1000))
+    ms -= minutes * 60 * 1000
+    seconds = math.floor(ms / 1000)
+    ms -= seconds * 1000
+    return ( "{:02d}".format(hours) + ":" if include_hours else "" ) + "{:02d}".format(minutes) + ":" + "{:02d}".format(seconds) + "," + "{:03d}".format(ms)
+
+def srt_timestring_to_ms( srt_timestring: str):
+    ms = int(srt_timestring.split(",")[1])
+    ms += int(srt_timestring.split(":")[2].split(",")[0]) * 1000
+    ms += int(srt_timestring.split(":")[1]) * 60 * 1000
+    ms += int(srt_timestring.split(":")[0]) * 60 * 60 * 1000
+    return ms
+
+def persist_srt_file(srt_filename: str, events: List[DetectionEvent]):
+    if not srt_filename.endswith(".v1.srt"):
+        srt_filename += ".v1.srt"
+
+    # Sort events chronologically first
+    events.sort(key = lambda event: event.start_index)
+    with open(srt_filename, 'w') as srt_file:
+        for index, event in enumerate(events):
+            srt_file.write( str(index + 1) + '\n' )
+            srt_file.write( ms_to_srt_timestring(event.start_ms) + " --> " + ms_to_srt_timestring(event.end_ms) + '\n' )
+            srt_file.write( event.label + '\n\n' )
+
+def parse_srt_file(srt_filename: str, rounding_ms: int) -> List[TransitionEvent]:
+    transition_events = []
+    positive_event_list = []
+
+    if not srt_filename.endswith(".srt"):
+        srt_filename += ".srt"
+
+    with open(srt_filename, "r") as srt:
+        time_start = 0
+        time_end = 0
+        type_sound = ""
+        for line_index, line in enumerate(srt):
+            if not line.strip():
+                time_start = 0
+                time_end = 0
+                type_sound = ""
+            elif "-->" in line:
+                # Extract time start and end rounded to the window size
+                # To give the detection a fair estimate of correctness
+                time_pair = [timestring.strip() for timestring in line.split("-->")]
+                time_start = math.ceil(srt_timestring_to_ms( time_pair[0] ) / rounding_ms) * rounding_ms
+                time_end = math.ceil(srt_timestring_to_ms( time_pair[1] ) / rounding_ms) * rounding_ms
+            elif not line.strip().isnumeric():
+                if type_sound == "":
+                    type_sound = line.strip()
+                    if time_start < time_end:
+                        positive_event_list.append(str(time_start) + "---" + type_sound + "---start")
+                        positive_event_list.append(str(time_end) + "---" + type_sound + "---end")
+                    else:
+                        print( ".SRT error at line " + str(line_index) + " - Start time not before end time! Not adding this event - Numbers won't be valid!" )
+
+    # Sort chronologically by time
+    positive_event_list.sort(key = lambda event: int(event.split("---")[0]))
+    for time_index, time_event in enumerate(positive_event_list):
+        # Remove duplicates if found
+        if time_index != 0 and len(transition_events) > 0 and transition_events[-1].start_index == math.floor(int(time_event.split("---")[0]) / rounding_ms):
+            print( "Found duplicate entry at second " + str(math.floor(int(time_event.split("---")[0]) / rounding_ms) / 1000) + " - Not adding duplicate")
+            continue
+
+        if time_event.endswith("---start"):
+            if time_index == 0 and int(time_event.split("---")[0]) > 0:
+                transition_events.append( TransitionEvent(BACKGROUND_LABEL, 0, 0) )
+
+            ms_start = math.floor(int(time_event.split("---")[0]))
+
+            # If the time between the end and start of a new event is 0, then the previous event should be removed
+            if len(transition_events) > 0 and ms_start - transition_events[-1].start_ms <= rounding_ms:
+                transition_events.pop()
+
+            transition_events.append( TransitionEvent(time_event.split("---")[1], math.floor(ms_start / rounding_ms), ms_start) )
+        elif time_event.endswith("---end"):
+            ms_start = math.floor(int(time_event.split("---")[0]))
+            transition_events.append( TransitionEvent(BACKGROUND_LABEL, math.floor(ms_start / rounding_ms), ms_start) )
+
+    return transition_events
+
+
+def print_detection_performance_compared_to_srt(actual_frames: List[DetectionFrame], frames_to_read: int, srt_file_location: str, output_wave_file = None):
+    ms_per_frame = actual_frames[0].duration_ms
+    transition_events = parse_srt_file(srt_file_location, ms_per_frame)
+    detection_audio_frames = []
+    total_ms = 0
+
+    # Detection states
+    detected_during_index = False
+    false_detections = 0
+
+    # Times of recognitions
+    total_occurrences = 0
+    false_recognitions = 0
+    positive_recognitions = 0
+    total_recognitions = 0
+
+    # Statistics
+    ms_true_negative = 0
+    ms_true_positive = 0
+    ms_false_negative = 0
+    ms_false_positive = 0
+
+    false_types = {
+        # Types of false negative recognitions
+        "lag": [],
+        "stutter": [],
+        "cutoff": [],
+        "full_miss": [],
+        # Types of false positive recognitions
+        "late_stop": [],
+        "missed_dip": [],
+        "false_start": [],
+        "full_false_positive": [],
+    }
+
+    # Loop over the results and compare them against the expected transition events
+    index = 0
+    t_index = 0
+    for frame in actual_frames:
+        index += 1
+        total_ms += ms_per_frame
+
+        # Determine expected label
+        actual = frame.label
+        expected = BACKGROUND_LABEL
+        transitioning = False
+        if t_index < len(transition_events):
+            if t_index + 1 < len(transition_events) and index >= transition_events[t_index + 1].start_index:
+                t_index += 1
+                transitioning = True
+                if transition_events[t_index].label != BACKGROUND_LABEL:
+                    total_occurrences += 1
+                # If the current label is a background label, we have just passed a full occurrence
+                # So check if it has been found during the occurrence
+                else:
+                    if detected_during_index:
+                        positive_recognitions += 1
+                    else:
+                        false_recognitions += 1
+                    detected_during_index = False
+            expected = transition_events[t_index].label
+
+        # Add a WAVE signal for each false and true positive detection
+        if output_wave_file is not None:
+            highest_amp = 65536 / 10
+            signal_strength = highest_amp if actual != BACKGROUND_LABEL else 0
+            if expected != actual and actual != BACKGROUND_LABEL:
+                signal_strength = -highest_amp
+
+            detection_signal = np.full(int(frames_to_read / 4), int(signal_strength))
+            detection_signal[::2] = 0
+            detection_signal[::3] = 0
+            detection_signal[::5] = 0
+            detection_signal[::7] = 0
+            detection_signal[::9] = 0
+            detection_audio_frames.append( detection_signal )
+
+        if expected == actual:
+            # Determine false detection types
+            if false_detections > 0:
+                false_index_start = index - false_detections
+                false_index_end = index
+
+                # Determine the amount of true events that have been miscategorized
+                current_event_index = t_index
+                first_index = t_index
+                while( false_index_start < transition_events[first_index].start_index ):
+                    first_index -= 1
+                    if first_index <= 0:
+                        first_index = 0
+                        break
+
+                for ei in range(first_index - 1, current_event_index):
+                    event_index = ei + 1
+                    event = transition_events[event_index]
+                    event_start = event.start_index
+                    event_end = transition_events[event_index + 1].start_index if event_index + 1 < len(transition_events) else len(actual_frames) - 1
+
+                    false_event_type = ""
+                    ms_event = 0
+                    if false_index_start <= event_start:
+                        false_index_start = event_start
+
+                        # Misrecognition of the start of an event
+                        if false_index_end < event_end:
+                            ms_event = (false_index_end - false_index_start ) * ms_per_frame
+                            false_event_type = "late_stop" if event.label == BACKGROUND_LABEL else "lag"
+                        # Misrecognition of a complete event
+                        else:
+                            ms_event = ( event_end - false_index_start ) * ms_per_frame
+                            false_event_type = "missed_dip" if event.label == BACKGROUND_LABEL else "full_miss"
+                    elif false_index_start > event_start:
+
+                        # Misrecognition in between a full event
+                        if false_index_end < event_end:
+                            ms_event = ( false_index_end - false_index_start ) * ms_per_frame
+                            false_event_type = "full_false_positive" if event.label == BACKGROUND_LABEL else "stutter"
+                        # Misrecognition of the end of an event
+                        else:
+                            ms_event = (event_end - false_index_start) * ms_per_frame
+                            false_event_type = "false_start" if event.label == BACKGROUND_LABEL else "cutoff"
+
+                    if false_event_type in false_types and ms_event > 0:
+                        false_types[false_event_type].append( ms_event )
+
+                    # Reset the index to the start of the next event if the event can be followed by another false event
+                    if false_event_type in ["false_start", "cutoff", "full_miss", "full_false_positive"]:
+                        false_index_start = event_end
+            false_detections = 0
+
+            if expected != BACKGROUND_LABEL:
+                if detected_during_index == False:
+                    detected_during_index = True
+                ms_true_positive += ms_per_frame
+            else:
+                ms_true_negative += ms_per_frame
+        else:
+            # False detections are counted by the sum of their events
+            false_detections += 1
+
+    if output_wave_file is not None:
+        output_wave_file.writeframes(b''.join(detection_audio_frames))
+        output_wave_file.close()
+
+    # Determine total time
+    ms_false_positive = 0
+    ms_false_negative = 0
+    for false_type in false_types:
+        false_types[false_type] = {
+            "data": false_types[false_type],
+        }
+        amount = len(false_types[false_type]["data"])
+
+        false_types[false_type]["times"] = amount
+        false_types[false_type]["avg"] = round(np.mean(false_types[false_type]["data"])) if amount > 0 else 0
+        false_types[false_type]["std"] = round(np.std(false_types[false_type]["data"])) if amount > 0 else 0
+        if false_type in ["late_stop", "missed_dip", "false_start", "full_false_positive"]:
+            ms_false_positive += round(np.sum(false_types[false_type]["data"]))
+        else:
+            ms_false_negative += round(np.sum(false_types[false_type]["data"]))
+
+    # Export the results
+    export_row = []
+    print("-------- Detection statistics --------")
+    print("Expected: " + str(total_occurrences) )
+    export_row.append( str(positive_recognitions) )
+    export_row.append( str(false_recognitions) )
+    export_row.append( "0%" if total_occurrences == 0 else str(round(positive_recognitions / total_occurrences * 100)) + "%" )
+    print("Found: " + str(positive_recognitions) + " (" + ("0%" if total_occurrences == 0 else str(round(positive_recognitions / total_occurrences * 100)) + "%)") )
+    print("Missed: " + str(false_recognitions) + " (" + ("0%" if total_occurrences == 0 else str(round(false_recognitions / total_occurrences * 100)) + "%)"))
+    print("------------- Frame data -------------")
+    print("Total frames: " + str(len(actual_frames)))
+    export_row.append( str(round((ms_true_positive + ms_true_negative) / total_ms * 1000) / 10) + "%" )
+    print("Accuracy: " + export_row[-1])
+    print("-------- Positive / negative --------")
+    export_row.append( str(round(ms_true_positive / total_ms * 1000) / 10) + "%" )
+    print("True positive: " + export_row[-1])
+    export_row.append( str(round(ms_true_negative / total_ms * 1000) / 10) + "%" )
+    print("True negative: " + export_row[-1])
+    export_row.append( str(round(ms_false_positive / total_ms * 1000) / 10) + "%" )
+    print("False positive: " + export_row[-1])
+    export_row.append( str(round(ms_false_negative / total_ms * 1000) / 10) + "%" )
+    print("False negative: " + export_row[-1])
+    print("----------- False positives ----------")
+    key_length = 28
+    if ms_false_positive > 0:
+        for fp_type in [{"key": "false_start", "name": "Early start"},{"key": "missed_dip", "name": "Missed dip"},{"key": "late_stop", "name": "Late stop"},{"key": "full_false_positive", "name": "Full FP"},]:
+            ms_total = sum(false_types[fp_type["key"]]["data"])
+            print( (fp_type["name"] + " (% of FP):").ljust(key_length, " ") + ("0%" if ms_false_positive == 0 else str(round(ms_total / ms_false_positive * 100)) + "%") + " (" + str(false_types[fp_type["key"]]["times"]) + "x)" )
+            print("    [ Average " + str(false_types[fp_type["key"]]["avg"]) + "ms (σ " + str(false_types[fp_type["key"]]["std"]) + "ms) ]")
+            export_row.append( str(false_types[fp_type["key"]]["times"]) )
+            export_row.append( str(false_types[fp_type["key"]]["avg"]) + " σ " + str(false_types[fp_type["key"]]["std"]) if false_types[fp_type["key"]]["times"] > 0 else "0" )
+    else:
+        export_row.extend(["0", "0", "0", "0", "0", "0", "0", "0"])
+    if ms_false_negative > 0:
+        print("----------- False negatives ----------")
+        for fn_type in [{"key": "lag", "name": "Lagged start"},{"key": "stutter", "name": "Stutter"},{"key": "cutoff", "name": "Early cut-off"},{"key": "full_miss", "name": "Full miss"},]:
+            ms_total = sum(false_types[fn_type["key"]]["data"])
+            print( (fn_type["name"] + " (% of FN):").ljust(key_length, " ") + ("0%" if ms_false_negative == 0 else str(round(ms_total / ms_false_negative * 100)) + "%") + " (" + str(false_types[fn_type["key"]]["times"]) + "x)" )
+            print("    [ Average " + str(false_types[fn_type["key"]]["avg"]) + "ms (σ " + str(false_types[fn_type["key"]]["std"]) + "ms) ]")
+            export_row.append( str(false_types[fn_type["key"]]["times"]) )
+            export_row.append( str(false_types[fn_type["key"]]["avg"]) + " σ " + str(false_types[fn_type["key"]]["std"]) if false_types[fn_type["key"]]["times"] > 0 else "0" )
+    else:
+        export_row.extend(["0", "0", "0", "0", "0", "0", "0", "0"])
+    print("--------------------------------------")
+
+    print("Excel row")
+    print( " ".join(export_row) )
\ No newline at end of file
diff --git a/lib/stream_processing.py b/lib/stream_processing.py
new file mode 100644
index 00000000..85aac2c1
--- /dev/null
+++ b/lib/stream_processing.py
@@ -0,0 +1,54 @@
+from .typing import DetectionLabel, DetectionFrame, DetectionEvent, DetectionState
+from config.config import BACKGROUND_LABEL
+from typing import List
+
+def determine_detection_state(detection_frames: List[DetectionFrame], detection_state: DetectionState) -> DetectionState:
+    # Filter out very low power dbFS values as we can assume the hardware microphone is off
+    # And we do not want to skew the mean for that as it would create more false positives
+    # ( -70 dbFS was selected as a cut off after a bit of testing with a HyperX Quadcast microphone )
+    dBFS_frames = [x.dBFS for x in detection_frames]
+    std_dbFS = np.std(dBFS_frames)
+    detection_state.expected_snr = math.floor(std_dbFS * 2)
+    detection_state.expected_noise_floor = np.min(dBFS_frames) + std_dbFS
+    for label in detection_state.labels:
+
+        # Recalculate the duration type every 15 seconds
+        if label.duration_type == "" or len(detection_frames) % round(15 / RECORD_SECONDS) == 0:
+            label.duration_type = determine_duration_type(label, detection_frames)
+        label.min_dBFS = detection_state.expected_noise_floor + detection_state.expected_snr
+    return detection_state
+
+# Approximately determine whether the label in the stream is discrete or continuous
+# Discrete sounds are from a single source event like a click, tap or a snap
+# Whereas continuous sounds have a steady stream of energy from a source
+def determine_duration_type(label: DetectionLabel, detection_frames: List[DetectionFrame]) -> str:
+    label_events = [x for x in detection_frames_to_events(detection_frames) if x.label == label.label]
+    if len(label_events) < 10:
+        return ""
+    else:
+        # The assumption here is that discrete sounds cannot vary in length much as you cannot elongate the sound of a click for example
+        # So if the length doesn't vary much, we assume discrete over continuous
+        lengths = [x.end_ms - x.start_ms for x in label_events]
+        continuous_length_threshold = detection_frames[0].duration_ms * SLIDING_WINDOW_AMOUNT
+        return "discrete" if np.std(lengths) < continuous_length_threshold else "continuous"
+
+def detection_frames_to_events(detection_frames: List[DetectionFrame]) -> List[DetectionEvent]:
+    events = []
+    current_label = ""
+    current_frames = []
+    for frame in detection_frames:
+        if frame.label != current_label:
+            if len(current_frames) > 0:
+                events.append( DetectionEvent(current_label, current_frames[0].index, current_frames[-1].index, \
+                    (current_frames[0].index - 1) * current_frames[0].duration_ms, (current_frames[-1].index) * current_frames[-1].duration_ms, current_frames) )
+                current_frames = []
+            current_label = frame.label
+
+        if current_label != BACKGROUND_LABEL:
+            current_frames.append( frame )
+
+    if len(current_frames) > 0:
+        events.append( DetectionEvent(current_label, current_frames[0].index, current_frames[-1].index, \
+            (current_frames[0].index - 1) * current_frames[0].duration_ms, (current_frames[-1].index) * current_frames[-1].duration_ms, current_frames) )
+        current_frames = []
+    return events
diff --git a/lib/typing.py b/lib/typing.py
new file mode 100644
index 00000000..dc1827a5
--- /dev/null
+++ b/lib/typing.py
@@ -0,0 +1,54 @@
+from dataclasses import dataclass
+from typing import List
+
+@dataclass
+class TransitionEvent:
+    label: str
+    start_index: int
+    start_ms: int
+
+@dataclass
+class DetectionFrame:
+    index: int
+    duration_ms: int
+    positive: bool
+    power: float
+    dBFS: float
+    euclid_dist: float
+    mel_data: List[List[float]]
+    label: str
+
+@dataclass
+class DetectionEvent:
+    label: str
+
+    # Based on wave indices
+    start_index: int
+    end_index: int
+    start_ms: int
+    end_ms: int
+    frames: List[DetectionFrame]
+
+@dataclass
+class DetectionLabel:
+    label: str
+    ms_detected: int
+    duration_type: str
+
+    min_ms: float
+    min_dBFS: float
+    min_distance: float
+    max_distance: float
+
+@dataclass
+class DetectionState:
+    strategy: str
+    state: str
+    frames_to_read: int
+    ms_per_frame: int
+    ms_recorded: int
+    advanced_logging: bool
+
+    expected_snr: float
+    expected_noise_floor: float
+    labels: List[DetectionLabel]
\ No newline at end of file

From 25b8d0a4d04ca1c4e36826166253bcfecd97a7dd Mon Sep 17 00:00:00 2001
From: Kevin te Raa
Date: Fri, 10 Feb 2023 17:59:08 +0100
Subject: [PATCH 02/15] Added print statuses and progress bar

Started working on migration from old format to new
---
 lib/migrate_data.py      | 49 ++++++++++++++++++++
 lib/print_status.py      | 97 ++++++++++++++++++++++++++++++++++++++++
 lib/stream_processing.py |  2 +
 settings.py              |  4 +-
 4 files changed, 151 insertions(+), 1 deletion(-)
 create mode 100644 lib/migrate_data.py
 create mode 100644 lib/print_status.py

diff --git a/lib/migrate_data.py b/lib/migrate_data.py
new file mode 100644
index 00000000..080fefed
--- /dev/null
+++ b/lib/migrate_data.py
@@ -0,0 +1,49 @@
+from config.config import *
+import os
+from lib.stream_processing import CURRENT_VERSION
+from lib.print_status import create_progress_bar, clear_previous_lines
+import time
+
+def check_migration():
+    version_detected = CURRENT_VERSION
+    recording_dirs = os.listdir(RECORDINGS_FOLDER)
+    for file in recording_dirs:
+        if os.path.isdir(os.path.join(RECORDINGS_FOLDER, file)):
+            if not os.path.exists(os.path.join(RECORDINGS_FOLDER, file, "segments")) \
+                or not os.listdir(os.path.join(RECORDINGS_FOLDER, file, "segments")):
+                version_detected = 0
+
+    if version_detected < CURRENT_VERSION:
+        print("----------------------------")
+        print("!! Improvement to segmentation found !!")
+        print("This can help improve the data gathering from your recordings which makes newer models better")
+        update = input("Do you want to reprocess your recordings? [y/N] ")
+        if (update.lower() == "y"):
+            migrate_data()
+
+def migrate_data():
+    print("----------------------------")
+    recording_dirs = os.listdir(RECORDINGS_FOLDER)
+    for file in recording_dirs:
+        source_dir = os.path.join(RECORDINGS_FOLDER, file, "source")
+        if os.path.isdir(source_dir):
+            segments_dir = os.path.join(RECORDINGS_FOLDER, file, "segments")
+            if not os.path.exists(segments_dir):
+                os.makedirs(segments_dir)
+            print( "Resegmenting " + file + "..." )
+            wav_files = [x for x in os.listdir(source_dir) if os.path.isfile(os.path.join(source_dir, x)) and x.endswith(".wav")]
+            progress = 0
+            progress_chunk = 1 / len( wav_files )
+            print( create_progress_bar(progress) )
+            for index, wav_file in enumerate(wav_files):
+                srt_file = os.path.join(segments_dir, wav_file.replace(".wav", ".v1.srt"))
+
+                progress = index / len( wav_files ) + progress_chunk
+                clear_previous_lines(1)
+                print( create_progress_bar(progress) )
+            clear_previous_lines(1)
+            clear_previous_lines(1)
+            print( file + " updated!" )
+
+            time.sleep(1)
+    
\ No newline at end of file
diff --git a/lib/print_status.py b/lib/print_status.py
new file mode 100644
index 00000000..fa732376
--- /dev/null
+++ b/lib/print_status.py
@@ -0,0 +1,97 @@
+from .typing import DetectionState
+from typing import List
+from .srt import ms_to_srt_timestring
+import os
+import sys
+
+# Needed to make escape characters work on Windows for some reason
+if os.name == 'nt':
+    os.system("")
+ANSI_CODE_LINE_UP = '\033[1A'
+ANSI_CODE_LINE_CLEAR = '\x1b[2K'
+
+# If no UTF-8 characters are supported, use ASCII characters instead
+PROGRESS_FILLED = '#' if sys.stdout.encoding != 'utf-8' else '\u2588'
+PROGRESS_AVAILABLE = '-' if sys.stdout.encoding != 'utf-8' else '\u2591'
+LINE_LENGTH = 50
+
+def create_progress_bar(percentage: float = 1.0) -> str:
+    filled_characters = round(max(0, min(LINE_LENGTH, LINE_LENGTH * percentage)))
+    return "".rjust(filled_characters, PROGRESS_FILLED).ljust(LINE_LENGTH, PROGRESS_AVAILABLE)
+
+def get_current_status(detection_state: DetectionState) -> List[str]:
+    recorded_timestring = ms_to_srt_timestring( detection_state.ms_recorded, False)
+
+    # Quality rating was manually established by doing some testing with added noise
+    # And finding the results becoming worse when the SNR went lower than 10
+    quality = ""
+    if detection_state.ms_recorded > 10000:
+        if detection_state.expected_snr >= 25:
+            quality = "Excellent"
+        elif detection_state.expected_snr >= 20:
+            quality = "Great"
+        elif detection_state.expected_snr >= 15:
+            quality = "Good"
+        elif detection_state.expected_snr >= 10:
+            quality = "Average"
+        elif detection_state.expected_snr >= 7:
+            quality = "Poor"
+        else:
+            quality = "Unusable"
+
+    lines = [
+        ".".ljust(LINE_LENGTH - 2, "-") + ".",
+        "| " + "Listening for:" + recorded_timestring.rjust(LINE_LENGTH - 12) + " |",
+    ]
+
+    if detection_state.state == "recording":
+        lines.append("| " + "Mic Quality: " + quality.rjust(LINE_LENGTH - 13) + " |")
+    elif detection_state.state == "processing":
+        lines.append("| " + "PROCESSING...".ljust(LINE_LENGTH) + " |")
+    elif detection_state.state == "paused":
+        lines.append("| " + "PAUSED - Resume using SPACE".ljust(LINE_LENGTH) + " |")
+    else:
+        lines.append("| " + detection_state.state.upper().ljust(LINE_LENGTH) + " |")
+
+    if detection_state.advanced_logging:
+        lines.extend([
+            "|".ljust(LINE_LENGTH - 2,"-") + "|",
+            "| " + "Est. values for thresholding".ljust(LINE_LENGTH) + " |",
+            "|".ljust(LINE_LENGTH - 2,"-") + "|",
+            "| " + ("Noise floor (dBFS):" + str(round(detection_state.expected_noise_floor)).rjust(LINE_LENGTH - 19)) + " |",
+            "| " + ("SNR:" + str(round(detection_state.expected_snr)).rjust(LINE_LENGTH - 4)) + " |",
+        ])
+
+    for label in detection_state.labels:
+        # Quantity rating is based on 5000 30ms windows being good enough to train a label from the example model
+        # And 1000 30ms windows being enough to train a label decently
+        # With at least 10 percent extra for a possible hold-out set during training
+        quantity = ""
+        if label.ms_detected < 16500:
+            quantity = "Not enough"
+        elif label.ms_detected >= 16500 and label.ms_detected < 41250:
+            quantity = "Sufficient"
+        elif label.ms_detected >= 41250 and label.ms_detected < 82500:
+            quantity = "Good"
+        elif label.ms_detected >= 82500:
+            quantity = "Excellent"
+
+        lines.extend([
+            "|".ljust(LINE_LENGTH - 2,"-") + "|",
+            "| " + label.label.ljust(LINE_LENGTH) + " |",
+            "| " + "Recorded: " + ms_to_srt_timestring( label.ms_detected, False ).rjust(LINE_LENGTH - 10) + " |",
+            "| " + "Data Quantity: " + quantity.rjust(LINE_LENGTH - 15) + " |",
+        ])
+
+        if detection_state.advanced_logging:
+            lines.append( "| " + ("type:" + str(label.duration_type if label.duration_type else "Unknown").upper().rjust(33)) + " |" )
+            lines.append( "| " + ("dbFS threshold:" + str(round(label.min_dBFS, 2)).rjust(LINE_LENGTH - 14)) + " |" )
+    lines.append("'".ljust(LINE_LENGTH - 2,"-") + "'")
+
+    return lines
+
+def clear_previous_lines(line_count):
+    line = ""
+    for i in range(0,line_count):
+        line += ANSI_CODE_LINE_UP
+    print(line, end=ANSI_CODE_LINE_CLEAR )
\ No newline at end of file
diff --git a/lib/stream_processing.py b/lib/stream_processing.py
index 85aac2c1..6ab84628 100644
--- a/lib/stream_processing.py
+++ b/lib/stream_processing.py
@@ -2,6 +2,8 @@
 from config.config import BACKGROUND_LABEL
 from typing import List
 
+CURRENT_VERSION = 1
+
 def determine_detection_state(detection_frames: List[DetectionFrame], detection_state: DetectionState) -> DetectionState:
     # Filter out very low power dbFS values as we can assume the hardware microphone is off
     # And we do not want to skew the mean for that as it would create more false positives
diff --git a/settings.py b/settings.py
index 4245dcd3..4db104bf 100644
--- a/settings.py
+++ b/settings.py
@@ -4,6 +4,7 @@
 from lib.test_data import test_data
 from lib.convert_files import convert_files
 from lib.combine_models import combine_models
+from lib.migrate_data import check_migration
 
 def root_navigation( first):
     if( first ):
@@ -41,5 +42,6 @@ def select_mode():
             root_navigation( False )
     elif( setup_mode.lower() == 'x' ):
         print( "Goodbye." )
-    
+
+check_migration()
 root_navigation( True )
\ No newline at end of file

From 22c05a30a58d57767053e642ceb5ae6929677f38 Mon Sep 17 00:00:00 2001
From: Kevin te Raa
Date: Sat, 11 Feb 2023 11:21:58 +0100
Subject: [PATCH 03/15] Started integrating the complete signal detection flow
 inside the migration path

- Will refactor some stuff later to make it work for recording flow as well
---
 lib/migrate_data.py      |  38 ++++++---
 lib/print_status.py      |  20 ++---
 lib/signal_processing.py |  12 ++-
 lib/srt.py               |   1 +
 lib/stream_processing.py | 176 ++++++++++++++++++++++++++++++++++++++-
 5 files changed, 217 insertions(+), 30 deletions(-)

diff --git a/lib/migrate_data.py b/lib/migrate_data.py
index 080fefed..7893e84a 100644
--- a/lib/migrate_data.py
+++ b/lib/migrate_data.py
@@ -1,7 +1,8 @@
 from config.config import *
 import os
-from lib.stream_processing import CURRENT_VERSION
-from lib.print_status import create_progress_bar, clear_previous_lines
+from lib.stream_processing import CURRENT_VERSION, process_wav_file
+from lib.print_status import create_progress_bar, clear_previous_lines, get_current_status
+from .typing import DetectionState
 import time
 
 def check_migration():
@@ -24,26 +25,39 @@ def check_migration():
 def migrate_data():
     print("----------------------------")
     recording_dirs = os.listdir(RECORDINGS_FOLDER)
-    for file in recording_dirs:
-        source_dir = os.path.join(RECORDINGS_FOLDER, file, "source")
+    for label in recording_dirs:
+        source_dir = os.path.join(RECORDINGS_FOLDER, label, "source")
         if os.path.isdir(source_dir):
-            segments_dir = os.path.join(RECORDINGS_FOLDER, file, "segments")
+            segments_dir = os.path.join(RECORDINGS_FOLDER, label, "segments")
             if not os.path.exists(segments_dir):
                 os.makedirs(segments_dir)
-            print( "Resegmenting " + file + "..." )
+            print( "Resegmenting " + label + "..." )
+            print( "" )
             wav_files = [x for x in os.listdir(source_dir) if os.path.isfile(os.path.join(source_dir, x)) and x.endswith(".wav")]
            progress = 0
            progress_chunk = 1 / len( wav_files )
-            print( create_progress_bar(progress) )
             for index, wav_file in enumerate(wav_files):
-                srt_file = os.path.join(segments_dir, wav_file.replace(".wav", ".v1.srt"))
-
+                wav_file_location = os.path.join(source_dir, wav_file)
+                srt_file_location = os.path.join(segments_dir, wav_file.replace(".wav", ".v" + str(CURRENT_VERSION) + ".srt"))
+                output_file_location = os.path.join(segments_dir, wav_file.replace(".wav", "_detection.wav"))
+
+                process_wav_file(wav_file_location, srt_file_location, output_file_location, [label], \
+                    lambda internal_progress, state: print_migration_progress(progress + (internal_progress * progress_chunk), state) )
                 progress = index / len( wav_files ) + progress_chunk
                 clear_previous_lines(1)
                 print( create_progress_bar(progress) )
                 clear_previous_lines(1)
-            clear_previous_lines(1)
-            print( file + " updated!" )
+            clear_previous_lines(1)
+            print( label + " updated!" )
 
             time.sleep(1)
+
+def print_migration_progress(progress, state: DetectionState):
+    status_lines = get_current_status(state)
+    line_count = 1 + len(status_lines) if state.ms_recorded > 0 else 1
+    clear_previous_lines(line_count)
+    print( create_progress_bar(progress) )
+    if progress < 1:
+        for line in status_lines:
+            print( line )
\ No newline at end of file
diff --git a/lib/print_status.py b/lib/print_status.py
index fa732376..5058284f 100644
--- a/lib/print_status.py
+++ b/lib/print_status.py
@@ -41,11 +41,11 @@ def get_current_status(detection_state: DetectionState) -> List[str]:
 
     lines = [
         ".".ljust(LINE_LENGTH - 2, "-") + ".",
-        "| " + "Listening for:" + recorded_timestring.rjust(LINE_LENGTH - 12) + " |",
+        "| " + "Listening for:" + recorded_timestring.rjust(LINE_LENGTH - 19) + " |",
     ]
 
     if detection_state.state == "recording":
-        lines.append("| " + "Mic Quality: " + quality.rjust(LINE_LENGTH - 13) + " |")
+        lines.append("| " + "Mic Quality: " + quality.rjust(LINE_LENGTH - 18) + " |")
     elif detection_state.state == "processing":
         lines.append("| " + "PROCESSING...".ljust(LINE_LENGTH) + " |")
     elif detection_state.state == "paused":
         lines.append("| " + "PAUSED - Resume using SPACE".ljust(LINE_LENGTH) + " |")
@@ -56,10 +56,10 @@ def get_current_status(detection_state: DetectionState) -> List[str]:
     if detection_state.advanced_logging:
         lines.extend([
             "|".ljust(LINE_LENGTH - 2,"-") + "|",
-            "| " + "Est. values for thresholding".ljust(LINE_LENGTH) + " |",
+            "| " + "Est. values for thresholding".ljust(LINE_LENGTH - 5) + " |",
             "|".ljust(LINE_LENGTH - 2,"-") + "|",
-            "| " + ("Noise floor (dBFS):" + str(round(detection_state.expected_noise_floor)).rjust(LINE_LENGTH - 19)) + " |",
-            "| " + ("SNR:" + str(round(detection_state.expected_snr)).rjust(LINE_LENGTH - 4)) + " |",
+            "| " + ("Noise floor (dBFS):" + str(round(detection_state.expected_noise_floor)).rjust(LINE_LENGTH - 24)) + " |",
+            "| " + ("SNR:" + str(round(detection_state.expected_snr)).rjust(LINE_LENGTH - 9)) + " |",
         ])
 
     for label in detection_state.labels:
@@ -78,14 +78,14 @@ def get_current_status(detection_state: DetectionState) -> List[str]:
 
         lines.extend([
             "|".ljust(LINE_LENGTH - 2,"-") + "|",
-            "| " + label.label.ljust(LINE_LENGTH) + " |",
-            "| " + "Recorded: " + ms_to_srt_timestring( label.ms_detected, False ).rjust(LINE_LENGTH - 10) + " |",
-            "| " + "Data Quantity: " + quantity.rjust(LINE_LENGTH - 15) + " |",
+            "| " + label.label.ljust(LINE_LENGTH - 5) + " |",
+            "| " + "Recorded: " + ms_to_srt_timestring( label.ms_detected, False ).rjust(LINE_LENGTH - 15) + " |",
+            "| " + "Data Quantity: " + quantity.rjust(LINE_LENGTH - 20) + " |",
         ])
 
         if detection_state.advanced_logging:
-            lines.append( "| " + ("type:" + str(label.duration_type if label.duration_type else "Unknown").upper().rjust(33)) + " |" )
-            lines.append( "| " + ("dbFS threshold:" + str(round(label.min_dBFS, 2)).rjust(LINE_LENGTH - 14)) + " |" )
+            lines.append( "| " + ("type:" + str(label.duration_type if label.duration_type else "Unknown").upper().rjust(LINE_LENGTH - 10)) + " |" )
+            lines.append( "| " + ("dBFS threshold:" + str(round(label.min_dBFS, 2)).rjust(LINE_LENGTH - 19)) + " |" )
     lines.append("'".ljust(LINE_LENGTH - 2,"-") + "'")
 
     return lines
diff --git a/lib/signal_processing.py b/lib/signal_processing.py
index 040a109f..9ee785e3 100644
--- a/lib/signal_processing.py
+++ b/lib/signal_processing.py
@@ -6,7 +6,7 @@
 from .mfsc import Mfsc
 from typing import List, Tuple
 
-_mfscs = []
+_mfscs = {}
 
 # Determine the decibel based on full scale of 16 bit ints ( same as Audacity )
 def determine_dBFS(waveData: np.array) -> float:
     return 20 * math.log10(determine_power(waveData) / math.pow(32767, 2))
@@ -83,23 +83,21 @@ def determine_formant_frequencies(waveData: np.array, bin_size: float = 241) -> Tuple[float, float]:
     return f1, f2
 
 def determine_mfcc_type1(waveData: np.array, sampleRate: int = 16000) -> List[float]:
-    return mfcc( wavData, samplerate=sampleRate, nfft=1103, numcep=13, appendEnergy=True )
+    return mfcc( waveData, samplerate=sampleRate, nfft=1103, numcep=13, appendEnergy=True )
 
 def determine_mfcc_type2(waveData: np.array, sampleRate: int = 16000) -> List[float]:
-    return mfcc( wavData, samplerate=sampleRate, nfft=1103, numcep=30, nfilt=40, preemph=0.5, winstep=0.005, winlen=0.015, appendEnergy=False )
+    return mfcc( waveData, samplerate=sampleRate, nfft=1103, numcep=30, nfilt=40, preemph=0.5, winstep=0.005, winlen=0.015, appendEnergy=False )
 
 def determine_mfsc(waveData: np.array, sampleRate:int = 16000) -> List[float]:
     global _mfscs
     if ( sampleRate not in _mfscs ):
         _mfscs[sampleRate] = Mfsc(sr=sampleRate, n_mel=40, preem_coeff=0.5, frame_stride_ms=5, frame_size_ms=15)
     _mfsc = _mfscs[sampleRate]
-    mfsc_result = _mfsc.apply( wavData )
+    return _mfsc.apply( waveData )
 
 # Get a feeling of how much the signal changes based on the total distance between mel frames
 def determine_euclidean_dist(mfscData: np.array) -> float:
-    filter_number = 40
-    mel_frame_amount = int(len(mfscData) / filter_number)
-    mfscData = np.reshape(mfscData, (mel_frame_amount, filter_number) )
+    mel_frame_amount = len(mfscData)
     distance = 0
     for i in range(0, mel_frame_amount):
         if i > 0:
diff --git a/lib/srt.py b/lib/srt.py
index ce373b6d..8614cd36 100644
--- a/lib/srt.py
+++ b/lib/srt.py
@@ -3,6 +3,7 @@
 from config.config import BACKGROUND_LABEL
 from .typing import TransitionEvent, DetectionEvent, DetectionFrame
 from typing import List
+import math
 
 def ms_to_srt_timestring( ms: int, include_hours=True):
     if ms <= 0:
diff --git a/lib/stream_processing.py b/lib/stream_processing.py
index 6ab84628..b371e16f 100644
--- a/lib/stream_processing.py
+++ b/lib/stream_processing.py
@@ -1,9 +1,128 @@
 from .typing import DetectionLabel, DetectionFrame, DetectionEvent, DetectionState
-from config.config import BACKGROUND_LABEL
+from config.config import BACKGROUND_LABEL, RECORD_SECONDS, SLIDING_WINDOW_AMOUNT
 from typing import List
+import wave
+import math
+import numpy as np
+from .signal_processing import determine_power, determine_dBFS, determine_mfsc, determine_euclidean_dist
 
 CURRENT_VERSION = 1
 
+def process_wav_file(input_file, srt_file, output_file, labels, progress_callback = None):
+    audioFrames = []
+    edgesAudioFrames = []
+    wf = wave.open(input_file, 'rb')
+    number_channels = wf.getnchannels()
+    total_frames = wf.getnframes()
+    frame_rate = wf.getframerate()
+    frames_to_read = round( frame_rate * RECORD_SECONDS / SLIDING_WINDOW_AMOUNT )
+    ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000)
+
+    detection_strategy = "auto_dBFS_mend_dBFS_30ms_secondary_dBFS_reject_cont_45ms_repair"
+
+    detection_labels = []
+    for label in labels:
+        detection_labels.append(DetectionLabel(label, 0, "", 0, 0, 0, 0))
+    detection_state = DetectionState(detection_strategy, "recording", frames_to_read, ms_per_frame, 0, True, 0, 0, detection_labels)
+
+    false_occurrence = []
+    current_occurrence = []
+    index = 0
+
+    if progress_callback is not None:
+        progress_callback(0, detection_state)
+
+    detection_frames = []
+
+    while( wf.tell() < total_frames ):
+        index = index + 1
+        raw_wav = wf.readframes(frames_to_read * number_channels)
+        detection_state.ms_recorded += ms_per_frame
+        detected = False
+
+        # If our wav file is shorter than the amount of bytes ( assuming 16 bit ) times the frames, we discard it and assume we arrived at the end of the file
+        if (len(raw_wav) != 2 * frames_to_read * number_channels ):
+            break
+        else:
+            audioFrames.append(raw_wav)
+            if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ):
+                audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:]
+
+                byteString = b''.join(audioFrames)
+                wave_data = np.frombuffer( byteString, dtype=np.int16 )
+                power = determine_power( wave_data )
+                dBFS = determine_dBFS( wave_data )
+                mfsc_data = determine_mfsc( wave_data, frame_rate )
+                distance = determine_euclidean_dist( mfsc_data )
+
+                # Attempt to detect a label
+                detected_label = BACKGROUND_LABEL
+                for label in detection_state.labels:
+                    if is_detected(detection_state.strategy, power, dBFS, distance, label.min_dBFS):
+                        detected = True
+                        label.ms_detected += ms_per_frame
+                        detected_label = label.label
+                        break
+
+                detection_frames.append(DetectionFrame(index, ms_per_frame, detected, power, dBFS, distance, mfsc_data, detected_label))
+                if detected:
+                    current_occurrence.append(detection_frames[-1])
+                else:
+                    false_occurrence.append(detection_frames[-1])
+            else:
+                detection_frames.append(DetectionFrame(index, ms_per_frame, False, 0, 0, 0, [], BACKGROUND_LABEL))
+                false_occurrence.append(detection_frames[-1])
+
+        # Recalculate the noise floor / signal strength every 10 frames
+        # For performance reasons and because the statistical likelihood of things changing every 150ms is pretty low
+        if len(detection_frames) % 10 == 0:
+            detection_state = determine_detection_state(detection_frames, detection_state)
+
+        # On-line rejection - This may be undone in post-processing later
+        # Only add occurrences longer than 75 ms as no sound a human produces is shorter
+        if detected == False and len(current_occurrence) > 0:
+            is_continuous = False
+            for label in detection_state.labels:
+                if label.label == current_occurrence[0].label:
+                    is_continuous = label.duration_type == "continuous"
+                    break
+
+            if is_rejected(detection_state.strategy, current_occurrence, detection_state.ms_per_frame, is_continuous):
+                total_rejected_frames = len(current_occurrence)
+                for frame_index in range(-total_rejected_frames - 1, 0, 1):
+                    rejected_frame_index = frame_index
+                    detection_frames[rejected_frame_index].label = BACKGROUND_LABEL
+                    detection_frames[rejected_frame_index].positive = False
+            current_occurrence = []
+        # On-line mending - This may be undone in post-processing later
+        # Only keep false detections longer than a certain amount ( because a human can't make them shorter )
+        elif detected and len(false_occurrence) > 0:
+            if is_mended(detection_state.strategy, false_occurrence, detection_state, detected_label):
+                total_mended_frames = len(false_occurrence)
+                for frame_index in range(-total_mended_frames - 1, 0, 1):
+                    mended_frame_index = frame_index
+                    detection_frames[mended_frame_index].label = detected_label
+                    detection_frames[mended_frame_index].positive = True
+            false_occurrence = []
+
+        progress = index * frames_to_read / total_frames
+        if progress_callback is not None:
+            progress_callback(progress, detection_state)
+
+    wf.close()
+
+    output_wave_file = wave.open(output_file, 'wb')
+    output_wave_file.setnchannels(number_channels)
+    output_wave_file.setsampwidth(2) # 16 bit = 2 bytes
+    output_wave_file.setframerate(frame_rate)
+    post_processing(detection_frames, detection_state, output_file, output_wave_file )
+
+    progress = 1
+    if progress_callback is not None:
+        progress_callback(progress, detection_state)
+
 def determine_detection_state(detection_frames: List[DetectionFrame], detection_state: DetectionState) -> DetectionState:
     # Filter out very low power dbFS values as we can assume the hardware microphone is off
     # And we do not want to skew the mean for that as it would create more false positives
@@ -54,3 +173,58 @@ def detection_frames_to_events(detection_frames: List[DetectionFrame]) -> List[DetectionEvent]:
             (current_frames[0].index - 1) * current_frames[0].duration_ms, (current_frames[-1].index) * current_frames[-1].duration_ms, current_frames) )
         current_frames = []
     return events
+
+def auto_decibel_detection(power, dBFS, distance, dBFS_threshold):
+    return dBFS > dBFS_threshold
+
+def auto_secondary_decibel_detection(power, dBFS, distance, dBFS_threshold):
+    return dBFS > dBFS_threshold - 7
+
+def is_detected(strategy, power, dBFS, distance, estimated_threshold):
+    if "auto_dBFS" in strategy:
+        return auto_decibel_detection(power, dBFS, distance, estimated_threshold)
+
+def is_rejected( strategy, occurrence, ms_per_frame, continuous = False ):
+    if "reject" not in strategy:
+        return False
+    elif "reject_45ms" in strategy:
+        return len(occurrence) * ms_per_frame < 45
+    elif "reject_60ms" in strategy:
+        return len(occurrence) * ms_per_frame < 60
+    elif "reject_75ms" in strategy:
+        return len(occurrence) * ms_per_frame < 75
+    elif "reject_90ms" in strategy:
+        return len(occurrence) * ms_per_frame < 90
+    elif "reject_cont_45ms" in strategy:
+        return len(occurrence) * ms_per_frame < ( 45 if continuous else 0 )
+
+def is_detected_secondary( strategy, power, dBFS, distance, estimated_threshold ):
+    if "secondary" not in strategy:
+        return False
+    elif "secondary_dBFS" in strategy:
+        return auto_secondary_decibel_detection(power, dBFS, distance, estimated_threshold)
+
+def is_mended( strategy, occurrence, detection_state, current_label ):
+    if "mend" not in strategy:
+        return False
+    elif "mend_60ms" in strategy:
+        return len(occurrence) * detection_state.ms_per_frame < 60
+    elif "mend_45ms" in strategy:
+        return len(occurrence) * detection_state.ms_per_frame < 45
+    elif "mend_dBFS" in strategy:
+        label_dBFS_threshold = 0
+        for label in detection_state.labels:
+            if label.label == current_label:
+                label_dBFS_threshold = label.min_dBFS
+
+        total_missed_length_ms = 0
+        for frame in occurrence:
+            if not auto_secondary_decibel_detection(frame.power, frame.dBFS, frame.euclid_dist, label_dBFS_threshold):
+                if not "mend_dBFS_30ms" in strategy:
+                    return False
+                else:
+                    total_missed_length_ms += detection_state.ms_per_frame
+        if not "mend_dBFS_30ms" in strategy:
+            return True
+        else:
+            return total_missed_length_ms < 30

From 0c513e363bb04a9aaa1b45cc7c93411544489f66 Mon Sep 17 00:00:00 2001
From: Kevin te Raa
Date: Fri, 17 Feb 2023 17:18:39 +0100
Subject: [PATCH 04/15] Finished implementing migration of old data, even for
 44.1kHz data

Fixed a bug where the rms was resulting in different numbers in windows
versus linux
Added post processing for audio sources
---
 lib/migrate_data.py      |  26 ++--
 lib/print_status.py      |  16 +-
 lib/signal_processing.py |  10 +-
 lib/stream_processing.py | 307 +++++++++++++++++++++++++++++----------
 lib/typing.py            |   1 -
 5 files changed, 258 insertions(+), 102 deletions(-)

diff --git a/lib/migrate_data.py b/lib/migrate_data.py
index 7893e84a..163cd1a8 100644
--- a/lib/migrate_data.py
+++ b/lib/migrate_data.py
@@ -1,7 +1,7 @@
 from config.config import *
 import os
 from lib.stream_processing import CURRENT_VERSION, process_wav_file
-from lib.print_status import create_progress_bar, clear_previous_lines, get_current_status
+from lib.print_status import create_progress_bar, clear_previous_lines, get_current_status, reset_previous_lines
 from .typing import DetectionState
 import time
@@ -18,9 +18,8 @@ def check_migration():
         print("----------------------------")
         print("!! Improvement to segmentation found !!")
         print("This can help improve the data gathering from your recordings which makes newer models better")
-        update = input("Do you want to reprocess your recordings? [y/N] ")
-        if (update.lower() == "y"):
-            migrate_data()
+        print("Resegmenting your data may take a while")
+        migrate_data()
 
 def migrate_data():
     print("----------------------------")
@@ -32,7 +31,6 @@ def migrate_data():
             if not os.path.exists(segments_dir):
                 os.makedirs(segments_dir)
             print( "Resegmenting " + label + "..." )
-            print( "" )
             wav_files = [x for x in os.listdir(source_dir) if os.path.isfile(os.path.join(source_dir, x)) and x.endswith(".wav")]
             progress = 0
             progress_chunk = 1 / len( wav_files )
@@ -44,20 +42,22 @@ def migrate_data():
                 process_wav_file(wav_file_location, srt_file_location, output_file_location, [label], \
                     lambda internal_progress, state: print_migration_progress(progress + (internal_progress * progress_chunk), state) )
                 progress = index / len( wav_files ) + progress_chunk
+
+            if progress == 1:
                 clear_previous_lines(1)
+                clear_previous_lines(1)
 
-            clear_previous_lines(1)
-            print( label + " updated!" )
+            print( label + " resegmented!" )
 
             time.sleep(1)
+    print("Finished migrating data!")
+    print("----------------------------")
 
 def print_migration_progress(progress, state: DetectionState):
     status_lines = get_current_status(state)
-    line_count = 1 + len(status_lines) if state.ms_recorded > 0 else 1
-    clear_previous_lines(line_count)
+    line_count = 1 + len(status_lines) if progress > 0 or state.state == "processing" else 0
+    reset_previous_lines(line_count) if progress < 1 else clear_previous_lines(line_count)
     print( create_progress_bar(progress) )
-    if progress < 1:
+    if progress != 1:
         for line in status_lines:
-            print( line )
-    
\ No newline at end of file
+            print( line )
\ No newline at end of file
diff --git a/lib/print_status.py b/lib/print_status.py
index 5058284f..37068a7d 100644
--- a/lib/print_status.py
+++ b/lib/print_status.py
@@ -45,13 +45,13 @@ def get_current_status(detection_state: DetectionState) -> List[str]:
     ]
 
     if detection_state.state == "recording":
-        lines.append("| " + "Mic Quality: " + quality.rjust(LINE_LENGTH - 18) + " |")
+        lines.append("| " + "Sound Quality: " + quality.rjust(LINE_LENGTH - 20) + " |")
     elif detection_state.state == "processing":
-        lines.append("| " + "PROCESSING...".ljust(LINE_LENGTH) + " |")
+        lines.append("| " + "PROCESSING...".ljust(LINE_LENGTH - 5) + " |")
     elif detection_state.state == "paused":
-        lines.append("| " + "PAUSED - Resume using SPACE".ljust(LINE_LENGTH) + " |")
+        lines.append("| " + "PAUSED - Resume using SPACE".ljust(LINE_LENGTH - 5) + " |")
     else:
-        lines.append("| " + detection_state.state.upper().ljust(LINE_LENGTH) + " |")
+        lines.append("| " + detection_state.state.upper().ljust(LINE_LENGTH - 5) + " |")
 
@@ -90,8 +90,12 @@ def get_current_status(detection_state: DetectionState) -> List[str]:
 
     return lines
 
-def clear_previous_lines(line_count):
+def reset_previous_lines(line_count):
     line = ""
     for i in range(0,line_count):
         line += ANSI_CODE_LINE_UP
     print(line, end=ANSI_CODE_LINE_CLEAR )
+
+def clear_previous_lines(line_count):
+    for i in range(0,line_count):
+        print(ANSI_CODE_LINE_UP, end=ANSI_CODE_LINE_CLEAR )
\ No newline at end of file
diff --git a/lib/signal_processing.py b/lib/signal_processing.py
index 9ee785e3..7962ee97 100644
--- a/lib/signal_processing.py
+++ b/lib/signal_processing.py
@@ -5,6 +5,12 @@
 from python_speech_features import mfcc
 from .mfsc import Mfsc
 from typing import List, Tuple
+import os
+
+# When converting to ints from bytes, Windows uses a 32 bit number.
+# Other OSes use the bytes shown. So for Windows we need different calculations for frame count
+# ( https://stackoverflow.com/questions/72482769/numpy-returns-different-results-on-windows-and-unix )
+long_byte_size = 4 if os.name == 'nt' else 2
 
 _mfscs = {}
 
 # Determine the decibel based on full scale of 16 bit ints ( same as Audacity )
 def determine_dBFS(waveData: np.array) -> float:
     return 20 * math.log10(determine_power(waveData) / math.pow(32767, 2))
 
 def determine_power(waveData: np.array) -> float:
-    return audioop.rms(waveData, 4)
+    return audioop.rms(waveData, long_byte_size)
 
 # This power measurement is the old representation for human readability
 def determine_legacy_power(waveData: np.array) -> float:
-    return determine_power( waveData ) / 1000
+    return audioop.rms(waveData, 4) / 1000
 
 # Old fundamental frequency finder - this one doesn't show frequency in Hz
 def determine_legacy_frequency(waveData: np.array, recordLength: float = 0.1) -> float:
     fft_result = fft( waveData )
diff --git a/lib/stream_processing.py b/lib/stream_processing.py
index b371e16f..2216cf54 100644
--- a/lib/stream_processing.py
+++ b/lib/stream_processing.py
@@ -1,14 +1,22 @@
 from .typing import DetectionLabel, DetectionFrame, DetectionEvent, DetectionState
-from config.config import BACKGROUND_LABEL, RECORD_SECONDS, SLIDING_WINDOW_AMOUNT
+from config.config import BACKGROUND_LABEL, RECORD_SECONDS, SLIDING_WINDOW_AMOUNT, RATE
 from typing import List
 import wave
 import math
 import numpy as np
 from .signal_processing import determine_power, determine_dBFS, determine_mfsc, determine_euclidean_dist
+from .srt import persist_srt_file, print_detection_performance_compared_to_srt
+import os
+import audioop
+
+# When converting to ints from bytes, Windows uses a 32 bit number.
+# Other OSes use the bytes shown. So for Windows we need different calculations for frame count
+# ( https://stackoverflow.com/questions/72482769/numpy-returns-different-results-on-windows-and-unix )
+long_byte_size = 4 if os.name == 'nt' else 2
 
 CURRENT_VERSION = 1
 
-def process_wav_file(input_file, srt_file, output_file, labels, progress_callback = None):
+def process_wav_file(input_file, srt_file, output_file, labels, progress_callback = None, comparison_srt_file = None, print_statistics = False):
     audioFrames = []
     edgesAudioFrames = []
     wf = wave.open(input_file, 'rb')
@@ -16,112 +24,251 @@ def process_wav_file(input_file, srt_file, output_file, labels, progress_callback = None, comparison_srt_file = None, print_statistics = False):
     total_frames = wf.getnframes()
     frame_rate = wf.getframerate()
     frames_to_read = round( frame_rate * RECORD_SECONDS / SLIDING_WINDOW_AMOUNT )
-    ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000)
+    ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000)
+    sample_width = 2 # 16 bit = 2 bytes
 
     detection_strategy = "auto_dBFS_mend_dBFS_30ms_secondary_dBFS_reject_cont_45ms_repair"
 
     detection_labels = []
     for label in labels:
         detection_labels.append(DetectionLabel(label, 0, "", 0, 0, 0, 0))
-    detection_state = DetectionState(detection_strategy, "recording", frames_to_read, ms_per_frame, 0, True, 0, 0, detection_labels)
+    detection_state = DetectionState(detection_strategy, "recording", ms_per_frame, 0, True, 0, 0, detection_labels)
 
     false_occurrence = []
     current_occurrence = []
-    index = 0
+    index = 0
+    detection_frames = []
 
     if progress_callback is not None:
-        progress_callback(0, detection_state)
-
-    detection_frames = []
+        progress_callback(0, detection_state)
 
     while( wf.tell() < total_frames ):
         index = index + 1
         raw_wav = wf.readframes(frames_to_read * number_channels)
-        detection_state.ms_recorded += ms_per_frame
-        detected = False
+        detection_state.ms_recorded += ms_per_frame
+        detected = False
 
         # If our wav file is shorter than the amount of bytes ( assuming 16 bit ) times the frames, we discard it and assume we arrived at the end of the file
         if (len(raw_wav) != 2 * frames_to_read * number_channels ):
-            break
-        else:
-            audioFrames.append(raw_wav)
-            if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ):
-                audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:]
+            break
+
+        # Do online downsampling if the file's frame rate is higher than our 16k Hz rate
+        # To make sure all the calculations stay accurate
+        if frame_rate > RATE:
+            raw_wav, _ = audioop.ratecv(raw_wav, sample_width, number_channels, frame_rate, RATE, None)
+        if number_channels > 1:
+            raw_wav = audioop.tomono(raw_wav[0], 2, 1, 0)
+
+        audioFrames.append(raw_wav)
+        if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ):
+            audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:]
 
-                byteString = b''.join(audioFrames)
-                wave_data = np.frombuffer( byteString, dtype=np.int16 )
-                power = determine_power( wave_data )
-                dBFS = determine_dBFS( wave_data )
-                mfsc_data = determine_mfsc( wave_data, frame_rate )
-                distance = determine_euclidean_dist( mfsc_data )
-
-                # Attempt to detect a label
-                detected_label = BACKGROUND_LABEL
-                for label in detection_state.labels:
-                    if is_detected(detection_state.strategy, power, dBFS, distance, label.min_dBFS):
-                        detected = True
-                        label.ms_detected += ms_per_frame
-                        detected_label = label.label
-                        break
-
-                detection_frames.append(DetectionFrame(index, ms_per_frame, detected, power, dBFS, distance, mfsc_data, detected_label))
-                if detected:
-                    current_occurrence.append(detection_frames[-1])
-                else:
-                    false_occurrence.append(detection_frames[-1])
-            else:
-                detection_frames.append(DetectionFrame(index, ms_per_frame, False, 0, 0, 0, [], BACKGROUND_LABEL))
-                false_occurrence.append(detection_frames[-1])
+            byteString = b''.join(audioFrames)
+            wave_data = np.frombuffer( byteString, dtype=np.int16 )
+            power = determine_power( wave_data )
+            dBFS = determine_dBFS( wave_data )
+            mfsc_data = determine_mfsc( wave_data, RATE )
+            distance = determine_euclidean_dist( mfsc_data )
+
+            # Attempt to detect a label
+            detected_label = BACKGROUND_LABEL
+            for label in detection_state.labels:
+                if is_detected(detection_state.strategy, power, dBFS, distance, label.min_dBFS):
+                    detected = True
+                    label.ms_detected += ms_per_frame
+                    detected_label = label.label
+                    break
+
+            detection_frames.append(DetectionFrame(index, ms_per_frame, detected, power, dBFS, distance, mfsc_data, detected_label))
+            if detected:
+                current_occurrence.append(detection_frames[-1])
+            else:
+                false_occurrence.append(detection_frames[-1])
+        else:
+            detection_frames.append(DetectionFrame(index, ms_per_frame, False, 0, 0, 0, [], BACKGROUND_LABEL))
+            false_occurrence.append(detection_frames[-1])
 
         # Recalculate the noise floor / signal strength every 10 frames
         # For performance reasons and because the statistical likelihood of things changing every 150ms is pretty low
         if len(detection_frames) % 10 == 0:
             detection_state = determine_detection_state(detection_frames, detection_state)
 
         # On-line rejection - This may be undone in post-processing later
         # Only add occurrences longer than 75 ms as no sound a human produces is shorter
+        if detected == False and len(current_occurrence) > 0:
+            is_continuous = False
+            for label in detection_state.labels:
+                if label.label == current_occurrence[0].label:
+                    is_continuous = label.duration_type == "continuous"
+                    break
+
+            if 
is_rejected(detection_state.strategy, current_occurrence, detection_state.ms_per_frame, is_continuous): + total_rejected_frames = len(current_occurrence) + for frame_index in range(-total_rejected_frames - 1, 0, 1): + rejected_frame_index = frame_index + detection_frames[rejected_frame_index].label = BACKGROUND_LABEL + detection_frames[rejected_frame_index].positive = False + current_occurrence = [] + # On-line mending - This may be undone in post-processing later + # Only keep false detections longer than a certain amount ( because a human can't make them shorter ) + elif detected and len(false_occurrence) > 0: + if is_mended(detection_state.strategy, false_occurrence, detection_state, detected_label): + total_mended_frames = len(false_occurrence) + for frame_index in range(-total_mended_frames - 1, 0, 1): + mended_frame_index = frame_index + detection_frames[mended_frame_index].label = detected_label + detection_frames[mended_frame_index].positive = True + false_occurrence = [] + + # Convert from different byte sizes to 16bit for proper progress + progress = wf.tell() / total_frames + if progress_callback is not None and progress < 1: + # For the initial pass we calculate 75% of the progress + # This progress partitioning is completely arbitrary + progress_callback(progress * 0.75, detection_state) + wf.close() output_wave_file = wave.open(output_file, 'wb') output_wave_file.setnchannels(number_channels) - output_wave_file.setsampwidth(audio.get_sample_size(FORMAT)) - output_wave_file.setframerate(frame_rate) - post_processing(detection_frames, detection_state, output_file, output_wave_file ) - + output_wave_file.setsampwidth(sample_width) + output_wave_file.setframerate(RATE) + + post_processing(detection_frames, detection_state, srt_file, progress_callback, output_wave_file, comparison_srt_file, print_statistics ) progress = 1 if progress_callback is not None: - progress_callback(progress, detection_state) + progress_callback(progress, detection_state) + +def post_processing(frames: List[DetectionFrame], detection_state: DetectionState, output_filename: str, progress_callback = None, output_wave_file: wave.Wave_write = None, comparison_srt_file: str = None, print_statistics = False) -> List[DetectionFrame]: + detection_state.state = "processing" + if progress_callback is not None: + progress_callback(0, detection_state) + + # Do a full pass on all the frames again to fix labels we might have missed + if "repair" in detection_state.strategy: + current_occurrence = [] + false_occurrence = [] + current_label = None + detected_label = None + + # Recalculate the MS detection and duratoin type + for label in detection_state.labels: + label.ms_detected = 0 + label.duration_type = determine_duration_type(label, frames) + + for index, frame in enumerate(frames): + detected = False + for label in detection_state.labels: + if is_detected(detection_state.strategy, frame.power, frame.dBFS, frame.euclid_dist, label.min_dBFS): + detected = True + label.ms_detected += detection_state.ms_per_frame + current_label = label + break + + # Do a secondary pass if the previous label was negative + # As we can use its thresholds for correcting late starts + mending_offset = 0 + if detected and not frames[index - 1].positive: + for label in detection_state.labels: + if current_label.label == label.label and is_detected_secondary(detection_state.strategy, frames[index - 1].power, frames[index - 1].dBFS, frames[index - 1].euclid_dist, label.min_dBFS - 4): + label.ms_detected += detection_state.ms_per_frame + 
frames[index - 1].label = current_label.label + frames[index - 1].positive = True + mending_offset = -1 + if len(false_occurrence) > 0: + false_occurrence.pop() + + # Only do two frames of late start fixing as longer late starts statistically do not seem to occur + if not frames[index - 2].positive and is_detected_secondary(detection_state.strategy, frames[index - 2].power, frames[index - 2].dBFS, frames[index - 2].euclid_dist, label.min_dBFS - 4): + label.ms_detected += detection_state.ms_per_frame + frames[index - 2].label = current_label.label + frames[index - 2].positive = True + mending_offset = -2 + if len(false_occurrence) > 0: + false_occurrence.pop() + break + + if detected: + current_occurrence.append(frame) + frame.label = current_label.label + frame.positive = True + frames[index] = frame + + if len(false_occurrence) > 0: + if is_mended(detection_state.strategy, false_occurrence, detection_state, current_label.label): + total_mended_frames = len(false_occurrence) + current_label.ms_detected += total_mended_frames * detection_state.ms_per_frame + for frame_index in range(-total_mended_frames - 1 + mending_offset, mending_offset, 1): + mended_frame_index = index + frame_index + frames[mended_frame_index].label = current_label.label + frames[mended_frame_index].positive = True + false_occurrence = [] + + if not detected: + false_occurrence.append(frame) + frame.positive = False + frame.label = BACKGROUND_LABEL + frames[index] = frame + + if len(current_occurrence) > 0: + is_continuous = False + for label in detection_state.labels: + if label == current_occurrence[0].label: + is_continuous = label.duration_type == "continuous" + break + + if is_rejected(detection_state.strategy, current_occurrence, detection_state.ms_per_frame, is_continuous): + total_rejected_frames = len(current_occurrence) + current_label.ms_detected -= total_rejected_frames * detection_state.ms_per_frame + current_label = None + for frame_index in range(-total_rejected_frames - 1, 0, 1): + rejected_frame_index = index + frame_index + frames[rejected_frame_index].label = BACKGROUND_LABEL + frames[rejected_frame_index].positive = False + current_occurrence = [] + + progress = index / len(frames) + if progress_callback is not None and progress < 1: + # For the post processing phase - we count the remaining 25% of the progress + # This progress partitioning is completely arbitrary + progress_callback(0.75 + ( progress * 0.25 ), detection_state) + + + # Persist the SRT file + events = detection_frames_to_events(frames) + persist_srt_file( output_filename, events ) + + comparisonOutputWaveFile = None + if print_statistics: + if output_wave_file is not None: + comparisonOutputWaveFile = wave.open(output_filename + "_comparison.wav", 'wb') + comparisonOutputWaveFile.setnchannels(output_wave_file.getnchannels()) + comparisonOutputWaveFile.setsampwidth(output_wave_file.getsampwidth()) + comparisonOutputWaveFile.setframerate(output_wave_file.getframerate()) + + print_detection_performance_compared_to_srt(frames, detection_state.ms_per_frame, comparison_srt_file, comparisonOutputWaveFile) + + # Persist the detection wave file + if output_wave_file is not None: + frames_to_write = round( RATE * RECORD_SECONDS / SLIDING_WINDOW_AMOUNT ) + sample_width = 2# 16 bit = 2 bytes + detection_audio_frames = [] + for frame in frames: + highest_amp = 65536 / 10 + signal_strength = highest_amp if frame.positive else 0 + + detection_signal = np.full(int(frames_to_write / sample_width), int(signal_strength)) + detection_signal[::2] = 0 + 
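+            # Editor's note ( illustrative, not part of the original patch ):
+            # together with the strides below, this zeroes every sample whose
+            # index divides by 2, 3, 5, 7 or 9, thinning the constant block
+            # into a sparse pulse train - presumably so detected regions are
+            # audible as a buzz when the detection wave file is played back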
detection_signal[::3] = 0 + detection_signal[::5] = 0 + detection_signal[::7] = 0 + detection_signal[::9] = 0 + detection_audio_frames.append( detection_signal ) + output_wave_file.writeframes(b''.join(detection_audio_frames)) + output_wave_file.close() + detection_state.state = "recording" + return frames def determine_detection_state(detection_frames: List[DetectionFrame], detection_state: DetectionState) -> DetectionState: # Filter out very low power dbFS values as we can assume the hardware microphone is off diff --git a/lib/typing.py b/lib/typing.py index dc1827a5..e37b9190 100644 --- a/lib/typing.py +++ b/lib/typing.py @@ -44,7 +44,6 @@ class DetectionLabel: class DetectionState: strategy: str state: str - frames_to_read: int ms_per_frame: int ms_recorded: int advanced_logging: bool From 8b052acf7ec412066f27e68d7602e216162185da Mon Sep 17 00:00:00 2001 From: Kevin te Raa Date: Fri, 17 Feb 2023 17:25:11 +0100 Subject: [PATCH 05/15] Moved the audio resampling over to signal processing - Might need its own separate file for wav related things --- lib/signal_processing.py | 13 ++++++++++++- lib/stream_processing.py | 13 ++----------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/lib/signal_processing.py b/lib/signal_processing.py index 7962ee97..2f5bb467 100644 --- a/lib/signal_processing.py +++ b/lib/signal_processing.py @@ -6,6 +6,7 @@ from .mfsc import Mfsc from typing import List, Tuple import os +from config.config import RATE # When converting to ints from bytes, Windows uses a 32 bit number. # Other OSes use the bytes shown. So for Windows we need different calculations for frame count @@ -108,4 +109,14 @@ def determine_euclidean_dist(mfscData: np.array) -> float: for i in range(0, mel_frame_amount): if i > 0: distance += np.linalg.norm(mfscData[i-1] - mfscData[i]) - return distance \ No newline at end of file + return distance + +# Resamples the audio down to 16kHz ( or any other RATE filled in ) +# To make sure all the other calculations are stable and correct +def resample_audio(wavData: np.array, frame_rate, number_channels) -> np.array: + if frame_rate > RATE: + sample_width = 2# 16 bit = 2 bytes + wavData, _ = audioop.ratecv(wavData, sample_width, number_channels, frame_rate, RATE, None) + if number_channels > 1: + wavData = audioop.tomono(wavData[0], 2, 1, 0) + return wavData diff --git a/lib/stream_processing.py b/lib/stream_processing.py index 2216cf54..9339d5b3 100644 --- a/lib/stream_processing.py +++ b/lib/stream_processing.py @@ -4,15 +4,9 @@ import wave import math import numpy as np -from .signal_processing import determine_power, determine_dBFS, determine_mfsc, determine_euclidean_dist +from .signal_processing import determine_power, determine_dBFS, determine_mfsc, determine_euclidean_dist, resample_audio from .srt import persist_srt_file, print_detection_performance_compared_to_srt import os -import audioop - -# When converting to ints from bytes, Windows uses a 32 bit number. -# Other OSes use the bytes shown. 
So for Windows we need different calculations for frame count -# ( https://stackoverflow.com/questions/72482769/numpy-returns-different-results-on-windows-and-unix ) -long_byte_size = 4 if os.name == 'nt' else 2 CURRENT_VERSION = 1 @@ -54,10 +48,7 @@ def process_wav_file(input_file, srt_file, output_file, labels, progress_callbac # Do online downsampling if the files frame rate is higher than our 16k Hz rate # To make sure all the calculations stay accurate - if frame_rate > RATE: - raw_wav, _ = audioop.ratecv(raw_wav, sample_width, number_channels, frame_rate, RATE, None) - if number_channels > 1: - raw_wav = audioop.tomono(raw_wav[0], 2, 1, 0) + raw_wav = resample_audio(raw_wav, frame_rate, number_channels) audioFrames.append(raw_wav) if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ): From 03c6ffc3c5c82450d7374a8f79561b92766431e0 Mon Sep 17 00:00:00 2001 From: Kevin te Raa Date: Sat, 18 Feb 2023 11:43:05 +0100 Subject: [PATCH 06/15] Added dataloading with wav and srt file combinations Preliminary testing shows anywhere between a 10 to 22 times loading speed improvement on Windows with slight improved accuracy on sklearn random forests --- lib/learn_data.py | 3 +- lib/signal_processing.py | 12 +--- lib/stream_processing.py | 4 +- lib/wav.py | 115 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 120 insertions(+), 14 deletions(-) create mode 100644 lib/wav.py diff --git a/lib/learn_data.py b/lib/learn_data.py index 193cceb3..db530043 100644 --- a/lib/learn_data.py +++ b/lib/learn_data.py @@ -22,6 +22,7 @@ from sklearn.neural_network import * from lib.combine_models import define_settings, get_current_default_settings from lib.audio_model import AudioModel +from lib.wav import load_wav_files_with_srts def learn_data(): dir_path = os.path.join( os.path.dirname( os.path.dirname( os.path.realpath(__file__)) ), DATASET_FOLDER) @@ -205,7 +206,7 @@ def load_data( dir_path, max_files, input_type ): for str_label, directories in grouped_data_directories.items(): # Add a label used for classifying the sounds id_label = get_label_for_directory( "".join( directories ) ) - cat_dataset_x, cat_dataset_labels, featureEngineeringTime = load_wav_files( directories, str_label, id_label, 0, max_files, input_type ) + cat_dataset_x, cat_dataset_labels, featureEngineeringTime = load_wav_files_with_srts( directories, str_label, id_label, 0, max_files, input_type ) totalFeatureEngineeringTime += featureEngineeringTime dataset_x.extend( cat_dataset_x ) dataset_labels.extend( cat_dataset_labels ) diff --git a/lib/signal_processing.py b/lib/signal_processing.py index 2f5bb467..ea0ba3bf 100644 --- a/lib/signal_processing.py +++ b/lib/signal_processing.py @@ -109,14 +109,4 @@ def determine_euclidean_dist(mfscData: np.array) -> float: for i in range(0, mel_frame_amount): if i > 0: distance += np.linalg.norm(mfscData[i-1] - mfscData[i]) - return distance - -# Resamples the audio down to 16kHz ( or any other RATE filled in ) -# To make sure all the other calculations are stable and correct -def resample_audio(wavData: np.array, frame_rate, number_channels) -> np.array: - if frame_rate > RATE: - sample_width = 2# 16 bit = 2 bytes - wavData, _ = audioop.ratecv(wavData, sample_width, number_channels, frame_rate, RATE, None) - if number_channels > 1: - wavData = audioop.tomono(wavData[0], 2, 1, 0) - return wavData + return distance \ No newline at end of file diff --git a/lib/stream_processing.py b/lib/stream_processing.py index 9339d5b3..6794ae2a 100644 --- a/lib/stream_processing.py +++ b/lib/stream_processing.py @@ 
-4,7 +4,8 @@
 import wave
 import math
 import numpy as np
-from .signal_processing import determine_power, determine_dBFS, determine_mfsc, determine_euclidean_dist, resample_audio
+from .signal_processing import determine_power, determine_dBFS, determine_mfsc, determine_euclidean_dist
+from .wav import resample_audio
 from .srt import persist_srt_file, print_detection_performance_compared_to_srt
 import os
@@ -12,7 +13,6 @@
 def process_wav_file(input_file, srt_file, output_file, labels, progress_callback = None, comparison_srt_file = None, print_statistics = False):
     audioFrames = []
-    edgesAudioFrames = []
     wf = wave.open(input_file, 'rb')
     number_channels = wf.getnchannels()
     total_frames = wf.getnframes()
diff --git a/lib/wav.py b/lib/wav.py
new file mode 100644
index 00000000..08d8f7af
--- /dev/null
+++ b/lib/wav.py
@@ -0,0 +1,115 @@
+import wave
+from config.config import BACKGROUND_LABEL, RECORD_SECONDS, SLIDING_WINDOW_AMOUNT, RATE, TYPE_FEATURE_ENGINEERING_NORM_MFSC
+from lib.machinelearning import feature_engineering_raw
+from .srt import parse_srt_file
+import numpy as np
+import audioop
+from typing import List
+import os
+import time
+import math
+
+# Resamples the audio down to 16kHz ( or any other RATE filled in )
+# To make sure all the other calculations are stable and correct
+def resample_audio(wavData: np.array, frame_rate, number_channels) -> np.array:
+    if frame_rate > RATE:
+        sample_width = 2# 16 bit = 2 bytes
+        wavData, _ = audioop.ratecv(wavData, sample_width, number_channels, frame_rate, RATE, None)
+        if number_channels > 1:
+            wavData = audioop.tomono(wavData[0], 2, 1, 0)
+    return wavData
+
+def load_wav_files_with_srts( directories, label, int_label, start, end, input_type ):
+    category_dataset_x = []
+    category_dataset_labels = []
+    totalFeatureEngineeringTime = 0
+    category_file_index = 0
+
+    for directory in directories:
+        source_directory = os.path.join( directory, "source" )
+        segments_directory = os.path.join( directory, "segments" )
+
+        srt_files = []
+
+        for fileindex, file in enumerate(os.listdir(segments_directory)):
+            if file.endswith(".srt"):
+                srt_files.append(file)
+
+        for source_index, source_file in enumerate(os.listdir(source_directory)):
+            if source_file.endswith(".wav"):
+                full_filename = os.path.join(source_directory, source_file)
+                print( "Loading " + str(category_file_index) + " files for " + label + "... ", end="\r" )
+                category_file_index += 1
+
+                # Find the SRT files available for this source file
+                shared_key = source_file.replace(".wav", "")
+                possible_srt_files = [x for x in srt_files if x.startswith(shared_key)]
+                if len(possible_srt_files) == 0:
+                    continue
+
+                # Find the highest version of the segmentation for this source file
+                srt_file = possible_srt_files[0]
+                for possible_srt_file in possible_srt_files:
+                    current_version = int( srt_file.replace(".srt", "").replace(shared_key + ".v", "") )
+                    version = int( possible_srt_file.replace(".srt", "").replace(shared_key + ".v", "") )
+                    if version > current_version:
+                        srt_file = possible_srt_file
+                full_srt_filename = os.path.join(segments_directory, srt_file)
+
+                # Load the WAV file and turn it into a one-dimensional array of numbers
+                feature_engineering_start = time.time() * 1000
+                data = load_wav_data_from_srt(full_srt_filename, full_filename, input_type, False)
+                category_dataset_x.extend( data )
+                category_dataset_labels.extend([ label for data_row in data ])
+                totalFeatureEngineeringTime += time.time() * 1000 - feature_engineering_start
+
+    print( "Loaded " + str( len( category_dataset_labels ) ) + " .wav files for category " + label + " (id: " + str(int_label) + ")" )
+    return category_dataset_x, category_dataset_labels, totalFeatureEngineeringTime
+
+def load_wav_data_from_srt(srt_file: str, source_file: str, feature_engineering_type = TYPE_FEATURE_ENGINEERING_NORM_MFSC, with_offset = True) -> List[List[float]]:
+    wav_file_data = []
+    wf = wave.open(source_file, 'rb')
+    frame_rate = wf.getframerate()
+    number_channels = wf.getnchannels()
+    total_frames = wf.getnframes()
+    frames_to_read = round( frame_rate * RECORD_SECONDS / SLIDING_WINDOW_AMOUNT )
+    ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000)
+
+    # If offsets are required - We seek half a frame behind the expected frame to get more data from a different location
+    halfframe_offset = round( frames_to_read * number_channels * 0.5 )
+    start_offsets = [0, -halfframe_offset] if with_offset else [0]
+
+    transition_events = parse_srt_file(srt_file, ms_per_frame)
+    for index, transition_event in enumerate(transition_events):
+        next_event_index = total_frames / frames_to_read if index + 1 >= len(transition_events) else transition_events[index + 1].start_index
+        audioFrames = []
+
+        if transition_event.label != BACKGROUND_LABEL:
+            for offset in start_offsets:
+                # Skip if the offset would place the position before the start of the file
+                if offset + (frames_to_read * transition_event.start_index) < 0:
+                    continue
+                wf.setpos(offset + (frames_to_read * transition_event.start_index))
+
+                keep_collecting = True
+                while keep_collecting:
+                    raw_wav = wf.readframes(frames_to_read * number_channels)
+
+                    # Reached the end of wav - do not keep collecting
+                    if (len(raw_wav) != SLIDING_WINDOW_AMOUNT * frames_to_read * number_channels ):
+                        keep_collecting = False
+                        break
+
+                    raw_wav = resample_audio(raw_wav, frame_rate, number_channels)
+                    audioFrames.append(raw_wav)
+                    if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ):
+                        audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:]
+
+                        byteString = b''.join(audioFrames)
+                        wave_data = np.frombuffer( byteString, dtype=np.int16 )
+                        wav_file_data.append( feature_engineering_raw(wave_data, RATE, 0, RECORD_SECONDS, feature_engineering_type)[0] )
+
+                    if wf.tell() >= ( next_event_index * frames_to_read ) + offset:
+                        keep_collecting = False
+
+    return wav_file_data
\ No newline at end of file
From 4e5e51ee314806bdc71c11839ddd285515f7f014 Mon Sep 17
00:00:00 2001 From: Kevin te Raa Date: Fri, 24 Feb 2023 10:33:13 +0100 Subject: [PATCH 07/15] Improved migration by properly checking individual files for possible upgrades --- lib/migrate_data.py | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/lib/migrate_data.py b/lib/migrate_data.py index 163cd1a8..98593a90 100644 --- a/lib/migrate_data.py +++ b/lib/migrate_data.py @@ -10,9 +10,17 @@ def check_migration(): recording_dirs = os.listdir(RECORDINGS_FOLDER) for file in recording_dirs: if os.path.isdir(os.path.join(RECORDINGS_FOLDER, file)): - if not os.path.exists(os.path.join(RECORDINGS_FOLDER, file, "segments")) \ - or not os.listdir(os.path.join(RECORDINGS_FOLDER, file, "segments")): + segments_folder = os.path.join(RECORDINGS_FOLDER, file, "segments") + if not os.path.exists(segments_folder): version_detected = 0 + break + else: + source_files = os.listdir(os.path.join(RECORDINGS_FOLDER, file, "source")) + for source_file in source_files: + srt_file = source_file.replace(".wav", ".v" + str(CURRENT_VERSION) + ".srt") + if not os.path.exists(os.path.join(segments_folder, srt_file)): + version_detected = 0 + break if version_detected < CURRENT_VERSION: print("----------------------------") @@ -34,20 +42,25 @@ def migrate_data(): wav_files = [x for x in os.listdir(source_dir) if os.path.isfile(os.path.join(source_dir, x)) and x.endswith(".wav")] progress = 0 progress_chunk = 1 / len( wav_files ) + skipped_amount = 0 for index, wav_file in enumerate(wav_files): - wav_file_location = os.path.join(source_dir, wav_file) + wav_file_location = os.path.join(source_dir, wav_file) srt_file_location = os.path.join(segments_dir, wav_file.replace(".wav", ".v" + str(CURRENT_VERSION) + ".srt")) output_file_location = os.path.join(segments_dir, wav_file.replace(".wav", "_detection.wav")) - - process_wav_file(wav_file_location, srt_file_location, output_file_location, [label], \ - lambda internal_progress, state: print_migration_progress(progress + (internal_progress * progress_chunk), state) ) + + # Only resegment if the new version does not exist already + if not os.path.exists(srt_file_location): + process_wav_file(wav_file_location, srt_file_location, output_file_location, [label], \ + lambda internal_progress, state: print_migration_progress(progress + (internal_progress * progress_chunk), state) ) + else: + skipped_amount += 1 progress = index / len( wav_files ) + progress_chunk - if progress == 1: + if progress == 1 and skipped_amount < len(wav_files): clear_previous_lines(1) clear_previous_lines(1) - print( label + " resegmented!" ) + print( label + " resegmented!" if skipped_amount < len(wav_files) else label + " already properly segmented!" 
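+            # Editor's note ( illustrative, not part of the original patch ):
+            # since the enclosing condition above already requires
+            # skipped_amount < len(wav_files), the "already properly segmented!"
+            # branch of this conditional expression can never be printed here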
) time.sleep(1) print("Finished migrating data!") From cc5f7bada0f23c58da46939cddd1f33b12e2cac7 Mon Sep 17 00:00:00 2001 From: Kevin te Raa Date: Fri, 24 Feb 2023 12:05:49 +0100 Subject: [PATCH 08/15] Integrated new srt loading into audio dataset loading Improves data loading times even when compared to cached audio data --- lib/audio_dataset.py | 78 +++++++++++++++++++++--------------------- lib/machinelearning.py | 1 - lib/wav.py | 18 ++++++++-- 3 files changed, 54 insertions(+), 43 deletions(-) diff --git a/lib/audio_dataset.py b/lib/audio_dataset.py index 6fc66810..8028007b 100644 --- a/lib/audio_dataset.py +++ b/lib/audio_dataset.py @@ -5,6 +5,7 @@ import numpy as np import random import math +from lib.wav import load_wav_data_from_srt class AudioDataset(Dataset): @@ -15,61 +16,60 @@ def __init__(self, grouped_data_directories, settings): self.augmented_samples = [] self.length = 0 self.training = False - rebuild_cache = False for index, label in enumerate( grouped_data_directories ): directories = grouped_data_directories[ label ] - listed_files = [] + listed_files = {} for directory in directories: - for file in os.listdir( directory ): - if( file.endswith(".wav") ): - listed_files.append( os.path.join(directory, file) ) + segments_directory = os.path.join(directory, "segments") + source_directory = os.path.join(directory, "source") + if not (os.path.exists(segments_directory) and os.path.exists(source_directory)): + continue + + source_files = os.listdir(source_directory) + srt_files = [x for x in os.listdir(segments_directory) if x.endswith(".srt")] + for source_file in source_files: + shared_key = source_file.replace(".wav", "") + + possible_srt_files = [x for x in srt_files if x.startswith(shared_key)] + if len(possible_srt_files) == 0: + continue + + # Find the highest version of the segmentation for this source file + srt_file = possible_srt_files[0] + for possible_srt_file in possible_srt_files: + current_version = int( srt_file.replace(".srt", "").replace(shared_key + ".v", "") ) + version = int( possible_srt_file.replace(".srt", "").replace(shared_key + ".v", "") ) + if version > current_version: + srt_file = possible_srt_file + + listed_files[os.path.join(source_directory, source_file)] = os.path.join(segments_directory, srt_file) listed_files_size = len( listed_files ) - print( f"Loading in {label}: {listed_files_size} files" ) - - for file_index, full_filename in enumerate( listed_files ): - print( str( math.floor(((file_index + 1 ) / listed_files_size ) * 100)) + "%", end="\r" ) - - # When the input length changes due to a different input type being used, we need to rebuild the cache from scratch - if (index == 0 and file_index == 0): - rebuild_cache = len(self.feature_engineering_cached(full_filename, False)) != len(self.feature_engineering_augmented(full_filename)) - - self.samples.append([full_filename, index, torch.tensor(self.feature_engineering_cached(full_filename, rebuild_cache)).float()]) - self.augmented_samples.append(None) + print( f"Loading in {label}" ) + listed_source_files = listed_files.keys() + for file_index, full_filename in enumerate( listed_source_files ): + all_samples = load_wav_data_from_srt(listed_files[full_filename], full_filename, self.settings['FEATURE_ENGINEERING_TYPE'], False) + augmented_samples = load_wav_data_from_srt(listed_files[full_filename], full_filename, self.settings['FEATURE_ENGINEERING_TYPE'], False, True) + + for sample in all_samples: + self.samples.append([full_filename, index, torch.tensor(sample).float()]) + for 
augmented_sample in augmented_samples: + self.augmented_samples.append([full_filename, index, torch.tensor(augmented_sample).float()]) def set_training(self, training): self.training = training - def feature_engineering_cached(self, filename, rebuild_cache=False): - # Only build a filesystem cache of feature engineering results if we are dealing with non-raw wave form - if (self.settings['FEATURE_ENGINEERING_TYPE'] != 1): - cache_dir = os.path.join(os.path.dirname(filename), "cache") - os.makedirs(cache_dir, exist_ok=True) - cached_filename = os.path.join(cache_dir, os.path.basename(filename) + "_fe") - if (os.path.isfile(cached_filename) == False or rebuild_cache == True): - data_row = training_feature_engineering(filename, self.settings) - np.savetxt( cached_filename, data_row ) - else: - cached_filename = filename - - return np.loadtxt( cached_filename, dtype='float' ) - - def feature_engineering_augmented(self, filename): - return augmented_feature_engineering(filename, self.settings) - def __len__(self): return len( self.samples ) def __getitem__(self, idx): # During training, get a 10% probability that you get an augmented sample if (self.training and random.uniform(0, 1) >= 0.9 ): - if (self.augmented_samples[idx] is None): - self.augmented_samples[idx] = [self.samples[idx][0], self.samples[idx][1], torch.tensor(self.feature_engineering_augmented(self.samples[idx][0])).float()] - return self.augmented_samples[idx][2], self.augmented_samples[idx][1] - else: - return self.samples[idx][2], self.samples[idx][1] - + if (idx in self.augmented_samples): + return self.augmented_samples[idx][2], self.augmented_samples[idx][1] + return self.samples[idx][2], self.samples[idx][1] + def get_labels(self): return self.paths diff --git a/lib/machinelearning.py b/lib/machinelearning.py index 5d9ba806..260a7e46 100644 --- a/lib/machinelearning.py +++ b/lib/machinelearning.py @@ -114,7 +114,6 @@ def augmented_feature_engineering( wavFile, settings ): print( "OLD MFCC TYPE IS NOT SUPPORTED FOR TRAINING PYTORCH" ) return data_row - def get_label_for_directory( setdir ): return float( int(hashlib.sha256( setdir.encode('utf-8')).hexdigest(), 16) % 10**8 ) diff --git a/lib/wav.py b/lib/wav.py index 08d8f7af..4b532179 100644 --- a/lib/wav.py +++ b/lib/wav.py @@ -1,5 +1,5 @@ import wave -from config.config import BACKGROUND_LABEL, RECORD_SECONDS, SLIDING_WINDOW_AMOUNT, RATE, TYPE_FEATURE_ENGINEERING_NORM_MFSC +from config.config import BACKGROUND_LABEL, RECORD_SECONDS, SLIDING_WINDOW_AMOUNT, RATE, TYPE_FEATURE_ENGINEERING_NORM_MFSC, PYTORCH_AVAILABLE from lib.machinelearning import feature_engineering_raw from .srt import parse_srt_file import numpy as np @@ -8,6 +8,8 @@ import os import time import math +if (PYTORCH_AVAILABLE == True): + from audiomentations import Compose, AddGaussianNoise, Shift, TimeStretch # Resamples the audio down to 16kHz ( or any other RATE filled in ) # To make sure all the other calculations are stable and correct @@ -66,7 +68,15 @@ def load_wav_files_with_srts( directories, label, int_label, start, end, input_t print( "Loaded " + str( len( category_dataset_labels ) ) + " .wav files for category " + label + " (id: " + str(int_label) + ")" ) return category_dataset_x, category_dataset_labels, totalFeatureEngineeringTime -def load_wav_data_from_srt(srt_file: str, source_file: str, feature_engineering_type = TYPE_FEATURE_ENGINEERING_NORM_MFSC, with_offset = True) -> List[List[float]]: +def augment_wav_data(wavData, sample_rate): + augmenter = Compose([ + 
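+        # Editor's note ( illustrative, not part of the original patch ):
+        # each transform below fires independently with p=0.5, so an augmented
+        # clip receives on average 1.5 of the three perturbations and is left
+        # completely unchanged only about 1 in 8 times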
AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5), + TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5), + Shift(min_fraction=-0.5, max_fraction=0.5, p=0.5), + ]) + return augmenter(samples=np.array(wavData, dtype="float32"), sample_rate=sample_rate) + +def load_wav_data_from_srt(srt_file: str, source_file: str, feature_engineering_type = TYPE_FEATURE_ENGINEERING_NORM_MFSC, with_offset = True, should_augment=False) -> List[List[float]]: wav_file_data = [] wf = wave.open(source_file, 'rb') frame_rate = wf.getframerate() @@ -100,13 +110,15 @@ def load_wav_data_from_srt(srt_file: str, source_file: str, feature_engineering_ keep_collecting = False break - raw_wav = resample_audio(raw_wav, frame_rate, number_channels) + raw_wav = resample_audio(raw_wav, frame_rate, number_channels) audioFrames.append(raw_wav) if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ): audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:] byteString = b''.join(audioFrames) wave_data = np.frombuffer( byteString, dtype=np.int16 ) + if should_augment and PYTORCH_AVAILABLE: + wave_data = augment_wav_data(wave_data, RATE) wav_file_data.append( feature_engineering_raw(wave_data, RATE, 0, RECORD_SECONDS, feature_engineering_type)[0] ) if wf.tell() >= ( next_event_index * frames_to_read ) + offset: From 6e3bd3c9ef511487ea60999ad702508f07db59da Mon Sep 17 00:00:00 2001 From: Kevin te Raa Date: Fri, 24 Feb 2023 20:03:33 +0100 Subject: [PATCH 09/15] Slight refactoring in stream processing to allow easier integration in recording audio streams Started implementing new recording flow - Does not do post processing and key processing properly yet --- lib/print_status.py | 18 ++--- lib/record_data.py | 142 +++++++++++++++++---------------------- lib/stream_processing.py | 134 +++++++++++++++++++----------------- 3 files changed, 143 insertions(+), 151 deletions(-) diff --git a/lib/print_status.py b/lib/print_status.py index 37068a7d..1f9bfc55 100644 --- a/lib/print_status.py +++ b/lib/print_status.py @@ -19,13 +19,13 @@ def create_progress_bar(percentage: float = 1.0) -> str: filled_characters = round(max(0, min(LINE_LENGTH, LINE_LENGTH * percentage))) return "".rjust(filled_characters, PROGRESS_FILLED).ljust(LINE_LENGTH, PROGRESS_AVAILABLE) -def get_current_status(detection_state: DetectionState) -> List[str]: - recorded_timestring = ms_to_srt_timestring( detection_state.ms_recorded, False) +def get_current_status(detection_state: DetectionState, multiplier = 1) -> List[str]: + recorded_timestring = ms_to_srt_timestring( detection_state.ms_recorded * multiplier, False) # Quality rating was manually established by doing some testing with added noise # And finding the results becoming worse when the SNR went lower than 10 quality = "" - if detection_state.ms_recorded > 10000: + if detection_state.ms_recorded * multiplier > 10000: if detection_state.expected_snr >= 25: quality = "Excellent" elif detection_state.expected_snr >= 20: @@ -67,24 +67,24 @@ def get_current_status(detection_state: DetectionState) -> List[str]: # And 1000 30ms windows being enough to train a label decently # With atleast 10 percent extra for a possible hold-out set during training quantity = "" - if label.ms_detected < 16500: + if label.ms_detected * multiplier < 16500: quantity = "Not enough" - elif label.ms_detected > 16500 and label.ms_detected < 41250: + elif label.ms_detected * multiplier > 16500 and label.ms_detected * multiplier < 41250: quantity = "Sufficient" - elif label.ms_detected >= 41250 and label.ms_detected < 82500: + elif 
label.ms_detected * multiplier >= 41250 and label.ms_detected * multiplier < 82500: quantity = "Good" - elif label.ms_detected >= 82500: + elif label.ms_detected * multiplier >= 82500: quantity = "Excellent" lines.extend([ "|".ljust(LINE_LENGTH - 2,"-") + "|", "| " + label.label.ljust(LINE_LENGTH - 5) + " |", - "| " + "Recorded: " + ms_to_srt_timestring( label.ms_detected, False ).rjust(LINE_LENGTH - 15) + " |", + "| " + "Recorded: " + ms_to_srt_timestring( label.ms_detected * multiplier, False ).rjust(LINE_LENGTH - 15) + " |", "| " + "Data Quantity: " + quantity.rjust(LINE_LENGTH - 20) + " |", ]) if detection_state.advanced_logging: - lines.append( "| " + ("type:" + str(label.duration_type if label.duration_type else "Unknown").upper().rjust(LINE_LENGTH - 10)) + " |" ) + lines.append( "| " + ("type:" + str(label.duration_type if label.duration_type else "DETERMINING...").upper().rjust(LINE_LENGTH - 10)) + " |" ) lines.append( "| " + ("dBFS treshold:" + str(round(label.min_dBFS, 2)).rjust(LINE_LENGTH - 19)) + " |" ) lines.append("'".ljust(LINE_LENGTH - 2,"-") + "'") diff --git a/lib/record_data.py b/lib/record_data.py index cc684420..a3c4cd91 100644 --- a/lib/record_data.py +++ b/lib/record_data.py @@ -19,7 +19,12 @@ import sys from lib.listen import validate_microphone_input from lib.key_poller import KeyPoller +from lib.stream_processing import CURRENT_VERSION, CURRENT_DETECTION_STRATEGY, process_audio_frame +from lib.srt import persist_srt_file +from lib.print_status import get_current_status, reset_previous_lines, clear_previous_lines +from lib.typing import DetectionLabel, DetectionState import struct +lock = threading.Lock() # Countdown from seconds to 0 def countdown( seconds ): @@ -35,6 +40,7 @@ def countdown( seconds ): def record_controls( key_poller, recordQueue=None ): global currently_recording global streams + global lock ESCAPEKEY = '\x1b' SPACEBAR = ' ' @@ -42,6 +48,9 @@ def record_controls( key_poller, recordQueue=None ): if(character is not None): if( character == SPACEBAR ): print( "Recording paused!" ) + with lock: + if( recordQueue != None ): + recordQueue['status'] = 'paused' if (streams is not None): for stream in streams: @@ -54,22 +63,23 @@ def record_controls( key_poller, recordQueue=None ): if( recordQueue != None ): for key in recordQueue: recordQueue[key].queue.clear() - + character = key_poller.poll() - if(character is not None): + if(character is not None and ( recordQueue is None or recordQueue['status'] != 'processing') ): if( character == SPACEBAR ): - print( "Recording resumed!" 
) + with lock: + if( recordQueue != None ): + recordQueue['status'] = 'recording' + if (streams is not None): for stream in streams: streams[stream].start_stream() return True elif( character == ESCAPEKEY ): - print( "Recording stopped" ) currently_recording = False return False time.sleep(0.3) elif( character == ESCAPEKEY ): - print( "Recording stopped" ) currently_recording = False return False return True @@ -138,25 +148,6 @@ def record_sound(): if not os.path.exists(RECORDINGS_FOLDER + "/" + directory + "/source"): os.makedirs(RECORDINGS_FOLDER + "/" + directory + "/source") - print("What signal power ( loudness ) threshold do you need?") - print("(if you do not know, start with something like 10000 and see afterwards") - print("what power values you get while recording.)") - power_threshold = input("power: ") - if( power_threshold == "" ): - power_threshold = 0 - else: - power_threshold = int( power_threshold ) - - print("What frequency threshold do you need?") - print("(you may not need this at all, so feel free to just press enter here)") - frequency_threshold = input("frequency: ") - if( frequency_threshold == "" ): - frequency_threshold = 0 - else: - frequency_threshold = int( frequency_threshold ) - begin_threshold = 10000 - - print("") print("You can pause/resume the recording session using the [SPACE] key, and stop the recording using the [ESC] key" ) global streams @@ -166,7 +157,10 @@ def record_sound(): files_recorded = 0 streams = {} audios = {} - recordQueue = {} + recordQueue = { + 'status': 'recording' + } + labels = [directory] if( countdown( 5 ) == False ): return; @@ -175,12 +169,9 @@ def record_sound(): time_string = str(int(time.time())) for index, microphone_index in enumerate(valid_mics): - FULL_WAVE_OUTPUT_FILENAME = RECORDINGS_FOLDER + "/" + directory + "/source/i_0__p_" + str(power_threshold) + \ - "__f_" + str(frequency_threshold) + "__begin_" + str(begin_threshold) + "__mici_" + str(microphone_index) + "__" + time_string + ".wav" - WAVE_OUTPUT_FILENAME = RECORDINGS_FOLDER + "/" + directory + "/" + time_string + "__mici_" + str(microphone_index) + "__file"; - WAVE_OUTPUT_FILE_EXTENSION = ".wav"; - - non_blocking_record(power_threshold, frequency_threshold, begin_threshold, WAVE_OUTPUT_FILENAME, WAVE_OUTPUT_FILE_EXTENSION, FULL_WAVE_OUTPUT_FILENAME, microphone_index, index==0) + FULL_WAVE_OUTPUT_FILENAME = RECORDINGS_FOLDER + "/" + directory + "/source/mici_" + str(microphone_index) + "__" + time_string + ".wav" + SRT_FILENAME = RECORDINGS_FOLDER + "/" + directory + "/segments/mici_" + str(microphone_index) + "__" + time_string + "v" + str(CURRENT_VERSION) + ".srt" + non_blocking_record(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILENAME, microphone_index, index==0) # wait for stream to finish (5) while currently_recording: @@ -192,21 +183,31 @@ def record_sound(): audios['index' + str(microphone_index)].terminate() # Consumes the recordings in a sliding window fashion - Always combining the two latest chunks together -def record_consumer(power_threshold, frequency_threshold, begin_threshold, WAVE_OUTPUT_FILENAME, WAVE_OUTPUT_FILE_EXTENSION, FULL_WAVE_OUTPUT_FILENAME, MICROPHONE_INPUT_INDEX, audio, streams, print_stuff=False): +def record_consumer(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, audio, streams, print_stuff=False): global recordQueue global currently_recording global files_recorded indexedQueue = recordQueue['index' + str(MICROPHONE_INPUT_INDEX)] - j = 0 - record_wave_file_count = 0 - audioFrames = [] + amount_of_streams = 
len(streams) + detection_strategy = CURRENT_DETECTION_STRATEGY - # Set the proper thresholds for starting recordings - delay_threshold = 0 - if( begin_threshold < 0 ): - delay_threshold = begin_threshold * -1 - begin_threshold = 1000 + ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000) + detection_labels = [] + for label in labels: + detection_labels.append(DetectionLabel(label, 0, "", 0, 0, 0, 0)) + detection_state = DetectionState(detection_strategy, "recording", ms_per_frame, 0, True, 0, 0, detection_labels) + + audioFrames = [] + false_occurrence = [] + current_occurrence = [] + index = 0 + detection_frames = [] + + if print_stuff: + current_status = get_current_status(detection_state) + for line in current_status: + print( line ) totalAudioFrames = [] try: @@ -229,44 +230,24 @@ def record_consumer(power_threshold, frequency_threshold, begin_threshold, WAVE_ while( currently_recording ): while( not indexedQueue.empty() ): audioFrames.append( indexedQueue.get() ) + detection_state.ms_recorded += ms_per_frame + audioFrames, detection_state, detection_frames, current_occurrence, false_occurrence = \ + process_audio_frame(index, audioFrames, detection_state, detection_frames, current_occurrence, false_occurrence) totalAudioFrames.append( audioFrames[-1] ) - if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ): - j+=1 - audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:] + index += 1 - byteString = b''.join(audioFrames) - fftData = np.frombuffer( byteString, dtype=np.int16 ) - frequency = get_loudest_freq( fftData, RECORD_SECONDS ) - power = get_recording_power( fftData, RECORD_SECONDS ) - - fileid = "%0.2f" % ((j) * RECORD_SECONDS ) - - if( record_controls( key_poller, recordQueue ) == False ): - for stream in streams: - streams[stream].stop_stream() - currently_recording = False - break; - - if( frequency > frequency_threshold and power > power_threshold ): - record_wave_file_count += 1 - if( record_wave_file_count <= begin_threshold and record_wave_file_count > delay_threshold ): - files_recorded += 1 - if print_stuff: - print( "Files recorded: %0d - Power: %0d - Freq: %0d - Saving %s" % ( files_recorded, power, frequency, fileid ) ) - waveFile = wave.open(WAVE_OUTPUT_FILENAME + fileid + WAVE_OUTPUT_FILE_EXTENSION, 'wb') - waveFile.setnchannels(CHANNELS) - waveFile.setsampwidth(audio.get_sample_size(FORMAT)) - waveFile.setframerate(RATE) - waveFile.writeframes(byteString) - waveFile.close() - else: - if print_stuff: - print( "Files recorded: %0d - Power: %0d - Freq: %0d" % ( files_recorded, power, frequency ) ) - else: - record_wave_file_count = 0 - if print_stuff: - print( "Files recorded: %0d - Power: %0d - Freq: %0d" % ( files_recorded, power, frequency ) ) - + if( record_controls( key_poller, recordQueue ) == False ): + for stream in streams: + streams[stream].stop_stream() + currently_recording = False + break; + + if print_stuff: + current_status = get_current_status(detection_state) + reset_previous_lines(len(current_status)) + for line in current_status: + print( line ) + # Append to the total wav file only once every ten audio frames ( roughly once every 225 milliseconds ) if (len(totalAudioFrames) >= 15 ): byteString = b''.join(totalAudioFrames) @@ -304,9 +285,8 @@ def multithreaded_record( in_data, frame_count, time_info, status, queue ): return in_data, pyaudio.paContinue -# Records a non blocking audio stream and saves the chunks onto a queue -# The queue will be used as a sliding window over the audio, where two chunks are combined into one audio file -def 
non_blocking_record(power_threshold, frequency_threshold, begin_threshold, WAVE_OUTPUT_FILENAME, WAVE_OUTPUT_FILE_EXTENSION, FULL_WAVE_OUTPUT_FILENAME, MICROPHONE_INPUT_INDEX, print_logs): +# Records a non blocking audio stream and saves the source and SRT file for it +def non_blocking_record(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, print_logs): global recordQueue global streams global audios @@ -321,8 +301,8 @@ def non_blocking_record(power_threshold, frequency_threshold, begin_threshold, W input_device_index=MICROPHONE_INPUT_INDEX, frames_per_buffer=round( RATE * RECORD_SECONDS / SLIDING_WINDOW_AMOUNT ), stream_callback=micindexed_lambda) - - consumer = threading.Thread(name='consumer', target=record_consumer, args=(power_threshold, frequency_threshold, begin_threshold, WAVE_OUTPUT_FILENAME, WAVE_OUTPUT_FILE_EXTENSION, FULL_WAVE_OUTPUT_FILENAME, MICROPHONE_INPUT_INDEX, audios[mic_index], streams, print_logs)) + + consumer = threading.Thread(name='consumer', target=record_consumer, args=(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, audios[mic_index], streams, print_logs)) consumer.setDaemon( True ) consumer.start() streams[mic_index].start_stream() diff --git a/lib/stream_processing.py b/lib/stream_processing.py index 6794ae2a..43a24e03 100644 --- a/lib/stream_processing.py +++ b/lib/stream_processing.py @@ -10,6 +10,7 @@ import os CURRENT_VERSION = 1 +CURRENT_DETECTION_STRATEGY = "auto_dBFS_mend_dBFS_30ms_secondary_dBFS_reject_cont_45ms_repair" def process_wav_file(input_file, srt_file, output_file, labels, progress_callback = None, comparison_srt_file = None, print_statistics = False): audioFrames = [] @@ -21,7 +22,7 @@ def process_wav_file(input_file, srt_file, output_file, labels, progress_callbac ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000) sample_width = 2# 16 bit = 2 bytes - detection_strategy = "auto_dBFS_mend_dBFS_30ms_secondary_dBFS_reject_cont_45ms_repair" + detection_strategy = CURRENT_DETECTION_STRATEGY detection_labels = [] for label in labels: @@ -34,7 +35,7 @@ def process_wav_file(input_file, srt_file, output_file, labels, progress_callbac detection_frames = [] if progress_callback is not None: - progress_callback(0, detection_state) + progress_callback(0, detection_state) while( wf.tell() < total_frames ): index = index + 1 @@ -51,65 +52,8 @@ def process_wav_file(input_file, srt_file, output_file, labels, progress_callbac raw_wav = resample_audio(raw_wav, frame_rate, number_channels) audioFrames.append(raw_wav) - if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ): - audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:] - - byteString = b''.join(audioFrames) - wave_data = np.frombuffer( byteString, dtype=np.int16 ) - power = determine_power( wave_data ) - dBFS = determine_dBFS( wave_data ) - mfsc_data = determine_mfsc( wave_data, RATE ) - distance = determine_euclidean_dist( mfsc_data ) - - # Attempt to detect a label - detected_label = BACKGROUND_LABEL - for label in detection_state.labels: - if is_detected(detection_state.strategy, power, dBFS, distance, label.min_dBFS): - detected = True - label.ms_detected += ms_per_frame - detected_label = label.label - break - - detection_frames.append(DetectionFrame(index, ms_per_frame, detected, power, dBFS, distance, mfsc_data, detected_label)) - if detected: - current_occurrence.append(detection_frames[-1]) - else: - false_occurrence.append(detection_frames[-1]) - else: - detection_frames.append(DetectionFrame(index, ms_per_frame, False, 0, 0, 0, [], 
BACKGROUND_LABEL)) - false_occurrence.append(detection_frames[-1]) - - # Recalculate the noise floor / signal strength every 10 frames - # For performance reason and because the statistical likelyhood of things changing every 150ms is pretty low - if len(detection_frames) % 10 == 0: - detection_state = determine_detection_state(detection_frames, detection_state) - - # On-line rejection - This may be undone in post-processing later - # Only add occurrences longer than 75 ms as no sound a human produces is shorter - if detected == False and len(current_occurrence) > 0: - is_continuous = False - for label in detection_state.labels: - if label == current_occurrence[0].label: - is_continuous = label.duration_type == "continuous" - break - - if is_rejected(detection_state.strategy, current_occurrence, detection_state.ms_per_frame, is_continuous): - total_rejected_frames = len(current_occurrence) - for frame_index in range(-total_rejected_frames - 1, 0, 1): - rejected_frame_index = frame_index - detection_frames[rejected_frame_index].label = BACKGROUND_LABEL - detection_frames[rejected_frame_index].positive = False - current_occurrence = [] - # On-line mending - This may be undone in post-processing later - # Only keep false detections longer than a certain amount ( because a human can't make them shorter ) - elif detected and len(false_occurrence) > 0: - if is_mended(detection_state.strategy, false_occurrence, detection_state, detected_label): - total_mended_frames = len(false_occurrence) - for frame_index in range(-total_mended_frames - 1, 0, 1): - mended_frame_index = frame_index - detection_frames[mended_frame_index].label = detected_label - detection_frames[mended_frame_index].positive = True - false_occurrence = [] + audioFrames, detection_state, detection_frames, current_occurrence, false_occurrence = \ + process_audio_frame(index, audioFrames, detection_state, detection_frames, current_occurrence, false_occurrence) # Convert from different byte sizes to 16bit for proper progress progress = wf.tell() / total_frames @@ -130,6 +74,74 @@ def process_wav_file(input_file, srt_file, output_file, labels, progress_callbac if progress_callback is not None: progress_callback(progress, detection_state) +def process_audio_frame(index, audioFrames, detection_state, detection_frames, current_occurrence, false_occurrence): + detection_frames.append(determine_detection_frame(index, detection_state, audioFrames)) + detected = detection_frames[-1].positive + detected_label = detection_frames[-1].label + if detected: + current_occurrence.append(detection_frames[-1]) + else: + false_occurrence.append(detection_frames[-1]) + + # Recalculate the noise floor / signal strength every 10 frames + # For performance reason and because the statistical likelyhood of things changing every 150ms is pretty low + if len(detection_frames) % 10 == 0: + detection_state = determine_detection_state(detection_frames, detection_state) + + # On-line rejection - This may be undone in post-processing later + # Only add occurrences longer than 75 ms as no sound a human produces is shorter + if detected == False and len(current_occurrence) > 0: + is_continuous = False + for label in detection_state.labels: + if label == current_occurrence[0].label: + is_continuous = label.duration_type == "continuous" + break + + if is_rejected(detection_state.strategy, current_occurrence, detection_state.ms_per_frame, is_continuous): + total_rejected_frames = len(current_occurrence) + for frame_index in range(-total_rejected_frames - 1, 0, 1): + 
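+                    # Editor's note ( illustrative, not part of the original patch ):
+                    # range(-N - 1, 0) walks the N positive frames of the rejected
+                    # occurrence plus the negative frame that just closed it,
+                    # relabeling them all as background in place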
rejected_frame_index = frame_index + detection_frames[rejected_frame_index].label = BACKGROUND_LABEL + detection_frames[rejected_frame_index].positive = False + current_occurrence = [] + # On-line mending - This may be undone in post-processing later + # Only keep false detections longer than a certain amount ( because a human can't make them shorter ) + elif detected and len(false_occurrence) > 0: + if is_mended(detection_state.strategy, false_occurrence, detection_state, detected_label): + total_mended_frames = len(false_occurrence) + for frame_index in range(-total_mended_frames - 1, 0, 1): + mended_frame_index = frame_index + detection_frames[mended_frame_index].label = detected_label + detection_frames[mended_frame_index].positive = True + false_occurrence = [] + + return audioFrames, detection_state, detection_frames, current_occurrence, false_occurrence + +def determine_detection_frame(index, detection_state, audioFrames) -> DetectionFrame: + detected = False + if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ): + audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:] + + byteString = b''.join(audioFrames) + wave_data = np.frombuffer( byteString, dtype=np.int16 ) + power = determine_power( wave_data ) + dBFS = determine_dBFS( wave_data ) + mfsc_data = determine_mfsc( wave_data, RATE ) + distance = determine_euclidean_dist( mfsc_data ) + + # Attempt to detect a label + detected_label = BACKGROUND_LABEL + for label in detection_state.labels: + if is_detected(detection_state.strategy, power, dBFS, distance, label.min_dBFS): + detected = True + label.ms_detected += detection_state.ms_per_frame + detected_label = label.label + break + + return DetectionFrame(index, detection_state.ms_per_frame, detected, power, dBFS, distance, mfsc_data, detected_label) + else: + return DetectionFrame(index, detection_state.ms_per_frame, detected, 0, 0, 0, [], BACKGROUND_LABEL) + def post_processing(frames: List[DetectionFrame], detection_state: DetectionState, output_filename: str, progress_callback = None, output_wave_file: wave.Wave_write = None, comparison_srt_file: str = None, print_statistics = False) -> List[DetectionFrame]: detection_state.state = "processing" if progress_callback is not None: From f9815b36ae2197e42cb9456ba0368ca88757b299 Mon Sep 17 00:00:00 2001 From: Kevin te Raa Date: Sat, 25 Feb 2023 11:22:45 +0100 Subject: [PATCH 10/15] Added post processing to recording flow Improved threshold level determination from a high SNR stream Still working on properly displaying status and processing the data during pauses --- lib/print_status.py | 1 + lib/record_data.py | 64 ++++++++++++++++++++++++++++++---------- lib/stream_processing.py | 23 +++++++++++---- lib/typing.py | 3 +- 4 files changed, 70 insertions(+), 21 deletions(-) diff --git a/lib/print_status.py b/lib/print_status.py index 1f9bfc55..3d9e28b1 100644 --- a/lib/print_status.py +++ b/lib/print_status.py @@ -53,6 +53,7 @@ def get_current_status(detection_state: DetectionState, multiplier = 1) -> List[ else: lines.append("| " + detection_state.state.upper().ljust(LINE_LENGTH - 5) + " |") + lines.append("| " + ("dBFS:" + str(round(detection_state.latest_dBFS)).rjust(LINE_LENGTH - 10)) + " |") if detection_state.advanced_logging: lines.extend([ "|".ljust(LINE_LENGTH - 2,"-") + "|", diff --git a/lib/record_data.py b/lib/record_data.py index a3c4cd91..4949819c 100644 --- a/lib/record_data.py +++ b/lib/record_data.py @@ -19,7 +19,7 @@ import sys from lib.listen import validate_microphone_input from lib.key_poller import KeyPoller -from 
lib.stream_processing import CURRENT_VERSION, CURRENT_DETECTION_STRATEGY, process_audio_frame +from lib.stream_processing import CURRENT_VERSION, CURRENT_DETECTION_STRATEGY, process_audio_frame, post_processing from lib.srt import persist_srt_file from lib.print_status import get_current_status, reset_previous_lines, clear_previous_lines from lib.typing import DetectionLabel, DetectionState @@ -47,7 +47,8 @@ def record_controls( key_poller, recordQueue=None ): character = key_poller.poll() if(character is not None): if( character == SPACEBAR ): - print( "Recording paused!" ) + if( recordQueue == None ): + print( "Recording paused!" ) with lock: if( recordQueue != None ): recordQueue['status'] = 'paused' @@ -62,8 +63,9 @@ def record_controls( key_poller, recordQueue=None ): ## If the audio queue exists - make sure to clear it continuously if( recordQueue != None ): for key in recordQueue: - recordQueue[key].queue.clear() - + if not key.startswith('status'): + recordQueue[key].queue.clear() + character = key_poller.poll() if(character is not None and ( recordQueue is None or recordQueue['status'] != 'processing') ): if( character == SPACEBAR ): @@ -170,8 +172,8 @@ def record_sound(): time_string = str(int(time.time())) for index, microphone_index in enumerate(valid_mics): FULL_WAVE_OUTPUT_FILENAME = RECORDINGS_FOLDER + "/" + directory + "/source/mici_" + str(microphone_index) + "__" + time_string + ".wav" - SRT_FILENAME = RECORDINGS_FOLDER + "/" + directory + "/segments/mici_" + str(microphone_index) + "__" + time_string + "v" + str(CURRENT_VERSION) + ".srt" - non_blocking_record(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILENAME, microphone_index, index==0) + SRT_FILENAME = RECORDINGS_FOLDER + "/" + directory + "/segments/mici_" + str(microphone_index) + "__" + time_string + ".v" + str(CURRENT_VERSION) + ".srt" + non_blocking_record(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILENAME, microphone_index, index==0, len(valid_mics)) # wait for stream to finish (5) while currently_recording: @@ -183,7 +185,7 @@ def record_sound(): audios['index' + str(microphone_index)].terminate() # Consumes the recordings in a sliding window fashion - Always combining the two latest chunks together -def record_consumer(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, audio, streams, print_stuff=False): +def record_consumer(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, audio, streams, print_stuff=False, mic_amount = 1): global recordQueue global currently_recording global files_recorded @@ -196,7 +198,7 @@ def record_consumer(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPU detection_labels = [] for label in labels: detection_labels.append(DetectionLabel(label, 0, "", 0, 0, 0, 0)) - detection_state = DetectionState(detection_strategy, "recording", ms_per_frame, 0, True, 0, 0, detection_labels) + detection_state = DetectionState(detection_strategy, "recording", ms_per_frame, 0, True, 0, 0, 0, detection_labels) audioFrames = [] false_occurrence = [] @@ -205,10 +207,11 @@ def record_consumer(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPU detection_frames = [] if print_stuff: - current_status = get_current_status(detection_state) + current_status = get_current_status(detection_state, mic_amount) for line in current_status: print( line ) + comparison_wav_file = wave.open(SRT_FILE.replace(".v" + str(CURRENT_VERSION) + ".srt", "_comparison.wav"), 'wb') totalAudioFrames = [] try: with KeyPoller() as key_poller: @@ -226,16 +229,29 @@ def 
record_consumer(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX LITTLE_ENDIAN_INT = struct.Struct('<i') ... diff --git a/lib/stream_processing.py b/lib/stream_processing.py --- a/lib/stream_processing.py +++ b/lib/stream_processing.py @@ def determine_detection_state(detection_frames: List[DetectionFrame], detection_ dBFS_frames = [x.dBFS for x in detection_frames if x.dBFS > -70] std_dbFS = np.std(dBFS_frames) + + minimum_dBFS = np.min(dBFS_frames) + + # For noisy signals and for clean signals we need different noise floor and threshold estimation + # Because noisy signals have a lower standard deviation across the signal + # Whereas clean signals have a very clear floor and do not need as high of a threshold + noisy_threshold = False detection_state.expected_snr = math.floor(std_dbFS * 2) - detection_state.expected_noise_floor = np.min(dBFS_frames) + std_dbFS + if detection_state.expected_snr < 25: + noisy_threshold = True + detection_state.expected_noise_floor = minimum_dBFS + std_dbFS + else: + detection_state.expected_noise_floor = minimum_dBFS + for label in detection_state.labels: # Recalculate the duration type every 15 seconds if label.duration_type == "" or len(detection_frames) % round(15 / RECORD_SECONDS): label.duration_type = determine_duration_type(label, detection_frames) - label.min_dBFS = detection_state.expected_noise_floor + detection_state.expected_snr + label.min_dBFS = detection_state.expected_noise_floor + ( detection_state.expected_snr if noisy_threshold else detection_state.expected_snr / 2 ) + detection_state.latest_dBFS = detection_frames[-1].dBFS return detection_state # Approximately determine whether the label in the stream is discrete or continuous @@ -294,7 +307,7 @@ def determine_detection_state(detection_frames: List[DetectionFrame], detection_ # Whereas continuous sounds have a steady stream of energy from a source def determine_duration_type(label: DetectionLabel, detection_frames: List[DetectionFrame]) -> str: label_events = [x for x in detection_frames_to_events(detection_frames) if x.label == label.label] - if len(label_events) < 10: + if len(label_events) < 4: return "" else: # The assumption here is that discrete sounds cannot vary in length much as you cannot elongate the sound of a click for example diff --git a/lib/typing.py b/lib/typing.py index e37b9190..95a4d7dd 100644 --- a/lib/typing.py +++ b/lib/typing.py @@ -47,7 +47,8 @@ class DetectionState: ms_per_frame: int ms_recorded: int advanced_logging: bool - + + latest_dBFS: float expected_snr: float expected_noise_floor: float labels: List[DetectionLabel] \ No newline at end of file
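The threshold estimation that [PATCH 10/15] adds above hinges on one observation: the spread of the per-frame dBFS values tells you whether the stream is noisy or clean. Below is a standalone, hypothetical sketch of that heuristic, with a plain list of dBFS readings standing in for the DetectionFrame objects; the -70 dBFS gate and the SNR cut-off of 25 are taken from the hunk above.

```python
import math
import numpy as np

def estimate_threshold(dBFS_values, gate=-70):
    """Estimate the noise floor and per-label detection threshold from dBFS frames.

    Noisy streams (low spread) get a floor above the minimum and the full SNR
    as offset; clean streams keep the raw minimum and only half the SNR."""
    frames = [x for x in dBFS_values if x > gate]  # ignore near-silent frames
    std = np.std(frames)
    minimum = np.min(frames)
    snr = math.floor(std * 2)
    if snr < 25:  # noisy stream
        floor = minimum + std
        threshold = floor + snr
    else:  # clean stream
        floor = minimum
        threshold = floor + snr / 2
    return floor, threshold

# A clean stream: quiet floor around -60 dBFS with loud, well-separated sounds
print(estimate_threshold([-60, -58, -59, -20, -22, -61, -19, -60]))
```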
From 626cb05c1ad578c48d1716f5e602cc52c15903e4 Mon Sep 17 00:00:00 2001 From: Kevin te Raa Date: Mon, 13 Mar 2023 19:24:05 +0100 Subject: [PATCH 11/15] Fixed recording flow to work with multiple microphones Fixed space and escape flow for recording and their status messages Fixed some crashes in the migration script --- lib/key_poller.py | 5 +- lib/migrate_data.py | 4 +- lib/print_status.py | 27 ++-- lib/record_data.py | 334 ++++++++++++++++++--------------------- lib/stream_processing.py | 2 +- lib/stream_recorder.py | 147 +++++++++++++++++ 6 files changed, 329 insertions(+), 190 deletions(-) create mode 100644 lib/stream_recorder.py diff --git a/lib/key_poller.py b/lib/key_poller.py index b2ea49cb..deff08b9 100644 --- a/lib/key_poller.py +++ b/lib/key_poller.py @@ -30,7 +30,10 @@ def __exit__(self, type, value, traceback): def poll(self): if( IS_WINDOWS == True ): if( msvcrt.kbhit() ): - return msvcrt.getch().decode() + ch = msvcrt.getch() + if ch == b'\xe0' or ch == b'\000': + ch = msvcrt.getch() + return ch.decode() else: dr,dw,de = select.select([sys.stdin], [], [], 0) if not dr == []: diff --git a/lib/migrate_data.py b/lib/migrate_data.py index 98593a90..06b014ca 100644 --- a/lib/migrate_data.py +++ b/lib/migrate_data.py @@ -38,8 +38,10 @@ def migrate_data(): segments_dir = os.path.join(RECORDINGS_FOLDER, label, "segments") if not os.path.exists(segments_dir): os.makedirs(segments_dir) - print( "Resegmenting " + label + "..." ) wav_files = [x for x in os.listdir(source_dir) if os.path.isfile(os.path.join(source_dir, x)) and x.endswith(".wav")] + if len(wav_files) == 0: + continue + print( "Resegmenting " + label + "..." ) progress = 0 progress_chunk = 1 / len( wav_files ) skipped_amount = 0 diff --git a/lib/print_status.py b/lib/print_status.py index 3d9e28b1..01174474 100644 --- a/lib/print_status.py +++ b/lib/print_status.py @@ -19,13 +19,16 @@ def create_progress_bar(percentage: float = 1.0) -> str: filled_characters = round(max(0, min(LINE_LENGTH, LINE_LENGTH * percentage))) return "".rjust(filled_characters, PROGRESS_FILLED).ljust(LINE_LENGTH, PROGRESS_AVAILABLE) -def get_current_status(detection_state: DetectionState, multiplier = 1) -> List[str]: - recorded_timestring = ms_to_srt_timestring( detection_state.ms_recorded * multiplier, False) +def get_current_status(detection_state: DetectionState, extra_states: List[DetectionState] = []) -> List[str]: + total_ms_recorded = detection_state.ms_recorded + for extra_state in extra_states: + total_ms_recorded += extra_state.ms_recorded + recorded_timestring = ms_to_srt_timestring( total_ms_recorded, False) # Quality rating was manually established by doing some testing with added noise # And finding the results becoming worse when the SNR went lower than 10 quality = "" - if detection_state.ms_recorded * multiplier > 10000: + if total_ms_recorded > 10000: if detection_state.expected_snr >= 25: quality = "Excellent" elif detection_state.expected_snr >= 20: @@ -61,26 +64,32@@ "|".ljust(LINE_LENGTH - 2,"-") + "|", "| " + ("Noise floor (dBFS):" + str(round(detection_state.expected_noise_floor)).rjust(LINE_LENGTH - 24)) + " |", "| " + ("SNR:" + str(round(detection_state.expected_snr)).rjust(LINE_LENGTH - 9)) + " |", - ]) + ]) for label in detection_state.labels: # Quantity rating is based on 5000 30ms windows being good enough to train a label from the example model # And 1000 30ms windows being enough to train a label decently # With at least 10 percent extra for a possible hold-out set during training + total_ms_detected = label.ms_detected + for extra_state in extra_states: + for extra_label in extra_state.labels: + if extra_label.label == label.label: + total_ms_detected += extra_label.ms_detected + quantity = "" - if label.ms_detected * multiplier < 16500: + if total_ms_detected < 16500: quantity = "Not enough" - elif label.ms_detected * multiplier > 16500 and label.ms_detected * multiplier < 41250: + elif total_ms_detected > 16500 and total_ms_detected < 41250: quantity = "Sufficient" - elif label.ms_detected * multiplier >= 41250 and label.ms_detected * multiplier < 82500: + elif total_ms_detected >= 41250 and total_ms_detected < 82500: quantity = "Good" - elif label.ms_detected * multiplier >= 82500: + elif total_ms_detected >= 82500: quantity = "Excellent" lines.extend([ "|".ljust(LINE_LENGTH - 2,"-") + "|", "| " + label.label.ljust(LINE_LENGTH - 5) + " |", - "| " + "Recorded: " + ms_to_srt_timestring( label.ms_detected * multiplier, False ).rjust(LINE_LENGTH - 15) + " |", + "| " + "Recorded: " + ms_to_srt_timestring( total_ms_detected, False ).rjust(LINE_LENGTH - 15) + " |", "| " + "Data Quantity: " +
quantity.rjust(LINE_LENGTH - 20) + " |", ]) diff --git a/lib/record_data.py b/lib/record_data.py index 4949819c..89d211a1 100644 --- a/lib/record_data.py +++ b/lib/record_data.py @@ -1,16 +1,7 @@ from config.config import * import pyaudio -import wave import time -from time import sleep -import scipy.io.wavfile -import audioop import math -import numpy as np -from scipy.fftpack import fft -from scipy.fftpack import fftfreq -from scipy.signal import blackmanharris -from lib.machinelearning import get_loudest_freq, get_recording_power import os import glob from queue import * @@ -19,19 +10,19 @@ import sys from lib.listen import validate_microphone_input from lib.key_poller import KeyPoller -from lib.stream_processing import CURRENT_VERSION, CURRENT_DETECTION_STRATEGY, process_audio_frame, post_processing -from lib.srt import persist_srt_file from lib.print_status import get_current_status, reset_previous_lines, clear_previous_lines from lib.typing import DetectionLabel, DetectionState -import struct -lock = threading.Lock() +from lib.stream_processing import CURRENT_VERSION, CURRENT_DETECTION_STRATEGY +from lib.typing import DetectionState, DetectionFrame +from lib.stream_recorder import StreamRecorder +from typing import List # Countdown from seconds to 0 def countdown( seconds ): with KeyPoller() as key_poller: for i in range( -seconds, 0 ): print("recording in... " + str(abs(i)), end="\r") - sleep( 1 ) + time.sleep( 1 ) if( record_controls(key_poller) == False ): return False; print(" ", end="\r") @@ -39,51 +30,93 @@ def countdown( seconds ): def record_controls( key_poller, recordQueue=None ): global currently_recording - global streams - global lock + global recorders ESCAPEKEY = '\x1b' SPACEBAR = ' ' + BACKSPACE = '\x08' character = key_poller.poll() - if(character is not None): - if( character == SPACEBAR ): + if(character is not None): + # Clear the last 5 seconds if backspace was pressed + if character == BACKSPACE: + if (recorders is not None): + for mic_index in recorders: + recorders[mic_index].pause() + should_resume = False + + for mic_index in recorders: + should_resume = recorders[mic_index].clear() + + if should_resume: + for mic_index in recorders: + recorders[mic_index].resume() + elif( character == ESCAPEKEY ): + currently_recording = False + return False + + elif character == SPACEBAR: if( recordQueue == None ): print( "Recording paused!" 
) - with lock: - if( recordQueue != None ): - recordQueue['status'] = 'paused' - if (streams is not None): - for stream in streams: - streams[stream].stop_stream() + main_state = None + secondary_states = [] + if (recorders is not None): + for mic_index in recorders: + if main_state is None: + main_state = recorders[mic_index].get_detection_state() + else: + secondary_states.append(recorders[mic_index].get_detection_state()) + recorders[mic_index].pause() + recorders[mic_index].reset_label_count() + + # Do post processing and printing of the status + if main_state is not None: + index = 0 + for mic_index in recorders: + recorders[mic_index].post_processing( + lambda internal_progress, state, extra=secondary_states: print_status(main_state, extra) + ) + + # Update the states so the numbers count up nicely + if index == 0: + main_state = recorders[mic_index].get_detection_state() + else: + secondary_states[index - 1] = recorders[mic_index].get_detection_state() + index += 1 + main_state.state = "paused" + print_status(main_state, secondary_states) # Pause the recording by looping until we get a new keypress while( True ): - - ## If the audio queue exists - make sure to clear it continuously + # If the audio queue exists - make sure to clear it continuously if( recordQueue != None ): for key in recordQueue: - if not key.startswith('status'): - recordQueue[key].queue.clear() + recordQueue[key].queue.clear() character = key_poller.poll() - if(character is not None and ( recordQueue is None or recordQueue['status'] != 'processing') ): - if( character == SPACEBAR ): - with lock: - if( recordQueue != None ): - recordQueue['status'] = 'recording' - - if (streams is not None): - for stream in streams: - streams[stream].start_stream() + if character is not None: + if character == SPACEBAR: + if main_state is not None: + main_state.state = "recording" + print_status(main_state, secondary_states) + + # Wait for the sound of the space bar to dissipate before continuing recording + time.sleep(0.3) + if recorders is not None: + for mic_index in recorders: + recorders[mic_index].resume() return True - elif( character == ESCAPEKEY ): + # Clear the last 5 seconds if backspace was pressed + elif character == BACKSPACE: + if recorders is not None: + for mic_index in recorders: + recorders[mic_index].clear() + + # Stop the recording session + elif character == ESCAPEKEY: currently_recording = False return False time.sleep(0.3) - elif( character == ESCAPEKEY ): - currently_recording = False - return False return True def record_sound(): @@ -152,16 +185,10 @@ def record_sound(): print("You can pause/resume the recording session using the [SPACE] key, and stop the recording using the [ESC] key" ) - global streams global recordQueue - global audios - global files_recorded - files_recorded = 0 - streams = {} - audios = {} - recordQueue = { - 'status': 'recording' - } + global recorders + recorders = {} + recordQueue = {} labels = [directory] if( countdown( 5 ) == False ): return; @@ -173,146 +200,78 @@ def record_sound(): for index, microphone_index in enumerate(valid_mics): FULL_WAVE_OUTPUT_FILENAME = RECORDINGS_FOLDER + "/" + directory + "/source/mici_" + str(microphone_index) + "__" + time_string + ".wav" SRT_FILENAME = RECORDINGS_FOLDER + "/" + directory + "/segments/mici_" + str(microphone_index) + "__" + time_string + ".v" + str(CURRENT_VERSION) + ".srt" - non_blocking_record(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILENAME, microphone_index, index==0, len(valid_mics)) + non_blocking_record(labels, 
FULL_WAVE_OUTPUT_FILENAME, SRT_FILENAME, microphone_index, index==0) - # wait for stream to finish (5) - while currently_recording: + # wait for stream to finish + while currently_recording == True: time.sleep(0.1) - for microphone_index in valid_mics: - streams['index' + str(microphone_index)].stop_stream() - streams['index' + str(microphone_index)].close() - audios['index' + str(microphone_index)].terminate() + main_state = None + secondary_states = [] + for mic_index in recorders: + if main_state is None: + main_state = recorders[mic_index].get_detection_state() + else: + secondary_states.append(recorders[mic_index].get_detection_state()) + recorders[mic_index].pause() + + index = 0 + for mic_index in recorders: + recorders[mic_index].stop( + lambda internal_progress, state, extra=secondary_states: print_status(main_state, extra) + ) + + # Update the states so the numbers count up nicely + if index == 0: + main_state = recorders[mic_index].get_detection_state() + else: + secondary_states[index - 1] = recorders[mic_index].get_detection_state() + index += 1 + main_state.state = "processed" + print_status(main_state, secondary_states) + # Consumes the recordings in a sliding window fashion - Always combining the two latest chunks together -def record_consumer(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, audio, streams, print_stuff=False, mic_amount = 1): +def record_consumer(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, print_stuff=False): global recordQueue global currently_recording - global files_recorded - indexedQueue = recordQueue['index' + str(MICROPHONE_INPUT_INDEX)] - - amount_of_streams = len(streams) - detection_strategy = CURRENT_DETECTION_STRATEGY - - ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000) - detection_labels = [] - for label in labels: - detection_labels.append(DetectionLabel(label, 0, "", 0, 0, 0, 0)) - detection_state = DetectionState(detection_strategy, "recording", ms_per_frame, 0, True, 0, 0, 0, detection_labels) + global recorders + mic_index = 'index' + str(MICROPHONE_INPUT_INDEX) + indexedQueue = recordQueue[mic_index] + recorder = recorders[mic_index] - audioFrames = [] - false_occurrence = [] - current_occurrence = [] - index = 0 - detection_frames = [] if print_stuff: - current_status = get_current_status(detection_state, mic_amount) + current_status = recorder.get_status() for line in current_status: print( line ) - comparison_wav_file = wave.open(SRT_FILE.replace(".v" + str(CURRENT_VERSION) + ".srt", "_comparison.wav"), 'wb') - totalAudioFrames = [] try: with KeyPoller() as key_poller: - # Write the source file first with the right settings to add the headers, and write the data later - totalWaveFile = wave.open(FULL_WAVE_OUTPUT_FILENAME, 'wb') - totalWaveFile.setnchannels(CHANNELS) - totalWaveFile.setsampwidth(audio.get_sample_size(FORMAT)) - totalWaveFile.setframerate(RATE) - totalWaveFile.close() - - # This is used to modify the wave file directly later - # Thanks to hydrogen18.com for offering the wav file explanation and code - CHUNK_SIZE_OFFSET = 4 - DATA_SUB_CHUNK_SIZE_SIZE_OFFSET = 40 - - LITTLE_ENDIAN_INT = struct.Struct('<i') ... - if( len(totalAudioFrames) >= 15 ): - byteString = b''.join(totalAudioFrames) - totalFrameCount += len(byteString) - totalAudioFrames = [] - appendTotalFile = open(FULL_WAVE_OUTPUT_FILENAME, 'ab') - appendTotalFile.write(byteString) - appendTotalFile.close() - - # Set the amount of frames available and chunk size - # By overriding the header part of the wave file manually - # Which
wouldn't be needed if the wave package supported appending properly - # Thanks to hydrogen18.com for the explanation and code - appendTotalFile = open(FULL_WAVE_OUTPUT_FILENAME, 'r+b') - appendTotalFile.seek(0,2) - chunk_size = appendTotalFile.tell() - 8 - appendTotalFile.seek(CHUNK_SIZE_OFFSET) - appendTotalFile.write(LITTLE_ENDIAN_INT.pack(chunk_size)) - appendTotalFile.seek(DATA_SUB_CHUNK_SIZE_SIZE_OFFSET) - sample_length = 2 * totalFrameCount - appendTotalFile.write(LITTLE_ENDIAN_INT.pack(sample_length)) - appendTotalFile.close() - sleep(0.001) - - detection_state.status = "processing" - if print_stuff: - current_status = get_current_status(detection_state, mic_amount) - reset_previous_lines(len(current_status)) - for line in current_status: - print( line ) - processing = True - comparison_wav_file.setnchannels(1) - comparison_wav_file.setsampwidth(2) - comparison_wav_file.setframerate(RATE) - post_processing(detection_frames, detection_state, SRT_FILE, None, comparison_wav_file) - if print_stuff: - current_status = get_current_status(detection_state, mic_amount) - reset_previous_lines(len(current_status)) - for line in current_status: - print( line ) + # Only listen for keys in the main listener + if print_stuff: + record_controls( key_poller, recordQueue ) + time.sleep(0.001) + except Exception as e: print( "----------- ERROR DURING RECORDING -------------- " ) exc_type, exc_value, exc_tb = sys.exc_info() traceback.print_exception(exc_type, exc_value, exc_tb) - for stream in streams: - streams[stream].stop_stream() - currently_recording = False + currently_recording = -1 def multithreaded_record( in_data, frame_count, time_info, status, queue ): queue.put( in_data ) @@ -320,27 +279,46 @@ def multithreaded_record( in_data, frame_count, time_info, status, queue ): return in_data, pyaudio.paContinue # Records a non blocking audio stream and saves the source and SRT file for it -def non_blocking_record(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, print_logs, mic_amount): +def non_blocking_record(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, print_logs): global recordQueue - global streams - global audios + global recorders mic_index = 'index' + str(MICROPHONE_INPUT_INDEX) - + recordQueue[mic_index] = Queue(maxsize=0) - micindexed_lambda = lambda in_data, frame_count, time_info, status, queue=recordQueue['index' + str(MICROPHONE_INPUT_INDEX)]: multithreaded_record(in_data, frame_count, time_info, status, queue) - audios[mic_index] = pyaudio.PyAudio() - streams[mic_index] = audios[mic_index].open(format=FORMAT, channels=CHANNELS, - rate=RATE, input=True, - input_device_index=MICROPHONE_INPUT_INDEX, - frames_per_buffer=round( RATE * RECORD_SECONDS / SLIDING_WINDOW_AMOUNT ), - stream_callback=micindexed_lambda) + micindexed_lambda = lambda in_data, frame_count, time_info, status, queue=recordQueue[mic_index]: multithreaded_record(in_data, frame_count, time_info, status, queue) + + detection_strategy = CURRENT_DETECTION_STRATEGY + ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000) + detection_labels = [] + for label in labels: + detection_labels.append(DetectionLabel(label, 0, "", 0, 0, 0, 0)) + + audio = pyaudio.PyAudio() + + recorders[mic_index] = StreamRecorder( + audio, + audio.open(format=FORMAT, channels=CHANNELS, + rate=RATE, input=True, + input_device_index=MICROPHONE_INPUT_INDEX, + frames_per_buffer=round( RATE * RECORD_SECONDS / SLIDING_WINDOW_AMOUNT ), + stream_callback=micindexed_lambda), + 
FULL_WAVE_OUTPUT_FILENAME, + SRT_FILE, + DetectionState(detection_strategy, "recording", ms_per_frame, 0, True, 0, 0, 0, detection_labels) + ) - consumer = threading.Thread(name='consumer', target=record_consumer, args=(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, audios[mic_index], streams, print_stuff, mic_amount)) + consumer = threading.Thread(name='consumer', target=record_consumer, args=(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, print_logs)) consumer.setDaemon( True ) consumer.start() - streams[mic_index].start_stream() - + recorders[mic_index].resume() + +def print_status(detection_state: DetectionState, extra_states: List[DetectionState]): + current_status = get_current_status(detection_state, extra_states) + reset_previous_lines(len(current_status)) + for line in current_status: + print( line ) + def validate_microphone_index(audio, input_index): micDict = {'name': 'Missing Microphone index ' + str(input_index)} try: diff --git a/lib/stream_processing.py b/lib/stream_processing.py index 10731c83..f9192fcb 100644 --- a/lib/stream_processing.py +++ b/lib/stream_processing.py @@ -154,7 +154,7 @@ def post_processing(frames: List[DetectionFrame], detection_state: DetectionStat current_label = None detected_label = None - # Recalculate the MS detection and duratoin type + # Recalculate the MS detection and duration type for label in detection_state.labels: label.ms_detected = 0 label.duration_type = determine_duration_type(label, frames) diff --git a/lib/stream_recorder.py b/lib/stream_recorder.py new file mode 100644 index 00000000..ecf53bf4 --- /dev/null +++ b/lib/stream_recorder.py @@ -0,0 +1,147 @@ +from config.config import * +import pyaudio +import struct +import wave +import numpy as np +from lib.print_status import get_current_status +from lib.stream_processing import CURRENT_VERSION, CURRENT_DETECTION_STRATEGY, process_audio_frame, post_processing +from lib.typing import DetectionState, DetectionFrame +from typing import List + +class StreamRecorder: + total_wav_filename: str + srt_filename: str + comparison_wav_filename: str + + audio: pyaudio.PyAudio + stream: pyaudio.Stream + detection_state: DetectionState + + length_per_frame: int + audio_frames: List[np.array] + total_audio_frames: List[np.array] + index: int + detection_frames: List[DetectionFrame] + current_occurrence: List[DetectionFrame] + false_occurrence: List[DetectionFrame] + + def __init__(self, audio: pyaudio.PyAudio, stream: pyaudio.Stream, total_wav_filename: str, srt_filename: str, detection_state: DetectionState): + self.total_wav_filename = total_wav_filename + self.srt_filename = srt_filename + self.comparison_wav_filename = srt_filename.replace(".v" + str(CURRENT_VERSION) + ".srt", "_comparison.wav") + + self.audio = audio + self.stream = stream + self.detection_state = detection_state + self.total_audio_frames = [] + self.audio_frames = [] + self.detection_frames = [] + self.current_occurrence = [] + self.false_occurrence = [] + self.index = 0 + self.length_per_frame = 0 + + # Write the source file first with the right settings to add the headers, and write the data later + totalWaveFile = wave.open(self.total_wav_filename, 'wb') + totalWaveFile.setnchannels(CHANNELS) + totalWaveFile.setsampwidth(audio.get_sample_size(FORMAT)) + totalWaveFile.setframerate(RATE) + totalWaveFile.close() + + # Add a single audio frame to the batch and start processing it + def add_audio_frame(self, frame: List[np.array]): + if self.length_per_frame == 0: + 
self.length_per_frame = len(frame) + + self.index += 1 + self.audio_frames.append(frame) + self.detection_state.ms_recorded += self.detection_state.ms_per_frame + audioFrames, detection_state, detection_frames, current_occurrence, false_occurrence = \ + process_audio_frame(self.index, self.audio_frames, self.detection_state, self.detection_frames, self.current_occurrence, self.false_occurrence) + + self.current_occurrence = current_occurrence + self.false_occurrence = false_occurrence + self.detection_state = detection_state + self.detection_frames = detection_frames + self.audio_frames = audioFrames + self.total_audio_frames.append( audioFrames[-1] ) + + # Append to the total wav file only once every fifteen audio frames + # This is roughly once every 225 milliseconds + if len(self.total_audio_frames) >= 15: + self.persist_total_wav_file() + + def persist_total_wav_file(self): + # This is used to modify the wave file directly + CHUNK_SIZE_OFFSET = 4 + DATA_SUB_CHUNK_SIZE_SIZE_OFFSET = 40 + LITTLE_ENDIAN_INT = struct.Struct('<i') ... + # Clear out the last N seconds and pauses the stream, returns whether it should resume after + def clear(self, seconds: float) -> bool: + should_resume = self.detection_state == "recording" + self.pause() + + # TODO Clear last N seconds in data frames, audio frames etc + + return should_resume + + # Reset the counts of the state so they count up nicely during reprocessing of multiple streams + def reset_label_count(self): + for label in self.detection_state.labels: + label.ms_detected = 0 + + def get_detection_state(self) -> DetectionState: + return self.detection_state + + def get_status(self, detection_states: List[DetectionState] = []) -> List[str]: + return get_current_status(self.detection_state, detection_states) + + # Stop processing the streams and build the final files + def stop(self, callback = None): + self.pause() + self.persist_total_wav_file() + + comparison_wav_file = wave.open(self.comparison_wav_filename, 'wb') + comparison_wav_file.setnchannels(1) + comparison_wav_file.setsampwidth(2) + comparison_wav_file.setframerate(RATE) + post_processing(self.detection_frames, self.detection_state, self.srt_filename, callback, comparison_wav_file) + self.stream.close() + self.audio.terminate() + self.detection_frames = [] + + # Do all post processing related tasks that cannot be done during runtime + def post_processing(self, callback = None, comparison_wav_file: wave.Wave_write = None): + self.persist_total_wav_file() + post_processing(self.detection_frames, self.detection_state, self.srt_filename, callback, comparison_wav_file)
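The StreamRecorder above keeps one growing source .wav per microphone by appending raw PCM bytes and then rewriting two size fields in the RIFF header by hand, the hydrogen18.com trick that the deleted record_consumer code credited. Below is a self-contained sketch of that fix-up, assuming the canonical 44-byte header that Python's wave module writes for 16-bit mono PCM (size fields at offsets 4 and 40); the file name is only an example.

```python
import struct
import wave

CHUNK_SIZE_OFFSET = 4              # RIFF chunk size field
DATA_SUB_CHUNK_SIZE_OFFSET = 40    # data sub-chunk size field
LITTLE_ENDIAN_INT = struct.Struct('<i')

def append_pcm(filename: str, pcm: bytes):
    """Append raw 16-bit PCM to a WAV file and repair its header sizes."""
    with open(filename, 'ab') as f:
        f.write(pcm)
    with open(filename, 'r+b') as f:
        f.seek(0, 2)                    # jump to the end of the file
        chunk_size = f.tell() - 8       # everything after 'RIFF' + the size field itself
        f.seek(CHUNK_SIZE_OFFSET)
        f.write(LITTLE_ENDIAN_INT.pack(chunk_size))
        f.seek(DATA_SUB_CHUNK_SIZE_OFFSET)
        f.write(LITTLE_ENDIAN_INT.pack(chunk_size - 36))  # bytes in the data sub-chunk

# Write an empty header first, as StreamRecorder does, then append audio
out = wave.open("demo.wav", 'wb')
out.setnchannels(1)
out.setsampwidth(2)
out.setframerate(16000)
out.close()
append_pcm("demo.wav", b'\x00\x00' * 16000)  # one second of silence
```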
"auto_dBFS_mend_dBFS_30ms_secondary_dBFS_reject_cont_45ms_repair" diff --git a/lib/migrate_data.py b/lib/migrate_data.py index 06b014ca..05eb626e 100644 --- a/lib/migrate_data.py +++ b/lib/migrate_data.py @@ -1,6 +1,6 @@ from config.config import * import os -from lib.stream_processing import CURRENT_VERSION, process_wav_file +from lib.stream_processing import process_wav_file from lib.print_status import create_progress_bar, clear_previous_lines, get_current_status, reset_previous_lines from .typing import DetectionState import time diff --git a/lib/print_status.py b/lib/print_status.py index 01174474..1b548d48 100644 --- a/lib/print_status.py +++ b/lib/print_status.py @@ -70,21 +70,28 @@ def get_current_status(detection_state: DetectionState, extra_states: List[Detec # Quantity rating is based on 5000 30ms windows being good enough to train a label from the example model # And 1000 30ms windows being enough to train a label decently # With atleast 10 percent extra for a possible hold-out set during training - total_ms_detected = label.ms_detected + total_ms_detected = label.ms_detected + label.previous_detected for extra_state in extra_states: for extra_label in extra_state.labels: if extra_label.label == label.label: - total_ms_detected += extra_label.ms_detected + total_ms_detected += extra_label.ms_detected + extra_label.previous_detected + percent_to_next = 0 quantity = "" if total_ms_detected < 16500: + percent_to_next = (total_ms_detected / 16500 ) * 100 quantity = "Not enough" elif total_ms_detected > 16500 and total_ms_detected < 41250: + percent_to_next = ((total_ms_detected - 16500) / (41250 - 16500) ) * 100 quantity = "Sufficient" elif total_ms_detected >= 41250 and total_ms_detected < 82500: + percent_to_next = ((total_ms_detected - 41250) / (82500 - 41250) ) * 100 quantity = "Good" elif total_ms_detected >= 82500: quantity = "Excellent" + + if percent_to_next != 0: + quantity += " (" + str(round(percent_to_next)) + "%)" lines.extend([ "|".ljust(LINE_LENGTH - 2,"-") + "|", diff --git a/lib/record_data.py b/lib/record_data.py index 89d211a1..3a8693c7 100644 --- a/lib/record_data.py +++ b/lib/record_data.py @@ -15,6 +15,7 @@ from lib.stream_processing import CURRENT_VERSION, CURRENT_DETECTION_STRATEGY from lib.typing import DetectionState, DetectionFrame from lib.stream_recorder import StreamRecorder +from lib.srt import count_total_label_ms, ms_to_srt_timestring from typing import List # Countdown from seconds to 0 @@ -156,6 +157,8 @@ def record_sound(): print("No usable microphones selected - Exiting") return; + ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000) + directory_counts = {} try: if os.path.exists(RECORDINGS_FOLDER): glob_path = RECORDINGS_FOLDER + "/*/" @@ -167,8 +170,17 @@ def record_sound(): # cut off glob path, but leave two more characters # at the start to account for */ # also remove the trailing slash - print(" - ", dirname[len(glob_path) - 2:-1]) + directory_name = dirname[len(glob_path) - 2:-1] + + # Count the currently recorded amount of data + current_count = count_total_label_ms(directory_name, os.path.join(RECORDINGS_FOLDER, directory_name), ms_per_frame) + directory_counts[directory_name] = current_count + time_recorded = " ( " + ms_to_srt_timestring(current_count, False).split(",")[0] + " )" + + print(" - ", directory_name.ljust(30) + time_recorded ) print("") + print("NOTE: It is recommended to record roughly the same amount for each sound") + print("As it will improve the ability for the machine learning models to learn from 
the data") except: # Since this is just a convenience feature, exceptions shall not # cause recording to abort, whatever happens @@ -189,7 +201,9 @@ def record_sound(): global recorders recorders = {} recordQueue = {} - labels = [directory] + labels = {} + labels[directory] = directory_counts[directory] if directory in directory_counts else 0 + if( countdown( 5 ) == False ): return; @@ -291,8 +305,8 @@ def non_blocking_record(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_ detection_strategy = CURRENT_DETECTION_STRATEGY ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000) detection_labels = [] - for label in labels: - detection_labels.append(DetectionLabel(label, 0, "", 0, 0, 0, 0)) + for label in list(labels.keys()): + detection_labels.append(DetectionLabel(label, 0, labels[label], "", 0, 0, 0, 0)) audio = pyaudio.PyAudio() diff --git a/lib/srt.py b/lib/srt.py index 8614cd36..eef85f8f 100644 --- a/lib/srt.py +++ b/lib/srt.py @@ -1,8 +1,9 @@ import time -from config.config import BACKGROUND_LABEL +from config.config import BACKGROUND_LABEL, CURRENT_VERSION from .typing import TransitionEvent, DetectionEvent, DetectionFrame from typing import List import math +import os def ms_to_srt_timestring( ms: int, include_hours=True): if ms <= 0: @@ -36,7 +37,7 @@ def persist_srt_file(srt_filename: str, events: List[DetectionEvent]): srt_file.write( ms_to_srt_timestring(event.start_ms) + " --> " + ms_to_srt_timestring(event.end_ms) + '\n' ) srt_file.write( event.label + '\n\n' ) -def parse_srt_file(srt_filename: str, rounding_ms: int) -> List[TransitionEvent]: +def parse_srt_file(srt_filename: str, rounding_ms: int, show_errors: bool = True) -> List[TransitionEvent]: transition_events = [] positive_event_list = [] @@ -65,7 +66,7 @@ def parse_srt_file(srt_filename: str, rounding_ms: int) -> List[TransitionEvent] if time_start < time_end: positive_event_list.append(str(time_start) + "---" + type_sound + "---start") positive_event_list.append(str(time_end) + "---" + type_sound + "---end") - else: + elif show_errors: print( ".SRT error at line " + str(line_index) + " - Start time not before end time! Not adding this event - Numbers won't be valid!" 
) # Sort chronologically by time @@ -73,7 +74,8 @@ for time_index, time_event in enumerate(positive_event_list): # Remove duplicates if found if time_index != 0 and len(transition_events) > 0 and transition_events[-1].start_index == math.floor(int(time_event.split("---")[0]) / rounding_ms): - print( "Found duplicate entry at second " + str(math.floor(int(time_event.split("---")[0]) / rounding_ms) / 1000) + " - Not adding duplicate") + if show_errors: + print( "Found duplicate entry at second " + str(math.floor(int(time_event.split("---")[0]) / rounding_ms) / 1000) + " - Not adding duplicate") continue; if time_event.endswith("---start"): @@ -93,6 +95,27 @@ return transition_events +def count_total_label_ms(label: str, base_folder: str, rounding_ms: int) -> int: + total_ms = 0 + segments_dir = os.path.join(base_folder, "segments") + if os.path.isdir(segments_dir): + srt_files = [x for x in os.listdir(segments_dir) if os.path.isfile(os.path.join(segments_dir, x)) and x.endswith(".v" + str(CURRENT_VERSION) + ".srt")] + for srt_file in srt_files: + total_ms += count_label_ms_in_srt(label, os.path.join(segments_dir, srt_file), rounding_ms) + return total_ms + +def count_label_ms_in_srt(label: str, srt_filename: str, rounding_ms: int) -> int: + transition_events = parse_srt_file(srt_filename, rounding_ms, False) + total_ms = 0 + start_ms = -1 + for transition_event in transition_events: + if transition_event.label == label: + start_ms = transition_event.start_ms + elif start_ms > -1 and transition_event.label != label: + total_ms += transition_event.start_ms - start_ms + start_ms = -1 + + return total_ms def print_detection_performance_compared_to_srt(actual_frames: List[DetectionFrame], frames_to_read: int, srt_file_location: str, output_wave_file = None): ms_per_frame = actual_frames[0].duration_ms diff --git a/lib/stream_processing.py b/lib/stream_processing.py index f9192fcb..e6c80388 100644 --- a/lib/stream_processing.py +++ b/lib/stream_processing.py @@ -1,5 +1,5 @@ from .typing import DetectionLabel, DetectionFrame, DetectionEvent, DetectionState -from config.config import BACKGROUND_LABEL, RECORD_SECONDS, SLIDING_WINDOW_AMOUNT, RATE +from config.config import BACKGROUND_LABEL, RECORD_SECONDS, SLIDING_WINDOW_AMOUNT, RATE, CURRENT_VERSION, CURRENT_DETECTION_STRATEGY from typing import List import wave import math @@ -9,9 +9,6 @@ from .srt import persist_srt_file, print_detection_performance_compared_to_srt import os -CURRENT_VERSION = 1 -CURRENT_DETECTION_STRATEGY = "auto_dBFS_mend_dBFS_30ms_secondary_dBFS_reject_cont_45ms_repair" - def process_wav_file(input_file, srt_file, output_file, labels, progress_callback = None, comparison_srt_file = None, print_statistics = False): audioFrames = [] wf = wave.open(input_file, 'rb') @@ -26,7 +23,7 @@ detection_labels = [] for label in labels: - detection_labels.append(DetectionLabel(label, 0, "", 0, 0, 0, 0)) + detection_labels.append(DetectionLabel(label, 0, 0, "", 0, 0, 0, 0)) detection_state = DetectionState(detection_strategy, "recording", ms_per_frame, 0, True, 0, 0, 0, detection_labels) false_occurrence = [] @@ -313,7 +310,7 @@ def determine_duration_type(label: DetectionLabel, detection_frames: List[Detect # The assumption here is that discrete sounds cannot vary in length much as you cannot elongate the sound of a click for example # So if the length doesn't vary much, we assume discrete over continuous lengths = [x.end_ms - x.start_ms for x in label_events] - continuous_length_threshold = detection_frames[0].duration_ms * SLIDING_WINDOW_AMOUNT + continuous_length_threshold = 35 return "discrete" if np.std(lengths) < continuous_length_threshold else "continuous" def detection_frames_to_events(detection_frames: List[DetectionFrame]) -> List[DetectionEvent]:
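The duration-type heuristic above comes down to how much the event lengths spread. Here is a rough standalone sketch, using the fixed 35ms spread threshold this patch settles on; the event lengths in the usage lines are made up for illustration.

```python
import numpy as np

def determine_duration_type(event_lengths_ms, minimum_events=4, spread_threshold_ms=35):
    """Guess whether a label is discrete (clicks) or continuous (hisses, vowels).

    Discrete sounds can't be stretched, so their lengths cluster tightly;
    continuous sounds vary with how long the user holds them."""
    if len(event_lengths_ms) < minimum_events:
        return ""  # not enough events to decide yet
    return "discrete" if np.std(event_lengths_ms) < spread_threshold_ms else "continuous"

print(determine_duration_type([45, 45, 60, 45, 60]))       # discrete
print(determine_duration_type([120, 480, 250, 900, 340]))  # continuous
```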
diff --git a/lib/stream_recorder.py b/lib/stream_recorder.py index ecf53bf4..53e9eb9b 100644 --- a/lib/stream_recorder.py +++ b/lib/stream_recorder.py @@ -4,7 +4,7 @@ import wave import numpy as np from lib.print_status import get_current_status -from lib.stream_processing import CURRENT_VERSION, CURRENT_DETECTION_STRATEGY, process_audio_frame, post_processing +from lib.stream_processing import process_audio_frame, post_processing from lib.typing import DetectionState, DetectionFrame from typing import List diff --git a/lib/typing.py b/lib/typing.py index 95a4d7dd..5d485156 100644 --- a/lib/typing.py +++ b/lib/typing.py @@ -33,6 +33,7 @@ class DetectionEvent: class DetectionLabel: label: str ms_detected: int + previous_detected: int duration_type: str min_ms: float From 31e85079c7b659d0318d1719e456b56b132e78d0 Mon Sep 17 00:00:00 2001 From: Kevin te Raa Date: Tue, 14 Mar 2023 13:14:09 +0100 Subject: [PATCH 13/15] Added backspace key to remove audio from recording on the fly --- lib/record_data.py | 117 ++++++++++++++++++++++++++++------------- lib/stream_recorder.py | 52 ++++++++++++++++-- 2 files changed, 130 insertions(+), 39 deletions(-) diff --git a/lib/record_data.py b/lib/record_data.py index 3a8693c7..042cd721 100644 --- a/lib/record_data.py +++ b/lib/record_data.py @@ -41,13 +41,37 @@ def record_controls( key_poller, recordQueue=None ): # Clear the last 5 seconds if backspace was pressed if character == BACKSPACE: if (recorders is not None): + main_state = None + secondary_states = [] for mic_index in recorders: + if main_state is None: + main_state = recorders[mic_index].get_detection_state() + else: + secondary_states.append(recorders[mic_index].get_detection_state()) recorders[mic_index].pause() should_resume = False + # Clear and update the detection states + index = 0 + if main_state is not None: + main_state.state = "deleting" + print_status(main_state, secondary_states) + for mic_index in recorders: - should_resume = recorders[mic_index].clear() + should_resume = recorders[mic_index].clear(3) + if index == 0: + main_state = recorders[mic_index].get_detection_state() + else: + secondary_states[index - 1] = recorders[mic_index].get_detection_state() + index += 1 + print_status(main_state, secondary_states) + + if main_state is not None: + main_state.state = "recording" + print_status(main_state, secondary_states) + # Wait for the sound of the space bar to dissipate before continuing recording + time.sleep(0.3) if should_resume: for mic_index in recorders: recorders[mic_index].resume() @@ -73,6 +97,9 @@ # Do post processing and printing of the status if main_state is not None: index = 0 + main_state.state = "deleting" + print_status(main_state, secondary_states) + for mic_index in recorders: recorders[mic_index].post_processing( lambda internal_progress, state, extra=secondary_states: print_status(main_state, extra) ) @@ ... # Update the states so the numbers count up nicely if index == 0: main_state = recorders[mic_index].get_detection_state() else: secondary_states[index - 1] =
recorders[mic_index].get_detection_state() index += 1 + main_state.state = "paused" print_status(main_state, secondary_states) @@ -107,11 +135,20 @@ for mic_index in recorders: recorders[mic_index].resume() return True - # Clear the last 5 seconds if backspace was pressed + # Clear the last 3 seconds if backspace was pressed elif character == BACKSPACE: - if recorders is not None: + if recorders is not None and main_state is not None: + index = 0 for mic_index in recorders: - recorders[mic_index].clear() + recorders[mic_index].clear(3) + if index == 0: + main_state = recorders[mic_index].get_detection_state() + else: + secondary_states[index - 1] = recorders[mic_index].get_detection_state() + index += 1 + print_status(main_state, secondary_states) + main_state.state = "paused" + print_status(main_state, secondary_states) # Stop the recording session elif character == ESCAPEKEY: currently_recording = False return False @@ -129,34 +166,6 @@ def record_sound(): print( "And record tiny audio files to be used for learning later" ) print( "-------------------------" ) - # Note - this assumes a maximum of 10 possible input devices, which is probably wrong but eh - print("What microphone do you want to record with? ( Empty is the default system mic, [X] exits the recording menu )") - print("You can put a space in between numbers to record with multiple microphones") - for index in range(audio.get_device_count()): - device_info = audio.get_device_info_by_index(index) - if (device_info and device_info['name'] and device_info['maxInputChannels'] > 0): - default_mic = " - " if index != INPUT_DEVICE_INDEX else " DEFAULT - " - host_api = audio.get_host_api_info_by_index(device_info['hostApi']) - host_api_string = " " + host_api["name"] if host_api else "" - print("[" + str(index) + "]" + default_mic + device_info['name'] + host_api_string) - - mic_index_string = input("") - mic_indecis = [] - if mic_index_string == "": - mic_indecis = [str(INPUT_DEVICE_INDEX)] - elif mic_index_string.strip().lower() == "x": - return; - else: - mic_indecis = mic_index_string.split() - valid_mics = [] - for mic_index in mic_indecis: - if (str.isdigit(mic_index) and validate_microphone_index(audio, int(mic_index))): - valid_mics.append(int(mic_index)) - - if len(valid_mics) == 0: - print("No usable microphones selected - Exiting") - return; - ms_per_frame = math.floor(RECORD_SECONDS / SLIDING_WINDOW_AMOUNT * 1000) directory_counts = {} try: @@ -181,6 +190,7 @@ # Since this is just a convenience feature, exceptions shall not print("") print("NOTE: It is recommended to record roughly the same amount for each sound") print("As it will improve the ability for the machine learning models to learn from the data") + print("") except: # Since this is just a convenience feature, exceptions shall not # cause recording to abort, whatever happens @@ -195,7 +205,40 @@ if not os.path.exists(RECORDINGS_FOLDER + "/" + directory + "/source"): os.makedirs(RECORDINGS_FOLDER + "/" + directory + "/source") - print("You can pause/resume the recording session using the [SPACE] key, and stop the recording using the [ESC] key" ) + # Note - this assumes a maximum of 10 possible input devices, which is probably wrong but eh + print("What microphone do you want to record with?
( Empty is the default system mic, [X] exits the recording menu )") + print("You can put a space in between numbers to record with multiple microphones") + for index in range(audio.get_device_count()): + device_info = audio.get_device_info_by_index(index) + if (device_info and device_info['name'] and device_info['maxInputChannels'] > 0): + default_mic = " - " if index != INPUT_DEVICE_INDEX else " DEFAULT - " + host_api = audio.get_host_api_info_by_index(device_info['hostApi']) + host_api_string = " " + host_api["name"] if host_api else "" + print("[" + str(index) + "]" + default_mic + device_info['name'] + host_api_string) + + mic_index_string = input("") + mic_indecis = [] + if mic_index_string == "": + mic_indecis = [str(INPUT_DEVICE_INDEX)] + elif mic_index_string.strip().lower() == "x": + return; + else: + mic_indecis = mic_index_string.split() + valid_mics = [] + for mic_index in mic_indecis: + if (str.isdigit(mic_index) and validate_microphone_index(audio, int(mic_index))): + valid_mics.append(int(mic_index)) + + if len(valid_mics) == 0: + print("No usable microphones selected - Exiting") + return; + + print("") + print("Record keyboard controls:") + print("[SPACE] is used to pause and resume the recording session") + print("[BACKSPACE] removes the last 3 seconds of the recording") + print("[ESC] stops the current recording") + print("") global recordQueue global recorders @@ -231,8 +274,9 @@ def record_sound(): index = 0 for mic_index in recorders: + callback = None if currently_recording == -1 else lambda internal_progress, state, extra=secondary_states: print_status(main_state, extra) recorders[mic_index].stop( - lambda internal_progress, state, extra=secondary_states: print_status(main_state, extra) + callback ) # Update the states so the numbers count up nicely @@ -242,8 +286,9 @@ def record_sound(): secondary_states[index - 1] = recorders[mic_index].get_detection_state() index += 1 - main_state.state = "processed" - print_status(main_state, secondary_states) + if currently_recording != -1: + main_state.state = "processed" + print_status(main_state, secondary_states) # Consumes the recordings in a sliding window fashion - Always combining the two latest chunks together def record_consumer(labels, FULL_WAVE_OUTPUT_FILENAME, SRT_FILE, MICROPHONE_INPUT_INDEX, print_stuff=False): diff --git a/lib/stream_recorder.py b/lib/stream_recorder.py index 53e9eb9b..d74935c7 100644 --- a/lib/stream_recorder.py +++ b/lib/stream_recorder.py @@ -2,11 +2,13 @@ import pyaudio import struct import wave +import math import numpy as np from lib.print_status import get_current_status from lib.stream_processing import process_audio_frame, post_processing from lib.typing import DetectionState, DetectionFrame from typing import List +import io class StreamRecorder: total_wav_filename: str @@ -93,7 +95,7 @@ def persist_total_wav_file(self): appendTotalFile.seek(CHUNK_SIZE_OFFSET) appendTotalFile.write(LITTLE_ENDIAN_INT.pack(chunk_size)) appendTotalFile.seek(DATA_SUB_CHUNK_SIZE_SIZE_OFFSET) - sample_length = 2 * ( (self.index ) * self.length_per_frame ) + sample_length = 2 * ( self.index * self.length_per_frame ) appendTotalFile.write(LITTLE_ENDIAN_INT.pack(sample_length)) appendTotalFile.close() @@ -104,15 +106,59 @@ def resume(self): def pause(self): self.stream.stop_stream() + self.index -= len(self.total_audio_frames) self.total_audio_frames = [] self.audio_frames = [] # Clear out the last N seconds and pauses the stream, returns whether it should resume after def clear(self, seconds: float) -> bool: - 
should_resume = self.detection_state == "recording" + should_resume = self.detection_state.state != "paused" self.pause() - # TODO Clear last N seconds in data frames, audio frames etc + ms_per_frame = self.detection_state.ms_per_frame + frames_to_remove = math.floor(seconds * 1000 / ms_per_frame) + clear_file = False + if (self.index < frames_to_remove): + clear_file = True + self.index -= self.index if clear_file else frames_to_remove + self.current_occurrence = [] + self.false_occurrence = [] + + self.detection_frames = self.detection_frames[:-frames_to_remove] + self.detection_state.ms_recorded = len(self.detection_frames) * ms_per_frame + for label in self.detection_state.labels: + label.ms_detected = 0 + for frame in self.detection_frames: + if frame.label == label.label: + label.ms_detected += ms_per_frame + + # Just completely overwrite the file if we go back to the start for simplicities sake + if clear_file: + totalWaveFile = wave.open(self.total_wav_filename, 'wb') + totalWaveFile.setnchannels(CHANNELS) + totalWaveFile.setsampwidth(self.audio.get_sample_size(FORMAT)) + totalWaveFile.setframerate(RATE) + totalWaveFile.close() + + # Truncate the frames from the total wav file + else: + with open(self.total_wav_filename, 'r+b') as f: + # Drop the last N bytes from the file + f.seek(-frames_to_remove * self.length_per_frame, io.SEEK_END) + f.truncate() + + # Overwrite the total recording length + CHUNK_SIZE_OFFSET = 4 + DATA_SUB_CHUNK_SIZE_SIZE_OFFSET = 40 + LITTLE_ENDIAN_INT = struct.Struct('<i') ... From: Kevin te Raa Date: Tue, 14 Mar 2023 14:32:27 +0100 Subject: [PATCH 14/15] Minor changes for linux usability --- lib/record_data.py | 9 +++++---- lib/signal_processing.py | 6 +----- lib/stream_recorder.py | 4 +++- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/lib/record_data.py b/lib/record_data.py index 042cd721..6e4976dc 100644 --- a/lib/record_data.py +++ b/lib/record_data.py @@ -35,11 +35,12 @@ def record_controls( key_poller, recordQueue=None ): ESCAPEKEY = '\x1b' SPACEBAR = ' ' BACKSPACE = '\x08' + MINUS = '-' character = key_poller.poll() if(character is not None): - # Clear the last 3 seconds if backspace was pressed - if character == BACKSPACE: + # Clear the last 3 seconds if backspace was pressed + if character == BACKSPACE or character == MINUS: if (recorders is not None): main_state = None secondary_states = [] @@ -136,7 +137,7 @@ recorders[mic_index].resume() return True # Clear the last 3 seconds if backspace was pressed - elif character == BACKSPACE: + elif character == BACKSPACE or character == MINUS: if recorders is not None and main_state is not None: index = 0 for mic_index in recorders: @@ -236,7 +237,7 @@ print("") print("Record keyboard controls:") print("[SPACE] is used to pause and resume the recording session") - print("[BACKSPACE] removes the last 3 seconds of the recording") + print("[BACKSPACE] or [-] removes the last 3 seconds of the recording") print("[ESC] stops the current recording") print("")
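The clear() method shown above is the inverse of the append trick: chop the last N seconds of samples off the end of the file, then shrink the same two header fields. Below is a hypothetical sketch of that truncation for a 16-bit mono WAV, again assuming the canonical 44-byte header; the file name and sample rate are only examples.

```python
import io
import struct

CHUNK_SIZE_OFFSET = 4
DATA_SUB_CHUNK_SIZE_OFFSET = 40
LITTLE_ENDIAN_INT = struct.Struct('<i')

def truncate_wav(filename: str, seconds: float, sample_rate: int = 16000):
    """Remove the last `seconds` of 16-bit mono audio and fix the header sizes."""
    bytes_to_remove = int(seconds * sample_rate) * 2  # two bytes per sample
    with open(filename, 'r+b') as f:
        f.seek(0, io.SEEK_END)
        data_bytes = f.tell() - 44
        bytes_to_remove = min(bytes_to_remove, data_bytes)  # never cut into the header
        f.seek(-bytes_to_remove, io.SEEK_END)
        f.truncate()
        f.seek(0, io.SEEK_END)
        chunk_size = f.tell() - 8
        f.seek(CHUNK_SIZE_OFFSET)
        f.write(LITTLE_ENDIAN_INT.pack(chunk_size))
        f.seek(DATA_SUB_CHUNK_SIZE_OFFSET)
        f.write(LITTLE_ENDIAN_INT.pack(chunk_size - 36))

truncate_wav("demo.wav", 3)  # drop the last three seconds, as BACKSPACE does
```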
diff --git a/lib/signal_processing.py b/lib/signal_processing.py index ea0ba3bf..bdb86213 100644 --- a/lib/signal_processing.py +++ b/lib/signal_processing.py @@ -8,11 +8,7 @@ import os from config.config import RATE -# When converting to ints from bytes, Windows uses a 32 bit number. -# Other OSes use the bytes shown. So for Windows we need different calculations for frame count -# ( https://stackoverflow.com/questions/72482769/numpy-returns-different-results-on-windows-and-unix ) -long_byte_size = 4 if os.name == 'nt' else 2 - +long_byte_size = 4 _mfscs = {} # Determine the decibel based on full scale of 16 bit ints ( same as Audacity ) diff --git a/lib/stream_recorder.py b/lib/stream_recorder.py index d74935c7..8de924d9 100644 --- a/lib/stream_recorder.py +++ b/lib/stream_recorder.py @@ -73,7 +73,7 @@ def add_audio_frame(self, frame: List[np.array]): if len(self.total_audio_frames) >= 15: self.persist_total_wav_file() - def persist_total_wav_file(self): + def persist_total_wav_file(self): # This is used to modify the wave file directly CHUNK_SIZE_OFFSET = 4 DATA_SUB_CHUNK_SIZE_SIZE_OFFSET = 40 @@ -177,6 +177,8 @@ def get_status(self, detection_states: List[DetectionState] = []) -> List[str]: def stop(self, callback = None): self.pause() self.persist_total_wav_file() + if self.index == 0: + os.remove(self.total_wav_filename) comparison_wav_file = wave.open(self.comparison_wav_filename, 'wb') comparison_wav_file.setnchannels(1) From 03f85d6f1f788ee127afbe7c13ca387817edaf8b Mon Sep 17 00:00:00 2001 From: Kevin te Raa Date: Tue, 14 Mar 2023 14:52:11 +0100 Subject: [PATCH 15/15] Updated the recording documentation --- docs/RECORDING.md | 25 ++++++++++++++-------- docs/media/settings-compare-detection.png | Bin 0 -> 38396 bytes docs/media/settings-record-progress.png | Bin 15915 -> 13748 bytes docs/media/settings-record.png | Bin 7473 -> 16059 bytes 4 files changed, 16 insertions(+), 9 deletions(-) create mode 100644 docs/media/settings-compare-detection.png diff --git a/docs/RECORDING.md b/docs/RECORDING.md index 27c18f65..dee89eea 100644 --- a/docs/RECORDING.md +++ b/docs/RECORDING.md @@ -5,26 +5,33 @@ In order to train a model, you need to record sounds first. You can do this by r ![Installing packages](media/settings-record.png) -This script will record sounds in seperate files of 30 milliseconds each and save them in your recordings folder ( data/recordings is the default place, which can be changed in the data/code/config.py file using the examples in lib/default_conifg.py ). +This script will record your microphone and save the detected areas inside of an SRT file. It will record in overlapping segments of 30 milliseconds. You have to be sure to record as little noise as possible. For example, if you are recording a bell sound, it is imperative that you only record that sound. -If you accidentally recorded a different sound, you can always delete the specific file from the recordings directory. + ![Installing packages](media/settings-record-progress.png) -In order to make sure you only record the sound you want to record, you can alter the power setting at the start. I usually choose a value between 1000 and 2000. -You can also trim out stuff below a specific frequency value. Neither the intensity, power or the frequency values I am using is actually an SI unit like dB or Hz, just some rough calculations which will go up when the loudness or frequency goes up. +During the recording, you can also pause the recording using SPACE or quit it using ESC. +If you feel a sneeze coming up, or a car passes by, you can press these keys to make sure you don't have to remove data. +If you did accidentally record a different sound, you can always press BACKSPACE or - to remove some data from the recording. -During the recording, you can also pause the recording using SPACE or quit it using ESC.
If you feel a sneeze coming up, or a car passes by, you can press these keys to make sure you don't have to prune away a lot of files. +You can look at the 'Recorded' part during the recording session to see how much of your sound has been detected. ### Amount of data needed -I found that you need around 30 seconds of recorded sound, roughly 1000 samples, to get a working recognition of a specific sound. Depending on the noise it would take between a minute and two minutes to record the sounds ( there are less samples to pick from with short sounds like clicks, whereas longer sounds like vowels give more samples ). -You will start getting diminishing returns past two and a half minutes of recorded sound ( 5000 samples ), but the returns are still there. As of the moment of this writing, I used 15000 samples for the Hollow Knight demo. +The Data quantity part of the recording shows you whether we think you have enough data for a model. +The minimum required is about 16 seconds, 41 seconds is a good amount, and anything above 1 minute 22 seconds is considered excellent. +You will start getting diminishing returns after that, but the returns are still there. I used about 4 minutes per sound for the Hollow Knight demo. You can try any amount and see if they recognize well. -From this version onward, there will also be full recordings of the recording session saved in the source directory inside of the sound you are recording. This might come in handy when we start adding more sophisticated models in the future. +If you want the model to do well, you should aim to have about the same amount of recordings for every sound you record. + +### Checking the quality of the detection + +If you want to see if the detection was alright, you can either open up the SRT file inside the segments folder of your recorded sound and compare it to the source file, or use the comparison.wav file inside of the segments folder. +If you place both the source file and the comparison.wav file inside a program like Audacity, you can see the spots where it detected a sound. -You can use these source files to resegment the recordings you have made as well, by using the [V] menu at the start and then navigating to [S]. This will reuse the source files available to read out the wav data and persist them inside the data/output folder. +![Audacity comparing detection](media/settings-compare-detection.png) ### Background noise diff --git a/docs/media/settings-compare-detection.png b/docs/media/settings-compare-detection.png new file mode 100644 index 0000000000000000000000000000000000000000..e68cb5cd6280cdcc86462392209e2b10d7034996 GIT binary patch literal 38396
diff --git a/docs/media/settings-compare-detection.png b/docs/media/settings-compare-detection.png
new file mode 100644
index 0000000000000000000000000000000000000000..e68cb5cd6280cdcc86462392209e2b10d7034996
GIT binary patch
[ literal 38396 -- base85 binary image data omitted ]

diff --git a/docs/media/settings-record-progress.png b/docs/media/settings-record-progress.png
index 50de8a14c344c9d7000f73665b8c45a6dd34f445..5f7cb9bef17e9ec00077df8177f54ec431f0be26 100644
GIT binary patch
[ literal 13748 -- base85 binary image data omitted ]

diff --git a/docs/media/settings-record.png b/docs/media/settings-record.png
index b21cb90ef32b41e38bfef9730a1f916ab399246f..c9b4e432d02d869735d49608874ae914eafba725 100644
GIT binary patch
[ literal 16059 / literal 7473 -- base85 binary image data omitted, truncated in source ]
zKNi0g97YLvM2qT%rCZytG#E+bl31=?Z$e2;j~0_R#u)pazpydeo|N3 zW$5sp0YuLnov$s)&LyZ#v>4gzR`vW zrkc(U)eUW5aPLh&8A9OBnvswh!>F#Ge=LF%RFNJ=L;9)pEPEh1*DxmjZguC+SJfXu zkQO5zqT+CQybWkk>K>mi&X_yQn~5ZsI>=M2iqyPZ-$o~|?s*xbL?4Na%(A8k& z)_N)@hd9QqHIOHU^Tm zR7lOENZX$ngbPk9TIf3E^Tl*v^kTYD4mkEXFO>UKF0`}tr6FVbNe08?8ZnE1pL49l zN~5N@nuQGlpglW`s`isyZ{<4kcWARsw(qAxJ{01m*Cd^t% zSEun7$Cjc@LT4M=_LPtWdQ{B)(1@4Wp1$AbsYHwO{9iX8!%M8I4aoVQ1{IX?lw&ME zg-`t$dBcP&k&HvPmiCBpc4+gjCb0{i^J#n=gH35*Qtvfp&`5Rg%z_8h8mvvE(VR`o znu&{Mm30x;`sjGS0|^J3-8BC4ot-=N?!myG6PeS?$t<2n8pznXRO1I?9Bfb$;^Ph?Z7Rz3T|WI77VONSvBfQP~S3K#*A%rE?@?;>axw8J5Nt`y z7VlRhfDQH>t-i@@n93<+_c}ae-`<=<_*(cax$W*Hp12AjnFRm-GK5av2nl)tYuM!B z)b94?hnPxda9NYjCw&=+zlUMQimuD7Sh!2&n%r^`{!sAWj90;t^FV~Cxa*RQjvHK_ z-NUatYCrJv>BG>?YM{)}-+jh6Uxx0G*fJb!r&ww0rtJ^U9Vx_&&i|G4r}ziG%Unv{ zvt-)z;npMVP<654~Af6ApJ z52VU^WZRns_FzB>bNo_k$S_+r{;NPbfRDaH1KKOM%K*Ky()L3ial?C|(|*T3+bE3H zulh6>*(>QXU)`IxKiP7f@TH!J*J9P8<897uP5D!zr)BM@lIW>Ze;kV*_NK_M$ZSP=2e<)(up{`-QH0&?_s{XBHCE2<0$#3#*J_D zjXKMko%p7%jlf+nZUi?mh$nK1edL0+@T0p3t>b}}a+#+CnEY0T{tRwypS$KFlM7PI zTB~_T$hz61eA5EV3f#3~`Pqy%BtE{sGTpx`ATocreR5d+)h2b5GYVw>c_KF79sI7y zJblx{EV=1aB!W~TSaz@3?zB)iL z=?p1L{Tyc^{Lr%^obO9`;A@MEVZ{149a8MXZ=n0EjgL?pXzlQ|Bww75)$!S0Rd7ux1C@U&}Yr9h|ht5qS!0t^xUyb4M4iA5+t@^_^&aWBrW zs~J`0ZszhYMy5~Dr^pc);GFCLdlvOKifnM`fXYQ4i(AevY#iYkWFpE%$DeFOS;Pic zl~WcSLt@m8eY&5|mnC_qCh3+p@-?sKv1Ud@PEe{`Tc$IY?c8L3w%V!>_Ru+rh_(Gy(}n86`K+V~rHX!IwBr@0$>Zg& z@Zh3QrycLB89T_*;z2qwP zNrwE=k65yhnxV13ij&ct6xSgTdvd_E(Wg8(mls_A^5b3#(M+XN}FkwQn?r;jIqbK0-miTt*J2Flk@1h=Hi1@U1Q(5?5=}{PBI6+_=c4MTd6pI8zHPX+3;s4D9+Pa zqXuxsW@tgOIWggVIKGG-pg0QU-y5yQok)yLd>_oneB)-`kXxosr#?Xl>AuS>sZwFm zB)x7qS#Ys?W5E4`jZhPF^j(Pz`IcW5mQ-8-T&H0kXp}6i)aHKOc;1uY;F+SO zDmZOgPMF}TX?L20a^a_X4rtjWgpeD5Wf)}we#BM0@EB2uY@uPYr-zj13W6Dx`HTh* z6LE1qnTbljk87c9PHJRz+7Zs12!91+U8u8Bw)8Z$Yd7JnZ^h>X9{xR(*jtdC6La-Z z+ph~&iKB;)uqr@e??axpUGg)i)gm;sq^>5_$~D2Her$u0s@SJMC}G7L0uF0>CRvXf z>~@dd|1XM(8SVHl-u%Bq+P^ecF07nX@2m}g=i`+Qi8ohVZMzd^r+UbwmkVX<+t{@d7sefd zSZIPk_JYtYKi7p{FPYI6ua7?zhf0YH0)#Utby$-MmWNoe6DerkCS?K8_jKb1QM0I2 zT-haE3a*@oyWDoh4Vc{`VCHe5jM9_WiNqVA0UPkK_kjbJS!b-&CURdc=Zt}R_3wZr zOrym=-s`azJ3Pg1%by-^uICwg7qGL5%N-aA?^ z%|1%uk+-6iDCXQ?y9dsFZ!4=iDCKpWNTI;S_!dyZr+SAQrJwtK6!l4?L;W5^mwLH{ z9(QR0YKsbeOIp}=;wSX?>zm=TGporx>>oYhkIr0EyPcI%;(C1{dDZizM*uc-W69(R zK}9MhJ(*T7${pEe+%XRDOzSu<6Z=+GCE&}+%ep}{S?gavzNSKO6KcVuh|}262I0cT zN4SkRI@q%GaZd>K*YbqzikzZ(Kvui4d_1yAXDRIcW&1$6aVgeY?xe1yBndn~JF(S; zYt>?5fcw_jzKt3aRUWTt&Y!N?(HaV5os#h&_Wkq`vb{ah2C9yjKQs`Z3nn+Ir^aR? 
z=m+)6OQcPe-`^TEAna2!JyUJ$&LJVtSqU$@R{SFaL&2cm^H$L*h$<;NN68V#PSUFV zidoxf4|5CqxHUA#%e|Sk{Mm`|C&TtLB@L!BS!+F0J#FrCE7Uw7-iNCpB)6U_qsu31 z2Lujm8I<6R(ba<*&E9xsyzA(hG=CNNxv0N5C690h>#*0b;H}So@3Kqflt*Q5(-)kv zKqKV=O_Gdzzqj^j*T7q4krg`S`CDD}tvgup?rh&G4co71kjhBwpO3OHn*pg4c4w?_ z7+&+A9MYM%Hfu*e_@HdTH}rQE9fJ)Do#=jQ=?;V6l;*6Ke4d~R zKKPHLYS1C((7HrJBjajSdfQ4&`{MPzXR{J>Q^xg^TR~MjmoQI!7;e?@WoHxGUvKTL zaHi=B@=dAN>~G}yD%it{H{+3^2hEUG#VO&v_fSSEQmO; zw7SQ9dT@}S9|9M6`8xPk{P-LJf+s(7s_Z)(mY`_uniiWfQ1D7Nd*!Lm3U&7>v3j`t z15b0R4-MxLijmQ;7HpG-H??|CQR$D-*nRtMwZ8Dtlsh@P{9fw7?+Txkmo@C&5!4r48*-`mY>_9VhrjH)o8Rle zj8uk2Vg;5>*n(IyGU|;Lp-F+Y7I1TX?8DemIq=Kg*uCS5_O#6!)h}M&wL(|)+Cf&e zIFxeR+w>GNp}vrFxD5l{y2zlh*VkQCjx1CU^gJyaB{{U%+UfPBPR+L;z`ttyR)IM* zrmPP;RgKi5DVQRe_(T;PB-o3Jcg)x7yXHU2*%V4QEfPNEw%k}3f9DJi0#BJP5YklV;$Sao|YGvcnMm zzEfbp=BK#O2+cIeRXu7HF3DRs{WfcZ=h{*InJWH6X||9*u)(VuUTO<{Bn~|=_*JB4 zCs&Q4ByN-#i;s;3B?DQ&v-*`o_Mp|K1z~!|PSX<6O%v`fCrXr}=p0<%EPKcb#hdtL z?QyuW(wOsn_?du_bBC`Gc~uEhSJ~#LtmfVXROep?CQ4GS|FfW9AQ9J62)7Be!6p}E zXYhit6)FQ`y3nf+RS9@D&K}YH@k~%x%yixdg~X05y%KP$ZwfPp7rczs zqMq93;Ysp=WAffM&{bO9(GuKj7`zimc3MrhwGfmE-uotmK=u4E#{I$zU704iB_k~+ zTem;Xz67PDdoG9gYdyc_1W?@QMCwmD+^Odub~J{|Py zuDjhOW(fAdp>h+?n&;nYR$BOF8(nqt58+#HsOHAToN%`5BQL>M3NZN?Mn(TRb z+FHJhlPAGcA9sAtf>+B~rX8c&J&q^_DNc*`4f|?-goI0QZ`?*wed3tMdvPZgh0lc_ zyyGIXo|sPYLXa?(#9d=?3!FYM_ly(OPw_w#k~-rt90EP(bNqSKZ5H5RkJ?49yQx4v z2Xb*Rd2N3D3e9opX?&0F@l2WYlz7DpWx40_1@Bl$EAmw#FOhY!;IpF{0~K6S!Z!Yj z!t#g2R{I1OvSM(TAE#TtRELS{U;CD!dZsBVsPOuel%2K=l*r=1b{N;}=*I1Sddddx zKzW%Mc(85gKLzG(JB_*kb&}K+U0ZoIwU&$3ttO0H zxKHEfS{IcL9z|{IO^#(syS^6r`(;&bNbW?R++?|k-Of3vC~Q3nG~VZIo{HSaNQzu%;KyZ?gAe0E&nJEW zF2%ThoMeus@mxtuWt^UJ%3i3(Un+O)J`i7UkAPmg$pFn*TQ{SAmSS@j&nn z{?7uzk&5I!?|B|=*H6zF9<`qeXa#Nd!})KzE*yTCs#~Rhcri^_c?)k=D$T9S5H}E{ zi@wh`8r4eAwS+U1S+jIk5*E0u&uC-STLu3@^-S;Hn8U%6!#iT;;PuT55aN=e{MQ3o zl%3t@ZZ-j+h&qSFJw`t<>RzTTSQq32!L73o!7WP;4*m>$cHi