From 1bf4d1edd7dcd6782302f3bd16807421cd0253e8 Mon Sep 17 00:00:00 2001
From: HighExecutor <1123581321qwe>
Date: Mon, 9 Dec 2019 20:02:47 +0300
Subject: [PATCH] cleanup of old repository
---
.gitignore | 8 +
README.md | 1 +
resources/CyberShake_100.xml | 884 ++
resources/CyberShake_1000.xml | 8992 +++++++++++
resources/CyberShake_100_2.xml | 895 ++
resources/CyberShake_30.xml | 266 +
resources/CyberShake_50.xml | 442 +
resources/CyberShake_50_sweep.xml | 441 +
resources/CyberShake_75.xml | 673 +
resources/Epigenomics_100.xml | 825 +
resources/Epigenomics_24.xml | 198 +
resources/Epigenomics_46.xml | 380 +
resources/Epigenomics_72.xml | 594 +
resources/Epigenomics_997.xml | 8185 ++++++++++
resources/Inspiral_100.xml | 1027 ++
resources/Inspiral_1000.xml | 10332 +++++++++++++
resources/Inspiral_30.xml | 312 +
resources/Inspiral_50.xml | 522 +
resources/Inspiral_72.xml | 742 +
resources/Montage_100.xml | 1227 ++
resources/Montage_1000.xml | 12633 ++++++++++++++++
resources/Montage_25.xml | 277 +
resources/Montage_250.xml | 3458 +++++
resources/Montage_30.xml | 423 +
resources/Montage_40.xml | 426 +
resources/Montage_5.xml | 39 +
resources/Montage_50.xml | 588 +
resources/Montage_500.xml | 6960 +++++++++
resources/Montage_75.xml | 1000 ++
resources/Sipht_100.xml | 6406 ++++++++
resources/Sipht_30.xml | 2042 +++
resources/Sipht_60.xml | 4198 +++++
resources/Sipht_73.xml | 4367 ++++++
resources/Sipht_79.xml | 6186 ++++++++
resources/generated/CyberShake_100.xml | 266 +
resources/generated/CyberShake_30.xml | 266 +
resources/generated/CyberShake_40.xml | 266 +
resources/generated/CyberShake_50.xml | 266 +
resources/generated/CyberShake_60.xml | 266 +
resources/generated/CyberShake_65.xml | 266 +
resources/generated/CyberShake_70.xml | 266 +
resources/generated/CyberShake_75.xml | 266 +
resources/generated/CyberShake_80.xml | 266 +
resources/generated/CyberShake_90.xml | 266 +
resources/generated/Montage_100.xml | 426 +
resources/generated/Montage_250.xml | 3458 +++++
resources/generated/Montage_30.xml | 423 +
resources/generated/Montage_40.xml | 426 +
resources/generated/Montage_50.xml | 423 +
resources/generated/Montage_500.xml | 6960 +++++++++
resources/generated/Montage_60.xml | 429 +
resources/generated/Montage_70.xml | 429 +
resources/generated/Montage_80.xml | 423 +
resources/generated/Montage_90.xml | 432 +
resources/new_generated/CyberShake_100.xml | 895 ++
resources/new_generated/CyberShake_30.xml | 265 +
resources/new_generated/CyberShake_50.xml | 445 +
resources/new_generated/CyberShake_75.xml | 673 +
resources/small_bad.xml | 16 +
src/algs/SimpleRandomizedHeuristic.py | 134 +
src/algs/common/MapOrdSchedule.py | 107 +
src/algs/common/NewSchedulerBuilder.py | 254 +
src/algs/common/ScheduleBuilder.py | 256 +
src/algs/common/__init__.py | 1 +
src/algs/common/individuals.py | 42 +
src/algs/common/particle_operations.py | 187 +
src/algs/common/utilities.py | 106 +
src/algs/ga/GAFunctions2.py | 279 +
src/algs/ga/GAImpl.py | 366 +
src/algs/ga/GARunner.py | 178 +
src/algs/ga/common_fixed_schedule_schema.py | 197 +
src/algs/ga/nsga2.py | 66 +
src/algs/gsa/SimpleGsaScheme.py | 116 +
src/algs/gsa/heftbasedoperators.py | 108 +
src/algs/gsa/operators.py | 113 +
src/algs/gsa/ordering_mapping_operators.py | 95 +
src/algs/gsa/setbasedoperators.py | 43 +
src/algs/heft/DSimpleHeft.py | 84 +
src/algs/heft/DeadlineHeft.py | 63 +
src/algs/heft/HeftHelper.py | 163 +
src/algs/heft/PublicResourceManager.py | 58 +
src/algs/heft/simple_heft.py | 169 +
src/algs/peft/DSimplePeft.py | 80 +
src/algs/peft/PeftHelper.py | 164 +
src/algs/peft/PublicResourceManager.py | 58 +
src/algs/peft/simple_peft.py | 165 +
src/algs/pso/gapso.py | 28 +
src/algs/pso/mapping_operators.py | 57 +
src/algs/pso/ordering_operators.py | 117 +
src/algs/pso/rdpso/mapordschedule.py | 110 +
src/algs/pso/rdpso/mapping_operators.py | 61 +
src/algs/pso/rdpso/ordering_operators.py | 112 +
src/algs/pso/rdpso/particle_operations.py | 163 +
src/algs/pso/rdpso/rdpso.py | 249 +
src/algs/pso/rdpsoOrd/mapordschedule.py | 118 +
src/algs/pso/rdpsoOrd/mapping_operators.py | 57 +
src/algs/pso/rdpsoOrd/ordering_operators.py | 115 +
src/algs/pso/rdpsoOrd/particle_operations.py | 160 +
src/algs/pso/rdpsoOrd/rdpso.py | 164 +
src/algs/pso/sdpso.py | 115 +
src/algs/sa/SimulatedAnnealingScheme.py | 47 +
src/algs/sa/mappingops.py | 68 +
.../CommonComponents/ExperimentalManager.py | 114 +
src/core/CommonComponents/utilities.py | 15 +
src/core/environment/BaseElements.py | 384 +
src/core/environment/DAXExtendParser.py | 132 +
src/core/environment/DAXParser.py | 66 +
src/core/environment/ResourceGenerator.py | 19 +
src/core/environment/ResourceManager.py | 271 +
src/core/environment/Utility.py | 399 +
src/experiments/aggregate_utilities.py | 148 +
src/experiments/common.py | 74 +
.../comparison_experiments/GAvsHEFT.py | 125 +
.../comparison_experiments/HEFTvsPEFT.py | 86 +
.../comparison_experiments/HeftOnly.py | 20 +
.../comparison_experiments/OMPSOvsHEFT.py | 50 +
.../comparison_experiments/OMPSOvsRDPSO.py | 218 +
.../OMPSOvsRDPSO_onlyOrd.py | 159 +
src/experiments/ga/ga_base_experiment.py | 150 +
src/experiments/gsa/gsa_base_experiment.py | 96 +
src/experiments/gsa/omgsa_base_experiment.py | 100 +
src/experiments/pso/gapso_base_experiment.py | 164 +
src/experiments/pso/ompso_base_experiment.py | 116 +
src/experiments/pso/rdpso_base_experiment.py | 107 +
.../pso/rdpso_base_experiment_ordering.py | 100 +
src/experiments/pso/sdpso_base_experiment.py | 83 +
src/experiments/sa/sa_base_experiment.py | 76 +
src/settings.py | 11 +
128 files changed, 113408 insertions(+)
create mode 100644 .gitignore
create mode 100644 README.md
create mode 100644 resources/CyberShake_100.xml
create mode 100644 resources/CyberShake_1000.xml
create mode 100644 resources/CyberShake_100_2.xml
create mode 100644 resources/CyberShake_30.xml
create mode 100644 resources/CyberShake_50.xml
create mode 100644 resources/CyberShake_50_sweep.xml
create mode 100644 resources/CyberShake_75.xml
create mode 100644 resources/Epigenomics_100.xml
create mode 100644 resources/Epigenomics_24.xml
create mode 100644 resources/Epigenomics_46.xml
create mode 100644 resources/Epigenomics_72.xml
create mode 100644 resources/Epigenomics_997.xml
create mode 100644 resources/Inspiral_100.xml
create mode 100644 resources/Inspiral_1000.xml
create mode 100644 resources/Inspiral_30.xml
create mode 100644 resources/Inspiral_50.xml
create mode 100644 resources/Inspiral_72.xml
create mode 100644 resources/Montage_100.xml
create mode 100644 resources/Montage_1000.xml
create mode 100644 resources/Montage_25.xml
create mode 100644 resources/Montage_250.xml
create mode 100644 resources/Montage_30.xml
create mode 100644 resources/Montage_40.xml
create mode 100644 resources/Montage_5.xml
create mode 100644 resources/Montage_50.xml
create mode 100644 resources/Montage_500.xml
create mode 100644 resources/Montage_75.xml
create mode 100644 resources/Sipht_100.xml
create mode 100644 resources/Sipht_30.xml
create mode 100644 resources/Sipht_60.xml
create mode 100644 resources/Sipht_73.xml
create mode 100644 resources/Sipht_79.xml
create mode 100644 resources/generated/CyberShake_100.xml
create mode 100644 resources/generated/CyberShake_30.xml
create mode 100644 resources/generated/CyberShake_40.xml
create mode 100644 resources/generated/CyberShake_50.xml
create mode 100644 resources/generated/CyberShake_60.xml
create mode 100644 resources/generated/CyberShake_65.xml
create mode 100644 resources/generated/CyberShake_70.xml
create mode 100644 resources/generated/CyberShake_75.xml
create mode 100644 resources/generated/CyberShake_80.xml
create mode 100644 resources/generated/CyberShake_90.xml
create mode 100644 resources/generated/Montage_100.xml
create mode 100644 resources/generated/Montage_250.xml
create mode 100644 resources/generated/Montage_30.xml
create mode 100644 resources/generated/Montage_40.xml
create mode 100644 resources/generated/Montage_50.xml
create mode 100644 resources/generated/Montage_500.xml
create mode 100644 resources/generated/Montage_60.xml
create mode 100644 resources/generated/Montage_70.xml
create mode 100644 resources/generated/Montage_80.xml
create mode 100644 resources/generated/Montage_90.xml
create mode 100644 resources/new_generated/CyberShake_100.xml
create mode 100644 resources/new_generated/CyberShake_30.xml
create mode 100644 resources/new_generated/CyberShake_50.xml
create mode 100644 resources/new_generated/CyberShake_75.xml
create mode 100644 resources/small_bad.xml
create mode 100644 src/algs/SimpleRandomizedHeuristic.py
create mode 100644 src/algs/common/MapOrdSchedule.py
create mode 100644 src/algs/common/NewSchedulerBuilder.py
create mode 100644 src/algs/common/ScheduleBuilder.py
create mode 100644 src/algs/common/__init__.py
create mode 100644 src/algs/common/individuals.py
create mode 100644 src/algs/common/particle_operations.py
create mode 100644 src/algs/common/utilities.py
create mode 100644 src/algs/ga/GAFunctions2.py
create mode 100644 src/algs/ga/GAImpl.py
create mode 100644 src/algs/ga/GARunner.py
create mode 100644 src/algs/ga/common_fixed_schedule_schema.py
create mode 100644 src/algs/ga/nsga2.py
create mode 100644 src/algs/gsa/SimpleGsaScheme.py
create mode 100644 src/algs/gsa/heftbasedoperators.py
create mode 100644 src/algs/gsa/operators.py
create mode 100644 src/algs/gsa/ordering_mapping_operators.py
create mode 100644 src/algs/gsa/setbasedoperators.py
create mode 100644 src/algs/heft/DSimpleHeft.py
create mode 100644 src/algs/heft/DeadlineHeft.py
create mode 100644 src/algs/heft/HeftHelper.py
create mode 100644 src/algs/heft/PublicResourceManager.py
create mode 100644 src/algs/heft/simple_heft.py
create mode 100644 src/algs/peft/DSimplePeft.py
create mode 100644 src/algs/peft/PeftHelper.py
create mode 100644 src/algs/peft/PublicResourceManager.py
create mode 100644 src/algs/peft/simple_peft.py
create mode 100644 src/algs/pso/gapso.py
create mode 100644 src/algs/pso/mapping_operators.py
create mode 100644 src/algs/pso/ordering_operators.py
create mode 100644 src/algs/pso/rdpso/mapordschedule.py
create mode 100644 src/algs/pso/rdpso/mapping_operators.py
create mode 100644 src/algs/pso/rdpso/ordering_operators.py
create mode 100644 src/algs/pso/rdpso/particle_operations.py
create mode 100644 src/algs/pso/rdpso/rdpso.py
create mode 100644 src/algs/pso/rdpsoOrd/mapordschedule.py
create mode 100644 src/algs/pso/rdpsoOrd/mapping_operators.py
create mode 100644 src/algs/pso/rdpsoOrd/ordering_operators.py
create mode 100644 src/algs/pso/rdpsoOrd/particle_operations.py
create mode 100644 src/algs/pso/rdpsoOrd/rdpso.py
create mode 100644 src/algs/pso/sdpso.py
create mode 100644 src/algs/sa/SimulatedAnnealingScheme.py
create mode 100644 src/algs/sa/mappingops.py
create mode 100644 src/core/CommonComponents/ExperimentalManager.py
create mode 100644 src/core/CommonComponents/utilities.py
create mode 100644 src/core/environment/BaseElements.py
create mode 100644 src/core/environment/DAXExtendParser.py
create mode 100644 src/core/environment/DAXParser.py
create mode 100644 src/core/environment/ResourceGenerator.py
create mode 100644 src/core/environment/ResourceManager.py
create mode 100644 src/core/environment/Utility.py
create mode 100644 src/experiments/aggregate_utilities.py
create mode 100644 src/experiments/common.py
create mode 100644 src/experiments/comparison_experiments/GAvsHEFT.py
create mode 100644 src/experiments/comparison_experiments/HEFTvsPEFT.py
create mode 100644 src/experiments/comparison_experiments/HeftOnly.py
create mode 100644 src/experiments/comparison_experiments/OMPSOvsHEFT.py
create mode 100644 src/experiments/comparison_experiments/OMPSOvsRDPSO.py
create mode 100644 src/experiments/comparison_experiments/OMPSOvsRDPSO_onlyOrd.py
create mode 100644 src/experiments/ga/ga_base_experiment.py
create mode 100644 src/experiments/gsa/gsa_base_experiment.py
create mode 100644 src/experiments/gsa/omgsa_base_experiment.py
create mode 100644 src/experiments/pso/gapso_base_experiment.py
create mode 100644 src/experiments/pso/ompso_base_experiment.py
create mode 100644 src/experiments/pso/rdpso_base_experiment.py
create mode 100644 src/experiments/pso/rdpso_base_experiment_ordering.py
create mode 100644 src/experiments/pso/sdpso_base_experiment.py
create mode 100644 src/experiments/sa/sa_base_experiment.py
create mode 100644 src/settings.py
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b74c65d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+*.pyc
+*.orig
+
+.idea/
+temp/
+build/
+dist/
+__pycache__/
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..7e8c195
--- /dev/null
+++ b/README.md
@@ -0,0 +1 @@
+Workflow execution simulator with different heuristic (HEFT, PEFT) and evolutionary (GA, PSO, GSA, SA) algorithms
\ No newline at end of file
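The resources/*.xml files added below are DAX workflow descriptions (CyberShake, Epigenomics, Inspiral, Montage, Sipht are the standard Pegasus-style benchmark workflows). As a rough illustration of what the stripped payloads contain, here is a minimal sketch that loads one such file into a task graph. This is not the repository's own parser (that lives in src/core/environment/DAXParser.py); the tag and attribute names it assumes (adag, job, child, parent, runtime) come from the common DAX schema and are assumptions, since the XML content itself is not shown in this patch.

```python
# Minimal sketch: load a DAX workflow file such as resources/CyberShake_30.xml
# and build a task graph (task id -> runtime, plus parent -> child edges).
# Tag/attribute names (job, child, parent, runtime) follow the common Pegasus
# DAX schema -- an assumption, since the XML payload was stripped from this patch.
import xml.etree.ElementTree as ET
from collections import defaultdict


def load_dax(path):
    root = ET.parse(path).getroot()

    def local(tag):
        # Strip a possible XML namespace: "{uri}job" -> "job".
        return tag.rsplit("}", 1)[-1]

    runtimes = {}
    parents = defaultdict(list)
    for elem in root:
        if local(elem.tag) == "job":
            runtimes[elem.get("id")] = float(elem.get("runtime", 0.0))
        elif local(elem.tag) == "child":
            child_id = elem.get("ref")
            for p in elem:
                if local(p.tag) == "parent":
                    parents[child_id].append(p.get("ref"))
    return runtimes, dict(parents)


if __name__ == "__main__":
    tasks, deps = load_dax("resources/CyberShake_30.xml")
    print(f"{len(tasks)} tasks, {sum(len(v) for v in deps.values())} dependency edges")
```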
diff --git a/resources/CyberShake_100.xml b/resources/CyberShake_100.xml
new file mode 100644
index 0000000..b27fb22
--- /dev/null
+++ b/resources/CyberShake_100.xml
@@ -0,0 +1,884 @@
[884 added lines of DAX XML for resources/CyberShake_100.xml were stripped during extraction and are not shown.]
diff --git a/resources/CyberShake_1000.xml b/resources/CyberShake_1000.xml
new file mode 100644
index 0000000..9ddca3c
--- /dev/null
+++ b/resources/CyberShake_1000.xml
@@ -0,0 +1,8992 @@
[8992 added lines of DAX XML for resources/CyberShake_1000.xml were stripped during extraction and are not shown.]
diff --git a/resources/CyberShake_100_2.xml b/resources/CyberShake_100_2.xml
new file mode 100644
index 0000000..dbf4e6e
--- /dev/null
+++ b/resources/CyberShake_100_2.xml
@@ -0,0 +1,895 @@
[895 added lines of DAX XML for resources/CyberShake_100_2.xml were stripped during extraction and are not shown.]
\ No newline at end of file
diff --git a/resources/CyberShake_30.xml b/resources/CyberShake_30.xml
new file mode 100644
index 0000000..96b44c8
--- /dev/null
+++ b/resources/CyberShake_30.xml
@@ -0,0 +1,266 @@
[266 added lines of DAX XML for resources/CyberShake_30.xml were stripped during extraction and are not shown.]
diff --git a/resources/CyberShake_50.xml b/resources/CyberShake_50.xml
new file mode 100644
index 0000000..875e154
--- /dev/null
+++ b/resources/CyberShake_50.xml
@@ -0,0 +1,442 @@
[442 added lines of DAX XML for resources/CyberShake_50.xml were stripped during extraction and are not shown.]
diff --git a/resources/CyberShake_50_sweep.xml b/resources/CyberShake_50_sweep.xml
new file mode 100644
index 0000000..b2068a2
--- /dev/null
+++ b/resources/CyberShake_50_sweep.xml
@@ -0,0 +1,441 @@
[441 added lines of DAX XML for resources/CyberShake_50_sweep.xml were stripped during extraction and are not shown.]
diff --git a/resources/CyberShake_75.xml b/resources/CyberShake_75.xml
new file mode 100644
index 0000000..02671a1
--- /dev/null
+++ b/resources/CyberShake_75.xml
@@ -0,0 +1,673 @@
[673 added lines of DAX XML for resources/CyberShake_75.xml were stripped during extraction and are not shown.]
\ No newline at end of file
diff --git a/resources/Epigenomics_100.xml b/resources/Epigenomics_100.xml
new file mode 100644
index 0000000..6a3324b
--- /dev/null
+++ b/resources/Epigenomics_100.xml
@@ -0,0 +1,825 @@
[825 added lines of DAX XML for resources/Epigenomics_100.xml were stripped during extraction and are not shown.]
diff --git a/resources/Epigenomics_24.xml b/resources/Epigenomics_24.xml
new file mode 100644
index 0000000..bb048fc
--- /dev/null
+++ b/resources/Epigenomics_24.xml
@@ -0,0 +1,198 @@
[198 added lines of DAX XML for resources/Epigenomics_24.xml were stripped during extraction and are not shown.]
diff --git a/resources/Epigenomics_46.xml b/resources/Epigenomics_46.xml
new file mode 100644
index 0000000..be70b83
--- /dev/null
+++ b/resources/Epigenomics_46.xml
@@ -0,0 +1,380 @@
[380 added lines of DAX XML for resources/Epigenomics_46.xml were stripped during extraction and are not shown.]
diff --git a/resources/Epigenomics_72.xml b/resources/Epigenomics_72.xml
new file mode 100644
index 0000000..f3775f6
--- /dev/null
+++ b/resources/Epigenomics_72.xml
@@ -0,0 +1,594 @@
[594 added lines of DAX XML for resources/Epigenomics_72.xml were stripped during extraction and are not shown.]
diff --git a/resources/Epigenomics_997.xml b/resources/Epigenomics_997.xml
new file mode 100644
index 0000000..03595cd
--- /dev/null
+++ b/resources/Epigenomics_997.xml
@@ -0,0 +1,8185 @@
[8185 added lines of DAX XML for resources/Epigenomics_997.xml were stripped during extraction and are not shown.]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/resources/Inspiral_100.xml b/resources/Inspiral_100.xml
new file mode 100644
index 0000000..f006486
--- /dev/null
+++ b/resources/Inspiral_100.xml
@@ -0,0 +1,1027 @@
+[ XML content omitted: 1,027 added lines ]
diff --git a/resources/Inspiral_1000.xml b/resources/Inspiral_1000.xml
new file mode 100644
index 0000000..87e2a32
--- /dev/null
+++ b/resources/Inspiral_1000.xml
@@ -0,0 +1,10332 @@
+[ XML content omitted: 10,332 added lines ]
diff --git a/resources/Inspiral_30.xml b/resources/Inspiral_30.xml
new file mode 100644
index 0000000..5623768
--- /dev/null
+++ b/resources/Inspiral_30.xml
@@ -0,0 +1,312 @@
+[ XML content omitted: 312 added lines ]
diff --git a/resources/Inspiral_50.xml b/resources/Inspiral_50.xml
new file mode 100644
index 0000000..8c23405
--- /dev/null
+++ b/resources/Inspiral_50.xml
@@ -0,0 +1,522 @@
+[ XML content omitted: 522 added lines ]
diff --git a/resources/Inspiral_72.xml b/resources/Inspiral_72.xml
new file mode 100644
index 0000000..05dc8aa
--- /dev/null
+++ b/resources/Inspiral_72.xml
@@ -0,0 +1,742 @@
+[ XML content omitted: 742 added lines ]
diff --git a/resources/Montage_100.xml b/resources/Montage_100.xml
new file mode 100644
index 0000000..8f1426c
--- /dev/null
+++ b/resources/Montage_100.xml
@@ -0,0 +1,1227 @@
+[ XML content omitted: 1,227 added lines ]
diff --git a/resources/Montage_1000.xml b/resources/Montage_1000.xml
new file mode 100644
index 0000000..40deee4
--- /dev/null
+++ b/resources/Montage_1000.xml
@@ -0,0 +1,12633 @@
+[ XML content omitted: 12,633 added lines ]
diff --git a/resources/Montage_25.xml b/resources/Montage_25.xml
new file mode 100644
index 0000000..ecf6e0f
--- /dev/null
+++ b/resources/Montage_25.xml
@@ -0,0 +1,277 @@
[... 277 added XML lines not shown ...]
diff --git a/resources/Montage_250.xml b/resources/Montage_250.xml
new file mode 100644
index 0000000..8e3ff93
--- /dev/null
+++ b/resources/Montage_250.xml
@@ -0,0 +1,3458 @@
[... 3458 added XML lines not shown ...]
diff --git a/resources/Montage_30.xml b/resources/Montage_30.xml
new file mode 100644
index 0000000..0d315b3
--- /dev/null
+++ b/resources/Montage_30.xml
@@ -0,0 +1,423 @@
[... 423 added XML lines not shown ...]
diff --git a/resources/Montage_40.xml b/resources/Montage_40.xml
new file mode 100644
index 0000000..c544f3f
--- /dev/null
+++ b/resources/Montage_40.xml
@@ -0,0 +1,426 @@
[... 426 added XML lines not shown ...]
diff --git a/resources/Montage_5.xml b/resources/Montage_5.xml
new file mode 100644
index 0000000..548df4a
--- /dev/null
+++ b/resources/Montage_5.xml
@@ -0,0 +1,39 @@
[... 39 added XML lines not shown ...]
diff --git a/resources/Montage_50.xml b/resources/Montage_50.xml
new file mode 100644
index 0000000..17b9a36
--- /dev/null
+++ b/resources/Montage_50.xml
@@ -0,0 +1,588 @@
[... 588 added XML lines not shown ...]
diff --git a/resources/Montage_500.xml b/resources/Montage_500.xml
new file mode 100644
index 0000000..bbdaf6d
--- /dev/null
+++ b/resources/Montage_500.xml
@@ -0,0 +1,6960 @@
[... 6960 added XML lines not shown ...]
diff --git a/resources/Montage_75.xml b/resources/Montage_75.xml
new file mode 100644
index 0000000..3bbe0af
--- /dev/null
+++ b/resources/Montage_75.xml
@@ -0,0 +1,1000 @@
[... 1000 added XML lines not shown ...]
diff --git a/resources/Sipht_100.xml b/resources/Sipht_100.xml
new file mode 100644
index 0000000..2d5ed39
--- /dev/null
+++ b/resources/Sipht_100.xml
@@ -0,0 +1,6406 @@
[... 6406 added XML lines not shown ...]
diff --git a/resources/Sipht_30.xml b/resources/Sipht_30.xml
new file mode 100644
index 0000000..46208ca
--- /dev/null
+++ b/resources/Sipht_30.xml
@@ -0,0 +1,2042 @@
[... 2042 added XML lines not shown ...]
diff --git a/resources/Sipht_60.xml b/resources/Sipht_60.xml
new file mode 100644
index 0000000..d6897cb
--- /dev/null
+++ b/resources/Sipht_60.xml
@@ -0,0 +1,4198 @@
[... 4198 added XML lines not shown ...]
diff --git a/resources/Sipht_73.xml b/resources/Sipht_73.xml
new file mode 100644
index 0000000..a8ec0dd
--- /dev/null
+++ b/resources/Sipht_73.xml
@@ -0,0 +1,4367 @@
[XML content of resources/Sipht_73.xml not shown: 4367 added lines]
diff --git a/resources/Sipht_79.xml b/resources/Sipht_79.xml
new file mode 100644
index 0000000..388bed5
--- /dev/null
+++ b/resources/Sipht_79.xml
@@ -0,0 +1,6186 @@
[XML content of resources/Sipht_79.xml not shown: 6186 added lines]
\ No newline at end of file
diff --git a/resources/generated/CyberShake_100.xml b/resources/generated/CyberShake_100.xml
new file mode 100644
index 0000000..b7a852b
--- /dev/null
+++ b/resources/generated/CyberShake_100.xml
@@ -0,0 +1,266 @@
[XML content of resources/generated/CyberShake_100.xml not shown: 266 added lines]
diff --git a/resources/generated/CyberShake_30.xml b/resources/generated/CyberShake_30.xml
new file mode 100644
index 0000000..785cd20
--- /dev/null
+++ b/resources/generated/CyberShake_30.xml
@@ -0,0 +1,266 @@
[XML content of resources/generated/CyberShake_30.xml not shown: 266 added lines]
diff --git a/resources/generated/CyberShake_40.xml b/resources/generated/CyberShake_40.xml
new file mode 100644
index 0000000..8ae84b1
--- /dev/null
+++ b/resources/generated/CyberShake_40.xml
@@ -0,0 +1,266 @@
[XML content of resources/generated/CyberShake_40.xml not shown: 266 added lines]
diff --git a/resources/generated/CyberShake_50.xml b/resources/generated/CyberShake_50.xml
new file mode 100644
index 0000000..06d3ae0
--- /dev/null
+++ b/resources/generated/CyberShake_50.xml
@@ -0,0 +1,266 @@
[XML content of resources/generated/CyberShake_50.xml not shown: 266 added lines]
diff --git a/resources/generated/CyberShake_60.xml b/resources/generated/CyberShake_60.xml
new file mode 100644
index 0000000..baf4aaa
--- /dev/null
+++ b/resources/generated/CyberShake_60.xml
@@ -0,0 +1,266 @@
[XML content of resources/generated/CyberShake_60.xml not shown: 266 added lines]
diff --git a/resources/generated/CyberShake_65.xml b/resources/generated/CyberShake_65.xml
new file mode 100644
index 0000000..0d330ee
--- /dev/null
+++ b/resources/generated/CyberShake_65.xml
@@ -0,0 +1,266 @@
[XML content of resources/generated/CyberShake_65.xml not shown: 266 added lines]
diff --git a/resources/generated/CyberShake_70.xml b/resources/generated/CyberShake_70.xml
new file mode 100644
index 0000000..c559757
--- /dev/null
+++ b/resources/generated/CyberShake_70.xml
@@ -0,0 +1,266 @@
[XML content of resources/generated/CyberShake_70.xml not shown: 266 added lines]
diff --git a/resources/generated/CyberShake_75.xml b/resources/generated/CyberShake_75.xml
new file mode 100644
index 0000000..fc18ca3
--- /dev/null
+++ b/resources/generated/CyberShake_75.xml
@@ -0,0 +1,266 @@
[XML content of resources/generated/CyberShake_75.xml not shown: 266 added lines]
diff --git a/resources/generated/CyberShake_80.xml b/resources/generated/CyberShake_80.xml
new file mode 100644
index 0000000..8fbaf6b
--- /dev/null
+++ b/resources/generated/CyberShake_80.xml
@@ -0,0 +1,266 @@
[XML content of resources/generated/CyberShake_80.xml not shown: 266 added lines]
diff --git a/resources/generated/CyberShake_90.xml b/resources/generated/CyberShake_90.xml
new file mode 100644
index 0000000..e153eca
--- /dev/null
+++ b/resources/generated/CyberShake_90.xml
@@ -0,0 +1,266 @@
[XML content of resources/generated/CyberShake_90.xml not shown: 266 added lines]
diff --git a/resources/generated/Montage_100.xml b/resources/generated/Montage_100.xml
new file mode 100644
index 0000000..dcc9f3f
--- /dev/null
+++ b/resources/generated/Montage_100.xml
@@ -0,0 +1,426 @@
[XML content of resources/generated/Montage_100.xml not shown: 426 added lines]
diff --git a/resources/generated/Montage_250.xml b/resources/generated/Montage_250.xml
new file mode 100644
index 0000000..8e3ff93
--- /dev/null
+++ b/resources/generated/Montage_250.xml
@@ -0,0 +1,3458 @@
[XML content of resources/generated/Montage_250.xml not shown: 3458 added lines]
diff --git a/resources/generated/Montage_30.xml b/resources/generated/Montage_30.xml
new file mode 100644
index 0000000..0d315b3
--- /dev/null
+++ b/resources/generated/Montage_30.xml
@@ -0,0 +1,423 @@
[XML content of resources/generated/Montage_30.xml not shown: 423 added lines]
diff --git a/resources/generated/Montage_40.xml b/resources/generated/Montage_40.xml
new file mode 100644
index 0000000..c544f3f
--- /dev/null
+++ b/resources/generated/Montage_40.xml
@@ -0,0 +1,426 @@
[XML content of resources/generated/Montage_40.xml not shown: 426 added lines]
diff --git a/resources/generated/Montage_50.xml b/resources/generated/Montage_50.xml
new file mode 100644
index 0000000..3ffa942
--- /dev/null
+++ b/resources/generated/Montage_50.xml
@@ -0,0 +1,423 @@
[XML content of resources/generated/Montage_50.xml not shown: 423 added lines]
diff --git a/resources/generated/Montage_500.xml b/resources/generated/Montage_500.xml
new file mode 100644
index 0000000..bbdaf6d
--- /dev/null
+++ b/resources/generated/Montage_500.xml
@@ -0,0 +1,6960 @@
[XML content of resources/generated/Montage_500.xml not shown: 6960 added lines]
diff --git a/resources/generated/Montage_60.xml b/resources/generated/Montage_60.xml
new file mode 100644
index 0000000..12f6a83
--- /dev/null
+++ b/resources/generated/Montage_60.xml
@@ -0,0 +1,429 @@
[XML content of resources/generated/Montage_60.xml not shown: 429 added lines]
diff --git a/resources/generated/Montage_70.xml b/resources/generated/Montage_70.xml
new file mode 100644
index 0000000..44dd8ef
--- /dev/null
+++ b/resources/generated/Montage_70.xml
@@ -0,0 +1,429 @@
[XML content of resources/generated/Montage_70.xml not shown: 429 added lines]
diff --git a/resources/generated/Montage_80.xml b/resources/generated/Montage_80.xml
new file mode 100644
index 0000000..b65c5d1
--- /dev/null
+++ b/resources/generated/Montage_80.xml
@@ -0,0 +1,423 @@
[XML content of resources/generated/Montage_80.xml not shown: 423 added lines]
diff --git a/resources/generated/Montage_90.xml b/resources/generated/Montage_90.xml
new file mode 100644
index 0000000..37416ff
--- /dev/null
+++ b/resources/generated/Montage_90.xml
@@ -0,0 +1,432 @@
[XML content of resources/generated/Montage_90.xml not shown: 432 added lines]
diff --git a/resources/new_generated/CyberShake_100.xml b/resources/new_generated/CyberShake_100.xml
new file mode 100644
index 0000000..dbf4e6e
--- /dev/null
+++ b/resources/new_generated/CyberShake_100.xml
@@ -0,0 +1,895 @@
+[895 lines of workflow XML omitted: content not preserved in this copy of the patch]
\ No newline at end of file
diff --git a/resources/new_generated/CyberShake_30.xml b/resources/new_generated/CyberShake_30.xml
new file mode 100644
index 0000000..946c95c
--- /dev/null
+++ b/resources/new_generated/CyberShake_30.xml
@@ -0,0 +1,265 @@
+[265 lines of workflow XML omitted: content not preserved in this copy of the patch]
\ No newline at end of file
diff --git a/resources/new_generated/CyberShake_50.xml b/resources/new_generated/CyberShake_50.xml
new file mode 100644
index 0000000..76f46f0
--- /dev/null
+++ b/resources/new_generated/CyberShake_50.xml
@@ -0,0 +1,445 @@
+[445 lines of workflow XML omitted: content not preserved in this copy of the patch]
diff --git a/resources/new_generated/CyberShake_75.xml b/resources/new_generated/CyberShake_75.xml
new file mode 100644
index 0000000..02671a1
--- /dev/null
+++ b/resources/new_generated/CyberShake_75.xml
@@ -0,0 +1,673 @@
+[673 lines of workflow XML omitted: content not preserved in this copy of the patch]
\ No newline at end of file
diff --git a/resources/small_bad.xml b/resources/small_bad.xml
new file mode 100644
index 0000000..7a6f026
--- /dev/null
+++ b/resources/small_bad.xml
@@ -0,0 +1,16 @@
+[16 lines of XML omitted: content not preserved in this copy of the patch]
\ No newline at end of file
diff --git a/src/algs/SimpleRandomizedHeuristic.py b/src/algs/SimpleRandomizedHeuristic.py
new file mode 100644
index 0000000..9cf2c88
--- /dev/null
+++ b/src/algs/SimpleRandomizedHeuristic.py
@@ -0,0 +1,134 @@
+import random
+
+from src.algs.heft.HeftHelper import HeftHelper
+from src.core.environment.BaseElements import Node
+from src.core.environment.ResourceManager import Scheduler, ScheduleItem, Schedule
+
+
+class SimpleRandomizedHeuristic(Scheduler):
+
+ def __init__(self,
+ workflow,
+ nodes,
+ estimator):
+ self.workflow = workflow
+ self.nodes = nodes
+ self.workflow_size = workflow.get_task_count()
+ self.estimator = estimator
+
+ self.task_map = {task.id: task for task in HeftHelper.get_all_tasks(self.workflow)}
+ self.node_map = {node.name: node for node in nodes}
+
+ self.initial_chromosome = None
+ pass
+
+ def schedule(self, fixed_schedule_part=None, current_time=0.0):
+
+ estimate = self.estimator.estimate_transfer_time
+ # TODO: make common utility function with ScheduleBuilder
+ def is_last_version_of_task_executing(item):
+ return item.state == ScheduleItem.EXECUTING or item.state == ScheduleItem.FINISHED or item.state == ScheduleItem.UNSTARTED
+
+ def _get_ready_tasks(children, finished_tasks):
+ def _is_child_ready(child):
+ ids = set([p.id for p in child.parents])
+ result = False in [id in finished_tasks for id in ids]
+ return not result
+ ready_children = [child for child in children if _is_child_ready(child)]
+ return ready_children
+
+ if fixed_schedule_part is None:
+ schedule_mapping = {node: [] for node in self.nodes}
+ ready_tasks = [child.id for child in self.workflow.head_task.children]
+ task_to_node = dict()
+ finished_tasks = set()
+ else:
+ schedule_mapping = {node: [item for item in items] for (node, items) in fixed_schedule_part.mapping.items()}
+ finished_tasks = [item.job.id for (node, items) in fixed_schedule_part.mapping.items() for item in items if is_last_version_of_task_executing(item)]
+ finished_tasks = set([self.workflow.head_task.id] + finished_tasks)
+ unfinished = [task for task in self.workflow.get_all_unique_tasks() if not task.id in finished_tasks]
+ ready_tasks = [task.id for task in _get_ready_tasks(unfinished, finished_tasks)]
+ task_to_node = {item.job.id: (node, item.start_time, item.end_time) for (node, items) in fixed_schedule_part.mapping.items() for item in items if is_last_version_of_task_executing(item)}
+
+
+ def is_child_ready(child):
+ ids = set([p.id for p in child.parents])
+ result = False in [id in finished_tasks for id in ids]
+ return not result
+
+
+ def find_slots(node, comm_ready, runtime):
+ node_schedule = schedule_mapping.get(node, list())
+ free_time = 0 if len(node_schedule) == 0 else node_schedule[-1].end_time
+ ## TODO: refactor it later
+ f_time = max(free_time, comm_ready)
+ f_time = max(f_time, current_time)
+ base_variant = [(f_time, f_time + runtime + 1)]
+ zero_interval = [] if len(node_schedule) == 0 else [(0, node_schedule[0].start_time)]
+ middle_intervals = [(node_schedule[i].end_time, node_schedule[i + 1].start_time) for i in range(len(node_schedule) - 1)]
+ intervals = zero_interval + middle_intervals + base_variant
+
+ #result = [(st, end) for (st, end) in intervals if st >= comm_ready and end - st >= runtime]
+ ## TODO: rethink rounding
+ result = [(st, end) for (st, end) in intervals if (current_time < st or abs((current_time - st)) < 0.01) and st >= comm_ready and (runtime < (end - st) or abs((end - st) - runtime) < 0.01)]
+ return result
+
+ def comm_ready_func(task, node):
+ ##TODO: remake this stub later.
+ if len(task.parents) == 1 and self.workflow.head_task.id == list(task.parents)[0].id:
+ return 0
+ return max([task_to_node[p.id][2] + estimate(node, task_to_node[p.id][0], task, p) for p in task.parents])
+
+
+
+ def get_possible_execution_times(task, node):
+            ## pay attention to the last element in the resulting sequence:
+            ## it represents all of the time available on the node after it completes all of its work
+            ## (if such an interval can exist)
+            ## time_slots = [(st1, end1), (st2, end2), ..., (st_last, st_last + runtime)]
+ runtime = self.estimator.estimate_runtime(task, node)
+ comm_ready = comm_ready_func(task, node)
+ time_slots = find_slots(node, comm_ready, runtime)
+ return time_slots, runtime
+
+ while len(ready_tasks) > 0:
+ choosed_index = random.randint(0, len(ready_tasks) - 1)
+ task = self.task_map[ready_tasks[choosed_index]]
+
+            # TODO: add a check for the case when all nodes are dead (it is a very rare situation, so it is not considered for now)
+ alive_nodes = [node for node in self.nodes if node.state != Node.Down]
+ choosed_node_index = random.randint(0, len(alive_nodes) - 1)
+ node = alive_nodes[choosed_node_index]
+
+
+ time_slots, runtime = get_possible_execution_times(task, node)
+ choosed_time_index = 0 if len(time_slots) == 1 else random.randint(0, len(time_slots) - 1)
+ time_slot = time_slots[choosed_time_index]
+
+ start_time = time_slot[0]
+ end_time = start_time + runtime
+
+ item = ScheduleItem(task, start_time, end_time)
+ ##schedule_mapping[node].append(item)
+ Schedule.insert_item(schedule_mapping, node, item)
+ task_to_node[task.id] = (node, start_time, end_time)
+
+ ##print('I am here')
+ ready_tasks.remove(task.id)
+ finished_tasks.add(task.id)
+
+ ready_children = [child for child in task.children if is_child_ready(child)]
+ for child in ready_children:
+ ready_tasks.append(child.id)
+
+ schedule = Schedule(schedule_mapping)
+ return schedule
+
+
+
+
+
+
+
+
+
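+# Editorial note: a minimal usage sketch (hypothetical workflow, nodes and
+# estimator objects), matching the constructor and schedule() signatures above;
+# it is an illustration, not part of the original module:
+#
+#   heuristic = SimpleRandomizedHeuristic(workflow, nodes, estimator)
+#   schedule = heuristic.schedule(fixed_schedule_part=None, current_time=0.0)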
diff --git a/src/algs/common/MapOrdSchedule.py b/src/algs/common/MapOrdSchedule.py
new file mode 100644
index 0000000..ff429f8
--- /dev/null
+++ b/src/algs/common/MapOrdSchedule.py
@@ -0,0 +1,107 @@
+from deap.base import Fitness
+from src.algs.common.NewSchedulerBuilder import place_task_to_schedule
+from src.core.environment.BaseElements import Node
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+from src.core.environment.Utility import Utility
+
+MAPPING_SPECIE = "MappingSpecie"
+ORDERING_SPECIE = "OrderingSpecie"
+
+
+def build_schedule(workflow, estimator, resource_manager, solution):
+ """
+    The solution contains all the parts necessary to build a complete schedule.
+    For the moment, it is assumed that all species taking part in the algorithm
+    are required to build the complete solution (see the editorial sketch after this function):
+ solution = {
+ s1.name: val1,
+ s2.name: val2,
+ ....
+ }
+ """
+ ms = solution[MAPPING_SPECIE]
+ os = solution[ORDERING_SPECIE]
+
+ assert check_precedence(workflow, os), "Precedence is violated"
+
+ ms = {t: resource_manager.byName(n) for t, n in ms}
+ schedule_mapping = {n: [] for n in set(ms.values())}
+ task_to_node = {}
+ for t in os:
+ node = ms[t]
+ t = workflow.byId(t)
+ (start_time, end_time) = place_task_to_schedule(workflow,
+ estimator,
+ schedule_mapping,
+ task_to_node,
+ ms, t, node, 0)
+
+ task_to_node[t.id] = (node, start_time, end_time)
+ schedule = Schedule(schedule_mapping)
+ return schedule
+
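+# Editorial sketch of the `solution` layout consumed by build_schedule, using
+# hypothetical task and node names (the MAPPING_SPECIE part pairs task ids with
+# node names, the ORDERING_SPECIE part lists task ids in execution order):
+#
+#   solution = {
+#       MAPPING_SPECIE: [("task_1", "node_a"), ("task_2", "node_b")],
+#       ORDERING_SPECIE: ["task_1", "task_2"],
+#   }
+#
+# build_schedule resolves each node by name, places every task in the given
+# order and returns the resulting Schedule.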
+
+def check_precedence(workflow, task_seq):
+ for i in range(len(task_seq)):
+ task = workflow.byId(task_seq[i])
+ pids = [p.id for p in task.parents]
+ for j in range(i + 1, len(task_seq)):
+ if task_seq[j] in pids:
+ return False
+ return True
+
+
+def fitness(wf, rm, estimator, position):
+ if isinstance(position, Schedule):
+ sched = position
+ else:
+ sched = build_schedule(wf, estimator, rm, position)
+
+ # isvalid = Utility.is_static_schedule_valid(wf,sched)
+ # if not isvalid:
+ # print("NOT VALID SCHEDULE!")
+
+ makespan = Utility.makespan(sched)
+ ## TODO: make a real estimation later
+ cost = 0.0
+ Fitness.weights = [-1.0, -1.0]
+ fit = Fitness(values=(makespan, cost))
+ ## TODO: make a normal multi-objective fitness estimation
+ fit.mofit = makespan
+ return fit
+
+
+
+def mapping_from_schedule(schedule):
+ mapping = {item.job.id: node.name for node, items in schedule.mapping.items()
+ for item in items}
+ return mapping
+
+
+def ordering_from_schedule(schedule):
+ ordering = sorted((item for node, items in schedule.mapping.items() for item in items),
+ key=lambda x: x.start_time)
+ ordering = [item.job.id for item in ordering]
+ return ordering
+
+
+def ord_and_map(schedule):
+ return mapping_from_schedule(schedule), ordering_from_schedule(schedule)
+
+
+def validate_mapping_with_alive_nodes(mapping, rm):
+ """
+ :param mapping: is a dict {(task_id):(node_name)}
+ :param rm: resource manager
+ :return:
+ """
+ #TODO transform mapping from task:runtime to task:node
+
+ alive_nodes = [node.name for node in rm.get_nodes() if node.state != Node.Down]
+ for task_id, node_name in mapping.items():
+ if node_name not in alive_nodes:
+ return False
+ return True
+
+
+
diff --git a/src/algs/common/NewSchedulerBuilder.py b/src/algs/common/NewSchedulerBuilder.py
new file mode 100644
index 0000000..14fee89
--- /dev/null
+++ b/src/algs/common/NewSchedulerBuilder.py
@@ -0,0 +1,254 @@
+from copy import deepcopy
+from functools import reduce
+import operator
+from pprint import pprint
+from src.algs.heft.HeftHelper import HeftHelper
+from src.core.environment.BaseElements import Node
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+from src.core.environment.Utility import tracing
+
+
+def _comm_ready_func(workflow,
+ estimator,
+ task_to_node,
+ chrmo_mapping,
+ task,
+ node):
+ estimate = estimator.estimate_transfer_time
+ ##TODO: remake this stub later.
+ if len(task.parents) == 1 and workflow.head_task.id == list(task.parents)[0].id:
+ return 0
+
+ ## TODO: replace it with commented string below later
+ res_list = []
+ for p in task.parents:
+ c1 = task_to_node[p.id][2]
+ c2 = estimate(node.name, chrmo_mapping[p.id].name, task, p)
+ res_list.append(c1 + c2)
+
+ return max(res_list)
+## chrmo_mapping: task-node mapping
+def place_task_to_schedule(workflow,
+ estimator,
+ schedule_mapping,
+ task_to_node,
+ chrmo_mapping,
+ task,
+ node,
+ current_time):
+
+ runtime = estimator.estimate_runtime(task, node)
+ comm_ready = _comm_ready_func(workflow,
+ estimator,
+ task_to_node,
+ chrmo_mapping,
+ task,
+ node)
+
+ def _check(st, end):
+ return (0.00001 < (st - current_time)) \
+ and st >= comm_ready and (0.00001 < (end - st) - runtime)
+
+ node_schedule = schedule_mapping.get(node, list())
+
+
+ ## TODO: add case for inserting between nothing and first element
+ size = len(node_schedule)
+ result = None
+ i = 0
+    if size > 0 and _check(0, node_schedule[0].start_time):
+ i = -1
+ result = (0, node_schedule[0].start_time)
+ else:
+ while i < size - 1:
+ st = node_schedule[i].end_time
+ end = node_schedule[i + 1].start_time
+ if _check(st, end):
+ break
+ i += 1
+ if i < size - 1:
+ result = (st, end)
+ else:
+ free_time = 0 if len(node_schedule) == 0 else node_schedule[-1].end_time
+ ## TODO: refactor it later
+ f_time = max(free_time, comm_ready)
+ f_time = max(f_time, current_time)
+ result = (f_time, f_time + runtime)
+ i = size - 1
+ pass
+ pass
+
+ previous_elt = i
+ st_time = result[0]
+ end_time = st_time + runtime
+ item = ScheduleItem(task, st_time, end_time)
+
+ node_schedule.insert(previous_elt + 1, item)
+
+ schedule_mapping[node] = node_schedule
+ return (st_time, end_time)
+
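+# Editorial worked example of the slot search in place_task_to_schedule
+# (hypothetical numbers): with a node_schedule occupying (0, 5) and (10, 15),
+# runtime = 3, comm_ready = 4 and current_time = 0, the gap (5, 10) passes
+# _check, so the task is placed at (5, 8) and inserted between the two
+# existing items.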
+
+class NewScheduleBuilder:
+
+ def __init__(self,
+ workflow,
+ resource_manager,
+ estimator,
+ task_map,
+ node_map,
+                 # fixed part of the schedule; it needs to be taken into account when a new schedule is built, but nothing inside it can be changed
+ fixed_schedule_part):
+ self.workflow = workflow
+ self.nodes = HeftHelper.to_nodes(resource_manager.get_resources())
+ self.estimator = estimator
+ ##TODO: Build it
+ self.task_map = task_map
+ ##TODO: Build it
+ self.node_map = node_map
+
+ self.fixed_schedule_part = fixed_schedule_part
+ # construct initial mapping
+ # eliminate all already scheduled tasks
+
+ pass
+
+ def _create_helping_structures(self, chromo):
+ # copy fixed schedule
+ # TODO: make common utility function with SimpleRandomizedHeuristic
+ def is_last_version_of_task_executing(item):
+ return item.state == ScheduleItem.EXECUTING or item.state == ScheduleItem.FINISHED or item.state == ScheduleItem.UNSTARTED
+
+ schedule_mapping = {node: [item for item in items] for (node, items) in self.fixed_schedule_part.mapping.items()}
+
+        ## add nodes which exist in the resource manager but are empty in the chromosome and the fixed schedule;
+        ## otherwise they would not be added to the resulting schedule at all
+        ## Note: the schedule must always contain all nodes from the ResourceManager
+ for node in self.nodes:
+ if node not in schedule_mapping:
+ schedule_mapping[node] = []
+
+
+ finished_tasks = [item.job.id for (node, items) in self.fixed_schedule_part.mapping.items() for item in items if is_last_version_of_task_executing(item)]
+ finished_tasks = set([self.workflow.head_task.id] + finished_tasks)
+
+ unfinished = [task for task in self.workflow.get_all_unique_tasks() if not task.id in finished_tasks]
+
+ ready_tasks = [task.id for task in self._get_ready_tasks(unfinished, finished_tasks)]
+
+
+
+ chrmo_mapping = {item.job.id: self.node_map[node.name] for (node, items) in self.fixed_schedule_part.mapping.items() for item in items if is_last_version_of_task_executing(item)}
+
+ for (node_name, tasks) in chromo.items():
+ for tsk_id in tasks:
+ chrmo_mapping[tsk_id] = self.node_map[node_name]
+
+ task_to_node = {item.job.id: (node, item.start_time, item.end_time) for (node, items) in self.fixed_schedule_part.mapping.items() for item in items if is_last_version_of_task_executing(item)}
+
+ return (schedule_mapping, finished_tasks, ready_tasks, chrmo_mapping, task_to_node)
+
+ def __call__(self, chromo, current_time):
+
+ count_of_tasks = lambda mapping: reduce(operator.add, (len(tasks) for node, tasks in mapping.items()), 0)
+ alive_nodes = [node for node in self.nodes if node.state != Node.Down]
+
+ alive_nodes_names = [node.name for node in alive_nodes]
+ for node_name, tasks in chromo.items():
+ if node_name not in alive_nodes_names and len(tasks) > 0:
+ raise ValueError("Chromo is invalid. There is a task assigned to a dead node")
+ if count_of_tasks(chromo) + len(self.fixed_schedule_part.get_unfailed_tasks_ids()) != len(self.workflow.get_all_unique_tasks()):
+
+ print("==Chromosome==================================")
+ print(chromo)
+ print("=fixed_schedule_part===================================")
+ print(self.fixed_schedule_part)
+
+            raise Exception("The chromosome is not full. Chromo length: {0}, Fixed part length: {1}, workflow size: {2}".
+ format(count_of_tasks(chromo), len(self.fixed_schedule_part.get_unfailed_tasks_ids()),
+ len(self.workflow.get_all_unique_tasks())))
+
+ # TODO: add not to schedule
+ #if count_of_tasks(chromo) + count_of_tasks(self.fixed_schedule_part.mapping) !=
+
+ (schedule_mapping, finished_tasks, ready_tasks, chrmo_mapping, task_to_node) = self._create_helping_structures(chromo)
+
+ # print("SCHEDULE_MAPPING")
+ # print("AlIVE_NODES", alive_nodes)
+ # pprint(schedule_mapping)
+
+ #chromo_copy = {nd_name: [item for item in items] for (nd_name, items) in chromo.items()}
+ chromo_copy = deepcopy(chromo)
+
+
+ if len(alive_nodes) == 0:
+            raise Exception("There are no alive nodes")
+
+
+
+ #print("Building started...")
+ while len(ready_tasks) > 0:
+
+ # ## TODO: only for debug. Remove it later.
+ # print("alive nodes: {0}".format(alive_nodes))
+ # for node_name, tasks in chromo_copy.items():
+ # print("Node: {0}, tasks count: {1}".format(node_name, len(tasks)))
+
+ count_before = count_of_tasks(chromo_copy)
+ if len(alive_nodes) == 0:
+ raise ValueError("Count of alive_nodes is zero")
+ for node in alive_nodes:
+ if len(chromo_copy[node.name]) == 0:
+ continue
+ ## TODO: Urgent! completely rethink this procedure
+
+ tsk_id = None
+ for i in range(len(chromo_copy[node.name])):
+ if chromo_copy[node.name][i] in ready_tasks:
+ tsk_id = chromo_copy[node.name][i]
+ break
+
+
+ if tsk_id is not None:
+ task = self.task_map[tsk_id]
+ #del chromo_copy[node.name][0]
+ chromo_copy[node.name].remove(tsk_id)
+ ready_tasks.remove(tsk_id)
+
+ (start_time, end_time) = place_task_to_schedule(self.workflow,
+ self.estimator,
+ schedule_mapping,
+ task_to_node,
+ chrmo_mapping,
+ task,
+ node,
+ current_time)
+
+ task_to_node[task.id] = (node, start_time, end_time)
+
+ finished_tasks.add(task.id)
+
+ ready_children = self._get_ready_tasks(task.children, finished_tasks)
+ for child in ready_children:
+ ready_tasks.append(child.id)
+ count_after = count_of_tasks(chromo_copy)
+ if count_before == count_after:
+                raise Exception("Unable to properly process a chromosome."
+                                " Perhaps due to an invalid fixed_schedule_part or chromosome.")
+ pass
+ schedule = Schedule(schedule_mapping)
+ return schedule
+
+ ##TODO: redesign all these functions later
+
+ def _get_ready_tasks(self, children, finished_tasks):
+ def _is_child_ready(child):
+ for p in child.parents:
+ if p.id not in finished_tasks:
+ return False
+ return True
+ ready_children = [child for child in children if _is_child_ready(child)]
+ return ready_children
+
+ pass
+
diff --git a/src/algs/common/ScheduleBuilder.py b/src/algs/common/ScheduleBuilder.py
new file mode 100644
index 0000000..b286991
--- /dev/null
+++ b/src/algs/common/ScheduleBuilder.py
@@ -0,0 +1,256 @@
+from src.algs.heft import HeftHelper
+from src.core.environment.BaseElements import Node
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+
+class ScheduleBuilder:
+
+ def __init__(self,
+ workflow,
+ resource_manager,
+ estimator,
+ task_map,
+ node_map,
+                 # fixed part of the schedule; it needs to be taken into account when a new schedule is built, but nothing inside it can be changed
+ fixed_schedule_part):
+ self.workflow = workflow
+ self.nodes = HeftHelper.to_nodes(resource_manager.get_resources())
+ self.estimator = estimator
+ ##TODO: Build it
+ self.task_map = task_map
+ ##TODO: Build it
+ self.node_map = node_map
+
+ self.fixed_schedule_part = fixed_schedule_part
+ # construct initial mapping
+ # eliminate all already scheduled tasks
+
+ pass
+
+
+ def _create_helping_structures(self, chromo):
+ # copy fixed schedule
+ # TODO: make common utility function with SimpleRandomizedHeuristic
+ def is_last_version_of_task_executing(item):
+ return item.state == ScheduleItem.EXECUTING or item.state == ScheduleItem.FINISHED or item.state == ScheduleItem.UNSTARTED
+
+ schedule_mapping = {node: [item for item in items] for (node, items) in self.fixed_schedule_part.mapping.items()}
+
+ finished_tasks = [item.job.id for (node, items) in self.fixed_schedule_part.mapping.items() for item in items if is_last_version_of_task_executing(item)]
+ finished_tasks = set([self.workflow.head_task.id] + finished_tasks)
+
+ unfinished = [task for task in self.workflow.get_all_unique_tasks() if not task.id in finished_tasks]
+
+ ready_tasks = [task.id for task in self._get_ready_tasks(unfinished, finished_tasks)]
+
+
+
+ chrmo_mapping = {item.job.id: self.node_map[node.name] for (node, items) in self.fixed_schedule_part.mapping.items() for item in items if is_last_version_of_task_executing(item)}
+
+ for (node_name, tasks) in chromo.items():
+ for tsk_id in tasks:
+ chrmo_mapping[tsk_id] = self.node_map[node_name]
+
+ task_to_node = {item.job.id: (node, item.start_time, item.end_time) for (node, items) in self.fixed_schedule_part.mapping.items() for item in items if is_last_version_of_task_executing(item)}
+
+ return (schedule_mapping, finished_tasks, ready_tasks, chrmo_mapping, task_to_node)
+
+
+ def __call__(self, chromo, current_time):
+
+ (schedule_mapping, finished_tasks, ready_tasks, chrmo_mapping, task_to_node) = self._create_helping_structures(chromo)
+
+ chromo_copy = dict()
+ for (nd_name, items) in chromo.items():
+ chromo_copy[nd_name] = []
+ for item in items:
+ chromo_copy[nd_name].append(item)
+
+ alive_nodes = [node for node in self.nodes if node.state != Node.Down]
+ if len(alive_nodes) == 0:
+            raise Exception("There are no alive nodes")
+
+ while len(ready_tasks) > 0:
+
+ for node in alive_nodes:
+ if len(chromo_copy[node.name]) == 0:
+ continue
+ if node.state == Node.Down:
+ continue
+
+ ## TODO: Urgent! completely rethink this procedure
+
+ tsk_id = None
+ for i in range(len(chromo_copy[node.name])):
+ if chromo_copy[node.name][i] in ready_tasks:
+ tsk_id = chromo_copy[node.name][i]
+ break
+
+
+ if tsk_id is not None:
+ task = self.task_map[tsk_id]
+ #del chromo_copy[node.name][0]
+ chromo_copy[node.name].remove(tsk_id)
+ ready_tasks.remove(tsk_id)
+
+ time_slots, runtime = self._get_possible_execution_times(
+ schedule_mapping,
+ task_to_node,
+ chrmo_mapping,
+ task,
+ node,
+ current_time)
+
+ time_slot = next(time_slots)
+ start_time = time_slot[0]
+ end_time = start_time + runtime
+
+ item = ScheduleItem(task, start_time, end_time)
+
+ # need to account current time
+ Schedule.insert_item(schedule_mapping, node, item)
+ task_to_node[task.id] = (node, start_time, end_time)
+
+ finished_tasks.add(task.id)
+
+ #ready_children = [child for child in task.children if self._is_child_ready(finished_tasks, child)]
+ ready_children = self._get_ready_tasks(task.children, finished_tasks)
+ for child in ready_children:
+ ready_tasks.append(child.id)
+
+
+ schedule = Schedule(schedule_mapping)
+ return schedule
+
+ ##TODO: redesign all these functions later
+
+ def _get_ready_tasks(self, children, finished_tasks):
+ def _is_child_ready(child):
+ # ids = [p.id for p in child.parents]
+ # result = False in [id in finished_tasks for id in ids]
+ # return not result
+ for p in child.parents:
+ if p.id not in finished_tasks:
+ return False
+ return True
+ ready_children = [child for child in children if _is_child_ready(child)]
+ return ready_children
+
+ ## TODO: remove this obsolete code later
+ # def _find_slots(self,
+ # schedule_mapping,
+ # node,
+ # comm_ready,
+ # runtime,
+ # current_time):
+ # node_schedule = schedule_mapping.get(node, list())
+ # free_time = 0 if len(node_schedule) == 0 else node_schedule[-1].end_time
+ # ## TODO: refactor it later
+ # f_time = max(free_time, comm_ready)
+ # f_time = max(f_time, current_time)
+ # base_variant = [(f_time, f_time + runtime)]
+ # zero_interval = [] if len(node_schedule) == 0 else [(0, node_schedule[0].start_time)]
+ # middle_intervals = [(node_schedule[i].end_time, node_schedule[i + 1].start_time) for i in range(len(node_schedule) - 1)]
+ # intervals = zero_interval + middle_intervals + base_variant
+ #
+ # ## TODO: rethink rounding
+ # result = [(st, end) for (st, end) in intervals if (current_time < st or abs((current_time - st)) < 0.01) and st >= comm_ready and (runtime < (end - st) or abs((end - st) - runtime) < 0.01)]
+ # return result
+
+ def _find_slots(self,
+ schedule_mapping,
+ node,
+ comm_ready,
+ runtime,
+ current_time):
+
+ node_schedule = schedule_mapping.get(node, list())
+ return FreeSlotIterator(current_time, comm_ready, runtime, node_schedule)
+
+
+
+ def _comm_ready_func(self,
+ task_to_node,
+ chrmo_mapping,
+ task,
+ node):
+ estimate = self.estimator.estimate_transfer_time
+ ##TODO: remake this stub later.
+ if len(task.parents) == 1 and self.workflow.head_task.id == list(task.parents)[0].id:
+ return 0
+
+ ## TODO: replace it with commented string below later
+ res_list = []
+ for p in task.parents:
+ c1 = task_to_node[p.id][2]
+ c2 = estimate(node, chrmo_mapping[p.id], task, p)
+ res_list.append(c1 + c2)
+
+ return max(res_list)
+ ##return max([task_to_node[p.id][2] + estimate(node, chrmo_mapping[p.id], task, p) for p in task.parents])
+
+ def _get_possible_execution_times(self,
+ schedule_mapping,
+ task_to_node,
+ chrmo_mapping,
+ task,
+ node,
+ current_time):
+        ## pay attention to the last element in the resulting sequence:
+        ## it represents all of the time available on the node after it completes all of its work
+        ## (if such an interval can exist)
+        ## time_slots = [(st1, end1), (st2, end2), ..., (st_last, st_last + runtime)]
+ runtime = self.estimator.estimate_runtime(task, node)
+ comm_ready = self._comm_ready_func(task_to_node,
+ chrmo_mapping,
+ task,
+ node)
+ time_slots = self._find_slots(schedule_mapping,
+ node,
+ comm_ready,
+ runtime,
+ current_time)
+ return time_slots, runtime
+ pass
+
+class FreeSlotIterator:
+
+ def __init__(self, current_time, comm_ready, runtime, node_schedule):
+ self.current = 0
+ self.can_move = True
+
+ self.current_time = current_time
+ self.comm_ready = comm_ready
+ self.runtime = runtime
+ self.node_schedule = node_schedule
+ self.size = len(node_schedule) - 1
+
+ def __iter__(self):
+ return self
+
+    def __next__(self):
+        if self.current < self.size:
+            i = self.current
+            while i < self.size:
+                st = self.node_schedule[i].end_time
+                end = self.node_schedule[i + 1].start_time
+                # advance only past gaps that do not fit, so the last gap is not skipped
+                if self._check(st, end):
+                    break
+                i += 1
+            # continue the search from the next gap on the following call
+            self.current = i + 1
+
+            if i < self.size:
+                return st, end
+ if self.can_move:
+ self.can_move = False
+ free_time = 0 if len(self.node_schedule) == 0 else self.node_schedule[-1].end_time
+ ## TODO: refactor it later
+ f_time = max(free_time, self.comm_ready)
+ f_time = max(f_time, self.current_time)
+ base_variant = (f_time, f_time + self.runtime)
+ return base_variant
+ raise StopIteration()
+
+ def _check(self, st, end):
+ #return (self.current_time < st or abs((self.current_time - st)) < 0.01) and st >= self.comm_ready and (self.runtime < (end - st) or abs((end - st) - self.runtime) < 0.01)
+ return (0.00001 < (st - self.current_time)) and st >= self.comm_ready and (0.00001 < (end - st) - self.runtime)
+
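+# Editorial sketch of the iterator's behaviour (hypothetical numbers): for a
+# node_schedule occupying (0, 5) and (10, 15) with runtime = 3, comm_ready = 4
+# and current_time = 0, the iterator yields the gap (5, 10) first, then the
+# open-ended slot after the last item, (15, 18); a further next() raises
+# StopIteration.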
diff --git a/src/algs/common/__init__.py b/src/algs/common/__init__.py
new file mode 100644
index 0000000..f4bb036
--- /dev/null
+++ b/src/algs/common/__init__.py
@@ -0,0 +1 @@
+__author__ = 'nikolay'
diff --git a/src/algs/common/individuals.py b/src/algs/common/individuals.py
new file mode 100644
index 0000000..6ea9b98
--- /dev/null
+++ b/src/algs/common/individuals.py
@@ -0,0 +1,42 @@
+from uuid import uuid4
+from deap import creator
+from deap.base import Fitness
+
+"""
+This file contains only wrapper types for individuals
+participating in evolution processes. It was created because instances of
+pure Python base types such as dict or list cannot be extended with additional fields,
+which is useful for carrying information along during an evolution process.
+(NOTE: we could of course create and use wrapper classes explicitly after entering an evolution procedure,
+but for the sake of simplicity, and by analogy with the deap code examples, it was decided to use the scheme described above.)
+
+"""
+
+
+class FitnessStd(Fitness):
+ weights = (-1.0, -1.0)
+
+
+## we cannot create uid property through creator.create due to its internal algorithm
+class DictBasedIndividual(dict):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.uid = uuid4()
+ pass
+
+
+class ListBasedIndividual(list):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.uid = uuid4()
+ pass
+
+
+class FitAdapter:
+ """
+ simple class-adapter for dealing with deap's fitness-based operators
+ """
+ def __init__(self, entity, values=()):
+ self.entity = entity
+ self.fitness = FitnessStd(values)
+ pass
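+
+
+# Editorial sketch of why the wrappers exist: a plain dict instance cannot take
+# extra attributes, while DictBasedIndividual can. Guarded so it only runs when
+# the module is executed directly, not on import:
+if __name__ == "__main__":
+    ind = DictBasedIndividual({"node_1": ["task_1", "task_2"]})
+    print(ind.uid)       # every individual carries its own uuid
+    plain = dict()
+    try:
+        plain.uid = "x"  # plain dict: AttributeError, no instance __dict__
+    except AttributeError as exc:
+        print(exc)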
diff --git a/src/algs/common/particle_operations.py b/src/algs/common/particle_operations.py
new file mode 100644
index 0000000..e4e7df5
--- /dev/null
+++ b/src/algs/common/particle_operations.py
@@ -0,0 +1,187 @@
+from numbers import Number
+from uuid import uuid4
+import math
+from src.algs.common.individuals import FitAdapter
+
+"""
+This file contains particle classes for PSO and GSA
+with methods for transforming between combinatorial and continuous spaces
+and vice versa (see the editorial velocity-arithmetic sketch at the end of this module)
+"""
+
+##TODO: it can be upgraded with numpy classes to speed up execution
+##TODO: write test cases
+
+
+class Particle(FitAdapter):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.uid = uuid4()
+ self._velocity = None
+ self._best = None
+
+ def _get_best(self): return self._best
+ def _set_best(self, b): self._best = b
+
+ def _get_velocity(self): return self._velocity
+ def _set_velocity(self, v): self._velocity = v
+
+ best = property(_get_best, _set_best)
+ velocity = property(_get_velocity, _set_velocity)
+ pass
+
+
+
+class MappingParticle(Particle):
+
+ def __init__(self, mapping):
+ super().__init__(mapping)
+ self.velocity = MappingParticle.Velocity({})
+ pass
+
+ def __sub__(self, other):
+ # return Position({k: self[k] for k in self.keys() - other.keys()})
+ return MappingParticle.Velocity({item: 1.0 for item in self.entity.items()# - other.entity.items()
+ })
+
+ def __mul__(self, other):
+ if isinstance(other, Number):
+ return MappingParticle.Velocity({k: other for k, v in self.entity.items()})
+        raise ValueError("Other does not have a suitable type for multiplication")
+
+ def emptify(self):
+ return MappingParticle.Velocity({})
+
+ class Velocity(dict):
+ def __mul__(self, other):
+ if isinstance(other, Number):
+ if other < 0:
+ raise ValueError("Only positive numbers can be used for operations with velocity")
+ return MappingParticle.Velocity({k: 1.0 if v * other > 1.0 else v * other for k, v in self.items()})
+            raise ValueError("{0} does not have a suitable type for multiplication".format(other))
+
+ def __add__(self, other):
+ vel = MappingParticle.Velocity({k: max(self.get(k, 0), other.get(k, 0)) for k in set(self.keys()).union(other.keys())})
+ return vel
+
+ def __truediv__(self, denumenator):
+ if isinstance(denumenator, Number):
+ return self.__mul__(1/denumenator)
+            raise ValueError("{0} does not have a suitable type for division".format(denumenator))
+
+ def cutby(self, alpha):
+ return MappingParticle.Velocity({k: v for k, v in self.items() if v >= alpha})
+
+ def vector_length(self):
+ return len(self)
+
+
+ pass
+ pass
+
+
+class OrderingParticle(Particle):
+
+ @staticmethod
+ def _to_limit(val, min, max):
+ if val > max:
+ return max
+ if val < min:
+ return min
+ return val
+
+ def __init__(self, ordering):
+ """
+ :param ordering: has the following form
+ {
+ task_id: value
+ }
+ """
+ super().__init__(ordering)
+ pass
+
+ def __sub__(self, other):
+ if not isinstance(other, OrderingParticle):
+ raise ValueError("Invalid type of the argument for this operation")
+ velocity = OrderingParticle.Velocity({task_id: self.entity[task_id] - other.entity[task_id]
+ for task_id in self.entity})
+ return velocity
+
+ def __add__(self, other):
+ if not isinstance(other, OrderingParticle.Velocity):
+ raise ValueError("Invalid type of the argument for this operation: {0}".format(type(other)))
+
+ if len(other) == 0:
+ return OrderingParticle({task_id: self.entity[task_id] for task_id in self.entity})
+
+ velocity = OrderingParticle({task_id: self.entity[task_id] + other[task_id]
+ for task_id in self.entity})
+ return velocity
+
+    def limit_by(self, min=-1, max=1):
+ for t in self.entity:
+ self.entity[t] = OrderingParticle._to_limit(self.entity[t], min, max)
+ pass
+
+ def emptify(self):
+ return OrderingParticle.Velocity({k: 0.0 for k in self.entity})
+
+ class Velocity(dict):
+
+ def __mul__(self, other):
+ if isinstance(other, Number):
+ if other < 0:
+ raise ValueError("Only positive numbers can be used for operations with velocity")
+ return OrderingParticle.Velocity({k: 1.0 if v * other > 1.0 else v * other for k, v in self.items()})
+            raise ValueError("{0} does not have a suitable type for multiplication".format(other))
+
+ def __add__(self, other):
+ if isinstance(other, OrderingParticle.Velocity):
+ if len(self) == 0:
+ return OrderingParticle.Velocity({task_id: other[task_id] for task_id in other.keys()})
+ vel = OrderingParticle.Velocity({task_id: self[task_id] + other[task_id] for task_id in self.keys()})
+ return vel
+            raise ValueError("{0} does not have a suitable type for addition".format(other))
+
+ def __truediv__(self, denumenator):
+ if isinstance(denumenator, Number):
+ return self.__mul__(1/denumenator)
+            raise ValueError("{0} does not have a suitable type for division".format(denumenator))
+
+ def limit_by(self, min=-1, max=1):
+ for t in self:
+ self[t] = OrderingParticle._to_limit(self[t], min, max)
+ pass
+
+ def vector_length(self):
+ return math.sqrt(sum(val*val for t, val in self.items()))/len(self)
+ pass
+ pass
+
+
+class CompoundParticle(Particle):
+ def __init__(self, mapping_particle, ordering_particle):
+ super().__init__(None)
+ self.mapping = mapping_particle
+ self.ordering = ordering_particle
+ self._best = None
+ pass
+
+ def _get_best(self):
+ return self._best
+
+ def _set_best(self, value):
+ self._best = value
+ if value is not None:
+ self.mapping.best = value.mapping
+ self.ordering.best = value.ordering
+ else:
+ self.mapping.best = None
+ self.ordering.best = None
+ pass
+
+ best = property(_get_best, _set_best)
+ pass
+
+
+
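+# Editorial sketch of the velocity arithmetic defined above, using hypothetical
+# (task_id, node_name) keys; guarded so it only runs when the module is
+# executed directly (e.g. python -m src.algs.common.particle_operations):
+if __name__ == "__main__":
+    v1 = MappingParticle.Velocity({("t1", "n1"): 0.8, ("t2", "n2"): 0.4})
+    v2 = MappingParticle.Velocity({("t2", "n2"): 0.9})
+    # '+' keeps the element-wise maximum, '*' scales and clips to 1.0
+    print((v1 + v2) * 0.5)  # values 0.4 and 0.45 (key order may vary)
+    print(v1.cutby(0.5))    # keeps only entries with value >= 0.5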
diff --git a/src/algs/common/utilities.py b/src/algs/common/utilities.py
new file mode 100644
index 0000000..365cc7e
--- /dev/null
+++ b/src/algs/common/utilities.py
@@ -0,0 +1,106 @@
+import random
+def mapping_as_vector(mapping):
+ """
+    mapping MUST be a dictionary.
+    Converts a position to a single vector by joining the mapping and ordering structures.
+    All workflow tasks are sorted by task.id, which makes the result idempotent across multiple runs.
+ """
+ mapp_string = [node_name for task_id, node_name in sorted(mapping.items(), key=lambda x: x[0])]
+ return mapp_string
+
+def cannot_be_zero(number, replace_for_zero=0.000001):
+ return replace_for_zero if round(abs(number), 6) < 0.000001 else number
+
+def gather_info(logbook, stats, g, pop, best, need_to_print=True):
+ """
+    For the co-evolution scheme it is required to record the best individual instead of the population minimum.
+ """
+ data = stats.compile(pop) if stats is not None else None
+
+ if data is None:
+ return None
+
+ if best is not None:
+ # TODO: very bad practice. remake it.
+ data['best'] = best.fitness.values[0] # best[1].values[0]
+
+ if logbook is not None:
+ logbook.record(gen=g, evals=len(pop), **data)
+ if need_to_print:
+ if best is None:
+ print(logbook.stream)
+ else:
+ print(logbook.stream)#print(logbook.stream + "\t" + str(round(data['best'], 2)))
+ return data
+
+def logbooks_in_data(logbooks, with_best=False, need_print=False):
+ """
+    Reduce several logbooks from an experiment to a single logbook with averaged data.
+    Used in CPSO and CGSA.
+ """
+ res = dict()
+ for logbook in logbooks:
+ for it in logbook:
+ if (it['gen'], 'avg') in res:
+ res[(it['gen'], 'avg')] += it['avg']
+ res[(it['gen'], 'min')] += it['min']
+ if with_best:
+ res[(it['gen'], 'best')] += it['best']
+ else:
+ res[(it['gen'], 'avg')] = it['avg']
+ res[(it['gen'], 'min')] = it['min']
+ if with_best:
+ res[(it['gen'], 'best')] = it['best']
+ log_len = len(logbooks)
+ for it in range(len(logbooks[0])):
+ res[(it, 'avg')] /= log_len
+ res[(it, 'min')] /= log_len
+ if with_best:
+ res[(it, 'best')] /= log_len
+ if need_print:
+ if with_best:
+ print(str(res[(it, 'avg')]) + "\t" + str(res[(it, 'min')]) + "\t" + str(res[(it, 'best')]))
+ else:
+ print(str(res[(it, 'avg')]) + "\t" + str(res[(it, 'min')]))
+ return res
+
+def data_to_file(file_path, gen, data, with_best=False, comment=None):
+ """
+    Write logbook data to a file.
+    Used in CPSO and CGSA.
+ """
+ file = open(file_path, 'w')
+ if comment is not None:
+ file.write("#" + comment + "\n")
+ if with_best:
+ file.write("#gen\tavg\tmin\tbest\n")
+ else:
+ file.write("#gen\tavg\tmin\n")
+ for i in range(gen):
+ if with_best:
+ file.write(str(i) + "\t" + str(data[(i, 'avg')]) + "\t" + str(data[(i, 'min')]) + "\t" + str(data[(i, 'best')]) + "\n")
+ else:
+ file.write(str(i) + "\t" + str(data[(i, 'avg')]) + "\t" + str(data[(i, 'min')]) + "\n")
+ file.close()
+
+def unzip_result(tuple_list):
+ """
+    Unzip a list of tuples into two lists.
+ """
+ fst_list = [fst for fst, snd in tuple_list]
+ snd_list = [snd for fst, snd in tuple_list]
+ return fst_list, snd_list
+
+def weight_random(list):
+ """
+    Return the index of an element chosen by weighted random selection.
+ """
+ summ = sum(list)
+ norm = [v / summ for v in list]
+ rnd = random.random()
+ idx = 0
+ stack = norm[idx]
+ while rnd >= stack:
+ idx += 1
+ stack += norm[idx]
+ return idx
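+
+
+# Editorial sketch of weight_random with hypothetical weights; guarded so it
+# only runs when the module is executed directly:
+if __name__ == "__main__":
+    # weights 1, 3 and 6 select indices 0, 1 and 2 with probabilities ~0.1, ~0.3 and ~0.6
+    picks = [weight_random([1, 3, 6]) for _ in range(10000)]
+    print([round(picks.count(i) / len(picks), 2) for i in range(3)])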
diff --git a/src/algs/ga/GAFunctions2.py b/src/algs/ga/GAFunctions2.py
new file mode 100644
index 0000000..5c80b4a
--- /dev/null
+++ b/src/algs/ga/GAFunctions2.py
@@ -0,0 +1,279 @@
+## from Buyya
+import random
+
+from src.algs.common.NewSchedulerBuilder import NewScheduleBuilder
+from src.algs.heft.HeftHelper import HeftHelper
+from src.core.environment.BaseElements import Node
+from src.core.environment.ResourceManager import ScheduleItem
+from src.core.environment.Utility import Utility
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+
+
+def mark_finished(schedule):
+ for (node, items) in schedule.mapping.items():
+ for item in items:
+ item.state = ScheduleItem.FINISHED
+
+def unmoveable_tasks(fixed_schedule_part):
+ def is_last_version_of_task_executing(item):
+ return item.state == ScheduleItem.EXECUTING \
+ or item.state == ScheduleItem.FINISHED \
+ or item.state == ScheduleItem.UNSTARTED
+    # there are only finished, executing and unstarted tasks planned by HEFT here; perhaps it should have been called unplanned tasks
+ finished_tasks = [item.job.id for (node, items) in fixed_schedule_part.mapping.items()
+ for item in items if is_last_version_of_task_executing(item)]
+ return finished_tasks
+
+class GAFunctions2:
+ ## A chromosome representation
+ ##{
+ ## node_name: task1.id, task2.id, ... #(order of tasks is important)
+ ## ...
+ ##}
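+    ## A concrete example of such a chromosome (hypothetical task/node names),
+    ## added as an editorial illustration:
+    ##{
+    ##    "node_1": ["task_3", "task_1"],
+    ##    "node_2": [],
+    ##    "node_3": ["task_2"]
+    ##}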
+ def __init__(self, workflow, resource_manager, estimator):
+
+ self.counter = 0
+ self.workflow = workflow
+
+ ##interface Estimator
+
+ self.estimator = estimator
+ self.resource_manager = resource_manager
+
+ nodes = resource_manager.get_nodes()#list(HeftHelper.to_nodes(resource_manager.get_resources()))
+ ranking = HeftHelper.build_ranking_func(nodes, lambda job, agent: estimator.estimate_runtime(job, agent),
+ lambda ni, nj, A, B: estimator.estimate_transfer_time(A, B, ni, nj))
+ sorted_tasks = ranking(self.workflow)
+
+ self.nodes = nodes
+ self.sorted_tasks = sorted_tasks
+ self.workflow_size = len(sorted_tasks)
+
+ self.task_map = {task.id: task for task in sorted_tasks}
+ self.node_map = {node.name: node for node in nodes}
+
+ self.initializing_alg = SimpleRandomizedHeuristic(self.workflow, self.nodes, self.estimator)
+
+ self.initial_chromosome = None##GAFunctions.schedule_to_chromosome(initial_schedule)
+ pass
+
+ @staticmethod
+ def schedule_to_chromosome(schedule, fixed_schedule_part=None):
+ if schedule is None:
+ return None
+ def ids(items):
+ return [item.job.id for item in items if item.state != ScheduleItem.FAILED]
+ chromosome = {node.name: ids(items) for (node, items) in schedule.mapping.items()}
+ if fixed_schedule_part is None:
+ return chromosome
+
+ fixed_chromosome = {node.name: ids(items) for (node, items) in fixed_schedule_part.mapping.items()}
+
+ for node, tasks in fixed_chromosome.items():
+ for t in tasks:
+ chromosome[node].remove(t)
+
+ count = 0
+ for node, tasks in chromosome.items():
+ count += len(tasks)
+ if count == 0:
+ return None
+
+ return chromosome
+
+ def build_initial(self, fixed_schedule_part, current_time):
+ def initial():
+ return self.random_chromo(fixed_schedule_part, current_time)
+ return initial
+
+ def random_chromo(self, fixed_schedule_part, current_time):
+
+ res = random.random()
+ # # # TODO:
+ if res > 1.00 and self.initial_chromosome is not None:
+ return self.initial_chromosome
+
+ ##return [self.random_chromo() for j in range(self.size)]
+ sched = self.initializing_alg.schedule(fixed_schedule_part, current_time)
+ #TODO: remove it later
+ # mark_finished(sched)
+ # seq_time_validaty = Utility.validateNodesSeq(sched)
+ # dependency_validaty = Utility.validateParentsAndChildren(sched, self.workflow)
+
+ ## TODO: Urgent! Need to remove failed Tasks
+ chromo = GAFunctions2.schedule_to_chromosome(sched)
+ if fixed_schedule_part is not None:
+ # remove fixed_schedule_part from chromosome
+ # def is_last_version_of_task_executing(item):
+ # return item.state == ScheduleItem.EXECUTING or item.state == ScheduleItem.FINISHED or item.state == ScheduleItem.UNSTARTED
+ # # there is only finished, executing and unstarted tasks planned by Heft, perhaps it should have been called unplanned tasks
+ # finished_tasks = [item.job.id for (node, items) in fixed_schedule_part.mapping.items() for item in items if is_last_version_of_task_executing(item)]
+ finished_tasks = unmoveable_tasks(fixed_schedule_part)
+
+ # TODO: make common utility function with ScheduleBuilder and SimpleRandomizedHeuristic
+ chromo = {node_name: [id for id in ids if not (id in finished_tasks)] for (node_name, ids) in chromo.items()}
+
+
+ return chromo
+
+
+
+ def build_fitness(self, fixed_schedule_part, current_time):
+ # builder = ScheduleBuilder(self.workflow, self.resource_manager, self.estimator, self.task_map, self.node_map, fixed_schedule_part)
+ builder = NewScheduleBuilder(self.workflow, self.resource_manager, self.estimator, self.task_map, self.node_map, fixed_schedule_part)
+
+ def fitness(chromo):
+
+ ## TODO: remove it later.
+ # t_ident = str(threading.current_thread().ident)
+ # t_name = str(threading.current_thread().name)
+ # print("Time: " + str(current_time) + " Running ga in isolated thread " + t_name + " " + t_ident)
+
+ ## value of fitness function is the last time point in the schedule
+ ## built from the chromo
+ ## chromo is {Task:Node},{Task:Node},... - fixed length
+
+ schedule = builder(chromo, current_time)
+ time = Utility.makespan(schedule)
+
+ # time = 1
+ # ## TODO: remove it later.
+ # k = 0
+ # for i in range(100000):
+ # k += i
+
+ return (1/time,)
+ ## TODO: redesign it later
+ return fitness
+
+ def build_schedule(self, chromo, fixed_schedule_part, current_time):
+ # builder = ScheduleBuilder(self.workflow, self.resource_manager, self.estimator, self.task_map, self.node_map, fixed_schedule_part)
+ builder = NewScheduleBuilder(self.workflow, self.resource_manager, self.estimator, self.task_map, self.node_map, fixed_schedule_part)
+ schedule = builder(chromo, current_time)
+ return schedule
+
+ def crossover(self, child1, child2):
+
+ ## TODO: only for debug. remove it later.
+ ##return
+
+ #estimate size of a chromosome
+
+ alive_nodes = [node for node in self.nodes if node.state != Node.Down]
+ if len(alive_nodes) == 0:
+ raise Exception(" There are only dead nodes!!!!!!!!!!!!!")
+
+
+ # TODO: corner case when fixed schedule is complete. need to resolve this kind of case early.
+ size = len([item for (node_name, items) in child1.items() for item in items])
+ if size == 0:
+ raise Exception("Chromosome is empty")
+ # return None
+ i1 = random.randint(0, size - 1)
+ i2 = random.randint(0, size - 1)
+ index1 = min(i1, i2)
+ index2 = max(i1, i2)
+
+ def chromo_to_seq(chromo):
+ result = []
+ for (nd_name, items) in chromo.items():
+ result += [(nd_name, item) for item in items]
+ return result
+
+ def fill_chromo(chromo, seq):
+ chromo.clear()
+ for node in self.nodes:
+ chromo[node.name] = []
+ for (nd_name, tsk_id) in seq:
+ chromo[nd_name].append(tsk_id)
+
+ ch1 = chromo_to_seq(child1)
+ ch2 = chromo_to_seq(child2)
+
+ if len(ch1) != size or len(ch2) != size:
+ print("Ch1: {0}".format(len(ch1)))
+ print("Ch2: {0}".format(len(ch2)))
+ raise Exception("Transformed chromosome is broken")
+
+
+ window = dict()
+ for i in range(index1, index2):
+ tsk_id = ch1[i][1]
+ window[ch1[i][1]] = i
+
+ for i in range(size):
+ tsk_id = ch2[i][1]
+ if tsk_id in window:
+ buf = ch1[window[tsk_id]]
+ ch1[window[tsk_id]] = ch2[i]
+ ch2[i] = buf
+
+ fill_chromo(child1, ch1)
+ fill_chromo(child2, ch2)
+ return child1, child2
+
+ # def swap_mutation(self, chromo):
+ # node_index = random.randint(0, len(self.nodes) - 1)
+ # node_seq = chromo[self.nodes[node_index].name]
+ #
+ # while True:
+
+ def mutation(self, chromosome):
+ ## TODO: only for debug. remove it later.
+ #return chromosome
+ # simply change one node of task mapping
+        # TODO: add a check for the case when all nodes are dead (it is a very rare situation, so it is not considered for now)
+ alive_nodes = [node for node in self.nodes if node.state != Node.Down]
+ node1 = alive_nodes[random.randint(0, len(alive_nodes) - 1)]
+ node2 = alive_nodes[random.randint(0, len(alive_nodes) - 1)]
+
+ ch = chromosome[node1.name]
+ if len(chromosome[node1.name]) > 0:
+ length = len(chromosome[node1.name])
+ ind = random.randint(0, length - 1)
+ dna = chromosome[node1.name][ind]
+ del chromosome[node1.name][ind]
+ chromosome[node2.name].append(dna)
+ return chromosome
+
+ def sweep_mutation(self, chromosome):
+ ## TODO: only for debug. remove it later.
+ #return chromosome
+
+        def is_dependent(tsk1, tsk2):
+            # True if tsk2 is an ancestor of tsk1; every parent is checked, not only the first one
+            for p in tsk1.parents:
+                if p.id == tsk2.id:
+                    return True
+                if is_dependent(p, tsk2):
+                    return True
+            return False
+
+ #return chromosome
+        # TODO: add a check for the case when all nodes are dead (it is a very rare situation, so it is not considered for now)
+ alive_nodes = [node for node in self.nodes if node.state != Node.Down]
+ node = alive_nodes[random.randint(0, len(alive_nodes) - 1)]
+
+
+ ch = chromosome[node.name]
+ if len(chromosome[node.name]) > 0:
+ length = len(chromosome[node.name])
+ ind = random.randint(0, length - 1)
+ tsk1 = self.task_map[chromosome[node.name][ind]]
+ dna = chromosome[node.name][ind]
+
+ count = 0
+ while count < 5:
+ ind1 = random.randint(0, length - 1)
+
+ tsk2 = self.task_map[chromosome[node.name][ind1]]
+ if (not is_dependent(tsk1, tsk2)) and (not is_dependent(tsk2, tsk1)):
+ chromosome[node.name][ind] = chromosome[node.name][ind1]
+ chromosome[node.name][ind1] = dna
+ break
+ else:
+ count += 1
+
+ return chromosome
+
+ pass
+
diff --git a/src/algs/ga/GAImpl.py b/src/algs/ga/GAImpl.py
new file mode 100644
index 0000000..9136adf
--- /dev/null
+++ b/src/algs/ga/GAImpl.py
@@ -0,0 +1,366 @@
+import copy
+import random
+from threading import Lock
+
+from deap import tools
+from deap import creator
+from deap import base
+import deap
+from deap.tools import History
+import numpy
+from src.algs.ga.GAFunctions2 import GAFunctions2
+from src.algs.heft.HeftHelper import HeftHelper
+from src.core.environment.ResourceManager import Schedule
+from src.core.environment.Utility import timing
+
+
+class SynchronizedCheckpointedGA:
+
+ def __init__(self):
+ self.current_result = None
+ self.lock = Lock()
+
+ self._is_stopped = False
+ self.stop_lock = Lock()
+ self._current_pop = None
+ pass
+
+ ## need to implement
+ def _construct_result(self, result):
+ return result
+
+
+ def _save_result(self, result):
+ self.lock.acquire()
+ self.current_result = result
+ self.lock.release()
+ pass
+
+ def _get_result(self):
+ self.lock.acquire()
+ result = self.current_result
+ self.lock.release()
+ return result
+
+ def _save_pop(self, pop):
+ self.lock.acquire()
+ self._current_pop = copy.deepcopy(pop)
+ self.lock.release()
+ pass
+
+
+
+ def get_result(self):
+ self.lock.acquire()
+ result = self.current_result
+ self.lock.release()
+ constructed = self._construct_result(result)
+ return constructed
+
+ def get_pop(self):
+ self.lock.acquire()
+ result = self._current_pop
+ self.lock.release()
+ return result
+
+ def stop(self):
+ self.stop_lock.acquire()
+ self._is_stopped = True
+ self.stop_lock.release()
+
+ def is_stopped(self):
+ self.stop_lock.acquire()
+ result = self._is_stopped
+ self.stop_lock.release()
+ return result
+
+
+class GAFactory:
+
+ DEFAULT_IDEAL_FLOPS = 20
+ DEFAULT_POPULATION = 50
+ DEFAULT_CROSSOVER_PROBABILITY = 0.8
+ DEFAULT_REPLACING_MUTATION_PROBABILITY = 0.5
+ DEFAULT_SWEEP_MUTATION_PROBABILITY = 0.4
+ DEFAULT_GENERATIONS = 100
+ DEFAULT_INIT_SCHED_PERCENT = 0.05
+
+ _default_instance = None
+
+ @staticmethod
+ def default():
+ if GAFactory._default_instance is None:
+ GAFactory._default_instance = GAFactory()
+ return GAFactory._default_instance
+
+ def __init__(self):
+ pass
+
+ def create_ga(self, *args, **kwargs):
+ is_silent = kwargs["silent"]
+ wf = kwargs["wf"]
+ rm = kwargs["resource_manager"]
+ estimator = kwargs["estimator"]
+ ga_params = kwargs["ga_params"]
+
+
+ POPSIZE = ga_params.get("population", self.DEFAULT_POPULATION)
+ INIT_SCHED_PERCENT = ga_params.get("init_sched_percent", self.DEFAULT_INIT_SCHED_PERCENT)
+ CXPB = ga_params.get('crossover_probability', self.DEFAULT_CROSSOVER_PROBABILITY)
+ MUTPB = ga_params.get('replacing_mutation_probability', self.DEFAULT_REPLACING_MUTATION_PROBABILITY)
+ NGEN = ga_params.get('generations', self.DEFAULT_GENERATIONS)
+ SWEEPMUTPB = ga_params.get('sweep_mutation_probability', self.DEFAULT_SWEEP_MUTATION_PROBABILITY)
+
+ Kbest = ga_params.get('Kbest', POPSIZE)
+
+
+ ga_functions = kwargs.get("ga_functions", GAFunctions2(wf, rm, estimator))
+
+ check_evolution_for_stopping = kwargs.get("check_evolution_for_stopping", True)
+
+
+ def default_fixed_schedule_part(resource_manager):
+ fix_schedule_part = Schedule({node: [] for node in HeftHelper.to_nodes(resource_manager.get_resources())})
+ return fix_schedule_part
+
+
+
+ ##================================
+ ##Create genetic algorithm here
+ ##================================
+ creator.create("FitnessMax", base.Fitness, weights=(1.0,))
+ creator.create("Individual", dict, fitness=creator.FitnessMax)
+
+ toolbox = base.Toolbox()
+ # Attribute generator
+ toolbox.register("attr_bool", ga_functions.build_initial(None, 0))
+ # Structure initializers
+ toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_bool)
+ toolbox.register("population", tools.initRepeat, list, toolbox.individual)
+
+
+ ## default case
+ fix_schedule_part = default_fixed_schedule_part(rm)
+ toolbox.register("evaluate", ga_functions.build_fitness(fix_schedule_part, 0))
+
+
+ toolbox.register("mate", ga_functions.crossover)
+ toolbox.register("mutate", ga_functions.mutation)
+ # toolbox.register("select", tools.selTournament, tournsize=4)
+ toolbox.register("select", tools.selRoulette)
+ # toolbox.register("select", tools.selBest)
+ # toolbox.register("select", tools.selTournamentDCD)
+ # toolbox.register("select", tools.selNSGA2)
+
+ repeated_best_count = 10
+
+ class GAComputation(SynchronizedCheckpointedGA):
+
+ EVOLUTION_STOPPED_ITERATION_NUMBER = "EvoStpdIterNum"
+
+ def __init__(self):
+ super().__init__()
+ pass
+
+ @timing
+ def __call__(self, fixed_schedule_part, initial_schedule, current_time=0, initial_population=None):
+ print("Evaluating...")
+ toolbox.register("evaluate", ga_functions.build_fitness(fixed_schedule_part, current_time))
+ toolbox.register("attr_bool", ga_functions.build_initial(fixed_schedule_part, current_time))
+ # Structure initializers
+ toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_bool)
+ toolbox.register("population", tools.initRepeat, list, toolbox.individual)
+
+
+ ga_functions.initial_chromosome = GAFunctions2.schedule_to_chromosome(initial_schedule, fixed_schedule_part)
+
+
+ if initial_population is None:
+ initial_population = []
+
+ if ga_functions.initial_chromosome is None:
+ print("empty_init_solutions")
+ init_solutions = []
+ else:
+ init_solutions = [creator.Individual(copy.deepcopy(ga_functions.initial_chromosome)) for _ in range(int(POPSIZE*INIT_SCHED_PERCENT))]
+
+ pop = initial_population + toolbox.population(n=POPSIZE - len(initial_population) - len(init_solutions)) + init_solutions
+
+ ## TODO: experimental change
+ history = History()
+
+ # Decorate the variation operators
+ #toolbox.decorate("mate", history.decorator)
+ # toolbox.decorate("mutate", history.decorator)
+
+ # Create the population and populate the history
+ #history.update(pop)
+ #===================================================
+
+ hallOfFame = deap.tools.HallOfFame(5)
+
+ stats = tools.Statistics(key=lambda x: 1/x.fitness.values[0])
+ stats.register("min", numpy.min)
+ stats.register("max", numpy.max)
+ stats.register("avg", numpy.mean)
+ stats.register("std", numpy.std)
+
+ logbook = tools.Logbook()
+ logbook.header = ["gen"] + stats.fields
+
+
+ # Evaluate the entire population
+ fitnesses = list(map(toolbox.evaluate, pop))
+ for ind, fit in zip(pop, fitnesses):
+ ind.fitness.values = fit
+
+
+
+ previous_raised_avr_individuals = []
+
+ # Begin the evolution
+ res_list = [0 for _ in range(NGEN)]
+ print(NGEN)
+ for g in range(NGEN):
+
+ if self.is_stopped():
+ break
+
+ hallOfFame.update(pop)
+
+ # logbook.record(pop=copy.deepcopy(pop))
+
+ # check if evolution process has stopped
+
+ # if (check_evolution_for_stopping is True) and len(previous_raised_avr_individuals) == repeated_best_count:
+ # length = len(previous_raised_avr_individuals)
+ # whole_sum = sum(previous_raised_avr_individuals)
+ # mean = whole_sum / length
+ # sum2 = sum(abs(x - mean) for x in previous_raised_avr_individuals)
+ # std = sum2/length
+ # ## TODO: uncomment it later. output
+ # # print("std: " + str(std))
+ # if std < 0.0001:
+ # print(" Evolution process has stopped at " + str(g) + " iteration")
+ # res = self._get_result()
+ # extended_result = (res[0], res[1], res[2], res[3], g)
+ # self._save_result(extended_result)
+ # break
+
+
+
+
+
+ # print("-- Generation %i --" % g)
+ # Select the next generation individuals
+ offspring = pop  # toolbox.select(pop, len(pop))
+ # Clone the selected individuals
+ offspring = list(map(toolbox.clone, offspring))
+ # Apply crossover and mutation on the offspring
+ for child1, child2 in zip(offspring[::2], offspring[1::2]):
+ if random.random() < CXPB:
+ toolbox.mate(child1, child2)
+ del child1.fitness.values
+ del child2.fitness.values
+
+ for mutant in offspring:
+ if random.random() < SWEEPMUTPB:
+ ga_functions.sweep_mutation(mutant)
+ del mutant.fitness.values
+ continue
+ if random.random() < MUTPB:
+ toolbox.mutate(mutant)
+ del mutant.fitness.values
+
+
+ # Evaluate the individuals with an invalid fitness
+ invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
+
+ fitnesses = list(map(toolbox.evaluate, invalid_ind))
+
+ for ind, fit in zip(invalid_ind, fitnesses):
+ ind.fitness.values = fit
+ #pop[:] = offspring
+
+ # mix with the best individuals of the time
+ sorted_pop = sorted(pop + list(hallOfFame) + offspring, key=lambda x: x.fitness.values, reverse=True)
+ pop = sorted_pop[:Kbest:] + toolbox.select(sorted_pop[Kbest:], POPSIZE - Kbest)
+
+
+
+ # Gather all the fitnesses in one list and print the stats
+ fits = [ind.fitness.values[0] for ind in pop]
+
+ length = len(pop)
+ mean = sum(fits) / length
+ sum2 = sum(x*x for x in fits)
+ std = abs(sum2 / length - mean**2)**0.5
+ worst = 1/min(fits)
+ best = 1/max(fits)
+ avg = 1/mean
+
+ data = stats.compile(pop)
+ logbook.record(gen=g, **data)
+ res_list[g] = res_list[g] + data['min']
+ if not is_silent:
+ print(logbook.stream)
+ # print("-- Generation %i --" % g)
+ # print(" Worst %s" % str(worst))
+ # print(" Best %s" % str(best))
+ # print(" Avg %s" % str(avr))
+ # print(" Std %s" % str(1/std))
+
+ best = self._find_best(pop)
+ # the last component is the iteration number at which the evolution stopped
+ result = (best, pop, fixed_schedule_part, current_time, g)
+ self._save_result(result)
+ self._save_pop(pop)
+
+ if len(previous_raised_avr_individuals) == repeated_best_count:
+ previous_raised_avr_individuals = previous_raised_avr_individuals[1::]
+ previous_raised_avr_individuals.append(1/mean)
+
+
+ pass
+ #
+ # import matplotlib.pyplot as plt
+ # import networkx
+ #
+ # graph = networkx.DiGraph(history.genealogy_tree)
+ # graph = graph.reverse() # Make the grah top-down
+ # colors = [toolbox.evaluate(history.genealogy_history[i])[0] for i in graph]
+ # networkx.draw(graph, node_color=colors)
+ # plt.show()
+
+
+ # best = self._find_best(pop)
+ # self._save_result((best, pop, fixed_schedule_part, current_time))
+
+ ## return the best-fitted individual and the resulting population
+
+
+ print("Ready")
+ return self.get_result(), logbook
+
+ def _find_best(self, pop):
+ # resulted_pop = [(ind, ind.fitness.values[0]) for ind in pop]
+ # result = max(resulted_pop, key=lambda x: x[1])
+ # return result[0]
+ result = max(pop, key=lambda x: x.fitness.values[0])
+ return result
+
+ def _construct_result(self, result):
+ (best, pop, fixed_schedule_part, current_time, stopped_iteration) = result
+ ## TODO: make additional structure to return elements
+ return best, pop, ga_functions.build_schedule(best, fixed_schedule_part, current_time), stopped_iteration
+ pass
+
+ return GAComputation()
+ pass
+
+
+
+
+
+
+
diff --git a/src/algs/ga/GARunner.py b/src/algs/ga/GARunner.py
new file mode 100644
index 0000000..3b7e5f2
--- /dev/null
+++ b/src/algs/ga/GARunner.py
@@ -0,0 +1,178 @@
+from copy import deepcopy
+from datetime import datetime
+
+from src.algs.ga.GAFunctions2 import mark_finished
+from src.algs.ga.GAImpl import GAFactory
+from src.algs.heft.DSimpleHeft import DynamicHeft
+from src.algs.heft.HeftHelper import HeftHelper
+from src.core.CommonComponents.ExperimentalManagers import ExperimentEstimator, ExperimentResourceManager
+from src.core.environment.BaseElements import Workflow
+from src.core.environment.ResourceManager import Schedule
+from src.core.environment.Utility import GraphVisualizationUtility as viz, wf
+from src.core.environment.ResourceGenerator import ResourceGenerator
+from src.core.environment.Utility import Utility
+
+DEFAULT_GA_PARAMS = {
+ "population": 1000,
+ "crossover_probability": 0.8,
+ "replacing_mutation_probability": 0.5,
+ "sweep_mutation_probability": 0.4,
+ "generations": 50
+}
+
+class BaseRunner:
+ def _construct_environment(self, *args, **kwargs):
+ wf_name = kwargs["wf_name"]
+ nodes_conf = kwargs.get("nodes_conf", None)
+ ideal_flops = kwargs.get("ideal_flops", 20)
+ transfer_time = kwargs.get("transfer_time", 100)
+
+ dax1 = '../../resources/' + wf_name + '.xml'
+ # wf = Utility.readWorkflow(dax1, wf_name)
+
+ _wf = wf(wf_name)
+
+ rgen = ResourceGenerator(min_res_count=1,
+ max_res_count=1,
+ min_node_count=4,
+ max_node_count=4,
+ min_flops=5,
+ max_flops=10)
+ resources = rgen.generate()
+ transferMx = rgen.generateTransferMatrix(resources)
+
+ if nodes_conf is None:
+ bundle = Utility.get_default_bundle()
+ resources = bundle.dedicated_resources
+ transferMx = bundle.transfer_mx
+ ideal_flops = bundle.ideal_flops
+ ##TODO: end
+ else:
+ ## TODO: refactor it later.
+ resources = ResourceGenerator.r(nodes_conf)
+ transferMx = rgen.generateTransferMatrix(resources)
+ ##
+
+ estimator = ExperimentEstimator(transferMx, ideal_flops, dict(), transfer_time)
+ resource_manager = ExperimentResourceManager(resources)
+ return (_wf, resource_manager, estimator)
+
+ def _validate(self, wf, estimator, schedule):
+ max_makespan = Utility.makespan(schedule)
+ seq_time_validaty = Utility.validateNodesSeq(schedule)
+ sched = deepcopy(schedule)
+ mark_finished(sched)
+ Utility.validate_static_schedule(wf, schedule)
+ ## TODO: obsolete remove it later
+ # dependency_validaty = Utility.validateParentsAndChildren(sched, wf)
+ # transfer_dependency_validaty = Utility.static_validateParentsAndChildren_transfer(sched, wf, estimator)
+ # print("=============Results====================")
+ # print(" Makespan %s" % str(max_makespan))
+ # print(" Seq validaty %s" % str(seq_time_validaty))
+ # print(" Dependancy validaty %s" % str(dependency_validaty))
+ # print(" Transfer validaty %s" % str(transfer_dependency_validaty))
+
+ def run(self, *args, **kwargs):
+ pass
+
+
+
+class MixRunner(BaseRunner):
+ def __call__(self, wf_name, ideal_flops, is_silent=False, is_visualized=True, ga_params=DEFAULT_GA_PARAMS, nodes_conf = None, transfer_time=100, heft_initial=True, **kwargs):
+
+ wf = None
+ ## TODO: I know this is a dirty hack
+ if isinstance(wf_name, Workflow):
+ wf = wf_name
+ wf_name = wf.name
+
+ print("Processing " + str(wf_name))
+
+ (_wf, resource_manager, estimator) = self._construct_environment(wf_name=wf_name, nodes_conf=nodes_conf, ideal_flops=ideal_flops,transfer_time=transfer_time)
+
+ wf = wf if wf is not None else _wf
+
+ alg_func = GAFactory.default().create_ga(silent=is_silent,
+ wf=wf,
+ resource_manager=resource_manager,
+ estimator=estimator,
+ ga_params=ga_params)
+
+ def _run_heft():
+ dynamic_planner = DynamicHeft(wf, resource_manager, estimator)
+ nodes = HeftHelper.to_nodes(resource_manager.resources)
+ current_cleaned_schedule = Schedule({node: [] for node in nodes})
+ schedule_dynamic_heft = dynamic_planner.run(current_cleaned_schedule)
+
+ self._validate(wf, estimator, schedule_dynamic_heft)
+
+ if is_visualized:
+ viz.visualize_task_node_mapping(wf, schedule_dynamic_heft)
+ # Utility.create_jedule_visualization(schedule_dynamic_heft, wf_name+'_heft')
+ pass
+ return schedule_dynamic_heft
+
+ # @profile_decorator
+ def _run_ga(initial_schedule, saveIt=True):
+ def default_fixed_schedule_part(resource_manager):
+ fix_schedule_part = Schedule({node: [] for node in HeftHelper.to_nodes(resource_manager.get_resources())})
+ return fix_schedule_part
+
+ fix_schedule_part = default_fixed_schedule_part(resource_manager)
+ ((the_best_individual, pop, schedule, iter_stopped), logbook) = alg_func(fix_schedule_part, initial_schedule)
+
+ self._validate(wf, estimator, schedule)
+
+ name = wf_name +"_bundle"
+ path = '../../resources/saved_schedules/' + name + '.json'
+ if saveIt:
+ Utility.save_schedule(path, wf_name, resource_manager.get_resources(), estimator.transfer_matrix, ideal_flops, schedule)
+
+ if is_visualized:
+ viz.visualize_task_node_mapping(wf, schedule)
+ # Utility.create_jedule_visualization(schedule, wf_name+'_ga')
+ pass
+
+ return schedule, logbook
+
+ def _run_sa(initial_schedule):
+ ## simulated annealing is not implemented yet; this stub just returns None
+ return None
+
+ ##================================
+ ##Dynamic Heft Run
+ ##================================
+ heft_schedule = _run_heft()
+ ##===============================================
+ ##Simulated Annealing
+ ##===============================================
+ _run_sa(heft_schedule)
+ ##================================
+ ##ga Run
+ ##================================
+
+ ## TODO: remove time measure
+ tstart = datetime.now()
+ # ga_schedule = heft_schedule
+ if heft_initial:
+ ga_schedule, logbook = _run_ga(heft_schedule, False)
+ else:
+ ga_schedule, logbook = _run_ga(None, False)
+ # ga_schedule = _run_ga(None)
+
+ tend = datetime.now()
+ tres = tend - tstart
+ print("Time Result: " + str(tres.total_seconds()))
+
+ #print("Count of nodes: " + str(sum(1 if len(items) > 0 else 0 for n, items in ga_schedule.mapping.items())))
+
+ print("===========================================")
+ heft_makespan = Utility.makespan(heft_schedule)
+ ga_makespan = Utility.makespan(ga_schedule)
+ print("Profit: " + str((1 - ga_makespan/heft_makespan)*100))
+ print("===========================================")
+ return (ga_makespan, heft_makespan, ga_schedule, heft_schedule, logbook)
+
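+## Usage sketch (assumption: called from a script two levels below the project root,
+## so that '../../resources/<wf_name>.xml' resolves):
+## runner = MixRunner()
+## ga_makespan, heft_makespan, ga_sched, heft_sched, logbook = runner(
+##     "Montage_25", ideal_flops=20, is_silent=True, is_visualized=False)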
+
diff --git a/src/algs/ga/common_fixed_schedule_schema.py b/src/algs/ga/common_fixed_schedule_schema.py
new file mode 100644
index 0000000..dfe37cf
--- /dev/null
+++ b/src/algs/ga/common_fixed_schedule_schema.py
@@ -0,0 +1,197 @@
+import copy
+import functools
+import operator
+import random
+from threading import Lock
+
+from deap import tools
+import deap
+from src.algs.common.individuals import FitnessStd, DictBasedIndividual
+from src.algs.common.utilities import gather_info
+from src.algs.ga.GAFunctions2 import GAFunctions2
+
+
+class IteratedCheckpointedAlg:
+
+ def __init__(self, alg):
+
+ self._alg = alg
+
+ self.current_result = None
+ self.lock = Lock()
+
+ self._is_stopped = False
+ self.stop_lock = Lock()
+ self._current_pop = None
+ pass
+
+ def __call__(self, logbook, stats, initial_pop, gen, **kwargs):
+ best = None
+
+ for g in range(gen):
+ new_pop, new_logbook, new_best = self._iteration(logbook, stats, g, initial_pop, best, **kwargs)
+ initial_pop, logbook, best = new_pop, new_logbook, new_best
+ self._save_result((initial_pop, logbook, best))
+ self._save_pop(initial_pop)
+ pass
+
+
+ def _iteration(self, logbook, stats, gen_curr, pop, best, **params):
+ pop, logbook, new_best = self._alg(logbook=logbook, stats=stats, gen_curr=gen_curr, initial_pop=pop, best=best, invalidate_fitness=True, **params)
+ return pop, logbook, new_best
+
+
+ ## need to implement
+ def _construct_result(self, result):
+ return result
+
+ def _save_result(self, result):
+ self.lock.acquire()
+ self.current_result = result
+ self.lock.release()
+ pass
+
+ def _get_result(self):
+ self.lock.acquire()
+ result = self.current_result
+ self.lock.release()
+ return result
+
+ def _save_pop(self, pop):
+ self.lock.acquire()
+ self._current_pop = copy.deepcopy(pop)
+ self.lock.release()
+ pass
+
+ def get_result(self):
+ self.lock.acquire()
+ result = self.current_result
+ self.lock.release()
+ constructed = self._construct_result(result)
+ return constructed
+
+ def get_pop(self):
+ self.lock.acquire()
+ result = self._current_pop
+ self.lock.release()
+ return result
+
+ def stop(self):
+ self.stop_lock.acquire()
+ self._is_stopped = True
+ self.stop_lock.release()
+
+ def is_stopped(self):
+ self.stop_lock.acquire()
+ result = self._is_stopped
+ self.stop_lock.release()
+ return result
+
+
+def partially_fixed_schedule():
+ raise NotImplementedError()
+
+
+def run_ga(toolbox, logbook, stats, gen_curr, gen_step=1, invalidate_fitness=True, initial_pop=None, **params):
+ """
+ toolbox must have the methods:
+ generate
+ evaluate
+ clone
+ mate
+ sweep_mutation
+ mutate
+ select_parents
+ select
+ """
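+ ## A minimal sketch (illustrative assumption, not part of this patch) of how such a
+ ## toolbox could be assembled with deap; my_generate, my_evaluate, my_crossover,
+ ## my_sweep_mutation and my_mutation are hypothetical operator implementations:
+ ##
+ ## from deap import base, tools
+ ## toolbox = base.Toolbox()
+ ## toolbox.register("generate", my_generate)            # n -> list of individuals
+ ## toolbox.register("evaluate", my_evaluate)            # individual -> comparable fitness
+ ## toolbox.register("clone", copy.deepcopy)
+ ## toolbox.register("mate", my_crossover)
+ ## toolbox.register("mutate", my_mutation)
+ ## toolbox.register("sweep_mutation", my_sweep_mutation)
+ ## toolbox.register("select", tools.selRoulette)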
+ N = len(initial_pop) if initial_pop is not None else params["n"]
+ pop = initial_pop if initial_pop is not None else toolbox.generate(N)
+ CXPB, SWEEPMUTPB, MUTPB, KBEST = params["cxpb"], params["sweepmutpb"], params["mutpb"], params["kbest"]
+ IS_SILENT = params["is_silent"]
+
+ hallOfFame = deap.tools.HallOfFame(KBEST)
+
+
+ # Evaluate the entire population
+
+
+ ## This should use a map operator taken from the toolbox to provide facilities for future parallelization
+ ## EXAMPLE:
+ ## fitnesses = list(map(toolbox.evaluate, pop))
+ ## for ind, fit in zip(pop, fitnesses):
+ ## ind.fitness.values = fit
+ for p in pop:
+ p.fitness = toolbox.evaluate(p)
+
+ best = None
+
+ for g in range(gen_curr, gen_curr + gen_step, 1):
+ # print("Iteration")
+
+ hallOfFame.update(pop)
+
+ # Select the next generation individuals
+
+ parents = list(map(toolbox.clone, pop))
+ # select_parents must return list of pairs [(par1,par2),]
+ offsprings = toolbox.select_parents(parents) if hasattr(toolbox, "select_parents") else list(zip(parents[::2], parents[1::2]))
+ # Clone the selected individuals
+
+ # Apply crossover and mutation on the offspring
+ for child1, child2 in offsprings:
+ if random.random() < CXPB:
+ toolbox.mate(child1, child2)
+ del child1.fitness.values
+ del child2.fitness.values
+ offsprings = functools.reduce(operator.add, [[child1, child2] for child1, child2 in offsprings], [])
+ for mutant in offsprings:
+ if random.random() < SWEEPMUTPB:
+ toolbox.sweep_mutation(mutant)
+ del mutant.fitness.values
+ continue
+ if random.random() < MUTPB:
+ toolbox.mutate(mutant)
+ del mutant.fitness.values
+
+
+ # Evaluate the individuals with an invalid fitness
+ invalid_ind = [ind for ind in offsprings if not ind.fitness.valid]
+
+ ## This should use a map operator taken from the toolbox to provide facilities for future parallelization
+ for p in invalid_ind:
+ p.fitness = toolbox.evaluate(p)
+
+ # mix with the best individuals of the time
+ sorted_pop = sorted(pop + list(hallOfFame) + list(offsprings), key=lambda x: x.fitness, reverse=True)
+ pop = sorted_pop[:KBEST:] + toolbox.select(sorted_pop[KBEST:], N - KBEST)
+
+ gather_info(logbook, stats, g, pop, None, need_to_print=not IS_SILENT)
+
+ best = max(pop, key=lambda x: x.fitness)
+ pass
+
+ return pop, logbook, best
+
+
+def generate(n, ga_functions, fixed_schedule_part,
+ current_time, init_sched_percent,
+ initial_schedule):
+ init_ind_count = int(n*init_sched_percent)
+ res = []
+ if initial_schedule is not None and init_ind_count > 0:
+ ga_functions.initial_chromosome = DictBasedIndividual(GAFunctions2.schedule_to_chromosome(initial_schedule))
+ init_chromosome = ga_functions.initial_chromosome
+ init_arr = [copy.deepcopy(init_chromosome) for _ in range(init_ind_count)]
+ res = res + init_arr
+ if n - init_ind_count > 0:
+ generated_arr = [DictBasedIndividual(ga_functions.random_chromo(fixed_schedule_part, current_time))
+ for _ in range(n - init_ind_count)]
+ res = res + generated_arr
+ return res
+
+
+def fit_converter(func):
+ def wrap(*args, **kwargs):
+ x = func(*args, **kwargs)
+ return FitnessStd(values=(1/x[0], 0.0))
+ return wrap
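+## Usage sketch (illustrative; some_evaluate is a hypothetical evaluator returning a tuple
+## whose first component is a makespan-like value to be inverted):
+## evaluate = fit_converter(some_evaluate)
+## ind.fitness = evaluate(ind)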
diff --git a/src/algs/ga/nsga2.py b/src/algs/ga/nsga2.py
new file mode 100644
index 0000000..a9e933a
--- /dev/null
+++ b/src/algs/ga/nsga2.py
@@ -0,0 +1,66 @@
+"""
+a mapping and/or ordering version of NSGA-II
+"""
+import random
+
+
+def run_nsga2(toolbox, logbook, stats, gen_curr, gen_step=1, invalidate_fitness=True, pop=None, **params):
+ """
+ toolbox must contain the following functions:
+ population,
+ map,
+ fitness,
+ select,
+ clone,
+ mutate
+ """
+ n, CXPB, MU = params['n'], params['crossover_probability'], params['mutation_probability']
+
+ if pop is None:
+ pop = toolbox.population(n)
+
+ def calculate_fitness(offsprings):
+ invalid_ind = [ind for ind in offsprings if not ind.fitness.valid]
+ fitnesses = toolbox.map(toolbox.fitness, invalid_ind)
+ for ind, fit in zip(invalid_ind, fitnesses):
+ ind.fitness = fit
+ return invalid_ind
+
+ invalid_ind = calculate_fitness(pop)
+ # This is just to assign the crowding distance to the individuals
+ # no actual selection is done
+ pop = toolbox.select(pop, len(pop))
+
+ # Begin the generational process
+ for gen in range(gen_curr, gen_curr + gen_step):
+
+ record = stats.compile(pop) if stats is not None else {}
+ if logbook is not None:
+ logbook.record(gen=gen, evals=len(invalid_ind), **record)
+ print(logbook.stream)
+
+ # Vary the population
+ # offspring = tools.selTournamentDCD(pop, len(pop))
+ offspring = toolbox.select(pop, len(pop))
+ offspring = [toolbox.clone(ind) for ind in offspring]
+
+ for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
+ if random.random() <= CXPB:
+ toolbox.mate(ind1, ind2)
+
+ toolbox.mutate(ind1)
+ toolbox.mutate(ind2)
+ del ind1.fitness.values, ind2.fitness.values
+
+ # Evaluate the individuals with an invalid fitness
+ invalid_ind = calculate_fitness(offspring)
+
+ # Select the next generation population
+ pop = toolbox.select(pop + offspring, MU)
+ pass
+
+ return pop, logbook, max(pop, key=lambda x: x.fitness)
+
+
+
+
diff --git a/src/algs/gsa/SimpleGsaScheme.py b/src/algs/gsa/SimpleGsaScheme.py
new file mode 100644
index 0000000..d36ea8a
--- /dev/null
+++ b/src/algs/gsa/SimpleGsaScheme.py
@@ -0,0 +1,116 @@
+from copy import deepcopy
+import functools
+import operator
+import random
+from src.algs.common.utilities import cannot_be_zero, gather_info
+from src.core.environment.Utility import profile_decorator
+
+
+def _randvecsum(vectors):
+ l = len(vectors[0])
+ val = [_randsum(v[i] for v in vectors) for i in range(l)]
+ return val
+
+def _randsum(iterable):
+ add = lambda a, b: a + random.random()*b
+ return functools.reduce(add, iterable)
+
+def calculate_velocity_and_position(p, fvm, estimate_position):
+ ## get vector of total influential force for all dimensions
+ total_force = _randvecsum([vec for vec in fvm[p]])  # materialize the list: _randvecsum indexes vectors[0]
+ p.acceleration = [f/p.mass for f in total_force]
+ # p.velocity = toolbox.velocity(p, velocity, acceleration)
+ p.velocity = random.random()*p.velocity + p.acceleration
+ p = estimate_position(p)
+ return p
+
+def run_gsa(toolbox, stats, logbook, n, gen_curr, gen_step, initial_pop=None, kbest=None, ginit=None, **params):
+ """
+ This method provides a prototype implementation of the
+ Gravitational Search Algorithm (GSA). It is intended only for initial
+ exploration of the algorithm and shouldn't be considered for real applications.
+
+ We need to specify the following entities:
+ 1) Vector representation
+ 2) generate() - generation of initial solutions
+ 3) fitness(p) (with mofit field)
+ 4) mass(p, worst, best)
+ 5) estimate_force(p, pop, kbest, G)
+ 6) update(p) - function of getting new position
+ 7) G(g, i) - update of the gravitational constant G, where i is the number of the current iteration
+ 8) kbest(kbest, i) - update of kbest
+ 9) w - inertia coefficient
+ """
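+ ## A hedged wiring sketch (assumption, not taken from this patch); generate_fn, fitness_fn,
+ ## force_fn and update_fn stand for concrete operators such as the ones in src.algs.gsa:
+ ##
+ ## from deap import base
+ ## from src.algs.gsa.operators import G, Kbest
+ ## toolbox = base.Toolbox()
+ ## toolbox.register("generate", generate_fn)
+ ## toolbox.register("fitness", fitness_fn)
+ ## toolbox.register("estimate_force", force_fn)
+ ## toolbox.register("update", update_fn)
+ ## toolbox.register("G", G)
+ ## toolbox.register("kbest", Kbest)
+ ## pop, logbook, best = run_gsa(toolbox, stats=None, logbook=None, n=50,
+ ##                              gen_curr=0, gen_step=100, kbest=50, ginit=10)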
+
+ G = ginit
+ kbest_init = kbest
+
+ ## initialization
+ ## generates random solutions
+ pop = toolbox.generate(n) if initial_pop is None else initial_pop
+
+ best = None
+
+ for i in range(gen_curr, gen_curr + gen_step):
+ ## fitness estimation
+ for p in pop:
+ ## toolbox.fitness must return Fitness
+ p.fitness = toolbox.fitness(p)
+ ## mass estimation
+ ## It is assumed that a minimization task is solved
+ pop = sorted(pop, key=lambda x: x.fitness)
+ best_fit = pop[0].fitness
+ worst_fit = pop[-1].fitness
+ # TODO: this is a hack
+ max_diff = best_fit.values[0] - worst_fit.values[-1]
+ max_diff = cannot_be_zero(max_diff)
+ for p in pop:
+ p.mass = cannot_be_zero((p.fitness.values[0] - worst_fit.values[0]) / max_diff)
+ ## convert to (0, 1) interval
+ ## TODO: perhaps this should emit a 'warn' message
+ # mass_sum = cannot_be_zero(sum(p.mass for p in pop))
+ # for p in pop:
+ # p.mass = p.mass/mass_sum
+
+ ## TODO: only for debug. remove it later.
+ #print(functools.reduce(operator.add, (" " + str(round(p.mass, 4)) for p in pop), ""))
+
+
+ ## estimate all related forces
+ ## fvm is a matrix of VECTORS (since we operate in a d-dimensional space) of size 'pop_size x kbest'
+ ## in fact we could use a wrapper around each pop individual, but python has duck typing,
+ ## so why not use it, as long as we use it carefully?
+ for p in pop:
+ p.force = toolbox.estimate_force(p, pop, kbest, G)
+
+ ##statistics gathering
+ ## TODO: replace it back
+ data = stats.compile(pop) if stats is not None else None
+ if logbook is not None:
+ logbook.record(gen=i, kbest=kbest, G=G, evals=len(pop), **data)
+ #print(logbook.stream)
+ # gather_info(logbook, stats, i, pop, need_to_print=True)
+
+ new_best = max(pop, key=lambda x: x.fitness)
+ if best is None:
+ best = deepcopy(new_best)
+ else:
+ best = deepcopy(max(best, new_best, key=lambda x: x.fitness))
+ ## compute new velocity and position
+ for p in pop:
+ toolbox.update(p)
+ # position = toolbox.position if hasattr(toolbox, 'position') else None
+ # pop = [toolbox.velocity_and_position(p, forces, position) for p, f in zip(pop, fvm)]
+
+ ## change gravitational constants
+ G = toolbox.G(ginit, i, gen_step)
+ kbest = toolbox.kbest(kbest_init, kbest, i, gen_step)
+
+ ##removing temporary elements
+ for p in pop:
+ if hasattr(p, 'fitness'): del p.fitness
+ if hasattr(p, 'acceleration'): del p.acceleration
+ pass
+ return pop, logbook, best
+
+
diff --git a/src/algs/gsa/heftbasedoperators.py b/src/algs/gsa/heftbasedoperators.py
new file mode 100644
index 0000000..9c9e3db
--- /dev/null
+++ b/src/algs/gsa/heftbasedoperators.py
@@ -0,0 +1,108 @@
+import math
+
+from deap import tools
+from deap.base import Fitness
+from src.algs.common.individuals import ListBasedIndividual
+from src.algs.common.MapOrdSchedule import MAPPING_SPECIE, ORDERING_SPECIE
+from src.algs.common.MapOrdSchedule import fitness as basefitness
+
+
+
+## TODO: remake vector representation -> sorted
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+
+
+def schedule_to_position(schedule):
+ """
+ this function extracts only the mapping from a schedule,
+ because a position is considered to be a mapping only
+ """
+ items = lambda: iter((item, node) for node, items in schedule.mapping.items() for item in items)
+ if not all(i.is_unstarted() for i, _ in items()):
+ raise ValueError("Schedule is not valid. Not all elements have unstarted state.")
+
+ mapping = ListBasedIndividual([n.name for _, n in sorted(items(), key=lambda x: x[0].job.id)])
+ return mapping
+
+def generate(wf, rm, estimator):
+ sched = SimpleRandomizedHeuristic(wf, rm.get_nodes(), estimator).schedule()
+ return schedule_to_position(sched)
+
+def force_vector_matrix(rm, pop, kbest, G, e=0.0):
+ """
+ returns a matrix of VECTORS of size 'pop_size x kbest';
+ the distance between two vectors is estimated with the Hamming distance
+ Note: pop is sorted by decreasing goodness, so to find the kbest
+ individuals we only need to take the first kbest elements of pop
+ """
+ ## calculate components of DIRECTED force vector
+ # sub = lambda seq1, seq2: [0 if s1 == s2 else 1 for s1, s2 in zip(seq1, seq2)]
+ ## pure hack: treat it as forces that prevent changing the position in a particular dimension
+ sub = lambda seq1, seq2: [1 for _ in zip(seq1, seq2)]
+ zero = lambda: [0 for _ in range(len(pop[0]))]
+
+ def squared(a):
+ val = (G*(a.mass * a.mass)/1)
+ return [val for _ in range(len(pop[0]))]
+
+ def dist(a, b):
+ diff_by_flops = lambda r1, r2: math.fabs(rm.byName(r1).flops - rm.byName(r2).flops)/(rm.byName(r1).flops + rm.byName(r2).flops)
+ return sum([(0 if r1 == r2 else 1) + diff_by_flops(r1, r2) for r1, r2 in zip(a, b)])
+
+ def estimate_force(a, b):
+ a_string = a  # mapping_as_vector(a)
+ b_string = b  # mapping_as_vector(b)
+
+ R = dist(a_string, b_string)
+ ## TODO: here must be a multiplication of a vector and a number
+ #val = (G*(a.mass * b.mass)/(R + e))
+ val = (G*(a.mass * b.mass)/(1 if R == 0 else 2))
+ f = [val * d for d in sub(a_string, b_string)]
+ return f
+
+ mat = {p.uid: [(squared(b), b) if p == b else (estimate_force(p, b), b) for b in pop[0:kbest]] for p in pop}
+ return mat
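+ ## (note) mat maps each particle's uid to a list of (force_vector, acting_particle) pairs,
+ ## one pair per member of the kbest subset; the self-pair uses the squared() value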
+
+def velocity_and_position(wf, rm, estimator, p, fvm, estimate_position=None):
+
+ def change(d):
+ ## this function returns new state for dimension d
+ ## depending on acting forces of other masses
+ class TempWrapper:
+ def __init__(self, pd, dacceleration):
+ self.pd = pd
+ self.fitness = Fitness(values=(dacceleration,))
+ pass
+
+ ## get all forces acting on mass p from all the other participating masses
+ ## for each force vector keep the force value and the point in the discrete dimension where it acts
+ dforces = [TempWrapper(mass[d], f[d]/p.mass) for f, mass in fvm[p.uid]]
+ #dforces = [tw for tw in dforces if tw.fitness.values[0] < 0]
+
+
+ ## case of not changing the current place in space
+ ## acts like yet another division for the roulette
+ # not_changing = sum([mass.mass for _, mass in fvm[p.uid]])/(p.mass*len(fvm[p.uid]))
+ # if not_changing < 1:
+ # dforces.append(TempWrapper(p[d], sum([x.fitness.values[0] for x in dforces]) * not_changing))
+
+
+ if sum([t.fitness.values[0] for t in dforces]) == 0:
+ ## corner case, when all accelerations(fitnesses) equal 0
+ return p[d]
+ else:
+ el = tools.selRoulette(dforces, 1)[0]
+ # el = tools.selTournament(dforces, 1, 2)[0]
+ return el.pd
+ ## construct new position vector based on forces
+ new_p = ListBasedIndividual([change(i) for i in range(len(p))])
+ return new_p
+
+def fitness(wf, rm, estimator, ordering, position):
+ solution = {MAPPING_SPECIE: list(zip(wf.get_tasks_id(), position)),
+ ORDERING_SPECIE: ordering}
+ fit = basefitness(wf, rm, estimator, solution)
+ return fit
+
+
+
diff --git a/src/algs/gsa/operators.py b/src/algs/gsa/operators.py
new file mode 100644
index 0000000..c1f348d
--- /dev/null
+++ b/src/algs/gsa/operators.py
@@ -0,0 +1,113 @@
+"""
+1) Vector representation
+2) generate() - generation of initial solutions
+3) fitness(p) (with mofit field)
+4) mass(p, worst, best)
+5) force_vector_matrix(pop, kbest, G)
+6) position(p, velocity) - function of getting new position
+7) G(g, i) - changing of G-constant, where i - a number of a current iteration
+8) kbest(kbest, i) - changing of kbest
+"""
+import random
+import math
+
+from deap.base import Fitness
+from src.algs.common.MapOrdSchedule import build_schedule, MAPPING_SPECIE, ORDERING_SPECIE
+from src.algs.common.utilities import mapping_as_vector
+from src.core.environment.Utility import Utility
+from src.experiments.cga.utilities.common import hamming_distances
+from src.algs import SimpleRandomizedHeuristic
+
+
+def generate(wf, rm, estimator):
+ sched = SimpleRandomizedHeuristic(wf, rm.get_nodes(), estimator).schedule()
+ return schedule_to_position(sched)
+
+
+
+def force_vector_matrix(pop, kbest, G, e=0.0):
+ """
+ returns a matrix of VECTORS of size 'pop_size x kbest';
+ the distance between two vectors is estimated with the Hamming distance
+ Note: pop is sorted by decreasing goodness, so to find the kbest
+ individuals we only need to take the first kbest elements of pop
+ """
+ sub = lambda seq1, seq2: [0 if s1 == s2 else 1 for s1, s2 in zip(seq1, seq2)]
+ zero = lambda: [0 for _ in range(len(pop[0]))]
+
+ def estimate_force(a, b):
+ a_string = a.as_vector()
+ b_string = b.as_vector()
+
+ R = hamming_distances(a_string, b_string)
+ ## TODO: here must be a multiplication of a vector and a number
+ val = (G*(a.mass*b.mass)/(R + e))  # soften the distance: divide by (R + e), not by R alone
+ f = [val * d for d in sub(a_string, b_string)]
+ return f
+
+ mat = [[zero() if p == b else estimate_force(p, b) for b in pop[0:kbest]] for p in pop]
+ return mat
+
+def position(wf, rm, estimator, position, velocity):
+ ## TODO: design a proper architecture of relations in the first place
+ ## TODO: rework this in an elegant way
+ raise NotImplementedError()
+ unchecked_tasks = wf.get_all_unique_tasks()
+ def change(d):
+ if d.startswith("ID"):
+ s = set(node.name for node in rm.get_nodes())
+ s.remove(d)
+ s = list(s)
+ new_name = d if len(s) == 0 else s[random.randint(0, len(s) - 1)]
+ else:
+
+ s = set(t.id for t in tasks)
+ s.remove(d)
+ s = [el for el in s if not el.checked]
+ ## TODO: add condition for checking of precedence
+ if len(s) == 0:
+ ## TODO: check case
+ new_name = d
+ else:
+ while len(s) > 0:
+ el = s[random.randint(0, len(s) - 1)]
+ task = wf.byId(el)
+ if all(is_checked(el) for p in task.parents):
+ task.checked = True
+ new_name = el
+ break
+ else:
+ s.remove(el)
+ pass
+ threshold = 0.4
+ new_vector = [change(d) if vd > threshold else d for vd, d in zip(velocity, position.as_vector())]
+ new_position = Position.from_vector(new_vector)
+ return new_position
+
+def G(ginit, i, iter_number, all_iter_number=None):
+ ng = ginit*(1 - i/iter_number) if all_iter_number is None else ginit*(1 - i/all_iter_number)
+ return ng
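+## Example (illustrative): G(ginit=10, i=25, iter_number=100) = 10*(1 - 25/100) = 7.5;
+## the gravitational constant decays linearly towards zero over the run.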
+
+def Kbest(kbest_init, kbest, i, iter_number, all_iter_number=None):
+ """
+ basic implementation of kbest decreasing
+ """
+ iter_number = iter_number if all_iter_number is None else all_iter_number
+ d = iter_number / kbest_init
+ nkbest = math.ceil(abs(kbest_init - i/d))
+ return nkbest
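+## Worked example (illustrative): with kbest_init=10 and iter_number=100, d = 100/10 = 10,
+## so the formula yields ceil(|10 - i/10|): 10 at i=0, 5 at i=50 and 1 at i=99.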
+
+
+def schedule_to_position(schedule):
+ """
+ this function converts valid schedule
+ to mapping and ordering strings
+ """
+ items = lambda: iter((item, node) for node, items in schedule.mapping.items() for item in items)
+ if not all(i.is_unstarted() for i, _ in items()):
+ raise ValueError("Schedule is not valid. Not all elements have unstarted state.")
+
+ mapping = {i.job.id: n.name for i, n in items()}
+ ordering = sorted([i for i, _ in items()], key=lambda x: x.start_time)
+ ordering = [el.job.id for el in ordering]
+ return Position(mapping, ordering)
\ No newline at end of file
diff --git a/src/algs/gsa/ordering_mapping_operators.py b/src/algs/gsa/ordering_mapping_operators.py
new file mode 100644
index 0000000..26ab6d3
--- /dev/null
+++ b/src/algs/gsa/ordering_mapping_operators.py
@@ -0,0 +1,95 @@
+import functools
+import operator
+import random
+
+from src.algs.common.particle_operations import MappingParticle, Particle
+from src.algs.common.utilities import cannot_be_zero
+from src.algs.pso.mapping_operators import position_update
+from src.algs.pso.ordering_operators import generate as pso_generate
+
+
+def force(p, pop, kbest, G):
+ def mutual_force(p1, p2):
+ """
+ estimate force between p1 and p2
+ p must be a Particle with overriden operators
+ """
+ diff = p1 - p2
+ dist = 1  # diff.vector_length() is disabled for now, so the distance is fixed to 1
+ #print("dist: {0}".format(dist))
+ dist = cannot_be_zero(dist)
+ f_abs = G * (p1.mass * p2.mass) / dist
+ force = diff * f_abs
+ return force
+
+ pop = sorted(pop, key=lambda x: x.mass)
+ active_elements = pop[0:kbest]
+ # if p in active_elements:
+ # return p.emptify()
+ forces = [mutual_force(p, a) for a in active_elements]
+ common_force = functools.reduce(operator.add, forces)
+ return common_force
+
+
+def mapping_update(w, c, p):
+ acceleration = p.force/p.mass
+ alpha = random.random()
+ new_velocity = p.velocity*w + acceleration*c*alpha
+ new_entity = position_update(p, new_velocity)
+ p.entity = new_entity
+ p.velocity = new_velocity
+ pass
+
+
+def ordering_update(w, c, p, min=-1, max=1):
+ acceleration = p.force/p.mass
+ alpha = random.random()
+ new_velocity = p.velocity*w + acceleration*c*alpha
+ new_position = (p + new_velocity)
+ new_position.limit_by(min, max)
+ p.entity = new_position.entity
+ pass
+
+
+class CompoundParticle(Particle):
+ def __init__(self, mapping_particle, ordering_particle):
+ super().__init__(None)
+ self.mapping = mapping_particle
+ self.ordering = ordering_particle
+ self._best = None
+
+ self._mass = None
+ self._force = None
+
+ pass
+
+ def _get_mass(self):
+ return self._mass
+
+ def _set_mass(self, value):
+ self._mass = value
+ self.mapping.mass = self._mass
+ self.ordering.mass = self._mass
+ pass
+
+ def _get_force(self):
+ return self._force
+
+ def _set_force(self, value):
+ self._force = value
+ mapping_force, ordering_force = value
+ self.mapping.force = mapping_force
+ self.ordering.force = ordering_force
+ pass
+
+
+ mass = property(_get_mass, _set_mass)
+ force = property(_get_force, _set_force)
+ pass
+
+
+def generate(wf, rm, estimator, schedule=None, fixed_schedule_part=None, current_time=0.0):
+ particle = pso_generate(wf, rm, estimator, schedule, fixed_schedule_part, current_time)
+ particle = CompoundParticle(particle.mapping, particle.ordering)
+ return particle
+
diff --git a/src/algs/gsa/setbasedoperators.py b/src/algs/gsa/setbasedoperators.py
new file mode 100644
index 0000000..4a92baf
--- /dev/null
+++ b/src/algs/gsa/setbasedoperators.py
@@ -0,0 +1,43 @@
+import functools
+import operator
+from distance import hamming
+from src.algs.common.particle_operations import MappingParticle
+from src.algs.common.utilities import cannot_be_zero
+from src.algs.pso.mapping_operators import update
+
+
+def mapping_force_vector_matrix(pop, kbest, G):
+ """
+ :return: dictionary of p_uid with array of associated influential powers
+ """
+ def force(p1, p2):
+ """
+ estimate force between p1 and p2
+ p must be a Position
+ """
+ ## TODO: remake it later
+ dist = hamming(p1.entity, p2.entity)
+ dist = cannot_be_zero(dist)
+ f_abs = G * (p1.mass * p2.mass) / dist
+ force = {item: p * f_abs for item, p in (p1.entity - p2.entity).items()}
+ return MappingParticle.Velocity(force)
+
+ pop = sorted(pop, key=lambda x: x.mass)
+ active_elements = pop[0:kbest]
+ forces = [[force(p, a) for a in active_elements] for p in pop]
+ return forces
+
+
+def mapping_velocity_and_position(p, fvm, position_func, beta=1.0):
+ forces = fvm[p.uid]
+ force = functools.reduce(operator.add, forces)
+ acceleration = force / p.mass
+ new_velocity = p.velocity*beta + acceleration
+ new_position = update(p.entity, new_velocity)
+ new_particle = MappingParticle(new_position)
+ new_particle.velocity = new_velocity
+ return new_particle
+
+
+
+
diff --git a/src/algs/heft/DSimpleHeft.py b/src/algs/heft/DSimpleHeft.py
new file mode 100644
index 0000000..a36c0d9
--- /dev/null
+++ b/src/algs/heft/DSimpleHeft.py
@@ -0,0 +1,84 @@
+from src.algs.heft.HeftHelper import HeftHelper
+from src.core.environment.BaseElements import Node
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+from src.core.environment.Utility import timing
+from src.algs.heft.simple_heft import StaticHeftPlanner
+
+
+class DynamicHeft(StaticHeftPlanner):
+ executed_tasks = set()
+
+ def get_nodes(self):
+ resources = self.resource_manager.get_resources()
+ nodes = HeftHelper.to_nodes(resources)
+ return nodes
+ # return self.resource_manager.get_nodes()
+
+ def __init__(self, workflow, resource_manager, estimator, ranking=None):
+ self.current_schedule = Schedule(dict())
+ self.workflow = workflow
+ self.resource_manager = resource_manager
+ self.estimator = estimator
+ self.ranking = ranking
+
+ self.current_time = 0
+
+ nodes = self.get_nodes()
+
+
+
+ # print("A: " + str(self.wf_jobs))
+
+ #TODO: remove it later
+ # to_print = ''
+ # for job in self.wf_jobs:
+ # to_print = to_print + str(job.id) + " "
+ # print(to_print)
+ pass
+
+ @timing
+ def run(self, current_cleaned_schedule):
+ ## current_cleaned_schedule - this schedule contains only
+ ## finished and executing tasks; all unfinished and failed ones have already been removed
+ ## current_cleaned_schedule also accounts for down nodes and newly added ones
+ ## THE ALGORITHM DOESN'T CHECK THE ADDITION OF NEW NODES BY ITSELF
+ ## nodes contains only the currently available nodes
+
+ ## 1. get all unscheduled tasks
+ ## 2. sort them by rank
+ ## 3. map on the existed nodes according to current_cleaned_schedule
+
+ nodes = self.get_nodes()
+ live_nodes = [node for node in nodes if node.state != Node.Down]
+
+ for_planning = HeftHelper.get_tasks_for_planning(self.workflow, current_cleaned_schedule)
+ ## TODO: check if it sorted properly
+ for_planning = set([task.id for task in for_planning])
+
+ self.wf_jobs = self.make_ranking(self.workflow, live_nodes) if self.ranking is None else self.ranking
+
+ sorted_tasks = [task for task in self.wf_jobs if task.id in for_planning]
+
+ # print("P: " + str(sorted_tasks))
+
+ new_sched = self.mapping([(self.workflow, sorted_tasks)], current_cleaned_schedule.mapping, nodes, self.commcost, self.compcost)
+ return new_sched
+
+ def endtime(self, job, events):
+ """ Endtime of job in list of events """
+ for e in events:
+ if e.job == job and (e.state == ScheduleItem.FINISHED or e.state == ScheduleItem.EXECUTING or e.state == ScheduleItem.UNSTARTED):
+ return e.end_time
+
+def run_heft(workflow, resource_manager, estimator):
+ """
+ It simply runs HEFT with an empty initial schedule
+ and returns the complete schedule
+ """
+ heft = DynamicHeft(workflow, resource_manager, estimator)
+ nodes = resource_manager.get_nodes()
+ init_schedule = Schedule({node: [] for node in nodes})
+ return heft.run(init_schedule)
+
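+## Usage sketch (assumption: wf, resource manager and estimator are built as in GARunner):
+## schedule = run_heft(_wf, resource_manager, estimator)
+## print(Utility.makespan(schedule))   # Utility would need to be imported here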
+
+
diff --git a/src/algs/heft/DeadlineHeft.py b/src/algs/heft/DeadlineHeft.py
new file mode 100644
index 0000000..0dbd8cc
--- /dev/null
+++ b/src/algs/heft/DeadlineHeft.py
@@ -0,0 +1,63 @@
+from src.algs.heft.HeftHelper import HeftHelper
+from src.core.environment.BaseElements import Node
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+from src.core.environment.Utility import timing
+from src.algs.heft.simple_heft import StaticHeftPlanner
+
+
+class DeadlineHeft(StaticHeftPlanner):
+ executed_tasks = set()
+
+ def get_nodes(self):
+ resources = self.resource_manager.get_resources()
+ nodes = HeftHelper.to_nodes(resources)
+ return nodes
+
+ def __init__(self, workflow, resource_manager, estimator, ranking=None):
+ self.current_schedule = Schedule(dict())
+ self.workflow = workflow
+ self.resource_manager = resource_manager
+ self.estimator = estimator
+ self.ranking = ranking
+
+ self.current_time = 0
+
+ nodes = self.get_nodes()
+ pass
+
+ @timing
+ def run(self, current_cleaned_schedule):
+
+ nodes = self.get_nodes()
+ live_nodes = [node for node in nodes if node.state != Node.Down]
+
+ for_planning = HeftHelper.get_tasks_for_planning(self.workflow, current_cleaned_schedule)
+ for_planning = set([task.id for task in for_planning])
+
+ self.wf_jobs = self.make_ranking(self.workflow, live_nodes) if self.ranking is None else self.ranking
+
+ sorted_tasks = [task for task in self.wf_jobs if task.id in for_planning]
+
+ # print("P: " + str(sorted_tasks))
+
+ new_sched = self.mapping([(self.workflow, sorted_tasks)], current_cleaned_schedule.mapping, nodes, self.commcost, self.compcost)
+ return new_sched
+
+ def endtime(self, job, events):
+ """ Endtime of job in list of events """
+ for e in events:
+ if e.job == job and (e.state == ScheduleItem.FINISHED or e.state == ScheduleItem.EXECUTING or e.state == ScheduleItem.UNSTARTED):
+ return e.end_time
+
+def run_heft(workflow, resource_manager, estimator):
+ """
+ It simply runs Deadline-HEFT with an empty initial schedule
+ and returns the complete schedule
+ """
+ heft = DeadlineHeft(workflow, resource_manager, estimator)
+ nodes = resource_manager.get_nodes()
+ init_schedule = Schedule({node: [] for node in nodes})
+ return heft.run(init_schedule)
+
+
+
diff --git a/src/algs/heft/HeftHelper.py b/src/algs/heft/HeftHelper.py
new file mode 100644
index 0000000..f511d24
--- /dev/null
+++ b/src/algs/heft/HeftHelper.py
@@ -0,0 +1,163 @@
+from functools import partial
+
+from src.core.environment.ResourceManager import Scheduler, ScheduleItem, Schedule
+
+
+class HeftHelper(Scheduler):
+
+ @staticmethod
+ def heft_rank(wf, rm, estimator):
+ nodes = rm.get_nodes()
+ ranking = HeftHelper.build_ranking_func(nodes,
+ lambda job, agent: estimator.estimate_runtime(job, agent),
+ lambda ni, nj, A, B: estimator.estimate_transfer_time(A, B, ni, nj))
+ sorted_tasks = [t.id for t in ranking(wf)]
+ return sorted_tasks
+
+
+ @staticmethod
+ def to_nodes(resources):
+ result = set()
+ for resource in resources:
+ result.update(resource.nodes)
+ result = list(sorted(result, key=lambda x: x.name))
+ return result
+
+ @staticmethod
+ def build_ranking_func(nodes, compcost, commcost):
+ task_rank_cache = dict()
+
+ def ranking_func(wf):
+ wf_dag = HeftHelper.convert_to_parent_children_map(wf)
+ rank = partial(HeftHelper.ranking, nodes=nodes, succ=wf_dag,
+ compcost=compcost, commcost=commcost,
+ task_rank_cache=task_rank_cache)
+ jobs = set(wf_dag.keys()) | set(x for xx in wf_dag.values() for x in xx)
+
+ ## TODO: sometimes the sort gives different results
+ ## TODO: this is normal, because only elements with the same rank
+ ## TODO: change their place relative to each other
+ ## TODO: a deeper understanding of this situation is needed
+ #jbs = [(job, rank(job))for job in jobs]
+ #jbs = sorted(jbs, key=lambda x: x[1])
+ #jbs = list(reversed(jbs))
+ #print("===========JBS=================")
+ #for job, rk in jbs:
+ # print("J: " + str(job) + " " + str(rk))
+ #print("===========END_JBS=================")
+
+ jobs = sorted(jobs, key=rank)
+
+
+
+ return list(reversed(jobs))
+
+ return ranking_func
+
+ @staticmethod
+ def ranking(ni, nodes, succ, compcost, commcost, task_rank_cache):
+
+ w = partial(HeftHelper.avr_compcost, compcost=compcost, nodes=nodes)
+ c = partial(HeftHelper.avr_commcost, nodes=nodes, commcost=commcost)
+ ##cnt = partial(self.node_count_by_soft, nodes=nodes)
+
+ def estimate(ni):
+ result = task_rank_cache.get(ni,None)
+ if result is not None:
+ return result
+ if ni in succ and succ[ni]:
+ ## the last component cnt(ni)/nodes.len is needed to account for
+ ## the software restrictions of a particular task
+ ##TODO: include the last component later
+ result = w(ni) + max(c(ni, nj) + estimate(nj) for nj in succ[ni]) ##+ math.pow((nodes.len - cnt(ni)),2)/nodes.len - include it later.
+ else:
+ result = w(ni)
+ task_rank_cache[ni] = result
+ return result
+
+ # print("%s %s" % (ni, result))
+ result = estimate(ni)
+ if hasattr(ni, 'priority'):
+ if ni.priority > 0:
+ result += pow(120, ni.priority)
+ result = float(round(result, 5)) + HeftHelper.get_seq_number(ni)
+ else:
+ result = int(round(result, 5) * 1000000) + HeftHelper.get_seq_number(ni)
+ return result
+
+ @staticmethod
+ def get_seq_number(task):
+ ## It is assumed that task.id has only one format: ID000[2-digit number]_000
+ id = task.id
+ number = id[5:7]
+ return int(number)
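+ ## e.g. (illustrative, assuming the fixed id layout) "ID00042_000"[5:7] == "42", so 42 is returned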
+
+
+ @staticmethod
+ def avr_compcost(ni, nodes, compcost):
+ """ Average computation cost """
+ return sum(compcost(ni, node) for node in nodes) / len(nodes)
+
+ @staticmethod
+ def avr_commcost(ni, nj, nodes, commcost):
+ ##TODO: remake it later.
+ # return 10
+ """ Average communication cost """
+ n = len(nodes)
+ if n == 1:
+ return 0
+ npairs = n * (n - 1)
+ return 1. * sum(commcost(ni, nj, a1, a2) for a1 in nodes for a2 in nodes
+ if a1 != a2) / npairs
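+ ## e.g. (illustrative) for n = 3 nodes there are npairs = 3*2 = 6 ordered pairs,
+ ## so the sum over the 6 pairwise transfer estimates is divided by 6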
+
+ @staticmethod
+ def convert_to_parent_children_map(wf):
+ head = wf.head_task
+ map = dict()
+ def mapp(parents, map):
+ for parent in parents:
+ st = map.get(parent, set())
+ st.update(parent.children)
+ map[parent] = st
+ mapp(parent.children, map)
+ mapp(head.children, map)
+ return map
+
+ @staticmethod
+ def get_all_tasks(wf):
+ map = HeftHelper.convert_to_parent_children_map(wf)
+ tasks = [task for task in map.keys()]
+ return tasks
+
+ @staticmethod
+ def clean_unfinished(schedule):
+ def clean(items):
+ return [item for item in items if item.state == ScheduleItem.FINISHED or item.state == ScheduleItem.EXECUTING]
+ new_mapping = {node: clean(items) for (node, items) in schedule.mapping.items()}
+ return Schedule(new_mapping)
+
+ @staticmethod
+ def get_tasks_for_planning(wf, schedule):
+ ## TODO: remove duplicate code later
+ def clean(items):
+ return [item.job for item in items if item.state == ScheduleItem.FINISHED or item.state == ScheduleItem.EXECUTING]
+ def get_not_for_planning_tasks(schedule):
+ result = set()
+ for (node, items) in schedule.mapping.items():
+ unfin = clean(items)
+ result.update(unfin)
+ return result
+ all_tasks = HeftHelper.get_all_tasks(wf)
+ not_for_planning = get_not_for_planning_tasks(schedule)
+ # def check_in_not_for_planning(tsk):
+ # for t in not_for_planning:
+ # if t.id == tsk.id:
+ # return True
+ # return False
+ # for_planning = [tsk for tsk in all_tasks if not(check_in_not_for_planning(tsk))]
+ for_planning = set(all_tasks) - set(not_for_planning)
+ return for_planning
+
+
+ pass
diff --git a/src/algs/heft/PublicResourceManager.py b/src/algs/heft/PublicResourceManager.py
new file mode 100644
index 0000000..10a5e26
--- /dev/null
+++ b/src/algs/heft/PublicResourceManager.py
@@ -0,0 +1,58 @@
+from src.algs.heft.HeftHelper import HeftHelper
+from src.core.environment.BaseElements import SoftItem, Node
+
+
+class PublicResourceManager:
+ # public_resources_manager:
+ # determine nodes of proper soft type
+ # check and determine free nodes
+ # determine reliability of every node
+ # determine time_of_execution probability for (task,node) pair
+
+ def __init__(self, public_resources, reliability_map, probability_estimator):
+ self.public_resources = public_resources
+ self.reliability_map = reliability_map
+ self.probability_estimator = probability_estimator
+
+ self.busies_nodes = set()
+
+ ## get available nodes by soft type
+ def get_by_softreq(self, soft_reqs):
+ nodes = HeftHelper.to_nodes(self.public_resources)
+ def check_reqs(node):
+ return (soft_reqs in node.soft) or (SoftItem.ANY_SOFT in node.soft)
+ gotcha = [node for node in nodes if node.state != Node.Down and check_reqs(node)]
+ return gotcha
+
+ def isBusy(self, node):
+ return node.name in self.busies_nodes
+
+ def checkBusy(self, node, is_busy):
+ if not is_busy:
+ self.busies_nodes.remove(node.name)
+ else:
+ self.busies_nodes.add(node.name)
+
+ def checkDown(self, node_name, is_down):
+ nodes = HeftHelper.to_nodes(self.public_resources)
+ for nd in nodes:
+ if nd.name == node_name:
+ if is_down:
+ nd.state = Node.Down
+ else:
+ nd.state = Node.Unknown
+ pass
+
+
+ def get_reliability(self, node_name):
+ return self.reliability_map[node_name]
+
+ def isCloudNode(self, node):
+ result = node.name in [nd.name for nd in HeftHelper.to_nodes(self.public_resources)]
+ return result
+
+
+
+
+
+
diff --git a/src/algs/heft/simple_heft.py b/src/algs/heft/simple_heft.py
new file mode 100644
index 0000000..9d5b1f8
--- /dev/null
+++ b/src/algs/heft/simple_heft.py
@@ -0,0 +1,169 @@
+from functools import partial
+from pprint import pprint
+
+from src.algs.common.ScheduleBuilder import FreeSlotIterator
+from src.algs.heft.HeftHelper import HeftHelper
+from src.core.environment.ResourceManager import Scheduler
+from src.core.environment.BaseElements import Node
+from src.core.environment.BaseElements import SoftItem
+from src.core.environment.ResourceManager import ScheduleItem
+from src.core.environment.ResourceManager import Schedule
+from src.core.environment.Utility import reverse_dict
+
+## TODO: obsolete; remove this later
+class StaticHeftPlanner(Scheduler):
+ global_count = 0
+ def __init__(self):
+ self.task_rank_cache = dict()
+ self.current_time = 0
+ pass
+
+ def compcost(self, job, agent):
+ return self.estimator.estimate_runtime(job, agent)
+
+ def commcost(self, ni, nj, A, B):
+ return self.estimator.estimate_transfer_time(A, B, ni, nj)
+
+ def make_ranking(self, wf, nodes):
+ ##resources = self.resource_manager.get_resources()
+ ##print("common nodes count:" + str(len(toNodes(resources))))
+ ##nodes = HeftHelper.to_nodes(resources)
+ ranking_func = HeftHelper.build_ranking_func(nodes, self.compcost, self.commcost)
+ wf_jobs = ranking_func(wf)
+ return wf_jobs
+
+
+ def schedule(self):
+ """
+ create inter-priority
+ """
+ def byPriority(wf):
+ return 0 if wf.priority is None else wf.priority
+
+ ##simple inter priority sorting
+ sorted_wfs = sorted(self.workflows, key=byPriority)
+ wf_jobs = {wf: [] for wf in sorted_wfs}
+ resources = self.resource_manager.get_resources()
+ ##print("common nodes count:" + str(len(toNodes(resources))))
+ nodes = HeftHelper.to_nodes(resources)
+
+ wf_jobs = {wf: self.make_ranking(wf, nodes) for wf in sorted_wfs}
+
+ ##new_schedule = self.get_unchanged_schedule(self.old_schedule, time)
+ new_schedule = Schedule({node: [] for node in nodes})
+ new_plan = new_schedule.mapping
+
+ for (wf, jobs) in wf_jobs.items():
+
+
+ new_schedule = self.mapping([(wf, jobs)],
+ new_plan,
+ nodes,
+ self.commcost,
+ self.compcost)
+ new_plan = new_schedule.mapping
+
+ return new_schedule
+
+ def mapping(self, sorted_jobs, existing_plan, live_nodes, commcost, compcost):
+ """def allocate(job, orders, jobson, prec, compcost, commcost):"""
+ """ Allocate job to the machine with earliest finish time
+
+ Operates in place
+ """
+
+
+ ## TODO: add finished tasks
+ jobson = dict()
+ for (node, items) in existing_plan.items():
+ for item in items:
+ if item.state == ScheduleItem.FINISHED or item.state == ScheduleItem.EXECUTING:
+ jobson[item.job] = node
+
+
+ new_plan = existing_plan
+
+
+
+
+ def ft(machine):
+ #cost = st(machine)
+ runtime = compcost(task, machine)
+ cost = st(machine, runtime) + runtime
+ ##print("machine: %s job:%s cost: %s" % (machine.name, task.id, cost))
+ ##print("machine: " + str(machine.name) + " cost: " + str(cost))
+
+ return cost
+
+ if len(live_nodes) != 0:
+ ## if there are no live nodes we just return the same cleaned schedule
+ for wf, tasks in sorted_jobs:
+ ##wf_dag = self.convert_to_parent_children_map(wf)
+ wf_dag = HeftHelper.convert_to_parent_children_map(wf)
+ prec = reverse_dict(wf_dag)
+ for task in tasks:
+ st = partial(self.start_time, wf, task, new_plan, jobson, prec, commcost)
+
+ # ress = [(key, ft(key)) for key in new_plan.keys()]
+ # agent_pair = min(ress, key=lambda x: x[1][0])
+ # agent = agent_pair[0]
+ # start = agent_pair[1][0]
+ # end = agent_pair[1][1]
+
+ # agent = min(new_plan.keys(), key=ft)
+ agent = min(live_nodes, key=ft)
+ runtime = compcost(task, agent)
+ start = st(agent, runtime)
+ end = ft(agent)
+
+ # new_plan[agent].append(ScheduleItem(task, start, end))
+ Schedule.insert_item(new_plan, agent, ScheduleItem(task, start, end))
+
+ jobson[task] = agent
+
+
+ new_sched = Schedule(new_plan)
+ return new_sched
+
+ def start_time(self, wf, task, orders, jobson, prec, commcost, node, runtime):
+
+ ## check if the software requirements are satisfied
+ if self.can_be_executed(node, task):
+ ## a static or running virtual machine,
+ ## or a failed one - it works here too
+ if node.state is not Node.Down:
+
+ if len(task.parents) == 1 and wf.head_task.id == list(task.parents)[0].id:
+ comm_ready = 0
+ else:
+ parent_tasks = set()
+ for p in task.parents:
+ val = self.endtime(p, orders[jobson[p]]) + commcost(p, task, node, jobson[p])
+ parent_tasks.add(val)
+ comm_ready = max(parent_tasks)
+
+
+ (st, end) = next(FreeSlotIterator(self.current_time, comm_ready, runtime, orders[node]))
+ return st
+
+
+ # agent_ready = orders[node][-1].end_time if orders[node] else 0
+ # return max(agent_ready, comm_ready, self.current_time)
+ else:
+ return 1000000
+ else:
+ return 1000000
+
+ def can_be_executed(self, node, job):
+ ## check it
+ return (job.soft_reqs in node.soft) or (SoftItem.ANY_SOFT in node.soft)
+
+ def endtime(self, job, events):
+ """ Endtime of job in list of events """
+ # for e in reverse(events):
+ # if e.job.id == job.id:
+ # return e.end_time
+
+ for e in events:
+ if e.job == job:
+ return e.end_time
diff --git a/src/algs/peft/DSimplePeft.py b/src/algs/peft/DSimplePeft.py
new file mode 100644
index 0000000..58d73a0
--- /dev/null
+++ b/src/algs/peft/DSimplePeft.py
@@ -0,0 +1,80 @@
+from src.algs.peft.PeftHelper import PeftHelper
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+from src.core.environment.Utility import timing
+from src.algs.peft.simple_peft import StaticPeftPlanner
+
+
+class DynamicPeft(StaticPeftPlanner):
+ executed_tasks = set()
+ def get_nodes(self):
+ resources = self.resource_manager.get_resources()
+ nodes = PeftHelper.to_nodes(resources)
+ return nodes
+ # return self.resource_manager.get_nodes()
+
+ def __init__(self, workflow, resource_manager, estimator, oct, ranking=None):
+ self.current_schedule = Schedule(dict())
+ self.workflow = workflow
+ self.resource_manager = resource_manager
+ self.estimator = estimator
+ self.oct = oct
+
+ self.current_time = 0
+
+ nodes = self.get_nodes()
+
+ self.wf_jobs = self.make_ranking(self.workflow, nodes) if ranking is None else ranking
+
+ # print("A: " + str(self.wf_jobs))
+
+ #TODO: remove it later
+ # to_print = ''
+ # for job in self.wf_jobs:
+ # to_print = to_print + str(job.id) + " "
+ # print(to_print)
+ pass
+
+ @timing
+ def run(self, current_cleaned_schedule):
+ ## current_cleaned_schedule - this schedule contains only
+ ## finished and executing tasks; all unfinished and failed ones have already been removed
+ ## current_cleaned_schedule also accounts for down nodes and newly added ones
+ ## THE ALGORITHM DOESN'T CHECK THE ADDITION OF NEW NODES BY ITSELF
+ ## nodes contains only the currently available nodes
+
+ ## 1. get all unscheduled tasks
+ ## 2. sort them by rank
+ ## 3. map on the existed nodes according to current_cleaned_schedule
+
+ nodes = self.get_nodes()
+
+ for_planning = PeftHelper.get_tasks_for_planning(self.workflow, current_cleaned_schedule)
+ ## TODO: check if it sorted properly
+ for_planning = set([task.id for task in for_planning])
+
+ sorted_tasks = [task for task in self.wf_jobs if task.id in for_planning]
+
+ # print("P: " + str(sorted_tasks))
+
+ new_sched = self.mapping([(self.workflow, sorted_tasks)], current_cleaned_schedule.mapping, nodes, self.commcost, self.compcost)
+ return new_sched
+
+ def endtime(self, job, events):
+ """ Endtime of job in list of events """
+ for e in events:
+ if e.job == job and (e.state == ScheduleItem.FINISHED or e.state == ScheduleItem.EXECUTING or e.state == ScheduleItem.UNSTARTED):
+ return e.end_time
+
+def run_peft(workflow, resource_manager, estimator):
+ """
+ It simply runs PEFT with an empty initial schedule
+ and returns the complete schedule
+ """
+ oct = PeftHelper.get_OCT(workflow, resource_manager, estimator)
+ peft = DynamicPeft(workflow, resource_manager, estimator, oct)
+ nodes = resource_manager.get_nodes()
+ init_schedule = Schedule({node: [] for node in nodes})
+ return peft.run(init_schedule)
+
+
+
diff --git a/src/algs/peft/PeftHelper.py b/src/algs/peft/PeftHelper.py
new file mode 100644
index 0000000..294fa8f
--- /dev/null
+++ b/src/algs/peft/PeftHelper.py
@@ -0,0 +1,164 @@
+from functools import partial
+
+from src.core.environment.ResourceManager import Scheduler, ScheduleItem, Schedule
+
+
+class PeftHelper(Scheduler):
+
+ @staticmethod
+ def peft_rank(wf, rm, estimator):
+ nodes = rm.get_nodes()
+ ranking = PeftHelper.build_ranking_func(nodes,
+ lambda job, agent: estimator.estimate_runtime(job, agent),
+ lambda ni, nj, A, B: estimator.estimate_transfer_time(A, B, ni, nj))
+ sorted_tasks = [t.id for t in ranking(wf)]
+ return sorted_tasks
+
+
+ @staticmethod
+ def to_nodes(resources):
+ result = set()
+ for resource in resources:
+ result.update(resource.nodes)
+ return result
+
+ @staticmethod
+ def build_ranking_func(nodes, compcost, commcost, oct):
+ task_rank_cache = dict()
+
+ def ranking_func(wf):
+ wf_dag = PeftHelper.convert_to_parent_children_map(wf)
+ rank = partial(PeftHelper.ranking, nodes=nodes, oct=oct)
+ jobs = set(wf_dag.keys()) | set(x for xx in wf_dag.values() for x in xx)
+
+ ## TODO: the sort sometimes gives different results
+ ## TODO: this is expected, since only elements with the same rank
+ ## TODO: may change their positions relative to each other,
+ ## TODO: but the situation still needs deeper investigation
+ #jbs = [(job, rank(job))for job in jobs]
+ #jbs = sorted(jbs, key=lambda x: x[1])
+ #jbs = list(reversed(jbs))
+ #print("===========JBS=================")
+ #for job, rk in jbs:
+ # print("J: " + str(job) + " " + str(rk))
+ #print("===========END_JBS=================")
+
+ jobs = sorted(jobs, key=rank)
+
+
+ return list(reversed(jobs))
+
+ return ranking_func
+
+ @staticmethod
+ def ranking(ni, nodes, oct):
+
+ result = sum(oct[(ni,p)] for p in nodes) / len(nodes)
+ result = int(round(result, 6) * 1000000) + PeftHelper.get_seq_number(ni)
+ return result
+
+ @staticmethod
+ def get_seq_number(task):
+ ## It is assumed that task.id has only one format: ID000[2-digit number]_000
+ id = task.id
+ number = id[5:7]
+ return int(number)
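+ ## Example (assuming the id format noted above): for task.id == "ID00042_000",
+ ## id[5:7] == "42", so get_seq_number returns 42.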
+
+
+ @staticmethod
+ def avr_compcost(ni, nodes, compcost):
+ """ Average computation cost """
+ return sum(compcost(ni, node) for node in nodes) / len(nodes)
+
+ @staticmethod
+ def avr_commcost(ni, nj, pk, pw, nodes, commcost):
+ if (pk == pw):
+ return 0
+ return sum(commcost(ni, nj, pk, a) for a in nodes) / len(nodes)
+
+ @staticmethod
+ def convert_to_parent_children_map(wf):
+ head = wf.head_task
+ map = dict()
+ def mapp(parents, map):
+ for parent in parents:
+ st = map.get(parent, set())
+ st.update(parent.children)
+ map[parent] = st
+ mapp(parent.children, map)
+ mapp(head.children, map)
+ return map
+
+ @staticmethod
+ def get_all_tasks(wf):
+ map = PeftHelper.convert_to_parent_children_map(wf)
+ tasks = [task for task in map.keys()]
+ return tasks
+
+ @staticmethod
+ def clean_unfinished(schedule):
+ def clean(items):
+ return [item for item in items if item.state == ScheduleItem.FINISHED or item.state == ScheduleItem.EXECUTING]
+ new_mapping = {node: clean(items) for (node, items) in schedule.mapping.items()}
+ return Schedule(new_mapping)
+
+ @staticmethod
+ def get_tasks_for_planning(wf, schedule):
+ ## TODO: remove duplicate code later
+ def clean(items):
+ return [item.job for item in items if item.state == ScheduleItem.FINISHED or item.state == ScheduleItem.EXECUTING]
+ def get_not_for_planning_tasks(schedule):
+ result = set()
+ for (node, items) in schedule.mapping.items():
+ unfin = clean(items)
+ result.update(unfin)
+ return result
+ all_tasks = PeftHelper.get_all_tasks(wf)
+ not_for_planning = get_not_for_planning_tasks(schedule)
+ # def check_in_not_for_planning(tsk):
+ # for t in not_for_planning:
+ # if t.id == tsk.id:
+ # return True
+ # return False
+ # for_planning = [tsk for tsk in all_tasks if not(check_in_not_for_planning(tsk))]
+ for_planning = set(all_tasks) - set(not_for_planning)
+ return for_planning
+
+
+ @staticmethod
+ def get_OCT(wf, rm, estimator):
+ wf_dag = PeftHelper.convert_to_parent_children_map(wf)
+ w = lambda job, agent: estimator.estimate_runtime(job, agent)
+ c = lambda ni, nj, A, B: estimator.estimate_transfer_time(A, B, ni, nj)
+ task_rank_cache = dict()
+ return PeftHelper.create_OCT(wf, rm.get_nodes(), wf_dag, w, c, task_rank_cache)
+
+ @staticmethod
+ def create_OCT(wf, nodes, succ, compcost, commcost, task_rank_cache):
+ #result = (max(min(getCost(tj, pw) + w(tj,pw) + c(ti,tj)) for pw in nodes) for tj in succ[ti])
+ w = compcost
+ c = partial(PeftHelper.avr_commcost, nodes=nodes, commcost=commcost)
+ jobs = set(succ.keys()) | set(x for xx in succ.values() for x in xx)
+
+ def get_oct_elem(ti, pk):
+ result = task_rank_cache.get((ti,pk), None)
+ if result is not None:
+ return result
+ if ti in succ and succ[ti]:
+
+ #result = max(min(get_oct_elem(tj, pw) + w(tj,pw) + c(ti,tj, pk, pw) for pw in nodes) for tj in succ[ti])
+ result = max(min(get_oct_elem(tj, pw) + (w(tj,pw))*100 + c(ti,tj, pw, pk) for pw in nodes) for tj in succ[ti])
+ else:
+ result = 0
+ task_rank_cache[(ti,pk)] = result
+ return result
+
+ oct = dict()
+ for ti in jobs:
+ for pk in nodes:
+ oct[(ti,pk)] = get_oct_elem(ti, pk)
+ return oct
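+
+ ## For reference, the recurrence implemented by get_oct_elem above; it is a sketch of the
+ ## standard PEFT Optimistic Cost Table, except that this implementation scales the runtime
+ ## term by 100:
+ ##
+ ##   OCT(ti, pk) = max over tj in succ(ti) of
+ ##                 min over pw in nodes of [ OCT(tj, pw) + 100 * w(tj, pw) + avg_c(ti, tj, pw, pk) ]
+ ##   OCT(ti, pk) = 0 for exit tasks (no successors)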
+
+
+
+
diff --git a/src/algs/peft/PublicResourceManager.py b/src/algs/peft/PublicResourceManager.py
new file mode 100644
index 0000000..b40e6a1
--- /dev/null
+++ b/src/algs/peft/PublicResourceManager.py
@@ -0,0 +1,58 @@
+from src.algs.peft.PeftHelper import PeftHelper
+from src.core.environment.BaseElements import SoftItem, Node
+
+
+class PublicResourceManager:
+ # public_resources_manager:
+ # determines nodes of the proper soft type
+ # checks for and determines free nodes
+ # determines the reliability of every node
+ # determines the time_of_execution probability for each (task, node) pair
+
+ def __init__(self, public_resources, reliability_map, probability_estimator):
+ self.public_resources = public_resources
+ self.reliability_map = reliability_map
+ self.probability_estimator = probability_estimator
+
+ self.busies_nodes = set()
+
+ ## get available nodes by soft type
+ def get_by_softreq(self, soft_reqs):
+ nodes = PeftHelper.to_nodes(self.public_resources)
+ def check_reqs(node):
+ return (soft_reqs in node.soft) or (SoftItem.ANY_SOFT in node.soft)
+ gotcha = [node for node in nodes if node.state != Node.Down and check_reqs(node)]
+ return gotcha
+
+ def isBusy(self, node):
+ return node.name in self.busies_nodes
+
+ def checkBusy(self, node, is_busy):
+ if not is_busy:
+ self.busies_nodes.remove(node.name)
+ else:
+ self.busies_nodes.add(node.name)
+
+ def checkDown(self, node_name, is_down):
+ nodes = PeftHelper.to_nodes(self.public_resources)
+ for nd in nodes:
+ if nd.name == node_name:
+ if is_down:
+ nd.state = Node.Down
+ else:
+ nd.state = Node.Unknown
+ pass
+
+
+ def get_reliability(self, node_name):
+ return self.reliability_map[node_name]
+
+ def isCloudNode(self, node):
+ result = node.name in [nd.name for nd in PeftHelper.to_nodes(self.public_resources)]
+ return result
+
+
+
+
+
+
diff --git a/src/algs/peft/simple_peft.py b/src/algs/peft/simple_peft.py
new file mode 100644
index 0000000..a5dbe63
--- /dev/null
+++ b/src/algs/peft/simple_peft.py
@@ -0,0 +1,165 @@
+from functools import partial
+
+from src.algs.common.ScheduleBuilder import FreeSlotIterator
+from src.algs.peft.PeftHelper import PeftHelper
+from src.core.environment.ResourceManager import Scheduler
+from src.core.environment.BaseElements import Node
+from src.core.environment.BaseElements import SoftItem
+from src.core.environment.ResourceManager import ScheduleItem
+from src.core.environment.ResourceManager import Schedule
+from src.core.environment.Utility import reverse_dict
+
+## TODO: obsolete, remove this later
+class StaticPeftPlanner(Scheduler):
+ global_count = 0
+ def __init__(self):
+ self.task_rank_cache = dict()
+ self.current_time = 0
+ pass
+
+ def compcost(self, job, agent):
+ return self.estimator.estimate_runtime(job, agent)
+
+ def commcost(self, ni, nj, A, B):
+ return self.estimator.estimate_transfer_time(A, B, ni, nj)
+
+ def make_ranking(self, wf, nodes):
+ ##resources = self.resource_manager.get_resources()
+ ##print("common nodes count:" + str(len(toNodes(resources))))
+ ##nodes = PeftHelper.to_nodes(resources)
+ ranking_func = PeftHelper.build_ranking_func(nodes, self.compcost, self.commcost, self.oct)
+ wf_jobs = ranking_func(wf)
+ return wf_jobs
+
+
+ def schedule(self):
+ """
+ create inter-priority
+ """
+ def byPriority(wf):
+ return 0 if wf.priority is None else wf.priority
+
+ ##simple inter priority sorting
+ sorted_wfs = sorted(self.workflows, key=byPriority)
+ wf_jobs = {wf: [] for wf in sorted_wfs}
+ resources = self.resource_manager.get_resources()
+ ##print("common nodes count:" + str(len(toNodes(resources))))
+ nodes = PeftHelper.to_nodes(resources)
+
+ wf_jobs = {wf: self.make_ranking(wf, nodes) for wf in sorted_wfs}
+
+ ##new_schedule = self.get_unchanged_schedule(self.old_schedule, time)
+ new_schedule = Schedule({node: [] for node in nodes})
+ new_plan = new_schedule.mapping
+
+ for (wf, jobs) in wf_jobs.items():
+
+
+ new_schedule = self.mapping([(wf, jobs)],
+ new_plan,
+ nodes,
+ self.commcost,
+ self.compcost)
+ new_plan = new_schedule.mapping
+
+ return new_schedule
+
+ def mapping(self, sorted_jobs, existing_plan, nodes, commcost, compcost):
+ """def allocate(job, orders, jobson, prec, compcost, commcost):"""
+ """ Allocate job to the machine with earliest finish time
+
+ Operates in place
+ """
+
+
+ ## TODO: add finished tasks
+ jobson = dict()
+ for (node, items) in existing_plan.items():
+ for item in items:
+ if item.state == ScheduleItem.FINISHED or item.state == ScheduleItem.EXECUTING:
+ jobson[item.job] = node
+
+ new_plan = existing_plan
+
+ def ft(machine):
+ #cost = st(machine)
+ runtime = compcost(task, machine)
+ cost = st(machine, runtime) + runtime + self.oct[(task, machine)]
+ ##print("machine: %s job:%s cost: %s" % (machine.name, task.id, cost))
+ ##print("machine: " + str(machine.name) + " cost: " + str(cost))
+
+ return cost
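+
+ ## ft(machine) is the "optimistic" finish time used by PEFT for node selection:
+ ## earliest start on the machine + runtime + OCT(task, machine). The node that
+ ## minimizes it is chosen below, while ft_run (next) gives the plain finish time
+ ## that is actually stored in the schedule item.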
+
+ def ft_run(machine):
+ runtime = compcost(task, machine)
+ cost = st(machine, runtime) + runtime
+ return cost
+
+ for wf, tasks in sorted_jobs:
+ ##wf_dag = self.convert_to_parent_children_map(wf)
+ wf_dag = PeftHelper.convert_to_parent_children_map(wf)
+ prec = reverse_dict(wf_dag)
+ for task in tasks:
+ st = partial(self.start_time, wf, task, new_plan, jobson, prec, commcost)
+
+ # ress = [(key, ft(key)) for key in new_plan.keys()]
+ # agent_pair = min(ress, key=lambda x: x[1][0])
+ # agent = agent_pair[0]
+ # start = agent_pair[1][0]
+ # end = agent_pair[1][1]
+
+ agent = min(new_plan.keys(), key=ft)
+ runtime = compcost(task, agent)
+ start = st(agent, runtime)
+ end = ft_run(agent)
+
+ # new_plan[agent].append(ScheduleItem(task, start, end))
+ Schedule.insert_item(new_plan, agent, ScheduleItem(task, start, end))
+
+ jobson[task] = agent
+ new_sched = Schedule(new_plan)
+ return new_sched
+
+ def start_time(self, wf, task, orders, jobson, prec, commcost, node, runtime):
+
+ ## check if soft satisfy requirements
+ if self.can_be_executed(node, task):
+ ## static or running virtual machine
+ ## or failed it works here too
+ if node.state is not Node.Down:
+
+ if len(task.parents) == 1 and wf.head_task.id == list(task.parents)[0].id:
+ comm_ready = 0
+ else:
+ comm_ready = max([self.endtime(p, orders[jobson[p]])
+ + commcost(p, task, node, jobson[p]) for p in task.parents])
+
+
+ (st, end) = next(FreeSlotIterator(self.current_time, comm_ready, runtime, orders[node]))
+ return st
+
+
+ # agent_ready = orders[node][-1].end_time if orders[node] else 0
+ # return max(agent_ready, comm_ready, self.current_time)
+ else:
+ ## the node is down: return a very large start time so it is never chosen
+ return 1000000
+ else:
+ ## soft requirements are not satisfied: same large penalty
+ return 1000000
+
+ def can_be_executed(self, node, job):
+ ## check whether the node's installed software satisfies the job's soft requirements
+ return (job.soft_reqs in node.soft) or (SoftItem.ANY_SOFT in node.soft)
+
+ def endtime(self, job, events):
+ """ Endtime of job in list of events """
+ # for e in reverse(events):
+ # if e.job.id == job.id:
+ # return e.end_time
+
+ for e in events:
+ if e.job == job:
+ return e.end_time
diff --git a/src/algs/pso/gapso.py b/src/algs/pso/gapso.py
new file mode 100644
index 0000000..d600ec4
--- /dev/null
+++ b/src/algs/pso/gapso.py
@@ -0,0 +1,28 @@
+
+def run_gapso(toolbox, logbook, stats, gen, n, ga, pso):
+
+ pop = toolbox.population(n)
+
+ best = None
+
+ for g in range(gen):
+ for p in pop:
+ p.fitness = toolbox.fitness(p)
+
+ b = max(pop, key=lambda x: x.fitness)
+ best = max(best, b, key=lambda x: x.fitness) if best is not None else b
+
+ (pop, _, _) = pso(gen_curr=g, gen_step=1, pop=pop, best=best)
+ (pop, _, _) = ga(gen_curr=1, gen_step=1, pop=pop)
+
+ data = stats.compile(pop) if stats is not None else None
+ if logbook is not None:
+ logbook.record(gen=g, evals=len(pop), **data)
+ print(logbook.stream)
+ pass
+
+
+ return pop, logbook, best
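+
+# Hedged usage sketch (comment only): `toolbox` is assumed to be a DEAP-style toolbox
+# providing population(n) and fitness(p), and `ga_step`/`pso_step` are assumed to be
+# callables with the keyword signatures used above (gen_curr, gen_step, pop, best);
+# all of these names are hypothetical.
+#
+#   pop, logbook, best = run_gapso(toolbox, logbook, stats, gen=50, n=30, ga=ga_step, pso=pso_step)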
+
+
+
diff --git a/src/algs/pso/mapping_operators.py b/src/algs/pso/mapping_operators.py
new file mode 100644
index 0000000..2b0caed
--- /dev/null
+++ b/src/algs/pso/mapping_operators.py
@@ -0,0 +1,57 @@
+import random
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+from src.algs.common.MapOrdSchedule import MAPPING_SPECIE, ORDERING_SPECIE
+from src.algs.common.particle_operations import MappingParticle
+from src.algs.pso.sdpso import velocity_update
+from src.algs.common.MapOrdSchedule import fitness as basefitness
+
+
+def position_update(mapping_particle, velocity):
+ def _cutting_by_task(velocity, cur_task):
+ ## keep only the nodes whose velocity entry belongs to the current task
+ return [node for (task, node), v in velocity.items() if task == cur_task]
+ alpha = random.random()
+ cut_velocity = velocity.cutby(alpha)
+ new_position = {}
+ for task in mapping_particle.entity:
+ available_nodes = _cutting_by_task(cut_velocity, task)
+ if len(available_nodes) == 0:
+ available_nodes = [mapping_particle.entity[task]]
+
+ #print("=== task: {0}; available nodes: {1}".format(task, [node.entity for node in available_nodes]))
+
+ # new_node = tools.selRoulette(available_nodes, 1)[0].entity
+ # new_node = max(available_nodes, key=lambda x: x.fitness).entity
+ # new_node = tools.selTournament(available_nodes, 1, 2)[0].entity
+ new_node = available_nodes[random.randint(0, len(available_nodes) - 1)]#.entity
+ new_position[task] = new_node
+ return new_position
+
+
+def schedule_to_position(schedule):
+ return MappingParticle({item.job.id: node.name for node, items in schedule.mapping.items() for item in items})
+
+
+def update(w, c1, c2, p, best, pop):
+ p.velocity = velocity_update(w, c1, c2, p.best, best, p.velocity, p, pop)
+ new_position = position_update(p, p.velocity)
+ p.entity = new_position
+ pass
+
+
+def generate(wf, rm, estimator, n):
+ pop = []
+ for i in range(n):
+ sched = SimpleRandomizedHeuristic(wf, rm.get_nodes(), estimator).schedule()
+ particle = schedule_to_position(sched)
+ particle.velocity = MappingParticle.Velocity({})
+ pop.append(particle)
+ return pop
+
+
+def construct_solution(particle, sorted_tasks):
+ return {MAPPING_SPECIE: [(t, particle.entity[t]) for t in sorted_tasks], ORDERING_SPECIE: sorted_tasks}
+
+
+def fitness(wf, rm, estimator, sorted_tasks, particle):
+ solution = construct_solution(particle, sorted_tasks)
+ return basefitness(wf, rm, estimator, solution)
\ No newline at end of file
diff --git a/src/algs/pso/ordering_operators.py b/src/algs/pso/ordering_operators.py
new file mode 100644
index 0000000..c4fa507
--- /dev/null
+++ b/src/algs/pso/ordering_operators.py
@@ -0,0 +1,117 @@
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+from src.algs.common.particle_operations import MappingParticle, OrderingParticle, CompoundParticle
+from src.algs.ga.GAFunctions2 import unmoveable_tasks
+from src.algs.pso.mapping_operators import construct_solution
+from src.algs.common.MapOrdSchedule import ord_and_map, build_schedule as base_build_schedule, \
+ validate_mapping_with_alive_nodes
+from src.algs.common.MapOrdSchedule import fitness as basefitness
+from src.algs.pso.sdpso import velocity_update
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+
+
+def build_schedule(wf, rm, estimator, particle):
+ ordering_particle = particle.ordering
+ mapping_particle = particle.mapping
+ ordering = numseq_to_ordering(wf, ordering_particle)
+ solution = construct_solution(mapping_particle, ordering)
+ sched = base_build_schedule(wf, estimator, rm, solution)
+ return sched
+
+
+def fitness(wf, rm, estimator, particle):
+ sched = build_schedule(wf, rm, estimator, particle)
+ return basefitness(wf, rm, estimator, sched)
+
+
+def ordering_to_numseq(ordering, min=-1, max=1):
+ step = abs((max - min)/len(ordering))
+ initial = min
+ ord_position = []
+ for job_id in ordering:
+ initial += step
+ ord_position.append(initial)
+ return ord_position
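+
+# Worked example: for an ordering of four task ids with the default range [-1, 1],
+# step == 0.5 and the produced positions are [-0.5, 0.0, 0.5, 1.0]
+# (initial is incremented before being appended, so the lower bound itself is never emitted).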
+
+
+def numseq_to_ordering(wf, ordering_particle, fixed_tasks_ids=[]):
+
+ def recover_ordering(ordering):
+ """
+ this function is aimed to recover ordering
+ taking into account parent-child relations
+ :param ordering:
+ :return:
+ """
+
+ # 1. for each taks you should verify can it be executed
+ # regarding to parents have already been put to correct sequence
+
+ left_ordering = [task_id for task_id, value in ordering]
+ correct_ordering = [] + fixed_tasks_ids
+
+ while len(left_ordering) > 0:
+
+ length_before = len(correct_ordering)
+
+ not_ready_to_execute = []
+ for task_id in left_ordering:
+ if wf.is_task_ready(task_id, correct_ordering):
+ correct_ordering.append(task_id)
+ else:
+ not_ready_to_execute.append(task_id)
+ left_ordering = not_ready_to_execute
+
+ length_after = len(correct_ordering)
+
+ assert length_before < length_after, \
+ 'no new task could be recovered for the correct ordering; ' \
+ 'length_before: {0}, length_after: {1}; probably an error in the logic'.format(length_before, length_after)
+
+ corrected_ordering = correct_ordering[len(fixed_tasks_ids):]
+ return corrected_ordering
+
+
+ ordering = sorted(ordering_particle.entity.items(), key=lambda x: x[1])
+
+ ordering = recover_ordering(ordering)
+ return ordering
+
+
+def generate(wf, rm, estimator, schedule=None, fixed_schedule_part=None, current_time=0.0):
+ sched = schedule if schedule is not None else SimpleRandomizedHeuristic(wf, rm.get_nodes(), estimator).schedule(fixed_schedule_part, current_time)
+
+ if fixed_schedule_part is not None:
+ un_tasks = unmoveable_tasks(fixed_schedule_part)
+ clean_sched = Schedule({node: [item for item in items if item.job.id not in un_tasks and item.state != ScheduleItem.FAILED]
+ for node, items in sched.mapping.items()})
+ else:
+ clean_sched = sched
+
+ mapping, ordering = ord_and_map(clean_sched)
+ ordering_numseq = ordering_to_numseq(ordering)
+ ordering_map = {task_id: val for task_id, val in zip(ordering, ordering_numseq)}
+ ord_p, map_p = OrderingParticle(ordering_map), MappingParticle(mapping)
+ ord_p.velocity = OrderingParticle.Velocity({})
+ map_p.velocity = MappingParticle.Velocity({})
+
+ result = CompoundParticle(map_p, ord_p)
+ if schedule is None and not validate_mapping_with_alive_nodes(result.mapping.entity, rm):
+ raise Exception("found invalid solution in generated array")
+ return result
+
+
+def ordering_update(w, c1, c2, p, best, pop, min=-1, max=1):
+ new_velocity = velocity_update(w, c1, c2, p.best, best, p.velocity, p, pop)
+ new_velocity.limit_by(min, max)
+ new_entity = (p + new_velocity)
+ new_entity.limit_by(min, max)
+ p.entity = new_entity.entity
+ p.velocity = new_velocity
+ pass
diff --git a/src/algs/pso/rdpso/mapordschedule.py b/src/algs/pso/rdpso/mapordschedule.py
new file mode 100644
index 0000000..2269b7a
--- /dev/null
+++ b/src/algs/pso/rdpso/mapordschedule.py
@@ -0,0 +1,110 @@
+from deap.base import Fitness
+from src.algs.common.NewSchedulerBuilder import place_task_to_schedule
+from src.core.environment.BaseElements import Node
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+from src.core.environment.Utility import Utility
+
+
+MAPPING_SPECIE = "MappingSpecie"
+ORDERING_SPECIE = "OrderingSpecie"
+
+from src.algs.pso.rdpso.rdpso import mappingTransform
+
+def build_schedule(workflow, estimator, resource_manager, solution):
+ """
+ the solution consists of all the parts necessary to build the whole schedule
+ For the moment, it is assumed that all species taking part in the algorithm
+ are needed to build a complete solution
+ solution = {
+ s1.name: val1,
+ s2.name: val2,
+ ....
+ }
+ """
+ ms = solution[MAPPING_SPECIE]
+ os = solution[ORDERING_SPECIE]
+
+ assert check_precedence(workflow, os), "Precedence is violated"
+
+ ms = {t: resource_manager.byName(n) for t, n in ms}
+ schedule_mapping = {n: [] for n in set(ms.values())}
+ task_to_node = {}
+ for t in os:
+ node = ms[t]
+ t = workflow.byId(t)
+ (start_time, end_time) = place_task_to_schedule(workflow,
+ estimator,
+ schedule_mapping,
+ task_to_node,
+ ms, t, node, 0)
+
+ task_to_node[t.id] = (node, start_time, end_time)
+ schedule = Schedule(schedule_mapping)
+ return schedule
+
+
+def check_precedence(workflow, task_seq):
+ for i in range(len(task_seq)):
+ task = workflow.byId(task_seq[i])
+ pids = [p.id for p in task.parents]
+ for j in range(i + 1, len(task_seq)):
+ if task_seq[j] in pids:
+ return False
+ return True
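+
+# Illustrative example with hypothetical task ids: if task "B" has parent "A",
+# the sequence ["B", "A"] violates precedence (the parent "A" appears after its
+# child "B"), so check_precedence returns False, whereas ["A", "B"] returns True.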
+
+
+def fitness(wf, rm, estimator, position):
+ if isinstance(position, Schedule):
+ sched = position
+ else:
+ sched = build_schedule(wf, estimator, rm, position)
+
+ # isvalid = Utility.is_static_schedule_valid(wf,sched)
+ # if not isvalid:
+ # print("NOT VALID SCHEDULE!")
+
+ makespan = Utility.makespan(sched)
+ ## TODO: make a real estimation later
+ cost = 0.0
+ Fitness.weights = [-1.0, -1.0]
+ fit = Fitness(values=(makespan, cost))
+ ## TODO: make a normal multi-objective fitness estimation
+ fit.mofit = makespan
+ return fit
+
+
+
+def mapping_from_schedule(schedule, mapMatrix):
+ mapping = {item.job.id: mapMatrix[item.job.id, node.name] for node, items in schedule.mapping.items()
+ for item in items}
+ return mapping
+
+
+def ordering_from_schedule(schedule):
+ ordering = sorted((item for node, items in schedule.mapping.items() for item in items),
+ key=lambda x: x.start_time)
+ ordering = [item.job.id for item in ordering]
+ return ordering
+
+
+def ord_and_map(schedule, mapMatrix):
+ return mapping_from_schedule(schedule, mapMatrix), ordering_from_schedule(schedule)
+
+
+def validate_mapping_with_alive_nodes(mapping, rm, mapMatrix):
+ """
+ :param mapping: is a dict {(task_id):(node_name)}
+ :param rm: resource manager
+ :return:
+ """
+ #TODO transform mapping from task:runtime to task:node
+ mapping_tr = mappingTransform(mapping, mapMatrix, rm.get_nodes())
+
+ alive_nodes = [node.name for node in rm.get_nodes() if node.state != Node.Down]
+ for task_id, node_name in mapping_tr.items():
+ if node_name not in alive_nodes:
+ return False
+ return True
+
+
+
diff --git a/src/algs/pso/rdpso/mapping_operators.py b/src/algs/pso/rdpso/mapping_operators.py
new file mode 100644
index 0000000..64c485c
--- /dev/null
+++ b/src/algs/pso/rdpso/mapping_operators.py
@@ -0,0 +1,61 @@
+import random
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+from src.algs.pso.rdpso.mapordschedule import MAPPING_SPECIE, ORDERING_SPECIE
+from src.algs.pso.rdpso.particle_operations import MappingParticle
+from src.algs.pso.rdpso.rdpso import velocity_update
+from src.algs.pso.rdpso.mapordschedule import fitness as basefitness
+
+#!!!!!TODO movement
+def position_update(mapping_particle, velocity):
+ part_entity = mapping_particle.entity
+ new_position = {k: part_entity[k] + velocity.get(k) for k in part_entity}
+ return new_position
+ """
+ def _cutting_by_task(velocity):
+ return [node for (task, node), v in velocity.items()]
+ alpha = random.random()
+ cut_velocity = velocity.cutby(alpha)
+ new_position = {}
+ for task in mapping_particle.entity:
+ available_nodes = _cutting_by_task(cut_velocity)
+ if len(available_nodes) == 0:
+ available_nodes = [mapping_particle.entity[task]]
+
+ #print("=== task: {0}; available nodes: {1}".format(task, [node.entity for node in available_nodes]))
+
+ # new_node = tools.selRoulette(available_nodes, 1)[0].entity
+ # new_node = max(available_nodes, key=lambda x: x.fitness).entity
+ # new_node = tools.selTournament(available_nodes, 1, 2)[0].entity
+ new_node = available_nodes[random.randint(0, len(available_nodes) - 1)]#.entity
+ new_position[task] = new_node
+ """
+
+#TODO maybe need set like mapordschedule
+def schedule_to_position(schedule):
+ return MappingParticle({item.job.id: node.name for node, items in schedule.mapping.items() for item in items})
+
+
+def update(w, c1, c2, p, best, pop):
+ p.velocity = velocity_update(w, c1, c2, p.best, best, p.velocity, p, pop)
+ new_position = position_update(p, p.velocity)
+ p.entity = new_position
+ pass
+
+
+def generate(wf, rm, estimator, n):
+ pop = []
+ for i in range(n):
+ sched = SimpleRandomizedHeuristic(wf, rm.get_nodes(), estimator).schedule()
+ particle = schedule_to_position(sched)
+ particle.velocity = MappingParticle.Velocity({})
+ pop.append(particle)
+ return pop
+
+
+def construct_solution(particle, sorted_tasks):
+ return {MAPPING_SPECIE: [(t, particle.entity[t]) for t in sorted_tasks], ORDERING_SPECIE: sorted_tasks}
+
+
+def fitness(wf, rm, estimator, sorted_tasks, particle):
+ solution = construct_solution(particle, sorted_tasks)
+ return basefitness(wf, rm, estimator, solution)
\ No newline at end of file
diff --git a/src/algs/pso/rdpso/ordering_operators.py b/src/algs/pso/rdpso/ordering_operators.py
new file mode 100644
index 0000000..d6d6fe3
--- /dev/null
+++ b/src/algs/pso/rdpso/ordering_operators.py
@@ -0,0 +1,112 @@
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+from src.algs.pso.rdpso.particle_operations import MappingParticle, OrderingParticle, CompoundParticle
+from src.algs.ga.GAFunctions2 import unmoveable_tasks
+from src.algs.pso.rdpso.mapping_operators import construct_solution
+from src.algs.pso.rdpso.mapordschedule import ord_and_map, build_schedule as base_build_schedule, \
+ validate_mapping_with_alive_nodes
+from src.algs.pso.rdpso.mapordschedule import fitness as basefitness
+from src.algs.pso.rdpso.rdpso import velocity_update, orderingTransform, mappingTransform
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+from src.core.environment.Utility import Utility
+import copy
+
+
+def build_schedule(wf, rm, estimator, particle, mapMatrix, rankList, filterList):
+ ordering_particle = particle.ordering
+ mapping_particle = copy.copy(particle.mapping)
+ mapping_particle.entity = mappingTransform(mapping_particle.entity, mapMatrix, rm.get_nodes())
+ ordering = numseq_to_ordering(wf, ordering_particle, rankList, filterList)
+ solution = construct_solution(mapping_particle, ordering)
+ sched = base_build_schedule(wf, estimator, rm, solution)
+ return sched
+
+
+def fitness(wf, rm, estimator, particle, mapMatrix, rankList, filterList):
+ sched = build_schedule(wf, rm, estimator, particle, mapMatrix, rankList, filterList)
+ return basefitness(wf, rm, estimator, sched)
+
+# TODO: ordering particle init: a task rank list needs to be created for the ordering
+def ordering_to_numseq(ordering, rankList):
+ ord_position = []
+ for job in ordering:
+ ord_position.append((job, rankList[job]))
+ return ord_position
+
+
+def numseq_to_ordering(wf, ordering_particle, rankList, filterList, fixed_tasks_ids=[]):
+ def recover_ordering(ordering):
+ corrected_ordering = []
+
+ while len(ordering) > 0:
+ ord_iter = iter(ordering)
+ while True:
+ t = next(ord_iter)
+ if Utility.is_enough_to_be_executed(wf, t, corrected_ordering + fixed_tasks_ids):
+ ordering.remove((t))
+ corrected_ordering.append(t)
+ break
+ else:
+ #print("incorrect " + str([task[0] for task in ordering_particle.entity]))
+ pass
+ pass
+ return corrected_ordering
+
+ ordering = orderingTransform(ordering_particle.entity, rankList, filterList)
+
+ ordering = recover_ordering(ordering)
+
+ for it in range(len(ordering_particle.entity)):
+ ordering_particle.entity[it] = (ordering[it], ordering_particle.entity[it][1])
+
+ return ordering
+
+def test_valid(wf, ordering_particle, rankList, filterList, fixed_tasks_ids=[]):
+
+ def recover_ordering(ordering):
+ corrected_ordering = []
+
+ for it in range(len(ordering)):
+ t = ordering[it]
+ if Utility.is_enough_to_be_executed(wf, t, corrected_ordering + fixed_tasks_ids):
+ corrected_ordering.append(t)
+ else:
+ #print("INCORRECT")
+ return False
+ ## all tasks respected their precedence constraints
+ return True
+
+
+ ordering = orderingTransform(ordering_particle.entity, rankList, filterList)
+ #ordering = [task[0] for task in ordering_particle.entity]
+ return recover_ordering(ordering)
+
+
+
+def generate(wf, rm, estimator, mapMatrix=None, rankList=None, filterList=None, schedule=None, fixed_schedule_part=None, current_time=0.0):
+ sched = schedule if schedule is not None else SimpleRandomizedHeuristic(wf, rm.get_nodes(), estimator).schedule(fixed_schedule_part, current_time)
+
+ if fixed_schedule_part is not None:
+ un_tasks = unmoveable_tasks(fixed_schedule_part)
+ clean_sched = Schedule({node: [item for item in items if item.job.id not in un_tasks and item.state != ScheduleItem.FAILED]
+ for node, items in sched.mapping.items()})
+ else:
+ clean_sched = sched
+
+ mapping, ordering = ord_and_map(clean_sched, mapMatrix)
+ ordering_numseq = ordering_to_numseq(ordering, rankList)
+
+ #ordering_map = {task_id: val for task_id, val in zip(ordering, ordering_numseq)}
+
+ ord_p, map_p = OrderingParticle(ordering_numseq), MappingParticle(mapping)
+ ord_p.velocity = OrderingParticle.Velocity({})
+ #map_p.velocity = MappingParticle.Velocity({})
+ result = CompoundParticle(map_p, ord_p)
+ if schedule is None and not validate_mapping_with_alive_nodes(result.mapping.entity, rm, mapMatrix):
+ raise Exception("found invalid solution in generated array")
+ return result
+
+
+def ordering_update(w, c1, c2, p, best, pop):
+ new_velocity = velocity_update(w, c1, c2, p.best, best, p.velocity, p, pop)
+ new_entity = (p + new_velocity)
+ p.entity = new_entity.entity
+ p.velocity = new_velocity
+ pass
diff --git a/src/algs/pso/rdpso/particle_operations.py b/src/algs/pso/rdpso/particle_operations.py
new file mode 100644
index 0000000..37208ff
--- /dev/null
+++ b/src/algs/pso/rdpso/particle_operations.py
@@ -0,0 +1,163 @@
+from numbers import Number
+from uuid import uuid4
+import math
+from src.algs.common.individuals import FitAdapter
+
+
+class Particle(FitAdapter):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.uid = uuid4()
+ self._velocity = None
+ self._best = None
+
+ def _get_best(self): return self._best
+ def _set_best(self, b): self._best = b
+
+ def _get_velocity(self): return self._velocity
+ def _set_velocity(self, v): self._velocity = v
+
+ best = property(_get_best, _set_best)
+ velocity = property(_get_velocity, _set_velocity)
+ pass
+
+
+
+class MappingParticle(Particle):
+ def __init__(self, mapping):
+ super().__init__(mapping)
+ self.velocity = MappingParticle.Velocity({})
+ pass
+
+ def __sub__(self, other):
+ # return Position({k: self[k] for k in self.keys() - other.keys()})
+ return MappingParticle.Velocity({item[0]: (self.entity[item[0]] - other.entity[item[0]]) for item in self.entity.items() #- other.entity.items()
+ })
+
+ def __mul__(self, other):
+ if isinstance(other, Number):
+ return MappingParticle.Velocity({k: v * other for k, v in self.entity.items()})
+ raise ValueError("Other has not a suitable type for multiplication")
+
+ def emptify(self):
+ return MappingParticle.Velocity({})
+
+ class Velocity(dict):
+ def __mul__(self, other):
+ if isinstance(other, Number):
+ if other < 0:
+ raise ValueError("Only positive numbers can be used for operations with velocity")
+ return MappingParticle.Velocity({k: v * other for k, v in self.items()})
+ raise ValueError("{0} has not a suitable type for multiplication".format(other))
+
+ def __add__(self, other):
+ if (len(self) == 0):
+ vel = MappingParticle.Velocity({k: other.get(k) for k in set(other.keys())})
+ else:
+ vel = MappingParticle.Velocity({k: (self.get(k) + other.get(k)) for k in set(self.keys()).union(other.keys())})
+ return vel
+
+ def __truediv__(self, denominator):
+ if isinstance(denominator, Number):
+ return self.__mul__(1 / denominator)
+ raise ValueError("{0} does not have a suitable type for division".format(denominator))
+
+ def cutby(self, alpha):
+ return MappingParticle.Velocity({k: v for k, v in self.items() if v >= alpha})
+
+ def vector_length(self):
+ return len(self)
+
+
+ pass
+ pass
+
+
+class OrderingParticle(Particle):
+
+ def __init__(self, ordering):
+ """
+ :param ordering: has the following form
+ {
+ task_id: value
+ }
+ """
+ super().__init__(ordering)
+ pass
+
+ def __sub__(self, other):
+ if not isinstance(other, OrderingParticle):
+ raise ValueError("Invalid type of the argument for this operation")
+ velocity = OrderingParticle.Velocity([self.entity[iter][1] - other.entity[iter][1]
+ for iter in range(len(self.entity))])
+ return velocity
+
+ def __add__(self, other):
+ if not isinstance(other, OrderingParticle.Velocity):
+ raise ValueError("Invalid type of the argument for this operation: {0}".format(type(other)))
+
+ if len(other) == 0:
+ return OrderingParticle([v for v in self.entity])
+
+ velocity = OrderingParticle([(self.entity[iter][0],self.entity[iter][1] + other[iter])
+ for iter in range(len(self.entity))])
+ return velocity
+
+ def emptify(self):
+ return OrderingParticle.Velocity({k: 0.0 for k in self.entity})
+
+ class Velocity(list):
+
+ def __mul__(self, other):
+ if isinstance(other, Number):
+ if other < 0:
+ raise ValueError("Only positive numbers can be used for operations with velocity")
+ return OrderingParticle.Velocity([round(v * other, 0) for v in self])
+ raise ValueError("{0} has not a suitable type for multiplication".format(other))
+
+ def __add__(self, other):
+ if isinstance(other, OrderingParticle.Velocity):
+ if (len(self) == 0):
+ vel = OrderingParticle.Velocity([v for v in other])
+ else:
+ vel = OrderingParticle.Velocity([self[iter] + other[iter] for iter in range(len(self))])
+ return vel
+ raise ValueError("{0} has not a suitable type for adding".format(other))
+
+ def __truediv__(self, denominator):
+ if isinstance(denominator, Number):
+ return self.__mul__(1 / denominator)
+ raise ValueError("{0} does not have a suitable type for division".format(denominator))
+
+ def vector_length(self):
+ ## Velocity subclasses list, so iterate over the values directly (a list has no items())
+ return math.sqrt(sum(val * val for val in self)) / len(self)
+ pass
+ pass
+
+
+class CompoundParticle(Particle):
+ def __init__(self, mapping_particle, ordering_particle):
+ super().__init__(None)
+ self.mapping = mapping_particle
+ self.ordering = ordering_particle
+ self._best = None
+ pass
+
+ def _get_best(self):
+ return self._best
+
+ def _set_best(self, value):
+ self._best = value
+ if value is not None:
+ self.mapping.best = value.mapping
+ self.ordering.best = value.ordering
+ else:
+ self.mapping.best = None
+ self.ordering.best = None
+ pass
+
+ best = property(_get_best, _set_best)
+ pass
+
+
+
diff --git a/src/algs/pso/rdpso/rdpso.py b/src/algs/pso/rdpso/rdpso.py
new file mode 100644
index 0000000..233327a
--- /dev/null
+++ b/src/algs/pso/rdpso/rdpso.py
@@ -0,0 +1,249 @@
+"""
+This is a prototype of a set-based PSO algorithm aimed at solving the workflow scheduling problem with a fixed ordering.
+It must be refactored into a reusable, configurable form later.
+
+Position => {task_name: node_name}
+Velocity => {(task_name, node_name): probability}
+"""
+from copy import deepcopy
+from functools import partial
+import random
+import src.algs.pso.rdpso.ordering_operators
+
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+from src.algs.common.individuals import FitAdapter
+#from src.algs.pso.rdpso.mapordschedule import fitness as basefitness
+#from src.algs.pso.rdpso.mapordschedule import MAPPING_SPECIE, ORDERING_SPECIE
+from src.algs.common.utilities import gather_info
+from src.core.environment.Utility import timing, RepeatableTiming
+import src.algs.pso.rdpso.ordering_operators
+from src.algs.heft.HeftHelper import HeftHelper
+import math
+
+
+#@RepeatableTiming(100)
+def run_pso(toolbox, logbook, stats, gen_curr, gen_step=1, invalidate_fitness=True, initial_pop=None, **params):
+
+ #print("PSO_STARTED")
+
+ pop = initial_pop
+
+ w, c1, c2 = params["w"], params["c1"], params["c2"]
+ n = len(pop) if pop is not None else params["n"]
+ #rm = params["rm"]
+ #nodes = rm.get_nodes()
+ wf = params["wf"]
+
+ mapMatrix = params["mapMatrix"]
+ rankList = params["rankList"]
+ filterList = params["ordFilter"]
+ #estimator = params["estimator"]
+
+ best = params.get('best', None)
+
+ hallOfFame = []
+ hallOfFameSize = int(math.log(n))
+
+ bestIndex = 0
+ changeChance = 0.1
+
+ #curFile = open("C:\Melnik\Experiments\Work\PSO_compare" + "\\" + "RDwithMap generations.txt", 'w')
+
+ if pop is None:
+ pop = toolbox.population(n)
+
+ for g in range(gen_curr, gen_curr + gen_step, 1):
+ #print("g: {0}".format(g))
+ for p in pop:
+ #if not src.algs.pso.rdpso.ordering_operators.test_valid(wf, p.ordering, rankList, filterList, fixed_tasks_ids=[]):
+ # print("part " + str(pop.index(p)) + " incorrect before correct fitness update")
+ if not hasattr(p, "fitness") or not p.fitness.valid:
+ #if 1==1:
+ p.fitness = toolbox.fitness(p, mapMatrix, rankList, filterList)
+ if not p.best or p.best.fitness < p.fitness:
+ p.best = deepcopy(p)
+ if not best or hallOfFame[hallOfFameSize-1].fitness < p.fitness:
+ hallOfFame = changeHall(hallOfFame, p, hallOfFameSize)
+ # Gather all the fitnesses in one list and print the stats
+ #gather_info(logbook, stats, g, pop)
+
+
+ bestIndex = changeIndex(bestIndex, changeChance, hallOfFameSize)
+ best = hallOfFame[bestIndex]
+
+ """
+ hallString = [part.fitness.values[0] for part in hallOfFame]
+ curFile.write(str(g) + " " + str(bestIndex) + " " + str(hallString) + "\n")
+ particlesFitness = sorted([part.fitness.values[0] for part in pop])
+ curFile.write(str(particlesFitness) + "\n")
+ curFile.write("\n")
+ """
+
+
+ for p in pop:
+
+ #if not src.algs.pso.rdpso.ordering_operators.test_valid(wf, p.ordering, rankList, filterList, fixed_tasks_ids=[]):
+ # print("part " + str(pop.index(p)) + " incorrect before update")
+ #toolbox.update(w, c1, c2, p, best, pop, g)
+ toolbox.update(w, c1, c2, p, best, pop)
+
+ #if not src.algs.pso.rdpso.ordering_operators.test_valid(wf, p.ordering, rankList, filterList, fixed_tasks_ids=[]):
+ # print("part " + str(pop.index(p)) + " incorrect after update")
+ #toolbox.update((w-w/gen_step*g), c1, c2, p, best, pop)
+ if invalidate_fitness and not g == gen_step-1:
+ for p in pop:
+ del p.fitness
+ #for p in pop:
+ #if not src.algs.pso.rdpso.ordering_operators.test_valid(wf, p.ordering, rankList, filterList, fixed_tasks_ids=[]):
+ # print("part " + str(pop.index(p)) + " incorrect after delete fitness")
+ pass
+
+
+ """
+ hallString = [part.fitness.values[0] for part in hallOfFame]
+ curFile.write(str("FINAL") + " " + str(bestIndex) + " " + str(hallString) + "\n")
+ particlesFitness = sorted([part.fitness.values[0] for part in pop])
+ curFile.write(str(particlesFitness) + "\n")
+ curFile.write("\n")
+ curFile.close()
+ """
+
+
+ hallOfFame.sort(key=lambda p:p.fitness, reverse=True)
+ best = hallOfFame[0]
+
+ return pop, logbook, best
+
+
+def velocity_update(w, c1, c2, pbest, gbest, velocity, particle, pop):
+ r1 = random.random()
+ r2 = random.random()
+
+ old_velocity = velocity*w
+ pbest_velocity = (pbest - particle)*(c2*r2)
+ gbest_velocity = (gbest - particle)*(c1*r1)
+
+ new_velocity = old_velocity + pbest_velocity + gbest_velocity
+ return new_velocity
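+
+# For reference, this is the classical PSO velocity update:
+#   v_new = w * v + c2 * r2 * (pbest - x) + c1 * r1 * (gbest - x)
+# note that in this implementation c2 is paired with the particle's personal best
+# and c1 with the global best, exactly as written above.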
+
+
+
+def initMapMatrix(jobs, nodes, estimator):
+ matrix = dict()
+ for job in jobs:
+ for node in nodes:
+ matrix[job.id, node.name] = 100 / estimator.estimate_runtime(job, node)
+ return matrix
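+
+# initMapMatrix encodes each (job, node) pair as 100 / estimated runtime, so faster
+# nodes get larger entries. Hypothetical example: runtimes of 20 and 50 time units
+# give matrix entries of 5.0 and 2.0 respectively.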
+
+def initRankList(wf_dag, nodes, estimator):
+ return heftRank(wf_dag, nodes, estimator)
+
+
+def heftRank(wf, nodes, estimator):
+ compcost = lambda job, agent: estimator.estimate_runtime(job, agent)
+ commcost = lambda ni, nj, A, B: estimator.estimate_transfer_time(A, B, ni, nj)
+ task_rank_cache = dict()
+ return ranking_func(wf, nodes, compcost, commcost, task_rank_cache)
+
+def ranking_func(wf_dag, nodes, compcost, commcost, task_rank_cache):
+ rank = partial(HeftHelper.ranking, nodes=nodes, succ=wf_dag,
+ compcost=compcost, commcost=commcost,
+ task_rank_cache=task_rank_cache)
+ jobs = set(wf_dag.keys()) | set(x for xx in wf_dag.values() for x in xx)
+ rank_list = dict()
+ for job in jobs:
+ rank_list[job.id] = rank(job)
+
+ return rank_list
+
+def mappingTransform(mapping, mapMatrix, nodes):
+ res = dict()
+ for k,v in mapping.items():
+ subList =[(node, abs(mapMatrix[(k, node.name)] - v)) for node in nodes]
+ appNode = (min(subList, key = lambda n : n[1]))[0]
+ res[k] = appNode.name
+ return res
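+
+# Worked example with hypothetical values: if mapMatrix[("t1", "n1")] == 5.0 and
+# mapMatrix[("t1", "n2")] == 2.0, then a particle value of 2.4 for "t1" decodes to
+# "n2", because |2.0 - 2.4| < |5.0 - 2.4|.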
+
+def orderingTransform(ordering, rankList, filterList):
+ res = []
+ rankCopy = rankList.copy()
+ for it in range(len(ordering)):
+ val = ordering[it]
+ curRankList = set()
+ curFilter = filterList[val[0]]
+
+ for item in rankCopy.items():
+ #if item[0] in curFilter:
+ curRankList.add(item)
+
+ subList = [(task, abs(val[1] - rank)) for (task, rank) in curRankList]
+ if len(subList) == 0:
+ ## should not happen: no candidate rank is left for this position
+ pass
+ curTask = min(subList, key=lambda t: t[1])[0]
+ res.append(curTask)
+ if curTask != val[0]:
+ swapTasks(ordering, val[0], curTask)
+ del rankCopy[curTask]
+ return res
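+
+# In short: each continuous ordering value is matched to the remaining task whose
+# HEFT rank (from rankList) is closest, that task is removed from the candidate pool,
+# and swapTasks keeps the particle's (task, value) pairs consistent with the decoded
+# order. Note that the filterList restriction is currently commented out above.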
+
+def filterList(wf):
+ wf_dag = HeftHelper.convert_to_parent_children_map(wf)
+ jobs = set(wf_dag.keys()) | set(x for xx in wf_dag.values() for x in xx)
+ resultFilter = {job.id:{j.id for j in jobs} for job in jobs}
+ headTask = wf.head_task
+ ready = dict()
+ readyParents = dict()
+ for task in headTask.children:
+ getTaskChildFilter(task, resultFilter, ready)
+ for task in jobs:
+ getTaskParentFilter(task, resultFilter, readyParents)
+ return resultFilter
+
+def getTaskChildFilter(task, filter, ready):
+ res = {task.id}
+ for child in task.children:
+ if child in ready:
+ res.update(ready[child])
+ else:
+ res.update(getTaskChildFilter(child, filter, ready))
+ filter[task.id].difference_update(res)
+ filter[task.id].add(task.id)
+ ready[task.id] = res
+ return res
+
+def getTaskParentFilter(task, filter, ready):
+ res = {task.id}
+ for parent in task.parents:
+ if parent.is_head:
+ continue
+ if parent in ready:
+ res.update(ready[parent])
+ else:
+ res.update(getTaskParentFilter(parent, filter, ready))
+ filter[task.id].difference_update(res)
+ filter[task.id].add(task.id)
+ ready[task.id] = res
+ return res
+
+def swapTasks(ordering, t1, t2):
+ fstList = [item[0] for item in ordering]
+ t1Idx = fstList.index(t1)
+ t2Idx = fstList.index(t2)
+ ordering[t2Idx] = (t1, ordering[t2Idx][1])
+ ordering[t1Idx] = (t2, ordering[t1Idx][1])
+
+def changeHall(hall, part, size):
+ if part.fitness in [p.fitness for p in hall]:
+ return hall
+ hall.append(deepcopy(part))
+ hall.sort(key=lambda p: p.fitness, reverse=True)
+ return hall[0:size]
+
+def changeIndex(idx, chance, size):
+ if random.random() < chance:
+ rnd = int(random.random() * size)
+ while (rnd == idx):
+ rnd = int(random.random() * size)
+ return rnd
+ return idx
diff --git a/src/algs/pso/rdpsoOrd/mapordschedule.py b/src/algs/pso/rdpsoOrd/mapordschedule.py
new file mode 100644
index 0000000..55c1b48
--- /dev/null
+++ b/src/algs/pso/rdpsoOrd/mapordschedule.py
@@ -0,0 +1,118 @@
+from deap.base import Fitness
+from src.algs.common.NewSchedulerBuilder import place_task_to_schedule
+from src.core.environment.BaseElements import Node
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+from src.core.environment.Utility import Utility
+
+
+MAPPING_SPECIE = "MappingSpecie"
+ORDERING_SPECIE = "OrderingSpecie"
+
+#from src.algs.pso.rdpsoOrd.rdpso import mappingTransform
+
+def build_schedule(workflow, estimator, resource_manager, solution):
+ """
+ the solution consists of all the parts necessary to build the whole schedule
+ For the moment, it is assumed that all species taking part in the algorithm
+ are needed to build a complete solution
+ solution = {
+ s1.name: val1,
+ s2.name: val2,
+ ....
+ }
+ """
+ ms = solution[MAPPING_SPECIE]
+ os = solution[ORDERING_SPECIE]
+
+ assert check_precedence(workflow, os), "Precedence is violated"
+
+ ms = {t: resource_manager.byName(n) for t, n in ms}
+ schedule_mapping = {n: [] for n in set(ms.values())}
+ task_to_node = {}
+ for t in os:
+ node = ms[t]
+ t = workflow.byId(t)
+ (start_time, end_time) = place_task_to_schedule(workflow,
+ estimator,
+ schedule_mapping,
+ task_to_node,
+ ms, t, node, 0)
+
+ task_to_node[t.id] = (node, start_time, end_time)
+ schedule = Schedule(schedule_mapping)
+ return schedule
+
+
+def check_precedence(workflow, task_seq):
+ for i in range(len(task_seq)):
+ task = workflow.byId(task_seq[i])
+ pids = [p.id for p in task.parents]
+ for j in range(i + 1, len(task_seq)):
+ if task_seq[j] in pids:
+ return False
+ return True
+
+
+def fitness(wf, rm, estimator, position):
+ if isinstance(position, Schedule):
+ sched = position
+ else:
+ sched = build_schedule(wf, estimator, rm, position)
+
+ # isvalid = Utility.is_static_schedule_valid(wf,sched)
+ # if not isvalid:
+ # print("NOT VALID SCHEDULE!")
+
+ makespan = Utility.makespan(sched)
+ ## TODO: make a real estimation later
+ cost = 0.0
+ Fitness.weights = [-1.0, -1.0]
+ fit = Fitness(values=(makespan, cost))
+ ## TODO: make a normal multi-objective fitness estimation
+ fit.mofit = makespan
+ return fit
+
+
+
+#def mapping_from_schedule(schedule, mapMatrix):
+def mapping_from_schedule(schedule):
+ #mapping = {item.job.id: mapMatrix[item.job.id, node.name] for node, items in schedule.mapping.items()
+ # for item in items}
+ mapping = {item.job.id: node.name for node, items in schedule.mapping.items()
+ for item in items}
+
+ return mapping
+
+
+def ordering_from_schedule(schedule):
+ ordering = sorted((item for node, items in schedule.mapping.items() for item in items),
+ key=lambda x: x.start_time)
+ ordering = [item.job.id for item in ordering]
+ return ordering
+
+
+#def ord_and_map(schedule, mapMatrix):
+def ord_and_map(schedule):
+ #return mapping_from_schedule(schedule, mapMatrix), ordering_from_schedule(schedule)
+ return mapping_from_schedule(schedule), ordering_from_schedule(schedule)
+
+
+#def validate_mapping_with_alive_nodes(mapping, rm, mapMatrix):
+def validate_mapping_with_alive_nodes(mapping, rm):
+ """
+ :param mapping: is a dict {(task_id):(node_name)}
+ :param rm: resource manager
+ :return:
+ """
+ #TODO transform mapping from task:runtime to task:node
+ #mapping_tr = mappingTransform(mapping, mapMatrix, rm.get_nodes())
+
+ alive_nodes = [node.name for node in rm.get_nodes() if node.state != Node.Down]
+ #for task_id, node_name in mapping_tr.items():
+ for task_id, node_name in mapping.items():
+ if node_name not in alive_nodes:
+ return False
+ return True
+
+
+
diff --git a/src/algs/pso/rdpsoOrd/mapping_operators.py b/src/algs/pso/rdpsoOrd/mapping_operators.py
new file mode 100644
index 0000000..9f0b28b
--- /dev/null
+++ b/src/algs/pso/rdpsoOrd/mapping_operators.py
@@ -0,0 +1,57 @@
+import random
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+from src.algs.pso.rdpsoOrd.mapordschedule import MAPPING_SPECIE, ORDERING_SPECIE
+from src.algs.pso.rdpsoOrd.particle_operations import MappingParticle
+from src.algs.pso.rdpsoOrd.rdpso import velocity_update
+from src.algs.pso.rdpsoOrd.mapordschedule import fitness as basefitness
+
+#!!!!!TODO movement
+def position_update(mapping_particle, velocity):
+ def _cutting_by_task(velocity):
+ return [node for (task, node), v in velocity.items()]
+ alpha = random.random()
+ cut_velocity = velocity.cutby(alpha)
+ new_position = {}
+ for task in mapping_particle.entity:
+ available_nodes = _cutting_by_task(cut_velocity)
+ if len(available_nodes) == 0:
+ available_nodes = [mapping_particle.entity[task]]
+
+ #print("=== task: {0}; available nodes: {1}".format(task, [node.entity for node in available_nodes]))
+
+ # new_node = tools.selRoulette(available_nodes, 1)[0].entity
+ # new_node = max(available_nodes, key=lambda x: x.fitness).entity
+ # new_node = tools.selTournament(available_nodes, 1, 2)[0].entity
+ new_node = available_nodes[random.randint(0, len(available_nodes) - 1)]#.entity
+ new_position[task] = new_node
+ return new_position
+
+#TODO maybe need set like mapordschedule
+def schedule_to_position(schedule):
+ return MappingParticle({item.job.id: node.name for node, items in schedule.mapping.items() for item in items})
+
+
+def update(w, c1, c2, p, best, pop):
+ p.velocity = velocity_update(w, c1, c2, p.best, best, p.velocity, p, pop)
+ new_position = position_update(p, p.velocity)
+ p.entity = new_position
+ pass
+
+
+def generate(wf, rm, estimator, n):
+ pop = []
+ for i in range(n):
+ sched = SimpleRandomizedHeuristic(wf, rm.get_nodes(), estimator).schedule()
+ particle = schedule_to_position(sched)
+ particle.velocity = MappingParticle.Velocity({})
+ pop.append(particle)
+ return pop
+
+
+def construct_solution(particle, sorted_tasks):
+ return {MAPPING_SPECIE: [(t, particle.entity[t]) for t in sorted_tasks], ORDERING_SPECIE: sorted_tasks}
+
+
+def fitness(wf, rm, estimator, sorted_tasks, particle):
+ solution = construct_solution(particle, sorted_tasks)
+ return basefitness(wf, rm, estimator, solution)
\ No newline at end of file
diff --git a/src/algs/pso/rdpsoOrd/ordering_operators.py b/src/algs/pso/rdpsoOrd/ordering_operators.py
new file mode 100644
index 0000000..26937b6
--- /dev/null
+++ b/src/algs/pso/rdpsoOrd/ordering_operators.py
@@ -0,0 +1,115 @@
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+from src.algs.pso.rdpsoOrd.particle_operations import MappingParticle, OrderingParticle, CompoundParticle
+from src.algs.ga.GAFunctions2 import unmoveable_tasks
+from src.algs.pso.rdpsoOrd.mapping_operators import construct_solution
+from src.algs.pso.rdpsoOrd.mapordschedule import ord_and_map, build_schedule as base_build_schedule, \
+ validate_mapping_with_alive_nodes
+from src.algs.pso.rdpsoOrd.mapordschedule import fitness as basefitness
+from src.algs.pso.rdpsoOrd.rdpso import velocity_update, orderingTransform
+from src.core.environment.ResourceManager import Schedule, ScheduleItem
+from src.core.environment.Utility import Utility
+
+
+#def build_schedule(wf, rm, estimator, particle, mapMatrix, rankList, filterList):
+def build_schedule(wf, rm, estimator, particle, rankList):
+ ordering_particle = particle.ordering
+ mapping_particle = particle.mapping
+ ordering = numseq_to_ordering(wf, ordering_particle, rankList)
+ solution = construct_solution(mapping_particle, ordering)
+ sched = base_build_schedule(wf, estimator, rm, solution)
+ return sched
+
+
+#def fitness(wf, rm, estimator, particle, mapMatrix, rankList, filterList):
+def fitness(wf, rm, estimator, particle, rankList):
+ sched = build_schedule(wf, rm, estimator, particle, rankList)
+ return basefitness(wf, rm, estimator, sched)
+
+# TODO: ordering particle init: a task rank list needs to be created for the ordering
+def ordering_to_numseq(ordering, rankList):
+ ord_position = []
+ for job in ordering:
+ ord_position.append((job, rankList[job]))
+ return ord_position
+
+
+def numseq_to_ordering(wf, ordering_particle, rankList, fixed_tasks_ids=[]):
+ def recover_ordering(ordering):
+ corrected_ordering = []
+
+ while len(ordering) > 0:
+ ord_iter = iter(ordering)
+ while True:
+ t = next(ord_iter)
+ if Utility.is_enough_to_be_executed(wf, t, corrected_ordering + fixed_tasks_ids):
+ ordering.remove((t))
+ corrected_ordering.append(t)
+ break
+ else:
+ #print("incorrect " + str([task[0] for task in ordering_particle.entity]))
+ pass
+ pass
+ return corrected_ordering
+
+ ordering = orderingTransform(ordering_particle.entity, rankList)
+
+ ordering = recover_ordering(ordering)
+
+ for it in range(len(ordering_particle.entity)):
+ ordering_particle.entity[it] = (ordering[it], ordering_particle.entity[it][1])
+
+ return ordering
+
+def test_valid(wf, ordering_particle, rankList, fixed_tasks_ids=[]):
+
+ def recover_ordering(ordering):
+ corrected_ordering = []
+
+ for it in range(len(ordering)):
+ t = ordering[it]
+ if Utility.is_enough_to_be_executed(wf, t, corrected_ordering + fixed_tasks_ids):
+ corrected_ordering.append(t)
+ else:
+ #print("INCORRECT")
+ return False
+ ## all tasks respected their precedence constraints
+ return True
+
+
+ ordering = orderingTransform(ordering_particle.entity, rankList)
+ #ordering = [task[0] for task in ordering_particle.entity]
+ return recover_ordering(ordering)
+
+
+
+#def generate(wf, rm, estimator, mapMatrix=None, rankList=None, filterList=None, schedule=None, fixed_schedule_part=None, current_time=0.0):
+def generate(wf, rm, estimator, rankList=None, schedule=None, fixed_schedule_part=None, current_time=0.0):
+ sched = schedule if schedule is not None else SimpleRandomizedHeuristic(wf, rm.get_nodes(), estimator).schedule(fixed_schedule_part, current_time)
+
+ if fixed_schedule_part is not None:
+ un_tasks = unmoveable_tasks(fixed_schedule_part)
+ clean_sched = Schedule({node: [item for item in items if item.job.id not in un_tasks and item.state != ScheduleItem.FAILED]
+ for node, items in sched.mapping.items()})
+ else:
+ clean_sched = sched
+
+ mapping, ordering = ord_and_map(clean_sched)
+ ordering_numseq = ordering_to_numseq(ordering, rankList)
+
+ #ordering_map = {task_id: val for task_id, val in zip(ordering, ordering_numseq)}
+
+ ord_p, map_p = OrderingParticle(ordering_numseq), MappingParticle(mapping)
+ ord_p.velocity = OrderingParticle.Velocity({})
+ map_p.velocity = MappingParticle.Velocity({})
+ result = CompoundParticle(map_p, ord_p)
+ #if schedule is None and not validate_mapping_with_alive_nodes(result.mapping.entity, rm, mapMatrix):
+ if schedule is None and not validate_mapping_with_alive_nodes(result.mapping.entity, rm):
+
+ raise Exception("found invalid solution in generated array")
+ return result
+
+
+def ordering_update(w, c1, c2, p, best, pop):
+ new_velocity = velocity_update(w, c1, c2, p.best, best, p.velocity, p, pop)
+ new_entity = (p + new_velocity)
+ p.entity = new_entity.entity
+ p.velocity = new_velocity
+ pass
diff --git a/src/algs/pso/rdpsoOrd/particle_operations.py b/src/algs/pso/rdpsoOrd/particle_operations.py
new file mode 100644
index 0000000..25ed2e7
--- /dev/null
+++ b/src/algs/pso/rdpsoOrd/particle_operations.py
@@ -0,0 +1,160 @@
+from numbers import Number
+from uuid import uuid4
+import math
+from src.algs.common.individuals import FitAdapter
+
+
+class Particle(FitAdapter):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.uid = uuid4()
+ self._velocity = None
+ self._best = None
+
+ def _get_best(self): return self._best
+ def _set_best(self, b): self._best = b
+
+ def _get_velocity(self): return self._velocity
+ def _set_velocity(self, v): self._velocity = v
+
+ best = property(_get_best, _set_best)
+ velocity = property(_get_velocity, _set_velocity)
+ pass
+
+
+
+class MappingParticle(Particle):
+ def __init__(self, mapping):
+ super().__init__(mapping)
+ self.velocity = MappingParticle.Velocity({})
+ pass
+
+ def __sub__(self, other):
+ # return Position({k: self[k] for k in self.keys() - other.keys()})
+ return MappingParticle.Velocity({item: 1.0 for item in self.entity.items() - other.entity.items()
+ })
+
+ def __mul__(self, other):
+ if isinstance(other, Number):
+ return MappingParticle.Velocity({k: other for k, v in self.entity.items()})
+ raise ValueError("Other has not a suitable type for multiplication")
+
+ def emptify(self):
+ return MappingParticle.Velocity({})
+
+ class Velocity(dict):
+ def __mul__(self, other):
+ if isinstance(other, Number):
+ if other < 0:
+ raise ValueError("Only positive numbers can be used for operations with velocity")
+ return MappingParticle.Velocity({k: 1.0 if v * other > 1.0 else v * other for k, v in self.items()})
+ raise ValueError("{0} has not a suitable type for multiplication".format(other))
+
+ def __add__(self, other):
+ vel = MappingParticle.Velocity({k: max(self.get(k, 0), other.get(k, 0)) for k in set(self.keys()).union(other.keys())})
+ return vel
+
+ def __truediv__(self, denominator):
+ if isinstance(denominator, Number):
+ return self.__mul__(1 / denominator)
+ raise ValueError("{0} does not have a suitable type for division".format(denominator))
+
+ def cutby(self, alpha):
+ return MappingParticle.Velocity({k: v for k, v in self.items() if v >= alpha})
+
+ def vector_length(self):
+ return len(self)
+
+
+ pass
+ pass
+
+
+class OrderingParticle(Particle):
+
+ def __init__(self, ordering):
+ """
+ :param ordering: has the following form
+ {
+ task_id: value
+ }
+ """
+ super().__init__(ordering)
+ pass
+
+ def __sub__(self, other):
+ if not isinstance(other, OrderingParticle):
+ raise ValueError("Invalid type of the argument for this operation")
+ velocity = OrderingParticle.Velocity([self.entity[iter][1] - other.entity[iter][1]
+ for iter in range(len(self.entity))])
+ return velocity
+
+ def __add__(self, other):
+ if not isinstance(other, OrderingParticle.Velocity):
+ raise ValueError("Invalid type of the argument for this operation: {0}".format(type(other)))
+
+ if len(other) == 0:
+ return OrderingParticle([v for v in self.entity])
+
+ velocity = OrderingParticle([(self.entity[iter][0],self.entity[iter][1] + other[iter])
+ for iter in range(len(self.entity))])
+ return velocity
+
+ def emptify(self):
+ return OrderingParticle.Velocity({k: 0.0 for k in self.entity})
+
+ class Velocity(list):
+
+ def __mul__(self, other):
+ if isinstance(other, Number):
+ if other < 0:
+ raise ValueError("Only positive numbers can be used for operations with velocity")
+ return OrderingParticle.Velocity([round(v * other, 0) for v in self])
+ raise ValueError("{0} has not a suitable type for multiplication".format(other))
+
+ def __add__(self, other):
+ if isinstance(other, OrderingParticle.Velocity):
+ if (len(self) == 0):
+ vel = OrderingParticle.Velocity([v for v in other])
+ else:
+ vel = OrderingParticle.Velocity([self[iter] + other[iter] for iter in range(len(self))])
+ return vel
+            raise ValueError("{0} is not a suitable type for addition".format(other))
+
+        def __truediv__(self, denominator):
+            if isinstance(denominator, Number):
+                return self.__mul__(1 / denominator)
+            raise ValueError("{0} is not a suitable type for division".format(denominator))
+
+        def vector_length(self):
+            return math.sqrt(sum(val * val for val in self)) / len(self)
+ pass
+ pass
+
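+# Illustrative sketch (not executed here): an ordering entity is a sequence of
+# (task_id, priority) pairs, and the operators above act element-wise on the
+# priorities (again assuming FitAdapter exposes its argument as .entity).
+#
+#     o1 = OrderingParticle([("t1", 10.0), ("t2", 20.0)])
+#     o2 = OrderingParticle([("t1", 12.0), ("t2", 16.0)])
+#     vel = o1 - o2         # Velocity([-2.0, 4.0])
+#     (o1 + vel).entity     # [("t1", 8.0), ("t2", 24.0)]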
+
+class CompoundParticle(Particle):
+ def __init__(self, mapping_particle, ordering_particle):
+ super().__init__(None)
+ self.mapping = mapping_particle
+ self.ordering = ordering_particle
+ self._best = None
+ pass
+
+ def _get_best(self):
+ return self._best
+
+ def _set_best(self, value):
+ self._best = value
+ if value is not None:
+ self.mapping.best = value.mapping
+ self.ordering.best = value.ordering
+ else:
+ self.mapping.best = None
+ self.ordering.best = None
+ pass
+
+ best = property(_get_best, _set_best)
+ pass
+
+
+
diff --git a/src/algs/pso/rdpsoOrd/rdpso.py b/src/algs/pso/rdpsoOrd/rdpso.py
new file mode 100644
index 0000000..de39f58
--- /dev/null
+++ b/src/algs/pso/rdpsoOrd/rdpso.py
@@ -0,0 +1,164 @@
+"""
+This is a prototype of a set-based PSO algorithm aimed at the workflow scheduling problem with a fixed ordering.
+It must be refactored into a reusable, configurable form later.
+
+Position =>{task_name: node_name}
+Velocity => {(task_name, node_name): probability}
+"""
+from copy import deepcopy
+from functools import partial
+import random
+import src.algs.pso.rdpsoOrd.ordering_operators
+
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+from src.algs.common.individuals import FitAdapter
+#from src.algs.pso.rdpsoOrd.mapordschedule import fitness as basefitness
+#from src.algs.pso.rdpsoOrd.mapordschedule import MAPPING_SPECIE, ORDERING_SPECIE
+from src.algs.common.utilities import gather_info
+from src.core.environment.Utility import timing, RepeatableTiming
+from src.algs.heft.HeftHelper import HeftHelper
+import math
+
+
+#@RepeatableTiming(100)
+def run_pso(toolbox, logbook, stats, gen_curr, gen_step=1, invalidate_fitness=True, initial_pop=None, **params):
+
+ #print("PSO_STARTED")
+
+ pop = initial_pop
+
+ w, c1, c2 = params["w"], params["c1"], params["c2"]
+ n = len(pop) if pop is not None else params["n"]
+ wf = params["wf"]
+ rankList = params["rankList"]
+
+ best = params.get('best', None)
+
+ hallOfFame = []
+    hallOfFameSize = max(1, int(math.log(n)))
+
+ bestIndex = 0
+ changeChance = 0.1
+
+
+ if pop is None:
+ pop = toolbox.population(n)
+
+ for g in range(gen_curr, gen_curr + gen_step, 1):
+ #print("g: {0}".format(g))
+ for p in pop:
+ if not hasattr(p, "fitness") or not p.fitness.valid:
+ p.fitness = toolbox.fitness(p, rankList)
+ if not p.best or p.best.fitness < p.fitness:
+ p.best = deepcopy(p)
+            if not best or len(hallOfFame) < hallOfFameSize or hallOfFame[-1].fitness < p.fitness:
+ hallOfFame = changeHall(hallOfFame, p, hallOfFameSize)
+ # Gather all the fitnesses in one list and print the stats
+ gather_info(logbook, stats, g, pop)
+
+
+
+        bestIndex = changeIndex(bestIndex, changeChance, min(hallOfFameSize, len(hallOfFame)))
+ best = hallOfFame[bestIndex]
+
+ for p in pop:
+
+ toolbox.update(w, c1, c2, p, best, pop)
+
+ if invalidate_fitness and not g == gen_step-1:
+ for p in pop:
+ del p.fitness
+ pass
+
+ hallOfFame.sort(key=lambda p:p.fitness, reverse=True)
+ best = hallOfFame[0]
+ return pop, logbook, best
+
+
+def velocity_update(w, c1, c2, pbest, gbest, velocity, particle, pop):
+ r1 = random.random()
+ r2 = random.random()
+
+ old_velocity = velocity*w
+ pbest_velocity = (pbest - particle)*(c1*r1)
+ gbest_velocity = (gbest - particle)*(c2*r2)
+
+ new_velocity = old_velocity + pbest_velocity + gbest_velocity
+ return new_velocity
+
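+# velocity_update above is the canonical PSO rule
+#     v' = w*v + c1*r1*(pbest - x) + c2*r2*(gbest - x)
+# expressed through the set-based operators defined in particle_operations.py.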
+
+def initRankList(wf_dag, nodes, estimator):
+ return heftRank(wf_dag, nodes, estimator)
+ #return simpleRank(wf_dag)
+
+def simpleRank(wf_dag):
+ jobs = set(wf_dag.keys()) | set(x for xx in wf_dag.values() for x in xx)
+ rank_list = dict()
+ rank = 100
+ for job in jobs:
+ rank_list[job.id] = rank
+ rank += 100
+ return rank_list
+
+
+def heftRank(wf, nodes, estimator):
+ compcost = lambda job, agent: estimator.estimate_runtime(job, agent)
+ commcost = lambda ni, nj, A, B: estimator.estimate_transfer_time(A, B, ni, nj)
+ task_rank_cache = dict()
+ return ranking_func(wf, nodes, compcost, commcost, task_rank_cache)
+
+def ranking_func(wf_dag, nodes, compcost, commcost, task_rank_cache):
+ rank = partial(HeftHelper.ranking, nodes=nodes, succ=wf_dag,
+ compcost=compcost, commcost=commcost,
+ task_rank_cache=task_rank_cache)
+ jobs = set(wf_dag.keys()) | set(x for xx in wf_dag.values() for x in xx)
+ rank_list = dict()
+ for job in jobs:
+ rank_list[job.id] = rank(job)
+
+ return rank_list
+
+def orderingTransform(ordering, rankList):
+ res = []
+ rankCopy = rankList.copy()
+ for it in range(len(ordering)):
+ val = ordering[it]
+        # among the still unassigned tasks, pick the one whose rank is closest
+        # to the current continuous value
+        curRankList = set(rankCopy.items())
+        subList = [(task, abs(val[1] - rank)) for (task, rank) in curRankList]
+        curTask = min(subList, key=lambda t: t[1])[0]
+ res.append(curTask)
+ if curTask != val[0]:
+ swapTasks(ordering, val[0], curTask)
+ del rankCopy[curTask]
+ return res
+
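+# Worked example (illustrative values): with rankList = {"a": 100, "b": 200, "c": 300}
+# and ordering = [("a", 290.0), ("b", 110.0), ("c", 170.0)], the first slot takes the
+# task whose rank is closest to 290 ("c"), the second the closest to 110 among the
+# remaining tasks ("a"), and the last slot gets "b": the result is ["c", "a", "b"].
+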
+def swapTasks(ordering, t1, t2):
+ fstList = [item[0] for item in ordering]
+ t1Idx = fstList.index(t1)
+ t2Idx = fstList.index(t2)
+ ordering[t2Idx] = (t1, ordering[t2Idx][1])
+ ordering[t1Idx] = (t2, ordering[t1Idx][1])
+
+def changeHall(hall, part, size):
+ if part.fitness in [p.fitness for p in hall]:
+ return hall
+ hall.append(deepcopy(part))
+ hall.sort(key=lambda p: p.fitness, reverse=True)
+ return hall[0:size]
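+
+# Worked example (illustrative): with size=3 and candidates whose fitness values
+# arrive as 5, 3, 8 and 1, the hall keeps deep copies of the three best
+# distinct-fitness particles, sorted by fitness in descending order: [8, 5, 3].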
+
+def changeIndex(idx, chance, size):
+ if random.random() < chance:
+ rnd = int(random.random() * size)
+ while (rnd == idx):
+ rnd = int(random.random() * size)
+ return rnd
+ return idx
+
diff --git a/src/algs/pso/sdpso.py b/src/algs/pso/sdpso.py
new file mode 100644
index 0000000..3b38702
--- /dev/null
+++ b/src/algs/pso/sdpso.py
@@ -0,0 +1,115 @@
+"""
+This is a prototype of a set-based PSO algorithm aimed at the workflow scheduling problem with a fixed ordering.
+It must be refactored into a reusable, configurable form later.
+
+Position =>{task_name: node_name}
+Velocity => {(task_name, node_name): probability}
+"""
+from copy import deepcopy
+import random
+import math
+import deap
+
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+from src.algs.common.individuals import FitAdapter
+from src.algs.common.MapOrdSchedule import MAPPING_SPECIE, ORDERING_SPECIE
+from src.algs.common.MapOrdSchedule import fitness as basefitness
+from src.algs.common.utilities import gather_info
+from src.core.environment.Utility import timing, RepeatableTiming
+
+
+#@RepeatableTiming(100)
+def run_pso(toolbox, logbook, stats, gen_curr, gen_step=1, invalidate_fitness=True, initial_pop=None, **params):
+
+ """
+ :param w:
+ :param c1:
+ :param c2:
+ :param gen:
+ :param n:
+ :param toolbox:
+ :param stats:
+ :param logbook:
+ :return:
+
+ for toolbox we need the following functions:
+ population
+ fitness
+ update
+
+ And the following params:
+ w
+ c1
+ c2
+ n
+ """
+ pop = initial_pop
+
+ w, c1, c2 = params["w"], params["c1"], params["c2"]
+ n = len(pop) if pop is not None else params["n"]
+ best = params.get('best', None)
+
+ if pop is None:
+ pop = toolbox.population(n)
+
+ if best is None:
+ for p in pop:
+ p.fitness = toolbox.fitness(p)
+
+ p = max(pop, key=lambda p: p.fitness)
+ best = deepcopy(p)
+
+ for g in range(gen_curr, gen_curr + gen_step, 1):
+ for p in pop:
+ if not hasattr(p, "fitness") or not p.fitness.valid:
+ p.fitness = toolbox.fitness(p)
+ if not p.best or p.best.fitness < p.fitness:
+ p.best = deepcopy(p)
+
+ if not best or best.fitness < p.fitness:
+ best = deepcopy(p)
+
+ # Gather all the fitnesses in one list and print the stats
+ gather_info(logbook, stats, g, pop, best)
+
+ for p in pop:
+ toolbox.update(w, c1, c2, p, best, pop)
+ if invalidate_fitness and not g == gen_step-1:
+ for p in pop:
+ del p.fitness
+ pass
+
+ return pop, logbook, best
+
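+# Illustrative wiring sketch (the generate/fitness/update helpers below are
+# placeholders, not functions defined in this module; concrete implementations
+# live in the operator modules of this package):
+#
+#     from deap import base
+#     toolbox = base.Toolbox()
+#     toolbox.register("population", generate_population)  # n -> list of particles
+#     toolbox.register("fitness", schedule_fitness)        # particle -> fitness
+#     toolbox.register("update", particle_update)          # (w, c1, c2, p, best, pop)
+#     pop, logbook, best = run_pso(toolbox, logbook, stats, gen_curr=0, gen_step=100,
+#                                  w=0.5, c1=1.5, c2=1.5, n=50)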
+
+def velocity_update(w, c1, c2, pbest, gbest, velocity, particle, pop):
+ r1 = random.random()
+ r2 = random.random()
+
+ old_velocity = velocity*w
+ pbest_velocity = (pbest - particle)*(c2*r2)
+ gbest_velocity = (gbest - particle)*(c1*r1)
+
+ new_velocity = old_velocity + pbest_velocity + gbest_velocity
+ return new_velocity
+
+def changeHall(hall, part, size):
+ if part.fitness in [p.fitness for p in hall]:
+ return hall
+ hall.append(deepcopy(part))
+ hall.sort(key=lambda p: p.fitness, reverse=True)
+ return hall[0:size]
+
+def changeIndex(idx, chance, size):
+ if random.random() < chance:
+ rnd = int(random.random() * size)
+ while (rnd == idx):
+ rnd = int(random.random() * size)
+ return rnd
+ return idx
+
+
+
+
+
+
diff --git a/src/algs/sa/SimulatedAnnealingScheme.py b/src/algs/sa/SimulatedAnnealingScheme.py
new file mode 100644
index 0000000..145f0e5
--- /dev/null
+++ b/src/algs/sa/SimulatedAnnealingScheme.py
@@ -0,0 +1,47 @@
+import random
+
+
+def run_sa(toolbox, stats, logbook, initial_solution, T, N):
+ """
+ Simple Simulated Annealing implementation
+ toolbox must contain the following methods:
+ energy - value of objective which needs to be optimized
+ update_T
+ neighbor
+ transition_probability
+ attempts_count
+ """
+ ## initialization
+ current_solution = initial_solution
+ best = current_solution
+ current_solution.energy = toolbox.energy(current_solution)
+ g = 0
+ ## whole run
+ while round(T, 4) > 0.0:
+
+ data = stats.compile([current_solution]) if stats is not None else {}
+ if logbook is not None:
+ logbook.record(gen=g, T=T, **data)
+ print(logbook.stream)
+
+ attempt_count = toolbox.attempts_count(T)
+ for _ in range(attempt_count):
+ new_sol = toolbox.neighbor(current_solution)
+ new_sol.energy = toolbox.energy(new_sol)
+ tprob = toolbox.transition_probability(current_solution, new_sol, T)
+ if random.random() < tprob:
+ current_solution = new_sol
+ break
+ best = max(best, current_solution, key=lambda x: x.energy)
+
+ T = toolbox.update_T(T, N, g)
+ g += 1
+ pass
+
+ return best, logbook, current_solution
+
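+# Illustrative wiring sketch: the concrete energy / update_T / mapping_neighbor /
+# transition_probability implementations for the mapping representation live in
+# src/algs/sa/mappingops.py; wf, rm, estimator, T0, stats, logbook and the initial
+# state are assumed to be built elsewhere.
+#
+#     from functools import partial
+#     from deap import base
+#     from src.algs.sa.mappingops import energy, update_T, mapping_neighbor, transition_probability
+#
+#     toolbox = base.Toolbox()
+#     toolbox.register("energy", partial(energy, wf, rm, estimator))
+#     toolbox.register("update_T", partial(update_T, T0))
+#     toolbox.register("neighbor", partial(mapping_neighbor, wf, rm, estimator, 2))
+#     toolbox.register("transition_probability", transition_probability)
+#     toolbox.register("attempts_count", lambda T: 20)
+#     best, logbook, last = run_sa(toolbox, stats, logbook, initial_state, T=T0, N=1000)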
+
+
+
+
+
diff --git a/src/algs/sa/mappingops.py b/src/algs/sa/mappingops.py
new file mode 100644
index 0000000..401edec
--- /dev/null
+++ b/src/algs/sa/mappingops.py
@@ -0,0 +1,68 @@
+from copy import deepcopy
+from math import exp
+import random
+from deap import creator
+from src.algs.common.MapOrdSchedule import MAPPING_SPECIE, ORDERING_SPECIE, fitness
+
+
+class State(object):
+    def __init__(self):
+        self.energy = None
+        self.mapping = None
+        self.ordering = None
+        pass
+
+
+def energy(wf, rm, estimator, state):
+ position = {MAPPING_SPECIE: [item for item in state.mapping.items()],
+ ORDERING_SPECIE:state.ordering}
+ return fitness(wf, rm, estimator, position)
+
+
+def update_T(T0, T, N, g):
+ return T0 - ((T0/N) * g)
+
+
+def mapping_neighbor(wf, rm, estimator, count, state):
+    """
+    Takes the current mapping, randomly chooses `count` of its elements
+    and replaces them with new ones.
+    :param count: number of genes to change
+    :param state: current State; its mapping is copied, not modified in place
+    :return: a deep copy of the state with the new mapping
+    """
+
+    if count > len(state.mapping):
+        raise ValueError("count is greater than the mapping size")
+ mappings = list(state.mapping.items())
+ for_changes = []
+ while len(for_changes) < count:
+ new_ri = random.randint(0, len(mappings) - 1)
+ if new_ri not in for_changes:
+ for_changes.append(new_ri)
+ pass
+
+    def move_task(taskid, node_name):
+        nodes = list(rm.get_nodes())
+        if len(nodes) < 2:
+            return node_name
+        # retry until a node different from the current one is drawn
+        while True:
+            n = random.randint(0, len(nodes) - 1)
+            if nodes[n].name != node_name:
+                return nodes[n].name
+
+ num_seq = range(len(mappings))
+ try_to_move = lambda el, num: move_task(el[0], el[1]) if (num in for_changes) else el[1]
+ new_mappings = {el[0]: try_to_move(el, num) for el, num in zip(mappings, num_seq)}
+
+ new_state = deepcopy(state)
+ new_state.mapping = new_mappings
+ return new_state
+
+
+def transition_probability(current_state, new_state, T):
+    # energy is maximized: always accept an improvement, otherwise accept the
+    # worse state with probability exp(diff / T), where diff <= 0
+    if new_state.energy > current_state.energy:
+        return 1
+    diff = new_state.energy.values[0] - current_state.energy.values[0]
+    return exp(diff / T)
+
+
diff --git a/src/core/CommonComponents/ExperimentalManager.py b/src/core/CommonComponents/ExperimentalManager.py
new file mode 100644
index 0000000..2b8de87
--- /dev/null
+++ b/src/core/CommonComponents/ExperimentalManager.py
@@ -0,0 +1,114 @@
+from src.core.environment.BaseElements import Resource, Node
+from src.core.environment.ResourceManager import Estimator
+from src.core.environment.ResourceManager import ResourceManager
+import numpy as np
+
+
+class ModelTimeEstimator(Estimator):
+ """
+ Transfer time between 2 nodes in one blade = transfer_nodes, otherwise = transfer_blades
+ """
+
+ def __init__(self, bandwidth=10):
+ self.bandwidth = bandwidth # MB /sec
+
+ # #get estimated time of running the task on the node
+ def estimate_runtime(self, task, node):
+ result = task.runtime / np.sqrt(node.flops / 8)
+ return result
+
+ ## estimate transfer time between node1 and node2 for data generated by the task
+ def estimate_transfer_time(self, node1, node2, task1, task2):
+
+ if node1 == node2:
+ res = 0.0
+ else:
+
+ transfer_time = 0
+ for filename, file in task2.input_files.items():
+ if filename in task1.output_files:
+                    transfer_time += (file.size / 1024 / 1024 / self.bandwidth)  # bytes -> MB, divided by bandwidth in MB/sec
+ res = transfer_time
+ return res
+
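+# Numeric sanity check (illustrative): with the default bandwidth of 10 MB/s, a
+# 104857600-byte (100 MB) file shared by two tasks on different nodes costs
+# 100 / 10 = 10 model seconds to transfer, and a task with runtime=100 on a node
+# with flops=32 runs in 100 / sqrt(32 / 8) = 50 model seconds.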
+
+class ExperimentResourceManager(ResourceManager):
+
+ def __init__(self, resources):
+ self.resources = resources
+ self.resources_map = {res.name: res for res in self.resources}
+ self._name_to_node = None
+
+ def node(self, node):
+ if isinstance(node, Node):
+ result = [nd for nd in self.resources_map[node.resource.name].nodes if nd.name == node.name]
+ else:
+ name = node
+ result = [nd for nd in self.get_nodes() if nd.name == name]
+
+ if len(result) == 0:
+ return None
+ return result[0]
+
+ def resource(self, resource):
+ return self.res_by_id(resource)
+
+ ##get all resources in the system
+ def get_resources(self):
+ return self.resources
+
+ def get_live_resources(self):
+ resources = self.get_resources()
+ result = set()
+ for res in resources:
+ if res.state != 'down':
+ result.add(res)
+ return result
+
+ def get_live_nodes(self):
+ resources = [res for res in self.get_resources() if res.state != 'down']
+ result = set()
+ for resource in resources:
+ for node in resource.nodes:
+ if node.state != "down":
+ result.add(node)
+ return result
+
+ def get_all_nodes(self):
+ result = set()
+ for res in self.resources:
+ for node in res.nodes:
+ result.add(node)
+ return result
+
+ def change_performance(self, node, performance):
+ ##TODO: rethink it
+ self.resources[node.resource][node].flops = performance
+
+ def byName(self, name):
+ if self._name_to_node is None:
+ self._name_to_node = {n.name: n for n in self.get_nodes()}
+ return self._name_to_node.get(name, None)
+
+ def res_by_id(self, id):
+ name = id.name if isinstance(id, Resource) else id
+ return self.resources_map[name]
+
+ def get_res_by_name(self, name):
+ """
+ find resource from resource list by name
+ """
+ for res in self.resources:
+ if res.name == name:
+ return res
+ return None
+
+ def get_node_by_name(self, name):
+ """
+ find node in all resources by name
+ """
+ for res in self.resources:
+ for node in res.nodes:
+ if node.name == name:
+ return node
+ return None
diff --git a/src/core/CommonComponents/utilities.py b/src/core/CommonComponents/utilities.py
new file mode 100644
index 0000000..6736d42
--- /dev/null
+++ b/src/core/CommonComponents/utilities.py
@@ -0,0 +1,15 @@
+from scoop import futures
+
+USE_SCOOP = True
+
+def multi_repeat(n, funcs):
+ if USE_SCOOP:
+ fs = [futures.submit(func) for _ in range(n) for func in funcs]
+ futures.wait(fs)
+ return [f.result() for f in fs]
+ else:
+ return [func() for _ in range(n) for func in funcs ]
+
+def repeat(func, n):
+ return multi_repeat(n, [func])
+
diff --git a/src/core/environment/BaseElements.py b/src/core/environment/BaseElements.py
new file mode 100644
index 0000000..fa92ff0
--- /dev/null
+++ b/src/core/environment/BaseElements.py
@@ -0,0 +1,384 @@
+from copy import copy
+import functools
+import sys
+import uuid
+import random
+# #just an enum
+
+
+class SoftItem:
+ windows = "windows"
+ unix = "unix"
+ matlab = "matlab"
+ ANY_SOFT = "any_soft"
+
+
+class Resource:
+
+ Down = "down"
+ Unknown = "unknown"
+ Static = "static"
+ Busy = "busy"
+ def __init__(self, name, nodes=None):
+ self.name = name
+ if nodes is None:
+ self.nodes = set()
+ else:
+ self.nodes = nodes
+ self.state = Resource.Unknown
+
+ def get_live_nodes(self):
+ result = set()
+ for node in self.nodes:
+ if node.state != Node.Down:
+ result.add(node)
+ return result
+
+ def get_cemetery(self):
+ result = set()
+ for node in self.nodes:
+ if node.state == Node.Down:
+ result.add(node)
+ return result
+
+ def __eq__(self, other):
+ if isinstance(other, Resource) and self.name == other.name:
+ return True
+ else:
+ return super().__eq__(other)
+
+ def __hash__(self):
+ return hash(self.name)
+
+
+class Node:
+
+ Down = "down"
+ Unknown = "unknown"
+ Static = "static"
+ Busy = "busy"
+
+ def __init__(self, name, resource, soft, flops=0):
+ self.name = name
+ self.soft = soft
+ self.resource = resource
+ self.flops = flops
+ self.state = Node.Unknown
+ self.id = uuid.uuid4()
+
+ def __str__(self):
+ return str(self.name)
+
+ def __repr__(self):
+ return str(self.name)
+
+ def __eq__(self, other):
+ if isinstance(other, Node) and self.name == other.name:
+ return True
+ else:
+ return super().__eq__(other)
+
+ def __hash__(self):
+ return hash(self.name)
+
+
+class SubWorkflow:
+ def __init__(self, id, name, head_task):
+ self.id = id
+ self.name = name
+ self.head_task = head_task
+
+ def get_real_wf(self):
+ task = self.head_task
+
+ def process_task(self, task):
+ result = []
+ if task.range is not None:
+ tasks_number = random.randint(task.range.min, task.range.max)
+ for i in range(task.range.min, tasks_number):
+ if type(task) is SubWorkflow:
+ result.append(task.get_real_wf())
+ else:
+ result.append(task.copy())
+ else:
+ if type(task) is SubWorkflow:
+ result = task.get_real_wf()
+
+
+class Workflow:
+ def __init__(self, id, name, head_task):
+ self.id = id
+ self.name = name
+ self.head_task = head_task
+ self.max_sweep = sys.maxsize
+
+ self._unique_tasks = None
+ self._id_to_task = None
+ self._parent_child_dict = None
+
+ def get_task_count(self):
+ unique_tasks = self.get_all_unique_tasks()
+ result = len(unique_tasks)
+ return result
+
+ def get_max_sweep(self):
+ if self.max_sweep == sys.maxsize:
+ def find_all_sweep_size(task, calculated):
+ max_sweep = 0
+ for child in task.children:
+ if child not in calculated:
+ max_sweep = find_all_sweep_size(child, calculated) + max_sweep
+                        calculated.add(child)
+
+ return max(max_sweep, 1)
+
+ if self.head_task is None:
+ self.max_sweep = 0
+ else:
+ calc = set()
+ self.max_sweep = find_all_sweep_size(self.head_task, calc)
+ return self.max_sweep
+
+ def get_all_unique_tasks(self):
+ """
+ Get all unique tasks in sorted order
+ """
+ if self._unique_tasks is None:
+ def add_tasks(unique_tasks, task):
+ unique_tasks.update(task.children)
+ for child in task.children:
+ add_tasks(unique_tasks, child)
+
+ unique_tasks = set()
+ if self.head_task is None:
+ result = []
+ else:
+ add_tasks(unique_tasks, self.head_task)
+ result = unique_tasks
+ self._unique_tasks = sorted(result, key=lambda x: x.id)
+ return copy(self._unique_tasks)
+
+ def get_tasks_id(self):
+ return [t.id for t in self._unique_tasks]
+
+ def byId(self, id):
+ if self._id_to_task is None:
+ self._id_to_task = {t.id: t for t in self.get_all_unique_tasks()}
+ return self._id_to_task.get(id, None)
+
+ def is_parent_child(self, id1, id2):
+ if self._parent_child_dict is None:
+ self._build_ancestors_map()
+ return (id2 in self._parent_child_dict[id1]) or (id1 in self._parent_child_dict[id2])
+
+ def by_num(self, num):
+ numstr = str(num)
+ zeros = "".join("0" for _ in range(5 - len(numstr)))
+ ## TODO: correct indexation
+ id = str.format("ID{zeros}{num}_000", zeros=zeros, num=numstr)
+ return self.byId(id)
+
+ def ancestors(self, id):
+ if self._parent_child_dict is None:
+ self._build_ancestors_map()
+ return self._parent_child_dict[id]
+
+ ## TODO: for one-time use. Remove it later.
+ # def avr_runtime(self, package_name):
+ # tsks = [tsk for tsk in HeftHelper.get_all_tasks(self) if package_name in tsk.soft_reqs]
+ # common_sum = sum([tsk.runtime for tsk in tsks])
+ # return common_sum / len(tsks)
+
+
+ def _build_ancestors_map(self):
+ self._parent_child_dict = {}
+
+ def build(el):
+ if el.id in self._parent_child_dict:
+ return self._parent_child_dict[el.id]
+ if len(el.children) == 0:
+ res = []
+ else:
+ all_ancestors = [[c.id for c in el.children]] + [build(c) for c in el.children]
+ res = functools.reduce(lambda seed, x: seed + x, all_ancestors, [])
+ self._parent_child_dict[el.id] = res
+ return res
+
+ build(self.head_task)
+ self._parent_child_dict = {k: set(v) for k, v in self._parent_child_dict.items()}
+
+ def is_task_ready(self, task_id, finished_tasks):
+ """
+ checks if a task with task_id is ready to execute
+ depending on what tasks have been already finished
+ :param task_id:
+ :param finished_tasks:
+ :return:
+ """
+
+ p_ids = [p.id for p in self.byId(task_id).parents]
+
+ # consists of only HEAD - it is ok
+ if self.head_task.id in p_ids:
+ return True
+
+ if all(p_id in finished_tasks for p_id in p_ids):
+ return True
+
+ return False
+
+
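+# Minimal usage sketch (the tiny hand-built graph below is only illustrative;
+# real workflows are produced by the DAX parsers in this package):
+#
+#     head = Task("000_00", "000", is_head=True)
+#     a = Task("ID00001_000", "ID00001")
+#     b = Task("ID00002_000", "ID00002")
+#     head.children = {a}; a.parents = {head}
+#     a.children = {b};    b.parents = {a}
+#     toy = Workflow("wf_0", "toy", head)
+#     toy.get_task_count()                            # -> 2
+#     toy.is_task_ready(b.id, finished_tasks={a.id})  # -> True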
+
+class AbstractWorkflow(Workflow):
+ def copy(self):
+ result = AbstractWorkflow(self.id, self.name, self.head_task.copy("",True))
+ result.max_sweep = sys.maxsize
+ result._unique_tasks = self._unique_tasks
+ result._id_to_task = self._id_to_task
+ result._parent_child_dict = self._parent_child_dict
+ return result
+
+ def get_real_wf(self):
+ real_wf = self.copy()
+ self.head_task = self.head_task.get_real_tasks()
+ return real_wf
+
+
+class Range:
+ def __init__(self, min, max):
+ self.min = min
+ self.max = max
+
+
+class Task:
+ def __init__(self, id, internal_wf_id, is_head=False, alternates=None, subtask=False):
+ self.id = id
+ self.internal_wf_id = internal_wf_id
+ self.wf = None
+        self.parents = set()  ## set of parent tasks
+        self.children = set()  ## set of child tasks
+        self.soft_reqs = set()  ## set of soft (software) requirements
+        self.runtime = None  ## task workload taken from the DAX 'runtime' attribute
+        self.input_files = None
+ self.output_files = None
+ self.is_head = is_head
+ self.alternates = alternates
+ self.subtask = subtask
+
+ def copy(self,id="",full=False,children=False):
+ if id == "":
+ new_id = self.id
+ else:
+ new_id = self.id + "_" + id
+ result = Task(new_id, self.internal_wf_id, self.is_head)
+ if not children:
+ result.children = self.children
+ else:
+ result.children = set()
+ children_copy = copy(self.children)
+ for child in children_copy:
+ result.children.add(child.copy(id,full,children))
+ result.parents = self.parents
+ result.input_files = self.input_files
+ result.output_files = self.output_files
+ result.runtime = self.runtime
+ if full:
+ if hasattr(self, 'alternates'):
+ result.alternates = self.alternates
+ if hasattr(self, 'range'):
+ result.range = self.range
+ return result
+
+ def __str__(self):
+ return self.id
+
+ def __repr__(self):
+ return self.id
+
+ def get_list_tasks(self):
+ result = []
+ for child_task in self.children:
+ if len(child_task.children) == 0:
+ result.append(child_task)
+ list_tasks = child_task.get_list_tasks()
+ if len(list_tasks) > 0:
+ for list_task in list_tasks:
+ result.append(list_task)
+ return result
+
+ def get_real_tasks(self):
+ real_task = self.copy("",True)
+ children_copy = copy(real_task.children)
+ for child_task in children_copy:
+ if hasattr(child_task, 'range'):
+ result = []
+ tasks_number = random.randint(child_task.range.min, child_task.range.max)
+ if child_task.alternates is not None:
+ subwf_id = random.randint(0, len(child_task.alternates)-1)
+ subwf = child_task.alternates[subwf_id]
+ real_subwf = subwf.head_task.get_real_tasks()
+ real_subwf.parents = set()
+ for parent_task in child_task.parents:
+ real_subwf.parents.add(parent_task)
+ for i in range(0, tasks_number):
+ if child_task.alternates is not None:
+ result.append(real_subwf.copy(str(i),False,True))
+ else:
+ result.append(child_task.copy(str(i)))
+ for task in result:
+ for parent_task in child_task.parents:
+ parent_task.children.add(task)
+ if child_task.alternates is not None:
+ subwf_list_tasks = task.get_list_tasks()
+ for subwf_list_task in subwf_list_tasks:
+ for child_child_task in child_task.children:
+ child_child_task.parents.add(subwf_list_task)
+ subwf_list_task.children.add(child_child_task)
+ else:
+ for child_child_task in child_task.children:
+ child_child_task.parents.add(task)
+ for child_child_task in child_task.children:
+ if child_task in child_child_task.parents:
+ child_child_task.parents.remove(child_task)
+ for parent_task in child_task.parents:
+ parent_task.children.remove(child_task)
+ else:
+ if child_task.alternates is not None:
+ subwf_id = random.randint(0, len(child_task.alternates)-1)
+ subwf = child_task.alternates[subwf_id]
+ real_subwf = subwf.head_task.get_real_tasks()
+ real_subwf.parents = set()
+ for parent_task in child_task.parents:
+ real_subwf.parents.add(parent_task)
+ parent_task.children.add(real_subwf)
+ subwf_list_tasks = real_subwf.get_list_tasks()
+ for subwf_list_task in subwf_list_tasks:
+ for child_child_task in child_task.children:
+ child_child_task.parents.add(subwf_list_task)
+ subwf_list_task.children.add(child_child_task)
+
+ if child_task.alternates is not None:
+ if child_task in real_task.children:
+ real_task.children.remove(child_task)
+
+ child_task.alternates = None
+ child_task.get_real_tasks()
+ return real_task
+
+ # def __hash__(self):
+ # return hash(self.id)
+ #
+ # def __eq__(self, other):
+ # if isinstance(other, Task):
+ # return self.id == other.id
+ # else:
+ # return super().__eq__(other)
+
+class File:
+ def __init__(self, name, size):
+ self.name = name
+ self.size = size
+
+
+UP_JOB = Task("up_job", "up_job")
+DOWN_JOB = Task("down_job", "down_job")
diff --git a/src/core/environment/DAXExtendParser.py b/src/core/environment/DAXExtendParser.py
new file mode 100644
index 0000000..b2ebfae
--- /dev/null
+++ b/src/core/environment/DAXExtendParser.py
@@ -0,0 +1,132 @@
+import xml.etree.ElementTree as ET
+
+from src.core.environment.BaseElements import Task
+from src.core.environment.BaseElements import File
+from src.core.environment.BaseElements import Workflow, AbstractWorkflow,Range
+
+
+class DAXParser:
+ def __init__(self):
+ pass
+
+ def readFiles(self, job, task):
+ files = job.findall('./{http://pegasus.isi.edu/schema/DAX}uses')
+ def buildFile(file):
+ return File(file.attrib['file'],int(file.attrib['size']))
+ output_files = {fl.name:fl for fl in [buildFile(file) for file in files if file.attrib['link'] == "output"]}
+ input_files = {fl.name:fl for fl in [buildFile(file) for file in files if file.attrib['link'] == "input"]}
+ task.output_files = output_files
+ task.input_files = input_files
+
+
+ def parseXml(self, filepath, wfId, taskPostfixId, wf_name, is_head=True):
+ tree = ET.parse(filepath)
+ root = tree.getroot()
+ jobs = root.findall('./{http://pegasus.isi.edu/schema/DAX}job')
+ children = root.findall('./{http://pegasus.isi.edu/schema/DAX}child')
+ alternates = root.findall('./{http://pegasus.isi.edu/schema/DAX}subwf')
+ subwfs = set()
+ internal_id2Task = dict()
+
+ for subwf in alternates:
+ subwf_id = subwf.attrib['id']
+ subwf_jobs = subwf.findall('./{http://pegasus.isi.edu/schema/DAX}job')
+ subwf_tasks = dict()
+ for job in subwf_jobs:
+ internal_id = job.attrib['id']
+ id = internal_id # + "_" + taskPostfixId + "_" + subwf_id
+ name = job.attrib['name']
+ task = Task(id,internal_id,subtask=True)
+ if 'range' in job.attrib:
+ range_string = job.attrib['range']
+ if range_string != None:
+ parts = range_string.split('-')
+ task.range = Range(int(parts[0]), int(parts[1]))
+ if 'alternate' in job.attrib:
+ alternates_string = job.attrib['alternate']
+ if alternates_string != None:
+ parts = alternates_string.split(',')
+ task.alternate_ids = parts
+ task.soft_reqs.add(name)
+ task.runtime = float(job.attrib['runtime'])
+ self.readFiles(job, task)
+ internal_id2Task[task.id] = task
+ subwf_tasks[task.id] = task
+
+ for child in children:
+ id = child.attrib['ref']
+ try:
+ parents = [subwf_tasks[prt.attrib['ref']] for prt in child.findall('./{http://pegasus.isi.edu/schema/DAX}parent')]
+ child = subwf_tasks[id]
+ child.parents.update(parents)
+ for parent in parents:
+ parent.children.add(child)
+ except:
+ pass
+
+ heads = [task for (name, task) in subwf_tasks.items() if len(task.parents) == 0]
+ common_head = Task("000_" + subwf_id, "000")
+ common_head.runtime = 0
+ for head in heads:
+ head.parents = set([common_head])
+ common_head.children = heads
+ subwf = AbstractWorkflow(subwf_id,subwf_id,common_head)
+ subwfs.add(subwf)
+
+ for job in jobs:
+ ## build task
+ internal_id = job.attrib['id']
+ id = internal_id + "_" + taskPostfixId + "_" + wf_name
+ soft = job.attrib['name']
+ task = Task(id,internal_id)
+ if 'range' in job.attrib:
+ range_string = job.attrib['range']
+ if range_string != None:
+ parts = range_string.split('-')
+ task.range = Range(int(parts[0]), int(parts[1]))
+ if 'alternate' in job.attrib:
+ alternates_string = job.attrib['alternate']
+ if alternates_string != None:
+ parts = alternates_string.split(',')
+ task.alternate_ids = parts
+ task.soft_reqs.add(soft)
+ task.runtime = float(job.attrib['runtime'])
+ self.readFiles(job, task)
+ internal_id2Task[task.internal_wf_id] = task
+
+ for id in internal_id2Task:
+ if hasattr(internal_id2Task[id], 'alternate_ids'):
+ internal_id2Task[id].alternates = []
+ for alternate_id in internal_id2Task[id].alternate_ids:
+ alternate = next(subwf for subwf in subwfs if subwf.id==alternate_id)
+ internal_id2Task[id].alternates.append(alternate)
+
+ for child in children:
+ id = child.attrib['ref']
+ parents = [internal_id2Task[prt.attrib['ref']] for prt in child.findall('./{http://pegasus.isi.edu/schema/DAX}parent')]
+ child = internal_id2Task[id]
+ child.parents.update(parents)
+ for parent in parents:
+ parent.children.add(child)
+
+ heads = [task for (name, task) in internal_id2Task.items() if len(task.parents) == 0 and task.subtask == False ]
+
+ common_head = Task("000_" + taskPostfixId, "000", is_head)
+ if is_head != True:
+ common_head.runtime = 0
+ for head in heads:
+ head.parents = set([common_head])
+ common_head.children = heads
+
+ wf = AbstractWorkflow(wfId, wf_name, common_head)
+ wf.get_real_wf()
+ return wf
+
+
+
+
+
+
+
+
+
diff --git a/src/core/environment/DAXParser.py b/src/core/environment/DAXParser.py
new file mode 100644
index 0000000..07a5cce
--- /dev/null
+++ b/src/core/environment/DAXParser.py
@@ -0,0 +1,66 @@
+import xml.etree.ElementTree as ET
+
+from src.core.environment.BaseElements import Task
+from src.core.environment.BaseElements import File
+from src.core.environment.BaseElements import Workflow
+
+
+class DAXParser:
+ def __init__(self):
+ pass
+
+ def readFiles(self, job, task):
+ files = job.findall('./{http://pegasus.isi.edu/schema/DAX}uses')
+ def buildFile(file):
+ return File(file.attrib['file'],int(file.attrib['size']))
+ output_files = {fl.name:fl for fl in [buildFile(file) for file in files if file.attrib['link'] == "output"]}
+ input_files = {fl.name:fl for fl in [buildFile(file) for file in files if file.attrib['link'] == "input"]}
+ task.output_files = output_files
+ task.input_files = input_files
+
+
+ def parseXml(self, filepath, wfId, taskPostfixId, wf_name, is_head=True):
+ tree = ET.parse(filepath)
+ root = tree.getroot()
+ jobs = root.findall('./{http://pegasus.isi.edu/schema/DAX}job')
+ children = root.findall('./{http://pegasus.isi.edu/schema/DAX}child')
+ internal_id2Task = dict()
+ for job in jobs:
+ ## build task
+ internal_id = job.attrib['id']
+ id = internal_id + "_" + taskPostfixId + "_" + wf_name
+ soft = job.attrib['name']
+ task = Task(id,internal_id)
+ task.soft_reqs.add(soft)
+ task.runtime = float(job.attrib['runtime'])
+ self.readFiles(job, task)
+ internal_id2Task[task.internal_wf_id] = task
+
+ for child in children:
+ id = child.attrib['ref']
+ parents = [internal_id2Task[prt.attrib['ref']] for prt in child.findall('./{http://pegasus.isi.edu/schema/DAX}parent')]
+ child = internal_id2Task[id]
+ child.parents.update(parents)
+ for parent in parents:
+ parent.children.add(child)
+
+ heads = [task for (name, task) in internal_id2Task.items() if len(task.parents) == 0 ]
+
+ common_head = Task("000_" + taskPostfixId, "000", is_head)
+ if is_head != True:
+ common_head.runtime = 0
+ for head in heads:
+ head.parents = set([common_head])
+ common_head.children = heads
+
+ wf = Workflow(wfId, wf_name, common_head)
+ return wf
+
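+# Typical use (see Utility.readWorkflow in src/core/environment/Utility.py):
+#
+#     wf = DAXParser().parseXml("resources/Montage_25.xml", "wf_00", "00", "Montage_25")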
+
+
+
+
+
+
+
+
diff --git a/src/core/environment/ResourceGenerator.py b/src/core/environment/ResourceGenerator.py
new file mode 100644
index 0000000..d439f1a
--- /dev/null
+++ b/src/core/environment/ResourceGenerator.py
@@ -0,0 +1,19 @@
+from src.core.environment.BaseElements import Resource, Node, SoftItem
+
+
+class ResourceGenerator:
+
+ @staticmethod
+ def r(list_flops):
+ result = []
+ res = Resource("res_0")
+ for flop, i in zip(list_flops, range(len(list_flops))):
+ node = Node(res.name + "_node_" + str(i), res, [SoftItem.ANY_SOFT])
+ node.flops = flop
+ result.append(node)
+ res.nodes = result
+ return [res]
+
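+# Usage sketch: ResourceGenerator.r([10, 20, 40]) returns a single resource
+# "res_0" with three nodes (res_0_node_0 .. res_0_node_2) whose flops are
+# 10, 20 and 40 respectively.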
+
+
+
diff --git a/src/core/environment/ResourceManager.py b/src/core/environment/ResourceManager.py
new file mode 100644
index 0000000..6b8b754
--- /dev/null
+++ b/src/core/environment/ResourceManager.py
@@ -0,0 +1,271 @@
+##interface Algorithm
+import functools
+import operator
+import itertools
+from src.core.environment.BaseElements import Resource
+
+
+class Algorithm:
+ def __init__(self):
+ self.resource_manager = None
+ self.estimator = None
+
+ def run(self, event):
+ pass
+
+##interface ResourceManager
+class ResourceManager:
+ def __init__(self):
+ pass
+
+ ##get all resources in the system
+ def get_resources(self):
+ raise NotImplementedError()
+
+ def res_by_id(self, id):
+ raise NotImplementedError()
+
+ def change_performance(self, node, performance):
+ raise NotImplementedError()
+
+    ## TODO: remove duplicate code with HeftHelper
+ def get_nodes(self):
+ resources = self.get_resources()
+ result = set()
+ for resource in resources:
+ result.update(resource.nodes)
+ return result
+
+ def get_nodes_by_resource(self, resource):
+        name = resource.name if isinstance(resource, Resource) else resource
+ nodes = [node for node in self.get_nodes() if node.resource.name == name]
+ ## TODO: debug
+ print("Name", name)
+ print("Nodes", nodes)
+
+ return nodes
+
+ def byName(self):
+ raise NotImplementedError()
+
+##interface Estimator
+class Estimator:
+ def __init__(self):
+ pass
+
+ ##get estimated time of running the task on the node
+ def estimate_runtime(self, task, node):
+ pass
+
+ ## estimate transfer time between node1 and node2 for data generated by the task
+ def estimate_transfer_time(self, node1, node2, task1, task2):
+ pass
+
+## element of Schedule
+class ScheduleItem:
+ UNSTARTED = "unstarted"
+ FINISHED = "finished"
+ EXECUTING = "executing"
+ FAILED = "failed"
+ def __init__(self, job, start_time, end_time):
+ self.job = job ## either task or service operation like vm up
+ self.start_time = start_time
+ self.end_time = end_time
+ self.state = ScheduleItem.UNSTARTED
+
+ @staticmethod
+ def copy(item):
+ new_item = ScheduleItem(item.job, item.start_time, item.end_time)
+ new_item.state = item.state
+ return new_item
+
+ @staticmethod
+ def MIN_ITEM():
+ return ScheduleItem(None, 10000000, 10000000)
+
+ def is_unstarted(self):
+ return self.state == ScheduleItem.UNSTARTED
+
+ def __str__(self):
+ return str(self.job.id) + ":" + str(self.start_time) + ":" + str(self.end_time) + ":" + self.state
+
+ def __repr__(self):
+ return str(self.job.id) + ":" + str(self.start_time) + ":" + str(self.end_time) + ":" + self.state
+
+
+class Schedule:
+ def __init__(self, mapping):
+ ## {
+ ## res1: (task1,start_time1, end_time1),(task2,start_time2, end_time2), ...
+ ## ...
+ ## }
+ self.mapping = mapping##dict()
+
+ def is_finished(self, task):
+ (node, item) = self.place(task)
+ if item is None:
+ return False
+ return item.state == ScheduleItem.FINISHED
+
+ def get_next_item(self, task):
+ for (node, items) in self.mapping.items():
+ l = len(items)
+ for i in range(l):
+ if items[i].job.id == task.id:
+ if l > i + 1:
+ return items[i + 1]
+ else:
+ return None
+ return None
+
+ def place(self, task):
+ for (node, items) in self.mapping.items():
+ for item in items:
+ if item.job.id == task.id:
+ return (node,item)
+ return None
+
+ def change_state_executed(self, task, state):
+ for (node, items) in self.mapping.items():
+ for item in items:
+ if item.job.id == task.id and (item.state == ScheduleItem.EXECUTING or item.state == ScheduleItem.UNSTARTED):
+ item.state = state
+ return None
+
+ def place_single(self, task):
+ for (node, items) in self.mapping.items():
+ for item in items:
+ if item.job.id == task.id and (item.state == ScheduleItem.EXECUTING or item.state == ScheduleItem.UNSTARTED):
+ return (node, item)
+ return None
+
+ def change_state_executed_with_end_time(self, task, state, time):
+ for (node, items) in self.mapping.items():
+ for item in items:
+ if item.job.id == task.id and item.state == ScheduleItem.EXECUTING:
+ item.state = state
+ item.end_time = time
+ return True
+ #print("gotcha_failed_unstarted task: " + str(task))
+ return False
+
+ def place_by_time(self, task, start_time):
+ for (node, items) in self.mapping.items():
+ for item in items:
+ if item.job.id == task.id and item.start_time == start_time:
+ return (node,item)
+ return None
+
+ def is_executing(self, task):
+ for (node, items) in self.mapping.items():
+ for item in items:
+ if item.job.id == task.id and item.state == ScheduleItem.EXECUTING:
+ return True
+ return False
+
+
+ def change_state(self, task, state):
+ (node, item) = self.place(task)
+ item.state = state
+
+ # def get_all_unique_tasks_id(self):
+ # ids = set(item.job.id for (node, items) in self.mapping.items() for item in items)
+ # return ids
+
+ def get_all_unique_tasks(self):
+ tasks = set(item.job for (node, items) in self.mapping.items() for item in items)
+ return tasks
+
+ def get_all_unique_tasks_id(self):
+ tasks = self.get_all_unique_tasks()
+ ids = set(t.id for t in tasks)
+ return ids
+
+ def get_unfailed_taks(self):
+ return [item.job for (node, items) in self.mapping.items()
+ for item in items if item.state == ScheduleItem.FINISHED or
+ item.state == ScheduleItem.EXECUTING or item.state == ScheduleItem.UNSTARTED]
+
+ def get_unfailed_tasks_ids(self):
+ return [job.id for job in self.get_unfailed_taks()]
+
+ def task_to_node(self):
+ """
+ This operation is applicable only for static scheduling.
+        i.e. it is assumed that each task is "executed" only once and only on one node.
+ Also, all tasks must have state "Unstarted".
+ """
+ all_items = [item for node, items in self.mapping.items() for item in items]
+ assert all(it.state == ScheduleItem.UNSTARTED for it in all_items),\
+ "This operation is applicable only for static scheduling"
+ t_to_n = {item.job: node for (node, items) in self.mapping.items() for item in items}
+ return t_to_n
+
+ def tasks_to_node(self):
+ ## there can be several instances of a task due to fails of node
+ ## we should take all possible occurences
+ task_instances = itertools.groupby(((item.job.id, item , node) for (node, items) in self.mapping.items() for item in items),
+ key=lambda x: x[0])
+ task_instances = {task_id: [(item, node) for _, item, node in group]
+ for task_id, group in task_instances}
+ return task_instances
+
+ ## TODO: there is duplicate functionality Utility.check_and_raise_for_fixed_part
+ # def contains(self, other):
+ # for node, other_items in other.mapping.items():
+ # if node not in self.mapping:
+ # return False
+ # this_items = self.mapping[node]
+ # for i, item in enumerate(other_items):
+ # if len(this_items) <= i:
+ # return False
+ # if item != this_items[i]:
+ # return False
+ # return True
+
+
+ @staticmethod
+ def insert_item(mapping, node, item):
+ result = []
+ i = 0
+ try:
+ while i < len(mapping[node]):
+ ## TODO: potential problem with double comparing
+ if mapping[node][i].start_time >= item.end_time:
+ break
+ i += 1
+ mapping[node].insert(i, item)
+        except:
+            ## silently skip insertion if the node has no entry in the mapping
+            pass
+
+
+ def get_items_in_time(self, time):
+ pass
+
+ ## gets schedule consisting of only currently running tasks
+ def get_schedule_in_time(self, time):
+ pass
+
+ def get_the_most_upcoming_item(self, time):
+ pass
+
+ def __str__(self):
+ return str(self.mapping)
+
+ def __repr__(self):
+ return str(self.mapping)
+
+
+##interface Scheduler
+class Scheduler:
+ def __init__(self):
+ ##previously built schedule
+ self.old_schedule = None
+ self.resource_manager = None
+ self.estimator = None
+ self.executor = None
+ self.workflows = None
+
+ ## build and returns new schedule
+ def schedule(self):
+ pass
diff --git a/src/core/environment/Utility.py b/src/core/environment/Utility.py
new file mode 100644
index 0000000..2d37d7d
--- /dev/null
+++ b/src/core/environment/Utility.py
@@ -0,0 +1,399 @@
+import cProfile
+import functools
+import json
+import operator
+import os
+from pprint import pprint
+import pstats
+import io
+import time
+from random import Random
+from src.experiments.aggregate_utilities import interval_statistics
+
+from src.settings import __root_path__
+from src.core.environment.ResourceManager import ScheduleItem, Schedule
+from src.core.environment.DAXExtendParser import DAXParser
+
+def f_eq(a, b):
+ """
+ check equality for two float numbers
+ """
+ return abs(a - b) < 0.00000001
+
+def wf(wf_name, task_postfix_id="00", deadline=1000, is_head=True):
+ # dax_filepath = "../../resources/{0}.xml".format(wf_name)
+ dax_filepath = "{0}/resources/{1}.xml".format(__root_path__, wf_name)
+ _wf = Utility.readWorkflow(dax_filepath, wf_name, task_postfix_id, deadline=deadline, is_head=is_head)
+ return _wf
+
+def timing(f):
+ def wrap(*args, **kwargs):
+ time1 = time.time()
+ ret = f(*args, **kwargs)
+ time2 = time.time()
+ #print('{0} function took {1:0.3f} ms'.format(f.__name__, (time2-time1)*1000.0))
+ return ret
+ return wrap
+
+class RepeatableTiming:
+ def __init__(self, repeat_count):
+ self._repeat_count = repeat_count
+
+ def __call__(self, func):
+ def wrap(*args, **kwargs):
+ def measure():
+ time1 = time.time()
+ x = func(*args, **kwargs)
+ time2 = time.time()
+ return (time2-time1)*1000.0
+
+ measures = [measure() for _ in range(self._repeat_count)]
+ mean, mn, mx, std, left, right = interval_statistics(measures)
+ print("Statistics - mean: {0}, min: {1}, max: {2}, std: {3}, left: {4}, right: {5} by {6} runs".format(mean,
+ mn, mx, std, left, right, self._repeat_count))
+
+ return func(*args, **kwargs)
+ return wrap
+
+
+def profile_decorator(func):
+ def wrap_func(*args, **kwargs):
+ pr = cProfile.Profile()
+ pr.enable()
+ #=============
+ result = func(*args, **kwargs)
+ #=============
+ pr.disable()
+ s = io.StringIO()
+ sortby = 'cumulative'
+ ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
+ ps.print_stats()
+ print(s.getvalue())
+ return result
+ return wrap_func
+
+def reverse_dict(d):
+ """ Reverses direction of dependence dict
+ >>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
+ >>> reverse_dict(d)
+ {1: ('a',), 2: ('a', 'b'), 3: ('b',)}
+ """
+ result = {}
+ for key in d:
+ for val in d[key]:
+ result[val] = result.get(val, tuple()) + (key, )
+ return result
+
+class GraphVisualizationUtility:
+ @staticmethod
+ def visualize_task_node_mapping(wf, schedule):
+ import matplotlib.pyplot as plt
+ import networkx
+
+ def extract_edges_and_vertex(parent, edge_set, vertex_set):
+ for child in parent.children:
+ vertex_set.add(child.id)
+ edge_set.add((parent.id, child.id))
+ extract_edges_and_vertex(child, edge_set, vertex_set)
+ pass
+ pass
+
+ def get_task_node_mapping(schedule):
+ result = {i.job.id: node.name for node, items in schedule.mapping.items() for i in items}
+ return result
+
+ def draw_graph():
+ graph = networkx.DiGraph()
+ edge_set = set()
+ vertex_set = set()
+ extract_edges_and_vertex(wf.head_task, edge_set, vertex_set)
+            edge_set = filter(lambda x: x[0] != wf.head_task.id, edge_set)
+            vertex_set = filter(lambda x: x != wf.head_task.id, vertex_set)
+ tnmap = get_task_node_mapping(schedule)
+ for v in vertex_set:
+ graph.add_node(v)
+ for v1, v2 in edge_set:
+ graph.add_edge(v1, v2)
+ labels = dict((t, str(t)+"/"+str(n)) for t, n in tnmap.items())
+ # networkx.draw(graph)
+ networkx.draw(graph, labels=labels)
+ plt.show()
+ pass
+
+ draw_graph()
+ pass
+
+
+def tracing(func):
+ def wrap(*args, **kwargs):
+ print("function {0} started".format(func.__name__))
+ result = func(*args, **kwargs)
+ print("function {0} finished".format(func.__name__))
+ return result
+ return wrap
+
+
+def signal_if_true(func):
+ def wrap(*args, **kwargs):
+ x = func(*args, **kwargs)
+ if isinstance(x, bool):
+ if x is True:
+ print("Event {0} appeared".format(func.__name__))
+ return x
+ else:
+ raise ValueError("result of function {0} is not of a boolean type".format(func.__name__))
+ return wrap
+
+
+
+
+class Utility:
+ MIN_PIPELINE_SIZE = 10
+ MAX_PIPELINE_SIZE = 40
+
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def is_enough_to_be_executed(wf, t_id, completed_tasks_ids):
+ pids = [p.id for p in wf.byId(t_id).parents if p != wf.head_task]
+ return all(pid in completed_tasks_ids for pid in pids)
+
+
+ @staticmethod
+ def get_default_bundle():
+ ## dedicated resource are the same for all bundles
+ _wf = wf('CyberShake_30')
+ path = '{0}/resources/saved_schedules/CyberShake_30_bundle_backup.json'.format(__root_path__)
+ bundle = Utility.load_schedule(path, _wf)
+ return bundle
+
+ @staticmethod
+ def generateUrgentPipeline(dax_filepath, wf_name, wf_start_id, task_postfix_id, deadline):
+ parser = DAXParser()
+ random = Random()
+ pipelineSize = 1##random.randint(Utility.MIN_PIPELINE_SIZE,Utility.MAX_PIPELINE_SIZE)
+ wfs = [parser.parseXml(dax_filepath, wf_start_id + str(i), task_postfix_id + str(i), wf_name) for i in
+ range(0, pipelineSize)]
+ for wf in wfs:
+ wf.deadline = deadline
+ return wfs
+
+ @staticmethod
+ def readWorkflow(dax_filepath, wf_name, wf_start_id="00", task_postfix_id="00", deadline=1000, is_head=True):
+ parser = DAXParser()
+ wf = parser.parseXml(dax_filepath, wf_start_id + "0", task_postfix_id + "0", wf_name, is_head=is_head)
+ wf.deadline = deadline
+ return wf
+
+ @staticmethod
+ def validate_time_seq(items):
+ time = -1
+ for item in items:
+ if time > item.start_time:
+ return False
+ # raise Exception("Node: " + str(node) + " all time: " + str(time) + " st_time: " + str(item.start_time))
+ else:
+ time = item.start_time
+ if time > item.end_time:
+ return False
+ else:
+ time = item.end_time
+ return True
+
+ @staticmethod
+ def validateNodesSeq(schedule):
+ for (node, items) in schedule.mapping.items():
+ result = Utility.validate_time_seq(items)
+ if result is False:
+ return False
+ return True
+
+ ## TODO: under development now
+ @staticmethod
+ def validateParentsAndChildren(schedule, workflow, AllUnstartedMode=False, RaiseException=False):
+ INCORRECT_SCHEDULE = "Incorrect schedule"
+ #{
+ # task: (node,start_time,end_time),
+ # ...
+ #}
+ task_to_node = dict()
+ for (node, items) in schedule.mapping.items():
+ for item in items:
+ seq = task_to_node.get(item.job.id, [])
+ seq.append(item)
+ ##seq.append(node, item.start_time, item.end_time, item.state)
+ task_to_node[item.job.id] = seq
+
+ def check_failed(seq):
+ ## in schedule items sequence, only one finished element must be
+ ## resulted schedule can contain only failed and finished elements
+ states = [item.state for item in seq]
+ if AllUnstartedMode:
+ if len(states) > 1 or states[0] != ScheduleItem.UNSTARTED:
+ if RaiseException: raise Exception(INCORRECT_SCHEDULE)
+ else: return False
+ else:
+ if states[-1] != ScheduleItem.FINISHED:
+ if RaiseException: raise Exception(INCORRECT_SCHEDULE)
+ else: return False
+ finished = [state for state in states if state == ScheduleItem.FINISHED]
+ if len(finished) != 1:
+ if RaiseException: raise Exception(INCORRECT_SCHEDULE)
+ else: return False
+ failed = [state for state in states if state == ScheduleItem.FAILED]
+ if len(states) - len(finished) != len(failed):
+ if RaiseException: raise Exception(INCORRECT_SCHEDULE)
+ else: return False
+ return True
+
+ task_to_node = {job_id: sorted(seq, key=lambda x: x.start_time) for (job_id, seq) in task_to_node.items()}
+ for (job_id, seq) in task_to_node.items():
+ result = Utility.validate_time_seq(seq)
+ if result is False:
+ if RaiseException: raise Exception(INCORRECT_SCHEDULE)
+ else: return False
+ if check_failed(seq) is False:
+ if RaiseException: raise Exception(INCORRECT_SCHEDULE)
+ else: return False
+
+
+ def check(task):
+ for child in task.children:
+ p_end_time = task_to_node[task.id][-1].end_time
+ c_start_time = task_to_node[child.id][-1].start_time
+ if c_start_time < p_end_time:
+
+ #TODO: debug
+ print("Parent task: ", task.id)
+ print("Child task: ", child.id)
+
+ if RaiseException: raise Exception(INCORRECT_SCHEDULE)
+ else: return False
+ res = check(child)
+ if res is False:
+ if RaiseException: raise Exception(INCORRECT_SCHEDULE)
+ else: return False
+ return True
+
+ for task in workflow.head_task.children:
+ res = check(task)
+ if res is False:
+ if RaiseException: raise Exception(INCORRECT_SCHEDULE)
+ else: return False
+ return True
+
+
+
+ @staticmethod
+ def is_static_schedule_valid(_wf, schedule):
+ try:
+ Utility.validate_static_schedule(_wf, schedule)
+ except:
+ return False
+ return True
+
+ @staticmethod
+ def makespan(schedule):
+ def get_last_time(node_items):
+ return 0 if len(node_items) == 0 else node_items[-1].end_time
+
+ last_time = max([get_last_time(node_items) for (node, node_items) in schedule.mapping.items()])
+ return last_time
+
+ @staticmethod
+ def overall_transfer_time(schedule, wf, estimator):
+ """
+        This method extracts the OVERALL transfer time during execution.
+        Note that common_transfer_time + common_execution_time != makespan,
+        because transfers and execution can run in parallel and therefore overlap.
+ """
+ t_n = schedule.task_to_node()
+ # t_n = {task.id: node for task, node in t_n.items()}
+ tasks = wf.get_all_unique_tasks()
+ def calc(p, child):
+ return estimator.estimate_transfer_time(t_n[p], t_n[child], p, child)
+        relations_iter = (calc(p, child) for p in tasks if p != wf.head_task for child in p.children)
+ transfer_time = functools.reduce(operator.add, relations_iter)
+ return transfer_time
+
+ @staticmethod
+ def overall_execution_time(schedule):
+ """
+        This method extracts the OVERALL execution time during execution.
+        Note that common_transfer_time + common_execution_time != makespan,
+        because transfers and execution can run in parallel and therefore overlap.
+ """
+ execution_iters = (item.end_time - item.start_time for node, items in schedule.mapping.items() for item in items)
+ execution_time = functools.reduce(operator.add, execution_iters)
+ return execution_time
+
+
+
+ @staticmethod
+ def load_schedule(path, wf):
+ decoder = Utility.build_bundle_decoder(wf.head_task)
+ f = open(path, 'r')
+ bundle = json.load(f, object_hook=decoder)
+ f.close()
+ return bundle
+
+ @staticmethod
+ def check_and_raise_for_fixed_part(resulted_schedule, fixed_schedule_part, current_time):
+ ## TODO: Urgent! make a check for consistency with fixed schedule
+ fpart_check = Utility.check_fixed_part(resulted_schedule, fixed_schedule_part, current_time)
+        ## TODO: Urgent! check that there are no duplicated tasks with state Finished, Unstarted or Executing
+ duplicated_check = Utility.check_duplicated_tasks(resulted_schedule)
+
+ if fpart_check is False:
+ raise Exception("check for consistency with fixed schedule didn't pass")
+ else:
+ print("Time: " + str(current_time) + " fpart_check passed")
+ if duplicated_check is False:
+ raise Exception("check for duplicated tasks didn't pass")
+ else:
+ print("Time: " + str(current_time) + " duplicated_check passed")
+ pass
+
+ @staticmethod
+ def check_fixed_part(schedule, fixed_part, current_time):
+ def item_equality(item1, fix_item):
+
+ is_equal = item1.state == fix_item.state
+ not_finished = (fix_item.state == ScheduleItem.UNSTARTED or fix_item.state == ScheduleItem.EXECUTING)
+ is_finished_now = (
+ not_finished and item1.state == ScheduleItem.FINISHED and fix_item.end_time <= current_time)
+ is_executing_now = (
+ not_finished and item1.state == ScheduleItem.EXECUTING and fix_item.start_time <= current_time <= fix_item.end_time )
+ is_state_correct = is_equal or is_finished_now or is_executing_now
+
+ return item1.job.id == fix_item.job.id and is_state_correct and item1.start_time == fix_item.start_time and item1.end_time == fix_item.end_time
+
+ for (node, items) in fixed_part.mapping.items():
+ #TODO: need to make here search by node.name
+ itms = schedule.mapping[node]
+ for i in range(len(items)):
+ if not item_equality(itms[i], items[i]):
+ return False
+ return True
+
+ @staticmethod
+ def check_duplicated_tasks(schedule):
+ task_instances = dict()
+ for (node, items) in schedule.mapping.items():
+ for item in items:
+ instances = task_instances.get(item.job.id, [])
+ instances.append((node, item))
+ task_instances[item.job.id] = instances
+
+ for (id, items) in task_instances.items():
+ sts = [item.state for (node, item) in items]
+ inter_excluded_states = list(filter(
+ lambda x: x == ScheduleItem.FINISHED or x == ScheduleItem.EXECUTING or x == ScheduleItem.UNSTARTED,
+ sts))
+ if len(inter_excluded_states) > 1:
+ return False
+ pass
+ return True
+
+
diff --git a/src/experiments/aggregate_utilities.py b/src/experiments/aggregate_utilities.py
new file mode 100644
index 0000000..ec519ac
--- /dev/null
+++ b/src/experiments/aggregate_utilities.py
@@ -0,0 +1,148 @@
+import json
+import os
+import math
+from pprint import pprint
+import matplotlib.pyplot as plt
+from src.settings import TEMP_PATH
+from scipy import stats
+import scipy
+import numpy
+
+# HACK
+BASE_PARAMS = {
+ "alg_name": "cga",
+ "executor_params": {
+ "base_fail_duration": 40,
+ "base_fail_dispersion": 1,
+ "fixed_interval_for_ga": 15,
+ "fail_count_upper_limit": 15,
+ "replace_anyway": True
+ },
+ "resource_set": {
+ "nodes_conf": [(10, 15, 25, 30)],
+ "rules_list": [(80, 30)]
+ },
+ "estimator_settings": {
+ "ideal_flops": 20,
+ "transfer_nodes": 100,
+ "reliability": 1.0,
+ "transfer_blades": 100
+ }
+}
+
+
+WFS_COLORS_30 = {
+ # 30 - series
+ "Montage_25": "-gD",
+ "CyberShake_30": "-rD",
+ "Inspiral_30": "-bD",
+ "Sipht_30": "-yD",
+ "Epigenomics_24": "-mD",
+}
+
+WFS_COLORS_50 = {
+ # 50 - series
+ "Montage_50": "-gD",
+ "CyberShake_50": "-rD",
+ "Inspiral_50": "-bD",
+ "Sipht_60": "-yD",
+ "Epigenomics_46": "-mD",
+}
+
+
+WFS_COLORS_75 = {
+ # 75 - series
+ "Montage_75": "-gD",
+ "CyberShake_75": "-rD",
+ "Inspiral_72": "-bD",
+ "Sipht_73": "-yD",
+ "Epigenomics_72": "-mD",
+}
+
+
+WFS_COLORS_100 = {
+ # 100 - series
+ "Montage_100": "-gD",
+ "CyberShake_100": "-rD",
+ "Inspiral_100": "-bD",
+ "Sipht_100": "-yD",
+ "Epigenomics_100": "-mD",
+}
+
+WFS_COLORS = dict()
+WFS_COLORS.update(WFS_COLORS_30)
+WFS_COLORS.update(WFS_COLORS_50)
+WFS_COLORS.update(WFS_COLORS_75)
+WFS_COLORS.update(WFS_COLORS_100)
+
+
+def visualize(data, functions, path_to_save=None):
+
+
+ for i in range(len(functions)):
+ #plt.subplot(len(functions), 1, i + 1)
+ plt.clf()
+ functions[i](data)
+
+ plt.tight_layout()
+
+ if path_to_save is None:
+ plt.show()
+ else:
+ directory = os.path.dirname(path_to_save)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ plt.savefig(path_to_save, dpi=96.0, format="png")
+ plt.clf()
+ pass
+
+
+def aggregate(pathes, picture_path="gh.png", extract_and_add=None, functions=None):
+ files = [os.path.join(path, p) for path in pathes for p in os.listdir(path) if p.endswith(".json")]
+ data = {}
+ for p in files:
+ with open(p, "r") as f:
+ d = json.load(f)
+ extract_and_add(data, d)
+
+ path = os.path.join(TEMP_PATH, picture_path) if not os.path.isabs(picture_path) else picture_path
+ visualize(data, functions, path)
+
+
+
+def interval_statistics(points, confidence_level=0.95):
+ s = numpy.array(points)
+ n, min_max, mean, var, skew, kurt = stats.describe(s)
+ std = math.sqrt(var)
+ left, right = stats.norm.interval(confidence_level, loc=mean, scale=std)
+ mn, mx = min_max
+ return mean, mn, mx, std, left, right
+
+
+class InMemoryDataAggregator:
+
+ def __init__(self, pathes):
+ files = [os.path.join(path, p) for path in pathes for p in os.listdir(path) if p.endswith(".json")]
+ self._data_array = []
+ for p in files:
+ with open(p, "r") as f:
+ d = json.load(f)
+ self._data_array.append(d)
+ pass
+
+ def __call__(self, picture_path="gh.png", extract_and_add=None, functions=None):
+ data = {}
+ for d in self._data_array:
+ extract_and_add(data, d)
+
+ path = os.path.join(TEMP_PATH, picture_path) if not os.path.isabs(picture_path) else picture_path
+ visualize(data, functions, path)
+ pass
+ pass
+
+
+def interval_stat_string(stat_result):
+ mean, mn, mx, std, left, right = stat_result
+ st = "Mean: {0:.0f}, Min: {1:.0f}, Max: {2:.0f}, Std: {3:.0f}, Left: {4:.0f}, Right: {5:.0f}"\
+ .format(mean, mn, mx, std, left, right)
+ return st
\ No newline at end of file
diff --git a/src/experiments/common.py b/src/experiments/common.py
new file mode 100644
index 0000000..d77bfc0
--- /dev/null
+++ b/src/experiments/common.py
@@ -0,0 +1,74 @@
+from deap import tools
+import numpy
+from src.algs.heft.DSimpleHeft import run_heft
+from src.core.CommonComponents.ExperimentalManager import ExperimentResourceManager, ModelTimeEstimator
+from src.core.environment.Utility import wf
+from src.core.environment.ResourceGenerator import ResourceGenerator as rg
+
+
+
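+# Base class for the experiments below: lazily builds the (workflow, resource manager, estimator)
+# environment, the DEAP statistics/logbook pair and a reference HEFT schedule.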
+class AbstractExperiment:
+ def __init__(self, wf_name,
+ resources_set=[4, 8, 8, 16]):
+ self.wf_name = wf_name
+
+        self._resources_set = resources_set
+
+ self._wf = None
+ self._rm = None
+ self._estimator = None
+
+ self._stats = None
+ self._logbook = None
+ self._toolbox = None
+
+ self._heft_schedule = None
+
+        # TODO: make this config-driven, i.e.
+        """
+        **config
+        self._build_env(config) - a smart, factory-style setter
+        self.env(self) - the corresponding getter
+        """
+
+ pass
+
+ # def _fields(self, private=True, **kwargs):
+ # for k, v in kwargs.items():
+ # name = "_{0}".format(k) if private else "{0}".format(k)
+ # setattr(self, name, v)
+ # pass
+
+ def env(self):
+ if not self._wf or not self._rm or not self._estimator:
+ self._wf = wf(self.wf_name)
+            self._rm = ExperimentResourceManager(rg.r(self._resources_set))
+ self._estimator = ModelTimeEstimator(bandwidth=10)
+ return self._wf, self._rm, self._estimator
+
+ def stats(self):
+ if self._stats is None:
+ self._stats = tools.Statistics(lambda ind: ind.fitness.values[0])
+ self._stats.register("avg", numpy.mean)
+ self._stats.register("std", numpy.std)
+ self._stats.register("min", numpy.min)
+ self._stats.register("max", numpy.max)
+ return self._stats
+
+ def logbook(self):
+ if self._logbook is None:
+ self._logbook = tools.Logbook()
+ self._logbook.header = ["gen", "evals"] + self.stats().fields
+ return self._logbook
+
+    def heft_schedule(self):
+        if not self._heft_schedule:
+            # make sure the environment is initialized before building the reference schedule
+            self.env()
+            self._heft_schedule = run_heft(self._wf, self._rm, self._estimator)
+        return self._heft_schedule
+
+ def toolbox(self):
+ raise NotImplementedError()
+
+ def __call__(self, *args, **kwargs):
+ raise NotImplementedError()
+ pass
diff --git a/src/experiments/comparison_experiments/GAvsHEFT.py b/src/experiments/comparison_experiments/GAvsHEFT.py
new file mode 100644
index 0000000..8492aba
--- /dev/null
+++ b/src/experiments/comparison_experiments/GAvsHEFT.py
@@ -0,0 +1,125 @@
+import functools
+import numpy
+
+from src.algs.ga.GARunner import MixRunner
+from src.algs.heft.DSimpleHeft import run_heft
+from src.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager
+from src.core.environment.Utility import wf, Utility
+from src.experiments.cga.mobjective.utility import SimpleTimeCostEstimator
+from src.experiments.cga.utilities.common import UniqueNameSaver, repeat
+from src.core.environment.ResourceGenerator import ResourceGenerator as rg
+
+
+wf_names = ['Montage_100']
+# wf_names = ['Montage_50']
+# wf_names = ['Montage_500']
+# wf_names = ['CyberShake_100']
+# wf_names = ['Epigenomics_100']
+# wf_names = ["CyberShake_50"]
+
+only_heft = False
+
+PARAMS = {
+ "ideal_flops": 20,
+ "is_silent": False,
+ "is_visualized": False,
+ "ga_params": {
+ "Kbest": 5,
+ "population": 50,
+ "crossover_probability": 0.9, #0.3
+ "replacing_mutation_probability": 0.9, #0.1
+ "sweep_mutation_probability": 0.3, #0.3
+ "generations": 300
+ },
+ "nodes_conf": [10, 15, 25, 30],
+ "transfer_time": 100,
+ "heft_initial": False
+}
+
+run = functools.partial(MixRunner(), **PARAMS)
+directory = "../../temp/ga_vs_heft_exp"
+saver = UniqueNameSaver(directory)
+
+# def do_exp():
+# ga_makespan, heft_makespan, ga_schedule, heft_schedule = run(wf_names[0])
+# saver(ga_makespan)
+# return ga_makespan
+
+def do_exp_schedule(takeHeftSchedule=True):
+ saver = UniqueNameSaver("../../temp/ga_vs_heft_exp_heft_schedule")
+
+ ga_makespan, heft_makespan, ga_schedule, heft_schedule, logbook = run(wf_names[0])
+
+ ## TODO: pure hack
+
+ schedule = heft_schedule if takeHeftSchedule else ga_schedule
+
+ mapping = [(item.job.id, node.flops) for node, items in schedule.mapping.items() for item in items]
+ mapping = sorted(mapping, key=lambda x: x[0])
+
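+    # note: the ordering is always derived from the HEFT schedule here, regardless of takeHeftSchedule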
+ ordering = [(item.job.id, item.start_time) for node, items in heft_schedule.mapping.items() for item in items]
+ ordering = [t for t, time in sorted(ordering, key=lambda x: x[1])]
+
+ data = {
+ "mapping": mapping,
+ "ordering": ordering
+ }
+
+ name = saver(data)
+ return ga_makespan, heft_makespan, ga_schedule, heft_schedule, name, logbook
+
+def do_exp_heft_schedule():
+ res = do_exp_schedule(True)
+ return (res[0], res[5])
+
+def do_exp_ga_schedule():
+ res = do_exp_schedule(False)
+ return (res[0], res[4])
+
+
+if __name__ == '__main__':
+ print("Population size: " + str(PARAMS["ga_params"]["population"]))
+
+ _wf = wf(wf_names[0])
+ rm = ExperimentResourceManager(rg.r(PARAMS["nodes_conf"]))
+ estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
+ ideal_flops=PARAMS["ideal_flops"], transfer_time=PARAMS["transfer_time"])
+
+ heft_schedule = run_heft(_wf, rm, estimator)
+ heft_makespan = Utility.makespan(heft_schedule)
+ overall_transfer = Utility.overall_transfer_time(heft_schedule, _wf, estimator)
+ overall_execution = Utility.overall_execution_time(heft_schedule)
+
+ print("Heft makespan: {0}, Overall transfer time: {1}, Overall execution time: {2}".format(heft_makespan,
+ overall_transfer,
+ overall_execution))
+
+ if not only_heft:
+ exec_count = 100
+ gen = PARAMS["ga_params"]["generations"]
+ res_list = [0 for _ in range(gen)]
+ result = repeat(do_exp_heft_schedule, exec_count)
+ mean = numpy.mean([makespan for (makespan, list) in result])
+ for i in range(exec_count):
+ cur_list = result[i][1]
+ print(str(cur_list))
+ for j in range(gen):
+ res_list[j] = res_list[j] + cur_list[j]
+ for j in range(gen):
+ res_list[j] = res_list[j] / exec_count
+ print(str(res_list))
+
+ #file = open("C:\Melnik\Experiments\Work\PSO_compare\populations\GA with HEFT cyber.txt", 'w')
+ #file.write("#gen result" + "\n")
+ #for i in range(gen):
+ # file.write(str(i) + " " + str(res_list[i]) + "\n")
+
+ #profit = (1 - mean / heft_makespan) * 100
+ #print(result)
+ print("Heft makespan: {0}, Overall transfer time: {1}, Overall execution time: {2}".format(heft_makespan,
+ overall_transfer,
+ overall_execution))
+ print("Mean: {0}".format(mean))
+ #print("Profit: {0}".format(profit))
+
+
diff --git a/src/experiments/comparison_experiments/HEFTvsPEFT.py b/src/experiments/comparison_experiments/HEFTvsPEFT.py
new file mode 100644
index 0000000..6ce36af
--- /dev/null
+++ b/src/experiments/comparison_experiments/HEFTvsPEFT.py
@@ -0,0 +1,86 @@
+from src.algs.heft.DSimpleHeft import run_heft
+from src.algs.peft.DSimplePeft import run_peft
+from src.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager
+from src.core.environment.Utility import wf, Utility
+from src.experiments.cga.mobjective.utility import SimpleTimeCostEstimator
+from src.core.environment.ResourceGenerator import ResourceGenerator as rg
+
+#Mishanya
+from src.algs.heft.simple_heft import StaticHeftPlanner
+
+
+#rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
+rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
+#estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
+# ideal_flops=20, transfer_time=100)
+estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
+ ideal_flops=20, transfer_time=100)
+
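+# Run HEFT on the given workflow with the given transfer time, validate the schedule and return its makespan.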
+def do_exp_HEFT(wf_name, trans):
+ _wf = wf("" + wf_name)
+
+ estimator.transfer_time = trans
+ #estim = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
+ # ideal_flops=20, transfer_time=trans)
+
+ heft_schedule = run_heft(_wf, rm, estimator)
+ #return heft_schedule
+ Utility.validate_static_schedule(_wf, heft_schedule)
+
+ makespan = Utility.makespan(heft_schedule)
+ return makespan
+ # print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
+
+#Mishanya
+def do_exp_PEFT(wf_name, trans):
+ _wf = wf(wf_name)
+ estimator.transfer_time = trans
+ #estim = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
+ # ideal_flops=20, transfer_time=trans)
+
+ peft_schedule = run_peft(_wf, rm, estimator)
+ #return peft_schedule
+ Utility.validate_static_schedule(_wf, peft_schedule)
+
+ makespan = Utility.makespan(peft_schedule)
+ return makespan
+
+
+
+
+
+if __name__ == "__main__":
+ repeat_count = 1
+ #wf_list = ["Montage_25"]
+ wf_list = ["Montage_25", "Montage_30", "Montage_50", "Montage_100", "Montage_250",
+ "Epigenomics_24", "Epigenomics_46", "Epigenomics_72", "Epigenomics_100",
+ "CyberShake_30", "CyberShake_50", "CyberShake_75", "CyberShake_100",
+ "Inspiral_30", "Inspiral_50", "Inspiral_100", "Sipht_30", "Sipht_60", "Sipht_100"]
+ #wf_list = ["Montage_25", "Montage_50", "Montage_100", "Montage_250", "CyberShake_30",
+ # "CyberShake_50", "CyberShake_100", "Inspiral_30", "Inspiral_100", "Sipht_30", "Sipht_60", "Sipht_100"]
+ trans_list = [10, 100, 500, 1000, 10000]
+ for wf_cur in wf_list:
+ print(wf_cur)
+
+ print(" HEFT")
+ heft_result = [do_exp_HEFT(wf_cur, 100) for _ in range(repeat_count)]
+ #print(min(result))
+ print(" " + str(heft_result))
+
+ print(" PEFT")
+ peft_result = [do_exp_PEFT(wf_cur, 100) for _ in range(repeat_count)]
+ #print("finish")
+ #print(max(result))
+ print(" " + str(peft_result))
+ #for _ in range(repeat_count):
+ #profit_list = [(round((((do_exp_HEFT(wf_cur, trans) / do_exp_PEFT(wf_cur, trans)) - 1) * 100), 2), trans) for trans in [x * 10 for x in range(101)]]
+ #profit_list = [(round((((do_exp_HEFT(wf_cur, trans) / do_exp_PEFT(wf_cur, trans)) - 1) * 100), 2), trans) for trans in [x for x in trans_list]]
+ #profit_list = [(round((((do_exp_PEFT(wf_cur, trans) / do_exp_HEFT(wf_cur, trans)) - 1) * 100), 2), trans) for trans in [x * 10 for x in range(11)]]
+ #profit_list = [(round((((do_exp_PEFT(wf_cur, trans) / do_exp_HEFT(wf_cur, trans)) - 1) * 100), 2), trans) for trans in [x for x in trans_list]]
+ profit_list = [(round((((heft_result[0] / peft_result[0]) - 1) * 100), 2), trans) for trans in [100]]
+ print(" " + str(profit_list))
+ #file = open("F:\eScience\Work\experiments\HEFTvsPEFT\\" + wf_cur + ".txt", 'w')
+ #for (profit, trans) in profit_list:
+ # file.write(str(profit) + " " + str(trans) + "\n")
+
+
diff --git a/src/experiments/comparison_experiments/HeftOnly.py b/src/experiments/comparison_experiments/HeftOnly.py
new file mode 100644
index 0000000..0b42174
--- /dev/null
+++ b/src/experiments/comparison_experiments/HeftOnly.py
@@ -0,0 +1,20 @@
+from src.algs.heft.DSimpleHeft import run_heft
+from src.core.CommonComponents.ExperimentalManager import ExperimentResourceManager, ModelTimeEstimator
+from src.core.environment.Utility import wf, Utility
+from src.core.environment.ResourceGenerator import ResourceGenerator as rg
+
+ideal_flops = 8.0
+
+rm = ExperimentResourceManager(rg.r([4.0, 8.0, 8.0, 16.0]))
+
+estimator = ModelTimeEstimator(bandwidth=10)  # data transfer rate of 10 MB/sec
+
+def do_exp(wf_name, transfer_time=None):
+    _wf = wf(wf_name)
+    if transfer_time is not None:
+        # some callers pass a data-intensity value; apply it the same way the other experiments do
+        estimator.transfer_time = transfer_time
+    heft_schedule = run_heft(_wf, rm, estimator)
+    makespan = Utility.makespan(heft_schedule)
+    return makespan
+
+if __name__ == "__main__":
+ result = do_exp("Montage_25")
+ print(result)
diff --git a/src/experiments/comparison_experiments/OMPSOvsHEFT.py b/src/experiments/comparison_experiments/OMPSOvsHEFT.py
new file mode 100644
index 0000000..e73cc5b
--- /dev/null
+++ b/src/experiments/comparison_experiments/OMPSOvsHEFT.py
@@ -0,0 +1,50 @@
+from src.experiments.cga.utilities.common import repeat
+from src.experiments.pso.ompso_base_experiment import OmpsoBaseExperiment
+from src.experiments.pso.rdpso_base_experiment_ordering import RdpsoBaseExperiment
+from src.experiments.comparison_experiments.HeftOnly import do_exp
+from src.core.environment.Utility import wf
+from src.algs.heft.HeftHelper import HeftHelper
+
+if __name__ == "__main__":
+ wf_cur = "Montage_25"
+
+ w = 0.2
+ c1 = 0.6
+ c2 = 0.2
+ gen = 10
+ n = 4
+ execCount = 1
+ data_intensive = 100
+
+ exp_om = OmpsoBaseExperiment(wf_name=wf_cur,
+ #exp_rd = RdpsoBaseExperiment(wf_name=wf_cur,
+ W=w, C1=c1, C2=c2,
+ GEN=gen, N=n, data_intensive=data_intensive)
+
+ #result = repeat(exp_om, execCount)
+ result = repeat(exp_om, execCount)
+
+ #file = open("C:\Melnik\Experiments\Work\PSO_compare\populations\OM cyber.txt", 'w')
+ #file.write("#w = " + str(w) + " c1 = " + str(c1) + " c2 = " + str(c2) + "\n")
+ #file.write("#gen result" + "\n")
+
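+    # accumulate the per-generation values of each run and average them over exec_count executions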
+ res_list = [0 for _ in range(gen)]
+ for i in range(execCount):
+ cur_list = result[i][1]
+ print(str(cur_list))
+ for j in range(gen):
+ res_list[j] = res_list[j] + cur_list[j]
+
+ res_list = [x / execCount for x in res_list]
+ print("res_list = " + str(res_list))
+ #for i in range(gen):
+ #file.write(str(i) + " " + str(res_list[i]) + "\n")
+ #sts = interval_statistics(result[0])
+ #heftRes = do_exp(wf_cur, data_intensive)
+
+ #print("res_list = " + str(res_list))
+
+ #print(" HEFT " + str(heftRes))
+ #print(" OM " + str(sts))
+
+ print("finish")
\ No newline at end of file
diff --git a/src/experiments/comparison_experiments/OMPSOvsRDPSO.py b/src/experiments/comparison_experiments/OMPSOvsRDPSO.py
new file mode 100644
index 0000000..6ea7710
--- /dev/null
+++ b/src/experiments/comparison_experiments/OMPSOvsRDPSO.py
@@ -0,0 +1,218 @@
+from copy import deepcopy
+import random
+from deap.base import Toolbox
+import numpy
+#----
+#from src.algs.pso.rdpso.ordering_operators import build_schedule, generate, ordering_update, fitness
+import src.algs.pso.ordering_operators as om_order
+import src.algs.pso.rdpso.ordering_operators as rd_order
+#from src.algs.pso.rdpso.rdpso import run_pso, initMapMatrix, initRankList
+import src.algs.pso.sdpso as om
+import src.algs.pso.rdpso.rdpso as rd
+#from src.algs.pso.rdpso.mapping_operators import update as mapping_update
+import src.algs.pso.mapping_operators as om_map
+import src.algs.pso.rdpso.mapping_operators as rd_map
+#---
+from src.core.environment.Utility import Utility
+from src.experiments.aggregate_utilities import interval_statistics, interval_stat_string
+from src.experiments.cga.utilities.common import repeat
+from src.experiments.common import AbstractExperiment
+from src.algs.heft.HeftHelper import HeftHelper
+
+
+class RdpsoBaseExperiment(AbstractExperiment):
+
+ @staticmethod
+ def run(**kwargs):
+ inst = RdpsoBaseExperiment(**kwargs)
+ return inst()
+
+ def __init__(self, wf_name, W, C1, C2, GEN, N):
+ super().__init__(wf_name)
+
+ self.W = W
+ self.C1 = C1
+ self.C2 = C2
+ self.GEN = GEN
+ self.N = N
+ pass
+
+ def __call__(self):
+
+ stats, logbook = self.stats(), self.logbook()
+ _wf, rm, estimator = self.env()
+ heft_schedule = self.heft_schedule()
+
+ wf_dag = HeftHelper.convert_to_parent_children_map(_wf)
+ jobs = set(wf_dag.keys()) | set(x for xx in wf_dag.values() for x in xx)
+ nodes = rm.get_nodes()
+ mapMatrix = rd.initMapMatrix(jobs, nodes, estimator)
+ rankList = rd.initRankList(wf_dag, nodes, estimator)
+ ordFilter = rd.filterList(_wf)
+
+ toolbox = self.toolbox(mapMatrix, rankList, ordFilter)
+
+ pop, log, best = rd.run_pso(
+ toolbox=toolbox,
+ logbook=logbook,
+ stats=stats,
+ gen_curr=0, gen_step=self.GEN, invalidate_fitness=True, initial_pop=None,
+ w=self.W, c1=self.C1, c2=self.C2, n=self.N, rm=rm, wf=_wf, estimator=estimator, mapMatrix=mapMatrix, rankList=rankList, ordFilter=ordFilter,
+ )
+
+ schedule = rd_order.build_schedule(_wf, rm, estimator, best, mapMatrix, rankList, ordFilter)
+
+ Utility.validate_static_schedule(_wf, schedule)
+ makespan = Utility.makespan(schedule)
+ #print("Final makespan: {0}".format(makespan))
+ #print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
+ return makespan
+
+ def toolbox(self, mapMatrix, rankList, ordFilter):
+
+ _wf, rm, estimator = self.env()
+ heft_schedule = self.heft_schedule()
+
+
+
+ heft_particle = rd_order.generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter, heft_schedule)
+
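+        # note: random.random() > 1.00 is always False, so this population is generated without injecting the HEFT particle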
+ heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00 else rd_order.generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter) for _ in range(n)]
+
+        #def compound_update(w, c1, c2, p, best, pop, g):
+        def compound_update(w, c1, c2, p, best, pop):
+ #if g%2 == 0:
+ rd_map.update(w, c1, c2, p.mapping, best.mapping, pop)
+ #else:
+ rd_order.ordering_update(w, c1, c2, p.ordering, best.ordering, pop)
+
+ toolbox = Toolbox()
+ toolbox.register("population", heft_gen)
+ toolbox.register("fitness", rd_order.fitness, _wf, rm, estimator)
+ toolbox.register("update", componoud_update)
+ return toolbox
+
+
+ pass
+
+class OmpsoBaseExperiment(AbstractExperiment):
+
+ @staticmethod
+ def run(**kwargs):
+ inst = OmpsoBaseExperiment(**kwargs)
+ return inst()
+
+ def __init__(self, wf_name, W, C1, C2, GEN, N):
+ super().__init__(wf_name)
+
+ self.W = W
+ self.C1 = C1
+ self.C2 = C2
+ self.GEN = GEN
+ self.N = N
+ pass
+
+ def __call__(self):
+
+ toolbox, stats, logbook = self.toolbox(), self.stats(), self.logbook()
+ _wf, rm, estimator = self.env()
+ heft_schedule = self.heft_schedule()
+
+ pop, log, best = om.run_pso(
+ toolbox=toolbox,
+ logbook=logbook,
+ stats=stats,
+ gen_curr=0, gen_step=self.GEN, invalidate_fitness=True, initial_pop=None,
+ w=self.W, c1=self.C1, c2=self.C2, n=self.N,
+ )
+
+ schedule = om_order.build_schedule(_wf, rm, estimator, best)
+
+ Utility.validate_static_schedule(_wf, schedule)
+ makespan = Utility.makespan(schedule)
+ #print("Final makespan: {0}".format(makespan))
+ #print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
+ return makespan
+
+ def toolbox(self):
+
+ _wf, rm, estimator = self.env()
+ heft_schedule = self.heft_schedule()
+
+ heft_particle = om_order.generate(_wf, rm, estimator, heft_schedule)
+
+ heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00 else om_order.generate(_wf, rm, estimator) for _ in range(n)]
+
+        def compound_update(w, c1, c2, p, best, pop, min=-1, max=1):
+ om_map.update(w, c1, c2, p.mapping, best.mapping, pop)
+ om_order.ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)
+
+ toolbox = Toolbox()
+ toolbox.register("population", heft_gen)
+ toolbox.register("fitness", om_order.fitness, _wf, rm, estimator)
+ toolbox.register("update", componoud_update)
+ return toolbox
+
+
+ pass
+
+
+
+
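+# Compare OMPSO and RDPSO on the same workflow: each variant is repeated 100 times and the
+# interval statistics and relative profit are written to the result files.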
+if __name__ == "__main__":
+ wf_list = ["Inspiral_30"]
+ #wf_list = ["Montage_25", "Montage_50",# "Montage_100", #"Montage_250",
+ #"Epigenomics_24", "Epigenomics_46",# "Epigenomics_72", "Epigenomics_100",
+ #"CyberShake_30", "CyberShake_50",# "CyberShake_75", "CyberShake_100",
+ #"Inspiral_30", "Inspiral_50", "Sipht_30", "Sipht_60"]
+
+
+ #for (profit, trans) in profit_list:
+    # file.write(str(profit) + " " + str(trans) + "\n")
+ w = 0.1
+ c1 = 0.6
+ c2 = 0.2
+ gen = 300
+ n = 20
+ fileRes = open("F:\eScience\Work\experiments\OMPSOvsRDPSO\Sipht_30 c1_06 c2_02 gen_300 n_20 FILTER only RD TEST.txt", 'w')
+ fileInfo = open("F:\eScience\Work\experiments\OMPSOvsRDPSO\Sipht_30 c1_06 c2_02 gen_300 n_20 FILTER info only RD TEST.txt", 'w')
+
+ exp_om = OmpsoBaseExperiment(wf_name=wf_list[0],
+ W=w, C1=c1, C2=c2,
+ GEN=gen, N=n)
+ result2 = repeat(exp_om, 100)
+ sts2 = interval_statistics(result2)
+ #for w in [x / 10 for x in range(15)]:
+ for _ in [1]:
+ print(w)
+ wf_cur = wf_list[0]
+ print(wf_cur)
+ exp_rd = RdpsoBaseExperiment(wf_name=wf_cur,
+ W=0.1, C1=0.6, C2=0.2,
+ GEN=gen, N=n)
+
+ result1 = repeat(exp_rd, 100)
+ sts1 = interval_statistics(result1)
+
+
+ fileInfo.write("OM " + str(w) + " " + str(sts2) + "\n")
+ fileInfo.write("RD " + str(w) + " " + str(sts1) + "\n")
+ fileInfo.write("\n")
+ profit = (round((((sts2[0] / sts1[0]) - 1) * 100), 2))
+ fileRes.write(str(w) + " " + str(profit) + "\n")
+ print("profit = " + str(profit))
+ print(" RD " + str(sts1))
+ print(" OM " + str(sts2))
+ #res_list[iter] = (round((((result2[0] / result1[0]) - 1) * 100), 2))
+ # result = exp()
+ #sts = interval_statistics(result)
+ #print("Statistics: {0}".format(interval_stat_string(sts)))
+ #print("Average: {0}".format(numpy.mean(result)))
+
+
+
+ pass
+
+
+
+
diff --git a/src/experiments/comparison_experiments/OMPSOvsRDPSO_onlyOrd.py b/src/experiments/comparison_experiments/OMPSOvsRDPSO_onlyOrd.py
new file mode 100644
index 0000000..ca90cc8
--- /dev/null
+++ b/src/experiments/comparison_experiments/OMPSOvsRDPSO_onlyOrd.py
@@ -0,0 +1,159 @@
+from copy import deepcopy
+import random
+from deap.base import Toolbox
+import numpy
+#----
+#from src.algs.pso.rdpso.ordering_operators import build_schedule, generate, ordering_update, fitness
+import src.algs.pso.ordering_operators as om_order
+import src.algs.pso.rdpso.ordering_operators as rd_order
+#from src.algs.pso.rdpso.rdpso import run_pso, initMapMatrix, initRankList
+import src.algs.pso.sdpso as om
+import src.algs.pso.rdpso.rdpso as rd
+#from src.algs.pso.rdpso.mapping_operators import update as mapping_update
+import src.algs.pso.mapping_operators as om_map
+import src.algs.pso.rdpso.mapping_operators as rd_map
+#---
+from src.core.environment.Utility import Utility
+from src.experiments.aggregate_utilities import interval_statistics, interval_stat_string
+from src.experiments.cga.utilities.common import repeat
+from src.experiments.pso.ompso_base_experiment import OmpsoBaseExperiment
+from src.experiments.pso.rdpso_base_experiment_ordering import RdpsoBaseExperiment
+from src.experiments.comparison_experiments.HeftOnly import do_exp
+from src.core.environment.Utility import wf
+import src.experiments.pso.rdpso_base_experiment
+from src.experiments.common import AbstractExperiment
+from src.algs.heft.HeftHelper import HeftHelper
+import math
+
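+# Average data-to-compute rate over the jobs: the mean over all jobs of (total input file size / runtime).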
+def get_data_rate(jobslist):
+ jobs_copy = jobslist.copy()
+ total_job_rate = 0
+ total_runtime = 0
+ total_datasize = 0
+ for it in range(len(jobs_copy)):
+ job = jobs_copy.pop()
+ cur_datasize = 0
+ for file in job.input_files.items():
+ cur_datasize = cur_datasize + file[1].size
+ total_job_rate = total_job_rate + (cur_datasize / job.runtime)
+ total_runtime = total_runtime + job.runtime
+ total_datasize = total_datasize + cur_datasize
+ total_job_rate = total_job_rate / len(jobslist)
+ total_runtime = total_runtime / len(jobslist)
+ total_datasize = total_datasize / len(jobslist)
+
+ #return total_datasize / total_runtime
+ return total_job_rate
+
+if __name__ == "__main__":
+ wf_list = ["Montage_100"]
+
+ #wf_list = ["Montage_25", "Montage_50",# "Montage_100", #"Montage_250",
+ # "Epigenomics_24",# "Epigenomics_46",# "Epigenomics_72", "Epigenomics_100",
+ # "CyberShake_30",# "CyberShake_50",# "CyberShake_75", "CyberShake_100",
+ # "Inspiral_30",# "Inspiral_50",
+ # "Sipht_30"]#, "Sipht_60"]
+
+ #for (profit, trans) in profit_list:
+    # file.write(str(profit) + " " + str(trans) + "\n")
+ w = 0.2
+ c1 = 0.6
+ c2 = 0.2
+ gen = 5000
+ n = 100
+ execCount = 12
+ #fileRes = open("C:\Melnik\Experiments\Work\PSO_compare\clean 300 20 (0.1 0.6 0.2).txt", 'w')
+ #fileInfo = open("C:\Melnik\Experiments\Work\PSO_compare\ data_intensive_average 0.2 0.6 0.2 500 50 info.txt", 'w')
+ #fileInfo.write(str(w) + " " + str(c1) + " " + str(c2) + " " + str(gen) + " " + str(n) + "\n")
+ #fileRes.write("# heftProfit profit" + "\n")
+ for wf_cur in wf_list:
+ print(wf_cur)
+ #wf_dag = HeftHelper.convert_to_parent_children_map(wf(wf_cur))
+ #jobs = set(wf_dag.keys()) | set(x for xx in wf_dag.values() for x in xx)
+ #data_intensive = 3100000 / get_data_rate(jobs) * 100
+ #data_intensive = 100
+ #print(data_intensive)
+
+ #curFileOM = open("C:\Melnik\Experiments\Work\PSO_compare\\result\\" + wf_cur + "\\" + "OM w_0.2 c1_0.6 c2_test gen_300 n_20.txt", 'w')
+ #curFileRD = open("C:\Melnik\Experiments\Work\PSO_compare\\result\\" + wf_cur + "\\" + "RD w_0.2 c1_0.6 c2_test gen_300 n_20.txt", 'w')
+ #curFileRD2 = open("C:\Melnik\Experiments\Work\PSO_compare\\result\\" + wf_cur + "\\" + "RD2 w_0.2 c1_0.6 c2_test gen_300 n_20.txt", 'w')
+ #curFile.write("#C1 OM RD RDwithMap Heft" + "\n")
+ #for wcur in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
+ #c2 = wcur
+ #print(c2)
+
+ exp_om = OmpsoBaseExperiment(wf_name=wf_cur,
+ W=w, C1=c1, C2=c2,
+ GEN=gen, N=n, data_intensive=100)
+ result2 = repeat(exp_om, execCount)
+ sts2 = interval_statistics([makespan for (makespan, pop) in result2])
+
+ #print(" om finish")
+
+
+ exp_rd = RdpsoBaseExperiment(wf_name=wf_cur,
+ W=w, C1=c1, C2=c2,
+ GEN=gen, N=n, data_intensive=100)
+ result1 = repeat(exp_rd, execCount)
+ sts1 = interval_statistics([makespan for (makespan, pop) in result1])
+
+ #print(" rd finish")
+ """
+ exp_rdM = src.experiments.pso.rdpso_base_experiment.RdpsoBaseExperiment(wf_name=wf_cur,
+ W=w, C1=c1, C2=c2,
+ GEN=gen, N=n)
+ result3 = repeat(exp_rdM, execCount)
+ sts3 = interval_statistics(result3)
+
+ print(" rd2 finish")
+ """
+ heftRes = do_exp(wf_cur, 100)
+
+ #fileInfo.write(wf_cur + "\n")
+ #fileInfo.write("HEFT " + str(heftRes) + "\n")
+ #fileInfo.write("OM " + str(sts2) + "\n")
+ #fileInfo.write("RD " + str(sts1) + "\n")
+ #fileInfo.write("RDwithMap " + str(sts3) + "\n")
+ #fileInfo.write("\n")
+ #profit = (round((((sts2[0] / sts1[0]) - 1) * 100), 2))
+ #profit2 = (round((((sts2[0] / sts3[0]) - 1) * 100), 2))
+ #heftProfit = (round((((heftRes / sts1[0]) - 1) * 100), 2))
+ #fileRes.write("# " + wf_cur + "\n")
+ #fileRes.write(str(profit) + " " + str(heftProfit) + "\n")
+
+ #curFile.write("# OM RD" + "\n")
+ #for it in range(execCount):
+ # curFile.write(str(round(result2[it], 2)) + " " + str(round(result1[it],2)) + "\n")
+
+ #curFileOM.write(str(c2) + " " + str(sts2[0]) + " " + str(sts2[1]) + " " + str(sts2[2]) + " " + str(heftRes) + "\n")
+ #curFileRD.write(str(c2) + " " + str(sts1[0]) + " " + str(sts1[1]) + " " + str(sts1[2]) + " " + str(heftRes) + "\n")
+ #curFileRD2.write(str(c2) + " " + str(sts3[0]) + " " + str(sts3[1]) + " " + str(sts3[2]) + " " + str(heftRes) + "\n")
+
+
+
+ #print("profit = " + str(profit))
+ #print("profitM = " + str(profit2))
+ #print("heftProfit = " + str(heftProfit))
+ print(" HEFT " + str(heftRes))
+ print(" OM " + str(sts2))
+ print(" RD " + str(sts1))
+ #print(" RD2 " + str(sts3))
+
+ #curFileOM.close()
+ #curFileRD.close()
+ #curFileRD2.close()
+ #res_list[iter] = (round((((result2[0] / result1[0]) - 1) * 100), 2))
+ # result = exp()
+ #sts = interval_statistics(result)
+ #print("Statistics: {0}".format(interval_stat_string(sts)))
+ #print("Average: {0}".format(numpy.mean(result)))
+
+
+ print("\n" + " FINISH!!!!")
+ pass
+
+
+
+
+
+
diff --git a/src/experiments/ga/ga_base_experiment.py b/src/experiments/ga/ga_base_experiment.py
new file mode 100644
index 0000000..fde0d95
--- /dev/null
+++ b/src/experiments/ga/ga_base_experiment.py
@@ -0,0 +1,150 @@
+from copy import deepcopy
+from functools import partial
+from statistics import mean
+import uuid
+from deap import tools
+from deap.base import Toolbox
+import numpy
+from src.algs.ga.GAFunctions2 import GAFunctions2
+from src.algs.ga.common_fixed_schedule_schema import run_ga, fit_converter
+from src.algs.heft.DSimpleHeft import run_heft
+from src.core.CommonComponents.ExperimentalManager import ExperimentResourceManager, ModelTimeEstimator
+from src.core.environment.ResourceManager import Schedule
+from src.core.environment.Utility import wf, Utility
+from src.core.environment.ResourceGenerator import ResourceGenerator as rg
+from src.algs.ga.common_fixed_schedule_schema import generate as ga_generate
+from src.algs.common.utilities import unzip_result
+from src.core.CommonComponents.utilities import repeat
+from src.experiments.common import AbstractExperiment
+
+
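+# GA experiment over an empty fixed schedule part: a HEFT schedule seeds a small share of the
+# initial population (init_sched_percent) and run_ga evolves it with roulette selection.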
+class GABaseExperiment(AbstractExperiment):
+
+ @staticmethod
+ def run(**kwargs):
+ inst = GABaseExperiment(**kwargs)
+ return inst()
+
+ def __init__(self, ga_params=None):
+ wf_name = "Montage_25"
+ GA_PARAMS = {
+ "kbest": 5,
+ "n": 25,
+ "cxpb": 0.3, # 0.8
+ "mutpb": 0.9, # 0.5
+ "sweepmutpb": 0.3, # 0.4
+ "gen_curr": 0,
+ "gen_step": 300,
+ "is_silent": False
+ }
+ if ga_params is None:
+ self.GA_PARAMS = GA_PARAMS
+ else:
+ self.GA_PARAMS = ga_params
+ self.wf_name = wf_name
+
+ def __call__(self):
+ _wf = wf(self.wf_name)
+ rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
+ estimator = ModelTimeEstimator(bandwidth=10)
+
+ empty_fixed_schedule_part = Schedule({node: [] for node in rm.get_nodes()})
+
+ heft_schedule = run_heft(_wf, rm, estimator)
+
+ fixed_schedule = empty_fixed_schedule_part
+
+ ga_functions = GAFunctions2(_wf, rm, estimator)
+
+ generate = partial(ga_generate, ga_functions=ga_functions,
+ fixed_schedule_part=fixed_schedule,
+ current_time=0.0, init_sched_percent=0.05,
+ initial_schedule=heft_schedule)
+
+ stats = tools.Statistics(lambda ind: ind.fitness.values[0])
+ stats.register("avg", numpy.mean)
+ stats.register("std", numpy.std)
+ stats.register("min", numpy.min)
+ stats.register("max", numpy.max)
+
+ logbook = tools.Logbook()
+ logbook.header = ["gen", "evals"] + stats.fields
+
+ toolbox = Toolbox()
+ toolbox.register("generate", generate)
+ toolbox.register("evaluate", fit_converter(ga_functions.build_fitness(empty_fixed_schedule_part, 0.0)))
+ toolbox.register("clone", deepcopy)
+ toolbox.register("mate", ga_functions.crossover)
+ toolbox.register("sweep_mutation", ga_functions.sweep_mutation)
+ toolbox.register("mutate", ga_functions.mutation)
+ # toolbox.register("select_parents", )
+ # toolbox.register("select", tools.selTournament, tournsize=4)
+ toolbox.register("select", tools.selRoulette)
+ pop, logbook, best = run_ga(toolbox=toolbox,
+ logbook=logbook,
+ stats=stats,
+ **self.GA_PARAMS)
+
+ resulted_schedule = ga_functions.build_schedule(best, empty_fixed_schedule_part, 0.0)
+
+ ga_makespan = Utility.makespan(resulted_schedule)
+ return (ga_makespan, logbook)
+
+
+def fix_schedule(res, heft):
+    for node in heft.mapping:
+        # list.append returns None, so the original append-and-assign wiped the mapping;
+        # merge the HEFT items for this node into the result instead (mapping values are assumed to be lists)
+        res.mapping[node].extend(heft.mapping[node])
+    return res
+
+if __name__ == "__main__":
+ exp = GABaseExperiment()
+ repeat_count = 1
+ result, logbooks = unzip_result(repeat(exp, repeat_count))
+ # logbook = logbooks_in_data(logbooks)
+ #data_to_file("./CyberShake_30_full.txt", 300, logbook)
+ print(result)
+
+# if __name__ == "__main__":
+#
+# base_ga_params = {
+# "kbest": 5,
+# "n": 5,
+# "cxpb": 0.3, # 0.8
+# "mutpb": 0.9, # 0.5
+# "sweepmutpb": 0.3, # 0.4
+# "gen_curr": 0,
+# "gen_step": 5,
+# "is_silent": False
+# }
+#
+# param_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
+# params_names = ["cxpb", "mutpb", "sweepmutpb"]
+#
+# ga_params = deepcopy(base_ga_params)
+# ga_params["cxpb"] = 0.4
+# ga_params["mutpb"] = 0.5
+# ga_params["sweepmutpb"] = 0.3
+# # def buildGaParams(cxpb, mutpb, sweepmutpb):
+# # ga_params = deepcopy(base_ga_params)
+# # ga_params["cxpb"] = cxpb
+# # ga_params["mutpb"] = mutpb
+# # ga_params["sweepmutpb"] = sweepmutpb
+# # return ga_params
+# #
+# #
+# # gaParamsSets = [buildGaParams(cxpb, mutpb, sweepmutpb) for cxpb in param_values
+# # for mutpb in param_values
+# # for sweepmutpb in param_values]
+#
+# # for ga_params in gaParamsSets:
+# exp = GABaseExperiment(ga_params)
+# print("cxpb: {0}, mutpb: {1}, sweepmutpb: {2}".format(ga_params["cxpb"],
+# ga_params["mutpb"],
+# ga_params["sweepmutpb"]))
+# repeat_count = 1
+# makespans, logbooks = unzip_result(repeat(exp, repeat_count))
+# out_line = "{0}\t{1}\t{2}\t{3}".format(ga_params["cxpb"],
+# ga_params["mutpb"],
+# ga_params["sweepmutpb"],
+# mean(makespans))
+# print(makespans)
\ No newline at end of file
diff --git a/src/experiments/gsa/gsa_base_experiment.py b/src/experiments/gsa/gsa_base_experiment.py
new file mode 100644
index 0000000..8af35c7
--- /dev/null
+++ b/src/experiments/gsa/gsa_base_experiment.py
@@ -0,0 +1,96 @@
+from copy import deepcopy
+import random
+
+from deap.base import Toolbox
+from deap.tools import Statistics, Logbook
+import numpy
+
+from src.algs.gsa.SimpleGsaScheme import run_gsa
+from src.algs.gsa.operators import G, Kbest
+from src.algs.gsa.ordering_mapping_operators import force, mapping_update, ordering_update, CompoundParticle, generate
+from src.algs.heft.DSimpleHeft import run_heft
+from src.algs.pso.ordering_operators import fitness, build_schedule
+from src.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager
+from src.core.environment.ResourceGenerator import ResourceGenerator as rg
+from src.core.environment.Utility import wf, Utility
+from src.experiments.cga.mobjective.utility import SimpleTimeCostEstimator
+from src.experiments.cga.utilities.common import repeat
+
+
+_wf = wf("Montage_50")
+rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
+estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
+ ideal_flops=20, transfer_time=100)
+heft_schedule = run_heft(_wf, rm, estimator)
+
+
+heft_particle = generate(_wf, rm, estimator, heft_schedule)
+
+heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator) for _ in range(n)]
+
+# def heft_gen(n):
+# heft_count = int(n*0.05)
+# pop = [deepcopy(heft_particle) for _ in range(heft_count)]
+# for _ in range(n - heft_count):
+# variant = gen()
+# hp = deepcopy(heft_particle)
+# variant.ordering = hp.ordering
+# pop.append(variant)
+# return pop
+
+pop_size = 50
+iter_number = 300
+kbest = pop_size
+ginit = 10
+W, C = 0.2, 0.5
+
+
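+# Apply the GSA force and the velocity/position update separately to the mapping
+# and the ordering components of each compound particle.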
+def compound_force(p, pop, kbest, G):
+ mapping_force = force(p.mapping, (p.mapping for p in pop), kbest, G)
+ ordering_force = force(p.ordering, (p.ordering for p in pop), kbest, G)
+ return (mapping_force, ordering_force)
+
+def compound_update(w, c, p, min=-1, max=1):
+ mapping_update(w, c, p.mapping)
+ ordering_update(w, c, p.ordering, min, max)
+ pass
+
+
+toolbox = Toolbox()
+# toolbox.register("generate", generate, _wf, rm, estimator)
+toolbox.register("generate", heft_gen)
+toolbox.register("fitness", fitness, _wf, rm, estimator)
+toolbox.register("estimate_force", compound_force)
+toolbox.register("update", compound_update, W, C)
+toolbox.register("G", G)
+toolbox.register("kbest", Kbest)
+
+stats = Statistics()
+stats.register("min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
+stats.register("avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
+stats.register("max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
+stats.register("std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))
+
+logbook = Logbook()
+logbook.header = ["gen", "G", "kbest"] + stats.fields
+
+
+
+
+
+def do_exp():
+    pop, _logbook, best = run_gsa(toolbox, stats, logbook, pop_size, 0, iter_number, None, kbest, ginit, w=W, c=C)
+
+ schedule = build_schedule(_wf, rm, estimator, best)
+ Utility.validate_static_schedule(_wf, schedule)
+ makespan = Utility.makespan(schedule)
+ print("Final makespan: {0}".format(makespan))
+ print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
+ return makespan
+
+
+if __name__ == "__main__":
+ # result = repeat(do_exp, 5)
+ result = do_exp()
+ print(result)
+ pass
diff --git a/src/experiments/gsa/omgsa_base_experiment.py b/src/experiments/gsa/omgsa_base_experiment.py
new file mode 100644
index 0000000..a3d8626
--- /dev/null
+++ b/src/experiments/gsa/omgsa_base_experiment.py
@@ -0,0 +1,100 @@
+from copy import deepcopy
+import random
+from deap.base import Toolbox
+import numpy
+from src.algs.gsa.SimpleGsaScheme import run_gsa
+from src.algs.gsa.operators import G, Kbest
+from src.algs.gsa.setbasedoperators import mapping_velocity_and_position, mapping_force_vector_matrix
+from src.algs.pso.ordering_operators import build_schedule, generate, ordering_update, fitness
+
+from src.algs.pso.sdpso import run_pso
+from src.algs.pso.sdpso import update as mapping_update
+from src.core.environment.Utility import Utility
+from src.experiments.cga.utilities.common import repeat
+from src.experiments.common import AbstractExperiment
+
+
+class OmgsaBaseExperiment(AbstractExperiment):
+
+ @staticmethod
+ def run(**kwargs):
+ inst = OmgsaBaseExperiment(**kwargs)
+ return inst()
+
+ def __init__(self, wf_name, KBEST, G, GEN, N):
+ super().__init__(wf_name)
+
+ self.KBEST = KBEST
+ self.G = G
+ self.GEN = GEN
+ self.N = N
+ pass
+
+ def __call__(self):
+
+ toolbox, stats, logbook = self.toolbox(), self.stats(), self.logbook()
+ _wf, rm, estimator = self.env()
+ heft_schedule = self.heft_schedule()
+
+ pop, log, best = run_gsa(
+ toolbox=toolbox,
+ logbook=logbook,
+ statistics=stats,
+ n=self.N,
+ iter_number=self.GEN,
+ kbest=self.KBEST,
+ ginit=self.G
+ )
+
+ schedule = build_schedule(_wf, rm, estimator, best)
+
+ Utility.validate_static_schedule(_wf, schedule)
+ makespan = Utility.makespan(schedule)
+ print("Final makespan: {0}".format(makespan))
+ print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
+ return makespan
+
+ def toolbox(self):
+
+ _wf, rm, estimator = self.env()
+ heft_schedule = self.heft_schedule()
+
+ heft_particle = generate(_wf, rm, estimator, heft_schedule)
+
+ heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator) for _ in range(n)]
+
+        def compound_update(w, c1, c2, p, best, pop, min=-1, max=1):
+ mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
+ ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)
+
+ def compound_force_vector_matrix():
+ raise NotImplementedError()
+
+        def compound_velocity_and_position():
+ raise NotImplementedError()
+
+
+ toolbox = Toolbox()
+ toolbox.register("generate", heft_gen)
+ toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)
+ toolbox.register("force_vector_matrix", compound_force_vector_matrix)
+ toolbox.register("velocity_and_position", compound_velocity_and_postion, beta=0.0)
+ toolbox.register("G", G)
+ toolbox.register("kbest", Kbest)
+ return toolbox
+
+
+ pass
+
+
+if __name__ == "__main__":
+    # note: KBEST/G below are illustrative values chosen to mirror gsa_base_experiment
+    # (kbest equal to the population size, ginit = 10)
+    exp = OmgsaBaseExperiment(wf_name="Montage_100",
+                              KBEST=100, G=10,
+                              GEN=300, N=100)
+ # result = repeat(exp, 5)
+ result = exp()
+ print(result)
+ print("Average: {0}".format(numpy.mean(result)))
+ pass
+
+
diff --git a/src/experiments/pso/gapso_base_experiment.py b/src/experiments/pso/gapso_base_experiment.py
new file mode 100644
index 0000000..1d4ade3
--- /dev/null
+++ b/src/experiments/pso/gapso_base_experiment.py
@@ -0,0 +1,164 @@
+from copy import deepcopy
+from functools import partial
+import random
+from deap import tools
+from deap import creator
+from deap.base import Toolbox
+import numpy
+from src.algs.SimpleRandomizedHeuristic import SimpleRandomizedHeuristic
+from src.algs.common.individuals import FitAdapter, FitnessStd
+from src.algs.ga.coevolution.cga import Env
+from src.algs.ga.coevolution.operators import ordering_default_crossover, ordering_default_mutate, \
+ ordering_heft_based_initialize
+from src.algs.ga.nsga2 import run_nsga2
+from src.algs.heft.DSimpleHeft import run_heft
+from src.algs.heft.HeftHelper import HeftHelper
+from src.algs.pso.gapso import run_gapso
+from src.algs.pso.sdpso import run_pso, update, schedule_to_position, construct_solution, MappingParticle, \
+ Velocity, Position
+from src.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager
+from src.core.environment.Utility import Utility, wf
+from src.algs.common.MapOrdSchedule import build_schedule, MAPPING_SPECIE, ORDERING_SPECIE, ordering_from_schedule, \
+ mapping_from_schedule
+from src.experiments.cga.mobjective.utility import SimpleTimeCostEstimator
+from src.core.environment.ResourceGenerator import ResourceGenerator as rg
+from src.algs.common.MapOrdSchedule import fitness as basefitness
+
+_wf = wf("Montage_50")
+rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
+estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
+ ideal_flops=20, transfer_time=100)
+sorted_tasks = HeftHelper.heft_rank(_wf, rm, estimator)
+
+heft_schedule = run_heft(_wf, rm, estimator)
+
+print(Utility.makespan(heft_schedule))
+
+
+
+
+stats = tools.Statistics(lambda ind: ind.fitness.values[0])
+stats.register("avg", numpy.mean)
+stats.register("std", numpy.std)
+stats.register("min", numpy.min)
+stats.register("max", numpy.max)
+
+logbook = tools.Logbook()
+logbook.header = ["gen", "evals"] + stats.fields
+
+
+heft_mapping = mapping_from_schedule(heft_schedule)
+heft_ordering = ordering_from_schedule(heft_schedule)
+
+
+
+
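+# hybrid GAPSO: the ga partial (NSGA-II with ordering crossover/mutation) and the pso partial
+# (mapping position update) are both handed to run_gapso below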
+# common params
+GEN, N = 1000, 30
+# pso params
+W, C1, C2 = 0.0, 0.3, 0.3
+# ga params
+CXPB, MU = 0.1, N
+
+class ParticleIndividual(FitAdapter):
+    def __init__(self, entity):
+        # assumption: FitAdapter wraps the given entity (the mapping position);
+        # the original __init__ took no entity and assigned locals instead of attributes
+        super().__init__(entity)
+        self.velocity = Velocity({})
+        self.best = None
+        self.ordering = None
+        pass
+
+
+heft_particle = ParticleIndividual(Position(heft_mapping))
+heft_particle.ordering = heft_ordering
+
+def generate(n):
+ schedules = [SimpleRandomizedHeuristic(_wf, rm.get_nodes(), estimator).schedule() for _ in range(n)]
+ mapping_positions = [schedule_to_position(s).entity for s in schedules]
+ ordering_individuals = [ordering_from_schedule(s) for s in schedules]
+ pop = []
+ for mp, os in zip(mapping_positions, ordering_individuals):
+ p = ParticleIndividual(Position(mp))
+ p.ordering = os
+ pop.append(p)
+ return pop
+
+
+def population(n):
+ return [deepcopy(heft_particle) if random.random() > 0.95 else generate(1)[0] for _ in range(n)]
+
+
+def mutate(ind):
+ os = ind.ordering
+ ordering_default_mutate({'env': Env(wf=_wf, rm=rm, estimator=estimator)}, os)
+ child = ParticleIndividual(ind.entity)
+ child.ordering = os
+ return child,
+
+
+def mate(ind1, ind2):
+ ch1_os = ind1.ordering
+ ch2_os = ind2.ordering
+ ordering_default_crossover(None, ch1_os, ch2_os)
+ child1 = ParticleIndividual(ind1.entity)
+ child1.ordering = ch1_os
+ child2 = ParticleIndividual(ind2.entity)
+ child2.ordering = ch2_os
+ return child1, child2
+
+
+def fitness(particleind):
+ position = particleind.entity
+ ordering = particleind.ordering
+ solution = construct_solution(position, ordering)
+
+ sched = build_schedule(_wf, estimator, rm, solution)
+ makespan = Utility.makespan(sched)
+ ## TODO: make a real estimation later
+ fit = FitnessStd(values=(makespan, 0.0))
+ ## TODO: make a normal multi-objective fitness estimation
+ fit.mofit = makespan
+    return fit
+
+    # unreachable alternative kept for reference:
+    # return basefitness(_wf, rm, estimator, solution)
+
+
+toolbox = Toolbox()
+# common functions
+toolbox.register("map", map)
+toolbox.register("clone", deepcopy)
+toolbox.register("population", population)
+toolbox.register("fitness", fitness)
+
+# pso functions
+toolbox.register("update", update)
+# ga functions
+toolbox.register("mutate", mutate)
+toolbox.register("mate", mate)
+toolbox.register("select", tools.selNSGA2)
+
+ga = partial(run_nsga2, toolbox=toolbox, logbook=None, stats=None,
+ n=N, crossover_probability=CXPB, mutation_probability=MU)
+
+pso = partial(run_pso, toolbox=toolbox, logbook=None, stats=None,
+ invalidate_fitness=False,
+ w=W, c1=C1, c2=C2, n=N)
+
+
+def do_exp():
+ pop, log, best = run_gapso(
+ toolbox=toolbox,
+ logbook=logbook,
+ stats=stats,
+ gen=GEN, n=N, ga=ga, pso=pso
+ )
+
+ best_position = best.entity
+ solution = construct_solution(best_position, sorted_tasks)
+ schedule = build_schedule(_wf, estimator, rm, solution)
+ makespan = Utility.makespan(schedule)
+ print("Final makespan: {0}".format(makespan))
+ pass
+
+if __name__ == "__main__":
+ do_exp()
+ pass
diff --git a/src/experiments/pso/ompso_base_experiment.py b/src/experiments/pso/ompso_base_experiment.py
new file mode 100644
index 0000000..2d6e093
--- /dev/null
+++ b/src/experiments/pso/ompso_base_experiment.py
@@ -0,0 +1,116 @@
+from copy import deepcopy
+import random
+from deap.base import Toolbox
+import numpy
+from src.algs.pso.ordering_operators import build_schedule, generate, ordering_update, fitness
+
+from src.algs.pso.sdpso import run_pso
+from src.algs.pso.mapping_operators import update as mapping_update
+from src.core.environment.Utility import Utility
+from src.experiments.aggregate_utilities import interval_statistics, interval_stat_string
+from src.experiments.cga.utilities.common import repeat
+from src.experiments.common import AbstractExperiment
+from src.algs.heft.HeftHelper import HeftHelper
+
+class OmpsoBaseExperiment(AbstractExperiment):
+
+ @staticmethod
+ def run(**kwargs):
+ inst = OmpsoBaseExperiment(**kwargs)
+ return inst()
+
+ def __init__(self, wf_name, W, C1, C2, GEN, N, data_intensive):
+ super().__init__(wf_name)
+
+ self.W = W
+ self.C1 = C1
+ self.C2 = C2
+ self.GEN = GEN
+ self.N = N
+ self.data_intensive = data_intensive
+ pass
+
+ def __call__(self):
+
+        stats, logbook = self.stats(), self.logbook()
+        _wf, rm, estimator = self.env()
+
+        # configure the estimator before building the toolbox, which needs the transfer value
+        estimator.transfer_time = self.data_intensive
+        toolbox = self.toolbox(self.data_intensive)
+ heft_schedule = self.heft_schedule()
+
+ pop, log, best, logbook = run_pso(
+ toolbox=toolbox,
+ logbook=logbook,
+ stats=stats,
+ gen_curr=0, gen_step=self.GEN, invalidate_fitness=True, initial_pop=None,
+ w=self.W, c1=self.C1, c2=self.C2, n=self.N,
+ )
+
+ schedule = build_schedule(_wf, rm, estimator, best)
+
+ Utility.validate_static_schedule(_wf, schedule)
+ makespan = Utility.makespan(schedule)
+ #print("Final makespan: {0}".format(makespan))
+ #print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
+ return makespan
+
+
+ def toolbox(self, transfer):
+
+ _wf, rm, estimator = self.env()
+ estimator.transfer_time = transfer
+ heft_schedule = self.heft_schedule()
+
+ heft_particle = generate(_wf, rm, estimator, heft_schedule)
+
+ heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator) for _ in range(n-1)] + [deepcopy(heft_particle)])
+ #heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator) for _ in range(n)])
+
+        def compound_update(w, c1, c2, p, best, pop, min=-1, max=1):
+ #doMap = random.random()
+ #if doMap < 0.5:
+ mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
+ ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)
+
+ toolbox = Toolbox()
+ toolbox.register("population", heft_gen)
+ toolbox.register("fitness", fitness, _wf, rm, estimator)
+ toolbox.register("update", componoud_update)
+ return toolbox
+
+
+ pass
+
+def get_data_rate(jobslist):
+ jobs_copy = jobslist.copy()
+ total_job_rate = 0
+ total_runtime = 0
+ total_datasize = 0
+ for it in range(len(jobs_copy)):
+ job = jobs_copy.pop()
+ cur_datasize = 0
+ for file in job.input_files.items():
+ cur_datasize = cur_datasize + file[1].size
+ total_job_rate = total_job_rate + (cur_datasize / job.runtime)
+ total_runtime = total_runtime + job.runtime
+ total_datasize = total_datasize + cur_datasize
+ total_job_rate = total_job_rate / len(jobslist)
+ total_runtime = total_runtime / len(jobslist)
+ total_datasize = total_datasize / len(jobslist)
+
+ return total_job_rate
+
+
+if __name__ == "__main__":
+ exp = OmpsoBaseExperiment(wf_name="_30",
+ W=0.5, C1=1.6, C2=1.2,
+ GEN=100, N=50, data_intensive=100)
+ result = repeat(exp, 8)
+ print(result)
+ sts = interval_statistics(result)
+ print("Statistics: {0}".format(interval_stat_string(sts)))
+ print("Average: {0}".format(numpy.mean(result)))
+ pass
+
diff --git a/src/experiments/pso/rdpso_base_experiment.py b/src/experiments/pso/rdpso_base_experiment.py
new file mode 100644
index 0000000..9c9c65e
--- /dev/null
+++ b/src/experiments/pso/rdpso_base_experiment.py
@@ -0,0 +1,107 @@
+from copy import deepcopy
+import random
+from deap.base import Toolbox
+import numpy
+from src.algs.pso.rdpso.ordering_operators import build_schedule, generate, ordering_update, fitness
+
+from src.algs.pso.rdpso.rdpso import run_pso, initMapMatrix, initRankList, filterList
+from src.algs.pso.rdpso.mapping_operators import update as mapping_update
+from src.core.environment.Utility import Utility
+from src.experiments.aggregate_utilities import interval_statistics, interval_stat_string
+from src.experiments.cga.utilities.common import repeat
+from src.experiments.common import AbstractExperiment
+from src.algs.heft.HeftHelper import HeftHelper
+
+
+class RdpsoBaseExperiment(AbstractExperiment):
+
+ @staticmethod
+ def run(**kwargs):
+ inst = RdpsoBaseExperiment(**kwargs)
+ return inst()
+
+ def __init__(self, wf_name, W, C1, C2, GEN, N):
+ super().__init__(wf_name)
+
+ self.W = W
+ self.C1 = C1
+ self.C2 = C2
+ self.GEN = GEN
+ self.N = N
+
+ pass
+
+ def __call__(self):
+
+ stats, logbook = self.stats(), self.logbook()
+ _wf, rm, estimator = self.env()
+ estimator.transfer_time = 500
+ heft_schedule = self.heft_schedule()
+
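+        # precompute the RDPSO structures for this workflow: task-to-node map matrix, rank list and ordering filter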
+ wf_dag = HeftHelper.convert_to_parent_children_map(_wf)
+ jobs = set(wf_dag.keys()) | set(x for xx in wf_dag.values() for x in xx)
+ nodes = rm.get_nodes()
+ mapMatrix = initMapMatrix(jobs, nodes, estimator)
+ rankList = initRankList(wf_dag, nodes, estimator)
+ ordFilter = filterList(_wf)
+
+ toolbox = self.toolbox(mapMatrix, rankList, ordFilter)
+
+ pop, log, best = run_pso(
+ toolbox=toolbox,
+ logbook=logbook,
+ stats=stats,
+ gen_curr=0, gen_step=self.GEN, invalidate_fitness=True, initial_pop=None,
+ w=self.W, c1=self.C1, c2=self.C2, n=self.N, rm=rm, wf=_wf, estimator=estimator, mapMatrix=mapMatrix, rankList=rankList, ordFilter=ordFilter,
+ )
+ #print(str(best.fitness))
+ schedule = build_schedule(_wf, rm, estimator, best, mapMatrix, rankList, ordFilter)
+ Utility.validate_static_schedule(_wf, schedule)
+ makespan = Utility.makespan(schedule)
+ if makespan > best.fitness.values[0]:
+ print("DANGER!!!!!!!!!!!!!!!!!")
+ print("Final makespan: {0}".format(makespan))
+ print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
+ return makespan
+
+ def toolbox(self, mapMatrix, rankList, ordFilter):
+
+ _wf, rm, estimator = self.env()
+ estimator.transfer_time = 500
+ heft_schedule = self.heft_schedule()
+
+
+
+ heft_particle = generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter, heft_schedule)
+ heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter) for _ in range(n-1)] + [deepcopy(heft_particle)])
+ #heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter) for _ in range(n)]
+
+        def compound_update(w, c1, c2, p, best, pop):
+ #doMap = random.random()
+ #if doMap < 0.1:
+ mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
+ #else:
+ ordering_update(w, c1, c2, p.ordering, best.ordering, pop)
+
+ toolbox = Toolbox()
+ toolbox.register("population", heft_gen)
+ toolbox.register("fitness", fitness, _wf, rm, estimator)
+ toolbox.register("update", componoud_update)
+ return toolbox
+
+
+ pass
+
+
+if __name__ == "__main__":
+ exp = RdpsoBaseExperiment(wf_name="Epigenomics_24",
+ W=0.2, C1=0.5, C2=0.5,
+ GEN=300, N=20)
+ result = repeat(exp, 60)
+ print(result)
+ # result = exp()
+ sts = interval_statistics(result)
+ print("Statistics: {0}".format(interval_stat_string(sts)))
+ print("Average: {0}".format(numpy.mean(result)))
+ pass
+
diff --git a/src/experiments/pso/rdpso_base_experiment_ordering.py b/src/experiments/pso/rdpso_base_experiment_ordering.py
new file mode 100644
index 0000000..88f3335
--- /dev/null
+++ b/src/experiments/pso/rdpso_base_experiment_ordering.py
@@ -0,0 +1,100 @@
+from copy import deepcopy
+import random
+from deap.base import Toolbox
+import numpy
+from src.algs.pso.rdpsoOrd.ordering_operators import build_schedule, generate, ordering_update, fitness
+
+from src.algs.pso.rdpsoOrd.rdpso import run_pso, initRankList
+from src.algs.pso.rdpsoOrd.mapping_operators import update as mapping_update
+from src.core.environment.Utility import Utility
+from src.experiments.aggregate_utilities import interval_statistics, interval_stat_string
+from src.experiments.cga.utilities.common import repeat
+from src.experiments.common import AbstractExperiment
+from src.algs.heft.HeftHelper import HeftHelper
+
+
+class RdpsoBaseExperiment(AbstractExperiment):
+
+ @staticmethod
+ def run(**kwargs):
+ inst = RdpsoBaseExperiment(**kwargs)
+ return inst()
+
+    def __init__(self, wf_name, W, C1, C2, GEN, N, data_intensive=None):
+        super().__init__(wf_name)
+
+        self.W = W
+        self.C1 = C1
+        self.C2 = C2
+        self.GEN = GEN
+        self.N = N
+        # optional data-intensity value; the comparison scripts pass it just as they do for OmpsoBaseExperiment
+        self.data_intensive = data_intensive
+        pass
+
+ def __call__(self):
+
+ stats, logbook = self.stats(), self.logbook()
+        _wf, rm, estimator = self.env()
+        if self.data_intensive is not None:
+            estimator.transfer_time = self.data_intensive
+
+ wf_dag = HeftHelper.convert_to_parent_children_map(_wf)
+ heft_schedule = self.heft_schedule()
+ nodes = rm.get_nodes()
+ rankList = initRankList(wf_dag, nodes, estimator)
+
+ toolbox = self.toolbox(rankList)
+
+ pop, log, best = run_pso(
+ toolbox=toolbox,
+ logbook=logbook,
+ stats=stats,
+ gen_curr=0, gen_step=self.GEN, invalidate_fitness=True, initial_pop=None,
+ w=self.W, c1=self.C1, c2=self.C2, n=self.N, rm=rm, wf=_wf, estimator=estimator, rankList=rankList,
+ )
+
+ #schedule = build_schedule(_wf, rm, estimator, best, mapMatrix, rankList, ordFilter)
+ schedule = build_schedule(_wf, rm, estimator, best, rankList)
+ Utility.validate_static_schedule(_wf, schedule)
+ makespan = Utility.makespan(schedule)
+ if (makespan > best.fitness.values[0]):
+ print("DANGER!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+ #print("Final makespan: {0}".format(makespan))
+ #print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
+ return makespan
+
+ #def toolbox(self, mapMatrix, rankList, ordFilter):
+ def toolbox(self, rankList):
+ _wf, rm, estimator = self.env()
+ heft_schedule = self.heft_schedule()
+
+
+
+ heft_particle = generate(_wf, rm, estimator, rankList, heft_schedule)
+
+ #heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator, rankList) for _ in range(n-1)] + [deepcopy(heft_particle)])
+ heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator, rankList) for _ in range(n)])
+
+        def compound_update(w, c1, c2, p, best, pop):
+ mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
+ ordering_update(w, c1, c2, p.ordering, best.ordering, pop)
+
+ toolbox = Toolbox()
+ toolbox.register("population", heft_gen)
+ toolbox.register("fitness", fitness, _wf, rm, estimator)
+ toolbox.register("update", componoud_update)
+ return toolbox
+
+
+ pass
+
+if __name__ == "__main__":
+ exp = RdpsoBaseExperiment(wf_name="Epigenomics_24",
+ W=0.1, C1=0.6, C2=0.2,
+ GEN=10, N=20)
+ result = repeat(exp, 1)
+ print(result)
+ sts = interval_statistics(result)
+ print("Statistics: {0}".format(interval_stat_string(sts)))
+ print("Average: {0}".format(numpy.mean(result)))
+ pass
+
diff --git a/src/experiments/pso/sdpso_base_experiment.py b/src/experiments/pso/sdpso_base_experiment.py
new file mode 100644
index 0000000..000ca04
--- /dev/null
+++ b/src/experiments/pso/sdpso_base_experiment.py
@@ -0,0 +1,83 @@
+from copy import deepcopy
+import random
+from deap import tools
+from deap.base import Toolbox
+import numpy
+from src.algs.common.particle_operations import MappingParticle
+from src.algs.heft.DSimpleHeft import run_heft
+from src.algs.heft.HeftHelper import HeftHelper
+from src.algs.pso.mapping_operators import schedule_to_position, fitness, update, generate, construct_solution
+from src.algs.pso.sdpso import run_pso
+
+from src.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager
+from src.core.environment.Utility import Utility, wf
+from src.algs.common.MapOrdSchedule import build_schedule, MAPPING_SPECIE, ORDERING_SPECIE
+from src.experiments.aggregate_utilities import interval_statistics, interval_stat_string
+from src.experiments.cga.mobjective.utility import SimpleTimeCostEstimator
+from src.core.environment.ResourceGenerator import ResourceGenerator as rg
+from src.experiments.cga.utilities.common import repeat
+
+_wf = wf("CyberShake_30")
+rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
+estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
+ ideal_flops=20, transfer_time=100)
+sorted_tasks = HeftHelper.heft_rank(_wf, rm, estimator)
+
+heft_schedule = run_heft(_wf, rm, estimator)
+heft_mapping = schedule_to_position(heft_schedule)
+
+heft_mapping.velocity = MappingParticle.Velocity({})
+
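+# Initial population: with the 1.0 threshold every particle is produced by generate(); lowering it mixes in copies of the HEFT-derived mapping.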
+heft_gen = lambda n: [deepcopy(heft_mapping) if random.random() > 1.0 else generate(_wf, rm, estimator, 1)[0] for _ in range(n)]
+
+W, C1, C2 = 0.1, 0.6, 0.2
+GEN, N = 10, 4
+
+toolbox = Toolbox()
+toolbox.register("population", heft_gen)
+toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)
+toolbox.register("update", update)
+
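+# DEAP statistics and logbook used to track the swarm's fitness per generation.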
+stats = tools.Statistics(lambda ind: ind.fitness.values[0])
+stats.register("avg", numpy.mean)
+stats.register("std", numpy.std)
+stats.register("min", numpy.min)
+stats.register("max", numpy.max)
+
+logbook = tools.Logbook()
+logbook.header = ["gen", "evals"] + stats.fields
+
+
+def do_exp():
+ pop, log, best = run_pso(
+ toolbox=toolbox,
+ logbook=logbook,
+ stats=stats,
+ gen_curr=0, gen_step=GEN, invalidate_fitness=True, initial_pop=None,
+ w=W, c1=C1, c2=C2, n=N,
+ )
+
+ solution = construct_solution(best, sorted_tasks)
+ schedule = build_schedule(_wf, estimator, rm, solution)
+
+ Utility.validate_static_schedule(_wf, schedule)
+
+ makespan = Utility.makespan(schedule)
+ print("Final makespan: {0}".format(makespan))
+ print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
+ return makespan
+
+if __name__ == "__main__":
+ result = repeat(do_exp, 4)
+
+ sts = interval_statistics(result)
+ print("Statistics: {0}".format(interval_stat_string(sts)))
+ print(result)
+ pass
diff --git a/src/experiments/sa/sa_base_experiment.py b/src/experiments/sa/sa_base_experiment.py
new file mode 100644
index 0000000..911279f
--- /dev/null
+++ b/src/experiments/sa/sa_base_experiment.py
@@ -0,0 +1,76 @@
+from deap import tools
+from deap.base import Toolbox
+import numpy
+
+from src.algs.heft.DSimpleHeft import run_heft
+from src.algs.heft.HeftHelper import HeftHelper
+from src.algs.pso.sdpso import schedule_to_position, generate
+from src.algs.sa.SimulatedAnnealingScheme import run_sa
+from src.algs.sa.mappingops import energy, update_T, mapping_neighbor, transition_probability, State
+from src.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager
+from src.core.environment.Utility import Utility, wf
+from src.algs.common.MapOrdSchedule import build_schedule, MAPPING_SPECIE, ORDERING_SPECIE
+from src.experiments.cga.mobjective.utility import SimpleTimeCostEstimator
+from src.core.environment.ResourceGenerator import ResourceGenerator as rg
+
+## TODO: need to test all of it
+from src.experiments.cga.utilities.common import repeat
+
+_wf = wf("Montage_25")
+rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
+estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
+ ideal_flops=20, transfer_time=100)
+sorted_tasks = HeftHelper.heft_rank(_wf, rm, estimator)
+
+heft_schedule = run_heft(_wf, rm, estimator)
+heft_mapping = schedule_to_position(heft_schedule).entity
+
+
+
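+# Start the annealing from the HEFT solution: its mapping and the HEFT-ranked task ordering.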
+initial_state = State()
+initial_state.mapping = heft_mapping
+# initial_state.mapping = generate(_wf, rm, estimator, 1)[0].entity
+initial_state.ordering = sorted_tasks
+
+T, N = 20, 1000
+
+
+
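+# SA components: energy scores a state, update_T implements the cooling schedule, neighbor perturbs the current mapping and transition_probability gives the acceptance rule.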
+toolbox = Toolbox()
+toolbox.register("energy", energy, _wf, rm, estimator)
+toolbox.register("update_T", update_T, T)
+toolbox.register("neighbor", mapping_neighbor, _wf, rm, estimator, 1)
+toolbox.register("transition_probability", transition_probability)
+# use a constant number of attempts per temperature level, independent of T
+toolbox.register("attempts_count", lambda T: 100)
+
+logbook = tools.Logbook()
+logbook.header = ["gen", "T", "val"]
+
+stats = tools.Statistics(lambda ind: ind.energy.values[0])
+stats.register("val", lambda arr: arr[0])
+
+def do_exp():
+ best, log, current = run_sa(
+ toolbox=toolbox,
+ logbook=logbook,
+ stats=stats,
+ initial_solution=initial_state, T=T, N=N
+ )
+
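+ # Convert the best state into a (mapping, ordering) solution and rebuild the schedule from it.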
+ solution = {MAPPING_SPECIE: [item for item in best.mapping.items()], ORDERING_SPECIE: best.ordering}
+ schedule = build_schedule(_wf, estimator, rm, solution)
+ Utility.validate_static_schedule(_wf, schedule)
+ makespan = Utility.makespan(schedule)
+ heft_makespan = Utility.makespan(heft_schedule)
+ print("Final makespan: {0}".format(makespan))
+ print("Heft makespan: {0}".format(heft_makespan))
+ return makespan
+
+if __name__ == "__main__":
+ result = repeat(do_exp, 10)
+ print(result)
+ print("Mean: {0}".format(numpy.mean(result)))
+ pass
+
diff --git a/src/settings.py b/src/settings.py
new file mode 100644
index 0000000..1414e8b
--- /dev/null
+++ b/src/settings.py
@@ -0,0 +1,11 @@
+"""
+This module stores all global project settings (resource and temp paths, the test directory name).
+"""
+import os
+
+__root_path__ = os.path.dirname(os.path.dirname(__file__))
+RESOURCES_PATH = os.path.join(__root_path__, "resources")
+TEMP_PATH = os.path.join(__root_path__, "temp")
+TEST_DIRECTORY_NAME = "test_directory"
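+
+# Example usage (assumption: the workflow XML descriptions sit directly under resources/):
+#   os.path.join(RESOURCES_PATH, "CyberShake_30.xml")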