Merge branch 'master' into release_24.0

bgruening authored Mar 23, 2024
2 parents 0b742f3 + 65a9146 commit 2947ab9
Showing 18 changed files with 223 additions and 150 deletions.
31 changes: 0 additions & 31 deletions files/galaxy/tpv/destinations.yml.j2
@@ -71,8 +71,6 @@ destinations:
$working_directory:rw,
{{ cvmfs.data.path }}:{{ cvmfs.data.docker_perm }}"

{# Save embedded Pulsar destinations to replicate them in the secondary cluster. #}
{% set embedded_pulsar %}
interactive_pulsar:
inherits: embedded_pulsar_docker_abstract
scheduling:
@@ -111,18 +109,6 @@ destinations:
GPU_AVAILABLE: "1"
params:
requirements: 'GalaxyGroup == "compute_gpu"'
{%- endset %}{{ embedded_pulsar }}

{# Generate secondary cluster destinations. #}
{% for name, destination in (embedded_pulsar | from_yaml).items() %}
secondary_{{ name }}:
inherits: {{ name }}
runner: pulsar_embedded_secondary
scheduling:
require:
- condor-secondary

{% endfor %}

#######################
# PULSAR DESTINATIONS #
@@ -165,7 +151,6 @@ destinations:
- singularity
- docker
- condor-tpv
- condor-secondary

pulsar_mira_tpv:
inherits: pulsar_default
@@ -370,8 +355,6 @@ destinations:
# LOCAL CONDOR DESTINATIONS #
#############################

{# Save condor destinations to replicate them in the secondary cluster. #}
{% set condor %}
condor_docker:
inherits: basic_docker_destination
runner: condor
@@ -484,17 +467,3 @@ destinations:
GPU_AVAILABLE: 1
params:
requirements: 'GalaxyGroup == "compute_gpu"'
{%- endset %}{{ condor }}

{# Generate secondary cluster destinations. #}
{% for name, destination in (condor | from_yaml).items() %}
{% if destination.runner is defined and destination.runner == "condor" %}
secondary_{{ name }}:
inherits: {{ name }}
runner: condor_secondary
scheduling:
require:
- condor-secondary

{% endif %}
{% endfor %}
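The removed Jinja pattern above captures a destination block as text with {% set %}/{% endset %}, emits it once unchanged via {{ condor }}, then re-parses it with the from_yaml filter and loops over the entries to stamp out a secondary_* copy of every condor-backed destination. A minimal Python sketch of the same transformation, assuming PyYAML and a single illustrative entry rather than the full destination list:

import yaml

# One illustrative destination; the real template captures the whole block.
condor_yaml = """
condor_docker:
  inherits: basic_docker_destination
  runner: condor
"""

destinations = yaml.safe_load(condor_yaml)

# Mirror of the removed {% for %} loop: copy each condor-backed
# destination, pointing the copy at the secondary cluster runner.
secondary = {
    f"secondary_{name}": {
        "inherits": name,
        "runner": "condor_secondary",
        "scheduling": {"require": ["condor-secondary"]},
    }
    for name, dest in destinations.items()
    if dest.get("runner") == "condor"
}

print(yaml.safe_dump(secondary, sort_keys=False))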
15 changes: 0 additions & 15 deletions files/galaxy/tpv/tool_defaults.yml
@@ -60,21 +60,6 @@ tools:
retval
fail: |
Invalid 'Remote resources id' selected in the config menu under 'User -> Preferences -> Manage Information -> Use distributed compute resources'. Please reselect either 'default' or an appropriate remote resource then click 'Save' and rerun your job.
- id: secondary_cluster
# Send all jobs to the secondary HTCondor cluster.
if: True
execute: |
from tpv.core.entities import Tag, TagSetManager, TagType
pulsar_tag = Tag(
"scheduling",
"condor-secondary",
TagType.REQUIRE,
)
entity.tpv_tags = entity.tpv_tags.combine(
TagSetManager(tags=[pulsar_tag])
)
rank: |
final_destinations = helpers.weighted_random_sampling(candidate_destinations)
final_destinations
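The removed secondary_cluster rule routed jobs through TPV's Python API: it built a REQUIRE-type scheduling tag and merged it into the job's tag set, so only destinations tagged condor-secondary could accept the job. A short sketch reusing only the calls that appear in the rule itself (requires the total-perspective-vortex package; the entity object exists only inside a running TPV rule):

from tpv.core.entities import Tag, TagSetManager, TagType

# The same REQUIRE tag the rule constructed.
pulsar_tag = Tag("scheduling", "condor-secondary", TagType.REQUIRE)
extra_tags = TagSetManager(tags=[pulsar_tag])

# Inside the rule, entity.tpv_tags.combine(extra_tags) merged this tag
# into the job's scheduling tags; assigning the result back to
# entity.tpv_tags made the requirement stick for scheduling.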
116 changes: 102 additions & 14 deletions files/galaxy/tpv/tools.yml
@@ -219,10 +219,26 @@ tools:
toolshed.g2.bx.psu.edu/repos/iuc/sleuth/sleuth/.*:
cores: 4
mem: 16
scheduling:
require:
- conda
- singularity

toolshed.g2.bx.psu.edu/repos/iuc/rnaquast/rna_quast/.*:
cores: 12
mem: 40
scheduling:
require:
- conda
- singularity

toolshed.g2.bx.psu.edu/repos/iuc/mashmap/mashmap/.*:
cores: 8
mem: 32
scheduling:
require:
- conda
- singularity

toolshed.g2.bx.psu.edu/repos/galaxy-australia/alphafold2/alphafold/.*:
cores: 10
@@ -285,6 +301,12 @@ tools:

toolshed.g2.bx.psu.edu/repos/recetox/recetox_msfinder/recetox_msfinder/.*:
inherits: basic_docker_tool
toolshed.g2.bx.psu.edu/repos/recetox/qcxms_getres/qcxms_getres/.*:
inherits: basic_docker_tool
toolshed.g2.bx.psu.edu/repos/recetox/qcxms_neutral_run/qcxms_neutral_run/.*:
inherits: basic_docker_tool
toolshed.g2.bx.psu.edu/repos/recetox/qcxms_production_run/qcxms_production_run/.*:
inherits: basic_docker_tool
toolshed.g2.bx.psu.edu/repos/imgteam/bioformats2raw/bf2raw/.*:
inherits: basic_docker_tool
toolshed.g2.bx.psu.edu/repos/bgruening/biomodels_biomd0000001066/biomodels_biomd0000001066/.*:
@@ -778,7 +800,8 @@ tools:
singularity_enabled: true

toolshed.g2.bx.psu.edu/repos/iuc/trinity/trinity/.*:
cores: 8
cores: 24
mem: 250
scheduling:
prefer:
- condor-tpv
@@ -789,17 +812,21 @@ tools:
# see usegalaxy-eu/issues#473: https://github.com/usegalaxy-eu/issues/issues/473
env:
_JAVA_OPTIONS: -Xmx{int(mem)}G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp

- if: input_size < 0.1
cores: 1
mem: 4
- if: 0.1 <= input_size < 1
cores: 12
mem: 92
- if: 1 <= input_size < 4
cores: 16
mem: 128
- if: 4 <= input_size < 8
cores: 20
mem: 100
- if: 1 <= input_size < 2
cores: 30
mem: 200
- if: 2 <= input_size < 30
cores: 60
mem: 950
- if: input_size >= 30
fail:
mem: 190
- if: input_size >= 80
fail: |
Too much data, we cannot support such large Trinity assemblies with our
backend. Please use another server for your job.
@@ -1008,16 +1035,24 @@ tools:
env:
_JAVA_OPTIONS: -Xmx6G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp

## bad things: 40 is enough for most of the jobs, but a better rule is needed
toolshed.g2.bx.psu.edu/repos/iuc/unicycler/unicycler/.*:
cores: 16
mem: 40
cores: 24
mem: 80
env:
TERM: vt100
_JAVA_OPTIONS: -XX:MaxPermSize=2G -Xmx6G -Xmx{int(mem)}G -Xms1G -Djava.io.tmpdir=/data/2/galaxy_db/tmp -Duser.home=/data/2/galaxy_db/tmp
scheduling:
accept:
- pulsar
rules:
- id: unicycler_small_input_rule
if: input_size < 0.05
cores: 1
mem: 3.8
- id: unicycler_medium_input_rule
if: 0.05 <= input_size < 2
cores: 8
mem: 28

toolshed.g2.bx.psu.edu/repos/imgteam/unzip/unzip/.*:
scheduling:
@@ -1060,6 +1095,14 @@ tools:
mem: 128
cores: 24

toolshed.g2.bx.psu.edu/repos/iuc/windowmasker/windowmasker_mkcounts/.*:
mem: 16
cores: 1

jbrowse2:
mem: 6
cores: 1

toolshed.g2.bx.psu.edu/repos/iuc/circos/circos/.*:
scheduling:
require:
@@ -1140,11 +1183,20 @@ tools:
- singularity

toolshed.g2.bx.psu.edu/repos/iuc/bakta/bakta/.*:
cores: 20
cores: 8
memory: 24
scheduling:
require:
- conda
- singularity

toolshed.g2.bx.psu.edu/repos/iuc/compleasm/compleasm/.*:
cores: 1
mem: 4
scheduling:
require:
- conda
- singularity

# 27:02:2024: We seem to have issues with the tools when loaded from the conda envs.
# For whatever reason they seem to get stuck in the D state and never finish.
@@ -1167,6 +1219,35 @@ tools:
require:
- singularity

toolshed.g2.bx.psu.edu/repos/iuc/bwa_mem2/bwa_mem2/.*:
cores: 32
mem: 244
rules:
- id: bwa_mem2_small_input_rule
if: input_size < 0.25
cores: 2
mem: 7.6
- id: bwa_mem2_medium_input_rule
if: 0.25 <= input_size < 16
cores: 8
mem: 28
- id: bwa_mem2_large_input_rule
if: 16 <= input_size < 32
cores: 16
mem: 58
- id: bwa_mem2_xlarge_input_rule
if: 32 <= input_size < 64
cores: 24
mem: 120
- id: bwa_mem2_history_reference_rule
if: |
helpers.job_args_match(job, app, {"reference_source": {"reference_source_selector": "history"}})
# per https://github.com/bwa-mem2/bwa-mem2/issues/41 it's 28 * reference
mem: |
options = job.get_param_values(app)
size = options["reference_source"]["ref_file"].get_size()
min(max(float(size/1024**3) * 28, (input_size - float(size/1024**3)) * 2, 7.6), 120)
toolshed.g2.bx.psu.edu/repos/bgruening/flye/flye/.*:
cores: 20
mem: min(max(input_size*1.2, 3.8), 256)
@@ -1254,3 +1335,10 @@ tools:
toolshed.g2.bx.psu.edu/repos/bgruening/omark/omark/.*:
cores: 8
mem: 4
scheduling:
require:
- singularity

toolshed.g2.bx.psu.edu/repos/iuc/red/red/.*:
cores: 1
mem: 8
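The bwa_mem2 history-reference rule above sizes memory from the uploaded reference rather than a fixed tier: 28x the reference size (per the linked bwa-mem2 issue), with a floor of twice the non-reference input and of 7.6 GB, capped at 120 GB. A small Python restatement of that expression with illustrative values (sizes in GB, as the surrounding rules assume):

def bwa_mem2_mem_gb(input_size_gb: float, reference_size_gb: float) -> float:
    """Mirror of the TPV mem expression for history references."""
    return min(
        max(
            reference_size_gb * 28,                   # 28x reference rule of thumb
            (input_size_gb - reference_size_gb) * 2,  # headroom for the reads
            7.6,                                      # absolute floor
        ),
        120,                                          # cluster-side cap
    )

print(bwa_mem2_mem_gb(10, 3))    # 84.0  -> reference-dominated
print(bwa_mem2_mem_gb(10, 0.1))  # 19.8  -> read-volume-dominated

The flye entry that follows applies the same clamp pattern directly: min(max(input_size*1.2, 3.8), 256).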
6 changes: 0 additions & 6 deletions files/galaxy/tpv/users.yml
@@ -1,11 +1,5 @@
---
users:
[email protected]:
scheduling:
require:
- condor-secondary
[email protected]:
scheduling:
require:
- condor-secondary
[email protected]:
9 changes: 6 additions & 3 deletions group_vars/all.yml
@@ -1,4 +1,7 @@
---
# Upload dirs
upload_dir_test: "{{ jwd.jwd04.path }}/tus_upload/test"
upload_dir_main: "{{ jwd.jwd04.path }}/tus_upload/main"
# Galaxy user and group
galaxy_user:
name: galaxy
@@ -58,19 +61,19 @@ telegraf_agent_output:
- database = "{{ influxdb.node.database }}"
- username = "{{ influxdb.node.username }}"
- password = "{{ influxdb.node.password }}"

- timeout = "10s"
telegraf_plugins_default:
- plugin: cpu
config:
- percpu = true
- plugin: disk
- plugin: kernel
- plugin: processes
- plugin: io
- plugin: diskio
- plugin: mem
- plugin: system
- plugin: swap
- plugin: net
- plugin: nstat
- plugin: netstat
- plugin: chrony

2 changes: 1 addition & 1 deletion group_vars/gxconfig.yml
@@ -313,7 +313,7 @@ base_app_main: &BASE_APP_MAIN
# Monitor dynamic job rules. If changes are found, rules are
# automatically reloaded. Takes the same values as the 'watch_tools'
# option.
watch_job_rules: false
watch_job_rules: true

# Monitor a subset of options in the core configuration file (See
# RELOADABLE_CONFIG_OPTIONS in lib/galaxy/config/__init__.py). If
2 changes: 2 additions & 0 deletions group_vars/maintenance.yml
@@ -127,6 +127,8 @@ fsm_scripts:
paths:
- "{{ galaxy_config['galaxy']['nginx_upload_store'] }}"
- "{{ galaxy_config['galaxy']['nginx_upload_job_files_store'] }}"
- "{{ upload_dir_main }}"
- "{{ upload_dir_test }}"
time: "{{ fsm_intervals.medium }}"
job_working_dirs:
enable: true
46 changes: 46 additions & 0 deletions group_vars/upload.yml
@@ -1,4 +1,50 @@
---
rustus_version: "0.7.4"
rustus_instances:
- name: test_uploads
# user that rustus will run as
user: "{{ user_name }}"
# group that rustus will run as
group: "{{ user_group_name }}"
# by default, rustus will refuse to work if it cannot write to its current working
# directory, read the documentation of the usegalaxy_eu.rustus role for more
# details
working_directory: "{{ upload_dir_test }}"
# args passed to rustus
args:
- --host "{{ inventory_hostname }}"
- --port 1080
- "--data-dir {{ upload_dir_test }}"
- --hooks-http-urls "https://test.usegalaxy.eu/api/upload/hooks"
- --hooks-http-proxy-headers "X-Api-Key,Cookie"
- --hooks "pre-create"
- --hooks-format tusd
- --url "/api/upload/resumable_upload"
- --max-body-size 20000000
- "--sentry-dsn {{ sentry_dsn.test }} --sentry-sample-rate 1.0"
- --info-storage "file-info-storage" --info-dir "{{ upload_dir_test }}"
- name: main_uploads
# user that rustus will run as
user: "{{ user_name }}"
# group that rustus will run as
group: "{{ user_group_name }}"
# by default, rustus will refuse to work if it cannot write to its current working
# directory, read the documentation of the usegalaxy_eu.rustus role for more
# details
working_directory: "{{ upload_dir_main }}"
# args passed to rustus
args:
- --host "{{ inventory_hostname }}"
- --port 1081
- "--data-dir {{ upload_dir_main }}"
- --hooks-http-urls "https://usegalaxy.eu/api/upload/hooks"
- --hooks-http-proxy-headers "X-Api-Key,Cookie"
- --hooks "pre-create"
- --hooks-format tusd
- --url "/api/upload/resumable_upload"
- --max-body-size 20000000
- "--sentry-dsn {{ sentry_dsn.main }} --sentry-sample-rate 1.0"
- --info-storage "file-info-storage" --info-dir "{{ upload_dir_main }}"
user_name: galaxy
user_group_name: galaxy
