diff --git a/.github/workflows/jekyll.yml b/.github/workflows/jekyll.yml deleted file mode 100644 index c1f5e292..00000000 --- a/.github/workflows/jekyll.yml +++ /dev/null @@ -1,63 +0,0 @@ -# This workflow uses actions that are not certified by GitHub. -# They are provided by a third-party and are governed by -# separate terms of service, privacy policy, and support -# documentation. - -# Sample workflow for building and deploying a Jekyll site to GitHub Pages -name: Deploy Jekyll site to Pages - -on: - # Runs on pushes targeting the default branch - push: - branches: ["master"] - - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages -permissions: - contents: read - pages: write - id-token: write - -# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. -# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
-concurrency: - group: "pages" - cancel-in-progress: false - -jobs: - # Build job - build: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Setup Ruby - uses: ruby/setup-ruby@8575951200e472d5f2d95c625da0c7bec8217c42 # v1.161.0 - with: - bundler-cache: true # runs 'bundle install' and caches installed gems automatically - cache-version: 0 # Increment this number if you need to re-download cached gems - - name: Setup Pages - id: pages - uses: actions/configure-pages@v5 - - name: Build with Jekyll - # Outputs to the './_site' directory by default - run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}" - env: - JEKYLL_ENV: production - - name: Upload artifact - # Automatically uploads an artifact from the './_site' directory by default - uses: actions/upload-pages-artifact@v3 - - # Deployment job - deploy: - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest - needs: build - steps: - - name: Deploy to GitHub Pages - id: deployment - uses: actions/deploy-pages@v4 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml deleted file mode 100644 index f86dba92..00000000 --- a/.github/workflows/tests.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Run Cypress tests - -on: - pull_request: - workflow_dispatch: - -jobs: - build-and-test: - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Ruby - uses: ruby/setup-ruby@v1 - with: - bundler-cache: true # runs 'bundle install' - - - name: Set up Node.js - uses: actions/setup-node@v4.0.3 - with: - node-version-file: 'package.json' - - - name: Cypress run - uses: cypress-io/github-action@v6 # runs 'npm ci' - with: - start: npm start - wait-on: 'http://localhost:4000' diff --git a/.npmrc b/.npmrc deleted file mode 100644 index b6f27f13..00000000 --- a/.npmrc +++ /dev/null @@ -1 +0,0 @@ -engine-strict=true diff --git a/.nvmrc b/.nvmrc deleted 
file mode 100644 index 65da8ce3..00000000 --- a/.nvmrc +++ /dev/null @@ -1 +0,0 @@ -20.17 diff --git a/.ruby-version b/.ruby-version deleted file mode 100644 index 944880fa..00000000 --- a/.ruby-version +++ /dev/null @@ -1 +0,0 @@ -3.2.0 diff --git a/CYPRESS.md b/CYPRESS.md deleted file mode 100644 index 25c9e6e6..00000000 --- a/CYPRESS.md +++ /dev/null @@ -1,88 +0,0 @@ -# Run the Cypress end-to-end test suite - -Running the Cypress tests depends on: - -- Ruby for building and serving the site -- Node.js for running Cypress while the site is being served - -This documentation assumes your machine has git installed and GitHub credentials configured. See the git [Downloads page](https://git-scm.com/downloads) for help installing git, and [Connecting to GitHub with SSH](https://docs.github.com/en/authentication/connecting-to-github-with-ssh) for help configuring shell access to GitHub. - -## Clone this repo - -First clone this repository to your machine then move into it: - -```sh -git clone git@github.com:archivesspace/tech-docs.git - -cd tech-docs -``` - -**All of the remaining commands within this document should be run from the tech-docs root directory.** - -## Setup the Ruby environment - -The currently required Ruby version for this project is listed in the [.ruby-version](./.ruby-version) file. - -It is strongly recommended to use a Ruby version manager to be able to switch to any version that a given project uses. - -The most popular version manager available for macOS and Linux is `rbenv`. You can find the installation guide here [https://github.com/rbenv/rbenv#readme](https://github.com/rbenv/rbenv#readme). - -For Windows, a separate `rbenv` installer exists here: [https://github.com/RubyMetric/rbenv-for-windows#readme](https://github.com/RubyMetric/rbenv-for-windows#readme). 
- -If you wish to use a different manager or installation method, you can choose one of the following: [https://www.ruby-lang.org/en/documentation/installation/](https://www.ruby-lang.org/en/documentation/installation/) - -### Install Ruby - -Install Ruby using `rbenv`: - -```sh -rbenv install -``` - -### Install Ruby gems - -Install the project specific Ruby dependencies listed in [Gemfile](./Gemfile) which are used for building and serving the site. - -```sh -bundle install -``` - -## Set up the Node.js environment - -The currently required Node.js version for this project is listed in [.nvmrc](./.nvmrc) and under the `engines.node` key in [package.json](./package.json). - -It is strongly recommended to use a Node.js version manager to be able to switch to any version that a given project uses. - -The most popular version manager available for macOS and Linux is `nvm`. You can find the installation guide here [https://github.com/nvm-sh/nvm](https://github.com/nvm-sh/nvm). - -A popular version manager for Windows is `nvm-windows`. See the installation guide here [https://github.com/coreybutler/nvm-windows](https://github.com/coreybutler/nvm-windows). - -### Install Node.js - -Install Node.js using `nvm`: - -```sh -nvm install -``` - -### Install Node modules - -Install the project specific Node dependencies listed in [package.json](./package.json) which are used for running the tests: - -```sh -npm install -``` - -## Run the tests locally - -Run the tests localy by first serving the site: - -```sh -npm start -``` - -Then **in a different terminal** initiate the tests: - -```sh -npm test -``` diff --git a/Gemfile b/Gemfile deleted file mode 100644 index 9d9249b8..00000000 --- a/Gemfile +++ /dev/null @@ -1,33 +0,0 @@ -source 'https://rubygems.org' -# Hello! This is where you manage which Jekyll version is used to run. -# When you want to use a different version, change it below, save the -# file and run `bundle install`. 
Run Jekyll with `bundle exec`, like so: -# -# bundle exec jekyll serve -# -# This will help ensure the proper Jekyll version is running. -# Happy Jekylling! -gem 'jekyll', '~> 4.3.2' -# This is the current site theme -gem 'jekyll-theme-minimal' -# If you want to use GitHub Pages, remove the "gem "jekyll"" above and -# uncomment the line below. To upgrade, run `bundle update github-pages`. -# gem 'github-pages', group: :jekyll_plugins -# If you have any plugins, put them here! -group :jekyll_plugins do - gem 'jekyll-optional-front-matter' -end - -# Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem -# and associated library. -platforms :mingw, :x64_mingw, :mswin, :jruby do - gem 'tzinfo', '>= 1', '< 3' - gem 'tzinfo-data' -end - -# Performance-booster for watching directories on Windows -gem 'wdm', '~> 0.1.1', platforms: %i[mingw x64_mingw mswin] - -# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem -# do not have a Java counterpart. 
-gem 'http_parser.rb', '~> 0.6.0', platforms: [:jruby] diff --git a/Gemfile.lock b/Gemfile.lock deleted file mode 100644 index f86b8660..00000000 --- a/Gemfile.lock +++ /dev/null @@ -1,87 +0,0 @@ -GEM - remote: https://rubygems.org/ - specs: - addressable (2.8.1) - public_suffix (>= 2.0.2, < 6.0) - colorator (1.1.0) - concurrent-ruby (1.2.0) - em-websocket (0.5.3) - eventmachine (>= 0.12.9) - http_parser.rb (~> 0) - eventmachine (1.2.7) - ffi (1.15.5) - forwardable-extended (2.6.0) - google-protobuf (3.25.5-arm64-darwin) - google-protobuf (3.25.5-x86_64-linux) - http_parser.rb (0.8.0) - i18n (1.12.0) - concurrent-ruby (~> 1.0) - jekyll (4.3.2) - addressable (~> 2.4) - colorator (~> 1.0) - em-websocket (~> 0.5) - i18n (~> 1.0) - jekyll-sass-converter (>= 2.0, < 4.0) - jekyll-watch (~> 2.0) - kramdown (~> 2.3, >= 2.3.1) - kramdown-parser-gfm (~> 1.0) - liquid (~> 4.0) - mercenary (>= 0.3.6, < 0.5) - pathutil (~> 0.9) - rouge (>= 3.0, < 5.0) - safe_yaml (~> 1.0) - terminal-table (>= 1.8, < 4.0) - webrick (~> 1.7) - jekyll-optional-front-matter (0.3.2) - jekyll (>= 3.0, < 5.0) - jekyll-sass-converter (3.0.0) - sass-embedded (~> 1.54) - jekyll-seo-tag (2.8.0) - jekyll (>= 3.8, < 5.0) - jekyll-theme-minimal (0.2.0) - jekyll (> 3.5, < 5.0) - jekyll-seo-tag (~> 2.0) - jekyll-watch (2.2.1) - listen (~> 3.0) - kramdown (2.4.0) - rexml - kramdown-parser-gfm (1.1.0) - kramdown (~> 2.0) - liquid (4.0.4) - listen (3.8.0) - rb-fsevent (~> 0.10, >= 0.10.3) - rb-inotify (~> 0.9, >= 0.9.10) - mercenary (0.4.0) - pathutil (0.16.2) - forwardable-extended (~> 2.6) - public_suffix (5.0.1) - rb-fsevent (0.11.2) - rb-inotify (0.10.1) - ffi (~> 1.0) - rexml (3.3.9) - rouge (4.0.1) - safe_yaml (1.0.5) - sass-embedded (1.58.0-arm64-darwin) - google-protobuf (~> 3.21) - sass-embedded (1.58.0-x86_64-linux-gnu) - google-protobuf (~> 3.21) - terminal-table (3.0.2) - unicode-display_width (>= 1.1.1, < 3) - unicode-display_width (2.4.2) - webrick (1.8.2) - -PLATFORMS - arm64-darwin-21 - 
x86_64-linux - -DEPENDENCIES - http_parser.rb (~> 0.6.0) - jekyll (~> 4.3.2) - jekyll-optional-front-matter - jekyll-theme-minimal - tzinfo (>= 1, < 3) - tzinfo-data - wdm (~> 0.1.1) - -BUNDLED WITH - 2.3.26 diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 53acf104..00000000 --- a/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2012 New York University, the Regents of the University -of California, the Board of Trustees of the University of Illinois -Licensed under the Educational Community License, Version 2.0 (the -"License"); you may not use this file except in compliance with the -License. You may obtain a copy of the License at - -http://opensource.org/licenses/ecl2.php - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an "AS IS" -BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -or implied. See the License for the specific language governing -permissions and limitations under the License. diff --git a/README.md b/README.md deleted file mode 100644 index b716c1b9..00000000 --- a/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -permalink: / ---- - -# ArchivesSpace technical documentation - -The technical documentation covers a range of topics of interest to those working with ArchivesSpace in different technical capacities, and is organized in order to help you find the information most appropriate to your role. 
- -- **[ArchivesSpace technical overview](./readme_evaluate)** – For anyone who needs to evaluate technical requirements and capabilities of ArchivesSpace -- **[Installing, configuring and maintaining an ArchivesSpace instance](./readme_implement)** – For anyone responsible for installing and/or maintaining an ArchivesSpace instance -- **[Developer resources](./readme_develop)** – For anyone who needs to create plugins, integrate ArchivesSpace with other systems, or contribute to core code - -**To suggest corrections or additions, please submit a pull request or issue report on [Github](https://github.com/archivesspace/tech-docs)** - -## Other technical documentation resources: - -- [ArchivesSpace API Reference](http://archivesspace.github.io/archivesspace/api/) -- [ArchivesSpace YARD Docs](http://archivesspace.github.io/archivesspace/doc/) -- [Technical Documentation website](https://archivesspace.github.io/tech-docs/) -- [Github repository](https://github.com/archivesspace/tech-docs) diff --git a/README_FEATURES_1.5.0.html b/README_FEATURES_1.5.0.html new file mode 100644 index 00000000..e2c6d90a --- /dev/null +++ b/README_FEATURES_1.5.0.html @@ -0,0 +1,158 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + README_FEATURES_1.5.0.md + +

+ +

+ + Report issue on Jira + README_FEATURES_1.5.0.md + +

+
+
+ +

What is the new functionality related to containers and container management in 1.5.0?

+ +

Top containers replace Container 1s. Unlike Container 1s in the current version of ArchivesSpace, top containers in the upcoming version can be defined once and linked many times to various archival objects, resources, and accessions.

+ +

The ability to create container profiles and associate them with top containers. Optional container profiles allow you to track information about the containers themselves, including dimensions.

+ +

Extent calculator. In conjunction with container profiles, the new extent calculator allows you to easily see extents for accessions, resources, or resource components. Optionally, you can use the calculator to generate extent records for an accession, resource, or resource component.

+ +

Bulk operations for containers. The Manage Top Containers area provides more efficient ways to work with multiple containers, including the ability to add or edit barcodes, change locations, and delete top containers in bulk.

+ +

The ability to “share” boxes across collections in a meaningful way. You can define top containers separately from individual accessions and resources and access them from multiple accession and resource records. For example, this might be helpful for recording information about an oversize box that contains items from many collections.

+ +

The ability to store data that will help you synchronize between ArchivesSpace and item records in your ILS. If your institution creates item records in its ILS for containers, you can now record that information within ArchivesSpace as well.

+ +

The ability to store data about the restriction status of material associated with a container. You can now see at a glance whether any portion of the contents of a container is restricted.

+ +

Machine-actionable restrictions. You will now have the ability to associate begin and end dates with “conditions governing access” and “conditions governing use” Notes. You’ll also be able to associate a local restriction type for non-time-bound restrictions. This gives the ability to better manage and re-describe expiring restrictions.

+ +

For more information on using the new features, consult the user manual, particularly the new section titled Managing Containers (available late April 2016).

+ + +
+ +
+ + + diff --git a/README_FEATURES_1.5.0.md b/README_FEATURES_1.5.0.md deleted file mode 100644 index 4f6068e7..00000000 --- a/README_FEATURES_1.5.0.md +++ /dev/null @@ -1,19 +0,0 @@ -# What is the new functionality related to containers and container management in 1.5.0? - -**Top containers replace Container 1s.** Unlike Container 1s in the current version of ArchivesSpace, top containers in the upcoming version can be defined once and linked many times to various archival objects, resources, and accessions. - -**The ability to create container profiles and associate them with top containers.** Optional container profiles allow you to track information about the containers themselves, including dimensions. - -**Extent calculator.** In conjunction with container profiles, the new extent calculator allows you to easily see extents for accessions, resources, or resource components. Optionally, you can use the calculator to generate extent records for an accession, resource, or resource component. - -**Bulk operations for containers.** The Manage Top Containers area provides more efficient ways to work with multiple containers, including the ability to add or edit barcodes, change locations, and delete top containers in bulk. - -**The ability to "share" boxes across collections in a meaningful way.** You can define top containers separately from individual accessions and resources and access them from multiple accession and resource records. For example, this might be helpful for recording information about an oversize box that contains items from many collections. - -**The ability to store data that will help you synchronize between ArchivesSpace and item records in your ILS.** If your institution creates item records in its ILS for containers, you can now record that information within ArchivesSpace as well. 
- -**The ability to store data about the restriction status of material associated with a container.** You can now see at a glance whether any portion of the contents of a container is restricted. - -**Machine-actionable restrictions.** You will now have the ability to associate begin and end dates with "conditions governing access" and "conditions governing use" Notes. You'll also be able to associate a local restriction type for non-time-bound restrictions. This gives the ability to better manage and re-describe expiring restrictions. - -For more information on using the new features, consult the user manual, particularly the new section titled Managing Containers (available late April 2016). diff --git a/_config.yml b/_config.yml deleted file mode 100644 index 023dd07f..00000000 --- a/_config.yml +++ /dev/null @@ -1,36 +0,0 @@ -theme: jekyll-theme-minimal -title: tech-docs -description: Technical documentation for ArchivesSpace - -baseurl: '/tech-docs' - -plugins: - - jekyll-optional-front-matter - -optional_front_matter: - remove_originals: true - -include: - - README.md # for optional-front-matter plugin - -defaults: - - scope: - path: '' # all files - values: - layout: 'default' - -exclude: - - cypress/ - - CYPRESS.md - - package*.json - - LICENSE - -github: # recreate gh-pages api - is_project_page: true - repository_url: https://github.com/archivesspace/tech-docs - repository_nwo: archivesspace/tech-docs - owner_url: https://github.com/archivesspace - owner_name: ArchivesSpace - edit_base_url: https://github.com/archivesspace/tech-docs/edit/master/ - -jira_issues: https://archivesspace.atlassian.net/jira/software/projects/TD/issues diff --git a/_includes/deprecation-notice.html b/_includes/deprecation-notice.html deleted file mode 100644 index a78bb181..00000000 --- a/_includes/deprecation-notice.html +++ /dev/null @@ -1,47 +0,0 @@ - - - diff --git a/_layouts/default.html b/_layouts/default.html deleted file mode 100644 index 2414b8b3..00000000 --- 
a/_layouts/default.html +++ /dev/null @@ -1,78 +0,0 @@ - - - - - - - -{% seo %} - - - {% include head-custom.html %} - - -
- {% include deprecation-notice.html %} - -
-

{{ site.title | default: site.github.repository_name }}

- - {% if site.logo %} - Logo - {% endif %} - -

{{ site.description | default: site.github.project_tagline }}

- - {% if site.github.is_project_page %} -

View the Project on GitHub {{ site.github.repository_nwo }}

- {% endif %} - - {% if site.github.is_user_page %} -

View My GitHub Profile

- {% endif %} - - {% if site.show_downloads %} - - {% endif %} - -

- - Edit this page on GitHub - {{ page.path }} - -

- -

- - Report issue on Jira - {{ page.path }} - -

-
-
- - {{ content }} - -
- -
- - - diff --git a/administration/README.md b/administration/README.md deleted file mode 100644 index a4b30f2f..00000000 --- a/administration/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -permalink: /administration/ ---- - -# Basic ArchivesSpace administration - -* [Getting started](./getting_started.html) -* [Running ArchivesSpace as a Unix daemon](./unix_daemon.html) -* [Running ArchivesSpace as a Windows service](./windows.html) -* [Backup and recovery](./backup.html) -* [Re-creating indexes](./indexes.html) -* [Resetting passwords](./passwords.html) -* [Upgrading](./upgrading.html) -* [Log rotation](./logrotate.html) diff --git a/administration/backup.html b/administration/backup.html new file mode 100644 index 00000000..4c721509 --- /dev/null +++ b/administration/backup.html @@ -0,0 +1,303 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/backup.md + +

+ +

+ + Report issue on Jira + administration/backup.md + +

+
+
+ +

Backup and recovery

+ +

Managing your own backups

+ +

Performing regular backups of your MySQL database is critical. ArchivesSpace stores +all of your records data in the database, so as long as you have backups of your +database then you can always recover from errors and failures.

+ +

If you are running MySQL, the mysqldump utility can dump the database +schema and data to a file. It’s a good idea to run this with the +--single-transaction option to avoid locking your database tables +while your backups run. It is also essential to use the --routines +flag, which will include functions and stored procedures in the +backup. The mysqldump utility is widely used, and there are many tutorials +available. As an example, something like this in your crontab would backup your +database twice daily:

+ +
  # Dump archivesspace database 6am and 6pm
+ 30 06,18 * * * mysqldump -u as -pas123 archivesspace | gzip > ~/backups/db.$(date +%F.%H%M%S).sql.gz
+
+ +

You should store backups in a safe location.

+ +

If you are running with the demo database (NEVER run the demo database in production), +you can create periodic database snapshots using the following configuration settings:

+ +
 # In this example, we create a snapshot at 4am each day and keep
+ # 7 days' worth of backups
+ #
+ # Database snapshots are written to 'data/demo_db_backups' by
+ # default.
+ AppConfig[:demo_db_backup_schedule] = "0 4 \* \* \*"
+ AppConfig[:demo\_db\_backup\_number\_to\_keep] = 7
+
+ +

Solr indexes can always be recreated from the contents of the +database, but backing them up can reduce your recovery time if +disaster strikes on a large site. You can create periodic Solr +snapshots using the following configuration settings:

+ +
 # Create one snapshot at midnight and keep only one.
+ #
+ # Solr snapshots are written to 'data/solr_backups' by default.
+ AppConfig[:solr_backup_schedule] = "0 0 \* \* \*"
+ AppConfig[:solr\_backup\_number\_to\_keep] = 1
+
+ +

Creating backups using the provided script

+ +

ArchivesSpace provides some simple scripts for backing up a single +instance to a .zip file. You can run:

+ +
 scripts/backup.sh --output /path/to/backup-yyyymmdd.zip
+
+ +

and the script will generate a file containing:

+ + + +

If you are running against MySQL and have mysqldump installed, you +can also provide the --mysqldump option. This will read the +database settings from your configuration file and add a dump of your +MySQL database to the resulting .zip file.

+ +
 scripts/backup.sh --mysqldump --output ~/backups/backup-yyyymmdd.zip
+
+ +

Recovering from backup

+ +

When recovering an ArchivesSpace installation from backup, you will +need to restore:

+ + + +

Of the two, the database backup is the most crucial, your ArchivesSpace records +are all stored in your MySQL database. The solr search indexes are worth restoring +if you have backups, but they can be recreated from scratch if necessary.

+ +

Recovering your database

+ +

If you are using MySQL, recovering your database just requires loading +your mysqldump backup into an empty database. If you are using the +scripts/backup.sh script (described above), this dump file is named +mysqldump.sql in your backup .zip file.

+ +

To load a MySQL dump file, follow the directions in Set up your MySQL +database to create an empty database with the appropriate +permissions. Then, populate the database from your backup file using +the MySQL client:

+ +
`mysql -uas -p archivesspace < mysqldump.sql`, where
+  `as` is the user name
+  `archivesspace` is the database name
+  `mysqldump.sql` is the mysqldump filename
+
+ +

You will be prompted for the password of the user.

+ +

If you are using the demo database, your backup .zip file will +contain a directory called demo_db_backups. Each subdirectory of +demo_db_backups contains a backup of the demo database. To +restore from a backup, copy its archivesspace_demo_db directory back +to your ArchivesSpace data directory. For example:

+ +
 cp -a /unpacked/zip/demo_db_backups/demo_db_backup_1373323208_25926/archivesspace_demo_db \
+       /path/to/archivesspace/data/
+
+ + + +

This step is optional since indexes can be rebuilt from the contents +of the database. However, recovering your search indexes can reduce +the time needed to get your system running again.

+ +

The backup .zip file contains two directories used by the +ArchivesSpace indexer:

+ + + +

To restore these directories from backup:

+ + + +

For example:

+ +
 mkdir -p /path/to/archivesspace/data/solr_index
+
+ cp -a /unpacked/zip/solr.backup-26475-1373323208/snapshot.20130709084008464 \
+       /path/to/archivesspace/data/solr_index/index
+
+ cp -a /unpacked/zip/solr.backup-26475-1373323208/indexer_state \
+       /path/to/archivesspace/data/
+
+ +

Checking your search indexes

+ +

ArchivesSpace ships with a script that can run Lucene’s CheckIndex +tool for you, verifying that a given Solr index is free from +corruption. To test an index, run the following command from your +archivesspace directory:

+ +
 # Or scripts/checkindex.bat for Windows
+ scripts/checkindex.sh data/solr_index/index
+
+ +

You can use the same script to check that your Solr backups are valid:

+ +
 scripts/checkindex.sh /unpacked/zip/solr.backup-26475-1373323208/snapshot.20130709084008464
+
+ + +
+ +
+ + + diff --git a/administration/backup.md b/administration/backup.md deleted file mode 100644 index a846968d..00000000 --- a/administration/backup.md +++ /dev/null @@ -1,150 +0,0 @@ -# Backup and recovery - -## Managing your own backups - -Performing regular backups of your MySQL database is critical. ArchivesSpace stores -all of your records data in the database, so as long as you have backups of your -database then you can always recover from errors and failures. - -If you are running MySQL, the `mysqldump` utility can dump the database -schema and data to a file. It's a good idea to run this with the -`--single-transaction` option to avoid locking your database tables -while your backups run. It is also essential to use the `--routines` -flag, which will include functions and stored procedures in the -backup. The `mysqldump` utility is widely used, and there are many tutorials -available. As an example, something like this in your `crontab` would backup your -database twice daily: - - # Dump archivesspace database 6am and 6pm - 30 06,18 * * * mysqldump -u as -pas123 archivesspace | gzip > ~/backups/db.$(date +%F.%H%M%S).sql.gz - -You should store backups in a safe location. - -If you are running with the demo database (NEVER run the demo database in production), -you can create periodic database snapshots using the following configuration settings: - - # In this example, we create a snapshot at 4am each day and keep - # 7 days' worth of backups - # - # Database snapshots are written to 'data/demo_db_backups' by - # default. - AppConfig[:demo_db_backup_schedule] = "0 4 \* \* \*" - AppConfig[:demo\_db\_backup\_number\_to\_keep] = 7 - -Solr indexes can always be recreated from the contents of the -database, but backing them up can reduce your recovery time if -disaster strikes on a large site. You can create periodic Solr -snapshots using the following configuration settings: - - # Create one snapshot at midnight and keep only one. 
- # - # Solr snapshots are written to 'data/solr_backups' by default. - AppConfig[:solr_backup_schedule] = "0 0 \* \* \*" - AppConfig[:solr\_backup\_number\_to\_keep] = 1 - -## Creating backups using the provided script - -ArchivesSpace provides some simple scripts for backing up a single -instance to a `.zip` file. You can run: - - scripts/backup.sh --output /path/to/backup-yyyymmdd.zip - -and the script will generate a file containing: - - * A snapshot of the demo database (if you're using the demo database). - NEVER use the demo database in production. - * A snapshot of the Solr index and related indexer files - -If you are running against MySQL and have `mysqldump` installed, you -can also provide the `--mysqldump` option. This will read the -database settings from your configuration file and add a dump of your -MySQL database to the resulting `.zip` file. - - scripts/backup.sh --mysqldump --output ~/backups/backup-yyyymmdd.zip - -## Recovering from backup - -When recovering an ArchivesSpace installation from backup, you will -need to restore: - - * Your database (either the demo database or MySQL) - * The search indexes and related indexer files (optional) - -Of the two, the database backup is the most crucial, your ArchivesSpace records -are all stored in your MySQL database. The solr search indexes are worth restoring -if you have backups, but they can be recreated from scratch if necessary. - - -### Recovering your database - -If you are using MySQL, recovering your database just requires loading -your `mysqldump` backup into an empty database. If you are using the -`scripts/backup.sh` script (described above), this dump file is named -`mysqldump.sql` in your backup `.zip` file. - -To load a MySQL dump file, follow the directions in *Set up your MySQL -database* to create an empty database with the appropriate -permissions. 
Then, populate the database from your backup file using -the MySQL client: - - `mysql -uas -p archivesspace < mysqldump.sql`, where - `as` is the user name - `archivesspace` is the database name - `mysqldump.sql` is the mysqldump filename - -You will be prompted for the password of the user. - -If you are using the demo database, your backup `.zip` file will -contain a directory called `demo_db_backups`. Each subdirectory of -`demo_db_backups` contains a backup of the demo database. To -restore from a backup, copy its `archivesspace_demo_db` directory back -to your ArchivesSpace data directory. For example: - - cp -a /unpacked/zip/demo_db_backups/demo_db_backup_1373323208_25926/archivesspace_demo_db \ - /path/to/archivesspace/data/ - - - -### Recovering the search indexes and related indexer files - -This step is optional since indexes can be rebuilt from the contents -of the database. However, recovering your search indexes can reduce -the time needed to get your system running again. - -The backup `.zip` file contains two directories used by the -ArchivesSpace indexer: - - * solr.backup-[timestamp]/snapshot.[timestamp] -- a snapshot of the - index files. - * solr.backup-[timestamp]/indexer_state -- the files used by the - indexer to remember what it last indexed. - -To restore these directories from backup: - - * Copy your index snapshot to `/path/to/archivesspace/data/solr_index/index` - * Copy your indexer_state backup to `/path/to/archivesspace/data/indexer_state` - -For example: - - mkdir -p /path/to/archivesspace/data/solr_index - - cp -a /unpacked/zip/solr.backup-26475-1373323208/snapshot.20130709084008464 \ - /path/to/archivesspace/data/solr_index/index - - cp -a /unpacked/zip/solr.backup-26475-1373323208/indexer_state \ - /path/to/archivesspace/data/ - - -### Checking your search indexes - -ArchivesSpace ships with a script that can run Lucene's CheckIndex -tool for you, verifying that a given Solr index is free from -corruption. 
To test an index, run the following command from your -`archivesspace` directory: - - # Or scripts/checkindex.bat for Windows - scripts/checkindex.sh data/solr_index/index - -You can use the same script to check that your Solr backups are valid: - - scripts/checkindex.sh /unpacked/zip/solr.backup-26475-1373323208/snapshot.20130709084008464 diff --git a/administration/getting_started.html b/administration/getting_started.html new file mode 100644 index 00000000..8ccc2432 --- /dev/null +++ b/administration/getting_started.html @@ -0,0 +1,301 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/getting_started.md + +

+ +

+ + Report issue on Jira + administration/getting_started.md + +

+
+
+ +

Getting started with ArchivesSpace

+ +

System requirements

+ +

Operating system

+ +

ArchivesSpace has been tested on Ubuntu Linux, Mac OS X, and Windows.

+ +

Memory

+ +

At least 1024 MB RAM allocated to the application are required. We recommend using at least 2 GB for optimal performance.

+ +

Java Runtime Environment

+ +

We recommend using OpenJDK. The following table lists the supported Java versions for each version of ArchivesSpace:

+ + + + + + + + + + + + + + + + + + +
ArchivesSpace versionOpenJDK version
≤ v3.5.18 or 11
≥ v4.0.011 or 17
+ +

Solr

+ +

Up to ArchivesSpace v3.1.1, the zip file distribution includes an embedded Solr v4 instance, which is deprecated and not supported anymore.

+ +

ArchivesSpace v3.2.0 or above requires an external Solr instance. The table below summarizes the supported Solr versions for each ArchivesSpace version:

+ + + + + + + + + + + + + + + + + + + + + + +
ArchivesSpace versionExternal Solr version
≤ v3.1.1no external solr required
v3.1.1 up to v3.5.18 (8.11)
≥ v4.0.09 (9.4.1)
+ +

Each ArchivesSpace version is tested for compatibility with the corresponding Solr version listed in the table above. +That version is being used during development and the ArchivesSpace automated tests run with that version. It is therefore recommended that you use that version of Solr in production.

+ +

It may be possible to use ArchivesSpace with an older version of Solr. However in that case it +is important to check the release notes +for any potential version compatibility issues.

+ +

Note: the ArchivesSpace Program Team can only provide support for Solr deployments +using the “officially” supported version with the standard configuration provided by +the application. Everything else will be treated as “best effort” community-led support.

+ +

See Running with external Solr for more information on installing and upgrading Solr.

+ +

Database

+ +

While ArchivesSpace does include an embedded database, MySQL is required for production use.

+ +

(While not officially supported by ArchivesSpace, some community members use MariaDB so there is some community support for version 10.4.10 only.)

+ +

The embedded database is for testing purposes only. You should use MySQL or MariaDB for any data intended for production, including data in a test instance that you intend to move over to a production instance.

+ +

All ArchivesSpace versions can run on MySQL version 5.x or 8.x.

+ +

Getting started

+ +

The quickest way to get ArchivesSpace up and running is to download +the latest distribution .zip file from the following URL:

+ +

https://github.com/archivesspace/archivesspace/releases

+ +

You will need to have Java installed on your machine. You can check your Java version by running the command:

+ +
 java -version
+
+ +

See above for the Java version needed. If you are running an earlier version of Java, upgrade to one of the supported ones (not the newest one). If you are running a newer version of Java, you should revert back to or force your machine to use a supported version.

+ +

When you extract the .zip file, it will create a directory called +archivesspace. Next, follow the instructions for setting up:

+ + + +

From any ArchivesSpace version > 3.1.0 external Solr is required. Earlier versions provided an embedded Solr v4 instance, which is now unsupported due to its age.

+ +

Do not proceed until MySQL and Solr are running.

+ +

To run the system, just execute the appropriate +startup script for your platform. On Linux and OSX:

+ +
 cd /path/to/archivesspace
+ ./archivesspace.sh
+
+ +

and for Windows:

+ +
 cd \path\to\archivesspace
+ archivesspace.bat
+
+ +

This will start ArchivesSpace running in foreground mode (so it will +shut down when you close your terminal window). Log output will be +written to the file logs/archivesspace.out (by default).

+ +

Note: If you’re running Windows and you get an error message like +unable to resolve type 'size_t' or no such file to load -- bundler, +make sure that there are no spaces in any part of the path name in which the +ArchivesSpace directory is located.

+ +

Start ArchivesSpace

+ +

The first time it starts, the system will take a minute or so to start +up. Once it is ready, confirm that ArchivesSpace is running correctly by +accessing the following URLs in your browser:

+ + + +

To start using the Staff interface application, log in using the administrator +account:

+ + + +

Then, you can create a new repository by selecting “System” -> “Manage +repositories” at the top right hand side of the screen. From the +“System” menu, you can perform a variety of administrative tasks, such +as creating and modifying user accounts. Be sure to change the +“admin” user’s password at this time.

+ + +
+ +
+ + + diff --git a/administration/getting_started.md b/administration/getting_started.md deleted file mode 100644 index 7f083fb0..00000000 --- a/administration/getting_started.md +++ /dev/null @@ -1,123 +0,0 @@ -# Getting started with ArchivesSpace - -## System requirements - -### Operating system - -ArchivesSpace has been tested on Ubuntu Linux, Mac OS X, and Windows. - -### Memory - -At least 1024 MB RAM allocated to the application are required. We recommend using at least 2 GB for optimal performance. - -### Java Runtime Environment - -We recommend using [OpenJDK](https://openjdk.org/projects/jdk/). The following table lists the supported Java versions for each version of ArchivesSpace: - -| ArchivesSpace version | OpenJDK version | -|-----------------------|-----------------| -| ≤ v3.5.1 | 8 or 11 | -| ≥ v4.0.0 | 11 or 17 | - -### Solr - -Up to ArchivesSpace v3.1.1, the zip file distribution includes an embedded Solr v4 instance, which is deprecated and not supported anymore. - -ArchivesSpace v3.2.0 or above requires an external Solr instance. The table below summarizes the supported Solr versions for each ArchivesSpace version: - -| ArchivesSpace version | External Solr version | -|-----------------------|---------------------------| -| ≤ v3.1.1 | no external solr required | -| v3.1.1 up to v3.5.1 | 8 (8.11) | -| ≥ v4.0.0 | 9 (9.4.1) | - -Each ArchivesSpace version is tested for compatibility with the corresponding Solr version listed in the table above. -That version is being used during development and the ArchivesSpace automated tests run with that version. It is therefore recommended that you use that version of Solr in production. - -It may be possible to use ArchivesSpace with an older version of Solr. However in that case it -is important to check the [release notes](https://github.com/archivesspace/archivesspace/releases) -for any potential version compatibility issues. 
- -**Note: the ArchivesSpace Program Team can only provide support for Solr deployments -using the "officially" supported version with the standard configuration provided by -the application. Everything else will be treated as "best effort" community-led support.** - -See [Running with external Solr](../provisioning/solr.html) for more information on installing and upgrading Solr. - -### Database - -While ArchivesSpace does include an embedded database, MySQL is required for production use. - -(While not officially supported by ArchivesSpace, some community members use MariaDB so there is some community support for version 10.4.10 only.) - -**The embedded database is for testing purposes only. You should use MySQL or MariaDB for any data intended for production, including data in a test instance that you intend to move over to a production instance.** - -All ArchivesSpace versions can run on MySQL version 5.x or 8.x. - -## Getting started - -The quickest way to get ArchivesSpace up and running is to download -the latest distribution `.zip` file from the following URL: - - [https://github.com/archivesspace/archivesspace/releases](https://github.com/archivesspace/archivesspace/releases) - -You will need to have Java installed on your machine. You can check your Java version by running the command: - - java -version - -See [above](#java-runtime-environment) for the Java version needed. If you are running an earlier version of java upgrade to one of the supported ones (not the newest one). If you are running a newer version of Java you should revert back to or force your machine to use a supported version. - -When you extract the `.zip` file, it will create a directory called -`archivesspace`. Next, follow the instructions for setting up: - -* [MySQL](../provisioning/mysql.html) -* for version 3.2 and above, [Solr](../provisioning/solr.html) is also required - -**From any ArchivesSpace version > 3.1.0 external Solr is required. 
Earlier versions provided an embedded Solr v4 instance, which is now unsupported due to its age.** - -**Do not proceed until MySQL and Solr are running.** - -To run the system, just execute the appropriate -startup script for your platform. On Linux and OSX: - - cd /path/to/archivesspace - ./archivesspace.sh - -and for Windows: - - cd \path\to\archivesspace - archivesspace.bat - -This will start ArchivesSpace running in foreground mode (so it will -shut down when you close your terminal window). Log output will be -written to the file `logs/archivesspace.out` (by default). - -**Note:** If you're running Windows and you get an error message like -`unable to resolve type 'size_t'` or `no such file to load -- bundler`, -make sure that there are no spaces in any part of the path name in which the -ArchivesSpace directory is located. - -### Start ArchivesSpace - -The first time it starts, the system will take a minute or so to start -up. Once it is ready, confirm that ArchivesSpace is running correctly by -accessing the following URLs in your browser: - - - http://localhost:8089/ -- the backend - - http://localhost:8080/ -- the staff interface - - http://localhost:8081/ -- the public interface - - http://localhost:8082/ -- the OAI-PMH server - - http://localhost:8090/ -- the Solr admin console - - -To start using the Staff interface application, log in using the adminstrator -account: - -* Username: `admin` -* Password: `admin` - -Then, you can create a new repository by selecting "System" -> "Manage -repositories" at the top right hand side of the screen. From the -"System" menu, you can perform a variety of administrative tasks, such -as creating and modifying user accounts. 
**Be sure to change the -"admin" user's password at this time.** diff --git a/administration/index.html b/administration/index.html new file mode 100644 index 00000000..43238ac0 --- /dev/null +++ b/administration/index.html @@ -0,0 +1,151 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/README.md + +

+ +

+ + Report issue on Jira + administration/README.md + +

+
+
+ +

Basic ArchivesSpace administration

+ + + + +
+ +
+ + + diff --git a/administration/indexes.html b/administration/indexes.html new file mode 100644 index 00000000..837d3439 --- /dev/null +++ b/administration/indexes.html @@ -0,0 +1,233 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/indexes.md + +

+ +

+ + Report issue on Jira + administration/indexes.md + +

+
+
+ +

Re-creating indexes

+ +

There are two strategies for reindexing ArchivesSpace:

+ + + +

Soft reindex

+ +

A soft reindex updates the existing documents in Solr without directly +touching the actual index documents on the filesystem. This can be done +while the system is running and is suitable for most use cases.

+ +

There are two common ways to perform a soft reindex:

+ +
    +
  1. Delete indexer state files
  2. +
+ +

ArchivesSpace keeps track of what has been indexed by using the files +under data/indexer_state and data/indexer_pui_state (for the PUI).

+ +

If these files are missing, the indexer assumes that nothing has been +indexed and reindexes everything. To force ArchivesSpace to reindex all +records, just delete the files in /path/to/archivesspace/data/indexer_state +and /path/to/archivesspace/data/indexer_pui_state.

+ +

You also can do this selectively by record type, for example, to reindex +accessions in repository 2 delete the file called 2_accession.dat.

+ +
    +
  1. Bump system_mtime values in the database
  2. +
+ +

If you update a record’s system_mtime it becomes eligible for reindexing.

+ +
#reindex all resources
+UPDATE resource SET system_mtime = NOW();
+#reindex resource 1
+UPDATE resource SET system_mtime = NOW() WHERE id = 1;
+
+ +

Full reindex

+ +

A full reindex is a complete rebuild of the index from the database. This +may be required if you are having indexer issues, in the case of index +corruption, or if called for by an upgrade owing to changes in ArchivesSpace’s +Solr configuration.

+ +

To perform a full reindex:

+ +

ArchivesSpace <= 3.1.0 (embedded Solr)

+ + + +

ArchivesSpace > 3.1.0 (external Solr)

+ +

For external Solr there is a plugin that can perform all of the re-indexing steps: aspace-reindexer

+ +

Manual steps:

+ + + +
+ +

You can watch the Tips for indexing ArchivesSpace youtube video to see these steps performed.

+ +
+ + +
+ +
+ + + diff --git a/administration/indexes.md b/administration/indexes.md deleted file mode 100644 index c1c4c328..00000000 --- a/administration/indexes.md +++ /dev/null @@ -1,76 +0,0 @@ -# Re-creating indexes - -There are two strategies for reindexing ArchivesSpace: - -- soft reindex -- full reindex - -## Soft reindex - -A soft reindex updates the existing documents in Solr without directly -touching the actual index documents on the filesystem. This can be done -while the system is running and is suitable for most use cases. - -There are two common ways to perform a soft reindex: - -1. Delete indexer state files - -ArchivesSpace keeps track of what has been indexed by using the files -under `data/indexer_state` and `data/indexer_pui_state` (for the PUI). - -If these files are missing, the indexer assumes that nothing has been -indexed and reindexes everything. To force ArchivesSpace to reindex all -records, just delete the files in `/path/to/archivesspace/data/indexer_state` -and `/path/to/archivesspace/data/indexer_pui_state`. - -You also can do this selectively by record type, for example, to reindex -accessions in repository 2 delete the file called `2_accession.dat`. - -2. Bump `system_mtime` values in the database - -If you update a record's `system_mtime` it becomes eligible for reindexing. - -```sql -#reindex all resources -UPDATE resource SET system_mtime = NOW(); -#reindex resource 1 -UPDATE resource SET system_mtime = NOW() WHERE id = 1; -``` - -## Full reindex - -A full reindex is a complete rebuild of the index from the database. This -may be required if you are having indexer issues, in the case of index -corruption, or if called for by an upgrade owing to changes in ArchivesSpace's -Solr configuration. 
- -To perform a full reindex: - -### ArchivesSpace <= 3.1.0 (embedded Solr) - -- Shutdown ArchivesSpace -- Delete these directories: - - `rm -rf /path/to/archivesspace/data/indexer_state/` - - `rm -rf /path/to/archivesspace/data/indexer_pui_state/` - - `rm -rf /path/to/archivesspace/data/solr_index/` -- Restart ArchivesSpace - -### ArchivesSpace > 3.1.0 (external Solr) - -For external Solr there is a plugin that can perform all of the re-indexing steps: [aspace-reindexer](https://github.com/lyrasis/aspace-reindexer) - -Manual steps: - -- Shutdown ArchivesSpace -- Delete these directories: - - `rm -rf /path/to/archivesspace/data/indexer_state/` - - `rm -rf /path/to/archivesspace/data/indexer_pui_state/` -- Perform a delete all Solr query: - - `curl -X POST -H 'Content-Type: application/json' --data-binary '{"delete":{"query":"*:*" }}' http://${solrUrl}:${solrPort}/solr/archivesspace/update?commit=true` -- Restart ArchivesSpace - ---- - -You can watch the [Tips for indexing ArchivesSpace](https://www.youtube.com/watch?v=yFJ6yAaPa3A) youtube video to see these steps performed. - ---- diff --git a/administration/logrotate.html b/administration/logrotate.html new file mode 100644 index 00000000..b0f34d37 --- /dev/null +++ b/administration/logrotate.html @@ -0,0 +1,163 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/logrotate.md + +

+ +

+ + Report issue on Jira + administration/logrotate.md + +

+
+
+ +

Log Rotation

+ +

In order to prevent your ArchivesSpace log file from growing excessively, you can set up log rotation. How to set up log rotation is specific to your institution but here is an example logrotate config file with an explanation of what it does.

+ +

/etc/logrotate.d/

+ +
  /<install location>/archivesspace/logs/archivesspace.out {
+          daily
+          rotate 7
+          compress
+          notifempty
+          missingok
+          copytruncate
+   }
+
+

this example configuration file:

+ + + +
+ +
+ + + diff --git a/administration/logrotate.md b/administration/logrotate.md deleted file mode 100644 index d5c8757c..00000000 --- a/administration/logrotate.md +++ /dev/null @@ -1,23 +0,0 @@ -# Log Rotation - -In order to prevent your ArchivesSpace log file from growing excessively, you can set up log rotation. How to set up log rotation is specific to your institution but here is an example logrotate config file with an explanation of what it does. - -`/etc/logrotate.d/` - -```` - //archivesspace/logs/archivesspace.out { - daily - rotate 7 - compress - notifempty - missingok - copytruncate - } - ```` - this example configuration file: - * rotates the logs daily - * keeps 7 days worth of logs - * compresses the logs so they take up less space - * ignores empty logs - * does not report errors if the log file is missing - * creates a copy of the original log file for rotation before truncating the contents of the original file diff --git a/administration/passwords.html b/administration/passwords.html new file mode 100644 index 00000000..984e56b2 --- /dev/null +++ b/administration/passwords.html @@ -0,0 +1,151 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/passwords.md + +

+ +

+ + Report issue on Jira + administration/passwords.md + +

+
+
+ +

Resetting passwords

+ +

Under the scripts directory you will find a script that lets you +reset a user’s password. You can invoke it as:

+ +
scripts/password-reset.sh theusername newpassword  # or password-reset.bat under Windows
+
+ +

If you are running against MySQL, you can use this command to set a +password while the system is running. If you are running against the +demo database, you will need to shutdown ArchivesSpace before running +this script.

+ + +
+ +
+ + + diff --git a/administration/passwords.md b/administration/passwords.md deleted file mode 100644 index e280e5a3..00000000 --- a/administration/passwords.md +++ /dev/null @@ -1,11 +0,0 @@ -# Resetting passwords - -Under the `scripts` directory you will find a script that lets you -reset a user's password. You can invoke it as: - - scripts/password-reset.sh theusername newpassword # or password-reset.bat under Windows - -If you are running against MySQL, you can use this command to set a -password while the system is running. If you are running against the -demo database, you will need to shutdown ArchivesSpace before running -this script. diff --git a/administration/unix_daemon.html b/administration/unix_daemon.html new file mode 100644 index 00000000..ec7d1e25 --- /dev/null +++ b/administration/unix_daemon.html @@ -0,0 +1,190 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/unix_daemon.md + +

+ +

+ + Report issue on Jira + administration/unix_daemon.md + +

+
+
+ +

Running ArchivesSpace as a Unix daemon

+ +

The archivesspace.sh startup script doubles as an init script. If +you run:

+ +
 archivesspace.sh start
+
+ +

ArchivesSpace will run in the background as a daemon (logging to +logs/archivesspace.out by default, as before). You can shut it down with:

+ +
 archivesspace.sh stop
+
+ +

You can even install it as a system-wide init script by creating a +symbolic link:

+ +
 cd /etc/init.d
+ ln -s /path/to/your/archivesspace/archivesspace.sh archivesspace
+
+ +

Note: By default ArchivesSpace will overwrite the log file when restarted. You +can change that by modifying archivesspace.sh and changing the $startup_cmd +to include double greater than signs:

+ +
 $startup_cmd &>> \"$ARCHIVESSPACE_LOGS\" &
+
+ +

Then use the appropriate tool for your distribution to set up the +run-level symbolic links (such as chkconfig for RedHat or +update-rc.d for Debian-based distributions).

+ +

Note that you may want to edit archivesspace.sh to set the account +that the system runs under, JVM options, and so on.

+ +

For systems that use systemd you may wish to use a Systemd unit file for ArchivesSpace

+ +

Something similar to this should work:

+
[Unit]
+Description=ArchivesSpace Application
+After=syslog.target network.target
+[Service]
+Type=simple
+ExecStart=/path/to/your/archivesspace/archivesspace.sh
+ExecStop=/path/to/your/archivesspace/archivesspace.sh
+PIDFile=/path/to/your/archivesspace/archivesspace.pid
+User=archivesspace
+Group=archivesspace
+[Install]
+WantedBy=multi-user.target
+
+ + +
+ +
+ + + diff --git a/administration/unix_daemon.md b/administration/unix_daemon.md deleted file mode 100644 index 529880aa..00000000 --- a/administration/unix_daemon.md +++ /dev/null @@ -1,49 +0,0 @@ -# Running ArchivesSpace as a Unix daemon - -The `archivesspace.sh` startup script doubles as an init script. If -you run: - - archivesspace.sh start - -ArchivesSpace will run in the background as a daemon (logging to -`logs/archivesspace.out` by default, as before). You can shut it down with: - - archivesspace.sh stop - -You can even install it as a system-wide init script by creating a -symbolic link: - - cd /etc/init.d - ln -s /path/to/your/archivesspace/archivesspace.sh archivesspace - -Note: By default ArchivesSpace will overwrite the log file when restarted. You -can change that by modifying `archivesspace.sh` and changing the `$startup_cmd` -to include double greater than signs: - - $startup_cmd &>> \"$ARCHIVESSPACE_LOGS\" & - - -Then use the appropriate tool for your distribution to set up the -run-level symbolic links (such as `chkconfig` for RedHat or -`update-rc.d` for Debian-based distributions). - -Note that you may want to edit archivesspace.sh to set the account -that the system runs under, JVM options, and so on. 
- -For systems that use systemd you may wish to use a Systemd unit file for ArchivesSpace - -Something similar to this should work: -``` -[Unit] -Description=ArchivesSpace Application -After=syslog.target network.target -[Service] -Type=simple -ExecStart=/path/to/your/archivesspace/archivesspace.sh -ExecStop=/path/to/your/archivesspace/archivesspace.sh -PIDFile=/path/to/your/archivesspace/archivesspace.pid -User=archivesspacespace -Group=archivesspacespace -[Install] -WantedBy=multi-user.target -``` diff --git a/administration/upgrading.html b/administration/upgrading.html new file mode 100644 index 00000000..9bc2d0ea --- /dev/null +++ b/administration/upgrading.html @@ -0,0 +1,309 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/upgrading.md + +

+ +

+ + Report issue on Jira + administration/upgrading.md + +

+
+
+ +

Upgrading to a new release of ArchivesSpace

+ + + +

Create a backup of your ArchivesSpace instance

+ +

You should make sure you have a working backup of your ArchivesSpace +installation before attempting an upgrade. Follow the steps +under the Backup and recovery section to do this.

+ +

Unpack the new version

+ +

It’s a good idea to unpack a fresh copy of the version of +ArchivesSpace you are upgrading to. This will ensure that you are +running the latest versions of all files. In the examples below, +replace the lower case x with the version number updating to. For example, +1.5.2 or 1.5.3.

+ +

For example, on Mac OS X or Linux:

+ +
 $ mkdir archivesspace-1.5.x
+ $ cd archivesspace-1.5.x
+ $ curl -LJO https://github.com/archivesspace/archivesspace/releases/download/v1.5.x/archivesspace-v1.5.x.zip
+ $ unzip -x archivesspace-v1.5.x.zip
+
+ +

( The curl step is optional and simply downloads the distribution from github. You can also + simply download the zip file in your browser and copy it to the directory )

+ +

On Windows, you can do the same by extracting ArchivesSpace into a new +folder you create in Windows Explorer.

+ +

Shut down your ArchivesSpace instance

+ +

To ensure you get a consistent copy, you will need to shut down your +running ArchivesSpace instance now.

+ +

Copy your configuration and data files

+ +

You will need to bring across the following files and directories from +your original ArchivesSpace installation:

+ +
    +
  • the data directory (see Indexes note below)
  • +
  • the config directory (see Configuration note below)
  • +
  • your lib/mysql-connector*.jar file (if using MySQL)
  • +
  • any plugins and local modifications you have installed in your plugins directory
  • +
+ +

For example, on Mac OS X or Linux:

+ +
 $ cd archivesspace-1.5.x/archivesspace
+ $ cp -a /path/to/archivesspace-1.4.2/archivesspace/data/* data/
+ $ cp -a /path/to/archivesspace-1.4.2/archivesspace/config/* config/
+ $ cp -a /path/to/archivesspace-1.4.2/archivesspace/lib/mysql-connector* lib/
+ $ cp -a /path/to/archivesspace-1.4.2/archivesspace/plugins/local plugins/
+ $ cp -a /path/to/archivesspace-1.4.2/archivesspace/plugins/wonderful_plugin plugins/
+
+ +

Or on Windows:

+ +
 $ cd archivesspace-1.5.x\archivesspace
+ $ xcopy \path\to\archivesspace-1.4.2\archivesspace\data\* data /i /k /h /s /e /o /x /y
+ $ xcopy \path\to\archivesspace-1.4.2\archivesspace\config\* config /i /k /h /s /e /o /x /y
+ $ xcopy \path\to\archivesspace-1.4.2\archivesspace\lib\mysql-connector* lib /i /k /h /s /e /o /x /y
+ $ xcopy \path\to\archivesspace-1.4.2\archivesspace\plugins\local plugins\local /i /k /h /s /e /o /x /y
+ $ xcopy \path\to\archivesspace-1.4.2\archivesspace\plugins\wonderful_plugin plugins\wonderful_plugin /i /k /h /s /e /o /x /y
+
+ +

Note that you may want to preserve the logs file (logs/archivesspace.out +by default) from your previous installation–just in case you need to +refer to it later.

+ +

Configuration note

+ +

Sometimes a new release of ArchivesSpace will introduce new +configuration settings that weren’t present in previous releases. +Before you replace the distribution config/config.rb with your +original version, it’s a good idea to review the distribution version +to see if there are any new configuration settings of interest.

+ +

Upgrade notes will generally draw attention to any configuration +settings you need to set explicitly, but you never know when you’ll +discover a new, exciting feature! Documentation might also refer to +uncommenting configuration options that won’t be in your file if you +keep your older version.

+ +

Indexes note

+ +

Sometimes a new release of ArchivesSpace will require a FULL reindex +which means you do not want to copy over anything from your data directory +to your new release. The data directory contains the indexes created by Solr. +Check the release notes of the new version for any details about reindexing.

+ +

Transfer your locales data

+ +

If you’ve made modifications to your locales file ( en.yml ) with customized +labels, titles, tooltips, etc., you’ll need to transfer those to your new +locale file.

+ +

A good way to do this is to use a Diff tool, like Notepad++, TextMate, or just +Linux diff command:

+ +
 $ diff /path/to/archivesspace-1.4.2/locales/en.yml /path/to/archivesspace-1.5.x/archivesspace/locales/en.yml
+ $ diff /path/to/archivesspace-1.4.2/locales/enums/en.yml /path/to/archivesspace-v1.5.x/archivesspace/locales/enums/en.yml
+
+ +

This will show you the differences in your current locales files, as well as the +new additions in the new version locales files. Simply copy the values you wish +to keep from your old ArchivesSpace locales to your new ArchivesSpace locales +files.

+ +

Run the database migrations

+ +

With everything copied, the final step is to run the database +migrations. This will apply any schema changes and data migrations +that need to happen as a part of the upgrade. To do this, use the +setup-database script for your platform. For example, on Mac OS X +or Linux:

+ +
 $ cd archivesspace-1.5.x/archivesspace
+ $ scripts/setup-database.sh
+
+ +

Or on Windows:

+ +
 $ cd archivesspace-1.5.x\archivesspace
+ $ scripts\setup-database.bat
+
+ +

If you’re using external Solr (required beginning with version 3.2.0)

+ +

Full instructions for using external Solr with ArchivesSpace

+ +

If you’ve deployed to Tomcat

+ +

The steps to deploy to Tomcat are essentially the same as in the +archivesspace_tomcat

+ +

But, prior to running your setup-tomcat script, you’ll need to be sure to clean out +any libraries from the previous ASpace version from your Tomcat classpath.

+ +
 1. Stop Tomcat
+ 2. Unpack your new version of ArchivesSpace
+ 3. Configure your MySQL database in the config.rb ( just like in the
+    install instructions )
+ 4. Make sure all you other local configuration settings are in your
+    config.rb file ( check your Tomcat conf/config.rb file for your current
+    settings. )
+ 5. Make sure your MySQL connector jar is in the lib directory
+ 6. Run your setup-database script to migrate your database.
+ 7. Delete all ASpace related jar libraries in your Tomcat's lib directory. These
+    will include the "gems" folder, as well as "common.jar" and some
+    [others](https://github.com/archivesspace/archivesspace/tree/master/common/lib).
+    This will make sure you're running the correct version of the dependent
+    libraries for your new ASpace version.
+    Just be sure not to delete any of the Apache Tomcat libraries.
+ 8. Run your setup-tomcat script ( just like in the install instructions ).
+    This will copy all the files over to Tomcat.
+ 9. Start Tomcat
+
+ +

That’s it!

+ +

You can now start your new ArchivesSpace version as normal.

+ + +
+ +
+ + + diff --git a/administration/upgrading.md b/administration/upgrading.md deleted file mode 100644 index cf5366c0..00000000 --- a/administration/upgrading.md +++ /dev/null @@ -1,162 +0,0 @@ -# Upgrading to a new release of ArchivesSpace - -* **[Special considerations when upgrading to v1.1.0](upgrading/UPGRADING_1.1.0.html)** -* **[Special considerations when upgrading to v1.1.1](upgrading/UPGRADING_1.1.1.html)** -* **[Special considerations when upgrading from v1.4.2 to 1.5.x (these considerations also apply when upgrading from 1.4.2 to any version through 2.0.1)](upgrading/UPGRADING_1.5.0.html)** -* **[Special considerations when upgrading to 2.1.0](upgrading/UPGRADING_2.1.0.html)** - -## Create a backup of your ArchivesSpace instance - -You should make sure you have a working backup of your ArchivesSpace -installation before attempting an upgrade. Follow the steps -under the [Backup and recovery section](backup.html) to do this. - -## Unpack the new version - -It's a good idea to unpack a fresh copy of the version of -ArchivesSpace you are upgrading to. This will ensure that you are -running the latest versions of all files. In the examples below, -replace the lower case x with the version number updating to. For example, -1.5.2 or 1.5.3. - -For example, on Mac OS X or Linux: - - $ mkdir archivesspace-1.5.x - $ cd archivesspace-1.5.x - $ curl -LJO https://github.com/archivesspace/archivesspace/releases/download/v1.5.x/archivesspace-v1.5.x.zip - $ unzip -x archivesspace-v1.5.x.zip - -( The curl step is optional and simply downloads the distribution from github. You can also - simply download the zip file in your browser and copy it to the directory ) - -On Windows, you can do the same by extracting ArchivesSpace into a new -folder you create in Windows Explorer. - -## Shut down your ArchivesSpace instance - -To ensure you get a consistent copy, you will need to shut down your -running ArchivesSpace instance now. 
- - -## Copy your configuration and data files - -You will need to bring across the following files and directories from -your original ArchivesSpace installation: - - * the `data` directory (see **Indexes note** below) - * the `config` directory (see **Configuration note** below) - * your `lib/mysql-connector*.jar` file (if using MySQL) - * any plugins and local modifications you have installed in your `plugins` directory - -For example, on Mac OS X or Linux: - - $ cd archivesspace-1.5.x/archivesspace - $ cp -a /path/to/archivesspace-1.4.2/archivesspace/data/* data/ - $ cp -a /path/to/archivesspace-1.4.2/archivesspace/config/* config/ - $ cp -a /path/to/archivesspace-1.4.2/archivesspace/lib/mysql-connector* lib/ - $ cp -a /path/to/archivesspace-1.4.2/archivesspace/plugins/local plugins/ - $ cp -a /path/to/archivesspace-1.4.2/archivesspace/plugins/wonderful_plugin plugins/ - -Or on Windows: - - $ cd archivesspace-1.5.x\archivesspace - $ xcopy \path\to\archivesspace-1.4.2\archivesspace\data\* data /i /k /h /s /e /o /x /y - $ xcopy \path\to\archivesspace-1.4.2\archivesspace\config\* config /i /k /h /s /e /o /x /y - $ xcopy \path\to\archivesspace-1.4.2\archivesspace\lib\mysql-connector* lib /i /k /h /s /e /o /x /y - $ xcopy \path\to\archivesspace-1.4.2\archivesspace\plugins\local plugins\local /i /k /h /s /e /o /x /y - $ xcopy \path\to\archivesspace-1.4.2\archivesspace\plugins\wonderful_plugin plugins\wonderful_plugin /i /k /h /s /e /o /x /y - - -Note that you may want to preserve the logs file (`logs/archivesspace.out` -by default) from your previous installation--just in case you need to -refer to it later. - -### Configuration note - -Sometimes a new release of ArchivesSpace will introduce new -configuration settings that weren't present in previous releases. -Before you replace the distribution `config/config.rb` with your -original version, it's a good idea to review the distribution version -to see if there are any new configuration settings of interest. 
- -Upgrade notes will generally draw attention to any configuration -settings you need to set explicitly, but you never know when you'll -discover a new, exciting feature! Documentation might also refer to -uncommenting configuration options that won't be in your file if you -keep your older version. - -### Indexes note - -Sometimes a new release of ArchivesSpace will require a FULL reindex -which means you do not want to copy over anything from your data directory -to your new release. The data directory contains the indexes created by Solr. -Check the release notes of the new version for any details about reindexing. - -## Transfer your locales data - -If you've made modifications to you locales file ( en.yml ) with customized -labels, titles, tooltips, etc., you'll need to transfer those to your new -locale file. - -A good way to do this is to use a Diff tool, like Notepad++, TextMate, or just -Linux diff command: - - $ diff /path/to/archivesspace-1.4.2/locales/en.yml /path/to/archivesspace-1.5.x/archivesspace/locales/en.yml - $ diff /path/to/archivesspace-1.4.2/locales/enums/en.yml /path/to/archivesspace-v1.5.x/archivesspace/locales/enums/en.yml - -This will show you the differences in your current locales files, as well as the -new additions in the new version locales files. Simply copy the values you wish -to keep from your old ArchivesSpace locales to your new ArchivesSpace locales -files. - -## Run the database migrations - -With everything copied, the final step is to run the database -migrations. This will apply any schema changes and data migrations -that need to happen as a part of the upgrade. To do this, use the -`setup-database` script for your platform. 
For example, on Mac OS X -or Linux: - - $ cd archivesspace-1.5.x/archivesspace - $ scripts/setup-database.sh - -Or on Windows: - - $ cd archivesspace-1.5.x\archivesspace - $ scripts\setup-database.bat - -## If you're using external Solr (required beginning with version 3.2.0) - -[Full instructions for using external Solr with ArchivesSpace](https://archivesspace.github.io/tech-docs/provisioning/solr.html) - - -## If you've deployed to Tomcat - -The steps to deploy to Tomcat are esentially the same as in the -[archivesspace_tomcat](https://github.com/archivesspace-labs/archivesspace_tomcat) - -But, prior to running your setup-tomcat script, you'll need to be sure to clean out the -any libraries from the previous ASpace version from your Tomcat classpath. - - 1. Stop Tomcat - 2. Unpack your new version of ArchivesSpace - 3. Configure your MySQL database in the config.rb ( just like in the - install instructions ) - 4. Make sure all you other local configuration settings are in your - config.rb file ( check your Tomcat conf/config.rb file for your current - settings. ) - 5. Make sure you MySQL connector jar in the lib directory - 6. Run your setup-database script to migration your database. - 7. Delete all ASpace related jar libraries in your Tomcat's lib directory. These - will include the "gems" folder, as well as "common.jar" and some - [others](https://github.com/archivesspace/archivesspace/tree/master/common/lib). - This will make sure your running the correct version of the dependent - libraries for your new ASpace version. - Just be sure not to delete any of the Apache Tomcat libraries. - 8. Run your setup-tomcat script ( just like in the install instructions ). - This will copy all the files over to Tomcat. - 9. Start Tomcat - -## That's it! - -You can now start your new ArchivesSpace version as normal. 
diff --git a/administration/upgrading/UPGRADING_1.1.0.html b/administration/upgrading/UPGRADING_1.1.0.html new file mode 100644 index 00000000..9fb49678 --- /dev/null +++ b/administration/upgrading/UPGRADING_1.1.0.html @@ -0,0 +1,191 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/upgrading/UPGRADING_1.1.0.md + +

+ +

+ + Report issue on Jira + administration/upgrading/UPGRADING_1.1.0.md + +

+
+
+ +

UPGRADING TO 1.1.0

+ +

Additional upgrade considerations specific to this release. Refer to the upgrade documentation for the standard instructions that apply in all cases.

+ +

External Solr

+
+ +

In ArchivesSpace 1.0.9 the default ports configuration was:

+ +
AppConfig[:backend_url] = "http://localhost:8089"
+AppConfig[:frontend_url] = "http://localhost:8080"
+AppConfig[:solr_url] = "http://localhost:8090"
+AppConfig[:public_url] = "http://localhost:8081"
+
+ +

With the introduction of the optional external Solr instance functionality this has been updated to:

+ +
AppConfig[:backend_url] = "http://localhost:8089"
+AppConfig[:frontend_url] = "http://localhost:8080"
+AppConfig[:solr_url] = "http://localhost:8090"
+AppConfig[:indexer_url] = "http://localhost:8091" # NEW TO 1.1.0
+AppConfig[:public_url] = "http://localhost:8081"
+
+ +

In most cases the default value for indexer_url will blend in seamlessly without you needing to take any action. However if you modified the original values in your config.rb file you may need to update it. Examples:

+ +

You use a different ports sequence

+ +
AppConfig[:indexer_url] = "http://localhost:9091"
+
+ +

You run multiple ArchivesSpace instances on a single host

+ +

Under this deployment scenario you would have changed port numbers for some (or all) instances in each config.rb file, so set the indexer_url for each instance as described above.

+ +

You include hostnames

+ +
AppConfig[:indexer_url] = "http://yourhostname:8091"
+
+ +

Clustering

+
+ +

In a clustered configuration you may need to edit instance_[server hostname].rb files:

+ +
{
+  ...
+  :indexer_url => "http://[localhost|yourhostname]:8091",
+}
+
+ +
+ + +
+ +
+ + + diff --git a/administration/upgrading/UPGRADING_1.1.0.md b/administration/upgrading/UPGRADING_1.1.0.md deleted file mode 100644 index 15d54a8e..00000000 --- a/administration/upgrading/UPGRADING_1.1.0.md +++ /dev/null @@ -1,57 +0,0 @@ -# UPGRADING TO 1.1.0 - -Additional upgrade considerations specific to this release. Refer to the [upgrade documentation](../upgrading.html) for the standard instructions that apply in all cases. - -## External Solr -------------- - -In ArchivesSpace 1.0.9 the default ports configuration was: - -``` -AppConfig[:backend_url] = "http://localhost:8089" -AppConfig[:frontend_url] = "http://localhost:8080" -AppConfig[:solr_url] = "http://localhost:8090" -AppConfig[:public_url] = "http://localhost:8081" -``` - -With the introduction of the [optional external Solr instance](../../provisioning/solr.html) functionality this has been updated to: - -``` -AppConfig[:backend_url] = "http://localhost:8089" -AppConfig[:frontend_url] = "http://localhost:8080" -AppConfig[:solr_url] = "http://localhost:8090" -AppConfig[:indexer_url] = "http://localhost:8091" # NEW TO 1.1.0 -AppConfig[:public_url] = "http://localhost:8081" -``` - -In most cases the default value for `indexer_url` will blend in seamlessly without you needing to take any action. However if you modified the original values in your `config.rb` file you may need to update it. Examples: - -**You use a different ports sequence** - -``` -AppConfig[:indexer_url] = "http://localhost:9091" -``` - -**You run multiple ArchivesSpace instances on a single host** - -Under this deployment scenario you would have changed port numbers for some (or all) instances in each `config.rb` file, so set the `indexer_url` for each instance as described above. - -**You include hostnames** - -``` -AppConfig[:indexer_url] = "http://yourhostname:8091" -``` - -## Clustering ----------- - -In a clustered configuration you may need to edit `instance_[server hostname].rb` files: - -``` -{ - ... 
- :indexer_url => "http://[localhost|yourhostname]:8091", -} -``` - ---- diff --git a/administration/upgrading/UPGRADING_1.1.1.html b/administration/upgrading/UPGRADING_1.1.1.html new file mode 100644 index 00000000..d80bcb20 --- /dev/null +++ b/administration/upgrading/UPGRADING_1.1.1.html @@ -0,0 +1,191 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/upgrading/UPGRADING_1.1.1.md + +

+ +

+ + Report issue on Jira + administration/upgrading/UPGRADING_1.1.1.md + +

+
+
+ +

UPGRADING TO 1.1.1

+ +

Additional upgrade considerations specific to this release. Refer to the upgrade documentation for the standard instructions that apply in all cases.

+ +

Resequencing of Archival Object & Digital Object Component trees

+
+ +

There have been some scenarios in which archival objects and digital object components lose +some of the information used to order their hierarchy. This can result in issues in creation, +editing, or moving items in the tree, since there are database constraints to ensure uniqueness +of certain metadata elements.

+ +

In order to ensure data integrity, there is now a method to resequence the trees. This will +not reorder or edit the elements, but simply rebuild all the technical metadata used to establish +the ordering.

+ +

To run the resequencing process, edit the config/config.rb file to have this line:

+ +
AppConfig[:resequence_on_startup] = true
+
+ +

and restart ArchivesSpace. This will trigger a rebuilding process after the application has +started. It’s advised to let this rebuild process run its course prior to editing records. +This duration depends on the size of your database, which can take seconds ( for databases with +few Archival and Digital Objects ) to hours ( for databases with hundreds of thousands of records ). +Check your log file to see how the process is going. When it has finished, you should see the application +return to its normal operation, generally with only indexer updates being recorded in the log file.

+ +

After you’ve started ArchivesSpace, be sure to change the config.rb file to have the :resequence_on_startup +set to “false”, since you will not need to run this process on every restart.

+ +

Export PDFs in the Public Interface

+
+ +

A common request has been to have PDF version of the EAD exported in the public application. +This has been a bit problematic, since EAD export has a rather large resource hit on the +database, which is only increased by the added process of PDF creation. We are currently +redesigning part of the ArchivesSpace backend to make PDF creation more user friendly by +establishing a queue system for exports.

+ +

In the meantime, Mark Cooper at Lyrasis has made a Public Metadata Formats plugin
+that exposes certain metadata formats and PDFs in the public UI. This plugin has been included +in this release, but you will need to configure it to expose which formats you would like +to have exposed. Please read the plugin documentation on how to configure this.

+ +

PLEASE NOTE: +Exporting large EAD resources with this plugin will most likely cause some problems. Long requests +will timeout, since the server does not want to waste resources on long running processes. +In addition, large number of requests for PDFs can cause an increase load on the server. +Please be aware of the issues and limitations of this plugin before enabling it.

+ +
+ + +
+ +
+ + + diff --git a/administration/upgrading/UPGRADING_1.1.1.md b/administration/upgrading/UPGRADING_1.1.1.md deleted file mode 100644 index 7720a21b..00000000 --- a/administration/upgrading/UPGRADING_1.1.1.md +++ /dev/null @@ -1,55 +0,0 @@ -# UPGRADING TO 1.1.1 - -Additional upgrade considerations specific to this release. Refer to the [upgrade documentation](../upgrading.html) for the standard instructions that apply in all cases. - -## Resequencing of Archival Object & Digital Object Component trees -------------- - -There have been some scenarios in which archival objects and digital object components lose -some of the information used to order their hierarchy. This can result in issues in creation, -editing, or moving items in the tree, since there are database contraints to ensure uniqueness -of certain metadata elements. - -In order to ensure data integrity, there is now method to resequence the trees. This will -not reorder or edit the elements, but simply rebuild all the technical metadata used to establish -the ordering. - -To run the resequencing process, edit the config/config.rb file to have this line: - -``` -AppConfig[:resequence_on_startup] = true -``` - -and restart ArchivesSpace. This will trigger a rebuilding process after the application has -started. It's advised to let this rebuild process run its course prior to editing records. -This duration depends on the size of your database, which can take seconds ( for databases with -few Archival and Digital Objects ) to hours ( for databases with hundreds of thousands of records ). -Check your log file to see how the process is going. When it has finished, you should see the application -return to it normal operation, generally with only indexer updates being recorded in the log file. - -After you've started ArchivesSpace, be sure to change the config.rb file to have the :resequence_on_startup -set to "false", since you will not need to run this process on every restart. 
- -## Export PDFs in the Public Interface -------------- - -A common request has been to have PDF version of the EAD exported in the public application. -This has been a bit problematic, since EAD export has a rather large resource hit on the -database, which is only increased by the added process of PDF creation. We are currently -redesigning part of the ArchivesSpace backend to make PDF creation more user friendly by -establishing a queue system for exports. - -In the meantime, Mark Cooper at Lyrasis has made a [ Public Metadata Formats plugin ](https://github.com/archivesspace-deprecated/aspace-public-formats) -that exposes certain metadata formats and PDFs in the public UI. This plugin has been included -in this release, but you will need to configure it to expose which formats you would like -to have exposed. Please read the plugin documentation on how to configure this. - -PLEASE NOTE: -Exporting large EAD resources with this plugin will most likely cause some problems. Long requests -will timeout, since the server does not want to waste resources on long running processes. -In addition, large number of requests for PDFs can cause an increase load on the server. -Please be aware of the issues and limitations of this plugin before enabling it. - - - ---- diff --git a/administration/upgrading/UPGRADING_1.5.0.html b/administration/upgrading/UPGRADING_1.5.0.html new file mode 100644 index 00000000..3167a3db --- /dev/null +++ b/administration/upgrading/UPGRADING_1.5.0.html @@ -0,0 +1,288 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/upgrading/UPGRADING_1.5.0.md + +

+ +

+ + Report issue on Jira + administration/upgrading/UPGRADING_1.5.0.md + +

+
+
+ +

UPGRADING TO 1.5.0

+ +

Additional upgrade considerations specific to this release, which also apply to upgrading from 1.4.2 or lower to any version through 2.0.1. Refer to the upgrade documentation for the standard instructions that apply in all cases.

+ +

General overview

+ +

The upgrade process to the new data model in 1.5.0 requires considerable data transformation and it is important for users to review this document to understand the implications and possible side-effects.

+ +

A quick overview of the steps are:

+ +
    +
  1. Review this document and understand how the upgrade will impact your data, paying particular attention to the Preparation section .
  2. +
  3. Backup your database.
  4. +
  5. No, really, backup your database.
  6. +
  7. It is suggested that users start with a new solr index. To do this, delete the data/solr_index/index directory and all files in the data/indexer_state directory. The embedded version of Solr has been upgraded, which should result in a much more compact index size.
  8. +
  9. + + + + + + + +
    Follow the standard upgrading instructions. Important to note: The setup-database.sh/.bat script will modify your database schema, but it will not move the data. If you are currently using the container management plugin you will need to remove it from the list of plugins in your config file prior to starting ArchivesSpace.
    +
  10. +
  11. Start ArchivesSpace. When 1.5.0 starts for the first time, a conversion process will kick off and move the data into the new table structure. During this time, the application will be unavailable until it completes. Duration depends on the size of your data and server resources, with a few minutes for very small databases to several hours for very large ones.
  12. +
  13. When the conversion is done, the web application will start and the indexer will rebuild your index. Performance might be slower while the indexer runs, depending on your server environment and available resources.
  14. +
  15. Review the output of the conversion process following the instructions below. How long it takes for the report to load will depend on the number of entries included in it.
  16. +
+ +

Preparing for and Converting to the New Container Management Functionality

+ +

With version 1.5.0, ArchivesSpace is adopting a new data model that will enable more capable and efficient management of the containers in which you store your archival materials. To take advantage of this improved functionality:

+
    +
  • Repositories already using ArchivesSpace as a production application will need to upgrade their ArchivesSpace applications to the version 1.5.0. (This upgrade / conversion must be done to take advantage of any other new features / bug fixes in ArchivesSpace 1.5.0 or later versions.)
  • +
  • Repositories not yet using ArchivesSpace in production but needing to migrate data from the Archivists’ Toolkit or Archon will need to migrate their data to version 1.4.2 of ArchivesSpace or earlier and then upgrade that version to version 1.5.0. (This can be done when your repository is ready to migrate to ArchivesSpace.)
  • +
  • Repositories not yet using ArchivesSpace in production and not needing to migrate data from the Archivists’ Toolkit or Archon can start using ArchivesSpace 1.5.0 without the need of upgrading. (People in this situation do not need to read any further.)
  • +
+ +

Converting the container data model in version 1.4.2 and earlier versions of ArchivesSpace to the 1.5.0 version has some complexity and may not accommodate all the various ways in which container information has been recorded by diverse repositories. As a consequence, upgrading from a pre-1.5.0 version of ArchivesSpace requires planning for the upgrade, reviewing the results, and, possibly, remediating data either prior to or after the final conversion process. Because of all the variations in which container information can be recorded, it is impossible to know all the ways the data of repositories will be impacted. For this reason, all repositories upgrading their ArchivesSpace to version 1.5.0 should do so with a backup of their production ArchivesSpace instance and in a test environment. A conversion may only be undone by reverting back to the source database.

+ +

Frequently Asked Questions

+

How will my data be converted to the new model?

+ +

When your installation is upgraded to 1.5.0, the conversion will happen as part of the upgrade process.

+ +

Can I continue to use the current model for containers and not convert to the new model?

+ +

Because it is such a substantial improvement (see separate announcement for the new features), the new model is required for all using ArchivesSpace 1.5.0 and higher. The only way to continue using the current model is to never upgrade beyond 1.4.2.

+ +

What if I’m already using the container management plugin made available to the community by Yale University?

+ +

Conversion of data created using the Yale container management plugin, or a local adaptation of the plugin, will also happen as part of the process of upgrading to 1.5.0. Some steps will be skipped when they are not needed. At the end of the process, the new container data model will be integrated into your ArchivesSpace and will not need to be loaded or maintained as a plugin.

+ +

Those currently running the container management plugin will need to remove the container management plugin from the list in your config file prior to starting the conversion or a validation name error will occur.

+ +

I haven’t moved from Archivists’ Toolkit or Archon yet and am planning to use the associated migration tool. Can I migrate directly to 1.5.0?

+ +

No, you must migrate to 1.4.2 or earlier versions and then upgrade your installation to 1.5.0 according to the instructions provided here.

+ +

What changes are being made to the previous model for containers?

+ +

The biggest change is the new concept of top containers. A top container is the highest level container in which a particular instance is stored. Top containers are in some ways analogous to the current Container 1, but broken out from the entire container record (child and grandparent container records). As such, top containers enable more efficient recording and updating of the highest level containers in your collection.

+ +

How does ArchivesSpace determine what is a top container?

+ +

During the conversion, ArchivesSpace will find all the Container 1s in your current ArchivesSpace database. It will then evaluate them as follows:

+
    +
  • If containers have barcodes, one top container is created for each unique Container 1 barcode.
  • +
  • If containers do not have barcodes, one top container is created for each unique combination of container 1 indicator and container type 1 within a resource or accession.
  • +
  • Once a top container is created, additional instance records for the same container within an accession or resource will be linked to that top container record.
  • +
+ +

Preparation

+ +

What can I do to prepare my ArchivesSpace data for a smoother conversion to top containers?

+ +
    +
  • If your Container 1s have unique barcodes, you do not need to do anything except verify that your data is complete and accurate. You should run a preliminary conversion as described in the Conversion section and resolve any errors.
  • +
  • If your Container 1s do not have barcodes, but have a nonduplicative container identifier sequence within each accession or resource (e.g. Box 1, Box 2, Box 3), or the identifiers are only reused within an accession or resource for different types of containers (for example, you have a Box 1 through 10 and an Oversize Box 1 through 3) you do not need to do anything except verify that your data is complete and accurate. You should run a preliminary conversion as described in the Conversion section and resolve any errors.
  • +
  • If your Container 1s do not have barcodes and you have parallel numbering sequences, where the same indicators and types are used to refer to different containers within the same accession or resource within some or all accessions or resources (for example, you have a Box 1 in series 1 and a different Box 1 in series 5) you will need to find a way to uniquely identify these containers. One option is to run this barcoder plugin for each resource to which this applies. The barcoder plugin creates barcodes that combine the ID of the highest level archival object ancestor with the container 1 type and indicator. (The barcoder plugin is designed to run against one resource at a time, instead of against all resources, because not all resources in a repository may match this condition.) Once you’ve differentiated your containers with parallel number sequences, you should run a preliminary conversion as described in the Conversion section and resolve any errors.
  • +
+ +

You do not need to make any changes to Container 2 fields or Container 3 fields. Data in these fields will be converted to the new Child and Grandchild container fields that map directly to these fields.

+ +

If you use the current Container Extent fields, these will no longer be available in 1.5.0. Any data in these fields will be migrated to a new Extent sub-record during the conversion. You can evaluate whether this data should remain in an extent record or if it belongs in a container profile or other fields and then move it accordingly after the conversion is complete.

+ +

I have EADs I still need to import into ArchivesSpace. How can I get them ready for this new model?

+ +

If you have a box and folder associated with a component (or any other hierarchical relationship of containers), you will need to add identifiers to the container element so that the EAD importer knows which is the top container. If you previously used Archivists’ Toolkit to create EAD, your containers probably already have container identifiers. If your container elements do not have identifiers already, Yale University has made available an XSLT transformation file to add them. You will need to run it before importing the EAD file into ArchivesSpace.

+ +

Conversion

+ +

When upgrading from 1.4.2 (and earlier versions) to 1.5.0, the container conversion will happen as part of the upgrade process. You will be able to follow its progress in the log. Instructions for upgrading from a previous version of ArchivesSpace are available at upgrade documentation.

+ +

Because this is a major change in the data model for this portion of the application, running at least one test conversion is very strongly recommended. Follow these steps to run the upgrade/conversion process:

+
    +
  • Create a backup of your ArchivesSpace instance to use for testing. IT IS ESSENTIAL THAT YOU NOT RUN THIS ON A PRODUCTION INSTANCE AS THE CONVERSION CHANGES YOUR DATA, and THE CHANGES CANNOT BE UNDONE EXCEPT BY REVERTING TO A BACKUP VERSION OF YOUR DATA PRIOR TO RUNNING THE CONVERSION.
  • +
  • Follow the upgrade instructions to unpack a fresh copy of the v 1.5.0 release made available for testing, copy your configuration and data files, and transfer your locales.
  • +
  • It is recommended that you delete your Solr index files to start with a fresh index. We are upgrading the version of Solr that ships with the application, and the upgrade will require a total reindex of your ArchivesSpace data. To do this, delete the data/solr_index/index directory and the files in data/indexer_state.
  • +
  • Follow the upgrade instructions to run the database migrations. As part of this step, your container data will be converted to the new data model. You can follow along in the log. Windows users can open the archivesspace.out file in a tool like Notepad ++. Mac users can do a tail –f logs/archivesspace.out to get a live update from the log.
  • +
  • When the test conversion has been completed, the log will indicate “Completed: existing containers have been migrated to the new container model.”
  • +
+ +

Image of Conversion Log

+ +
    +
  • Open ArchivesSpace via your browser and login. +Retrieve the container conversion error report from the Background Jobs area:
  • +
  • Select Background Jobs from the Settings menu.
  • +
+ +

Image of Background Jobs

+ +
    +
  • The first item listed under Archived Jobs after completing the upgrade should be container_conversion_job. Click View.
  • +
+ +

Image of Background Jobs List

+ +
    +
  • Under Files, click File to download a CSV file with the errors and a brief explanation.
  • +
+ +

Image of Files

+ +

Image of Error Report

+ +
    +
  • Go back to your source data and correct any errors that you can before doing another test conversion.
  • +
  • When the error report shows no errors, or when you are satisfied with the remaining errors, your production instance is ready to be upgraded.
  • +
  • When the final upgrade/conversion is complete, you can move ArchivesSpace version 1.5.0 into production.
  • +
+ +

What are some common errors or anomalies that will be flagged in the conversion?

+ +
    +
  • A container with a barcode has different indicators or types in different records.
  • +
  • A container with a particular type and indicator sometimes has a barcode and sometimes doesn’t.
  • +
  • A container is missing a type or indicator.
  • +
  • Container levels are skipped (for example, there is a Container 1 and a Container 3, but no Container 2).
  • +
  • A container has multiple locations.
  • +
+ +

The conversion process can resolve some of these errors for you by supplying or deleting values as it deems appropriate, but for the most control over the process you will most likely want to resolve such issues yourself in your ArchivesSpace database before converting to the new container model.

+ +

Are there any known conversion issues?

+ +

Due to a change in the ArchivesSpace EAD importer in 2015, some EADs with hierarchical containers not designated by a @parent attribute were turned into multiple instance records. This has since been corrected in the application, but we are working on a plugin (now available at Instance Joiner Plug-in) that will enable you to turn these back into single instances so that subcontainers are not mistakenly turned into top containers.

+ + +
+ +
+ + + diff --git a/administration/upgrading/UPGRADING_1.5.0.md b/administration/upgrading/UPGRADING_1.5.0.md deleted file mode 100644 index eb7b3eb0..00000000 --- a/administration/upgrading/UPGRADING_1.5.0.md +++ /dev/null @@ -1,120 +0,0 @@ -# UPGRADING TO 1.5.0 - -Additional upgrade considerations specific to this release, which also apply to upgrading from 1.4.2 or lower to any version through 2.0.1. Refer to the [upgrade documentation](../upgrading.html) for the standard instructions that apply in all cases. - -## General overview - -The upgrade process to the new data model in 1.5.0 requires considerable data transformation and it is important for users to review this document to understand the implications and possible side-effects. - -A quick overview of the steps are: - -1. Review this document and understand how the upgrade will impact your data, paying particular attention to the [Preparation section](#preparation) . -2. [Backup your database](../backup.html). -3. No, really, [backup your database](../backup.html). -4. It is suggested that [users start with a new solr index](../indexes.html). To do this, delete the data/solr_index/index directory and all files in the data/indexer_state directory. The embedded version of Solr has been upgraded, which should result in a much more compact index size. -5. Follow the standard [upgrading instructions](../upgrading.html). Important to note: The setup-database.sh|bat script will modify your database schema, but it will not move the data. If you are currently using the container management plugin you will need to remove it from the list of plugins in your config file prior to starting ArchivesSpace. -6. Start ArchivesSpace. When 1.5.0 starts for the first time, a conversion process will kick off and move the data into the new table structure. **During this time, the application will be unavailable until it completes**. 
Duration depends on the size of your data and server resources, with a few minutes for very small databases to several hours for very large ones. -7. When the conversion is done, the web application will start and the indexer will rebuild your index. Performance might be slower while the indexer runs, depending on your server environment and available resources. -8. Review the [output of the conversion process](#conversion) following the instructions below. How long it takes for the report to load will depend on the number of entries included in it. - -## Preparing for and Converting to the New Container Management Functionality - -With version 1.5.0, ArchivesSpace is adopting a new data model that will enable more capable and efficient management of the containers in which you store your archival materials. To take advantage of this improved functionality: -* Repositories already using ArchivesSpace as a production application will need to upgrade their ArchivesSpace applications to the version 1.5.0. (This upgrade / conversion must be done to take advantage of any other new features / bug fixes in ArchivesSpace 1.5.0 or later versions.) -* Repositories not yet using ArchivesSpace in production but needing to migrate data from the Archivists’ Toolkit or Archon will need to migrate their data to version 1.4.2 of ArchivesSpace or earlier and then upgrade that version to version 1.5.0. (This can be done when your repository is ready to migrate to ArchivesSpace.) -* Repositories not yet using ArchivesSpace in production and not needing to migrate data from the Archivists’ Toolkit or Archon can start using Archivists 1.5.0 without the need of upgrading. (People in this situation do not need to read any further.) - -Converting the container data model in version 1.4.2 and earlier versions of ArchivesSpace to the 1.5.0 version has some complexity and may not accommodate all the various ways in which container information has been recorded by diverse repositories. 
As a consequence, upgrading from a pre-1.5.0 version of ArchivesSpace requires planning for the upgrade, reviewing the results, and, possibly, remediating data either prior to or after the final conversion process. Because of all the variations in which container information can be recorded, it is impossible to know all the ways the data of repositories will be impacted. For this reason, **all repositories upgrading their ArchivesSpace to version 1.5.0 should do so with a backup of their production ArchivesSpace instance and in a test environment.** A conversion may only be undone by reverting back to the source database. - -## Frequently Asked Questions -*How will my data be converted to the new model?* - -When your installation is upgraded to 1.5.0, the conversion will happen as part of the upgrade process. - -*Can I continue to use the current model for containers and not convert to the new model?* - -Because it is such a substantial improvement [(see separate announcement for the new features)](../../README_FEATURES_1.5.0.html), the new model is required for all using ArchivesSpace 1.5.0 and higher. The only way to continue using the current model is to never upgrade beyond 1.4.2. - -*What if I’m already using the container management plugin made available to the community by Yale University?* - -Conversion of data created using the Yale container management plugin, or a local adaptation of the plugin, will also happen as part of the process of upgrading to 1.5.0. Some steps will be skipped when they are not needed. At the end of the process, the new container data model will be integrated into your ArchivesSpace and will not need to be loaded or maintained as a plugin. - -Those currently running the container management plugin will need to remove the container management plugin from the list in your config file prior to starting the conversion or a validation name error will occur. 
- -*I haven’t moved from Archivists’ Toolkit or Archon yet and am planning to use the associated migration tool. Can I migrate directly to 1.5.0?* - -No, you must migrate to 1.4.2 or earlier versions and then upgrade your installation to 1.5.0 according to the instructions provided here. - -*What changes are being made to the previous model for containers?* - -The biggest change is the new concept of top containers. A top container is the highest level container in which a particular instance is stored. Top containers are in some ways analogous to the current Container 1, but broken out from the entire container record (child and grandparent container records). As such, top containers enable more efficient recording and updating of the highest level containers in your collection. - -*How does ArchivesSpace determine what is a top container?* - -During the conversion, ArchivesSpace will find all the Container 1s in your current ArchivesSpace database. It will then evaluate them as follows: -* If containers have barcodes, one top container is created for each unique Container 1 barcode. -* If containers do not have barcodes, one top container is created for each unique combination of container 1 indicator and container type 1 within a resource or accession. -* Once a top container is created, additional instance records for the same container within an accession or resource will be linked to that top container record. - -## Preparation - -*What can I do to prepare my ArchivesSpace data for a smoother conversion to top containers?* - -* If your Container 1s have unique barcodes, you do not need to do anything except verify that your data is complete and accurate. You should run a preliminary conversion as described in the Conversion section and resolve any errors. -* If your Container 1s do not have barcodes, but have a nonduplicative container identifier sequence within each accession or resource (e.g. 
Box 1, Box 2, Box 3), or the identifiers are only reused within an accession or resource for different types of containers (for example, you have a Box 1 through 10 and an Oversize Box 1 through 3) you do not need to do anything except verify that your data is complete and accurate. You should run a preliminary conversion as described in the Conversion section and resolve any errors. -* If your Container 1s do not have barcodes and you have parallel numbering sequences, where the same indicators and types are used to refer to different containers within the same accession or resource within some or all accessions or resources (for example, you have a Box 1 in series 1 and a different Box 1 in series 5) you will need to find a way to uniquely identify these containers. One option is to run this [barcoder plugin](https://github.com/archivesspace-plugins/barcoder) for each resource to which this applies. The barcoder plugin creates barcodes that combine the ID of the highest level archival object ancestor with the container 1 type and indicator. (The barcoder plugin is designed to run against one resource at a time, instead of against all resources, because not all resources in a repository may match this condition.) Once you’ve differentiated your containers with parallel number sequences, you should run a preliminary conversion as described in the Conversion section and resolve any errors. - -You do not need to make any changes to Container 2 fields or Container 3 fields. Data in these fields will be converted to the new Child and Grandchild container fields that map directly to these fields. - -If you use the current Container Extent fields, these will no longer be available in 1.5.0. Any data in these fields will be migrated to a new Extent sub-record during the conversion. You can evaluate whether this data should remain in an extent record or if it belongs in a container profile or other fields and then move it accordingly after the conversion is complete. 
- -*I have EADs I still need to import into ArchivesSpace. How can I get them ready for this new model?* - -If you have a box and folder associated with a component (or any other hierarchical relationship of containers), you will need to add identifiers to the container element so that the EAD importer knows which is the top container. If you previously used Archivists' Toolkit to create EAD, your containers probably already have container identifiers. If your container elements do not have identifiers already, Yale University has made available an [XSLT transformation file](https://github.com/YaleArchivesSpace/xslt-files/blob/master/EAD_add_IDs_to_containers.xsl) to add them. You will need to run it before importing the EAD file into ArchivesSpace. - -## Conversion - -When upgrading from 1.4.2 (and earlier versions) to 1.5.0, the container conversion will happen as part of the upgrade process. You will be able to follow its progress in the log. Instructions for upgrading from a previous version of ArchivesSpace are available at [upgrade documentation](../upgrading.html). - -Because this is a major change in the data model for this portion of the application, running at least one test conversion is very strongly recommended. Follow these steps to run the upgrade/conversion process: -* Create a backup of your ArchivesSpace instance to use for testing. **IT IS ESSENTIAL THAT YOU NOT RUN THIS ON A PRODUCTION INSTANCE AS THE CONVERSION CHANGES YOUR DATA, and THE CHANGES CANNOT BE UNDONE EXCEPT BY REVERTING TO A BACKUP VERSION OF YOUR DATA PRIOR TO RUNNING THE CONVERSION.** -* Follow the upgrade instructions to unpack a fresh copy of the v 1.5.0 release made available for testing, copy your configuration and data files, and transfer your locales. 
-* **It is recommended that you delete your Solr index files to start with a fresh index** We are upgrading the version of Solr that ships with the application, and the upgrade will require a total reindex of your ArchivesSpace data. To do this, delete the data/solr_index/index directory and the files in data/indexer_state. -* Follow the upgrade instructions to run the database migrations. As part of this step, your container data will be converted to the new data model. You can follow along in the log. Windows users can open the archivesspace.out file in a tool like Notepad ++. Mac users can do a tail –f logs/archivesspace.out to get a live update from the log. -* When the test conversion has been completed, the log will indicate "Completed: existing containers have been migrated to the new container model." - - ![Image of Conversion Log](../../images/ConversionLog.png) - -* Open ArchivesSpace via your browser and login. -Retrieve the container conversion error report from the Background Jobs area: -* Select Background Jobs from the Settings menu. - -![Image of Background Jobs](../../images/BackgroundJobs.png) - -* The first item listed under Archived Jobs after completing the upgrade should be container_conversion_job. Click View. - -![Image of Background Jobs List](../../images/BackgroundJobsList.png) - -* Under Files, click File to download a CSV file with the errors and a brief explanation. - -![Image of Files](../../images/Files.png) - -![Image of Error Report](../../images/ErrorReport.png) - -* Go back to your source data and correct any errors that you can before doing another test conversion. -* When the error report shows no errors, or when you are satisfied with the remaining errors, your production instance is ready to be upgraded. -* When the final upgrade/conversion is complete, you can move ArchivesSpace version 1.5.0 into production. 
- -*What are some common errors or anomalies that will be flagged in the conversion?* - -* A container with a barcode has different indicators or types in different records. -* A container with a particular type and indicator sometimes has a barcode and sometimes doesn’t. -* A container is missing a type or indicator. -* Container levels are skipped (for example, there is a Container 1 and a Container 3, but no Container 2). -* A container has multiple locations. - -The conversion process can resolve some of these errors for you by supplying or deleting values as it deems appropriate, but for the most control over the process you will most likely want to resolve such issues yourself in your ArchivesSpace database before converting to the new container model. - -*Are there any known conversion issues?* - -Due to a change in the ArchivesSpace EAD importer in 2015, some EADs with hierarchical containers not designated by a @parent attribute were turned into multiple instance records. This has since been corrected in the application, but we are working on a plugin (now available at [Instance Joiner Plug-in](https://github.com/archivesspace-plugins/instance_joiner) that will enable you to turn these back into single instances so that subcontainers are not mistakenly turned into top containers. diff --git a/administration/upgrading/UPGRADING_2.1.0.html b/administration/upgrading/UPGRADING_2.1.0.html new file mode 100644 index 00000000..b34fe1ab --- /dev/null +++ b/administration/upgrading/UPGRADING_2.1.0.html @@ -0,0 +1,164 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/upgrading/UPGRADING_2.1.0.md + +

+ +

+ + Report issue on Jira + administration/upgrading/UPGRADING_2.1.0.md + +

+
+
+ +

UPGRADING TO 2.1.0

+ +

(these considerations also apply when upgrading to any version past 2.1.0 from a version prior to 2.1.0)

+ +

Additional upgrade considerations specific to this release. Refer to the upgrade documentation for the standard instructions that apply in all cases.

+ +

For those upgrading from 1.4.2 and lower

+ +

Following the merge of the Container Management Plugin in 1.5.0, ArchivesSpace still retained the old container model and had a number of dependencies on it. This imposed unnecessary complexity and some performance degradation on the system.

+ +

In this release all references to the old container model have been removed and the parts of the application that were dependent on it (for example, Imports and Exports) have been refactored to use the new container model.

+ +

A consequence of this change is that if you are upgrading from ArchivesSpace version 1.4.2 or lower, you will need to first upgrade to any version between 1.5.0 and 2.0.1 to run the container conversion. You will then be able to upgrade to 2.1.0. If you are already using any version of ArchivesSpace between 1.5.0 and 2.0.1, you will be able to upgrade directly to 2.1.0.

+ +

For those needing to migrate data from Archivists’ Toolkit or Archon using the migration tools

+ +

The migration tools are currently supported through version 1.4.2 only. If you want to migrate data to ArchivesSpace using one of these tools, you must migrate it to 1.4.2. From there you can follow the instructions for those upgrading from 1.4.2 and lower.

+ +

Data migrations in this release

+ +

The rights statements data model has changed in 2.1.0. If you currently use rights statements, your data will be converted to the new model during the setup-database step of the upgrade process. We strongly urge you to backup your database and run at least one test upgrade before putting 2.1.0 into production.

+ +

For those using an external Solr server

+ +

The index schema has changed with 2.1.0. If you are using an external Solr server, you will need to update the schema.xml with the newer version. If you are using the default Solr index that ships with ArchivesSpace, no action is needed.

+ + +
+ +
+ + + diff --git a/administration/upgrading/UPGRADING_2.1.0.md b/administration/upgrading/UPGRADING_2.1.0.md deleted file mode 100644 index fb81294e..00000000 --- a/administration/upgrading/UPGRADING_2.1.0.md +++ /dev/null @@ -1,26 +0,0 @@ -# UPGRADING TO 2.1.0 - -(these considerations also apply when upgrading to any version past 2.1.0 from a version prior to 2.1.0) - -Additional upgrade considerations specific to this release. Refer to the [upgrade documentation](../upgrading.html) for the standard instructions that apply in all cases. - -## For those upgrading from 1.4.2 and lower - -Following the merge of the Container Management Plugin in 1.5.0, ArchivesSpace still retained the old container model and had a number of dependencies on it. This imposed unnecessary complexity and some performance degradation on the system. - -In this release all references to the old container model have been removed and the parts of the application that were dependent on it (for example, Imports and Exports) have been refactored to use the new container model. - -A consequence of this change is that if you are upgrading from ArchivesSpace version of 1.4.2 or lower, you will need to first upgrade to any version between 1.5.0 and 2.0.1 to run the container conversion. You will then be able to upgrade to 2.1.0. If you are already using any version of ArchivesSpace between 1.5.0 and 2.0.1, you will be able to upgrade directly to 2.1.0. - -## For those needing to migrate data from Archivists' Toolkit or Archon using the migration tools - -The migration tools are currently supported through version 1.4.2 only. If you want to migrate data to ArchivesSpace using one of these tools, you must migrate it to 1.4.2. From there you can follow the instructions for those upgrading from 1.4.2 and lower. - -## Data migrations in this release - -The rights statements data model has changed in 2.1.0. 
If you currently use rights statements, your data will be converted to the new model during the setup-database step of the upgrade process. We strongly urge you to backup your database and run at least one test upgrade before putting 2.1.0 into production. - - -## For those using an external Solr server - -The index schema has changed with 2.1.0. If you are using an external Solr server, you will need to update the [schema.xml](https://github.com/archivesspace/archivesspace/blob/master/solr/schema.xml) with the newer version. If you are using the default Solr index that ships with ArchivesSpace, no action is needed. diff --git a/administration/windows.html b/administration/windows.html new file mode 100644 index 00000000..ac900cea --- /dev/null +++ b/administration/windows.html @@ -0,0 +1,193 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + administration/windows.md + +

+ +

+ + Report issue on Jira + administration/windows.md + +

+
+
+ +

Running ArchivesSpace as a Windows service

+ +

Running ArchivesSpace as a Windows service requires some additional configuration.

+ +

You can use Apache procrun to configure ArchivesSpace to run as a Windows service. We have provided a service.bat script that will attempt to configure procrun for you (under launcher\service.bat).

+ +

To run this script, first you need to download procrun. +Extract the files and copy the prunsrv.exe and prunmgr.exe to your ArchivesSpace directory.

+ +

To find the path to Java, “Start” > “Control Panel” > “Java”, Select “Java” tab. You’ll see the path there. It will look something like C:\Program Files (x86)\Java

+ +

You also need to be sure that Java is in your system path and also to create JAVA_HOME as a global environment variable. +To add Java to your path, edit your %PATH% environment variable to include the directory of your java executable ( it will be something like C:\Program Files (x86)\Java ). To add JAVA_HOME, add a new system variable and put the directory where java was installed ( something like C:\Program Files (x86)\Java ).

+ +

Environment variables can be found by “Start” > “Control Panel”, then searching for environment. Click “edit the system environment variables”. In the section System Variables, find the PATH environment variable and select it. Click Edit. If the PATH environment variable does not exist, click New. In the Edit System Variable (or New System Variable) window, specify the value of the PATH environment variable. Click OK. Close all remaining windows by clicking OK. Do the same for JAVA_HOME.

+ +

Before setting up the ArchivesSpace service, you should also configure ArchivesSpace to run against MySQL. +Be sure that the MySQL connector jar file is in the lib directory, in order for +the service setup script to add it to the application’s classpath.

+ +

Lastly, for the service to shutdown cleanly, uncomment and change these lines in +config/config.rb:

+ +
AppConfig[:use_jetty_shutdown_handler] = true
+AppConfig[:jetty_shutdown_path] = "/xkcd"
+
+ +

This enables a shutdown hook for Jetty to respond to when the shutdown action +is taken.

+ +

You can now execute the batch script from your ArchivesSpace root directory from +the command line with launcher\service.bat. This will configure the service and +provide two executables: ArchivesSpaceService.exe (the service) and +ArchivesSpaceServicew.exe (a GUI monitor)

+ +

There are several options to launch the service. The easiest is to open the GUI +monitor and click “Launch”.

+ +

Alternatively, you can start the GUI monitor and minimize it in your +system tray with:

+ +
ArchivesSpaceServicew.exe //MS//
+
+ +

To execute the service from the command line, you can invoke:

+ +
ArchivesSpaceService.exe //ES//
+
+ +

Log output will be placed in your ArchivesSpace log directory.

+ +

Please see the procrun +documentation +for more information.

+ + +
+ +
+ + + diff --git a/administration/windows.md b/administration/windows.md deleted file mode 100644 index 6044a76e..00000000 --- a/administration/windows.md +++ /dev/null @@ -1,51 +0,0 @@ -# Running ArchivesSpace as a Windows service - -Running ArchivesSpace as a Windows service requires some additional configuration. - -You can use Apache [procrun](http://commons.apache.org/proper/commons-daemon/procrun.html) to configure ArchivesSpace to run as a Windows service. We have provided a service.bat script that will attempt to configure procrun for you (under `launcher\service.bat`). - -To run this script, first you need to [download procrun](http://www.apache.org/dist/commons/daemon/binaries/windows/ ). -Extract the files and copy the prunsrv.exe and prunmgr.exe to your ArchivesSpace directory. - -To find the path to Java, "Start" > "Control Panel" > "Java", Select "Java" tab. You'll see the path there. It will look something like `C:\Program Files (x86)\Java` - -You also need to be sure that Java is in your system path and also to create `JAVA_HOME` as a global environment variable. -To add Java to your path, edit you %PATH% environment variable to include the directory of your java executable ( it will be something like `C:\Program Files (x86)\Java` ). To add `JAVA_HOME`, add a new system variable and put the directory where java was installed ( something like `C:\Program Files (x86)\Java` ). - -Environement varialbe be found by "Start" > "Control Panel" , search for environment. Click "edit the system environment variables". In the section System Variables, find the `PATH` environment variable and select it. Click Edit. If the `PATH` environment variable does not exist, click New. In the Edit System Variable (or New System Variable) window, specify the value of the `PATH` environment variable. Click OK. Close all remaining windows by clicking OK. 
Do the same for `JAVA_HOME` - -Before setting up the ArchivesSpace service, you should also [configure ArchivesSpace to run against MySQL](../provisioning/mysql.html). -Be sure that the MySQL connector jar file is in the lib directory, in order for -the service setup script to add it to the application's classpath. - -Lastly, for the service to shutdown cleanly, uncomment and change these lines in -config/config.rb: - - AppConfig[:use_jetty_shutdown_handler] = true - AppConfig[:jetty_shutdown_path] = "/xkcd" - -This enables a shutdown hook for Jetty to respond to when the shutdown action -is taken. - -You can now execute the batch script from your ArchivesSpace root directory from -the command line with `launcher\service.bat`. This will configure the service and -provide two executables: `ArchivesSpaceService.exe` (the service) and -`ArchivesSpaceServicew.exe` (a GUI monitor) - -There are several options to launch the service. The easiest is to open the GUI -monitor and click "Launch". - -Alternatively, you can start the GUI monitor and minimize it in your -system tray with: - - ArchivesSpaceServicew.exe //MS// - -To execute the service from the command line, you can invoke: - - ArchivesSpaceService.exe //ES// - -Log output will be placed in your ArchivesSpace log directory. - -Please see the [procrun -documentation](http://commons.apache.org/proper/commons-daemon/procrun.html) -for more information. diff --git a/api/README.md b/api/README.md deleted file mode 100644 index f378dae8..00000000 --- a/api/README.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -permalink: /api/ ---- - -# Working with the ArchivesSpace API - -This documentation provides general information on working with the API. For detailed documentation of specific endpoints, see the [API reference](http://archivesspace.github.io/archivesspace/api/), which is maintained separately. 
- -## Authentication - -Most actions against the backend require you to be logged in as a user -with the appropriate permissions. By sending a request like: - - POST /users/admin/login?password=login - -your authentication request will be validated, and a session token -will be returned in the JSON response for your request. To remain -authenticated, provide this token with subsequent requests in the -`X-ArchivesSpace-Session` header. For example: - - X-ArchivesSpace-Session: 8e921ac9bbe9a4a947eee8a7c5fa8b4c81c51729935860c1adfed60a5e4202cb - -Since not all backend/API end points require authentication, it is best to restrict access to port 8089 to only IP addresses you trust. Your firewall should be used to specify a range of IP addresses that are allowed to call your ArchivesSpace API endpoint. This is commonly called whitelisting or allowlisting. - -### Example requests using CURL - -Send request to authenticate: - -``` -curl -s -F password="admin" "http://localhost:8089/users/admin/login" -``` - -This will return a JSON response that includes something like the following: - -``` -{ - "session":"9528190655b979f00817a5d38f9daf07d1686fed99a1d53dd2c9ff2d852a0c6e", - .... -} -``` - -It’s a good idea to save the session key as an environment variable to use for later requests: - -``` -#Mac/Unix terminal -export SESSION="9528190655b979f00817a5d38f9daf07d1686fed99a1d53dd2c9ff2d852a0c6e" - -#Windows Command Prompt -set SESSION="9528190655b979f00817a5d38f9daf07d1686fed99a1d53dd2c9ff2d852a0c6e" - -#Windows PowerShell -$env:SESSION="9528190655b979f00817a5d38f9daf07d1686fed99a1d53dd2c9ff2d852a0c6e" -``` - -Now you can make requests like this: - -``` -curl -H "X-ArchivesSpace-Session: $SESSION" "http://localhost:8089/repositories/2/resources/1 -``` - -## CRUD - -The ArchivesSpace API provides CRUD-style interactions for a number of -different "top-level" record types. 
Working with records follows a -fairly standard pattern: - - # Get a paginated list of accessions from repository '123' - GET /repositories/123/accessions?page=1 - - # Create a new accession, returning the ID of the new record - POST /repositories/123/accessions - {... a JSON document satisfying JSONModel(:accession) here ...} - - # Get a single accession (returned as a JSONModel(:accession) instance) using the ID returned by the previous request - GET /repositories/123/accessions/456 - - # Update an existing accession - POST /repositories/123/accessions/456 - {... a JSON document satisfying JSONModel(:accession) here ...} - - -## Performing API requests - -> Additional documentation is needed for these sections - please consider contributing documentation via a pull request to this repo - - -### GET requests - -#### Resolving associated records - -> Additional documentation needed - -#### Requests for paginated results - -> Additional documentation needed - -#### Working with long results sets - -> Additional documentation needed - -#### Search requests - -> Additional documentation needed - - -### POST requests - -#### Updating existing records - -> Additional documentation needed - -#### Creating new records - -> Additional documentation needed - - -### DELETE requests - -> Additional documentation needed diff --git a/api/index.html b/api/index.html new file mode 100644 index 00000000..f651ddd6 --- /dev/null +++ b/api/index.html @@ -0,0 +1,265 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + api/README.md + +

+ +

+ + Report issue on Jira + api/README.md + +

+
+
+ +

Working with the ArchivesSpace API

+ +

This documentation provides general information on working with the API. For detailed documentation of specific endpoints, see the API reference, which is maintained separately.

+ +

Authentication

+ +

Most actions against the backend require you to be logged in as a user +with the appropriate permissions. By sending a request like:

+ +
 POST /users/admin/login?password=login
+
+ +

your authentication request will be validated, and a session token +will be returned in the JSON response for your request. To remain +authenticated, provide this token with subsequent requests in the +X-ArchivesSpace-Session header. For example:

+ +
 X-ArchivesSpace-Session: 8e921ac9bbe9a4a947eee8a7c5fa8b4c81c51729935860c1adfed60a5e4202cb
+
+ +

Since not all backend/API end points require authentication, it is best to restrict access to port 8089 to only IP addresses you trust. Your firewall should be used to specify a range of IP addresses that are allowed to call your ArchivesSpace API endpoint. This is commonly called whitelisting or allowlisting.

+ +

Example requests using CURL

+ +

Send request to authenticate:

+ +
curl -s -F password="admin" "http://localhost:8089/users/admin/login"
+
+ +

This will return a JSON response that includes something like the following:

+ +
{
+   "session":"9528190655b979f00817a5d38f9daf07d1686fed99a1d53dd2c9ff2d852a0c6e",
+   ....
+}
+
+ +

It’s a good idea to save the session key as an environment variable to use for later requests:

+ +
#Mac/Unix terminal
+export SESSION="9528190655b979f00817a5d38f9daf07d1686fed99a1d53dd2c9ff2d852a0c6e"
+
+#Windows Command Prompt
+set SESSION="9528190655b979f00817a5d38f9daf07d1686fed99a1d53dd2c9ff2d852a0c6e"
+
+#Windows PowerShell
+$env:SESSION="9528190655b979f00817a5d38f9daf07d1686fed99a1d53dd2c9ff2d852a0c6e"
+
+ +

Now you can make requests like this:

+ +
curl -H "X-ArchivesSpace-Session: $SESSION" "http://localhost:8089/repositories/2/resources/1"
+
+ +

CRUD

+ +

The ArchivesSpace API provides CRUD-style interactions for a number of +different “top-level” record types. Working with records follows a +fairly standard pattern:

+ +
 # Get a paginated list of accessions from repository '123'
+ GET /repositories/123/accessions?page=1
+
+ # Create a new accession, returning the ID of the new record
+ POST /repositories/123/accessions
+ {... a JSON document satisfying JSONModel(:accession) here ...}
+
+ # Get a single accession (returned as a JSONModel(:accession) instance) using the ID returned by the previous request
+ GET /repositories/123/accessions/456
+
+ # Update an existing accession
+ POST /repositories/123/accessions/456
+ {... a JSON document satisfying JSONModel(:accession) here ...}
+
+ +

Performing API requests

+ +
+

Additional documentation is needed for these sections - please consider contributing documentation via a pull request to this repo

+
+ +

GET requests

+ +

Resolving associated records

+ +
+

Additional documentation needed

+
+ +

Requests for paginated results

+ +
+

Additional documentation needed

+
+ +

Working with long result sets

+ +
+

Additional documentation needed

+
+ +

Search requests

+ +
+

Additional documentation needed

+
+ +

POST requests

+ +

Updating existing records

+ +
+

Additional documentation needed

+
+ +

Creating new records

+ +
+

Additional documentation needed

+
+ +

DELETE requests

+ +
+

Additional documentation needed

+
+ + +
+ +
+ + + diff --git a/architecture/README.md b/architecture/README.md deleted file mode 100644 index ad902131..00000000 --- a/architecture/README.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -permalink: /architecture/ ---- - -# ArchivesSpace architecture and components - -ArchivesSpace is divided into several components: the backend, which -exposes the major workflows and data types of the system via a -REST API, a staff interface, a public interface, and a search system, -consisting of Solr and an indexer application. - -These components interact by exchanging JSON data. The format of this -data is defined by a class called JSONModel. - -* [JSONModel -- a validated ArchivesSpace record](./jsonmodel.html) -* [The ArchivesSpace backend](./backend) -* [The ArchivesSpace staff interface](./frontend) -* [Background Jobs](./jobs) -* [Search indexing](./search) -* [The ArchivesSpace public user interface](./public) -* [OAI-PMH interface](./oai-pmh) - -## Languages, platforms, and included open source projects - -ArchivesSpace components are constructed using several programming languages, platforms, and additional open source projects. - -### Languages - -The languages used are Java, JRuby, Ruby, JavaScript, and CSS. - -### Platforms - -The backend, OAI harvester, and indexer are Sinatra apps. The staff and public user interfaces are Ruby on Rails apps. - -### Additional open source projects - -The database used out of the box and for testing is Apache Derby. The database suggested for production is MySQL. The index platform is Apache Solr. - -## Directory Structure - -ArchivesSpace is made up of several components that are kept in separate directories. - -### \_yard - -This directory contains the code for the documentation tool used to generate the github io pages here: https://archivesspace.github.io/archivesspace/api/ and https://archivesspace.github.io/archivesspace/doc/ - -### backend - -This directory contains the code that handles the database and the API. 
- -### build - -This directory contains the code used to build the application. It includes the commands that are used to run the development servers, the test suites, and to build the releases. ArchivesSpace is a JRuby application and Apache Ant is used to build it. - -### clustering - -This directory contains code that can be used when clustering an ArchivesSpace installation. - -### common - -This directory contains code that is used across two or more of the components. It includes configuration options, database schemas and migrations, and translation files. - -### contribution_files - -This directory contains the documentation and PDFs of the license agreement files. - -### docs - -This directory contains documentation files that are included in a release. - -### frontend - -This directory contains the staff interface Ruby on Rails application. - -### indexer - -This directory contains the indexer Sinatra application. - -### jmeter - -This directory contains an example that can be used to set up Apache JMeter to load test functional behavior and measure performance. - -### launcher - -This directory contains the code that launches (starts, restarts, and stops) an ArchivesSpace application. - -### oai - -This directory contains the OAI-PMH Sinatra application. - -### plugins - -This directory contains ArchivesSpace Program Team supported plugins. - -### proxy - -This directory contains the Docker proxy code. - -### public - -This directory contains the public interface Ruby on Rails application. - -### reports - -This directory contains the reports code. - -### scripts - -This directory contains scripts necessary for building, deploying, and other ArchivesSpace tasks. - -### selenium - -This directory contains the selenium tests. - -### solr - -This directory contains the solr code. - -### stylesheets - -This directory contains XSL stylesheets used by ArchivesSpace. 
- -### supervisord - -This directory contains a tool that can be used to run the development servers. diff --git a/architecture/backend/README.md b/architecture/backend/README.md deleted file mode 100644 index 2650ce86..00000000 --- a/architecture/backend/README.md +++ /dev/null @@ -1,412 +0,0 @@ ---- -permalink: /architecture/backend/ ---- - -# The ArchivesSpace backend - -The backend is responsible for implementing the ArchivesSpace API, and -supports the sort of access patterns shown in the previous section. -We've seen that the backend must support CRUD operations against a -number of different record types, and those records as expressed as -JSON documents produced from instances of JSONModel classes. - -The following sections describe how the backend fits together. - - -## main.rb -- load and initialize the system - -The `main.rb` program is responsible for starting the ArchivesSpace -system: loading all controllers and models, creating -users/groups/permissions as needed, and preparing the system to handle -requests. - -When the system starts up, the `main.rb` program performs the -following actions: - - * Initializes JSONModel--triggering it to load all record schemas - from the filesystem and generate the classes that represent each - record type. - * Connects to the database - * Loads all backend models--the system's domain objects and - persistence layer - * Loads all controllers--defining the system's REST endpoints - * Starts the job scheduler--handling scheduled tasks such as backups - of the demo database (if used) - * Runs the "bootstrap ACLs" process--creates the admin user and - group if they don't already exist; creates the hidden global - repository; creates system users and groups. - * Fires the "backend started" notification to any registered - observers. 
- -In addition to handling the system startup, `main.rb` also provides -the following facilities: - - * Session handling--tracks authenticated backend sessions using the - token extracted from the `X-ArchivesSpace-Session` request header. - * Helper methods for accessing the current user and current session - of each request. - - -## rest.rb -- Request and response handling for REST endpoints - -The `rest.rb` module provides the mechanism used to define the API's -REST endpoints. Each endpoint definition includes: - - * The URI and HTTP request method used to access the endpoint - * A list of typed parameters for that endpoint - * Documentation for the endpoint, each parameter, and each possible - response that may be returned - * Permission checks--predicates that the current user must satisfy - to be able to use the endpoint - -Each controller in the system consists of one or more of these -endpoint definitions. By using the endpoint syntax provided by -`rest.rb`, the controllers can declare the interface they provide, and -are freed of having to perform the sort of boilerplate associated -with request handling--check parameter types, coerce values from -strings into other types, and so on. - -The `main.rb` and `rest.rb` components work together to insulate the -controllers from much of the complexity of request handling. By the -time a request reaches the body of an endpoint: - - * It can be sure that all required parameters are present and of the - correct types. - * The body of the request has been fetched, parsed into the - appropriate type (usually a JSONModel instance--see below) and - made available as a request parameter. - * Any parameters provided by the client that weren't present in the - endpoint definition have been dropped. - * The user's session has been retrieved, and any defined access - control checks have been carried out. - * A connection to the database has been assigned to the request, and - a transaction has been opened. 
If the controller throws an - exception, the transaction will be automatically rolled back. - -## Controllers - -As touched upon in the previous section, controllers implement the -functionality of the ArchivesSpace API by registering one or more -endpoints. Each endpoint accepts a HTTP request for a given URI, -carries out the request and returns a JSON response (if successful) or -throws an exception (if something goes wrong). - -Each controller lives in its own file, and these can be found in the -`backend/app/controllers` directory. Since most of the request -handling logic is captured by the `rest.rb` module, controllers -generally don't do much more than coordinate the classes from the -model layer and send a response back to the client. - -### crud_helpers.rb -- capturing common CRUD controller actions - -Even though controllers are quite thin, there's still a lot of overlap -in their behaviour. Each record type in the system supports the same -set of CRUD operations, and from the controller's point of view -there's not much difference between an update request for an accession -and an update request for a digital object (for example). - -The `crud_helpers.rb` module pulls this commonality into a set of -helper methods that are invoked by each controller, providing methods -for the standard operations of the system. - -## Models - -The backend's model layer is where the action is. The model layer's -role is to bridge the gap between the high-level JSONModel objects -(complete with their properties, nested records, references to other -records, etc.) and the underlying relational database (via the Sequel -database toolkit). As such, the model layer is mainly concerned with -mapping JSONModel instances to database tables in a way that preserves -everything and allows them to be queried efficiently. - -Each record type has a corresponding model class, but the individual -model definitions are often quite sparse. 
This is because the -different record types differ in the following ways: - - * The set of properties they allow (and their types, valid values, - etc.) - * The types of nested records they may contain - * The types of relationships they may have with other record types - -The first of these--the set of allowable properties--is already -captured by the JSONModel schema definitions, so the model layer -doesn't have to enforce these restrictions. Each model can simply -take the values supplied by the JSONModel object it is passed and -assume that everything that needs to be there is there, and that -validation has already happened. - -The remaining two aspects *are* enforced by the model layer, but -generally don't pertain to just a single record type. For example, an -accession may be linked to zero or more subjects, but so can several -other record types, so it doesn't make sense for the `Accession` model -to contain the logic for handling subjects. - -In practice we tend to see very little functionality that belongs -exclusively to a single record type, and as a result there's not much -to put in each corresponding model. Instead, models are generally -constructed by combining a number of mix-ins (Ruby modules) to satisfy -the requirements of the given record type. Features à la carte! - -### ASModel and other mix-ins - -At a minimum, every model includes the `ASModel` mix-in, which provides -base versions of the following methods: - - * `Model.create_from_json` -- Take a JSONModel instance and create a - model instance (a subclass of Sequel::Model) from it. Returns the - instance. - * `model.update_from_json` -- Update the target model instance with - the values from a given JSONModel instance. - * `Model.sequel_to_json` -- Return a JSONModel instance of the appropriate - type whose values are taken from the target model instance. 
- Model classes are declared to correspond to a particular JSONModel - instance when created, so this method can automatically return a - JSONModel instance of the appropriate type. - -These methods comprise the primary interface of the model layer: -virtually every mix-in in the model layer overrides one or all of -these to add behaviour in a modular way. - -For example, the 'notes' mix-in adds support for multiple notes to be -added to a record type--by mixing this module into a model class, that -class will automatically accept a JSONModel property called 'notes' -that will be stored and retrieved to and from the database as needed. -This works by overriding the three methods as follows: - - * `Model.create_from_json` -- Call 'super' to delegate the creation to - the next mix-in in the chain. When it returns the newly created - object, extract the notes from the JSONModel instance and attach - them to the model instance (saving them in the database). - * `model.update_from_json` -- Call 'super' to save the other updates - to the database, then replace any existing notes entries for the - record with the ones provided by the JSONModel. - * `Model.sequel_to_json` -- Call 'super' to have the next mix-in in - the chain create a JSONModel instance, then pull the stored notes - from the database and poke them into it. - -All of the mix-ins follow this pattern: call 'super' to delegate the -call to the next mix-in in the chain (eventually reaching ASModel), -then manipulate the result to implement the desired behaviour. - -### Nested records - -Some record types, like accessions, digital objects, and subjects, are -*top-level records*, in the sense that they are created independently -of any other record and are addressable via their own URI. However, -there are a number of records that can't exist in isolation, and only -exist in the context of another record. When one record can contain -instances of another record, we call them *nested records*. 
- -To give an example, the `date` record type is nested within an -`accession` record (among others). When the model layer is asked to -save a JSONModel instance containing nested records, it must pluck out -those records, save them in the appropriate database table, and ensure -that linkages are created within the database to allow them to be -retrieved later. - -This happens often enough that it would be tedious to write code for -each model to handle its nested records, so the ASModel mix-in -provides a declaration to handle this automatically. For example, the -`accession` model uses a definition like: - - base.def_nested_record(:the_property => :dates, - :contains_records_of_type => :date, - :corresponding_to_association => :date) - -When creating an accession, this declaration instructs the `Accession` -model to create a database record for each date listed in the "dates" -property of the incoming record. Each of these date records will be -automatically linked to the created accession. - -### Relationships - -A relationship is a link between two top-level records, where the link -is a separate, dynamically generated, model with zero or more -properties of its own. - -For example, the `Event` model can be related to several different -types of records: - - define_relationship(:name => :event_link, - :json_property => 'linked_records', - :contains_references_to_types => proc {[Accession, Resource, ArchivalObject]}) - -This declaration generates a custom class that models the relationship -between events and the other record types. 
The corresponding JSON -schema declaration for the `linked_records` property looks like this: - - "linked_records" => { - "type" => "array", - "ifmissing" => "error", - "minItems" => 1, - "items" => { - "type" => "object", - "subtype" => "ref", - "properties" => { - "role" => { - "type" => "string", - "dynamic_enum" => "linked_event_archival_record_roles", - "ifmissing" => "error", - }, - "ref" => { - "type" => [{"type" => "JSONModel(:accession) uri"}, - {"type" => "JSONModel(:resource) uri"}, - {"type" => "JSONModel(:archival_object) uri"}, - ...], - "ifmissing" => "error" - }, - ... - -That is, the property includes URI references to other records, plus -an additional "role" property to indicate the nature of the -relationship. The corresponding JSON might then be: - - linked_records: [{ref: '/repositories/123/accessions/456', role: 'authorizer'}, ...] - -The `define_relationship` definition automatically makes use of the -appropriate join tables in the database to store this relationship and -retrieve it later as needed. - -### Agents and `agent_manager.rb` - -Agents present a bit of a representational challenge. There are four -types of agents (person, family, corporate entity, software), and at a -high-level they are structured in the same way: each type can contain -one or more name records, zero or more contact records, and a number -of properties. Records that link to agents (via a relationship, for -example) can link to any of the four types so, in some sense, each -agent type implements a common `Agent` interface. - -However, the agent types differ in their details. Agents contain name -records, but the types of those name records correspond to the type of -the agent: a person agent contains a person name record, for example. -So, in spite of their similarities, the different agents need to be -modelled as separate record types. - -The `agent_manager` module captures the high-level similarities -between agents. 
Each agent model includes the agent manager mix-in: - - include AgentManager::Mixin - -and then defines itself declaratively by the provided class method: - - register_agent_type(:jsonmodel => :agent_person, - :name_type => :name_person, - :name_model => NamePerson) - -This definition sets up the properties of that agent. It creates: - - * a one_to_many relationship with the corresponding name - type of the agent. - * a one_to_many relationship with the agent_contact table. - * nested record definition which defines the names list of the agent - (so the list of names for the agent are automatically stored in - and retrieved from the database) - * a nested record definition for contact list of the agent. - -## Validations - -As records are added to and updated within the ArchivesSpace system, -they are validated against a number of rules to make sure they are -well-formed and don't conflict with other records. There are two -types of record validation: - - * Record-level validations check that a record is self-consistent: - that it contains all required fields, that its values are of the - appropriate type and format, and that its fields don't contradict - one another. - * System-level validations check that a record makes sense in a - broader context: that it doesn't share a unique identifier with - another record, and that any record it references actually exists. - -Record-level validations can be performed in isolation, while -system-level records require comparing the record to others in the -database. - -System-level validations need to be implemented in the database itself -(as integrity constraints), but record-level validations are often too -complex to be expressed this way. As a result, validations in -ArchivesSpace can appear in one or both of the following layers: - - * At the JSONModel level, validations are captured by JSON schema - documents. 
Where more flexibility is needed, custom validations - are added to the `common/validations.rb` file, allowing validation - logic to be expressed using arbitrary Ruby code. - * At the database level, validations are captured using database - constraints. Since the error messages yielded by these - constraints generally aren't useful for users, database - constraints are also replicated in the backend's model layer using - Sequel validations, which give more targeted error messages. - -As a general rule, record-level validations are handled by the -JSONModel validations (either through the JSON schema or custom -validations), while system-level validations are handled by the model -and the database schema. - - -## Optimistic concurrency control - -Updating a record using the ArchivesSpace API is a two part process: - - # Perform a `GET` against the desired record to fetch its JSON - # representation: - - GET /repositories/5/accessions/2 - - # Manipulate the JSON representation as required, and then `POST` - # it back to replace the original: - - POST /repositories/5/accessions/2 - -If two people do this simultaneously, there's a risk that one person -would silently overwrite the changes made by the other. To prevent -this, every record is marked with a version number that it carries in -the `lock_version` property. When the system receives the updated -copy of a record, it checks that the version it carries is still -current; if the version number doesn't match the one stored in the -database, the update request is rejected and the user must re-fetch -the latest version before applying their update. - -## The ArchivesSpace permissions model - -The ArchivesSpace backend enforces access control, defining which -users are allowed to create, read, update, suppress and delete the -records in the system. The major actors in the permissions model are: - - * Repositories -- The main mechanism for partitioning the - ArchivesSpace system. 
For example, an instance might contain one - repository for each section of an organisation, or one repository - for each major collection. - * Users -- An entity that uses the system--often a person, but - perhaps a consumer of the ArchivesSpace API. The set of users is - global to the system, and a single user may have access to - multiple repositories. - * Records -- A unit of information in the system. Some records are - global (existing outside of any given repository), while some are - repository-scoped (belonging to a single repository). - * Groups -- A set of users *within* a repository. Each group is - assigned zero or more permissions, which it confers upon its - members. - * Permissions -- An action that a user can perform. For example, A - user with the `update_accession_record` permission is allowed to - update accessions for a repository. - -To summarize, a user can perform an action within a repository if they -are a member of a group that has been assigned permission to perform -that action. - -### Conceptual trickery - -Since they're repository-scoped, groups govern access to repositories. -However, there are several record types that exist at the top-level of -the system (such as the repositories themselves, subjects and agents), -and the permissions model must be able to accommodate these. - -To get around this, we invent a concept: the "global" repository -conceptually contains the whole ArchivesSpace universe. As with other -repositories, the global repository contains groups, and users can be -made members of these groups to grant them permissions across the -entire system. One example of this is the "admin" user, which is -granted all permissions by the "administrators" group of the global -repository; another is the "search indexer" user, which can read (but -not update or delete) any record in the system. 
diff --git a/architecture/backend/api.html b/architecture/backend/api.html new file mode 100644 index 00000000..1554a7d4 --- /dev/null +++ b/architecture/backend/api.html @@ -0,0 +1,181 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/backend/api.md + +

+ +

+ + Report issue on Jira + architecture/backend/api.md + +

+
+
+ +

Working with the ArchivesSpace API

+ +
+

See API section for more detailed documentation

+
+ +

Authentication

+ +

Most actions against the backend require you to be logged in as a user +with the appropriate permissions. By sending a request like:

+ +
 POST /users/admin/login?password=login
+
+ +

your authentication request will be validated, and a session token +will be returned in the JSON response for your request. To remain +authenticated, provide this token with subsequent requests in the +X-ArchivesSpace-Session header. For example:

+ +
 X-ArchivesSpace-Session: 8e921ac9bbe9a4a947eee8a7c5fa8b4c81c51729935860c1adfed60a5e4202cb
+
+ +

CRUD

+ +

The ArchivesSpace API provides CRUD-style interactions for a number of +different “top-level” record types. Working with records follows a +fairly standard pattern:

+ +
 # Get a paginated list of accessions from repository '123'
+ GET /repositories/123/accessions?page=1
+
+ # Create a new accession, returning the ID of the new record
+ POST /repositories/123/accessions
+ {... a JSON document satisfying JSONModel(:accession) here ...}
+
+ # Get a single accession (returned as a JSONModel(:accession) instance) using the ID returned by the previous request
+ GET /repositories/123/accessions/456
+
+ # Update an existing accession
+ POST /repositories/123/accessions/456
+ {... a JSON document satisfying JSONModel(:accession) here ...}
+
+ + +
+ +
+ + + diff --git a/architecture/backend/api.md b/architecture/backend/api.md deleted file mode 100644 index 9450b588..00000000 --- a/architecture/backend/api.md +++ /dev/null @@ -1,38 +0,0 @@ -# Working with the ArchivesSpace API - -> See **API section** for more detailed documentation - -## Authentication - -Most actions against the backend require you to be logged in as a user -with the appropriate permissions. By sending a request like: - - POST /users/admin/login?password=login - -your authentication request will be validated, and a session token -will be returned in the JSON response for your request. To remain -authenticated, provide this token with subsequent requests in the -`X-ArchivesSpace-Session` header. For example: - - X-ArchivesSpace-Session: 8e921ac9bbe9a4a947eee8a7c5fa8b4c81c51729935860c1adfed60a5e4202cb - - -## CRUD - -The ArchivesSpace API provides CRUD-style interactions for a number of -different "top-level" record types. Working with records follows a -fairly standard pattern: - - # Get a paginated list of accessions from repository '123' - GET /repositories/123/accessions?page=1 - - # Create a new accession, returning the ID of the new record - POST /repositories/123/accessions - {... a JSON document satisfying JSONModel(:accession) here ...} - - # Get a single accession (returned as a JSONModel(:accession) instance) using the ID returned by the previous request - GET /repositories/123/accessions/456 - - # Update an existing accession - POST /repositories/123/accessions/456 - {... a JSON document satisfying JSONModel(:accession) here ...} diff --git a/architecture/backend/database.html b/architecture/backend/database.html new file mode 100644 index 00000000..8496643e --- /dev/null +++ b/architecture/backend/database.html @@ -0,0 +1,1625 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/backend/database.md + +

+ +

+ + Report issue on Jira + architecture/backend/database.md + +

+
+
+ +

Working with the ArchivesSpace Database

+ +

The ArchivesSpace database stores all data that is created within an ArchivesSpace instance. As described in other sections of this documentation, the backend code - particularly the model layer and ASModel_crud.rb file - uses the Sequel database toolkit to bridge the gap between this underlying data and the JSON objects which are exchanged by the other components of the system.

+ +

Often, querying the database directly is the most efficient and powerful way to retrieve data from ArchivesSpace. It is also possible to use raw SQL queries to create custom reports that can be run by users in the staff interface. Please consult the Custom Reports section of this documentation for additional information on creating custom reports.

+ + + +

It is recommended that ArchivesSpace be run against MySQL in production, not the included demo database. Instructions on setting up ArchivesSpace to run against MySQL are here.

+ +

The examples in this section are written for MySQL. There are many freely-available tutorials on the internet which can provide guidance to those unfamiliar with MySQL query syntax and the features of the language.

+ +

NOTE: the documentation below is current through database schema version 129, application version 2.7.1.

+ +

Database Overview

+ +

The ArchivesSpace database schema and its mapping to the JSONModel objects used by the other parts of the system are defined by the files in the common/schemas and backend/models directories. The database itself is created via the setup-database script in the scripts directory. This script runs the migrations in the common/db/migrations directory.

+ +

The tables in the ArchivesSpace database can be grouped into several general categories:

+ + +

One way to get a view of all tables and columns in your ArchivesSpace instance is to run the following query in a MySQL client:

+ +
  SELECT TABLE_SCHEMA
+  	, TABLE_NAME
+  	, COLUMN_NAME
+  	, ORDINAL_POSITION
+  	, IS_NULLABLE
+  	, COLUMN_TYPE
+  	, COLUMN_KEY
+  FROM INFORMATION_SCHEMA.COLUMNS
+  #change the following value to whatever your database is named
+  WHERE TABLE_SCHEMA Like 'archivesspace'
+
+ +

Additionally, a BETA version of an ArchivesSpace data dictionary has been created by members of the ArchivesSpace development team and the ArchivesSpace User Advisory Council Reports team.

+ +

Main record tables

+ +

These tables hold data about the primary record types in ArchivesSpace. Main record types are distinguished from subrecords in that they have their own persistent URIs - corresponding to their database identifiers/primary keys - that are resolvable via the staff interface, public interface, and API. They are distinguished from supporting records in that they are the primary descriptive record types that users will interact with in the system.

+ +

All of these records, except archival objects, can be created independently of any other record. Archival object records represent components of a larger entity, and so they must have a resource record as a root parent. See the parent/child relationships section for more information about the representation of hierarchical relationships in the database.

+ +

A few common fields occur in several main record tables. These similar fields are defined by the parent schemas in the common/schemas directory:

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Column NameTables
titleaccession, archival_object, digital_object, digital_object_component, resource
identifier/component_id/digital_object_idaccession, resource/archival_object, digital_object_component/digital_object
other_levelarchival_object, resource
repository_processing_notearchival_object, resource
+ + + +

All of the main records have a set of fields which store boolean values (0 or 1) that indicate whether the records are published in the public user interface, suppressed in the staff interface, or have some kind of applicable restriction. The exception to this is the repository table, which does not have a restriction boolean, but does have a hidden boolean. The accession table has multiple restriction-related booleans. See the section below for more information about boolean fields.

+ +

Beginning in version 2.6.0, the main record tables (and some supporting records - see below) also contain fields which hold data about archival resource keys (ARKs) and human-readable URLs (slugs):

+ + + + + + + + + + + + + + + + + + +
Column NameTables
slugaccession, archival_object, digital_object, digital_object_component, repository, resource
external_ark_urlarchival_object, resource
+ +

Also stored in these and all other tables are enumeration values, foreign keys which correspond to database identifiers in the enumeration_value table, which stores controlled values. See enumeration section below for more detail.

+ +

All subrecord data types - i.e. dates, extents, instances - relating to a main or supporting record are stored in their own tables and linked to main or supporting records via foreign key references in the subrecord tables. See subrecord section below for more detail.

+ +

The remaining data in the main record tables is text, and is unique to each table:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TABLE_NAMECOLUMN_NAMEIS_NULLABLECOLUMN_TYPECOLUMN_KEY
accessioncontent_descriptionYEStext 
accessioncondition_descriptionYEStext 
accessiondispositionYEStext 
accessioninventoryYEStext 
accessionprovenanceYEStext 
accessiongeneral_noteYEStext 
accessionaccession_dateYESdate 
accessionretention_ruleYEStext 
accessionaccess_restrictions_noteYEStext 
accessionuse_restrictions_noteYEStext 
archival_objectref_idNOvarchar(255)MUL
digital_object_componentlabelYESvarchar(255) 
repositoryrepo_codeNOvarchar(255)UNI
repositorynameNOvarchar(255) 
repositoryorg_codeYESvarchar(255) 
repositoryparent_institution_nameYESvarchar(255) 
repositoryurlYESvarchar(255) 
repositoryimage_urlYESvarchar(255) 
repositorycontact_personsYEStext 
repositorydescriptionYEStext 
repositoryoai_is_disabledYESint 
repositoryoai_sets_availableYEStext 
resourceead_idYESvarchar(255) 
resourceead_locationYESvarchar(255) 
resourcefinding_aid_titleYEStext 
resourcefinding_aid_filing_titleYEStext 
resourcefinding_aid_dateYESvarchar(255) 
resourcefinding_aid_authorYEStext 
resourcefinding_aid_language_noteYESvarchar(255) 
resourcefinding_aid_sponsorYEStext 
resourcefinding_aid_edition_statementYEStext 
resourcefinding_aid_series_statementYEStext 
resourcefinding_aid_noteYEStext 
resourcefinding_aid_subtitleYEStext 
+ + + +

Supporting record tables

+ +

Like the main record types listed above, supporting records can also be created independently of other records, and are addressable in the staff interface and API via their own URI. However, they are primarily meaningful via their many-to-many linkages to the main record types (and, sometimes, other supporting record types). These records typically provide additional information about, or otherwise enhance, the primary record types. A few supporting record types - for instance those in the term table - are used to enhance other supporting record types.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Supporting module tablesLinked to
agent_corporate_entity 
agent_family 
agent_person 
agent_software 
assessment 
classificationaccession, resource
classification_termclassification, accession, resource
container_profiletop_container
event 
location 
location_profilelocation
subjectresource, archival_object
termsubject
top_container 
vocabularysubject, term
assessment_attribute_definitionassessment_attribute, assessment_attribute_note
+ + + +

Subrecord tables

+ + + +

Subrecords must be associated with a main or supporting record - they cannot be created independently. As such, they do not have their own URIs, and can only be accessed via the API by retrieving the top-level record with which they are associated. In the staff interface these records are embedded within main or supporting record views. In the API subrecord data is contained in arrays within main or supporting records.

+ +

The various subrecord types do have their own database tables. In addition to data specific to the subrecord type, the tables also contain foreign key columns which hold the database identifiers of main or supporting records. Subrecord tables must have a value in one of the foreign key fields. Some subrecords can have another subrecord as parent (for instance, the sub_container subrecord has instance_id as its foreign key column).

+ +

Subrecords exist in a one-to-many relationship with their parent records, so a record’s id may appear multiple times in a subrecord table (i.e. when there are two dates associated with a resource record).

+ +

It is important to note that subrecords are deleted and recreated upon each save of the main or supporting record with which they are associated, regardless of whether the subrecord itself is modified. This means that the database identifier is deleted and reassigned upon each save.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Subrecord tablesForeign keys
agent_contactagent_person_id, agent_family_id, agent_corporate_entity_id, agent_software_id
dateaccession_id, deaccession_id, archival_object_id, resource_id, event_id, digital_object_id, digital_object_component_id, related_agents_rlshp_id, agent_person_id, agent_family_id, agent_corporate_entity_id, agent_software_id, name_person_id, name_family_id, name_corporate_entity_id, name_software_id
extentaccession_id, deaccession_id, archival_object_id, resource_id, digital_object_id, digital_object_component_id
external_documentaccession_id, archival_object_id, resource_id, subject_id, agent_person_id, agent_family_id, agent_corporate_entity_id, agent_software_id, rights_statement_id, digital_object_id, digital_object_component_id, event_id
external_idsubject_id, accession_id, archival_object_id, collection_management_id, digital_object_id, digital_object_component_id, event_id, location_id, resource_id
file_versiondigital_object_id, digital_object_component_id
instanceresource_id, archival_object_id, accession_id
name_authority_idname_person_id, name_family_id, name_software_id, name_corporate_entity_id
name_corporate_entityagent_corporate_entity_id
name_familyagent_family_id
name_personagent_person_id
name_softwareagent_software_id
noteresource_id, archival_object_id, digital_object_id, digital_object_component_id, agent_person_id, agent_corporate_entity_id, agent_family_id, agent_software_id, rights_statement_act_id, rights_statement_id
note_persistent_idnote_id, parent_id
revision_statementresource_id
rights_restrictionresource_id, archival_object_id
rights_restriction_typerights_restriction_id
rights_statementaccession_id, archival_object_id, resource_id, digital_object_id, digital_object_component_id, repo_id
rights_statement_actrights_statement_id
sub_containerinstance_id
telephoneagent_contact_id
user_definedaccession_id, resource_id, digital_object_id
ark_namearchival_object_id, resource_id
assessment_attribute_noteassessment_id
assessment_attributeassessment_id
lang_materialarchival_object_id, resource_id, digital_object_id, digital_object_component_id
language_and_scriptlang_material_id
collection_managementaccession_id, resource_id, digital_object_id
location_functionlocation_id
+ + + +

Relationship tables

+ +

These tables exist to enable linking between main records and supporting records. Relationship tables are necessary because, unlike subrecord tables, supporting record tables do not include foreign keys which link them to the main record tables.

+ +

Most relationship tables have the _rlshp suffix in their names. They typically contain just the primary keys for the tables that are being linked, though a few tables also include fields that are specific to the relationship between the two record types.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Relationship/linking tablesTables linked
assessment_reviewer_rlshpassessment to agent_person
assessment_rlshpassessment to accession, archival_object, resource, or digital_object
classification_creator_rlshpclassification to agent_person, agent_family, agent_corporate_entity, or agent_software
classification_rlshpclassification or classification_term to resource or accession
classification_term_creator_rlshpclassification_term to agent_person, agent_family, agent_corporate_entity, or agent_software
event_link_rlshpevent to accession, resource, archival_object, digital_object, digital_object_component, agent_person, agent_family, agent_corporate_entity, agent_software, or top_container. Also includes the role_id column, which can be joined with the enumeration_value table to return the event role (source, outcome, transfer, context)
instance_do_link_rlshpdigital_object to instance
linked_agents_rlshpagent_person, agent_software, agent_family, or agent_corporate_entity to accession, archival_object, digital_object, digital_object_component, event, or resource. Also includes the role_id and relator_id columns, which can be joined with the enumeration_value table
location_profile_rlshplocation to location_profile
owner_repo_rlshplocation to repository
related_accession_rlshpLinks a row in the accession table to another row in the accession table. Also includes fields for relator and relationship type.
related_agents_rlshpagent_person, agent_corporate_entity, agent_software, or agent_family to other agent tables, or two rows in the same agent table. Also includes fields for relator and description, and the type of relationship.
spawned_rlshpaccession to resource. This contains all linked accession data, even if the resource was not spawned from the accession record.
subject_rlshpsubject to accession, archival_object, resource, digital_object, or digital_object_component
surveyed_by_rlshpassessment to agent_person
top_container_housed_at_rlshptop_container to location. Also includes fields for start_date, end_date, status, and a free-text note.
top_container_link_rlshptop_container to sub_container
top_container_profile_rlshptop_container to container_profile
subject_termsubject to term
linked_agent_termlinked_agents_rlshp to term
+ + + +

It is not always obvious which relationship tables will provide the desired results. For instance, to get a box list for a given resource record, enter the following query into a MySQL editor:

+ +
 SELECT DISTINCT 	CONCAT('/repositories/', resource.repo_id, '/resources/', resource.id) as resource_uri
+ 	, resource.identifier
+ 	, resource.title
+ 	, tc.barcode as barcode
+ 	, tc.indicator as box_number
+ FROM sub_container sc
+ JOIN top_container_link_rlshp tclr on tclr.sub_container_id = sc.id
+ JOIN top_container tc on tclr.top_container_id = tc.id
+ JOIN instance on sc.instance_id = instance.id
+ JOIN archival_object ao on instance.archival_object_id = ao.id
+ JOIN resource on ao.root_record_id = resource.id
+ #change to your desired resource id
+ WHERE resource.id = 4556
+
+ +

Sometimes numerous relationship tables must be joined to retrieve the desired results. For instance, to get all boxes and folders for a given resource record, including any container profiles and locations, enter the following query into a MySQL editor:

+ +
 SELECT CONCAT('/repositories/', tc.repo_id, '/top_containers/', tc.id) as tc_uri
+ 	, CONCAT('/repositories/', resource.repo_id, '/resources/', resource.id) as resource_uri
+ 	, CONCAT('/repositories/', resource.repo_id) as repo_uri
+ 	, CONCAT('/repositories/', ao.repo_id, '/archival_objects/', ao.id) as ao_uri
+ 	, resource.identifier AS resource_identifier
+ 	, resource.title AS resource_title
+   , ao.display_string AS ao_title
+   , ev2.value AS level
+   , tc.barcode AS barcode
+ 	, cp.name AS container_profile
+ 	, tc.indicator AS container_num
+   , ev.value AS sc_type
+   , sc.indicator_2 AS sc_num
+ from sub_container sc
+ JOIN top_container_link_rlshp tclr on tclr.sub_container_id = sc.id
+ JOIN top_container tc on tclr.top_container_id = tc.id
+ LEFT JOIN top_container_profile_rlshp tcpr on tcpr.top_container_id = tc.id
+ LEFT JOIN container_profile cp on cp.id = tcpr.container_profile_id
+ LEFT JOIN top_container_housed_at_rlshp tchar on tchar.top_container_id = tc.id
+ JOIN instance on sc.instance_id = instance.id
+ JOIN archival_object ao on instance.archival_object_id = ao.id
+ JOIN resource on ao.root_record_id = resource.id
+ LEFT JOIN enumeration_value ev on ev.id = sc.type_2_id
+ LEFT JOIN enumeration_value ev2 on ev2.id = ao.level_id
+ #change to your desired resource id
+ WHERE resource.id = 4223
+
+
+

+ +

Enumerations

+ +

All controlled values used by the application - excluding tool-tips and frontend/public display values and the values that are stored in a few of the supporting record tables (see below) - are stored in a table called enumeration_value. Controlled values are organized into a variety of parent enumerations (akin to a set of distinct controlled value lists) which are utilized by different record and subrecord types. Parent enumeration data is stored in the enumeration table and is linked by foreign key in the enumeration_id field in the enumeration_value table. In the record and subrecord tables, enumeration values appear as foreign keys in a variety of foreign key columns, usually identified by an _id suffix.

+ +

ArchivesSpace comes with a standard set of controlled values, but most of these are modifiable by end-users via the staff interface and API. However, some values in the enumeration_value table are read-only - these values define the terminology and data types used in different parts of the application (i.e. the various note types).

+ +

Enumeration IDs appear as foreign keys in a variety of database tables:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
table_namecolumn_nameenumeration_name
accessionacquisition_type_idaccession_acquisition_type
accessionresource_type_idaccession_resource_type
agent_contactsalutation_idagent_contact_salutation
archival_objectlevel_idarchival_record_level
collection_managementprocessing_priority_idcollection_management_processing_priority
collection_managementprocessing_status_idcollection_management_processing_status
collection_managementprocessing_total_extent_type_idextent_extent_type
container_profiledimension_units_iddimension_units
datecalendar_iddate_calendar
datecertainty_iddate_certainty
datedate_type_iddate_type
dateera_iddate_era
datelabel_iddate_label
deaccessionscope_iddeaccession_scope
digital_objectdigital_object_type_iddigital_object_digital_object_type
digital_objectlevel_iddigital_object_level
eventevent_type_idevent_event_type
eventoutcome_idevent_outcome
extentextent_type_idextent_extent_type
extentportion_idextent_portion
external_documentidentifier_type_idrights_statement_external_document_identifier_type
file_versionchecksum_method_idfile_version_checksum_methods
file_versionfile_format_name_idfile_version_file_format_name
file_versionuse_statement_idfile_version_use_statement
file_versionxlink_actuate_attribute_idfile_version_xlink_actuate_attribute
file_versionxlink_show_attribute_idfile_version_xlink_show_attribute
instanceinstance_type_idinstance_instance_type
language_and_scriptlanguage_id 
language_and_scriptscript_id 
locationtemporary_idlocation_temporary
location_functionlocation_function_type_idlocation_function_type
location_profiledimension_units_iddimension_units
name_corporate_entityrules_idname_rule
name_corporate_entitysource_idname_source
name_familyrules_idname_rule
name_familysource_idname_source
name_personname_order_idname_person_name_order
name_personrules_idname_rule
name_personsource_idname_source
name_softwarerules_idname_rule
name_softwaresource_idname_source
repositorycountry_idcountry_iso_3166
resourcefinding_aid_description_rules_idresource_finding_aid_description_rules
resourcefinding_aid_language_id 
resourcefinding_aid_script_id 
resourcefinding_aid_status_idresource_finding_aid_status
resourcelevel_idarchival_record_level
resourceresource_type_idresource_resource_type
rights_restriction_typerestriction_type_idrestriction_type
rights_statementjurisdiction_id 
rights_statementother_rights_basis_idrights_statement_other_rights_basis
rights_statementrights_type_idrights_statement_rights_type
rights_statementstatus_id 
rights_statement_actact_type_idrights_statement_act_type
rights_statement_actrestriction_idrights_statement_act_restriction
rights_statement_pre_088ip_status_idrights_statement_ip_status
rights_statement_pre_088jurisdiction_id 
rights_statement_pre_088rights_type_idrights_statement_rights_type
sub_containertype_2_idcontainer_type
sub_containertype_3_idcontainer_type
subjectsource_idsubject_source
telephonenumber_type_idtelephone_number_type
termterm_type_idsubject_term_type
top_containertype_idcontainer_type
+ + + +

To translate the enumeration ID that appears in the record and subrecord tables, join the enumeration_value table. The table can be joined multiple times if there are multiple values to translate, but you must use an alias for each table. For example:

+ +
SELECT CONCAT('/repositories/', ao.repo_id, '/archival_objects/', ao.id) as ao_uri
+  , ao.display_string as ao_title
+  , date.begin
+  , date.end
+  , ev.value as date_label
+  , ev2.value as date_type
+  , ev3.value as date_calendar
+FROM archival_object ao
+LEFT JOIN date on date.archival_object_id = ao.id
+LEFT JOIN enumeration_value ev on ev.id = date.label_id
+LEFT JOIN enumeration_value ev2 on ev2.id = date.date_type_id
+LEFT JOIN enumeration_value ev3 on ev3.id = date.calendar_id
+
+ +

NOTE: container_profile, location_profile, and assessment_attribute_definition records are similar to the records in the enumeration_value table in that they store controlled values which are referenced by other parts of the system. However, they differ in that they have their own tables and are addressable via their own URIs.

+ +

User, setting, and permission tables

+ +

These tables store user and permissions information, user/repository/global preferences, and RDE and custom report templates.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table nameDescription
custom_report_templateCustom report templates
default_valuesDefault values settings
groupData about permission groups created by each repository
group_permissionLinks the permission table to the group table
group_userLinks the group table to the user table
oai_configConfiguration data for OAI-PMH harvesting
permissionAll permission types that can be assigned to users
preferenceUser preference data
rde_templateRDE templates
required_fieldsContains repository-defined required fields
userUser data
+ +

Job tables

+ +

These tables store data related to background jobs, including imports.

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table nameDescription
jobAll jobs which have been run in an ArchivesSpace instance.
job_created_recordRecords created via background jobs
job_input_fileData about input files used in background jobs
job_modified_recordData about records modified via background jobs
+ +

System tables

+ +

These tables track actions taken against the database (i.e. edits and deletes), system events, session and authorization data, and database information. These tables are typically not referenced by any other table.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table nameDescription
active_editRecords being actively edited by a user. Read-only system table
auth_dbAuthentication data for users. Read-only system table
deleted_recordsRecords deleted in the past 24 hours. Read-only system table
notificationNotifications stream. Read-only system table
schema_infoContains the database schema version. Read-only system table.
sequenceThe value corresponds to the number of children the archival object has, minus 1. Read-only system table
sessionRecent session data. Read-only system table
system_eventSystem event data. Read-only system table
+ + + + +

Parent-Child Relationships and Sequencing

+ +

Repository-scoped records

+ +

Many main and supporting records are scoped to a particular repository. In these tables the parent repository is identified by a foreign key which corresponds to the database identifier in the repository table:

+ + + + + + + + + + + + + + + + + + +
Column nameDescriptionExampleFound in
repo_idThe database ID of the parent repository12accession, archival_object, assessment, assessment_attribute_definition, classification, classification_term, custom_report_template, default_values, digital_object, digital_object_component, event, group, job, preference, required_fields, resource, rights_statement, top_container
+ +

Parent/child relationships

+ +

Hierarchical relationships between other records are also expressed through foreign keys:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Column nameDescriptionExamplePK TablesFound in
root_record_idThe database ID of the root parent record4566resource, digital_object, classificationarchival_object, digital_object_component, classification_term
parent_idThe database ID of the immediate parent record. This is used to identify parent records which are of the same type as the child record (i.e. two archival object records). The value will be NULL if the only parent is the root record.1748121archival_object, classification_term, digital_object_componentarchival_object, classification_term, digital_object_component, note_persistent_id
parent_nameThe database ID or URI, and the record type of the immediate parent144@archival_object, root@/repositories/2/resources/2resource, archival_object, classification, classification_term, digital_object, digital_object_componentarchival_object, classification_term, digital_object_component
+ +

Beginning with MySQL 8, you can recursively retrieve all parents of an archival object (or all archival objects linked to a resource) by running the following query:

+ +
WITH RECURSIVE ao_path AS
+  (SELECT ao1.id
+    , ao1.display_string
+    , ao1.component_id
+    , ao1.parent_id
+    , ev.value as `ao_level`
+    , 1 as level
+   FROM archival_object ao1
+   LEFT JOIN enumeration_value ev on ev.id = ao1.level_id
+   WHERE ao1.id = <your ao id>
+   <!-- to get all trees for a resource change to: WHERE ao1.root_record_id = <your root_record_id> -->
+   UNION ALL
+   SELECT ao2.id
+    , ao2.display_string
+    , ao2.component_id
+    , ao2.parent_id
+    , ev.value as `ao_level`
+    , ao_path.level + 1 as level
+   FROM ao_path
+   JOIN archival_object ao2 on ao_path.parent_id = ao2.id
+   LEFT JOIN enumeration_value ev on ev.id = ao2.level_id)
+   SELECT GROUP_CONCAT(CONCAT(display_string, ' ', ' (', CONCAT(UPPER(SUBSTRING(ao_level,1,1)),LOWER(SUBSTRING(ao_level,2))), ' ', IF(component_id is not NULL, CAST(component_id as CHAR), "N/A"), ')') ORDER BY level DESC SEPARATOR ' > ') as tree
+   FROM ao_path;
+
+
+ +

To retrieve all children (MySQL 8+):

+ +

To retrieve both parents and children (MySQL 8+):

+ +

To retrieve all parents of a record in MySQL 5.7 and below, run the following query:

+ +
SELECT (SELECT GROUP_CONCAT(CONCAT(display_string, ' (', ao_level, ')') SEPARATOR ' < ') as parent_path
+		FROM (SELECT T2.display_string as display_string
+					, ev.value as ao_level
+			  FROM (SELECT @r AS _id
+						, @p := @r AS previous
+						, (SELECT @r := parent_id FROM archival_object WHERE id = _id) AS parent_id
+						, @l := @l + 1 AS lvl
+      				FROM ((SELECT @r := 1749840, @p := 0, @l := 0) AS vars,
+           					archival_object h)
+      					   WHERE @r <> 0 AND @r <> @p) AS T1
+			  JOIN archival_object T2 ON T1._id = T2.id
+			  LEFT JOIN enumeration_value ev on ev.id = T2.level_id
+			  WHERE T2.id != 1749840
+			  ORDER BY T1.lvl DESC) as all_parents) as p_path
+	 , ao.display_string
+	 , CONCAT('/repositories/', ao.repo_id, '/archival_objects/', ao.id) as uri
+FROM archival_object ao
+WHERE ao.id = 1749840
+
+ +

To retrieve all children of a record (MySQL 5.7 and below):

+ +
+ +

Sequencing

+ +

The ordering of records in a resource, classification, or digital_object tree is determined by the position field. The position field is also used to order values in the enumeration_value and assessment_attribute_definition tables:

+ + + + + + + + + + + + + + + + + + +
Column nameDescriptionExampleFound in
positionThe position of the archival object under the immediate parent168000enumeration_value, assessment_attribute_definition, classification_term, digital_object_component, archival_object
+ +

Boolean fields

+ +

Many records and subrecords include fields which contain integers (0 or 1) corresponding to boolean values.

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Boolean fieldsDescriptionFound in
publish subnote_metadata, file_version, external_document, accession, classification, agent_person, agent_family, agent_software, agent_corporate_entity, classification_term, revision_statement, repository, note, digital_object, digital_object_component, archival_object, resource
suppressed accession, archival_object, assessment_reviewer_rlshp, assessment_rlshp, classification, classification_creator_rlshp, classification_rlshp, classification_term, classification_term_creator_rlshp, digital_object, digital_object_component, enumeration_value, event, event_link_rlshp, instance_do_link_rlshp, linked_agents_rlshp, location_profile_rlshp, owner_repo_rlshp, related_accession_rlshp, related_agents_rlshp, resource, spawned_rlshp, surveyed_by_rlshp, top_container_housed_at_rlshp, top_container_link_rlshp, top_container_profile_rlshp
restrictions_apply accession, archival_object
+ + + + + +

Read-Only Fields

+ +

Several system-generated, read-only fields appear across many tables. These include database identifiers, timestamps that track record creation and modification, and fields that record the username of the user that created and last modified each record.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Most common read-only fieldsDescription
id (primary key)Database identifier for each record
system_mtimeThe last time the record was modified by the system
created_byThe user that created a record
last_modified_byThe user that last modified a record
user_mtimeThe time that a record was last modified by a user
create_timeThe time that a record was created
lock_versionThis field is incrementally updated each time a record is updated. This provides a method of tracking updates and managing near-simultaneous edits by different users.
json_schema_versionThe JSON schema version
aspace_relationship_positionThe position of a linked record in a list of other linked records
is_slug_autoA boolean value that indicates whether a slug was auto-generated
system_generatedA boolean value that indicates whether a field was system-generated
display_stringA system-generated field which concatenates the title and date fields of an archival object record
+ +

NOTE: for subrecord tables these fields may hold unexpected data - because subrecords are deleted and recreated upon each save of a main or supporting record, their create and modification times will also be recreated and will not reflect the original creation date of the subrecord itself. For resource records, the timestamp only records the time that the resource itself was modified, not the last time any of its components were modified.

+ + + + +
+ +
+ + + diff --git a/architecture/backend/database.md b/architecture/backend/database.md deleted file mode 100644 index 36c4fd23..00000000 --- a/architecture/backend/database.md +++ /dev/null @@ -1,543 +0,0 @@ -# Working with the ArchivesSpace Database - -The ArchivesSpace database stores all data that is created within an ArchivesSpace instance. As described in other sections of this documentation, the backend code - particularly the model layer and `ASModel_crud.rb` file - uses the `Sequel` database toolkit to bridge the gap between this underlying data and the JSON objects which are exchanged by the other components of the system. - -Often, querying the database directly is the most efficient and powerful way to retrieve data from ArchivesSpace. It is also possible to use raw SQL queries to create custom reports that can be run by users in the staff interface. Please consult the [Custom Reports](../../customization/reports.html) section of this documentation for additional information on creating custom reports. - - - -It is recommended that ArchivesSpace be run against MySQL in production, not the included demo database. Instructions on setting up ArchivesSpace to run against MySQL are [here](../../provisioning/mysql.html). - -The examples in this section are written for MySQL. There are many freely-available tutorials on the internet which can provide guidance to those unfamiliar with MySQL query syntax and the features of the language. - -**NOTE**: the documentation below is current through database schema version 129, application version 2.7.1. - -## Database Overview - -The ArchivesSpace database schema and it's mapping to the JSONModel objects used by the other parts of the system is defined by the files in the `common/schemas` and `backend/models` directories. The database itself is created via the `setup-database` script in the `scripts` directory. This script runs the migrations in the `common/db/migrations` directory. 
- -The tables in the ArchivesSpace database can be grouped into several general categories: - * [Main record tables](#Main-record-tables) - * [Supporting record tables](#Supporting-record-tables) - * [Subrecord tables](#Subrecord-tables) - * [Relationship/linking tables](#Relationship-tables) - * [Enumeration tables](#Enumerations) - * [User, setting, and permission tables](#User-setting-and-permission-tables) - * [Job tables](#Job-tables) - * [System tables](#System-tables) - - - One way to get a view of all tables and columns in your ArchivesSpace instance is to run the following query in a MySQL client: - - ``` - SELECT TABLE_SCHEMA - , TABLE_NAME - , COLUMN_NAME - , ORDINAL_POSITION - , IS_NULLABLE - , COLUMN_TYPE - , COLUMN_KEY - FROM INFORMATION_SCHEMA.COLUMNS - #change the following value to whatever your database is named - WHERE TABLE_SCHEMA Like 'archivesspace' - ``` - -Additionally, a BETA version of an [ArchivesSpace data dictionary](https://github.com/archivesspace/data-dictionary-initial) has been created by members of the ArchivesSpace development team and the ArchivesSpace User Advisory Council Reports team. - -## Main record tables - -These tables hold data about the primary record types in ArchivesSpace. Main record types are distinguished from subrecords in that they have their own persistent URIs - corresponding to their database identifiers/primary keys - that are resolvable via the staff interface, public interface, and API. They are distinguished from supporting records in that they are the primary descriptive record types that users will interact with in the system. - -All of these records, except archival objects, can be created independently of any other record. Archival object records represent components of a larger entity, and so they must have a resource record as a root parent. 
See the [parent/child relationships](#Parent-Child-Relationships-and-Sequencing) section for more information about the representation of hierarchical relationships in the database. - -A few common fields occur in several main record tables. These similar fields are defined by the parent schemas in the `common/schemas` directory: - -| Column Name | Tables | -|----------------|--------| -| `title` | `accession`, `archival_object`, `digital_object`, `digital_object_component`, `resource` -| `identifier`/`component_id`/`digital_object_id` | `accession`, `resource`/`archival_object`, `digital_object_component`/`digital_object` -| `other_level` | `archival_object`, `resource` -| `repository_processing_note` | `archival_object`, `resource` - - - -All of the main records have a set of fields which store boolean values (`0` or `1`) that indicate whether the records are published in the public user interface, suppressed in the staff interface, or have some kind of applicable restriction. The exception to this is the `repository` table, which does not have a restriction boolean, but does have a `hidden` boolean. The `accession` table has multiple restriction-related booleans. See the section below for more information about boolean fields. - -Beginning in version 2.6.0, the main record tables (and some supporting records - see below) also contain fields which hold data about archival resource keys (ARKs) and human-readable URLs (slugs): - -| Column Name | Tables | -|----------------|--------| -`slug` | `accession`, `archival_object`, `digital_object`, `digital_object_component`, `repository`, `resource` -| `external_ark_url` | `archival_object`, `resource` - -Also stored in these and all other tables are enumeration values, foreign keys which correspond to database identifiers in the `enumeration_value` table, which stores controlled values. See enumeration section below for more detail. - -All subrecord data types - i.e. 
dates, extents, instances - relating to a main or supporting record are stored in their own tables and linked to main or supporting records via foreign key references in the subrecord tables. See subrecord section below for more detail. - -The remaining data in the main record tables is text, and is unique to each table: - -| TABLE_NAME | COLUMN_NAME | IS_NULLABLE | COLUMN_TYPE | COLUMN_KEY | -|--------------------------|-------------------------------|-------------|---------------|------------| -| `accession` | `content_description` | YES | text | | -| `accession` | `condition_description` | YES | text | | -| `accession` | `disposition` | YES | text | | -| `accession` | `inventory` | YES | text | | -| `accession` | `provenance` | YES | text | | -| `accession` | `general_note` | YES | text | | -| `accession` | `accession_date` | YES | date | | -| `accession` | `retention_rule` | YES | text | | -| `accession` | `access_restrictions_note` | YES | text | | -| `accession` | `use_restrictions_note` | YES | text | | -| `archival_object` | `ref_id` | NO | varchar(255) | MUL | -| `digital_object_component` | `label` | YES | varchar(255) | | -| `repository` | `repo_code` | NO | varchar(255) | UNI | -| `repository` | `name` | NO | varchar(255) | | -| `repository` | `org_code` | YES | varchar(255) | | -| `repository` | `parent_institution_name` | YES | varchar(255) | | -| `repository` | `url` | YES | varchar(255) | | -| `repository` | `image_url` | YES | varchar(255) | | -| `repository` | `contact_persons` | YES | text | | -| `repository` | `description` | YES | text | | -| `repository` | `oai_is_disabled` | YES | int | | -| `repository` | `oai_sets_available` | YES | text | | -| `resource` | `ead_id` | YES | varchar(255) | | -| `resource` | `ead_location` | YES | varchar(255) | | -| `resource` | `finding_aid_title` | YES | text | | -| `resource` | `finding_aid_filing_title` | YES | text | | -| `resource` | `finding_aid_date` | YES | varchar(255) | | -| `resource` | 
`finding_aid_author` | YES | text | | -| `resource` | `finding_aid_language_note` | YES | varchar(255) | | -| `resource` | `finding_aid_sponsor` | YES | text | | -| `resource` | `finding_aid_edition_statement` | YES | text | | -| `resource` | `finding_aid_series_statement` | YES | text | | -| `resource` | `finding_aid_note` | YES | text | | -| `resource` | `finding_aid_subtitle` | YES | text | | - - - -## Supporting record tables - -Like the main record types listed above, supporting records can also be created independently of other records, and are addressable in the staff interface and API via their own URI. However, they are primarily meaningful via their many-to-many linkages to the main record types (and, sometimes, other supporting record types). These records typically provide additional information about, or otherwise enhance, the primary record types. A few supporting record types - for instance those in the `term` table - are used to enhance other supporting record types. - -| Supporting module tables | Linked to | -|-----------------------------------|--------------------------| -| `agent_corporate_entity` | -| `agent_family` | -| `agent_person` | -| `agent_software` | -| `assessment` | -| `classification` | `accession`, `resource` -| `classification_term` | `classification`, `accession`, `resource` -| `container_profile` | `top_container` -| `event` | -| `location` | -| `location_profile` | `location` -| `subject` | `resource`, `archival_object` -| `term` | `subject` -| `top_container` | -| `vocabulary` | `subject`, `term` -| `assessment_attribute_definition` | `assessment_attribute`, `assessment_attribute_note` - - - -## Subrecord tables - - - -Subrecords must be associated with a main or supporting record - they cannot be created independently. As such, they do not have their own URIs, and can only be accessed via the API by retrieving the top-level record with which they are associated. 
In the staff interface these records are embedded within main or supporting record views. In the API subrecord data is contained in arrays within main or supporting records. - -The various subrecord types do have their own database tables. In addition to data specific to the subrecord type, the tables also contain foreign key columns which hold the database identifiers of main or supporting records. Subrecord tables must have a value in one of the foreign key fields. Some subrecords can have another subrecord as parent (for instance, the `sub_container` subrecord has `instance_id` as its foreign key column). - -Subrecords exist in a one-to-many relationship with their parent records, so a record's `id` may appear multiple times in a subrecord table (i.e. when there are two dates associated with a resource record). - -It is important to note that subrecords are deleted and recreated upon each save of the main or supporting record with which they are associated, regardless of whether the subrecord itself is modified. This means that the database identifier is deleted and reassigned upon each save. 
- -| Subrecord tables | Foreign keys | -|-------------------------------|-------------- -| `agent_contact` | `agent_person_id`, `agent_family_id`, `agent_corporate_entity_id`, `agent_software_id` -| `date` | `accession_id`, `deaccession_id`, `archival_object_id`, `resource_id`, `event_id`, `digital_object_id`, `digital_object_component_id`, `related_agents_rlshp_id`, `agent_person_id`, `agent_family_id`, `agent_corporate_entity_id`, `agent_software_id`, `name_person_id`, `name_family_id`, `name_corporate_entity_id`, `name_software_id` -| `extent` | `accession_id`, `deaccession_id`, `archival_object_id`, `resource_id`, `digital_object_id`, `digital_object_component_id` -| `external_document` | `accession_id`, `archival_object_id`, `resource_id`, `subject_id`, `agent_person_id`, `agent_family_id`, `agent_corporate_entity_id`, `agent_software_id`, `rights_statement_id`, `digital_object_id`, `digital_object_component_id`, `event_id` -| `external_id` | `subject_id`, `accession_id`, `archival_object_id`, `collection_management_id`, `digital_object_id`, `digital_object_component_id`, `event_id`, `location_id`, `resource_id` -| `file_version` | `digital_object_id`, `digital_object_component_id` -| `instance` | `resource_id`, `archival_object_id`, `accession_id` -| `name_authority_id` | `name_person_id`, `name_family_id`, `name_software_id`, `name_corporate_entity_id` -| `name_corporate_entity` | `agent_corporate_entity_id` -| `name_family` | `agent_family_id` -| `name_person` | `agent_person_id` -| `name_software` | `agent_software_id` -| `note` | `resource_id`, `archival_object_id`, `digital_object_id`, `digital_object_component_id`, `agent_person_id`, `agent_corporate_entity_id`, `agent_family_id`, `agent_software_id`, `rights_statement_act_id`, `rights_statement_id` -| `note_persistent_id` | `note_id`, `parent_id` -| `revision_statement` | `resource_id` -| `rights_restriction` | `resource_id`, `archival_object_id` -| `rights_restriction_type` | `rights_restriction_id` 
-| `rights_statement` | `accession_id`, `archival_object_id`, `resource_id`, `digital_object_id`, `digital_object_component_id`, `repo_id` -| `rights_statement_act` | `rights_statement_id` -| `sub_container` | `instance_id` -| `telephone` | `agent_contact_id` -| `user_defined` | `accession_id`, `resource_id`, `digital_object_id` -| `ark_name` | `archival_object_id`, `resource_id` -| `assessment_attribute_note` | `assessment_id` -| `assessment_attribute` | `assessment_id` -| `lang_material` | `archival_object_id`, `resource_id`, `digital_object_id`, `digital_object_component_id` -| `language_and_script` | `lang_material_id` -| `collection_management` | `accession_id`, `resource_id`, `digital_object_id` -| `location_function` | `location_id` - - - -## Relationship tables - -These tables exist to enable linking between main records and supporting records. Relationship tables are necessary because, unlike subrecord tables, supporting record tables do not include foreign keys which link them to the main record tables. - -Most relationship tables have the `_rlshp` suffix in their names. They typically contain just the primary keys for the tables that are being linked, though a few tables also include fields that are specific to the relationship between the two record types. 
- -| Relationship/linking tables | Tables linked | -|-------------------------------------|------------- -| `assessment_reviewer_rlshp` | `assessment` to `agent_person` -| `assessment_rlshp` | `assessment` to `accession`, `archival_object`, `resource`, or `digital_object` -| `classification_creator_rlshp` | `classification` to `agent_person`, `agent_family`, `agent_corporate_entity`, or `agent_software` -| `classification_rlshp` | `classification` or `classification_term` to `resource` or `accession` -| `classification_term_creator_rlshp` | `classification_term` to `agent_person`, `agent_family`, `agent_corporate_entity`, or `agent_software` -| `event_link_rlshp` | `event` to `accession`, `resource`, `archival_object`, `digital_object`, `digital_object_component`, `agent_person`, `agent_family`, `agent_corporate_entity`, `agent_software`, or `top_container`. Also includes the `role_id` table, which can be joined with the `enumeration_value` table to return the event role (source, outcome, transfer, context) -| `instance_do_link_rlshp` | `digital_object` to `instance` -| `linked_agents_rlshp` | `agent_person`, `agent_software`, `agent_family`, or `agent_corporate_entity` to `accession`, `archival_object`, `digital_object`, `digital_object_component`, `event`, or `resource`. Also includes the `role_id` and `relator_id` tables, which can be joined with the `enumeration_value` table -| `location_profile_rlshp` | `location` to `location_profile` -| `owner_repo_rlshp` | `location` to `repository` -| `related_accession_rlshp` | Links a row in the `accession` table to another row in the `accession` table. Also includes fields for `relator` and relationship type. -| `related_agents_rlshp` | `agent_person`, `agent_corporate_entity`, `agent_software`, or `agent_family` to other agent tables, or two rows in the same agent table. Also includes fields for `relator` and `description`, and the type of relationship. -| `spawned_rlshp` | `accession` to `resource`. 
This contains all linked accession data, even if the resource was not spawned from the accession record. -| `subject_rlshp` | `subject` to `accession`, `archival_object`, `resource`, `digital_object`, or `digital_object_component` -| `surveyed_by_rlshp` | `assessment` to `agent_person` -| `top_container_housed_at_rlshp` | `top_container` to `location`. Also includes fields for `start_date`, `end_date`, `status`, and a free-text `note`. -| `top_container_link_rlshp` | `top_container` to `sub_container` -| `top_container_profile_rlshp` | `top_container` to `container_profile` -| `subject_term` | `subject` to `term` -| `linked_agent_term` | `linked_agents_rlshp` to `term` - - - - It is not always obvious which relationship tables will provide the desired results. For instance, to get a box list for a given resource record, enter the following query into a MySQL editor: - - ``` - SELECT DISTINCT CONCAT('/repositories/', resource.repo_id, '/resources/', resource.id) as resource_uri - , resource.identifier - , resource.title - , tc.barcode as barcode - , tc.indicator as box_number - FROM sub_container sc - JOIN top_container_link_rlshp tclr on tclr.sub_container_id = sc.id - JOIN top_container tc on tclr.top_container_id = tc.id - JOIN instance on sc.instance_id = instance.id - JOIN archival_object ao on instance.archival_object_id = ao.id - JOIN resource on ao.root_record_id = resource.id - #change to your desired resource id - WHERE resource.id = 4556 - ``` - - Sometimes numerous relationship tables must be joined to retrieve the desired results. 
For instance, to get all boxes and folders for a given resource record, including any container profiles and locations, enter the following query into a MySQL editor: - - ``` - SELECT CONCAT('/repositories/', tc.repo_id, '/top_containers/', tc.id) as tc_uri - , CONCAT('/repositories/', resource.repo_id, '/resources/', resource.id) as resource_uri - , CONCAT('/repositories/', resource.repo_id) as repo_uri - , CONCAT('/repositories/', ao.repo_id, '/archival_objects/', ao.id) as ao_uri - , resource.identifier AS resource_identifier - , resource.title AS resource_title - , ao.display_string AS ao_title - , ev2.value AS level - , tc.barcode AS barcode - , cp.name AS container_profile - , tc.indicator AS container_num - , ev.value AS sc_type - , sc.indicator_2 AS sc_num - from sub_container sc - JOIN top_container_link_rlshp tclr on tclr.sub_container_id = sc.id - JOIN top_container tc on tclr.top_container_id = tc.id - LEFT JOIN top_container_profile_rlshp tcpr on tcpr.top_container_id = tc.id - LEFT JOIN container_profile cp on cp.id = tcpr.container_profile_id - LEFT JOIN top_container_housed_at_rlshp tchar on tchar.top_container_id = tc.id - JOIN instance on sc.instance_id = instance.id - JOIN archival_object ao on instance.archival_object_id = ao.id - JOIN resource on ao.root_record_id = resource.id - LEFT JOIN enumeration_value ev on ev.id = sc.type_2_id - LEFT JOIN enumeration_value ev2 on ev2.id = ao.level_id - #change to your desired resource id - WHERE resource.id = 4223 - - ``` - - -## Enumerations - -All controlled values used by the application - excluding tool-tips and frontend/public display values and the values that are stored a few of the supporting record tables (see below) - are stored in a table called `enumeration_values`. Controlled values are organized into a variety of parent enumerations (akin to a set of distinct controlled value lists) which are utilized by different record and subrecord types. 
Parent enumeration data is stored in the `enumeration` table and is linked by foreign key in the `enumeration_id` field in the `enumeration_value` table. In the record and subrecord tables, enumeration values appear as foreign keys in a variety of foreign key columns, usually identified by an `_id` suffix. - -ArchivesSpace comes with a standard set of controlled values, but most of these are modifiable by end-users via the staff interface and API. However, some values in the `enumeration_value` table are read-only - these values define the terminology and data types used in different parts of the application (i.e. the various note types). - -Enumeration IDs appear as foreign keys in a variety of database tables: - -| table_name | column_name | enumeration_name | -|----------------------------|------------------------------------|--------------------------------------------| -| `accession` | `acquisition_type_id` | accession_acquisition_type -| `accession` | `resource_type_id` | accession_resource_type -| `agent_contact` | `salutation_id` | agent_contact_salutation -| `archival_object` | `level_id` | archival_record_level -| `collection_management` | `processing_priority_id` | collection_management_processing_priority -| `collection_management` | `processing_status_id` | collection_management_processing_status -| `collection_management` | `processing_total_extent_type_id` | extent_extent_type_id -| `container_profile` | `dimension_units_id` | dimension_units -| `date` | `calendar_id` | date_calendar -| `date` | `certainty_id` | date_certainty -| `date` | `date_type_id` | date_type -| `date` | `era_id` | date_era -| `date` | `label_id` | date_label -| `deaccession` | `scope_id` | deaccession_scope -| `digital_object` | `digital_oject_type_id` | digital_object_digital_object_type -| `digital_object` | `level_id` | digital_object_level -| `event` | `event_type_id` | event_event_type -| `event` | `outcome_id` | event_outcome -| `extent` | `extent_type_id` | 
extent_extent_type -| `extent` | `portion_id` | extent_portion -| `external_document` | `identifier_type_id` | rights_statement_external_document_identifier_type -| `file_version` | `checksum_method_id` | file_version_checksum_methods -| `file_version` | `file_format_name_id` | file_version_file_format_name -| `file_version` | `use_statement_id` | file_version_use_statement -| `file_version` | `xlink_actuate_attribute_id` | file_version_xlink_actuate_attribute -| `file_version` | `xlink_show_attribute_id` | file_version_xlink_show_attribute -| `instance` | `instance_type_id` | instance_instance_type -| `language_and_script` | `language_id` | -| `language_and_script` | `script_id` | -| `location` | `temporary_id` | location_temporary -| `location_function` | `location_function_type_id` | location_function_type -| `location_profile` | `dimension_units_id` | dimension_units -| `name_corporate_entity` | `rules_id` | name_rule -| `name_corporate_entity` | `source_id` | name_source -| `name_family` | `rules_id` | name_rule -| `name_family` | `source_id` | name_source -| `name_person` | `name_order_id` | name_person_name_order -| `name_person` | `rules_id` | name_rule -| `name_person` | `source_id` | name_source -| `name_software` | `rules_id` | name_rule -| `name_software` | `source_id` | name_source -| `repository` | `country_id` | country_iso_3166 -| `resource` | `finding_aid_description_rules_id` | resource_finding_aid_description_rules -| `resource` | `finding_aid_language_id` | -| `resource` | `finding_aid_script_id` | -| `resource` | `finding_aid_status_id` | resource_finding_aid_status -| `resource` | `level_id` | archival_record_level -| `resource` | `resource_type_id` | resource_resource_type -| `rights_restriction_type` | `restriction_type_id` | restriction_type -| `rights_statement` | `jurisdiction_id` | -| `rights_statement` | `other_rights_basis_id` | rights_statement_other_rights_basis -| `rights_statement` | `rights_type_id` | rights_statement_rights_type 
-| `rights_statement` | `status_id` | -| `rights_statement_act` | `act_type_id` | rights_statement_act_type -| `rights_statement_act` | `restriction_id` | rights_statement_act_restriction -| `rights_statement_pre_088` | `ip_status_id` | rights_statement_ip_status -| `rights_statement_pre_088` | `jurisdiction_id` | -| `rights_statement_pre_088` | `rights_type_id` | rights_statement_rights_type -| `sub_container` | `type_2_id` | container_type -| `sub_container` | `type_3_id` | container_type -| `subject` | `source_id` | subject_source -| `telephone` | `number_type_id` | telephone_number_type -| `term` | `term_type_id` | subject_term_type -| `top_container` | `type_id` | container_type - - - -To translate the enumeration ID that appears in the record and subrecord tables, join the `enumeration_value` table. The table can be joined multiple times if there are multiple values to translate, but you must use an alias for each table. For example: - -``` -SELECT CONCAT('/repositories/', ao.repo_id, '/archival_objects/', ao.id) as ao_uri - , ao.display_string as ao_title - , date.begin - , date.end - , ev.value as date_label - , ev2.value as date_type - , ev3.value as date_calendar -FROM archival_object ao -LEFT JOIN date on date.archival_object_id = ao.id -LEFT JOIN enumeration_value ev on ev.id = date.label_id -LEFT JOIN enumeration_value ev2 on ev2.id = date.date_type_id -LEFT JOIN enumeration_value ev3 on ev3.id = date.calendar_id -``` - -**NOTE**: `container_profile`, `location_profile`, and `assessment_attribute_definition` records are similar to the records in the `enumeration_value` table in that they store controlled values which are referenced by other parts of the system. However, they differ in that they have their own tables and are addressable via their own URIs. - -## User, setting, and permission tables - -These tables store user and permissions information, user/repository/global preferences, and RDE and custom report templates. 
- -| Table name | Description | -|-----------------------------------|----------------------------------------------------------------| -| `custom_report_template` | Custom report templates -| `default_values` | Default values settings -| `group` | Data about permission groups created by each repository -| `group_permission` | Links the permission table to the group table -| `group_user` | Links the group table to the user table -| `oai_config` | Configuration data for OAI-PMH harvesting -| `permission` | All permission types that can be assigned to users -| `preference` | User preference data -| `rde_template` | RDE templates -| `required_fields` | Contains repository-defined required fields -| `user` | User data - -## Job tables - -These tables store data related to background jobs, including imports. - -| Table name | Description | -|-----------------------------------|----------------------------------------------------------------| -| `job` | All jobs which have been run in an ArchivesSpace instance. -| `job_created_record` | Records created via background jobs -| `job_input_file` | Data about input files used in background jobs -| `job_modified_record` | Data about records modified via background jobs - -## System tables - -These tables track actions taken against the database (i.e. edits and deletes), system events, session and authorization data, and database information. These tables are typically not referenced by any other table. - -| Table name | Description | -|-----------------------------------|----------------------------------------------------------------| -| `active_edit` | Records being actively edited by a user. Read-only system table -| `auth_db` | Authentication data for users. Read-only system table -| `deleted_records` | Records deleted in the past 24 hours. Read-only system table -| `notification` | Notifications stream. Read-only system table -| `schema_info` | Contains the database schema version. Read-only system table. 
-| `sequence` | The value corresponds to the number of children the archival object has - 1. Read-only system table -| `session` | Recent session data. Read-only system table -| `system_event` | System event data. Read-only system table - - - - -## Parent-Child Relationships and Sequencing - -### Repository-scoped records - -Many main and supporting records are scoped to a particular repository. In these tables the parent repository is identified by a foreign key which corresponds to the database identifier in the `repository` table: - -| Column name | Description | Example | Found in -|---------------|------------------------------------------|---------|------- -`repo_id` | The database ID of the parent repository | `12` | `accession`, `archival_object`, `assessment`, `assessment_attribute_definition`, `classification`, `classification_term`, `custom_report_template`, `default_values`, `digital_object`, `digital_object_component`, `event`, `group`, `job`, `preference`, `required_fields`, `resource`, `rights_statement`, `top_container` - -### Parent/child relationships - -Hierarchical relationships between other records are also expressed through foreign keys: - -| Column name | Description | Example |PK Tables| Found in -|---------------|-------------------------------------------|---------|---------|------- -`root_record_id`| The database ID of the root parent record | `4566` |`resource`, `digital_object`, `classification`| `archival_object`, `digital_object_component`, `classification_term` -`parent_id` | The database ID of the immediate parent record. This is used to identify parent records which are of the same type as the child record (i.e. two archival object records). The value will be NULL if the only parent is the root record. 
| `1748121` |`archival_object`, `classification_term`, `digital_object_component`| `archival_object`, `classification_term`, `digital_object_component`, `note_persistent_id` -`parent_name` | The database ID or URI, and the record type of the immediate parent | `144@archival_object`, `root@/repositories/2/resources/2` | `resource`, `archival_object`, `classification`, `classification_term`, `digital_object`, `digital_object_component`| `archival_object`, `classification_term`, `digital_object_component` - -Beginning with MySQL 8, you can recursively retrieve all parents of an archival object (or all archival objects linked to a resource) by running the following query: - -``` -WITH RECURSIVE ao_path AS - (SELECT ao1.id - , ao1.display_string - , ao1.component_id - , ao1.parent_id - , ev.value as `ao_level` - , 1 as level - FROM archival_object ao1 - LEFT JOIN enumeration_value ev on ev.id = ao1.level_id - WHERE ao1.id = - - UNION ALL - SELECT ao2.id - , ao2.display_string - , ao2.component_id - , ao2.parent_id - , ev.value as `ao_level` - , ao_path.level + 1 as level - FROM ao_path - JOIN archival_object ao2 on ao_path.parent_id = ao2.id - LEFT JOIN enumeration_value ev on ev.id = ao2.level_id) - SELECT GROUP_CONCAT(CONCAT(display_string, ' ', ' (', CONCAT(UPPER(SUBSTRING(ao_level,1,1)),LOWER(SUBSTRING(ao_level,2))), ' ', IF(component_id is not NULL, CAST(component_id as CHAR), "N/A"), ')') ORDER BY level DESC SEPARATOR ' > ') as tree - FROM ao_path; - -``` - -To retrieve all children (MySQL 8+): - -To retrieve both parents and children (MySQL 8+): - -To retrieve all parents of a record in MySQL 5.7 and below, run the following query: - -``` -SELECT (SELECT GROUP_CONCAT(CONCAT(display_string, ' (', ao_level, ')') SEPARATOR ' < ') as parent_path - FROM (SELECT T2.display_string as display_string - , ev.value as ao_level - FROM (SELECT @r AS _id - , @p := @r AS previous - , (SELECT @r := parent_id FROM archival_object WHERE id = _id) AS parent_id - , @l := @l + 1 AS 
lvl - FROM ((SELECT @r := 1749840, @p := 0, @l := 0) AS vars, - archival_object h) - WHERE @r <> 0 AND @r <> @p) AS T1 - JOIN archival_object T2 ON T1._id = T2.id - LEFT JOIN enumeration_value ev on ev.id = T2.level_id - WHERE T2.id != 1749840 - ORDER BY T1.lvl DESC) as all_parents) as p_path - , ao.display_string - , CONCAT('/repositories/', ao.repo_id, '/archival_objects/', ao.id) as uri -FROM archival_object ao -WHERE ao.id = 1749840 -``` - -To retrieve all children of a record (MySQL 5.7 and below): - -``` -``` - -### Sequencing - -The ordering of records in a `resource`, `classification`, or `digital_object` tree is determined by the `position` field. The position field is also used to order values in the `enumeration_value` and `assessment_attribute_definition` tables: - -| Column name | Description | Example |Found in -|---------------|------------------------------------------|---------|------- -`position` | The position of the archival object under the immediate parent | `168000` | `enumeration_value`, `assessment_attribute_definition`, `classification_term`, `digital_object_component`, `archival_object` - - -## Boolean fields - -Many records and subrecords include fields which contain integers (`0` or `1`) corresponding to boolean values. 
- -| Boolean fields | Description | Found in | -|----------------------|----------------|----------| -| `publish` | | `subnote_metadata`, `file_version`, `external_document`, `accession`, `classification`, `agent_person`, `agent_family`, `agent_software`, `agent_corporate_entity`, `classification_term`, `revision_statement`, `repository`, `note`, `digital_object`, `digital_object_component`, `archival_object`, `resource` -| `suppressed` | | `accession`, `archival_object`, `assessment_reviewer_rlshp`, `assessment_rlshp`, `classification`, `classification_creator_rlshp`, `classification_rlshp`, `classification_term`, `classification_term_creator_rlshp`, `digital_object`, `digital_object_component`, `enumeration_value`, `event`, `event_link_rlshp`, `instance_do_link_rlshp`, `linked_agents_rlshp`, `location_profile_rlshp`, `owner_repo_rlshp`, `related_accession_rlshp`, `related_agents_rlshp`, `resource`, `spawned_rlshp`, `surveyed_by_rlshp`, `top_container_housed_at_rlshp`, `top_container_link_rlshp`, `top_container_profile_rlshp` -| `restrictions_apply` | | `accession`, `archival_object` - - - - - -## Read-Only Fields - -Several system generated, read-only fields appear across many tables. These include database identifiers, timestamps that track record creation and modification, and fields that record the username of the user that created and last modified each record. - -| Most common read-only fields | Description | -|--------------------------------|------------------------------------| -| `id` (primary key) | Database identifier for each record -| `system_mtime` | The last time the record was modified by the system -| `created_by` | The user that created a record -| `last_modified_by` | The user that last modified a record -| `user_mtime` | The time that a record was last modified by a user -| `create_time` | The time that a record was created -| `lock_version` | This field is incrementally updated each time a record is updated. 
This provides a method of tracking updates and managing near-simultaneous edits by different users. -| `json_schema_version` | The JSON schema version -| `aspace_relationship_position` | The position of a linked record in a list of other linked records -| `is_slug_auto` | A boolean value that indicates whether a slug was auto-generated -| `system_generated` | A boolean value that indicates whether a field was system-generated -| `display_string` | A system-generated field which concatenates the title and date fields of an archival object record - -**NOTE**: for subrecord tables these fields may hold unexpected data - because subrecords are deleted and recreated upon each save of a main or supporting record, their create and modification times will also be recreated and will not reflect the original creation date of the subrecord itself. For resource records, the timestamp only records the time that the resource itself was modified, not the last time any of its components were modified. - - diff --git a/architecture/backend/index.html b/architecture/backend/index.html new file mode 100644 index 00000000..1a31f4fb --- /dev/null +++ b/architecture/backend/index.html @@ -0,0 +1,573 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/backend/README.md + +

+ +

+ + Report issue on Jira + architecture/backend/README.md + +

+
+
+ +

The ArchivesSpace backend

+ +

The backend is responsible for implementing the ArchivesSpace API, and +supports the sort of access patterns shown in the previous section. +We’ve seen that the backend must support CRUD operations against a +number of different record types, and those records are expressed as +JSON documents produced from instances of JSONModel classes.

+ +

The following sections describe how the backend fits together.

+ +

main.rb – load and initialize the system

+ +

The main.rb program is responsible for starting the ArchivesSpace +system: loading all controllers and models, creating +users/groups/permissions as needed, and preparing the system to handle +requests.

+ +

When the system starts up, the main.rb program performs the +following actions:

+ +
    +
  • Initializes JSONModel–triggering it to load all record schemas +from the filesystem and generate the classes that represent each +record type.
  • +
  • Connects to the database
  • +
  • Loads all backend models–the system’s domain objects and +persistence layer
  • +
  • Loads all controllers–defining the system’s REST endpoints
  • +
  • Starts the job scheduler–handling scheduled tasks such as backups +of the demo database (if used)
  • +
  • Runs the “bootstrap ACLs” process–creates the admin user and +group if they don’t already exist; creates the hidden global +repository; creates system users and groups.
  • +
  • Fires the “backend started” notification to any registered +observers.
  • +
+ +

In addition to handling the system startup, main.rb also provides +the following facilities:

+ +
    +
  • Session handling–tracks authenticated backend sessions using the +token extracted from the X-ArchivesSpace-Session request header.
  • +
  • Helper methods for accessing the current user and current session +of each request.
  • +
+ +

rest.rb – Request and response handling for REST endpoints

+ +

The rest.rb module provides the mechanism used to define the API’s +REST endpoints. Each endpoint definition includes:

+ +
    +
  • The URI and HTTP request method used to access the endpoint
  • +
  • A list of typed parameters for that endpoint
  • +
  • Documentation for the endpoint, each parameter, and each possible +response that may be returned
  • +
  • Permission checks–predicates that the current user must satisfy +to be able to use the endpoint
  • +
+ +

Each controller in the system consists of one or more of these +endpoint definitions. By using the endpoint syntax provided by +rest.rb, the controllers can declare the interface they provide, and +are freed of having to perform the sort of boilerplate associated +with request handling–check parameter types, coerce values from +strings into other types, and so on.

+ +

The main.rb and rest.rb components work together to insulate the +controllers from much of the complexity of request handling. By the +time a request reaches the body of an endpoint:

+ +
    +
  • It can be sure that all required parameters are present and of the +correct types.
  • +
  • The body of the request has been fetched, parsed into the +appropriate type (usually a JSONModel instance–see below) and +made available as a request parameter.
  • +
  • Any parameters provided by the client that weren’t present in the +endpoint definition have been dropped.
  • +
  • The user’s session has been retrieved, and any defined access +control checks have been carried out.
  • +
  • A connection to the database has been assigned to the request, and +a transaction has been opened. If the controller throws an +exception, the transaction will be automatically rolled back.
  • +
+ +

Controllers

+ +

As touched upon in the previous section, controllers implement the +functionality of the ArchivesSpace API by registering one or more +endpoints. Each endpoint accepts an HTTP request for a given URI, +carries out the request and returns a JSON response (if successful) or +throws an exception (if something goes wrong).

+ +

Each controller lives in its own file, and these can be found in the +backend/app/controllers directory. Since most of the request +handling logic is captured by the rest.rb module, controllers +generally don’t do much more than coordinate the classes from the +model layer and send a response back to the client.

+ +

crud_helpers.rb – capturing common CRUD controller actions

+ +

Even though controllers are quite thin, there’s still a lot of overlap +in their behaviour. Each record type in the system supports the same +set of CRUD operations, and from the controller’s point of view +there’s not much difference between an update request for an accession +and an update request for a digital object (for example).

+ +

The crud_helpers.rb module pulls this commonality into a set of +helper methods that are invoked by each controller, providing methods +for the standard operations of the system.

+ +

Models

+ +

The backend’s model layer is where the action is. The model layer’s +role is to bridge the gap between the high-level JSONModel objects +(complete with their properties, nested records, references to other +records, etc.) and the underlying relational database (via the Sequel +database toolkit). As such, the model layer is mainly concerned with +mapping JSONModel instances to database tables in a way that preserves +everything and allows them to be queried efficiently.

+ +

Each record type has a corresponding model class, but the individual +model definitions are often quite sparse. This is because the +different record types differ in the following ways:

+ +
    +
  • The set of properties they allow (and their types, valid values, +etc.)
  • +
  • The types of nested records they may contain
  • +
  • The types of relationships they may have with other record types
  • +
+ +

The first of these–the set of allowable properties–is already +captured by the JSONModel schema definitions, so the model layer +doesn’t have to enforce these restrictions. Each model can simply +take the values supplied by the JSONModel object it is passed and +assume that everything that needs to be there is there, and that +validation has already happened.

+ +

The remaining two aspects are enforced by the model layer, but +generally don’t pertain to just a single record type. For example, an +accession may be linked to zero or more subjects, but so can several +other record types, so it doesn’t make sense for the Accession model +to contain the logic for handling subjects.

+ +

In practice we tend to see very little functionality that belongs +exclusively to a single record type, and as a result there’s not much +to put in each corresponding model. Instead, models are generally +constructed by combining a number of mix-ins (Ruby modules) to satisfy +the requirements of the given record type. Features à la carte!

+ +

ASModel and other mix-ins

+ +

At a minimum, every model includes the ASModel mix-in, which provides +base versions of the following methods:

+ +
    +
  • Model.create_from_json – Take a JSONModel instance and create a +model instance (a subclass of Sequel::Model) from it. Returns the +instance.
  • +
  • model.update_from_json – Update the target model instance with +the values from a given JSONModel instance.
  • +
  • Model.sequel_to_json – Return a JSONModel instance of the appropriate +type whose values are taken from the target model instance. +Model classes are declared to correspond to a particular JSONModel +instance when created, so this method can automatically return a +JSONModel instance of the appropriate type.
  • +
+ +

These methods comprise the primary interface of the model layer: +virtually every mix-in in the model layer overrides one or all of +these to add behaviour in a modular way.

+ +

For example, the ‘notes’ mix-in adds support for multiple notes to be +added to a record type–by mixing this module into a model class, that +class will automatically accept a JSONModel property called ‘notes’ +that will be stored and retrieved to and from the database as needed. +This works by overriding the three methods as follows:

+ +
    +
  • Model.create_from_json – Call ‘super’ to delegate the creation to +the next mix-in in the chain. When it returns the newly created +object, extract the notes from the JSONModel instance and attach +them to the model instance (saving them in the database).
  • +
  • model.update_from_json – Call ‘super’ to save the other updates +to the database, then replace any existing notes entries for the +record with the ones provided by the JSONModel.
  • +
  • Model.sequel_to_json – Call ‘super’ to have the next mix-in in +the chain create a JSONModel instance, then pull the stored notes +from the database and poke them into it.
  • +
+ +

All of the mix-ins follow this pattern: call ‘super’ to delegate the +call to the next mix-in in the chain (eventually reaching ASModel), +then manipulate the result to implement the desired behaviour.

+ +

Nested records

+ +

Some record types, like accessions, digital objects, and subjects, are +top-level records, in the sense that they are created independently +of any other record and are addressable via their own URI. However, +there are a number of records that can’t exist in isolation, and only +exist in the context of another record. When one record can contain +instances of another record, we call them nested records.

+ +

To give an example, the date record type is nested within an +accession record (among others). When the model layer is asked to +save a JSONModel instance containing nested records, it must pluck out +those records, save them in the appropriate database table, and ensure +that linkages are created within the database to allow them to be +retrieved later.

+ +

This happens often enough that it would be tedious to write code for +each model to handle its nested records, so the ASModel mix-in +provides a declaration to handle this automatically. For example, the +accession model uses a definition like:

+ +
 base.def_nested_record(:the_property => :dates,
+                        :contains_records_of_type => :date,
+                        :corresponding_to_association  => :date)
+
+ +

When creating an accession, this declaration instructs the Accession +model to create a database record for each date listed in the “dates” +property of the incoming record. Each of these date records will be +automatically linked to the created accession.

+ +

Relationships

+ +

A relationship is a link between two top-level records, where the link +is a separate, dynamically generated, model with zero or more +properties of its own.

+ +

For example, the Event model can be related to several different +types of records:

+ +
 define_relationship(:name => :event_link,
+                     :json_property => 'linked_records',
+                     :contains_references_to_types => proc {[Accession, Resource, ArchivalObject]})
+
+ +

This declaration generates a custom class that models the relationship +between events and the other record types. The corresponding JSON +schema declaration for the linked_records property looks like this:

+ +
  "linked_records" => {
+    "type" => "array",
+    "ifmissing" => "error",
+    "minItems" => 1,
+    "items" => {
+      "type" => "object",
+      "subtype" => "ref",
+      "properties" => {
+        "role" => {
+          "type" => "string",
+          "dynamic_enum" => "linked_event_archival_record_roles",
+          "ifmissing" => "error",
+        },
+        "ref" => {
+          "type" => [{"type" => "JSONModel(:accession) uri"},
+                     {"type" => "JSONModel(:resource) uri"},
+                     {"type" => "JSONModel(:archival_object) uri"},
+                     ...],
+          "ifmissing" => "error"
+        },
+      ...
+
+ +

That is, the property includes URI references to other records, plus +an additional “role” property to indicate the nature of the +relationship. The corresponding JSON might then be:

+ +
linked_records: [{ref: '/repositories/123/accessions/456', role: 'authorizer'}, ...]
+
+ +

The define_relationship definition automatically makes use of the +appropriate join tables in the database to store this relationship and +retrieve it later as needed.

+ +

Agents and agent_manager.rb

+ +

Agents present a bit of a representational challenge. There are four +types of agents (person, family, corporate entity, software), and at a +high-level they are structured in the same way: each type can contain +one or more name records, zero or more contact records, and a number +of properties. Records that link to agents (via a relationship, for +example) can link to any of the four types so, in some sense, each +agent type implements a common Agent interface.

+ +

However, the agent types differ in their details. Agents contain name +records, but the types of those name records correspond to the type of +the agent: a person agent contains a person name record, for example. +So, in spite of their similarities, the different agents need to be +modelled as separate record types.

+ +

The agent_manager module captures the high-level similarities +between agents. Each agent model includes the agent manager mix-in:

+ +
 include AgentManager::Mixin
+
+ +

and then defines itself declaratively by the provided class method:

+ +
 register_agent_type(:jsonmodel => :agent_person,
+                     :name_type => :name_person,
+                     :name_model => NamePerson)
+
+ +

This definition sets up the properties of that agent. It creates:

+ +
    +
  • a one_to_many relationship with the corresponding name +type of the agent.
  • +
  • a one_to_many relationship with the agent_contact table.
  • +
  • a nested record definition which defines the names list of the agent +(so the list of names for the agent is automatically stored in +and retrieved from the database)
  • +
  • a nested record definition for the contact list of the agent.
  • +
+ +

Validations

+ +

As records are added to and updated within the ArchivesSpace system, +they are validated against a number of rules to make sure they are +well-formed and don’t conflict with other records. There are two +types of record validation:

+ +
    +
  • Record-level validations check that a record is self-consistent: +that it contains all required fields, that its values are of the +appropriate type and format, and that its fields don’t contradict +one another.
  • +
  • System-level validations check that a record makes sense in a +broader context: that it doesn’t share a unique identifier with +another record, and that any record it references actually exists.
  • +
+ +

Record-level validations can be performed in isolation, while +system-level validations require comparing the record to others in the +database.

+ +

System-level validations need to be implemented in the database itself +(as integrity constraints), but record-level validations are often too +complex to be expressed this way. As a result, validations in +ArchivesSpace can appear in one or both of the following layers:

+ +
    +
  • At the JSONModel level, validations are captured by JSON schema +documents. Where more flexibility is needed, custom validations +are added to the common/validations.rb file, allowing validation +logic to be expressed using arbitrary Ruby code.
  • +
  • At the database level, validations are captured using database +constraints. Since the error messages yielded by these +constraints generally aren’t useful for users, database +constraints are also replicated in the backend’s model layer using +Sequel validations, which give more targeted error messages.
  • +
+ +

As a general rule, record-level validations are handled by the +JSONModel validations (either through the JSON schema or custom +validations), while system-level validations are handled by the model +and the database schema.

+ +

Optimistic concurrency control

+ +

Updating a record using the ArchivesSpace API is a two part process:

+ +
 # Perform a `GET` against the desired record to fetch its JSON
+ # representation:
+
+   GET /repositories/5/accessions/2
+
+ # Manipulate the JSON representation as required, and then `POST`
+ # it back to replace the original:
+
+   POST /repositories/5/accessions/2
+
+ +

If two people do this simultaneously, there’s a risk that one person +would silently overwrite the changes made by the other. To prevent +this, every record is marked with a version number that it carries in +the lock_version property. When the system receives the updated +copy of a record, it checks that the version it carries is still +current; if the version number doesn’t match the one stored in the +database, the update request is rejected and the user must re-fetch +the latest version before applying their update.

+ +

The ArchivesSpace permissions model

+ +

The ArchivesSpace backend enforces access control, defining which +users are allowed to create, read, update, suppress and delete the +records in the system. The major actors in the permissions model are:

+ +
    +
  • Repositories – The main mechanism for partitioning the +ArchivesSpace system. For example, an instance might contain one +repository for each section of an organisation, or one repository +for each major collection.
  • +
  • Users – An entity that uses the system–often a person, but +perhaps a consumer of the ArchivesSpace API. The set of users is +global to the system, and a single user may have access to +multiple repositories.
  • +
  • Records – A unit of information in the system. Some records are +global (existing outside of any given repository), while some are +repository-scoped (belonging to a single repository).
  • +
  • Groups – A set of users within a repository. Each group is +assigned zero or more permissions, which it confers upon its +members.
  • +
  • Permissions – An action that a user can perform. For example, a +user with the update_accession_record permission is allowed to +update accessions for a repository.
  • +
+ +

To summarize, a user can perform an action within a repository if they +are a member of a group that has been assigned permission to perform +that action.

+ +

Conceptual trickery

+ +

Since they’re repository-scoped, groups govern access to repositories. +However, there are several record types that exist at the top-level of +the system (such as the repositories themselves, subjects and agents), +and the permissions model must be able to accommodate these.

+ +

To get around this, we invent a concept: the “global” repository +conceptually contains the whole ArchivesSpace universe. As with other +repositories, the global repository contains groups, and users can be +made members of these groups to grant them permissions across the +entire system. One example of this is the “admin” user, which is +granted all permissions by the “administrators” group of the global +repository; another is the “search indexer” user, which can read (but +not update or delete) any record in the system.

+ + +
+ +
+ + + diff --git a/architecture/directories.html b/architecture/directories.html new file mode 100644 index 00000000..54a3b7ff --- /dev/null +++ b/architecture/directories.html @@ -0,0 +1,226 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/directories.md + +

+ +

+ + Report issue on Jira + architecture/directories.md + +

+
+
+ +

Directory structure

+ +

ArchivesSpace is made up of several components that are kept in separate directories.

+ +

_yard

+ +

This directory contains the code for the documentation tool used to generate the github io pages here: http://archivesspace.github.io/archivesspace/

+ +

backend

+ +

This directory contains the code that handles the database and the API.

+ +

build

+ +

This directory contains the code used to build the application. It includes the commands that are used to run the development servers, the test suites, and to build the releases. ArchivesSpace is a JRuby application and Apache Ant is used to build it.

+ +

clustering

+ +

This directory contains code that can be used when clustering an ArchivesSpace installation.

+ +

common

+ +

This directory contains code that is used across two or more of the components. It includes configuration options, database schemas and migrations, and translation files.

+ +

contribution_files

+ +

This directory contains the documentation and PDFs of the license agreement files.

+ +

docs

+ +

This directory contains documentation files that are included in a release.

+ +

frontend

+ +

This directory contains the staff interface Ruby on Rails application.

+ +

indexer

+ +

This directory contains the indexer Sinatra application.

+ +

jmeter

+ +

This directory contains an example that can be used to set up Apache JMeter to load test functional behavior and measure performance.

+ +

launcher

+ +

This directory contains the code that launches (starts, restarts, and stops) an ArchivesSpace application.

+ +

oai

+ +

This directory contains the OAI-PMH Sinatra application.

+ +

plugins

+ +

This directory contains ArchivesSpace Program Team supported plugins.

+ +

proxy

+ +

This directory contains the Docker proxy code.

+ +

public

+ +

This directory contains the public interface Ruby on Rails application.

+ +

reports

+ +

This directory contains the reports code.

+ +

scripts

+ +

This directory contains scripts necessary for building, deploying, and other ArchivesSpace tasks.

+ +

selenium

+ +

This directory contains the selenium tests.

+ +

solr

+ +

This directory contains the solr code.

+ +

stylesheets

+ +

This directory contains XSL stylesheets used by ArchivesSpace.

+ +

supervisord

+ +

This directory contains a tool that can be used to run the development servers.

+ + +
+ +
+ + + diff --git a/architecture/directories.md b/architecture/directories.md deleted file mode 100644 index 64eec4c3..00000000 --- a/architecture/directories.md +++ /dev/null @@ -1,87 +0,0 @@ -# Directory structure - -ArchivesSpace is made up of several components that are kept in separate directories. - -## \_yard - -This directory contains the code for the documentation tool used to generate the github io pages here: http://archivesspace.github.io/archivesspace/ - -## backend - -This directory contains the code that handles the database and the API. - -## build - -This directory contains the code used to build the application. It includes the commands that are used to run the development servers, the test suites, and to build the releases. ArchivesSpace is a JRuby application and Apache Ant is used to build it. - -## clustering - -This directory contains code that can be used when clustering an ArchivesSpace installation. - -## common - -This directory contains code that is used across two or more of the components. It includes configuration options, database schemas and migrations, and translation files. - -## contribution_files - -This directory contains the documentation and PDFs of the license agreement files. - -## docs - -This directory contains documentation files that are included in a release. - -## frontend - -This directory contains the staff interface Ruby on Rails application. - -## indexer - -This directory contains the indexer Sinatra application. - -## jmeter - -This directory contains an example that can be used to set up Apache JMeter to load test functional behavior and measure performance. - -## launcher - -This directory contains the code that launches (starts, restarts, and stops) an ArchivesSpace application. - -## oai - -This directory contains the OAI-PMH Sinatra application. - -## plugins - -This directory contains ArchivesSpace Program Team supported plugins. - -## proxy - -This directory contains the Docker proxy code. 
- -## public - -This directory contains the public interface Ruby on Rails application. - -## reports - -This directory contains the reports code. - -## scripts - -This directory contains scripts necessary for building, deploying, and other ArchivesSpace tasks. - -## selenium - -This directory contains the selenium tests. - -## solr - -This directory contains the solr code. - -## stylesheets - -This directory contains XSL stylesheets used by ArchivesSpace. - -## supervisord - -This directory contains a tool that can be used to run the development servers. diff --git a/architecture/frontend/README.md b/architecture/frontend/README.md deleted file mode 100644 index f7588048..00000000 --- a/architecture/frontend/README.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -permalink: /architecture/frontend/ ---- - -# The ArchivesSpace Staff Interface - -This document provides an overview of the parts of the ArchivesSpace codebase which control the frontend/staff interface. For guidance on using the ArchivesSpace staff interface, consult the [ArchivesSpace Help Center](https://archivesspace.atlassian.net/wiki/spaces/ArchivesSpaceUserManual/overview) (ArchivesSpace members only). - -> Additional documentation needed diff --git a/architecture/frontend/index.html b/architecture/frontend/index.html new file mode 100644 index 00000000..20aaa4ed --- /dev/null +++ b/architecture/frontend/index.html @@ -0,0 +1,146 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/frontend/README.md + +

+ +

+ + Report issue on Jira + architecture/frontend/README.md + +

+
+
+ +

The ArchivesSpace Staff Interface

+ +

This document provides an overview of the parts of the ArchivesSpace codebase which control the frontend/staff interface. For guidance on using the ArchivesSpace staff interface, consult the ArchivesSpace Help Center (ArchivesSpace members only).

+ +
+

Additional documentation needed

+
+ + +
+ +
+ + + diff --git a/architecture/index.html b/architecture/index.html new file mode 100644 index 00000000..63e3a366 --- /dev/null +++ b/architecture/index.html @@ -0,0 +1,262 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/README.md + +

+ +

+ + Report issue on Jira + architecture/README.md + +

+
+
+ +

ArchivesSpace architecture and components

+ +

ArchivesSpace is divided into several components: the backend, which +exposes the major workflows and data types of the system via a +REST API, a staff interface, a public interface, and a search system, +consisting of Solr and an indexer application.

+ +

These components interact by exchanging JSON data. The format of this +data is defined by a class called JSONModel.

+ + + +

Languages, platforms, and included open source projects

+ +

ArchivesSpace components are constructed using several programming languages, platforms, and additional open source projects.

+ +

Languages

+ +

The languages used are Java, JRuby, Ruby, JavaScript, and CSS.

+ +

Platforms

+ +

The backend, OAI harvester, and indexer are Sinatra apps. The staff and public user interfaces are Ruby on Rails apps.

+ +

Additional open source projects

+ +

The database used out of the box and for testing is Apache Derby. The database suggested for production is MySQL. The index platform is Apache Solr.

+ +

Directory Structure

+ +

ArchivesSpace is made up of several components that are kept in separate directories.

+ +

_yard

+ +

This directory contains the code for the documentation tool used to generate the github io pages here: https://archivesspace.github.io/archivesspace/api/ and https://archivesspace.github.io/archivesspace/doc/

+ +

backend

+ +

This directory contains the code that handles the database and the API.

+ +

build

+ +

This directory contains the code used to build the application. It includes the commands that are used to run the development servers, the test suites, and to build the releases. ArchivesSpace is a JRuby application and Apache Ant is used to build it.

+ +

clustering

+ +

This directory contains code that can be used when clustering an ArchivesSpace installation.

+ +

common

+ +

This directory contains code that is used across two or more of the components. It includes configuration options, database schemas and migrations, and translation files.

+ +

contribution_files

+ +

This directory contains the documentation and PDFs of the license agreement files.

+ +

docs

+ +

This directory contains documentation files that are included in a release.

+ +

frontend

+ +

This directory contains the staff interface Ruby on Rails application.

+ +

indexer

+ +

This directory contains the indexer Sinatra application.

+ +

jmeter

+ +

This directory contains an example that can be used to set up Apache JMeter to load test functional behavior and measure performance.

+ +

launcher

+ +

This directory contains the code that launches (starts, restarts, and stops) an ArchivesSpace application.

+ +

oai

+ +

This directory contains the OAI-PMH Sinatra application.

+ +

plugins

+ +

This directory contains ArchivesSpace Program Team supported plugins.

+ +

proxy

+ +

This directory contains the Docker proxy code.

+ +

public

+ +

This directory contains the public interface Ruby on Rails application.

+ +

reports

+ +

This directory contains the reports code.

+ +

scripts

+ +

This directory contains scripts necessary for building, deploying, and other ArchivesSpace tasks.

+ +

selenium

+ +

This directory contains the selenium tests.

+ +

solr

+ +

This directory contains the solr code.

+ +

stylesheets

+ +

This directory contains XSL stylesheets used by ArchivesSpace.

+ +

supervisord

+ +

This directory contains a tool that can be used to run the development servers.

+ + +
+ +
+ + + diff --git a/architecture/jobs/README.md b/architecture/jobs/README.md deleted file mode 100644 index ea5b8aa2..00000000 --- a/architecture/jobs/README.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -permalink: /architecture/jobs/ ---- - -# Background Jobs - -ArchivesSpace provides a mechanism for long running processes to run -asynchronously. These processes are called `Background Jobs`. - -## Managing Jobs in the Staff UI - -The `Create` menu has a `Background Job` option which shows a submenu of job -types that the current user has permission to create. (See below for more - information about job permissions and hidden jobs.) Selecting one of these - options will take the user to a form to enter any parameters required for the - job and then to create it. - -When a job is created it is placed in the `Background Job Queue`. Jobs in the -queue will be run in the order they were created. (See below for more - information about multiple threads and concurrent jobs.) - -The `Browse` menu has a `Background Jobs` option. This takes the user to a list -of jobs arranged by their status. The user can then view the details of a job, -and cancel it if they have permission. - - -## Permissions - -A user must have the `create_job` permission to create a job. By default, this -permission is included in the `repository_basic_data_entry` group. - -A user must have the `cancel_job` permission to cancel a job. By default, this -permission is included in the `repository_managers` group. - -When a JobRunner registers it can specify additional create and cancel -permissions. (See below for more information) - - -## Types, Runners and Schemas - -Each job has a type, and each type has a registered runner to run jobs of that -type and JSONModel schema to define its parameters. - -#### Registered JobRunners - -All jobs of a type are handled by a registered `JobRunner`. 
The job runner -classes are located here: - - backend/app/lib/job_runners/ - -It is possible to define additional job runners from a plugin. (See below for - more information about plugins.) - -A job runner class must subclass `JobRunner`, register to run one or more job -types, and implement a `#run` method for jobs that it handles. - -When a job runner registers for a job type, it can set some options: - - * `:hidden` - * Defaults to `false` - * If this is set then this job type will not be shown in the list of available job types. - * `:run_concurrently` - * Defaults to `false` - * If this is set to true then more than one job of this type can run at the same time. - * `:create_permissions` - * Defaults to `[]` - * A permission or list of permissions required, in addition to `create_job`, to create jobs of this type. - * `:cancel_permissions` - * Defaults to `[]` - * A permission or list of permissions required, in addition to `cancel_job`, to cancel jobs of this type. - -For more information about defining a job runner, see the `JobRunner` superclass: - - backend/app/lib/job_runner.rb - -#### JSONModel Schemas - -A job type also requires a JSONModel schema that defines the parameters to run a -job of the type. The schema name must be the same as the type that the runner -registers for. For example: - - common/schemas/import_job.rb - -This schema, for `JSONModel(:import_job)`, defines the parameters for running a -job of type `import_job`. - - -## Concurrency - -ArchivesSpace can be configured to run more than one background job at a time. -By default, there will be two threads available to run background jobs. -The configuration looks like this: - - AppConfig[:job_thread_count] = 2 - -The `BackgroundJobQueue` will start this number of threads at start up. Those -threads will then poll for queued jobs and run them. - -When a job runner registers, it can set an option called `:run_concurrently`. -This is `false` by default. 
When set to `false` a job thread will not run a job -if there is already a job of that type running. The job will remain on the queue -and will be run when there are no longer any jobs of its type running. - -When set to `true` a job will be run when it comes to the front of the queue -regardless of whether there is a job of the same type running. - - -## Plugins - -It is possible to add a new job type from a plugin. ArchivesSpace includes a -plugin that demonstrates how to do this: - - plugins/jobs_example diff --git a/architecture/jobs/index.html b/architecture/jobs/index.html new file mode 100644 index 00000000..fe8c1663 --- /dev/null +++ b/architecture/jobs/index.html @@ -0,0 +1,263 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/jobs/README.md + +

+ +

+ + Report issue on Jira + architecture/jobs/README.md + +

+
+
+ +

Background Jobs

+ +

ArchivesSpace provides a mechanism for long running processes to run +asynchronously. These processes are called Background Jobs.

+ +

Managing Jobs in the Staff UI

+ +

The Create menu has a Background Job option which shows a submenu of job +types that the current user has permission to create. (See below for more + information about job permissions and hidden jobs.) Selecting one of these + options will take the user to a form to enter any parameters required for the + job and then to create it.

+ +

When a job is created it is placed in the Background Job Queue. Jobs in the +queue will be run in the order they were created. (See below for more + information about multiple threads and concurrent jobs.)

+ +

The Browse menu has a Background Jobs option. This takes the user to a list +of jobs arranged by their status. The user can then view the details of a job, +and cancel it if they have permission.

+ +

Permissions

+ +

A user must have the create_job permission to create a job. By default, this +permission is included in the repository_basic_data_entry group.

+ +

A user must have the cancel_job permission to cancel a job. By default, this +permission is included in the repository_managers group.

+ +

When a JobRunner registers it can specify additional create and cancel +permissions. (See below for more information)

+ +

Types, Runners and Schemas

+ +

Each job has a type, and each type has a registered runner to run jobs of that +type and JSONModel schema to define its parameters.

+ +

Registered JobRunners

+ +

All jobs of a type are handled by a registered JobRunner. The job runner +classes are located here:

+ +
  backend/app/lib/job_runners/
+
+ +

It is possible to define additional job runners from a plugin. (See below for + more information about plugins.)

+ +

A job runner class must subclass JobRunner, register to run one or more job +types, and implement a #run method for jobs that it handles.

+ +

When a job runner registers for a job type, it can set some options:

+ +
    +
  • :hidden +
      +
    • Defaults to false
    • +
    • If this is set then this job type will not be shown in the list of available job types.
    • +
    +
  • +
  • :run_concurrently +
      +
    • Defaults to false
    • +
    • If this is set to true then more than one job of this type can run at the same time.
    • +
    +
  • +
  • :create_permissions +
      +
    • Defaults to []
    • +
    • A permission or list of permissions required, in addition to create_job, to create jobs of this type.
    • +
    +
  • +
  • :cancel_permissions +
      +
    • Defaults to []
    • +
    • A permission or list of permissions required, in addition to cancel_job, to cancel jobs of this type.
    • +
    +
  • +
+ +

For more information about defining a job runner, see the JobRunner superclass:

+ +
  backend/app/lib/job_runner.rb
+
+ +

JSONModel Schemas

+ +

A job type also requires a JSONModel schema that defines the parameters to run a +job of the type. The schema name must be the same as the type that the runner +registers for. For example:

+ +
  common/schemas/import_job.rb
+
+ +

This schema, for JSONModel(:import_job), defines the parameters for running a +job of type import_job.

+ +

Concurrency

+ +

ArchivesSpace can be configured to run more than one background job at a time. +By default, there will be two threads available to run background jobs. +The configuration looks like this:

+ +
  AppConfig[:job_thread_count] = 2
+
+ +

The BackgroundJobQueue will start this number of threads at start up. Those +threads will then poll for queued jobs and run them.

+ +

When a job runner registers, it can set an option called :run_concurrently. +This is false by default. When set to false a job thread will not run a job +if there is already a job of that type running. The job will remain on the queue +and will be run when there are no longer any jobs of its type running.

+ +

When set to true a job will be run when it comes to the front of the queue +regardless of whether there is a job of the same type running.

+ +

Plugins

+ +

It is possible to add a new job type from a plugin. ArchivesSpace includes a +plugin that demonstrates how to do this:

+ +
  plugins/jobs_example
+
+ + +
+ +
+ + + diff --git a/architecture/jsonmodel.html b/architecture/jsonmodel.html new file mode 100644 index 00000000..82cc97c0 --- /dev/null +++ b/architecture/jsonmodel.html @@ -0,0 +1,235 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/jsonmodel.md + +

+ +

+ + Report issue on Jira + architecture/jsonmodel.md + +

+
+
+ +

JSONModel – a validated ArchivesSpace record

+ +

The ArchivesSpace system is concerned with managing a number of +different archival record types. Each record can be expressed as a +set of nested key/value pairs, and associated with each record type is +a number of rules that describe what it means for a record of that +type to be valid:

+ +
    +
  • some fields are mandatory, some optional
  • +
  • some fields can only take certain types
  • +
  • some fields can only take values from a constrained set
  • +
  • some fields are dependent on other fields
  • +
  • some record types can be nested within other record types
  • +
  • some record types may be related to others through a hierarchy
  • +
  • some record types form a relationship graph with other record +types
  • +
+ +

The JSONModel class provides a common language for expressing these +rules that all parts of the application can share. There is a +JSONModel class instance for each type of record in the system, so:

+ +
JSONModel(:digital_object)
+
+ +

is a class that knows how to take a hash of properties and make sure +those properties conform to the specification of a Digital Object:

+ +
JSONModel(:digital_object).from_hash(myhash)
+
+ +

If it passes validation, a new JSONModel(:digital_object) instance is +returned, which provides accessors for accessing its values, and +facilities for round-tripping between JSON documents and regular Ruby +hashes:

+ +
 obj = JSONModel(:digital_object).from_hash(myhash)
+
+ obj.title  # or obj['title']
+ obj.title = 'a new title'  # or obj['title'] = 'a new title'
+
+ obj.\_exceptions  # Validates the object and reports any issues
+
+ obj.to_hash  # Turn the JSONModel object back into a regular hash
+ obj.to_json  # Serialize the JSONModel object into JSON
+
+ +

Much of the validation performed by JSONModel is provided by the JSON +schema definitions listed in the common/schemas directory. JSON +schemas provide a standard way of declaring which properties a record +may and may not contain, along with their types and other +restrictions. ArchivesSpace uses these schemas to capture the +validation rules defining each record type in a declarative and +relatively self-documenting fashion.

+ +

JSONModel instances are the primary data interchange mechanism for the +ArchivesSpace system: the API consumes and produces JSONModel +instances (in JSON format), and much of the user interface’s life is +spent turning forms into JSONModel instances and shipping them off to +the backend.

+ +

JSONModel::Client – A high-level API for interacting with the ArchivesSpace backend

+ +

To save the need for a lot of HTTP request wrangling, ArchivesSpace +ships with a module called JSONModel::Client that simplifies the +common CRUD-style operations. Including this module just requires +passing an additional parameter when initializing JSONModel:

+ +
 JSONModel::init(:client_mode => true, :url => @backend_url)
+ include JSONModel
+
+ +

If you’ll be working against a single repository, it’s convenient to +set it as the default for subsequent actions:

+ +
 JSONModel.set_repository(123)
+
+ +

Then, several additional JSONModel methods are available:

+ +
 # As before, get a paginated list of accessions (GET)
+ JSONModel(:accession).all(:page => 1)
+
+ # Create a new accession (POST)
+ obj = JSONModel(:accession).from_hash(:title => "A new accession", ...)
+ obj.save
+
+ # Get a single accession by ID (GET)
+ obj = JSONModel(:accession).find(123)
+
+ # Update an existing accession (POST)
+ obj = JSONModel(:accession).find(123)
+ obj.title = "Updated title"
+ obj.save
+
+ + +
+ +
+ + + diff --git a/architecture/jsonmodel.md b/architecture/jsonmodel.md deleted file mode 100644 index fea845c4..00000000 --- a/architecture/jsonmodel.md +++ /dev/null @@ -1,89 +0,0 @@ -# JSONModel -- a validated ArchivesSpace record - -The ArchivesSpace system is concerned with managing a number of -different archival record types. Each record can be expressed as a -set of nested key/value pairs, and associated with each record type is -a number of rules that describe what it means for a record of that -type to be valid: - - * some fields are mandatory, some optional - * some fields can only take certain types - * some fields can only take values from a constrained set - * some fields are dependent on other fields - * some record types can be nested within other record types - * some record types may be related to others through a hierarchy - * some record types form a relationship graph with other record - types - -The JSONModel class provides a common language for expressing these -rules that all parts of the application can share. 
There is a -JSONModel class instance for each type of record in the system, so: - - JSONModel(:digital_object) - -is a class that knows how to take a hash of properties and make sure -those properties conform to the specification of a Digital Object: - - JSONModel(:digital_object).from_hash(myhash) - -If it passes validation, a new JSONModel(:digital\_object) instance is -returned, which provides accessors for accessing its values, and -facilities for round-tripping between JSON documents and regular Ruby -hashes: - - obj = JSONModel(:digital_object).from_hash(myhash) - - obj.title # or obj['title'] - obj.title = 'a new title' # or obj['title'] = 'a new title' - - obj.\_exceptions # Validates the object and reports any issues - - obj.to_hash # Turn the JSONModel object back into a regular hash - obj.to_json # Serialize the JSONModel object into JSON - - -Much of the validation performed by JSONModel is provided by the JSON -schema definitions listed in the `common/schemas` directory. JSON -schemas provide a standard way of declaring which properties a record -may and may not contain, along with their types and other -restrictions. ArchivesSpace uses these schemas to capture the -validation rules defining each record type in a declarative and -relatively self-documenting fashion. - -JSONModel instances are the primary data interchange mechanism for the -ArchivesSpace system: the API consumes and produces JSONModel -instances (in JSON format), and much of the user interface's life is -spent turning forms into JSONModel instances and shipping them off to -the backend. - -## JSONModel::Client -- A high-level API for interacting with the ArchivesSpace backend - -To save the need for a lot of HTTP request wrangling, ArchivesSpace -ships with a module called JSONModel::Client that simplifies the -common CRUD-style operations. 
Including this module just requires -passing an additional parameter when initializing JSONModel: - - JSONModel::init(:client_mode => true, :url => @backend_url) - include JSONModel - -If you'll be working against a single repository, it's convenient to -set it as the default for subsequent actions: - - JSONModel.set_repository(123) - -Then, several additional JSONModel methods are available: - - # As before, get a paginated list of accessions (GET) - JSONModel(:accession).all(:page => 1) - - # Create a new accession (POST) - obj = JSONModel(:accession).from_hash(:title => "A new accession", ...) - obj.save - - # Get a single accession by ID (GET) - obj = JSONModel(:accession).find(123) - - # Update an existing accession (POST) - obj = JSONModel(:accession).find(123) - obj.title = "Updated title" - obj.save diff --git a/architecture/languages.html b/architecture/languages.html new file mode 100644 index 00000000..18b5670c --- /dev/null +++ b/architecture/languages.html @@ -0,0 +1,150 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/languages.md + +

+ +

+ + Report issue on Jira + architecture/languages.md + +

+
+
+ +

Languages, platforms, and included open source projects

+

ArchivesSpace components are constructed using several programming languages, platforms, and additional open source projects.

+ +

Languages

+

The languages used are Java, JRuby, Ruby, JavaScript, and CSS.

+ +

Platforms

+

The backend, OAI harvester, and indexer are Sinatra apps. The staff and public user interfaces are Ruby on Rails apps.

+ +

Additional open source projects

+

The database used out of the box and for testing is Apache Derby. The database suggested for production is MySQL. The index platform is Apache Solr.

+ + +
+ +
+ + + diff --git a/architecture/languages.md b/architecture/languages.md deleted file mode 100644 index 2a9a4de8..00000000 --- a/architecture/languages.md +++ /dev/null @@ -1,11 +0,0 @@ -# Languages, platforms, and included open source projects -ArchivesSpace components are constructed using several programming languages, platforms, and additional open source projects. - -## Languages -The languages used are Java, JRuby, Ruby, JavaScript, and CSS. - -## Platforms -The backend, OAI harvester, and indexer are Sinatra apps. The staff and public user interfaces are Ruby on Rails apps. - -## Additional open source projects -The database used out of the box and for testing is Apache Derby. The database suggested for production is MySQL. The index platform is Apache Solr. diff --git a/architecture/oai-pmh/README.md b/architecture/oai-pmh/README.md deleted file mode 100644 index d01aebee..00000000 --- a/architecture/oai-pmh/README.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -permalink: /architecture/oai-pmh/ ---- - -# OAI-PMH interface - -A starter OAI-PMH interface for ArchivesSpace allowing other systems to harvest -your records is included in version 2.1.0. Additional features and functionality -will be added in later releases. - -By default, the OAI-PMH interface runs on port 8082. A sample request page is -available at http://localhost:8082/sample. (To access it, make sure that you -have set the AppConfig[:oai_proxy_url] appropriately.) - -The system provides responses to a number of standard OAI-PMH requests, -including GetRecord, Identify, ListIdentifiers, ListMetadataFormats, -ListRecords, and ListSets. Unpublished and suppressed records and elements are -not included in any of the OAI-PMH responses. - -Some responses require the URL parameter metadataPrefix. 
There are five -different metadata responses available: - -* EAD -- oai_ead (resources in EAD) -* Dublin Core -- oai_dc (archival objects and resources in Dublin Core) -* extended DCMI Terms -- oai_dcterms (archival objects and resources in DCMI Metadata Terms format) -* MARC -- oai_marc (archival objects and resources in MARC) -* MODS -- oai_mods (archival objects and resources in MODS) - -The EAD response for resources and MARC response for resources and archival -objects use the mappings from the built-in exporter for resources. The DC, -DCMI terms, and MODS responses for resources and archival objects use mappings -suggested by the community. - -Here are some example URLs and other information for these requests: - -**GetRecord** – needs a record identifier and metadataPrefix - Up to ArchivesSpace v3.5.1 OAI identifiers are in this format: - - ```http://localhost:8082/oai?verb=GetRecord&identifier=oai:archivesspace//repositories/2/resources/138&metadataPrefix=oai_ead``` - - Starting with ArchivesSpace v4.0.0 OAI identifiers are in the new format (notice the colon after the `oai:archivesspace` namespace part of the identifier): - - `http://localhost:8082/oai?verb=GetRecord&identifier=oai:archivesspace:/repositories/2/resources/138&metadataPrefix=oai_ead` - - see also: https://github.com/code4lib/ruby-oai/releases/tag/v1.0.0 - -**Identify** - - `http://localhost:8082/oai?verb=Identify` - -**ListIdentifiers** – needs a metadataPrefix - - `http://localhost:8082/oai?verb=ListIdentifiers&metadataPrefix=oai_dc` - -**ListMetadataFormats** - - `http://localhost:8082/oai?verb=ListMetadataFormats` - -**ListRecords** – needs a metadataPrefix - - `http://localhost:8082/oai?verb=ListRecords&metadataPrefix=oai_dcterms` - -**ListSets** - - `http://localhost:8082/oai?verb=ListSets` - -Harvesting the ArchivesSpace OAI-PMH server without specifying a set will yield -all published records across all repositories. -Predefined sets can be accessed using the set parameter. 
In order to retrieve -records from sets include a set parameter in the URL and the DC metadataPrefix, -such as "&set=collection&metadataPrefix=oai_dc". These sets can be from -configured sets as shown above or from the following levels of description: - -* Class -- class -* Collection -- collection -* File -- file -* Fonds -- fonds -* Item -- item -* Other_Level -- otherlevel -* Record_Group -- recordgrp -* Series -- series -* Sub-Fonds -- subfonds -* Sub-Group -- subgrp -* Sub-Series -- subseries - -In addition to the sets based on level of description, you can define sets -based on repository codes and/or sponsors in the config/config.rb file: - - AppConfig[:oai_sets] = { - 'repository_set' => { - :repo_codes => ['hello626'], - :description => "A set of one or more repositories", - }, - 'sponsor_set' => { - :sponsors => ['The_Sponsor'], - :description => "A set of one or more sponsors", - } - } - -The interface implements resumption tokens for pagination of results. As an -example, the following URL format should be used to page through the results -from a ListRecords request: - - `http://localhost:8082/oai?verb=ListRecords&metadataPrefix=oai_ead` - -using the resumption token: - - `http://localhost:8082/oai?verb=ListRecords&resumptionToken=eyJtZXRhZGF0YV9wcmVmaXgiOiJvYWlfZWFkIiwiZnJvbSI6IjE5NzAtMDEtMDEgMDA6MDA6MDAgVVRDIiwidW50aWwiOiIyMDE3LTA3LTA2IDE3OjEwOjQxIFVUQyIsInN0YXRlIjoicHJvZHVjaW5nX3JlY29yZHMiLCJsYXN0X2RlbGV0ZV9pZCI6MCwicmVtYWluaW5nX3R5cGVzIjp7IlJlc291cmNlIjoxfSwiaXNzdWVfdGltZSI6MTQ5OTM2MTA0Mjc0OX0=` - -Note: you do not use the metadataPrefix when you use the resumptionToken - -The ArchivesSpace OAI-PMH server supports persistent deletes, so harvesters -will be notified of any records that were deleted since -they last harvested. - -Mixed content is removed from Dublin Core, dcterms, MARC, and MODS field outputs -in the OAI-PMH response (e.g., a scope note mapped to a DC description field - would not include `

`, ``, `

`, ``, ``, `
`, - ``, ``, ``, ``, ``, ``, ``, - ``, ``, ``, ``, ``, ``, ``, - ``, ``, ``, ``, ``, ``, ``, ``, ``, ``, ``, ``, `<unitdate>`, `<unittitle>`). - -The component level records include inherited data from superior hierarchical -levels of the finding aid. Element inheritance is determined by institutional -system configuration (editable in the config/config.rb file) as implemented for -the Public User Interface. - -ARKs have not yet been implemented, pending more discussion of how they should -be formulated. diff --git a/architecture/oai-pmh/index.html b/architecture/oai-pmh/index.html new file mode 100644 index 00000000..a9f246a1 --- /dev/null +++ b/architecture/oai-pmh/index.html @@ -0,0 +1,271 @@ +<!DOCTYPE html> +<html lang="en-US"> + <head> + <meta charset="UTF-8"> + <meta http-equiv="X-UA-Compatible" content="IE=edge"> + <meta name="viewport" content="width=device-width, initial-scale=1"> + +<!-- Begin Jekyll SEO tag v2.8.0 --> +<title>tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/oai-pmh/README.md + +

+ +

+ + Report issue on Jira + architecture/oai-pmh/README.md + +

+
+
+ +

OAI-PMH interface

+ +

A starter OAI-PMH interface for ArchivesSpace allowing other systems to harvest +your records is included in version 2.1.0. Additional features and functionality +will be added in later releases.

+ +

By default, the OAI-PMH interface runs on port 8082. A sample request page is +available at http://localhost:8082/sample. (To access it, make sure that you +have set the AppConfig[:oai_proxy_url] appropriately.)

+ +

The system provides responses to a number of standard OAI-PMH requests, +including GetRecord, Identify, ListIdentifiers, ListMetadataFormats, +ListRecords, and ListSets. Unpublished and suppressed records and elements are +not included in any of the OAI-PMH responses.

+ +

Some responses require the URL parameter metadataPrefix. There are five +different metadata responses available:

+ +
    +
  • EAD – oai_ead (resources in EAD)
  • +
  • Dublin Core – oai_dc (archival objects and resources in Dublin Core)
  • +
  • extended DCMI Terms – oai_dcterms (archival objects and resources in DCMI Metadata Terms format)
  • +
  • MARC – oai_marc (archival objects and resources in MARC)
  • +
  • MODS – oai_mods (archival objects and resources in MODS)
  • +
+ +

The EAD response for resources and MARC response for resources and archival +objects use the mappings from the built-in exporter for resources. The DC, +DCMI terms, and MODS responses for resources and archival objects use mappings +suggested by the community.

+ +

Here are some example URLs and other information for these requests:

+ +

GetRecord – needs a record identifier and metadataPrefix + Up to ArchivesSpace v3.5.1 OAI identifiers are in this format:

+ +

http://localhost:8082/oai?verb=GetRecord&identifier=oai:archivesspace//repositories/2/resources/138&metadataPrefix=oai_ead

+ +

Starting with ArchivesSpace v4.0.0 OAI identifiers are in the new format (notice the colon after the oai:archivesspace namespace part of the identifier):

+ +

http://localhost:8082/oai?verb=GetRecord&identifier=oai:archivesspace:/repositories/2/resources/138&metadataPrefix=oai_ead

+ +

see also: https://github.com/code4lib/ruby-oai/releases/tag/v1.0.0

+ +

Identify

+ +

http://localhost:8082/oai?verb=Identify

+ +

ListIdentifiers – needs a metadataPrefix

+ +

http://localhost:8082/oai?verb=ListIdentifiers&metadataPrefix=oai_dc

+ +

ListMetadataFormats

+ +

http://localhost:8082/oai?verb=ListMetadataFormats

+ +

ListRecords – needs a metadataPrefix

+ +

http://localhost:8082/oai?verb=ListRecords&metadataPrefix=oai_dcterms

+ +

ListSets

+ +

http://localhost:8082/oai?verb=ListSets

+ +

Harvesting the ArchivesSpace OAI-PMH server without specifying a set will yield +all published records across all repositories. +Predefined sets can be accessed using the set parameter. In order to retrieve +records from sets include a set parameter in the URL and the DC metadataPrefix, +such as “&set=collection&metadataPrefix=oai_dc”. These sets can be from +configured sets as shown above or from the following levels of description:

+ +
    +
  • Class – class
  • +
  • Collection – collection
  • +
  • File – file
  • +
  • Fonds – fonds
  • +
  • Item – item
  • +
  • Other_Level – otherlevel
  • +
  • Record_Group – recordgrp
  • +
  • Series – series
  • +
  • Sub-Fonds – subfonds
  • +
  • Sub-Group – subgrp
  • +
  • Sub-Series – subseries
  • +
+ +

In addition to the sets based on level of description, you can define sets +based on repository codes and/or sponsors in the config/config.rb file:

+ +
AppConfig[:oai_sets] = {
+'repository_set' => {
+    :repo_codes => ['hello626'],
+    :description => "A set of one or more repositories",
+},
+'sponsor_set' => {
+    :sponsors => ['The_Sponsor'],
+    :description => "A set of one or more sponsors",
+}
+}
+
+ +

The interface implements resumption tokens for pagination of results. As an +example, the following URL format should be used to page through the results +from a ListRecords request:

+ +
`http://localhost:8082/oai?verb=ListRecords&metadataPrefix=oai_ead`
+
+ +

using the resumption token:

+ +
`http://localhost:8082/oai?verb=ListRecords&resumptionToken=eyJtZXRhZGF0YV9wcmVmaXgiOiJvYWlfZWFkIiwiZnJvbSI6IjE5NzAtMDEtMDEgMDA6MDA6MDAgVVRDIiwidW50aWwiOiIyMDE3LTA3LTA2IDE3OjEwOjQxIFVUQyIsInN0YXRlIjoicHJvZHVjaW5nX3JlY29yZHMiLCJsYXN0X2RlbGV0ZV9pZCI6MCwicmVtYWluaW5nX3R5cGVzIjp7IlJlc291cmNlIjoxfSwiaXNzdWVfdGltZSI6MTQ5OTM2MTA0Mjc0OX0=`
+
+ +

Note: you do not use the metadataPrefix when you use the resumptionToken

+ +

The ArchivesSpace OAI-PMH server supports persistent deletes, so harvesters +will be notified of any records that were deleted since +they last harvested.

+ +

Mixed content is removed from Dublin Core, dcterms, MARC, and MODS field outputs +in the OAI-PMH response (e.g., a scope note mapped to a DC description field + would not include <p>, <abbr>, <address>, <archref>, <bibref>, <blockquote>, + <chronlist>, <corpname>, <date>, <emph>, <expan>, <extptr>, <extref>, + <famname>, <function>, <genreform>, <geogname>, <lb>, <linkgrp>, <list>, + <name>, <note>, <num>, <occupation>, <origination>, <persname>, <ptr>, <ref>, <repository>, <subject>, <table>, <title>, <unitdate>, <unittitle>).

+ +

The component level records include inherited data from superior hierarchical +levels of the finding aid. Element inheritance is determined by institutional +system configuration (editable in the config/config.rb file) as implemented for +the Public User Interface.

+ +

ARKs have not yet been implemented, pending more discussion of how they should +be formulated.

+ + +
+ +
+ + + diff --git a/architecture/public/README.md b/architecture/public/README.md deleted file mode 100644 index e835db5d..00000000 --- a/architecture/public/README.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -permalink: /architecture/public/ ---- - -# The ArchivesSpace public user interface - -The ArchivesSpace Public User Interface (PUI) provides a public -interface to your ArchivesSpace collections. In a default -ArchivesSpace installation it runs on port `:8081`. - -## Configuration - -The PUI is configured using the standard ArchivesSpace `config.rb` -file, with the relevant configuration options are prefixed with -`:pui_`. - -To see the full list of available options, see the file -[`https://github.com/archivesspace/archivesspace/blob/master/common/config/config-defaults.rb`](https://github.com/archivesspace/archivesspace/blob/master/common/config/config-defaults.rb) - -### Preserving Patron Privacy - -The **:block_referrer** key in the configuration file (default: **true**) determines whether the full referring URL is -transmitted when the user clicks a link to a website outside the web domain of your instance of ArchivesSpace. This -protects your patrons from tracking by that site. - -### Main Navigation Menu - -You can choose not to display one or more of the links on the main -(horizontal) navigation menu, either globally or by repository, if you -have more than one repository. You manage this through the -`:pui_hide` options in the `config/config.rb` file. - -### Repository Customization - -#### Display of "badges" on the Repository page - -You can configure which badges appear on the Repository page, both -globally or by repository. See the `:pui_hide` configuration options -for these too. - -### Activation of the "Request" button on archival object pages - -You can configure, both globally or by repository, whether the -"Request" button is active on archival object pages for objects that -don't have an associated Top Container. 
See the -`:pui_requests_permitted_for_containers_only` configuration option to -modify this. - -### I18n - -You can change the text and labels used by the PUI by editing the -locale files under the `locales/public` directory of your -ArchivesSpace distribution. - -### Addition of a "lead paragraph" - -You can also use the custom `.yml` files, described above, to add a -custom "lead paragraph" (including html markup) for one or more of -your repositories, keyed to the repository's code. - -For example, if your repository, `My Wonderful Repository` has a code of `MWR`, this is what you might see in the -custom `en.yml`: -``` -en: - repos: - mwr: - lead_graph: This amazing repository has so much to offer you! -``` - -## Development - -To run a development server, the PUI follows the same pattern as the rest of ArchivesSpace. From your ArchivesSpace checkout: - - # Prepare all dependencies - build/run bootstrap - - # Run the backend development server (and Solr) - build/run backend:devserver - - # Run the indexer - build/run indexer - - # Finally, run the PUI itself - build/run public:devserver - -## Inheritance - -### Three options for inheritance: - -* Directly inherit a value for a field – the record has no value for the field and you want the value in the field to display as if it were the record’s own [uncomment the inheritance section in the config, set desired field (property) to inherit_directly => true] -* Indirectly inherit a value for a field – the record has no value for the field and you want to display the value from a higher level, but precede it with a note that indicates that it comes from that higher level, such as "From the collection" [uncomment the inheritance section in the config, set desired field (property) to inherit_directly => false] -* Don’t display the field at all – the record has no value of its own for the field and you don’t want it to display at all [uncomment the inheritance section in the config, delete the lines for the desired 
field (property)] - - -### Archival Inheritance - -With the new version of the Public Interface, all elements of description can be inherited. This is especially important since the PUI displays each level of description as its own webpage. - -Each element of description can be inherited either directly or indirectly. When an element is inherited directly, it will appear as if that element was attached directly to that archival object in the staff interface. When an element is inherited indirectly, it will appear on the lower-level of description in the public interface, but the inherited element will be preceded with a note indicating the level of the ancestor from which the note is inherited (e.g. From the Collection, or From the Sub-Series). In both cases, the element will only be inherited if it is missing from the archival object. Additionally, the element of description will only be inherited from the closest ancestor. In other words, if a top-level collection record has an access restrictions note, and a child-level series record has an an access restrictions note, but the sub-series child of that series record lacks an access restrictions note, then the sub-series record will inherit only the access restrictions note from its parent series record. - -Additionally, the identifier element in ArchivesSpace, which is better known as the Reference Code in ISAD-G and DACS, can be inherited in a composite manner. When inherited in a composite manner, the inherited elements will be concatenated together. In other words, an identifier at the item level could look like this: MSS 1. Series A. Item 1. Though the archival object has an identifier of "Item 1", a composite identifier is displayed since the series-level record to which the item belongs has an identifier of "Series A", which in turn also belongs to a collection-level record that has an identifier of "MSS 1". 
- -By default, the following elements are turned on for inheritance: - - * Title (direct inheritance) - * Identifier (indirect inheritance, but by default the identifier inherits from ancestor archival objects only; it does NOT include the resource identifier. - -Also it is advised to inherit this element in a composite fashion once it is determined whether the level of description should or should not display as part of the identifier, which will depend upon local data-entry practices - - * Language code (direct inheritance, but it does NOT display anywhere in the interface currently; eventually, this could be used for faceting) - * Dates (direct inheritance) - * Extents (indirect inheritance) - * Creator (indirect inheritance) - * Access restrictions note (direct inheritance) - * Scope and contents note (indirect inheritance) - * Language of Materials note (indirect inheritance, but there seems to be a bug right now so that the Language notes always show up as being directly inherited. See AR-XXXX) - -See https://github.com/archivesspace/archivesspace/blob/master/common/config/config-defaults.rb#L296-L396 for more information and examples. - -Also, a video overview of this feature, which was recorded before development was finished, is available online: -https://vimeo.com/195457286 - -Composite Identifier Inheritance - -If you add the following three lines to your configuration file, re-start ArchivesSpace, and then let the indexer re-index your records, you can gain the benefit of composite identifiers: - -``` -AppConfig[:record_inheritance][:archival_object][:composite_identifiers] = { -:include_level => true, -:identifier_delimiter => '. 
' -} -``` - -To add extra fields, such as subjects you can add the following: - -``` -inherited_fields_extras = [ - { - code: 'subjects', - property: 'subjects', - inherit_if: proc { |json| json.select { |j| true } }, - inherit_directly: false, - }, -] -``` - -When you set include_level to true, that means that the archival object level will be included in identifier so that you don't have to repeat that data. For example, if the level of description is "Series" and the archival object identifier is "1", and the parent resource identifier is "MSS 1", then the composite identifier would display as "MSS 1. Series 1" at the series 1 level, and any descendant record. If you set include_level to false, then the display would be "MSS 1. 1" - -### License - -ArchivesSpace is released under the [Educational Community License, -version 2.0](http://opensource.org/licenses/ecl2.php). See the -[COPYING](https://github.com/archivesspace/archivesspace/blob/master/COPYING) file for more information. diff --git a/architecture/public/index.html b/architecture/public/index.html new file mode 100644 index 00000000..e526c391 --- /dev/null +++ b/architecture/public/index.html @@ -0,0 +1,291 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/public/README.md + +

+ +

+ + Report issue on Jira + architecture/public/README.md + +

+
+
+ +

The ArchivesSpace public user interface

+ +

The ArchivesSpace Public User Interface (PUI) provides a public +interface to your ArchivesSpace collections. In a default +ArchivesSpace installation it runs on port :8081.

+ +

Configuration

+ +

The PUI is configured using the standard ArchivesSpace config.rb +file, with the relevant configuration options prefixed with +:pui_.

+ +

To see the full list of available options, see the file +https://github.com/archivesspace/archivesspace/blob/master/common/config/config-defaults.rb

+ +

Preserving Patron Privacy

+ +

The :block_referrer key in the configuration file (default: true) determines whether the full referring URL is +transmitted when the user clicks a link to a website outside the web domain of your instance of ArchivesSpace. This +protects your patrons from tracking by that site.

+ + + +

You can choose not to display one or more of the links on the main +(horizontal) navigation menu, either globally or by repository, if you +have more than one repository. You manage this through the +:pui_hide options in the config/config.rb file.

+ +

Repository Customization

+ +

Display of “badges” on the Repository page

+ +

You can configure which badges appear on the Repository page, both +globally or by repository. See the :pui_hide configuration options +for these too.

+ +

Activation of the “Request” button on archival object pages

+ +

You can configure, both globally or by repository, whether the +“Request” button is active on archival object pages for objects that +don’t have an associated Top Container. See the +:pui_requests_permitted_for_containers_only configuration option to +modify this.

+ +

I18n

+ +

You can change the text and labels used by the PUI by editing the +locale files under the locales/public directory of your +ArchivesSpace distribution.

+ +

Addition of a “lead paragraph”

+ +

You can also use the custom .yml files, described above, to add a +custom “lead paragraph” (including html markup) for one or more of +your repositories, keyed to the repository’s code.

+ +

For example, if your repository, My Wonderful Repository has a code of MWR, this is what you might see in the +custom en.yml:

+
en:
+  repos:
+    mwr:
+      lead_graph: This <strong>amazing</strong> repository has so much to offer you!
+
+ +

Development

+ +

To run a development server, the PUI follows the same pattern as the rest of ArchivesSpace. From your ArchivesSpace checkout:

+ +
 # Prepare all dependencies
+ build/run bootstrap
+
+ # Run the backend development server (and Solr)
+ build/run backend:devserver
+
+ # Run the indexer
+ build/run indexer
+
+ # Finally, run the PUI itself
+ build/run public:devserver
+
+ +

Inheritance

+ +

Three options for inheritance:

+ +
    +
  • Directly inherit a value for a field – the record has no value for the field and you want the value in the field to display as if it were the record’s own [uncomment the inheritance section in the config, set desired field (property) to inherit_directly => true]
  • +
  • Indirectly inherit a value for a field – the record has no value for the field and you want to display the value from a higher level, but precede it with a note that indicates that it comes from that higher level, such as “From the collection” [uncomment the inheritance section in the config, set desired field (property) to inherit_directly => false]
  • +
  • Don’t display the field at all – the record has no value of its own for the field and you don’t want it to display at all [uncomment the inheritance section in the config, delete the lines for the desired field (property)]
  • +
+ +

Archival Inheritance

+ +

With the new version of the Public Interface, all elements of description can be inherited. This is especially important since the PUI displays each level of description as its own webpage.

+ +

Each element of description can be inherited either directly or indirectly. When an element is inherited directly, it will appear as if that element was attached directly to that archival object in the staff interface. When an element is inherited indirectly, it will appear on the lower-level of description in the public interface, but the inherited element will be preceded with a note indicating the level of the ancestor from which the note is inherited (e.g. From the Collection, or From the Sub-Series). In both cases, the element will only be inherited if it is missing from the archival object. Additionally, the element of description will only be inherited from the closest ancestor. In other words, if a top-level collection record has an access restrictions note, and a child-level series record has an access restrictions note, but the sub-series child of that series record lacks an access restrictions note, then the sub-series record will inherit only the access restrictions note from its parent series record.

+ +

Additionally, the identifier element in ArchivesSpace, which is better known as the Reference Code in ISAD-G and DACS, can be inherited in a composite manner. When inherited in a composite manner, the inherited elements will be concatenated together. In other words, an identifier at the item level could look like this: MSS 1. Series A. Item 1. Though the archival object has an identifier of “Item 1”, a composite identifier is displayed since the series-level record to which the item belongs has an identifier of “Series A”, which in turn also belongs to a collection-level record that has an identifier of “MSS 1”.

+ +

By default, the following elements are turned on for inheritance:

+ +
    +
  • Title (direct inheritance)
  • +
  • Identifier (indirect inheritance, but by default the identifier inherits from ancestor archival objects only; it does NOT include the resource identifier.)
  • +
+ +

Also it is advised to inherit this element in a composite fashion once it is determined whether the level of description should or should not display as part of the identifier, which will depend upon local data-entry practices

+ +
    +
  • Language code (direct inheritance, but it does NOT display anywhere in the interface currently; eventually, this could be used for faceting)
  • +
  • Dates (direct inheritance)
  • +
  • Extents (indirect inheritance)
  • +
  • Creator (indirect inheritance)
  • +
  • Access restrictions note (direct inheritance)
  • +
  • Scope and contents note (indirect inheritance)
  • +
  • Language of Materials note (indirect inheritance, but there seems to be a bug right now so that the Language notes always show up as being directly inherited. See AR-XXXX)
  • +
+ +

See https://github.com/archivesspace/archivesspace/blob/master/common/config/config-defaults.rb#L296-L396 for more information and examples.

+ +

Also, a video overview of this feature, which was recorded before development was finished, is available online: +https://vimeo.com/195457286

+ +

Composite Identifier Inheritance

+ +

If you add the following three lines to your configuration file, re-start ArchivesSpace, and then let the indexer re-index your records, you can gain the benefit of composite identifiers:

+ +
AppConfig[:record_inheritance][:archival_object][:composite_identifiers] = {
+:include_level => true,
+:identifier_delimiter => '. '
+}
+
+ +

To add extra fields, such as subjects you can add the following:

+ +
inherited_fields_extras = [
+ {
+   code: 'subjects',
+   property: 'subjects',
+   inherit_if: proc { |json| json.select { |j| true } },
+   inherit_directly: false,
+ },
+]
+
+ +

When you set include_level to true, that means that the archival object level will be included in identifier so that you don’t have to repeat that data. For example, if the level of description is “Series” and the archival object identifier is “1”, and the parent resource identifier is “MSS 1”, then the composite identifier would display as “MSS 1. Series 1” at the series 1 level, and any descendant record. If you set include_level to false, then the display would be “MSS 1. 1”

+ +

License

+ +

ArchivesSpace is released under the Educational Community License, +version 2.0. See the +COPYING file for more information.

+ + +
+ +
+ + + diff --git a/architecture/search/README.md b/architecture/search/README.md deleted file mode 100644 index 489f3167..00000000 --- a/architecture/search/README.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -permalink: /architecture/search/ ---- - -# Search indexing - -The ArchivesSpace system uses Solr for its full-text search. As -records are added/updated/deleted by the backend, the corresponding -changes are made to the Solr index to keep them (roughly) -synchronized. - -Keeping the backend and Solr in sync is the job of the "indexer", a -separate process that runs in the background and watches for record -updates. The indexer operates in two modes simultaneously: - - * The periodic mode polls the backend to get a list of records that - were added/modified/deleted since it last checked. These changes - are propagated to the Solr index. This generally happens every 30 - to 60 seconds (and is configurable). - * The real-time mode responds to updates as they happen, applying - changes to Solr as soon as they're applied to the backend. This - aims to reflect updates within the search indexes in milliseconds - or seconds. - -The two modes of operation overlap somewhat, but they serve different -purposes. The periodic mode ensures that records are never missed due -to transient failures, and will bring the indexes up to date even if -the indexer hasn't run for quite some time--even creating them from -scratch if necessary. This mode is also used for indexing updates -made by bulk import processes and other updates that don't need to be -reflected in the indexes immediately. - -The real-time indexer mode attempts to apply updates to the index much -more quickly. Rather than polling, it performs a `GET` request -against the `/update-feed` endpoint of the backend. This endpoint -returns any records that were updated since the last time it was asked -and, most importantly, it leaves the request hanging if no records -have changed. 
- -By calling this endpoint in a loop, the real-time indexer spends most -of its time sitting around waiting for something to happen. The -moment a record is updated, the already-pending request to the -`/update-feed` endpoint yields the updated record, which is sent to -Solr and indexed immediately. This avoids the delays associated with -polling and keeps indexing latency low where it matters. For example, -newly created records should appear in the browse list by the time a -user views it. diff --git a/architecture/search/index.html b/architecture/search/index.html new file mode 100644 index 00000000..a4a03bcb --- /dev/null +++ b/architecture/search/index.html @@ -0,0 +1,184 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + architecture/search/README.md + +

+ +

+ + Report issue on Jira + architecture/search/README.md + +

+
+
+ +

Search indexing

+ +

The ArchivesSpace system uses Solr for its full-text search. As +records are added/updated/deleted by the backend, the corresponding +changes are made to the Solr index to keep them (roughly) +synchronized.

+ +

Keeping the backend and Solr in sync is the job of the “indexer”, a +separate process that runs in the background and watches for record +updates. The indexer operates in two modes simultaneously:

+ +
    +
  • The periodic mode polls the backend to get a list of records that +were added/modified/deleted since it last checked. These changes +are propagated to the Solr index. This generally happens every 30 +to 60 seconds (and is configurable).
  • +
  • The real-time mode responds to updates as they happen, applying +changes to Solr as soon as they’re applied to the backend. This +aims to reflect updates within the search indexes in milliseconds +or seconds.
  • +
+ +

The two modes of operation overlap somewhat, but they serve different +purposes. The periodic mode ensures that records are never missed due +to transient failures, and will bring the indexes up to date even if +the indexer hasn’t run for quite some time–even creating them from +scratch if necessary. This mode is also used for indexing updates +made by bulk import processes and other updates that don’t need to be +reflected in the indexes immediately.

+ +

The real-time indexer mode attempts to apply updates to the index much +more quickly. Rather than polling, it performs a GET request +against the /update-feed endpoint of the backend. This endpoint +returns any records that were updated since the last time it was asked +and, most importantly, it leaves the request hanging if no records +have changed.

+ +

By calling this endpoint in a loop, the real-time indexer spends most +of its time sitting around waiting for something to happen. The +moment a record is updated, the already-pending request to the +/update-feed endpoint yields the updated record, which is sent to +Solr and indexed immediately. This avoids the delays associated with +polling and keeps indexing latency low where it matters. For example, +newly created records should appear in the browse list by the time a +user views it.

+ + +
+ +
+ + + diff --git a/assets/css/style.css b/assets/css/style.css new file mode 100644 index 00000000..fd9a2237 --- /dev/null +++ b/assets/css/style.css @@ -0,0 +1,580 @@ +@font-face { + font-family: "Noto Sans"; + font-weight: 400; + font-style: normal; + src: url("../fonts/Noto-Sans-regular/Noto-Sans-regular.eot"); + src: url("../fonts/Noto-Sans-regular/Noto-Sans-regular.eot?#iefix") format("embedded-opentype"), local("Noto Sans"), local("Noto-Sans-regular"), url("../fonts/Noto-Sans-regular/Noto-Sans-regular.woff2") format("woff2"), url("../fonts/Noto-Sans-regular/Noto-Sans-regular.woff") format("woff"), url("../fonts/Noto-Sans-regular/Noto-Sans-regular.ttf") format("truetype"), url("../fonts/Noto-Sans-regular/Noto-Sans-regular.svg#NotoSans") format("svg"); +} +@font-face { + font-family: "Noto Sans"; + font-weight: 700; + font-style: normal; + src: url("../fonts/Noto-Sans-700/Noto-Sans-700.eot"); + src: url("../fonts/Noto-Sans-700/Noto-Sans-700.eot?#iefix") format("embedded-opentype"), local("Noto Sans Bold"), local("Noto-Sans-700"), url("../fonts/Noto-Sans-700/Noto-Sans-700.woff2") format("woff2"), url("../fonts/Noto-Sans-700/Noto-Sans-700.woff") format("woff"), url("../fonts/Noto-Sans-700/Noto-Sans-700.ttf") format("truetype"), url("../fonts/Noto-Sans-700/Noto-Sans-700.svg#NotoSans") format("svg"); +} +@font-face { + font-family: "Noto Sans"; + font-weight: 400; + font-style: italic; + src: url("../fonts/Noto-Sans-italic/Noto-Sans-italic.eot"); + src: url("../fonts/Noto-Sans-italic/Noto-Sans-italic.eot?#iefix") format("embedded-opentype"), local("Noto Sans Italic"), local("Noto-Sans-italic"), url("../fonts/Noto-Sans-italic/Noto-Sans-italic.woff2") format("woff2"), url("../fonts/Noto-Sans-italic/Noto-Sans-italic.woff") format("woff"), url("../fonts/Noto-Sans-italic/Noto-Sans-italic.ttf") format("truetype"), url("../fonts/Noto-Sans-italic/Noto-Sans-italic.svg#NotoSans") format("svg"); +} +@font-face { + font-family: "Noto Sans"; + font-weight: 700; + font-style: 
italic; + src: url("../fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot"); + src: url("../fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot?#iefix") format("embedded-opentype"), local("Noto Sans Bold Italic"), local("Noto-Sans-700italic"), url("../fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff2") format("woff2"), url("../fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff") format("woff"), url("../fonts/Noto-Sans-700italic/Noto-Sans-700italic.ttf") format("truetype"), url("../fonts/Noto-Sans-700italic/Noto-Sans-700italic.svg#NotoSans") format("svg"); +} +.highlight table td { + padding: 5px; +} + +.highlight table pre { + margin: 0; +} + +.highlight .cm { + color: #999988; + font-style: italic; +} + +.highlight .cp { + color: #999999; + font-weight: bold; +} + +.highlight .c1 { + color: #999988; + font-style: italic; +} + +.highlight .cs { + color: #999999; + font-weight: bold; + font-style: italic; +} + +.highlight .c, .highlight .cd { + color: #999988; + font-style: italic; +} + +.highlight .err { + color: #a61717; + background-color: #e3d2d2; +} + +.highlight .gd { + color: #000000; + background-color: #ffdddd; +} + +.highlight .ge { + color: #000000; + font-style: italic; +} + +.highlight .gr { + color: #aa0000; +} + +.highlight .gh { + color: #999999; +} + +.highlight .gi { + color: #000000; + background-color: #ddffdd; +} + +.highlight .go { + color: #888888; +} + +.highlight .gp { + color: #555555; +} + +.highlight .gs { + font-weight: bold; +} + +.highlight .gu { + color: #aaaaaa; +} + +.highlight .gt { + color: #aa0000; +} + +.highlight .kc { + color: #000000; + font-weight: bold; +} + +.highlight .kd { + color: #000000; + font-weight: bold; +} + +.highlight .kn { + color: #000000; + font-weight: bold; +} + +.highlight .kp { + color: #000000; + font-weight: bold; +} + +.highlight .kr { + color: #000000; + font-weight: bold; +} + +.highlight .kt { + color: #445588; + font-weight: bold; +} + +.highlight .k, .highlight .kv { + color: #000000; + 
font-weight: bold; +} + +.highlight .mf { + color: #009999; +} + +.highlight .mh { + color: #009999; +} + +.highlight .il { + color: #009999; +} + +.highlight .mi { + color: #009999; +} + +.highlight .mo { + color: #009999; +} + +.highlight .m, .highlight .mb, .highlight .mx { + color: #009999; +} + +.highlight .sb { + color: #d14; +} + +.highlight .sc { + color: #d14; +} + +.highlight .sd { + color: #d14; +} + +.highlight .s2 { + color: #d14; +} + +.highlight .se { + color: #d14; +} + +.highlight .sh { + color: #d14; +} + +.highlight .si { + color: #d14; +} + +.highlight .sx { + color: #d14; +} + +.highlight .sr { + color: #009926; +} + +.highlight .s1 { + color: #d14; +} + +.highlight .ss { + color: #990073; +} + +.highlight .s { + color: #d14; +} + +.highlight .na { + color: #008080; +} + +.highlight .bp { + color: #999999; +} + +.highlight .nb { + color: #0086B3; +} + +.highlight .nc { + color: #445588; + font-weight: bold; +} + +.highlight .no { + color: #008080; +} + +.highlight .nd { + color: #3c5d5d; + font-weight: bold; +} + +.highlight .ni { + color: #800080; +} + +.highlight .ne { + color: #990000; + font-weight: bold; +} + +.highlight .nf { + color: #990000; + font-weight: bold; +} + +.highlight .nl { + color: #990000; + font-weight: bold; +} + +.highlight .nn { + color: #555555; +} + +.highlight .nt { + color: #000080; +} + +.highlight .vc { + color: #008080; +} + +.highlight .vg { + color: #008080; +} + +.highlight .vi { + color: #008080; +} + +.highlight .nv { + color: #008080; +} + +.highlight .ow { + color: #000000; + font-weight: bold; +} + +.highlight .o { + color: #000000; + font-weight: bold; +} + +.highlight .w { + color: #bbbbbb; +} + +.highlight { + background-color: #f8f8f8; +} + +body { + background-color: #fff; + padding: 50px; + font: 14px/1.5 "Noto Sans", "Helvetica Neue", Helvetica, Arial, sans-serif; + color: #727272; + font-weight: 400; +} + +h1, h2, h3, h4, h5, h6 { + color: #222; + margin: 0 0 20px; +} + +p, ul, ol, table, pre, dl 
{ + margin: 0 0 20px; +} + +h1, h2, h3 { + line-height: 1.1; +} + +h1 { + font-size: 28px; +} + +h2 { + color: #393939; +} + +h3, h4, h5, h6 { + color: #494949; +} + +a { + color: #267CB9; + text-decoration: none; +} + +a:hover, a:focus { + color: #069; + font-weight: bold; +} + +a small { + font-size: 11px; + color: #777; + margin-top: -0.3em; + display: block; +} + +a:hover small { + color: #777; +} + +.wrapper { + width: 860px; + margin: 0 auto; +} + +blockquote { + border-left: 1px solid #e5e5e5; + margin: 0; + padding: 0 0 0 20px; + font-style: italic; +} + +code, pre { + font-family: Monaco, Bitstream Vera Sans Mono, Lucida Console, Terminal, Consolas, Liberation Mono, DejaVu Sans Mono, Courier New, monospace; + color: #333; +} + +pre { + padding: 8px 15px; + background: #f8f8f8; + border-radius: 5px; + border: 1px solid #e5e5e5; + overflow-x: auto; +} + +table { + width: 100%; + border-collapse: collapse; +} + +th, td { + text-align: left; + padding: 5px 10px; + border-bottom: 1px solid #e5e5e5; +} + +dt { + color: #444; + font-weight: 700; +} + +th { + color: #444; +} + +img { + max-width: 100%; +} + +kbd { + background-color: #fafbfc; + border: 1px solid #c6cbd1; + border-bottom-color: #959da5; + border-radius: 3px; + box-shadow: inset 0 -1px 0 #959da5; + color: #444d56; + display: inline-block; + font-size: 11px; + line-height: 10px; + padding: 3px 5px; + vertical-align: middle; +} + +header { + width: 270px; + float: left; + position: fixed; + -webkit-font-smoothing: subpixel-antialiased; +} + +ul.downloads { + list-style: none; + height: 40px; + padding: 0; + background: #f4f4f4; + border-radius: 5px; + border: 1px solid #e0e0e0; + width: 270px; +} + +.downloads li { + width: 89px; + float: left; + border-right: 1px solid #e0e0e0; + height: 40px; +} + +.downloads li:first-child a { + border-radius: 5px 0 0 5px; +} + +.downloads li:last-child a { + border-radius: 0 5px 5px 0; +} + +.downloads a { + line-height: 1; + font-size: 11px; + color: #676767; + 
display: block; + text-align: center; + padding-top: 6px; + height: 34px; +} + +.downloads a:hover, .downloads a:focus { + color: #675C5C; + font-weight: bold; +} + +.downloads ul a:active { + background-color: #f0f0f0; +} + +strong { + color: #222; + font-weight: 700; +} + +.downloads li + li + li { + border-right: none; + width: 89px; +} + +.downloads a strong { + font-size: 14px; + display: block; + color: #222; +} + +section { + width: 500px; + float: right; + padding-bottom: 50px; +} + +small { + font-size: 11px; +} + +hr { + border: 0; + background: #e5e5e5; + height: 1px; + margin: 0 0 20px; +} + +footer { + width: 270px; + float: left; + position: fixed; + bottom: 50px; + -webkit-font-smoothing: subpixel-antialiased; +} + +@media print, screen and (max-width: 960px) { + div.wrapper { + width: auto; + margin: 0; + } + header, section, footer { + float: none; + position: static; + width: auto; + } + header { + padding-right: 320px; + } + section { + border: 1px solid #e5e5e5; + border-width: 1px 0; + padding: 20px 0; + margin: 0 0 20px; + } + header a small { + display: inline; + } + header ul { + position: absolute; + right: 50px; + top: 52px; + } +} +@media print, screen and (max-width: 720px) { + body { + word-wrap: break-word; + } + header { + padding: 0; + } + header ul, header p.view { + position: static; + } + pre, code { + word-wrap: normal; + } +} +@media print, screen and (max-width: 480px) { + body { + padding: 15px; + } + .downloads { + width: 99%; + } + .downloads li, .downloads li + li + li { + width: 33%; + } +} +@media print { + body { + padding: 0.4in; + font-size: 12pt; + color: #444; + } +} +a:hover, +a:focus { + font-weight: unset; + text-decoration: underline; +} + +/*# sourceMappingURL=style.css.map */ \ No newline at end of file diff --git a/assets/css/style.css.map b/assets/css/style.css.map new file mode 100644 index 00000000..46c8e158 --- /dev/null +++ b/assets/css/style.css.map @@ -0,0 +1 @@ 
+{"version":3,"sourceRoot":"","sources":["../../../../.rbenv/versions/3.2.0/lib/ruby/gems/3.2.0/gems/jekyll-theme-minimal-0.2.0/_sass/fonts.scss","../../../../.rbenv/versions/3.2.0/lib/ruby/gems/3.2.0/gems/jekyll-theme-minimal-0.2.0/_sass/rouge-github.scss","../../../../.rbenv/versions/3.2.0/lib/ruby/gems/3.2.0/gems/jekyll-theme-minimal-0.2.0/_sass/jekyll-theme-minimal.scss","style.scss"],"names":[],"mappings":"AAAA;EACE;EACA;EACA;EACA;EACA;;AASF;EACE;EACA;EACA;EACA;EACA;;AASF;EACE;EACA;EACA;EACA;EACA;;AASF;EACE;EACA;EACA;EACA;EACA;;AC/CF;EAAsB;;;AACtB;EAAuB;;;AACvB;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;EACA;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;EACA;;;AAEF;EACE;;;AAEF;EACE;EACA;;;AAEF;EACE;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;;;AAEF;EACE;EACA;;;AAEF;EACE;EACA;;;AAEF;EACE;;;AAEF;EACE;;;AC5MF;EACE;EACA;EACA;EACA;EACA;;;AAGF;EACE;EACA;;;AAGF;EACE;;;AAGF;EACE;;;AAGF;EACE;;;AAGF;EACE;;;AAGF;EACE;;;AAGF;EACE;EACA;;;AAGF;EACE;EACA;;;AAGF;EACE;EACA;EACA;EACA;;;AAGF;EACE;;;AAGF;EACE;EACA;;;AAGF;EACE;EACA;EACA;EACA;;;AAGF;EACE;EACA;;;AAGF;EACE;EACA;EACA;EACA;EACA;;;AAGF;EACE;EACA;;;AAGF;EACE;EACA;EACA;;;AAGF;EACE;EACA;;;AAGF;EACE;;;AAGF;EACE;;;AAGF;EACE;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;;;AAGF;EACE;EACA;EACA;EACA;;;AAGF;EACE;EACA;EACA;EACA;EACA;EACA;EACA;;;AAGF;EACE;EACA;EACA;EACA;;;AAGF;EACE;;;AAGF;EACE;;;AAGF;EACE;EACA;EACA;EACA;EACA;EACA;EACA;;;
AAGF;EACE;EACA;;;AAGF;EACE;;;AAGF;EACE;EACA;;;AAGF;EACE;EACA;;;AAGF;EACE;EACA;EACA;;;AAGF;EACE;EACA;EACA;;;AAGF;EACE;;;AAGF;EACE;EACA;EACA;EACA;;;AAGF;EACE;EACA;EACA;EACA;EACA;;;AAGF;EAEE;IACE;IACA;;EAGF;IACE;IACA;IACA;;EAGF;IACE;;EAGF;IACE;IACA;IACA;IACA;;EAGF;IACE;;EAGF;IACE;IACA;IACA;;;AAIJ;EACE;IACE;;EAGF;IACE;;EAGF;IACE;;EAGF;IACE;;;AAIJ;EACE;IACE;;EAGF;IACE;;EAGF;IACE;;;AAIJ;EACE;IACE;IACA;IACA;;;ACvRJ;AAAA;EAEE;EACA","sourcesContent":["@font-face {\n font-family: 'Noto Sans';\n font-weight: 400;\n font-style: normal;\n src: url('../fonts/Noto-Sans-regular/Noto-Sans-regular.eot');\n src: url('../fonts/Noto-Sans-regular/Noto-Sans-regular.eot?#iefix') format('embedded-opentype'),\n local('Noto Sans'),\n local('Noto-Sans-regular'),\n url('../fonts/Noto-Sans-regular/Noto-Sans-regular.woff2') format('woff2'),\n url('../fonts/Noto-Sans-regular/Noto-Sans-regular.woff') format('woff'),\n url('../fonts/Noto-Sans-regular/Noto-Sans-regular.ttf') format('truetype'),\n url('../fonts/Noto-Sans-regular/Noto-Sans-regular.svg#NotoSans') format('svg');\n}\n\n@font-face {\n font-family: 'Noto Sans';\n font-weight: 700;\n font-style: normal;\n src: url('../fonts/Noto-Sans-700/Noto-Sans-700.eot');\n src: url('../fonts/Noto-Sans-700/Noto-Sans-700.eot?#iefix') format('embedded-opentype'),\n local('Noto Sans Bold'),\n local('Noto-Sans-700'),\n url('../fonts/Noto-Sans-700/Noto-Sans-700.woff2') format('woff2'),\n url('../fonts/Noto-Sans-700/Noto-Sans-700.woff') format('woff'),\n url('../fonts/Noto-Sans-700/Noto-Sans-700.ttf') format('truetype'),\n url('../fonts/Noto-Sans-700/Noto-Sans-700.svg#NotoSans') format('svg');\n}\n\n@font-face {\n font-family: 'Noto Sans';\n font-weight: 400;\n font-style: italic;\n src: url('../fonts/Noto-Sans-italic/Noto-Sans-italic.eot');\n src: url('../fonts/Noto-Sans-italic/Noto-Sans-italic.eot?#iefix') format('embedded-opentype'),\n local('Noto Sans Italic'),\n local('Noto-Sans-italic'),\n url('../fonts/Noto-Sans-italic/Noto-Sans-italic.woff2') 
format('woff2'),\n url('../fonts/Noto-Sans-italic/Noto-Sans-italic.woff') format('woff'),\n url('../fonts/Noto-Sans-italic/Noto-Sans-italic.ttf') format('truetype'),\n url('../fonts/Noto-Sans-italic/Noto-Sans-italic.svg#NotoSans') format('svg');\n}\n\n@font-face {\n font-family: 'Noto Sans';\n font-weight: 700;\n font-style: italic;\n src: url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot');\n src: url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot?#iefix') format('embedded-opentype'),\n local('Noto Sans Bold Italic'),\n local('Noto-Sans-700italic'),\n url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff2') format('woff2'),\n url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff') format('woff'),\n url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.ttf') format('truetype'),\n url('../fonts/Noto-Sans-700italic/Noto-Sans-700italic.svg#NotoSans') format('svg');\n}\n",".highlight table td { padding: 5px; }\n.highlight table pre { margin: 0; }\n.highlight .cm {\n color: #999988;\n font-style: italic;\n}\n.highlight .cp {\n color: #999999;\n font-weight: bold;\n}\n.highlight .c1 {\n color: #999988;\n font-style: italic;\n}\n.highlight .cs {\n color: #999999;\n font-weight: bold;\n font-style: italic;\n}\n.highlight .c, .highlight .cd {\n color: #999988;\n font-style: italic;\n}\n.highlight .err {\n color: #a61717;\n background-color: #e3d2d2;\n}\n.highlight .gd {\n color: #000000;\n background-color: #ffdddd;\n}\n.highlight .ge {\n color: #000000;\n font-style: italic;\n}\n.highlight .gr {\n color: #aa0000;\n}\n.highlight .gh {\n color: #999999;\n}\n.highlight .gi {\n color: #000000;\n background-color: #ddffdd;\n}\n.highlight .go {\n color: #888888;\n}\n.highlight .gp {\n color: #555555;\n}\n.highlight .gs {\n font-weight: bold;\n}\n.highlight .gu {\n color: #aaaaaa;\n}\n.highlight .gt {\n color: #aa0000;\n}\n.highlight .kc {\n color: #000000;\n font-weight: bold;\n}\n.highlight .kd {\n color: #000000;\n font-weight: bold;\n}\n.highlight .kn 
{\n color: #000000;\n font-weight: bold;\n}\n.highlight .kp {\n color: #000000;\n font-weight: bold;\n}\n.highlight .kr {\n color: #000000;\n font-weight: bold;\n}\n.highlight .kt {\n color: #445588;\n font-weight: bold;\n}\n.highlight .k, .highlight .kv {\n color: #000000;\n font-weight: bold;\n}\n.highlight .mf {\n color: #009999;\n}\n.highlight .mh {\n color: #009999;\n}\n.highlight .il {\n color: #009999;\n}\n.highlight .mi {\n color: #009999;\n}\n.highlight .mo {\n color: #009999;\n}\n.highlight .m, .highlight .mb, .highlight .mx {\n color: #009999;\n}\n.highlight .sb {\n color: #d14;\n}\n.highlight .sc {\n color: #d14;\n}\n.highlight .sd {\n color: #d14;\n}\n.highlight .s2 {\n color: #d14;\n}\n.highlight .se {\n color: #d14;\n}\n.highlight .sh {\n color: #d14;\n}\n.highlight .si {\n color: #d14;\n}\n.highlight .sx {\n color: #d14;\n}\n.highlight .sr {\n color: #009926;\n}\n.highlight .s1 {\n color: #d14;\n}\n.highlight .ss {\n color: #990073;\n}\n.highlight .s {\n color: #d14;\n}\n.highlight .na {\n color: #008080;\n}\n.highlight .bp {\n color: #999999;\n}\n.highlight .nb {\n color: #0086B3;\n}\n.highlight .nc {\n color: #445588;\n font-weight: bold;\n}\n.highlight .no {\n color: #008080;\n}\n.highlight .nd {\n color: #3c5d5d;\n font-weight: bold;\n}\n.highlight .ni {\n color: #800080;\n}\n.highlight .ne {\n color: #990000;\n font-weight: bold;\n}\n.highlight .nf {\n color: #990000;\n font-weight: bold;\n}\n.highlight .nl {\n color: #990000;\n font-weight: bold;\n}\n.highlight .nn {\n color: #555555;\n}\n.highlight .nt {\n color: #000080;\n}\n.highlight .vc {\n color: #008080;\n}\n.highlight .vg {\n color: #008080;\n}\n.highlight .vi {\n color: #008080;\n}\n.highlight .nv {\n color: #008080;\n}\n.highlight .ow {\n color: #000000;\n font-weight: bold;\n}\n.highlight .o {\n color: #000000;\n font-weight: bold;\n}\n.highlight .w {\n color: #bbbbbb;\n}\n.highlight {\n background-color: #f8f8f8;\n}\n","@import \"fonts\";\n@import \"rouge-github\";\n\nbody {\n 
background-color: #fff;\n padding:50px;\n font: 14px/1.5 \"Noto Sans\", \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n color:#727272;\n font-weight:400;\n}\n\nh1, h2, h3, h4, h5, h6 {\n color:#222;\n margin:0 0 20px;\n}\n\np, ul, ol, table, pre, dl {\n margin:0 0 20px;\n}\n\nh1, h2, h3 {\n line-height:1.1;\n}\n\nh1 {\n font-size:28px;\n}\n\nh2 {\n color:#393939;\n}\n\nh3, h4, h5, h6 {\n color:#494949;\n}\n\na {\n color:#267CB9;\n text-decoration:none;\n}\n\na:hover, a:focus {\n color:#069;\n font-weight: bold;\n}\n\na small {\n font-size:11px;\n color:#777;\n margin-top:-0.3em;\n display:block;\n}\n\na:hover small {\n color:#777;\n}\n\n.wrapper {\n width:860px;\n margin:0 auto;\n}\n\nblockquote {\n border-left:1px solid #e5e5e5;\n margin:0;\n padding:0 0 0 20px;\n font-style:italic;\n}\n\ncode, pre {\n font-family:Monaco, Bitstream Vera Sans Mono, Lucida Console, Terminal, Consolas, Liberation Mono, DejaVu Sans Mono, Courier New, monospace;\n color:#333;\n}\n\npre {\n padding:8px 15px;\n background: #f8f8f8;\n border-radius:5px;\n border:1px solid #e5e5e5;\n overflow-x: auto;\n}\n\ntable {\n width:100%;\n border-collapse:collapse;\n}\n\nth, td {\n text-align:left;\n padding:5px 10px;\n border-bottom:1px solid #e5e5e5;\n}\n\ndt {\n color:#444;\n font-weight:700;\n}\n\nth {\n color:#444;\n}\n\nimg {\n max-width:100%;\n}\n\nkbd {\n background-color: #fafbfc;\n border: 1px solid #c6cbd1;\n border-bottom-color: #959da5;\n border-radius: 3px;\n box-shadow: inset 0 -1px 0 #959da5;\n color: #444d56;\n display: inline-block;\n font-size: 11px;\n line-height: 10px;\n padding: 3px 5px;\n vertical-align: middle;\n}\n\nheader {\n width:270px;\n float:left;\n position:fixed;\n -webkit-font-smoothing:subpixel-antialiased;\n}\n\nul.downloads {\n list-style:none;\n height:40px;\n padding:0;\n background: #f4f4f4;\n border-radius:5px;\n border:1px solid #e0e0e0;\n width:270px;\n}\n\n.downloads li {\n width:89px;\n float:left;\n border-right:1px solid #e0e0e0;\n 
height:40px;\n}\n\n.downloads li:first-child a {\n border-radius:5px 0 0 5px;\n}\n\n.downloads li:last-child a {\n border-radius:0 5px 5px 0;\n}\n\n.downloads a {\n line-height:1;\n font-size:11px;\n color:#676767;\n display:block;\n text-align:center;\n padding-top:6px;\n height:34px;\n}\n\n.downloads a:hover, .downloads a:focus {\n color:#675C5C;\n font-weight:bold;\n}\n\n.downloads ul a:active {\n background-color:#f0f0f0;\n}\n\nstrong {\n color:#222;\n font-weight:700;\n}\n\n.downloads li + li + li {\n border-right:none;\n width:89px;\n}\n\n.downloads a strong {\n font-size:14px;\n display:block;\n color:#222;\n}\n\nsection {\n width:500px;\n float:right;\n padding-bottom:50px;\n}\n\nsmall {\n font-size:11px;\n}\n\nhr {\n border:0;\n background:#e5e5e5;\n height:1px;\n margin:0 0 20px;\n}\n\nfooter {\n width:270px;\n float:left;\n position:fixed;\n bottom:50px;\n -webkit-font-smoothing:subpixel-antialiased;\n}\n\n@media print, screen and (max-width: 960px) {\n\n div.wrapper {\n width:auto;\n margin:0;\n }\n\n header, section, footer {\n float:none;\n position:static;\n width:auto;\n }\n\n header {\n padding-right:320px;\n }\n\n section {\n border:1px solid #e5e5e5;\n border-width:1px 0;\n padding:20px 0;\n margin:0 0 20px;\n }\n\n header a small {\n display:inline;\n }\n\n header ul {\n position:absolute;\n right:50px;\n top:52px;\n }\n}\n\n@media print, screen and (max-width: 720px) {\n body {\n word-wrap:break-word;\n }\n\n header {\n padding:0;\n }\n\n header ul, header p.view {\n position:static;\n }\n\n pre, code {\n word-wrap:normal;\n }\n}\n\n@media print, screen and (max-width: 480px) {\n body {\n padding:15px;\n }\n\n .downloads {\n width:99%;\n }\n\n .downloads li, .downloads li + li + li {\n width:33%;\n }\n}\n\n@media print {\n body {\n padding:0.4in;\n font-size:12pt;\n color:#444;\n }\n}\n","@import \"jekyll-theme-minimal\";\n\n// See https://github.com/pages-themes/minimal#stylesheet for customizing theme\n\na:hover,\na:focus {\n font-weight: 
unset;\n text-decoration: underline;\n}\n"],"file":"style.css"} \ No newline at end of file diff --git a/assets/css/style.scss b/assets/css/style.scss deleted file mode 100644 index 2fc53531..00000000 --- a/assets/css/style.scss +++ /dev/null @@ -1,12 +0,0 @@ ---- ---- - -@import "{{ site.theme }}"; - -// See https://github.com/pages-themes/minimal#stylesheet for customizing theme - -a:hover, -a:focus { - font-weight: unset; - text-decoration: underline; -} diff --git a/assets/fonts/Noto-Sans-700/Noto-Sans-700.eot b/assets/fonts/Noto-Sans-700/Noto-Sans-700.eot new file mode 100755 index 00000000..03bf93fe Binary files /dev/null and b/assets/fonts/Noto-Sans-700/Noto-Sans-700.eot differ diff --git a/assets/fonts/Noto-Sans-700/Noto-Sans-700.svg b/assets/fonts/Noto-Sans-700/Noto-Sans-700.svg new file mode 100755 index 00000000..925fe474 --- /dev/null +++ b/assets/fonts/Noto-Sans-700/Noto-Sans-700.svg @@ -0,0 +1,336 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/assets/fonts/Noto-Sans-700/Noto-Sans-700.ttf b/assets/fonts/Noto-Sans-700/Noto-Sans-700.ttf new file mode 100755 index 00000000..4599e3ca Binary files /dev/null and b/assets/fonts/Noto-Sans-700/Noto-Sans-700.ttf differ diff --git a/assets/fonts/Noto-Sans-700/Noto-Sans-700.woff b/assets/fonts/Noto-Sans-700/Noto-Sans-700.woff new file mode 100755 index 00000000..9d0b78df Binary files /dev/null and b/assets/fonts/Noto-Sans-700/Noto-Sans-700.woff differ diff --git a/assets/fonts/Noto-Sans-700/Noto-Sans-700.woff2 b/assets/fonts/Noto-Sans-700/Noto-Sans-700.woff2 new file mode 100755 index 00000000..55fc44bc Binary files /dev/null and 
b/assets/fonts/Noto-Sans-700/Noto-Sans-700.woff2 differ diff --git a/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot b/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot new file mode 100755 index 00000000..cb97b2b4 Binary files /dev/null and b/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.eot differ diff --git a/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.svg b/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.svg new file mode 100755 index 00000000..abdafc0f --- /dev/null +++ b/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.svg @@ -0,0 +1,334 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.ttf b/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.ttf new file mode 100755 index 00000000..6640dbeb Binary files /dev/null and b/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.ttf differ diff --git a/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff b/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff new file mode 100755 index 00000000..209739ee Binary files /dev/null and b/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff differ diff --git a/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff2 b/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff2 new file mode 100755 index 00000000..f5525aa2 Binary files /dev/null and b/assets/fonts/Noto-Sans-700italic/Noto-Sans-700italic.woff2 differ diff --git a/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.eot b/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.eot new file mode 100755 index 00000000..a9973499 Binary 
files /dev/null and b/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.eot differ diff --git a/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.svg b/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.svg new file mode 100755 index 00000000..dcd8fc89 --- /dev/null +++ b/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.svg @@ -0,0 +1,337 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.ttf b/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.ttf new file mode 100755 index 00000000..7f75a2d9 Binary files /dev/null and b/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.ttf differ diff --git a/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.woff b/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.woff new file mode 100755 index 00000000..6dce67ce Binary files /dev/null and b/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.woff differ diff --git a/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.woff2 b/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.woff2 new file mode 100755 index 00000000..a9c14c49 Binary files /dev/null and b/assets/fonts/Noto-Sans-italic/Noto-Sans-italic.woff2 differ diff --git a/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.eot b/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.eot new file mode 100755 index 00000000..15fc8bfc Binary files /dev/null and b/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.eot differ diff --git a/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.svg b/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.svg new file mode 100755 index 00000000..bd2894d6 --- /dev/null +++ 
b/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.svg @@ -0,0 +1,335 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.ttf b/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.ttf new file mode 100755 index 00000000..a83bbf9f Binary files /dev/null and b/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.ttf differ diff --git a/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.woff b/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.woff new file mode 100755 index 00000000..17c85006 Binary files /dev/null and b/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.woff differ diff --git a/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.woff2 b/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.woff2 new file mode 100755 index 00000000..a87d9cd7 Binary files /dev/null and b/assets/fonts/Noto-Sans-regular/Noto-Sans-regular.woff2 differ diff --git a/assets/img/logo.png b/assets/img/logo.png new file mode 100644 index 00000000..93e608e4 Binary files /dev/null and b/assets/img/logo.png differ diff --git a/assets/js/scale.fix.js b/assets/js/scale.fix.js new file mode 100644 index 00000000..911d33c3 --- /dev/null +++ b/assets/js/scale.fix.js @@ -0,0 +1,27 @@ +(function(document) { + var metas = document.getElementsByTagName('meta'), + changeViewportContent = function(content) { + for (var i = 0; i < metas.length; i++) { + if (metas[i].name == "viewport") { + metas[i].content = content; + } + } + }, + initialize = function() { + changeViewportContent("width=device-width, minimum-scale=1.0, maximum-scale=1.0"); + }, + gestureStart = function() { + 
changeViewportContent("width=device-width, minimum-scale=0.25, maximum-scale=1.6"); + }, + gestureEnd = function() { + initialize(); + }; + + + if (navigator.userAgent.match(/iPhone/i)) { + initialize(); + + document.addEventListener("touchstart", gestureStart, false); + document.addEventListener("touchend", gestureEnd, false); + } +})(document); diff --git a/customization/README.md b/customization/README.md deleted file mode 100644 index 41d4f950..00000000 --- a/customization/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -permalink: /customization/ ---- - -# ArchivesSpace customization and configuration - -* [Configuring ArchivesSpace](./configuration.html) -* [Adding support for additional username/password-based authentication backends](./authentication.html) -* [Configuring LDAP authentication](./ldap.html) -* [Customizing text in ArchivesSpace](./locales.html) -* [ArchivesSpace Plug-ins](./plugins.html) -* [Theming ArchivesSpace](./theming.html) -* [Managing frontend assets with Bower](./bower.html) -* [Adding custom reports](./reports.html) diff --git a/customization/authentication.html b/customization/authentication.html new file mode 100644 index 00000000..5de41ed4 --- /dev/null +++ b/customization/authentication.html @@ -0,0 +1,275 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + customization/authentication.md + +

+ +

+ + Report issue on Jira + customization/authentication.md + +

+
+
+ +

Adding support for additional username/password-based authentication backends

+ +

ArchivesSpace supports LDAP-based authentication out of the box, but you can +authenticate against other password-based user directories by defining your own +authentication handler, creating a plug-in, and configuring your ArchivesSpace +instance to use it. If you would rather not have to create your own handler, +there is a plug-in available that uses OAUTH user authentication that you can add +to your ArchivesSpace installation.

+ +

Creating a new authentication handler class to use in a plug-in

+ +

An authentication handler is just a class that implements a couple of +key methods:

+ +
    +
  • initialize(opts) – An object constructor which receives the +configuration block specified in the system’s configuration.
  • +
  • name – A zero-argument method which just returns a string that +identifies the instance of your handler. The format of this +string isn’t important: it just gets stored as a user attribute +(in the ArchivesSpace database) to make it possible to tell which +authentication source a user last successfully authenticated +against.
  • +
  • authenticate(username, password) – a method which checks +whether password is the correct password for username. If the +password is correct, returns an instance of JSONModel(:user). +Otherwise, returns nil.
  • +
+ +

A new instance of your handler will be created for each login attempt, +so there’s no need to handle concurrency in your implementation.

+ +

Your authenticate method can do whatever is required to check that +the provided password is correct, with the only constraint being that +it must return either nil or a JSONModel(:user) instance.

+ +

The JSONModel(:user) class (whose JSON schema is defined in +common/schemas/user.rb) defines the set of properties that the +system needs for a user. When you return a JSONModel(:user) object, +its values will be used to create an ArchivesSpace user (if a user by +that name didn’t exist already), or update the existing user (if they +were already known).

+ +

Note: The JSONModel(:user) class validates the values you give it +against its JSON schema and throws an JSONModel::ValidationException +if anything isn’t right. If this happens within your handler, the +exception will be logged and the authentication request will fail.

+ +

A skeleton implementation

+ +

Suppose you already have a database with a table containing users that +should be able to log in to ArchivesSpace. Below is a sketch of an +authentication handler that will connect to this database and use it +for authentication.

+ +
  # For this example we'll use the Sequel database toolkit.  Note that
+  # this isn't necessary--you could use whatever database library you
+  # like here.
+  require 'sequel'
+
+  class MyDatabaseAuth
+
+    # For easy access to the JSONModel(:user) class
+    include JSONModel
+
+
+    def initialize(definition)
+      # Store the database connection details for use at
+      # authentication time.
+      @db_url = definition[:db_url] or raise "Need a value for :db_url"
+    end
+
+
+    # Just for informational purposes.  Return a string containing our
+    # database URL.
+    def name
+      "MyDatabaseAuth - #{@db_url}"
+    end
+
+
+    def authenticate(username, password)
+      # Open a connection to the database
+      Sequel.connect(@db_url) do |db|
+
+        # Check whether we have an entry for the given username
+        # and password in our database's "users" table
+        user = db[:users].filter(:username => username,
+                                 :password => password).
+                          first
+
+        if !user
+          # The user couldn't be found, or their password was wrong.
+          # Authentication failed.
+          return nil
+        end
+
+        # Build and return a JSONModel(:user) instance from fields in the database
+        JSONModel(:user).from_hash(:username => username,
+                                   :name => user[:user_full_name])
+
+      end
+    end
+
+  end
+
+ +

In order to use your new authentication handler, you’ll need to add it to the plug-in +architecture in ArchivesSpace and enable it. Create a new directory, called our_auth +perhaps, in the plugins directory of your ArchivesSpace installation. Inside +that directory create this directory hierarchy backend/model/ and place the +new class file there. Next, configure the new handler.

+ +

Modifying your configuration

+ +

To have ArchivesSpace invoke your new authentication handler, just add +a new entry to the :authentication_sources configuration block in the +config/config.rb file.

+ +

A configuration for the above example might be as follows:

+ +
 AppConfig[:authentication_sources] = [{
+                                         :model => 'MyDatabaseAuth',
+                                         :db_url => 'jdbc:mysql://localhost:3306/somedb?user=myuser&password=mypassword',
+                                       }]
+
+ +

Add the plug-in to the list of plug-ins already enabled

+ +

In the config/config.rb file, find the setting of AppConfig[:plugins] and add +a reference to the new plug-in there. For example, if you named it our_auth, the +AppConfig[:plugins] setting may look something like this:

+ +

AppConfig[:plugins] = [‘local’, ‘hello_world’, ‘our_auth’]

+ +

Restart your ArchivesSpace installation and you should now see authentication +requests hitting your new handler.

+ + +
+ +
+ + + diff --git a/customization/authentication.md b/customization/authentication.md deleted file mode 100644 index ac48c61e..00000000 --- a/customization/authentication.md +++ /dev/null @@ -1,132 +0,0 @@ -# Adding support for additional username/password-based authentication backends - -ArchivesSpace supports LDAP-based authentication out of the box, but you can -authenticate against other password-based user directories by defining your own -authentication handler, creating a plug-in, and configuring your ArchivesSpace -instance to use it. If you would rather not have to create your own handler, -there is a [plug-in](https://github.com/lyrasis/aspace-oauth) available that uses OAUTH user authentication that you can add -to your ArchivesSpace installation. - -## Creating a new authentication handler class to use in a plug-in - -An authentication handler is just a class that implements a couple of -key methods: - - * `initialize(opts)` -- An object constructor which receives the - configuration block specified in the system's configuration. - * `name` -- A zero-argument method which just returns a string that - identifies the instance of your handler. The format of this - string isn't important: it just gets stored as a user attribute - (in the ArchivesSpace database) to make it possible to tell which - authentication source a user last successfully authenticated - against. - * `authenticate(username, password)` -- a method which checks - whether `password` is the correct password for `username`. If the - password is correct, returns an instance of `JSONModel(:user)`. - Otherwise, returns `nil`. - -A new instance of your handler will be created for each login attempt, -so there's no need to handle concurrency in your implementation. - -Your `authenticate` method can do whatever is required to check that -the provided password is correct, with the only constraint being that -it must return either `nil` or a `JSONModel(:user)` instance. 
- -The `JSONModel(:user)` class (whose JSON schema is defined in -`common/schemas/user.rb`) defines the set of properties that the -system needs for a user. When you return a `JSONModel(:user)` object, -its values will be used to create an ArchivesSpace user (if a user by -that name didn't exist already), or update the existing user (if they -were already known). - -**Note**: `The JSONModel(:user)` class validates the values you give it -against its JSON schema and throws an `JSONModel::ValidationException` -if anything isn't right. If this happens within your handler, the -exception will be logged and the authentication request will fail. - -### A skeleton implementation - -Suppose you already have a database with a table containing users that -should be able to log in to ArchivesSpace. Below is a sketch of an -authentication handler that will connect to this database and use it -for authentication. - - # For this example we'll use the Sequel database toolkit. Note that - # this isn't necessary--you could use whatever database library you - # like here. - require 'sequel' - - class MyDatabaseAuth - - # For easy access to the JSONModel(:user) class - include JSONModel - - - def initialize(definition) - # Store the database connection details for use at - # authentication time. - @db_url = definition[:db_url] or raise "Need a value for :db_url" - end - - - # Just for informational purposes. Return a string containing our - # database URL. - def name - "MyDatabaseAuth - #{@db_url}" - end - - - def authenticate(username, password) - # Open a connection to the database - Sequel.connect(@db_url) do |db| - - # Check whether we have an entry for the given username - # and password in our database's "users" table - user = db[:users].filter(:username => username, - :password => password). - first - - if !user - # The user couldn't be found, or their password was wrong. - # Authentication failed. 
- return nil - end - - # Build and return a JSONModel(:user) instance from fields in the database - JSONModel(:user).from_hash(:username => username, - :name => user[:user_full_name]) - - end - end - - end - -In order to use your new authentication handler, you'll need to add it to the plug-in -architecture in ArchivesSpace and enable it. Create a new directory, called our_auth -perhaps, in the plugins directory of your ArchivesSpace installation. Inside -that directory create this directory hierarchy `backend/model/` and place the -new class file there. Next, configure the new handler. - -## Modifying your configuration - -To have ArchivesSpace invoke your new authentication handler, just add -a new entry to the `:authentication_sources` configuration block in the -`config/config.rb` file. - -A configuration for the above example might be as follows: - - AppConfig[:authentication_sources] = [{ - :model => 'MyDatabaseAuth', - :db_url => 'jdbc:mysql://localhost:3306/somedb?user=myuser&password=mypassword', - }] - -## Add the plug-in to the list of plug-ins already enabled - -In the `config/config.rb` file, find the setting of AppConfig[:plugins] and add -a reference to the new plug-in there. For example, if you named it our_auth, the -AppConfig[:plugins] setting may look something like this: - -AppConfig[:plugins] = ['local', 'hello_world', 'our_auth'] - -Restart your ArchivesSpace installation and you should now see authentication -requests hitting your new handler. diff --git a/customization/bower.html b/customization/bower.html new file mode 100644 index 00000000..411f6cc8 --- /dev/null +++ b/customization/bower.html @@ -0,0 +1,200 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + customization/bower.md + +

+ +

+ + Report issue on Jira + customization/bower.md + +

+
+
+ +

Managing frontend assets with Bower

+ +

This is aimed at developers and applies to the ‘frontend’ application only.

+ +

If you wish to add static assets to the core project (i.e., javascript, css, +less files) please use bower to add and install them so we know what’s what +and when to upgrade.

+ +

If you wish to do a good deed for ArchivesSpace you can track down the source +of any vendor assets not included in bower.json and get them updated and +installed according to this protocol.

+ +

General Setup

+ +

Step 1: install npm

+ +

On OSX, for example:

+ +
brew install npm
+
+ +

Step 2: install Bower

+ +
npm install bower -g
+
+ +

Step 3: install components

+ +
bower install
+
+ +

Adding a static asset to ASpace Frontend (Staff UI)

+ +

Step 1: add the component

+ +
bower install <PACKAGE NAME> --save
+
+ +

Step 2: map Bower > Rails

+ +
Edit the bower.json file to map the assets you want from bower_components
+to assets. See examples in bower.json
+This is kind of a hack to workaround:
+https://github.com/blittle/bower-installer/issues/75
+
+ +

Step 3: Install assets

+ +
alias npm-exec='PATH=$(npm bin):$PATH'
+npm-exec bower-installer
+
+ +

Step 4: Check assets in

+ +

Check the installed assets into Git. We version control bower.json and the +installed files, but not the bower_components directory.

+ +

Production!

+ +

Don’t forget - if you are adding assets that don’t have a .js extension, you +need to add them to frontend/config/environments/production.rb

+ + +
+ +
+ + + diff --git a/customization/bower.md b/customization/bower.md deleted file mode 100644 index b498f7a5..00000000 --- a/customization/bower.md +++ /dev/null @@ -1,55 +0,0 @@ -# Managing frontend assets with Bower - -This is aimed at developers and applies to the 'frontend' application only. - -If you wish to add static assets to the core project (i.e., javascript, css, -less files) please use `bower` to add and install them so we know what's what -and when to upgrade. - -If you wish to do a good deed for ArchivesSpace you can track down the source -of any vendor assets not included in bower.json and get them updated and -installed according to this protocol. - -## General Setup - -### Step 1: install npm - -On OSX, for example: - - brew install npm - -### Step 2: install Bower - - npm install bower -g - -### Step 3: install components - - bower install - -## Adding a static asset to ASpace Frontend (Staff UI) - -### Step 1: add the component - - bower install --save - -### Step 2: map Bower > Rails - - Edit the bower.json file to map the assets you want from bower_components - to assets. See examples in bower.json - This is kind of a hack to workaround: - https://github.com/blittle/bower-installer/issues/75 - -### Step 3: Install assets - - alias npm-exec='PATH=$(npm bin):$PATH' - npm-exec bower-installer - -### Step 4: Check assets in - -Check the installed assets into Git. We version control bower.json and the -installed files, but not the bower_components directory. - -### Production! - -Don't forget - if you are adding assets that don't have a .js extension, you -need to add them to frontend/config/environments/production.rb diff --git a/customization/configuration.html b/customization/configuration.html new file mode 100644 index 00000000..d040a526 --- /dev/null +++ b/customization/configuration.html @@ -0,0 +1,1427 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + customization/configuration.md + +

+ +

+ + Report issue on Jira + customization/configuration.md + +

+
+
+ +

Configuring ArchivesSpace

+ +

The primary configuration for ArchivesSpace is done in the config/config.rb +file. By default, this file contains the default settings, which are indicated +by commented out lines ( indicated by the “#” in the file ). You can adjust these +settings by adding new lines that change the default and restarting +ArchivesSpace. Be sure that your new settings are not commented out +( i.e. do NOT start with a “#” ), otherwise the settings will not take effect.

+ +

Commonly changed settings

+ +

Database config

+ +

:db_url

+ +

Set your database name and credentials. The default specifies that the embedded database should be used. +It is recommended to use a MySQL database instead of the embedded database. +For more info, see +Running ArchivesSpace against MySQL

+ +

This is an example of specifying MySQL credentials:

+ +

AppConfig[:db_url] = "jdbc:mysql://127.0.0.1:3306/aspace?useUnicode=true&characterEncoding=UTF-8&user=as&password=as123"

+ +

:db_max_connections

+ +

Set the maximum number of database connections used by the application. +Default is derived from the number of indexer threads.

+ +

AppConfig[:db_max_connections] = proc { 20 + (AppConfig[:indexer_thread_count] * 2) }

+ +

URLs for ArchivesSpace components

+ +

Set the ArchivesSpace backend port. The backend listens on port 8089 by default.

+ +

AppConfig[:backend_url] = "http://localhost:8089"

+ +

Set the ArchivesSpace staff interface (frontend) port. The staff interface listens on port 8080 by default.

+ +

AppConfig[:frontend_url] = "http://localhost:8080"

+ +

Set the ArchivesSpace public interface port. The public interface listens on port 8081 by default.

+ +

AppConfig[:public_url] = "http://localhost:8081"

+ +

Set the ArchivesSpace OAI server port. The OAI server listens on port 8082 by default.

+ +

AppConfig[:oai_url] = "http://localhost:8082"

+ +

Set the ArchivesSpace Solr index port. The Solr server listens on port 8090 by default.

+ +

AppConfig[:solr_url] = "http://localhost:8090"

+ +

Set the ArchivesSpace indexer port. The indexer listens on port 8091 by default.

+ +

AppConfig[:indexer_url] = "http://localhost:8091"

+ +

Set the ArchivesSpace API documentation port. The API documentation listens on port 8888 by default.

+ +

AppConfig[:docs_url] = "http://localhost:8888"

+ +

Enabling ArchivesSpace components

+ +

Enable or disable specific components by setting the following settings to true or false (defaults to true):

+ +
AppConfig[:enable_backend] = true
+AppConfig[:enable_frontend] = true
+AppConfig[:enable_public] = true
+AppConfig[:enable_solr] = true
+AppConfig[:enable_indexer] = true
+AppConfig[:enable_docs] = true
+AppConfig[:enable_oai] = true
+
+ +

Application logging

+ +

By default, all logging will be output on the screen while the archivesspace command +is running. When running as a daemon/service, this is put into a file in +logs/archivesspace.out. You can route log output to a different file per component by changing the log value to +a filepath that archivesspace has write access to.

+ +

You can also set the logging level for each component. Valid values are:

+ +
    +
  • debug (everything)
  • +
  • info
  • +
  • warn
  • +
  • error
  • +
  • fatal (severe only)
  • +
+ +

AppConfig[:frontend_log]

+ +

File for log output for the frontend (staff interface). Set to “default” to +route log output to archivesspace.out.

+ +

#AppConfig[:frontend_log_level]

+ +

Logging level for the frontend.

+ +

AppConfig[:backend_log]

+ +

File for log output for the backend. Set to “default” to +route log output to archivesspace.out.

+ +

#AppConfig[:backend_log_level]

+ +

Logging level for the backend.

+ +

AppConfig[:pui_log]

+ +

File for log output for the public UI. Set to “default” to +route log output to archivesspace.out.

+ +

#AppConfig[:pui_log_level]

+ +

Logging level for the public UI.

+ +

AppConfig[:indexer_log]

+ +

File for log output for the indexer. Set to “default” to +route log output to archivesspace.out.

+ +

#AppConfig[:indexer_log_level]

+ +

Logging level for the indexer.

+ +

Database logging

+ +

AppConfig[:db_debug_log]

+ +

Set to true to log all SQL statements. +Note that this will have a performance impact!

+ +

AppConfig[:db_debug_log] = false

+ +

AppConfig[:mysql_binlog]

+ +

Set to true if you have enabled MySQL binary logging.

+ +

AppConfig[:mysql_binlog] = false

+ +

Solr backups

+ +

AppConfig[:solr_backup_schedule]

+ +

Set Solr back up schedule. By default, Solr backups will run at midnight. See https://crontab.guru/ for + information about the schedule syntax.

+ +

AppConfig[:solr_backup_schedule] = "0 * * * *"

+ +

AppConfig[:solr_backup_number_to_keep]

+ +

Number of Solr backups to keep (default = 1)

+ +

AppConfig[:solr_backup_number_to_keep] = 1

+ +

AppConfig[:solr_backup_directory]

+ +

Directory to store Solr backups.

+ +

AppConfig[:solr_backup_directory] = proc { File.join(AppConfig[:data_directory], "solr_backups") }

+ +

Default Solr params

+ +

AppConfig[:solr_params]

+ +

Add default solr params.

+ +

A simple example: use AND for search:

+ +

AppConfig[:solr_params] = { "q.op" => "AND" }

+ +

A more complex example: set the boost query value (bq) to boost the relevancy +for the query string in the title, set the phrase fields parameter (pf) to boost +the relevancy for the title when the query terms are in close proximity to each +other, and set the phrase slop (ps) parameter for the pf parameter to indicate +how close the proximity should be:

+ +
AppConfig[:solr_params] = {
+  "bq" => proc { "title:\"#{@query_string}\"*" },
+  "pf" => 'title^10',
+  "ps" => 0,
+}
+
+ +

Language

+ +

AppConfig[:locale]

+ +

Set the application’s language (see the .yml files in +https://github.com/archivesspace/archivesspace/tree/master/common/locales +for a list of available locale codes). Default is English (:en):

+ +

AppConfig[:locale] = :en

+ +

Plugin registration

+ +

AppConfig[:plugins]

+ +

Plug-ins to load. They will load in the order specified.

+ +

AppConfig[:plugins] = ['local', 'lcnaf']

+ +

Thread count

+ +

AppConfig[:job_thread_count]

+ +

The number of concurrent threads available to run background jobs. +Introduced because long running jobs were blocking the queue. +Resist the urge to set this to a big number!

+ +

AppConfig[:job_thread_count] = 2

+ +

OAI configuration options

+ +

AppConfig[:oai_proxy_url]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:oai_proxy_url] = 'http://your-public-oai-url.example.com'

+ +

NOTE: As of version 2.5.2, the following parameters (oai_repository_name, oai_record_prefix, and oai_admin_email) have been deprecated. They should be set in the Staff User Interface. To set them, select the System menu in the Staff User Interface and then select Manage OAI-PMH Settings. These three settings are at the top of the page in the General Settings section. These settings will be completely removed from the config file when version 2.6.0 is released.

+ +

AppConfig[:oai_repository_name]

+ +

AppConfig[:oai_repository_name] = 'ArchivesSpace OAI Provider'

+ +

AppConfig[:oai_record_prefix]

+ +

AppConfig[:oai_record_prefix] = 'oai:archivesspace'

+ +

AppConfig[:oai_admin_email]

+ +

AppConfig[:oai_admin_email] = 'admin@example.com'

+ +

AppConfig[:oai_sets]

+ +

In addition to the sets based on level of description, you can define OAI Sets +based on repository codes and/or sponsors as follows:

+ +
AppConfig[:oai_sets] = {
+  'repository_set' => {
+    :repo_codes => ['hello626'],
+    :description => "A set of one or more repositories",
+  },
+
+  'sponsor_set' => {
+    :sponsors => ['The_Sponsor'],
+    :description => "A set of one or more sponsors",
+  },
+}
+
+ +

Other less commonly changed settings

+ +

Default admin password

+ +

AppConfig[:default_admin_password]

+ +

Set default admin password. Default password is “admin”.

+ +

#AppConfig[:default_admin_password] = "admin"

+ +

Data directories

+ +

AppConfig[:data_directory]

+ +

If you run ArchivesSpace using the standard scripts (archivesspace.sh, +archivesspace.bat or as a Windows service), the value of :data_directory is +automatically set to be the “data” directory of your ArchivesSpace +distribution. You don’t need to change this value unless you specifically +want ArchivesSpace to put its data files elsewhere.

+ +

AppConfig[:data_directory] = File.join(Dir.home, "ArchivesSpace")

+ +

AppConfig[:backup_directory]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:backup_directory] = proc { File.join(AppConfig[:data_directory], "demo_db_backups") }

+ +

AppConfig[:solr_index_directory]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:solr_index_directory] = proc { File.join(AppConfig[:data_directory], "solr_index") }

+ +

AppConfig[:solr_home_directory]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:solr_home_directory] = proc { File.join(AppConfig[:data_directory], "solr_home") }

+ +

Solr defaults

+ +

AppConfig[:solr_indexing_frequency_seconds]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:solr_indexing_frequency_seconds] = 30

+ +

AppConfig[:solr_facet_limit]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:solr_facet_limit] = 100

+ +

AppConfig[:default_page_size]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:default_page_size] = 10

+ +

AppConfig[:max_page_size]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:max_page_size] = 250

+ + + +

AppConfig[:cookie_prefix]

+ +

A prefix added to cookies used by the application. +Change this if you’re running more than one instance of ArchivesSpace on the +same hostname (i.e. multiple instances on different ports). +Default is “archivesspace”.

+ +

AppConfig[:cookie_prefix] = "archivesspace"

+ +

Indexer settings

+ +

The periodic indexer can run using multiple threads to take advantage of +multiple CPU cores. By setting these two options, you can control how many +CPU cores are used, and the amount of memory that will be consumed by the +indexing process (more cores and/or more records per thread means more memory used).

+ +

AppConfig[:indexer_records_per_thread]

+ +

AppConfig[:indexer_records_per_thread] = 25

+ +

AppConfig[:indexer_thread_count]

+ +

AppConfig[:indexer_thread_count] = 4

+ +

AppConfig[:indexer_solr_timeout_seconds]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:indexer_solr_timeout_seconds] = 300

+ +

PUI Indexer Settings

+ +

AppConfig[:pui_indexer_enabled]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:pui_indexer_enabled] = true

+ +

AppConfig[:pui_indexing_frequency_seconds]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:pui_indexing_frequency_seconds] = 30

+ +

AppConfig[:pui_indexer_records_per_thread]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:pui_indexer_records_per_thread] = 25

+ +

AppConfig[:pui_indexer_thread_count]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:pui_indexer_thread_count] = 1

+ +

Index state

+ +

AppConfig[:index_state_class]

+ +

Set to ‘IndexStateS3’ for amazon s3

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:index_state_class] = 'IndexState'

+ +

AppConfig[:index_state_s3]

+ +

Store indexer state in amazon s3 (optional) +NOTE: s3 charges for read / update requests and the pui indexer is continually +writing to state files so you may want to increase pui_indexing_frequency_seconds

+ +
+

TODO - Needs more documentation

+
+ +
AppConfig[:index_state_s3] = {
+  region: ENV.fetch("AWS_REGION"),
+  aws_access_key_id: ENV.fetch("AWS_ACCESS_KEY_ID"),
+  aws_secret_access_key: ENV.fetch("AWS_SECRET_ACCESS_KEY"),
+  bucket: ENV.fetch("AWS_ASPACE_BUCKET"),
+  prefix: proc { "#{AppConfig[:cookie_prefix]}_" },
+}
+
+ +

Misc. database options

+ +

AppConfig[:allow_other_unmapped]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:allow_other_unmapped] = false

+ +

AppConfig[:db_url_redacted]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:db_url_redacted] = proc { AppConfig[:db_url].gsub(/(user|password)=(.*?)(&|$)/, '\1=[REDACTED]\3') }

+ +

AppConfig[:demo_db_backup_schedule]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:demo_db_backup_schedule] = "0 4 * * *"

+ +

AppConfig[:allow_unsupported_database]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:allow_unsupported_database] = false

+ +

AppConfig[:allow_non_utf8_mysql_database]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:allow_non_utf8_mysql_database] = false

+ +

AppConfig[:demo_db_backup_number_to_keep]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:demo_db_backup_number_to_keep] = 7

+ +

Proxy URLs

+ +

If you are serving user-facing applications via proxy +(i.e., another domain or port, or via https, or for a prefix) it is +recommended that you record those URLs in your configuration

+ +

AppConfig[:frontend_proxy_url]

+ +

Proxy URL for the frontend (staff interface)

+ +

AppConfig[:frontend_proxy_url] = proc { AppConfig[:frontend_url] }

+ +

AppConfig[:public_proxy_url]

+ +

Proxy URL for the public interface

+ +

AppConfig[:public_proxy_url] = proc { AppConfig[:public_url] }

+ +

AppConfig[:frontend_proxy_prefix]

+ +

Don’t override this setting unless you know what you’re doing

+ +

AppConfig[:frontend_proxy_prefix] = proc { "#{URI(AppConfig[:frontend_proxy_url]).path}/".gsub(%r{/+$}, "/") }

+ +

AppConfig[:public_proxy_prefix]

+ +

Don’t override this setting unless you know what you’re doing

+ +

AppConfig[:public_proxy_prefix] = proc { "#{URI(AppConfig[:public_proxy_url]).path}/".gsub(%r{/+$}, "/") }

+ +

Enable component applications

+ +

Setting any of these false will prevent the associated applications from starting. +Temporarily disabling the frontend and public UIs and/or the indexer may help users +who are running into memory-related issues during migration.

+ +

AppConfig[:enable_backend]

+ +

AppConfig[:enable_backend] = true

+ +

AppConfig[:enable_frontend]

+ +

AppConfig[:enable_frontend] = true

+ +

AppConfig[:enable_public]

+ +

AppConfig[:enable_public] = true

+ +

AppConfig[:enable_solr]

+ +

AppConfig[:enable_solr] = true

+ +

AppConfig[:enable_indexer]

+ +

AppConfig[:enable_indexer] = true

+ +

AppConfig[:enable_docs]

+ +

AppConfig[:enable_docs] = true

+ +

AppConfig[:enable_oai]

+ +

AppConfig[:enable_oai] = true

+ +

Jetty shutdown

+ +

Some use cases want the ability to shutdown the Jetty service using Jetty’s +ShutdownHandler, which allows a POST request to a specific URI to signal +server shutdown. The prefix for this URI path is set to /xkcd to reduce the +possibility of a collision in the path configuration. So, full path would be

+ +

/xkcd/shutdown?token={randomly generated password}

+ +

The launcher creates a password to use this, which is stored +in the data directory. This is not turned on by default.

+ +

AppConfig[:use_jetty_shutdown_handler]

+ +

AppConfig[:use_jetty_shutdown_handler] = false

+ +

AppConfig[:jetty_shutdown_path]

+ +

AppConfig[:jetty_shutdown_path] = "/xkcd"

+ +

Managing multiple backend instances

+ +

If you have multiple instances of the backend running behind a load +balancer, list the URL of each backend instance here. This is used by the +real-time indexing, which needs to connect directly to each running +instance.

+ +

By default we assume you’re not using a load balancer, so we just connect +to the regular backend URL.

+ +

AppConfig[:backend_instance_urls]

+ +

AppConfig[:backend_instance_urls] = proc { [AppConfig[:backend_url]] }

+ +

Theme

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:frontend_theme]

+ +

AppConfig[:frontend_theme] = "default"

+ +

AppConfig[:public_theme]

+ +

AppConfig[:public_theme] = "default"

+ +

Session expiration

+ +

AppConfig[:session_expire_after_seconds]

+ +

Sessions marked as expirable will timeout after this number of seconds of inactivity

+ +

AppConfig[:session_expire_after_seconds] = 3600

+ +

AppConfig[:session_nonexpirable_force_expire_after_seconds]

+ +

Sessions marked as non-expirable will eventually expire too, but after a longer period.

+ +

AppConfig[:session_nonexpirable_force_expire_after_seconds] = 604800

+ +

System usernames

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:search_username]

+ +

AppConfig[:search_username] = "search_indexer"

+ +

AppConfig[:public_username]

+ +

AppConfig[:public_username] = "public_anonymous"

+ +

AppConfig[:staff_username]

+ +

AppConfig[:staff_username] = "staff_system"

+ +

Authentication sources

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:authentication_sources]

+ +

AppConfig[:authentication_sources] = []

+ +

Misc. backlog and snapshot settings

+ +

AppConfig[:realtime_index_backlog_ms]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:realtime_index_backlog_ms] = 60000

+ +

AppConfig[:notifications_backlog_ms]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:notifications_backlog_ms] = 60000

+ +

AppConfig[:notifications_poll_frequency_ms]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:notifications_poll_frequency_ms] = 1000

+ +

AppConfig[:max_usernames_per_source]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:max_usernames_per_source] = 50

+ +

AppConfig[:demodb_snapshot_flag]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:demodb_snapshot_flag] = proc { File.join(AppConfig[:data_directory], "create_demodb_snapshot.txt") }

+ +

Report Configuration

+ +

AppConfig[:report_page_layout]

+ +

Uses valid values for the CSS3 @page directive’s size property: +http://www.w3.org/TR/css3-page/#page-size-prop

+ +

AppConfig[:report_page_layout] = "letter"

+ +

AppConfig[:report_pdf_font_paths]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:report_pdf_font_paths] = proc { ["#{AppConfig[:backend_url]}/reports/static/fonts/dejavu/DejaVuSans.ttf"] }

+ +

AppConfig[:report_pdf_font_family]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:report_pdf_font_family] = "\"DejaVu Sans\", sans-serif"

+ +

Plugins directory

+ +

AppConfig[:plugins_directory]

+ +

By default, the plugins directory will be in your ASpace Home. +If you want to override that, update this with an absolute path

+ +

AppConfig[:plugins_directory] = "plugins"

+ +

Feedback

+ +

AppConfig[:feedback_url]

+ +

URL to direct the feedback link. +You can remove this from the footer by making the value blank.

+ +

AppConfig[:feedback_url] = "http://archivesspace.org/contact"

+ +

User registration

+ +

AppConfig[:allow_user_registration]

+ +

Allow an unauthenticated user to create an account

+ +

AppConfig[:allow_user_registration] = true

+ +

Help Configuration

+ +

AppConfig[:help_enabled]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:help_enabled] = true

+ +

AppConfig[:help_url]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:help_url] = "https://archivesspace.atlassian.net/wiki/spaces/ArchivesSpaceUserManual/overview"

+ +

AppConfig[:help_topic_base_url]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:help_topic_base_url] = "https://archivesspace.atlassian.net/wiki/spaces/ArchivesSpaceUserManual/pages/"

+ +

Shared storage

+ +

AppConfig[:shared_storage]

+ +

AppConfig[:shared_storage] = proc { File.join(AppConfig[:data_directory], "shared") }

+ +

Background jobs

+ +

AppConfig[:job_file_path]

+ +

Formerly known as :import_job_path

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:job_file_path] = proc { AppConfig.has_key?(:import_job_path) ? AppConfig[:import_job_path] : File.join(AppConfig[:shared_storage], "job_files") }

+ +

AppConfig[:job_poll_seconds]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:job_poll_seconds] = proc { AppConfig.has_key?(:import_poll_seconds) ? AppConfig[:import_poll_seconds] : 5 }

+ +

AppConfig[:job_timeout_seconds]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:job_timeout_seconds] = proc { AppConfig.has_key?(:import_timeout_seconds) ? AppConfig[:import_timeout_seconds] : 300 }

+ +

AppConfig[:jobs_cancelable]

+ +

By default, only allow jobs to be cancelled if we’re running against MySQL (since we can rollback)

+ +

AppConfig[:jobs_cancelable] = proc { (AppConfig[:db_url] != AppConfig.demo_db_url).to_s }

+ +

Locations

+ +

AppConfig[:max_location_range]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:max_location_range] = 1000

+ +

Schema Info check

+ +

AppConfig[:ignore_schema_info_check]

+ +

ASpace backend will not start if the db’s schema_info version is not set +correctly for this version of ASPACE. This is to ensure that all the +migrations have run and completed before starting the app. You can override +this check here. Do so at your own peril.

+ +

AppConfig[:ignore_schema_info_check] = false

+ +

Demo data

+ +

AppConfig[:demo_data_url]

+ +

This is a URL that points to some demo data that can be used for testing, +teaching, etc. To use this, set an OS environment variable of ASPACE_DEMO = true

+ +

AppConfig[:demo_data_url] = "https://s3-us-west-2.amazonaws.com/archivesspacedemo/latest-demo-data.zip"

+ +

External IDs

+ +

AppConfig[:show_external_ids]

+ +

Expose external ids in the frontend

+ +

AppConfig[:show_external_ids] = false

+ +

Jetty request/response buffer

+ +

Set the allowed size of the request/response header that Jetty will accept +(anything bigger gets a 403 error). Note if you want to jack this size up, +you will also have to configure your Nginx/Apache as well if you’re using that

+ +

AppConfig[:jetty_response_buffer_size_bytes]

+ +

AppConfig[:jetty_response_buffer_size_bytes] = 64 * 1024

+ +

AppConfig[:jetty_request_buffer_size_bytes]

+ +

AppConfig[:jetty_request_buffer_size_bytes] = 64 * 1024

+ +

Container management configuration fields

+ +

AppConfig[:container_management_barcode_length]

+ +

Defines global and repo-level barcode validations (validating on length only). +Barcodes that have either no value, or a value between :min and :max, will validate on save. +Set global constraints via :system_default, and use the repo_code value for repository-level constraints. +Note that :system_default will always inherit down its values when possible.

+ +

AppConfig[:container_management_barcode_length] = {:system_default => {:min => 5, :max => 10}, 'repo' => {:min => 9, :max => 12}, 'other_repo' => {:min => 9, :max => 9} }

+ +

AppConfig[:container_management_extent_calculator]

+ +

Globally defines the behavior of the extent calculator. +Use :report_volume (true/false) to define whether space should be reported in cubic +or linear dimensions. +Use :unit (:feet, :inches, :meters, :centimeters) to define the unit which the calculator +reports extents in. +Use :decimal_places to define how many decimal places the calculator should return.

+ +

Example:

+ +

AppConfig[:container_management_extent_calculator] = { :report_volume => true, :unit => :feet, :decimal_places => 3 }

+ +

Record inheritance in public interface

+ +

AppConfig[:record_inheritance]

+ +

Define the fields for a record type that are inherited from ancestors +if they don’t have a value in the record itself. +This is used in common/record_inheritance.rb and was developed to support +the new public UI application. +Note - any changes to record_inheritance config will require a reindex of pui +records to take effect. To do this remove files from indexer_pui_state

+ +
AppConfig[:record_inheritance] = {
+  :archival_object => {
+    :inherited_fields => [
+      {
+        :property => 'title',
+        :inherit_directly => true
+      },
+      {
+        :property => 'component_id',
+        :inherit_directly => false
+      },
+      {
+        :property => 'language',
+        :inherit_directly => true
+      },
+      {
+        :property => 'dates',
+        :inherit_directly => true
+      },
+      {
+        :property => 'extents',
+        :inherit_directly => false
+      },
+      {
+        :property => 'linked_agents',
+        :inherit_if => proc {|json| json.select {|j| j['role'] == 'creator'} },
+        :inherit_directly => false
+      },
+      {
+        :property => 'notes',
+        :inherit_if => proc {|json| json.select {|j| j['type'] == 'accessrestrict'} },
+        :inherit_directly => true
+      },
+      {
+        :property => 'notes',
+        :inherit_if => proc {|json| json.select {|j| j['type'] == 'scopecontent'} },
+        :inherit_directly => false
+      },
+      {
+        :property => 'notes',
+        :inherit_if => proc {|json| json.select {|j| j['type'] == 'langmaterial'} },
+        :inherit_directly => false
+      },
+    ]
+  }
+}
+
+ +

To enable composite identifiers - added to the merged record in a property +\_composite_identifier

+ +

The values for :include_level and :identifier_delimiter shown here are the defaults

+ +

If :include_level is set to true then level values (eg Series) will be included in \_composite_identifier

+ +

The :identifier_delimiter is used when joining the four part identifier for resources

+ +
AppConfig[:record_inheritance][:archival_object][:composite_identifiers] = {
+  :include_level => false,
+  :identifier_delimiter => ' '
+}
+
+ +

To configure additional elements to be inherited use this pattern in your config

+ +
AppConfig[:record_inheritance][:archival_object][:inherited_fields] <<
+  {
+    :property => 'linked_agents',
+    :inherit_if => proc {|json| json.select {|j| j['role'] == 'subject'} },
+    :inherit_directly => true
+  }
+
+ +

… or use this pattern to add many new elements at once

+ +
AppConfig[:record_inheritance][:archival_object][:inherited_fields].concat(
+  [
+    {
+      :property => 'subjects',
+      :inherit_if => proc {|json|
+        json.select {|j|
+          ! j['_resolved']['terms'].select { |t| t['term_type'] == 'topical'}.empty? }
+        },
+      :inherit_directly => true
+    },
+    {
+      :property => 'external_documents',
+      :inherit_directly => false
+    },
+    {
+      :property => 'rights_statements',
+      :inherit_directly => false
+    },
+    {
+      :property => 'instances',
+      :inherit_directly => false
+    },
+  ])
+
+ +

If you want to modify any of the default rules, the safest approach is to uncomment the entire default record_inheritance config and make your changes. For example, to stop scopecontent notes from being inherited into file or item records, uncomment the entire record_inheritance default config above, and add a skip_if clause to the scopecontent rule, like this:

+ +
  {
+    :property => 'notes',
+    :skip_if => proc {|json| ['file', 'item'].include?(json['level']) },
+    :inherit_if => proc {|json| json.select {|j| j['type'] == 'scopecontent'} },
+    :inherit_directly => false
+  },
+
+ +

PUI Configurations

+ +

AppConfig[:pui_search_results_page_size]

+ +

AppConfig[:pui_search_results_page_size] = 10

+ +

AppConfig[:pui_branding_img]

+ +

AppConfig[:pui_branding_img] = 'archivesspace.small.png'

+ +

AppConfig[:pui_block_referrer]

+ +

AppConfig[:pui_block_referrer] = true # patron privacy; blocks full 'referer' when going outside the domain

+ +

AppConfig[:pui_max_concurrent_pdfs]

+ +

The number of PDFs we’ll generate (in the background) at the same time.

+ +

PDF generation can be a little memory intensive for large collections, so we set this fairly low out of the box.

+ +

AppConfig[:pui_max_concurrent_pdfs] = 2

+ +

AppConfig[:pui_pdf_timeout]

+ +

You can set this to nil or zero to prevent a timeout

+ +

AppConfig[:pui_pdf_timeout] = 600

+ +

AppConfig[:pui_hide]

+ +

AppConfig[:pui_hide] = {}

+ +

The following determine which ‘tabs’ are on the main horizontal menu:

+ +
AppConfig[:pui_hide][:repositories] = false
+AppConfig[:pui_hide][:resources] = false
+AppConfig[:pui_hide][:digital_objects] = false
+AppConfig[:pui_hide][:accessions] = false
+AppConfig[:pui_hide][:subjects] = false
+AppConfig[:pui_hide][:agents] = false
+AppConfig[:pui_hide][:classifications] = false
+AppConfig[:pui_hide][:search_tab] = false
+
+ +

The following determine globally whether the various “badges” appear on the Repository page; can be overridden at repository level below (e.g.: AppConfig[:repos][{repo_code}][:hide][:counts] = true)

+ +
AppConfig[:pui_hide][:resource_badge] = false
+AppConfig[:pui_hide][:record_badge] = true # hide by default
+AppConfig[:pui_hide][:digital_object_badge] = false
+AppConfig[:pui_hide][:accession_badge] = false
+AppConfig[:pui_hide][:subject_badge] = false
+AppConfig[:pui_hide][:agent_badge] = false
+AppConfig[:pui_hide][:classification_badge] = false
+AppConfig[:pui_hide][:counts] = false
+
+ +

The following determines globally whether the ‘container inventory’ navigation tab/pill is hidden on resource/collection page

+ +
AppConfig[:pui_hide][:container_inventory] = false
+
+ +

AppConfig[:pui_requests_permitted_for_types]

+ +

Determine when the request button is displayed

+ +

AppConfig[:pui_requests_permitted_for_types] = [:resource, :archival_object, :accession, :digital_object, :digital_object_component]

+ +

AppConfig[:pui_requests_permitted_for_containers_only]

+ +

Set to ‘true’ if you want to disable if there is no top container

+ +

AppConfig[:pui_requests_permitted_for_containers_only] = false

+ +

AppConfig[:pui_repos]

+ +

Repository-specific examples. Replace {repo_code} with your repository code, i.e. ‘foo’ - note the lower-case

+ +

AppConfig[:pui_repos] = {}

+ +

Examples:

+ +

for a particular repository, only enable requests for certain record types (Note this configuration will override AppConfig[:pui_requests_permitted_for_types] for the repository)

+ +
AppConfig[:pui_repos]['foo'][:requests_permitted_for_types] = [:resource, :archival_object, :accession, :digital_object, :digital_object_component]
+
+ +

For a particular repository, disable request

+ +
AppConfig[:pui_repos]['foo'][:requests_permitted_for_containers_only] = true
+
+ +

Set the email address to send any repository requests:

+ +
AppConfig[:pui_repos]['foo'][:request_email] = {email address}
+
+ +
+

TODO - Needs more documentation here

+
+ +
AppConfig[:pui_repos]['foo'][:hide] = {}
+AppConfig[:pui_repos]['foo'][:hide][:counts] = true
+
+ +

AppConfig[:pui_display_deaccessions]

+ +
+

TODO - Needs more documentation

+
+ +

AppConfig[:pui_display_deaccessions] = true

+ +

AppConfig[:pui_page_actions_cite]

+ +

Enable / disable PUI resource/archival object page ‘cite’ action

+ +

AppConfig[:pui_page_actions_cite] = true

+ +

AppConfig[:pui_page_actions_bookmark]

+ +

Enable / disable PUI resource/archival object page ‘bookmark’ action

+ +

AppConfig[:pui_page_actions_bookmark] = true

+ +

AppConfig[:pui_page_actions_request]

+ +

Enable / disable PUI resource/archival object page ‘request’ action

+ +

AppConfig[:pui_page_actions_request] = true

+ +

AppConfig[:pui_page_actions_print]

+ +

Enable / disable PUI resource/archival object page ‘print’ action

+ +

AppConfig[:pui_page_actions_print] = true

+ + + +

when a user is authenticated, add a link back to the staff interface from the specified record

+ +

AppConfig[:pui_enable_staff_link] = true

+ + + +

by default, staff link will open record in staff interface in edit mode, change this to ‘readonly’ for it to open in readonly mode

+ +

AppConfig[:pui_staff_link_mode] = 'edit'

+ +

AppConfig[:pui_page_custom_actions]

+ +

Add page actions via the configuration

+ +

AppConfig[:pui_page_custom_actions] = []

+ +

Javascript action example:

+ +
AppConfig[:pui_page_custom_actions] << {
+  'record_type' => ['resource', 'archival_object'], # the jsonmodel type to show for
+  'label' => 'actions.do_something', # the I18n path for the action button
+  'icon' => 'fa-paw', # the font-awesome icon CSS class
+  'onclick_javascript' => 'alert("do something grand");',
+}
+
+ +

Hyperlink action example:

+ +
AppConfig[:pui_page_custom_actions] << {
+  'record_type' => ['resource', 'archival_object'], # the jsonmodel type to show for
+  'label' => 'actions.do_something', # the I18n path for the action button
+  'icon' => 'fa-paw', # the font-awesome icon CSS class
+  'url_proc' => proc {|record| 'http://example.com/aspace?uri='+record.uri},
+}
+
+ +

Form-POST action example:

+ +
AppConfig[:pui_page_custom_actions] << {
+  'record_type' => ['resource', 'archival_object'], # the jsonmodel type to show for
+  'label' => 'actions.do_something', # the I18n path for the action button
+  'icon' => 'fa-paw', # the font-awesome icon CSS class
+  # 'post_params_proc' returns a hash of params which populates a form with hidden inputs ('name' => 'value')
+  'post_params_proc' => proc {|record| {'uri' => record.uri, 'display_string' => record.display_string} },
+  # 'url_proc' returns the URL for the form to POST to
+  'url_proc' => proc {|record| 'http://example.com/aspace?uri='+record.uri},
+  # 'form_id' as string to be used as the form's ID
+  'form_id' => 'my_grand_action',
+}
+
+ +

ERB action example:

+ +
AppConfig[:pui_page_custom_actions] << {
+  'record_type' => ['resource', 'archival_object'],
+  # the jsonmodel type to show for
+  # 'erb_partial' returns the path to an erb template from which the action will be rendered
+  'erb_partial' => 'shared/my_special_action',
+}
+
+ +

AppConfig[:pui_email_enabled]

+ +

PUI email settings (logs emails when disabled)

+ +

AppConfig[:pui_email_enabled] = false

+ +

AppConfig[:pui_email_override]

+ +

See above AppConfig[:pui_repos][{repo_code}][:request_email] for setting repository email overrides. ‘pui_email_override’: for testing, this email will be the to-address for all sent emails

+ +

AppConfig[:pui_email_override] = 'testing@example.com'

+ +

AppConfig[:pui_request_email_fallback_to_address]

+ +

The ‘to’ email address for repositories that don’t define their own email

+ +

AppConfig[:pui_request_email_fallback_to_address] = 'testing@example.com'

+ +

AppConfig[:pui_request_email_fallback_from_address]

+ +

The ‘from’ email address for repositories that don’t define their own email

+ +

AppConfig[:pui_request_email_fallback_from_address] = 'testing@example.com'

+ +

AppConfig[:pui_request_use_repo_email]

+ +

Use the repository record email address for requests (overrides config email)

+ +

AppConfig[:pui_request_use_repo_email] = false

+ +

AppConfig[:pui_email_delivery_method]

+ +

AppConfig[:pui_email_delivery_method] = :sendmail

+ +

AppConfig[:pui_email_sendmail_settings]

+ +
AppConfig[:pui_email_sendmail_settings] = {
+  location: '/usr/sbin/sendmail',
+  arguments: '-i'
+}
+
+ +

AppConfig[:pui_email_smtp_settings]

+ +

Apply when AppConfig[:pui_email_delivery_method] set to :smtp

+ +

Example SMTP configuration:

+ +
AppConfig[:pui_email_smtp_settings] = {
+  address: 'smtp.gmail.com',
+  port: 587,
+  domain: 'gmail.com',
+  user_name: '<username>',
+  password: '<password>',
+  authentication: 'plain',
+  enable_starttls_auto: true,
+}
+
+ +

AppConfig[:pui_email_perform_deliveries]

+ +

AppConfig[:pui_email_perform_deliveries] = true

+ +

AppConfig[:pui_email_raise_delivery_errors]

+ +

AppConfig[:pui_email_raise_delivery_errors] = true

+ +

AppConfig[:pui_readmore_max_characters]

+ +

The number of characters to truncate before showing the ‘Read More’ link on notes

+ +

AppConfig[:pui_readmore_max_characters] = 450

+ +

AppConfig[:pui_expand_all]

+ +

Whether to expand all additional information blocks at the bottom of record pages by default. true expands all blocks, false collapses all blocks.

+ +

AppConfig[:pui_expand_all] = false

+ +

AppConfig[:max_search_columns]

+ +

Use to specify the maximum number of columns to display when searching or browsing

+ +

AppConfig[:max_search_columns] = 7

+ + +
+ +
+ + + diff --git a/customization/configuration.md b/customization/configuration.md deleted file mode 100644 index e469dd7f..00000000 --- a/customization/configuration.md +++ /dev/null @@ -1,1383 +0,0 @@ -# Configuring ArchivesSpace - -The primary configuration for ArchivesSpace is done in the config/config.rb -file. By default, this file contains the default settings, which are indicated -by commented out lines ( indicated by the "#" in the file ). You can adjust these -settings by adding new lines that change the default and restarting -ArchivesSpace. Be sure that your new settings are not commented out -( i.e. do NOT start with a "#" ), otherwise the settings will not take effect. - - -## Commonly changed settings - -### Database config - - -#### :db_url - -Set your database name and credentials. The default specifies that the embedded database should be used. -It is recommended to use a MySQL database instead of the embedded database. -For more info, see -[Running ArchivesSpace against MySQL](https://archivesspace.github.io/tech-docs/provisioning/mysql.html) - -This is an example of specifying MySQL credentials: - -`AppConfig[:db_url] = "jdbc:mysql://127.0.0.1:3306/aspace?useUnicode=true&characterEncoding=UTF-8&user=as&password=as123"` - - -#### :db_max_connections - -Set the maximum number of database connections used by the application. -Default is derived from the number of indexer threads. - -`AppConfig[:db_max_connections] = proc { 20 + (AppConfig[:indexer_thread_count] * 2) }` - - -### URLs for ArchivesSpace components - -Set the ArchivesSpace backend port. The backend listens on port 8089 by default. - -`AppConfig[:backend_url] = "http://localhost:8089"` - - -Set the ArchivesSpace staff interface (frontend) port. The staff interface listens on port 8080 by default. - -`AppConfig[:frontend_url] = "http://localhost:8080"` - - -Set the ArchivesSpace public interface port. The public interface listens on port 8081 by default. 
- -`AppConfig[:public_url] = "http://localhost:8081"` - - -Set the ArchivesSpace OAI server port. The OAI server listens on port 8082 by default. - -`AppConfig[:oai_url] = "http://localhost:8082"` - - -Set the ArchivesSpace Solr index port. The Solr server listens on port 8090 by default. - -`AppConfig[:solr_url] = "http://localhost:8090"` - -Set the ArchivesSpace indexer port. The indexer listens on port 8091 by default. - -`AppConfig[:indexer_url] = "http://localhost:8091"` - -Set the ArchivesSpace API documentation port. The API documentation listens on port 8888 by default. - -`AppConfig[:docs_url] = "http://localhost:8888"` - - -### Enabling ArchivesSpace components - -Enable or disable specific componenets by setting the following settings to true or false (defaults to true): - -``` -AppConfig[:enable_backend] = true -AppConfig[:enable_frontend] = true -AppConfig[:enable_public] = true -AppConfig[:enable_solr] = true -AppConfig[:enable_indexer] = true -AppConfig[:enable_docs] = true -AppConfig[:enable_oai] = true -``` - -### Application logging - -By default, all logging will be output on the screen while the archivesspace command -is running. When running as a daemon/service, this is put into a file in -`logs/archivesspace.out`. You can route log output to a different file per component by changing the log value to -a filepath that archivesspace has write access to. - -You can also set the logging level for each component. Valid values are: - -* `debug` (everything) -* `info` -* `warn` -* `error` -* `fatal` (severe only) - -#### `AppConfig[:frontend_log]` - -File for log output for the frontend (staff interface). Set to "default" to -route log output to archivesspace.out. - -#### `#AppConfig[:frontend_log_level]` - -Logging level for the frontend. - - -#### `AppConfig[:backend_log]` - -File for log output for the backend. Set to "default" to -route log output to archivesspace.out. - -#### `#AppConfig[:backend_log_level]` - -Logging level for the backend. 
- - -#### `AppConfig[:pui_log]` - -File for log output for the public UI. Set to "default" to -route log output to archivesspace.out. - -#### `#AppConfig[:pui_log_level]` - -Logging level for the public UI. - - -#### `AppConfig[:indexer_log]` - -File for log output for the indexer. Set to "default" to -route log output to archivesspace.out. - -#### `#AppConfig[:indexer_log_level]` - -Logging level for the indexer. - - -### Database logging - -#### `AppConfig[:db_debug_log]` - -Set to true to log all SQL statements. -Note that this will have a performance impact! - -`AppConfig[:db_debug_log] = false` - - -#### `AppConfig[:mysql_binlog]` - -Set to true if you have enabled MySQL binary logging. - -`AppConfig[:mysql_binlog] = false` - - -### Solr backups - -#### `AppConfig[:solr_backup_schedule]` - -Set Solr back up schedule. By default, Solr backups will run at midnight. See https://crontab.guru/ for - information about the schedule syntax. - -`AppConfig[:solr_backup_schedule] = "0 * * * *"` - - -#### `AppConfig[:solr_backup_number_to_keep]` - -Number of Solr backups to keep (default = 1) - -`AppConfig[:solr_backup_number_to_keep] = 1` - - -#### `AppConfig[:solr_backup_directory]` - -Directory to store Solr backups. - -`AppConfig[:solr_backup_directory] = proc { File.join(AppConfig[:data_directory], "solr_backups") }` - -### Default Solr params - -#### `AppConfig[:solr_params]` - -Add default solr params. 
- -A simple example: use AND for search: - -`AppConfig[:solr_params] = { "q.op" => "AND" }` - -A more complex example: set the boost query value (bq) to boost the relevancy -for the query string in the title, set the phrase fields parameter (pf) to boost -the relevancy for the title when the query terms are in close proximity to each -other, and set the phrase slop (ps) parameter for the pf parameter to indicate -how close the proximity should be: - -``` -AppConfig[:solr_params] = { - "bq" => proc { "title:\"#{@query_string}\"*" }, - "pf" => 'title^10', - "ps" => 0, -} -``` - -### Language - -#### `AppConfig[:locale]` - -Set the application's language (see the .yml files in -https://github.com/archivesspace/archivesspace/tree/master/common/locales -for a list of available locale codes). Default is English (:en): - -`AppConfig[:locale] = :en` - - - -### Plugin registration - -#### `AppConfig[:plugins]` - -Plug-ins to load. They will load in the order specified. - -`AppConfig[:plugins] = ['local', 'lcnaf']` - - - -### Thread count - -#### `AppConfig[:job_thread_count]` - -The number of concurrent threads available to run background jobs. -Introduced because long running jobs were blocking the queue. -Resist the urge to set this to a big number! - -`AppConfig[:job_thread_count] = 2` - - - -### OAI configuration options - -#### `AppConfig[:oai_proxy_url]` - -> TODO - Needs more documentation - -`AppConfig[:oai_proxy_url] = 'http://your-public-oai-url.example.com'` - -**NOTE: As of version 2.5.2, the following parameters (oai_repository_name, oai_record_prefix, and oai_admin_email) have been deprecated. They should be set in the Staff User Interface. To set them, select the System menu in the Staff User Interface and then select Manage OAI-PMH Settings. These three settings are at the top of the page in the General Settings section. 
These settings will be completely removed from the config file when version 2.6.0 is released.** - -#### `AppConfig[:oai_repository_name]` - -`AppConfig[:oai_repository_name] = 'ArchivesSpace OAI Provider'` - -#### `AppConfig[:oai_record_prefix]` - -`AppConfig[:oai_record_prefix] = 'oai:archivesspace'` - -#### `AppConfig[:oai_admin_email]` - -`AppConfig[:oai_admin_email] = 'admin@example.com'` - - - -#### `AppConfig[:oai_sets]` - -In addition to the sets based on level of description, you can define OAI Sets -based on repository codes and/or sponsors as follows: - -``` -AppConfig[:oai_sets] = { - 'repository_set' => { - :repo_codes => ['hello626'], - :description => "A set of one or more repositories", - }, - - 'sponsor_set' => { - :sponsors => ['The_Sponsor'], - :description => "A set of one or more sponsors", - }, -} -``` - - -## Other less commonly changed settings - - -### Default admin password - -#### `AppConfig[:default_admin_password]` - -Set default admin password. Default password is "admin". - -`#AppConfig[:default_admin_password] = "admin"` - - -### Data directories - -#### `AppConfig[:data_directory]` - -If you run ArchivesSpace using the standard scripts (archivesspace.sh, -archivesspace.bat or as a Windows service), the value of :data_directory is -automatically set to be the "data" directory of your ArchivesSpace -distribution. You don't need to change this value unless you specifically -want ArchivesSpace to put its data files elsewhere. 
- -`AppConfig[:data_directory] = File.join(Dir.home, "ArchivesSpace")` - - -#### `AppConfig[:backup_directory]` - -> TODO - Needs more documentation - -`AppConfig[:backup_directory] = proc { File.join(AppConfig[:data_directory], "demo_db_backups") }` - - -#### `AppConfig[:solr_index_directory]` - -> TODO - Needs more documentation - -`AppConfig[:solr_index_directory] = proc { File.join(AppConfig[:data_directory], "solr_index") }` - - -#### `AppConfig[:solr_home_directory]` - -> TODO - Needs more documentation - -`AppConfig[:solr_home_directory] = proc { File.join(AppConfig[:data_directory], "solr_home") }` - - - -### Solr defaults - -#### `AppConfig[:solr_indexing_frequency_seconds]` - -> TODO - Needs more documentation - -`AppConfig[:solr_indexing_frequency_seconds] = 30` - - -#### `AppConfig[:solr_facet_limit]` - -> TODO - Needs more documentation - -`AppConfig[:solr_facet_limit] = 100` - - -#### `AppConfig[:default_page_size]` - -> TODO - Needs more documentation - -`AppConfig[:default_page_size] = 10` - - -#### `AppConfig[:max_page_size]` - -> TODO - Needs more documentation - -`AppConfig[:max_page_size] = 250` - - -### Cookie prefix - -#### `AppConfig[:cookie_prefix]` - -A prefix added to cookies used by the application. -Change this if you're running more than one instance of ArchivesSpace on the -same hostname (i.e. multiple instances on different ports). -Default is "archivesspace". - -`AppConfig[:cookie_prefix] = "archivesspace"` - - -### Indexer settings - -The periodic indexer can run using multiple threads to take advantage of -multiple CPU cores. By setting these two options, you can control how many -CPU cores are used, and the amount of memory that will be consumed by the -indexing process (more cores and/or more records per thread means more memory used). 
- -#### `AppConfig[:indexer_records_per_thread]` - -`AppConfig[:indexer_records_per_thread] = 25` - - -#### `AppConfig[:indexer_thread_count]` - -`AppConfig[:indexer_thread_count] = 4` - - -#### `AppConfig[:indexer_solr_timeout_seconds]` - -> TODO - Needs more documentation - -`AppConfig[:indexer_solr_timeout_seconds] = 300` - - - -### PUI Indexer Settings - -#### `AppConfig[:pui_indexer_enabled]` - -> TODO - Needs more documentation - -`AppConfig[:pui_indexer_enabled] = true` - - -#### `AppConfig[:pui_indexing_frequency_seconds]` - -> TODO - Needs more documentation - -`AppConfig[:pui_indexing_frequency_seconds] = 30` - - -#### `AppConfig[:pui_indexer_records_per_thread]` - -> TODO - Needs more documentation - -`AppConfig[:pui_indexer_records_per_thread] = 25` - - -#### `AppConfig[:pui_indexer_thread_count]` - -> TODO - Needs more documentation - -`AppConfig[:pui_indexer_thread_count] = 1` - - - -### Index state - - - -#### `AppConfig[:index_state_class]` - -Set to 'IndexStateS3' for amazon s3 - -> TODO - Needs more documentation - -`AppConfig[:index_state_class] = 'IndexState'` - - -#### `AppConfig[:index_state_s3]` - -Store indexer state in amazon s3 (optional) -NOTE: s3 charges for read / update requests and the pui indexer is continually -writing to state files so you may want to increase pui_indexing_frequency_seconds - -> TODO - Needs more documentation - -``` -AppConfig[:index_state_s3] = { - region: ENV.fetch("AWS_REGION"), - aws_access_key_id: ENV.fetch("AWS_ACCESS_KEY_ID"), - aws_secret_access_key: ENV.fetch("AWS_SECRET_ACCESS_KEY"), - bucket: ENV.fetch("AWS_ASPACE_BUCKET"), - prefix: proc { "#{AppConfig[:cookie_prefix]}_" }, -} -``` - - -### Misc. 
database options - -#### `AppConfig[:allow_other_unmapped]` - -> TODO - Needs more documentation - -`AppConfig[:allow_other_unmapped] = false` - - -#### `AppConfig[:db_url_redacted]` - -> TODO - Needs more documentation - -`AppConfig[:db_url_redacted] = proc { AppConfig[:db_url].gsub(/(user|password)=(.*?)(&|$)/, '\1=[REDACTED]\3') }` - - -#### `AppConfig[:demo_db_backup_schedule]` - -> TODO - Needs more documentation - -`AppConfig[:demo_db_backup_schedule] = "0 4 * * *"` - - -#### `AppConfig[:allow_unsupported_database]` - -> TODO - Needs more documentation - -`AppConfig[:allow_unsupported_database] = false` - - -#### `AppConfig[:allow_non_utf8_mysql_database]` - -> TODO - Needs more documentation - -`AppConfig[:allow_non_utf8_mysql_database] = false` - - -#### `AppConfig[:demo_db_backup_number_to_keep] = 7` - -> TODO - Needs more documentation - -`AppConfig[:demo_db_backup_number_to_keep] = 7` - - - -### Proxy URLs - -If you are serving user-facing applications via proxy -(i.e., another domain or port, or via https, or for a prefix) it is -recommended that you record those URLs in your configuration - - -#### `AppConfig[:frontend_proxy_url] = proc { AppConfig[:frontend_url] }` - -Proxy URL for the frontend (staff interface) - -`AppConfig[:frontend_proxy_url] = proc { AppConfig[:frontend_url] }` - - -#### `AppConfig[:public_proxy_url]` - -Proxy URL for the public interface - -`AppConfig[:public_proxy_url] = proc { AppConfig[:public_url] }` - - -#### `AppConfig[:frontend_proxy_prefix]` - -Don't override this setting unless you know what you're doing - -`AppConfig[:frontend_proxy_prefix] = proc { "#{URI(AppConfig[:frontend_proxy_url]).path}/".gsub(%r{/+$}, "/") }` - - -#### `AppConfig[:public_proxy_prefix]` - -Don't override this setting unless you know what you're doing - -`AppConfig[:public_proxy_prefix] = proc { "#{URI(AppConfig[:public_proxy_url]).path}/".gsub(%r{/+$}, "/") }` - - -### Enable component applications - -Setting any of these false will prevent the 
associated applications from starting. -Temporarily disabling the frontend and public UIs and/or the indexer may help users -who are running into memory-related issues during migration. - - -#### `AppConfig[:enable_backend]` - -`AppConfig[:enable_backend] = true` - - -#### `AppConfig[:enable_frontend]` - -`AppConfig[:enable_frontend] = true` - - -#### `AppConfig[:enable_public]` - -`AppConfig[:enable_public] = true` - - -#### `AppConfig[:enable_solr]` - -`AppConfig[:enable_solr] = true` - - -#### `AppConfig[:enable_indexer]` - -`AppConfig[:enable_indexer] = true` - - -#### `AppConfig[:enable_docs]` - -`AppConfig[:enable_docs] = true` - - -#### `AppConfig[:enable_oai]` - -`AppConfig[:enable_oai] = true` - - - -### Jetty shutdown - -Some use cases want the ability to shutdown the Jetty service using Jetty's -ShutdownHandler, which allows a POST request to a specific URI to signal -server shutdown. The prefix for this URI path is set to `/xkcd` to reduce the -possibility of a collision in the path configuration. So, full path would be - -`/xkcd/shutdown?token={randomly generated password}` - -The launcher creates a password to use this, which is stored -in the data directory. This is not turned on by default. - - -#### `AppConfig[:use_jetty_shutdown_handler]` - -`AppConfig[:use_jetty_shutdown_handler] = false` - - -#### `AppConfig[:jetty_shutdown_path]` - -`AppConfig[:jetty_shutdown_path] = "/xkcd"` - - -### Managing multile backend instances - -If you have multiple instances of the backend running behind a load -balancer, list the URL of each backend instance here. This is used by the -real-time indexing, which needs to connect directly to each running -instance. - -By default we assume you're not using a load balancer, so we just connect -to the regular backend URL. 
- - -#### `AppConfig[:backend_instance_urls]` - -`AppConfig[:backend_instance_urls] = proc { [AppConfig[:backend_url]] }` - - -### Theme - -> TODO - Needs more documentation - - -#### `AppConfig[:frontend_theme]` - -`AppConfig[:frontend_theme] = "default"` - - -#### `AppConfig[:public_theme]` - -`AppConfig[:public_theme] = "default"` - - - -### Session expiration - -#### `AppConfig[:session_expire_after_seconds]` - -Sessions marked as expirable will timeout after this number of seconds of inactivity - -`AppConfig[:session_expire_after_seconds] = 3600` - - -#### `AppConfig[:session_nonexpirable_force_expire_after_seconds]` - -Sessions marked as non-expirable will eventually expire too, but after a longer period. - -`AppConfig[:session_nonexpirable_force_expire_after_seconds] = 604800` - - -### System usernames - -> TODO - Needs more documentation - - -#### `AppConfig[:search_username]` - -`AppConfig[:search_username] = "search_indexer"` - - -#### `AppConfig[:public_username]` - -`AppConfig[:public_username] = "public_anonymous"` - - -#### `AppConfig[:staff_username]` - -`AppConfig[:staff_username] = "staff_system"` - - - -### Authentication sources - -> TODO - Needs more documentation - -#### `AppConfig[:authentication_sources]` - -`AppConfig[:authentication_sources] = []` - - - -### Misc. 
backlog and snapshot settings - -#### `AppConfig[:realtime_index_backlog_ms]` - -> TODO - Needs more documentation - -`AppConfig[:realtime_index_backlog_ms] = 60000` - - -#### `AppConfig[:notifications_backlog_ms]` - -> TODO - Needs more documentation - -`AppConfig[:notifications_backlog_ms] = 60000` - - -#### `AppConfig[:notifications_poll_frequency_ms]` - -> TODO - Needs more documentation - -`AppConfig[:notifications_poll_frequency_ms] = 1000` - - -#### `AppConfig[:max_usernames_per_source]` - -> TODO - Needs more documentation - -`AppConfig[:max_usernames_per_source] = 50` - - -#### `AppConfig[:demodb_snapshot_flag]` - -> TODO - Needs more documentation - -`AppConfig[:demodb_snapshot_flag] = proc { File.join(AppConfig[:data_directory], "create_demodb_snapshot.txt") }` - - - -### Report Configuration - -#### `AppConfig[:report_page_layout]` - -Uses valid values for the CSS3 @page directive's size property: -http://www.w3.org/TR/css3-page/#page-size-prop - -`AppConfig[:report_page_layout] = "letter"` - - -#### `AppConfig[:report_pdf_font_paths]` - -> TODO - Needs more documentation - -`AppConfig[:report_pdf_font_paths] = proc { ["#{AppConfig[:backend_url]}/reports/static/fonts/dejavu/DejaVuSans.ttf"] }` - - -#### `AppConfig[:report_pdf_font_family]` - -> TODO - Needs more documentation - -`AppConfig[:report_pdf_font_family] = "\"DejaVu Sans\", sans-serif"` - - -### Plugins directory - -#### `AppConfig[:plugins_directory]` - -By default, the plugins directory will be in your ASpace Home. -If you want to override that, update this with an absolute path - -`AppConfig[:plugins_directory] = "plugins"` - - -### Feedback - -#### `AppConfig[:feedback_url]` - -URL to direct the feedback link. -You can remove this from the footer by making the value blank. 
- -`AppConfig[:feedback_url] = "http://archivesspace.org/contact"` - - -### User registration - -#### `AppConfig[:allow_user_registration]` - -Allow an unauthenticated user to create an account - -`AppConfig[:allow_user_registration] = true` - - - -### Help Configuration - -#### `AppConfig[:help_enabled]` - -> TODO - Needs more documentation - -`AppConfig[:help_enabled] = true` - - -#### `AppConfig[:help_url]` - -> TODO - Needs more documentation - -`AppConfig[:help_url] = "https://archivesspace.atlassian.net/wiki/spaces/ArchivesSpaceUserManual/overview"`` - -#### `AppConfig[:help_topic_base_url]` - -> TODO - Needs more documentation - -`AppConfig[:help_topic_base_url] = "https://archivesspace.atlassian.net/wiki/spaces/ArchivesSpaceUserManual/pages/"`` - - -### Shared storage - -#### `AppConfig[:shared_storage]` - -`AppConfig[:shared_storage] = proc { File.join(AppConfig[:data_directory], "shared") }` - - - -### Background jobs - -#### `AppConfig[:job_file_path]` - -Formerly known as :import_job_path - -> TODO - Needs more documentation - -`AppConfig[:job_file_path] = proc { AppConfig.has_key?(:import_job_path) ? AppConfig[:import_job_path] : File.join(AppConfig[:shared_storage], "job_files") }` - - -#### `AppConfig[:job_poll_seconds]` - -> TODO - Needs more documentation - -`AppConfig[:job_poll_seconds] = proc { AppConfig.has_key?(:import_poll_seconds) ? AppConfig[:import_poll_seconds] : 5 }` - - -#### `AppConfig[:job_timeout_seconds]` - -> TODO - Needs more documentation - -`AppConfig[:job_timeout_seconds] = proc { AppConfig.has_key?(:import_timeout_seconds) ? 
AppConfig[:import_timeout_seconds] : 300 }` - - -#### `AppConfig[:jobs_cancelable]` - -By default, only allow jobs to be cancelled if we're running against MySQL (since we can rollback) - -`AppConfig[:jobs_cancelable] = proc { (AppConfig[:db_url] != AppConfig.demo_db_url).to_s }` - - -### Locations - -#### `AppConfig[:max_location_range]` - -> TODO - Needs more documentation - -`AppConfig[:max_location_range] = 1000` - - - -### Schema Info check - -#### `AppConfig[:ignore_schema_info_check]` - -ASpace backend will not start if the db's schema_info version is not set -correctly for this version of ASPACE. This is to ensure that all the -migrations have run and completed before starting the app. You can override -this check here. Do so at your own peril. - -`AppConfig[:ignore_schema_info_check] = false` - - -### Demo data - -#### `AppConfig[:demo_data_url]` - -This is a URL that points to some demo data that can be used for testing, -teaching, etc. To use this, set an OS environment variable of ASPACE_DEMO = true - -`AppConfig[:demo_data_url] = "https://s3-us-west-2.amazonaws.com/archivesspacedemo/latest-demo-data.zip"` - - -### External IDs - -#### `AppConfig[:show_external_ids]` - -Expose external ids in the frontend - -`AppConfig[:show_external_ids] = false` - - -### Jetty request/response buffer - -Set the allowed size of the request/response header that Jetty will accept -(anything bigger gets a 403 error). Note if you want to jack this size up, -you will also have to configure your Nginx/Apache as well if you're using that - -#### `AppConfig[:jetty_response_buffer_size_bytes]` - -`AppConfig[:jetty_response_buffer_size_bytes] = 64 * 1024` - - -#### `AppConfig[:jetty_request_buffer_size_bytes]` - -`AppConfig[:jetty_request_buffer_size_bytes] = 64 * 1024` - - - -### Container management configuration fields - -#### `AppConfig[:container_management_barcode_length]` - -Defines global and repo-level barcode validations (validating on length only). 
-Barcodes that have either no value, or a value between :min and :max, will validate on save. -Set global constraints via :system_default, and use the repo_code value for repository-level constraints. -Note that :system_default will always inherit down its values when possible. - -`AppConfig[:container_management_barcode_length] = {:system_default => {:min => 5, :max => 10}, 'repo' => {:min => 9, :max => 12}, 'other_repo' => {:min => 9, :max => 9} }` - - -#### `AppConfig[:container_management_extent_calculator]` - -Globally defines the behavior of the exent calculator. -Use :report_volume (true/false) to define whether space should be reported in cubic -or linear dimensions. -Use :unit (:feet, :inches, :meters, :centimeters) to define the unit which the calculator -reports extents in. -Use :decimal_places to define how many decimal places the calculator should return. - -Example: - -`AppConfig[:container_management_extent_calculator] = { :report_volume => true, :unit => :feet, :decimal_places => 3 }` - - -### Record inheritance in public interface - -#### `AppConfig[:record_inheritance]` - -Define the fields for a record type that are inherited from ancestors -if they don't have a value in the record itself. -This is used in common/record_inheritance.rb and was developed to support -the new public UI application. -Note - any changes to record_inheritance config will require a reindex of pui -records to take affect. 
To do this remove files from indexer_pui_state - -``` -AppConfig[:record_inheritance] = { - :archival_object => { - :inherited_fields => [ - { - :property => 'title', - :inherit_directly => true - }, - { - :property => 'component_id', - :inherit_directly => false - }, - { - :property => 'language', - :inherit_directly => true - }, - { - :property => 'dates', - :inherit_directly => true - }, - { - :property => 'extents', - :inherit_directly => false - }, - { - :property => 'linked_agents', - :inherit_if => proc {|json| json.select {|j| j['role'] == 'creator'} }, - :inherit_directly => false - }, - { - :property => 'notes', - :inherit_if => proc {|json| json.select {|j| j['type'] == 'accessrestrict'} }, - :inherit_directly => true - }, - { - :property => 'notes', - :inherit_if => proc {|json| json.select {|j| j['type'] == 'scopecontent'} }, - :inherit_directly => false - }, - { - :property => 'notes', - :inherit_if => proc {|json| json.select {|j| j['type'] == 'langmaterial'} }, - :inherit_directly => false - }, - ] - } -} -``` - - -To enable composite identifiers - added to the merged record in a property -`\_composite_identifier` - -The values for `:include_level` and `:identifier_delimiter` shown here are the defaults - -If `:include_level` is set to true then level values (eg Series) will be included in `\_composite_identifier` - -The `:identifier_delimiter` is used when joining the four part identifier for resources - -``` -AppConfig[:record_inheritance][:archival_object][:composite_identifiers] = { - :include_level => false, - :identifier_delimiter => ' ' -} -``` - - -To configure additional elements to be inherited use this pattern in your config - -``` -AppConfig[:record_inheritance][:archival_object][:inherited_fields] << - { - :property => 'linked_agents', - :inherit_if => proc {|json| json.select {|j| j['role'] == 'subject'} }, - :inherit_directly => true - } -``` - -... 
or use this pattern to add many new elements at once - -``` -AppConfig[:record_inheritance][:archival_object][:inherited_fields].concat( - [ - { - :property => 'subjects', - :inherit_if => proc {|json| - json.select {|j| - ! j['_resolved']['terms'].select { |t| t['term_type'] == 'topical'}.empty? } - }, - :inherit_directly => true - }, - { - :property => 'external_documents', - :inherit_directly => false - }, - { - :property => 'rights_statements', - :inherit_directly => false - }, - { - :property => 'instances', - :inherit_directly => false - }, - ]) -``` - -If you want to modify any of the default rules, the safest approach is to uncomment -the entire default record_inheritance config and make your changes. -For example, to stop scopecontent notes from being inherited into file or item records -uncomment the entire record_inheritance default config above, and add a skip_if -clause to the scopecontent rule, like this: - -``` - { - :property => 'notes', - :skip_if => proc {|json| ['file', 'item'].include?(json['level']) }, - :inherit_if => proc {|json| json.select {|j| j['type'] == 'scopecontent'} }, - :inherit_directly => false - }, -``` - - - -### PUI Configurations - -#### `AppConfig[:pui_search_results_page_size]` - -`AppConfig[:pui_search_results_page_size] = 10` - - -#### `AppConfig[:pui_branding_img]` - -`AppConfig[:pui_branding_img] = 'archivesspace.small.png'` - - -#### `AppConfig[:pui_block_referrer]` - -`AppConfig[:pui_block_referrer] = true # patron privacy; blocks full 'referer' when going outside the domain` - - - -#### `AppConfig[:pui_max_concurrent_pdfs]` - -The number of PDFs we'll generate (in the background) at the same time. - -PDF generation can be a little memory intensive for large collections, so we -set this fairly low out of the box. 
- -`AppConfig[:pui_max_concurrent_pdfs] = 2` - - -#### `AppConfig[:pui_pdf_timeout]` - -You can set this to nil or zero to prevent a timeout - -`AppConfig[:pui_pdf_timeout] = 600` - - -#### `AppConfig[:pui_hide]` - -`AppConfig[:pui_hide] = {}` - -The following determine which 'tabs' are on the main horizontal menu: - -``` -AppConfig[:pui_hide][:repositories] = false -AppConfig[:pui_hide][:resources] = false -AppConfig[:pui_hide][:digital_objects] = false -AppConfig[:pui_hide][:accessions] = false -AppConfig[:pui_hide][:subjects] = false -AppConfig[:pui_hide][:agents] = false -AppConfig[:pui_hide][:classifications] = false -AppConfig[:pui_hide][:search_tab] = false -``` - -The following determine globally whether the various "badges" appear on the Repository page -can be overriden at repository level below (e.g.: -`AppConfig[:repos][{repo_code}][:hide][:counts] = true` - -``` -AppConfig[:pui_hide][:resource_badge] = false -AppConfig[:pui_hide][:record_badge] = true # hide by default -AppConfig[:pui_hide][:digital_object_badge] = false -AppConfig[:pui_hide][:accession_badge] = false -AppConfig[:pui_hide][:subject_badge] = false -AppConfig[:pui_hide][:agent_badge] = false -AppConfig[:pui_hide][:classification_badge] = false -AppConfig[:pui_hide][:counts] = false -``` - -The following determines globally whether the 'container inventory' navigation -tab/pill is hidden on resource/collection page - -``` -AppConfig[:pui_hide][:container_inventory] = false -``` - - -#### `AppConfig[:pui_requests_permitted_for_types]` - -Determine when the request button is displayed - -`AppConfig[:pui_requests_permitted_for_types] = [:resource, :archival_object, :accession, :digital_object, :digital_object_component]` - - -#### `AppConfig[:pui_requests_permitted_for_containers_only]` - -Set to 'true' if you want to disable if there is no top container - -`AppConfig[:pui_requests_permitted_for_containers_only] = false` - - -#### `AppConfig[:pui_repos]` - -Repository-specific examples. 
Replace {repo_code} with your repository code, i.e. 'foo' - note the lower-case - -`AppConfig[:pui_repos] = {}` - -Examples: - -for a particular repository, only enable requests for certain record types (Note this configuration will override AppConfig[:pui_requests_permitted_for_types] for the repository) - -``` -AppConfig[:pui_repos]['foo'][:requests_permitted_for_types] = [:resource, :archival_object, :accession, :digital_object, :digital_object_component] -``` - -For a particular repository, disable request - -``` -AppConfig[:pui_repos]['foo'][:requests_permitted_for_containers_only] = true -``` - -Set the email address to send any repository requests: - -``` -AppConfig[:pui_repos]['foo'][:request_email] = {email address} -``` - -> TODO - Needs more documentation here - -``` -AppConfig[:pui_repos]['foo'][:hide] = {} -AppConfig[:pui_repos]['foo'][:hide][:counts] = true -``` - - -#### `AppConfig[:pui_display_deaccessions]` - -> TODO - Needs more documentation - -`AppConfig[:pui_display_deaccessions] = true` - - - -#### `AppConfig[:pui_page_actions_cite]` - -Enable / disable PUI resource/archival object page 'cite' action - -`AppConfig[:pui_page_actions_cite] = true` - - -#### `AppConfig[:pui_page_actions_bookmark]` - -Enable / disable PUI resource/archival object page 'bookmark' action - -`AppConfig[:pui_page_actions_bookmark] = true` - - -#### `AppConfig[:pui_page_actions_request]` - -Enable / disable PUI resource/archival object page 'request' action - -`AppConfig[:pui_page_actions_request] = true` - - -#### `AppConfig[:pui_page_actions_print]` - -Enable / disable PUI resource/archival object page 'print' action - -`AppConfig[:pui_page_actions_print] = true` - - -#### `AppConfig[:pui_enable_staff_link]` - -when a user is authenticated, add a link back to the staff interface from the specified record - -`AppConfig[:pui_enable_staff_link] = true` - - -#### `AppConfig[:pui_staff_link_mode]` - -by default, staff link will open record in staff interface in edit mode, 
-change this to 'readonly' for it to open in readonly mode - -`AppConfig[:pui_staff_link_mode] = 'edit'` - - -#### `AppConfig[:pui_page_custom_actions]` - -Add page actions via the configuration - -`AppConfig[:pui_page_custom_actions] = []` - -Javascript action example: - -``` -AppConfig[:pui_page_custom_actions] << { - 'record_type' => ['resource', 'archival_object'], # the jsonmodel type to show for - 'label' => 'actions.do_something', # the I18n path for the action button - 'icon' => 'fa-paw', # the font-awesome icon CSS class - 'onclick_javascript' => 'alert("do something grand");', -} -``` - -Hyperlink action example: - -``` -AppConfig[:pui_page_custom_actions] << { - 'record_type' => ['resource', 'archival_object'], # the jsonmodel type to show for - 'label' => 'actions.do_something', # the I18n path for the action button - 'icon' => 'fa-paw', # the font-awesome icon CSS class - 'url_proc' => proc {|record| 'http://example.com/aspace?uri='+record.uri}, -} -``` - -Form-POST action example: - -``` -AppConfig[:pui_page_custom_actions] << { - 'record_type' => ['resource', 'archival_object'], # the jsonmodel type to show for - 'label' => 'actions.do_something', # the I18n path for the action button - 'icon' => 'fa-paw', # the font-awesome icon CSS class - # 'post_params_proc' returns a hash of params which populates a form with hidden inputs ('name' => 'value') - 'post_params_proc' => proc {|record| {'uri' => record.uri, 'display_string' => record.display_string} }, - # 'url_proc' returns the URL for the form to POST to - 'url_proc' => proc {|record| 'http://example.com/aspace?uri='+record.uri}, - # 'form_id' as string to be used as the form's ID - 'form_id' => 'my_grand_action', -} -``` - -ERB action example: - -``` -AppConfig[:pui_page_custom_actions] << { - 'record_type' => ['resource', 'archival_object'], - # the jsonmodel type to show for - # 'erb_partial' returns the path to an erb template from which the action will be rendered - 'erb_partial' => 
'shared/my_special_action', -} -``` - - -#### `AppConfig[:pui_email_enabled]` - -PUI email settings (logs emails when disabled) - -`AppConfig[:pui_email_enabled] = false` - - - -#### `AppConfig[:pui_email_override]` - -See above AppConfig[:pui_repos][{repo_code}][:request_email] for setting repository email overrides -'pui_email_override' for testing, this email will be the to-address for all sent emails - -`AppConfig[:pui_email_override] = 'testing@example.com'` - - -#### `AppConfig[:pui_request_email_fallback_to_address]` - -The 'to' email address for repositories that don't define their own email - -`AppConfig[:pui_request_email_fallback_to_address] = 'testing@example.com'` - - -#### `AppConfig[:pui_request_email_fallback_from_address]` - -The 'from' email address for repositories that don't define their own email - -`AppConfig[:pui_request_email_fallback_from_address] = 'testing@example.com'` - - -#### `AppConfig[:pui_request_use_repo_email]` - -Use the repository record email address for requests (overrides config email) - -`AppConfig[:pui_request_use_repo_email] = false` - - -#### `AppConfig[:pui_email_delivery_method]` - -`AppConfig[:pui_email_delivery_method] = :sendmail` - - - -#### `AppConfig[:pui_email_sendmail_settings]` - -``` -AppConfig[:pui_email_sendmail_settings] = { - location: '/usr/sbin/sendmail', - arguments: '-i' -} -``` - -#### `AppConfig[:pui_email_smtp_settings]` - -Apply when `AppConfig[:pui_email_delivery_method]` set to `:smtp` - -Example SMTP configuration: - -``` -AppConfig[:pui_email_smtp_settings] = { - address: 'smtp.gmail.com', - port: 587, - domain: 'gmail.com', - user_name: '', - password: '', - authentication: 'plain', - enable_starttls_auto: true, -} -``` - - -#### `AppConfig[:pui_email_perform_deliveries]` - -`AppConfig[:pui_email_perform_deliveries] = true` - - -#### `AppConfig[:pui_email_raise_delivery_errors]` - -`AppConfig[:pui_email_raise_delivery_errors] = true` - - -#### `AppConfig[:pui_readmore_max_characters]` - -The 
number of characters to truncate before showing the 'Read More' link on notes - -`AppConfig[:pui_readmore_max_characters] = 450` - -#### `AppConfig[:pui_expand_all]` - -Whether to expand all additional information blocks at the bottom of record pages by default. `true` expands all blocks, `false` collapses all blocks. - -`AppConfig[:pui_expand_all] = false` - -#### `AppConfig[:max_search_columns]` - -Use to specify the maximum number of columns to display when searching or browsing - -`AppConfig[:max_search_columns] = 7` diff --git a/customization/customizing_PUI_examples/.DS_Store b/customization/customizing_PUI_examples/.DS_Store new file mode 100644 index 00000000..30c06c74 Binary files /dev/null and b/customization/customizing_PUI_examples/.DS_Store differ diff --git a/customization/customizing_PUI_examples/README.md b/customization/customizing_PUI_examples/README.md deleted file mode 100644 index 09b3c8d8..00000000 --- a/customization/customizing_PUI_examples/README.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -permalink: /customization/customizing_PUI_examples/ ---- - -# Customizing the Public User Interface: Simple How-tos - -This is a collection of examples for simple how-tos for customizing the Public User Interface - -There is a presentation with more explanation of the examples contained here - -## Configuration examples - -The configuration file changes can be found in the config.rb file by searching for the string "# Example " with the corresponding letter included in the presentation - -## Plugins Examples - -Most of the plugin examples are included in the plugins/local directory: -* Customize Field and Option Labels -* Customize Branding Image -* Move Branding Image from Right to Left -* Add a Home Link to the Navigation Tool Bar -* Change Colors -* Change Icons - -There is a separate help_page_pui plugin that adds a help page diff --git a/customization/customizing_PUI_examples/plugins/.DS_Store b/customization/customizing_PUI_examples/plugins/.DS_Store 
new file mode 100644 index 00000000..9f34f512 Binary files /dev/null and b/customization/customizing_PUI_examples/plugins/.DS_Store differ diff --git a/customization/customizing_PUI_examples/plugins/help_page_pui/.DS_Store b/customization/customizing_PUI_examples/plugins/help_page_pui/.DS_Store new file mode 100644 index 00000000..b7a36389 Binary files /dev/null and b/customization/customizing_PUI_examples/plugins/help_page_pui/.DS_Store differ diff --git a/customization/customizing_PUI_examples/plugins/help_page_pui/public/.DS_Store b/customization/customizing_PUI_examples/plugins/help_page_pui/public/.DS_Store new file mode 100644 index 00000000..08bccb61 Binary files /dev/null and b/customization/customizing_PUI_examples/plugins/help_page_pui/public/.DS_Store differ diff --git a/customization/customizing_PUI_examples/plugins/help_page_pui/public/views/.DS_Store b/customization/customizing_PUI_examples/plugins/help_page_pui/public/views/.DS_Store new file mode 100644 index 00000000..181e944a Binary files /dev/null and b/customization/customizing_PUI_examples/plugins/help_page_pui/public/views/.DS_Store differ diff --git a/customization/customizing_PUI_examples/plugins/local/.DS_Store b/customization/customizing_PUI_examples/plugins/local/.DS_Store new file mode 100644 index 00000000..b7a36389 Binary files /dev/null and b/customization/customizing_PUI_examples/plugins/local/.DS_Store differ diff --git a/customization/customizing_PUI_examples/plugins/local/public/.DS_Store b/customization/customizing_PUI_examples/plugins/local/public/.DS_Store new file mode 100644 index 00000000..f07f43eb Binary files /dev/null and b/customization/customizing_PUI_examples/plugins/local/public/.DS_Store differ diff --git a/customization/customizing_PUI_examples/plugins/local/public/views/_shared/_header.html.erb b/customization/customizing_PUI_examples/plugins/local/public/views/_shared/_header.html.erb deleted file mode 100644 index 5ede4193..00000000 --- 
a/customization/customizing_PUI_examples/plugins/local/public/views/_shared/_header.html.erb +++ /dev/null @@ -1,11 +0,0 @@ - diff --git a/customization/index.html b/customization/index.html new file mode 100644 index 00000000..5aaedfc2 --- /dev/null +++ b/customization/index.html @@ -0,0 +1,151 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + customization/README.md + +

+ +

+ + Report issue on Jira + customization/README.md + +

+
+
+ +

ArchivesSpace customization and configuration

+ + + + +
+ +
+ + + diff --git a/customization/ldap.html b/customization/ldap.html new file mode 100644 index 00000000..25960e99 --- /dev/null +++ b/customization/ldap.html @@ -0,0 +1,203 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + customization/ldap.md + +

+ +

+ + Report issue on Jira + customization/ldap.md + +

+
+
+ +

Configuring LDAP authentication

+ +

ArchivesSpace can manage its own user directory, but can also be +configured to authenticate against one or more LDAP directories by +specifying them in the application’s configuration file. When a user +attempts to log in, each authentication source is tried until one +matches.

+ +

Here is a minimal example of an LDAP configuration:

+ +
 AppConfig[:authentication_sources] = [{
+                                         :model => 'LDAPAuth',
+                                         :hostname => 'ldap.example.com',
+                                         :port => 389,
+                                         :base_dn => 'ou=people,dc=example,dc=com',
+                                         :username_attribute => 'uid',
+                                         :attribute_map => {:cn => :name},
+ }]
+
+ +

With this configuration, ArchivesSpace performs authentication by +connecting to ldap://ldap.example.com:389/, binding anonymously, +searching the ou=people,dc=example,dc=com tree for uid = <username>.

+ +

If the user is found, ArchivesSpace authenticates them by +binding using the password specified. Finally, the :attribute_map +entry specifies how LDAP attributes should be mapped to ArchivesSpace +user attributes (mapping LDAP’s cn to ArchivesSpace’s name in the +above example).

+ +

Many LDAP directories don’t support anonymous binding. To integrate +with such a directory, you will need to specify the username and +password of a user with permission to connect to the directory and +search for other users. Modifying the previous example for this case +looks like this:

+ +
 AppConfig[:authentication_sources] = [{
+                                         :model => 'LDAPAuth',
+                                         :hostname => 'ldap.example.com',
+                                         :port => 389,
+                                         :base_dn => 'ou=people,dc=example,dc=com',
+                                         :username_attribute => 'uid',
+                                         :attribute_map => {:cn => :name},
+                                         :bind_dn => 'uid=archivesspace_auth,ou=system,dc=example,dc=com',
+                                         :bind_password => 'secretsquirrel',
+ }]
+
+ +

Finally, some LDAP directories enforce the use of SSL encryption. To +configure ArchivesSpace to connect via LDAPS, change the port as +appropriate and specify the encryption option:

+ +
 AppConfig[:authentication_sources] = [{
+                                         :model => 'LDAPAuth',
+                                         :hostname => 'ldap.example.com',
+                                         :port => 636,
+                                         :base_dn => 'ou=people,dc=example,dc=com',
+                                         :username_attribute => 'uid',
+                                         :attribute_map => {:cn => :name},
+                                         :bind_dn => 'uid=archivesspace_auth,ou=system,dc=example,dc=com',
+                                         :bind_password => 'secretsquirrel',
+                                         :encryption => :simple_tls,
+ }]
+
+ + +
+ +
+ + + diff --git a/customization/ldap.md b/customization/ldap.md deleted file mode 100644 index aad5d127..00000000 --- a/customization/ldap.md +++ /dev/null @@ -1,63 +0,0 @@ -# Configuring LDAP authentication - -ArchivesSpace can manage its own user directory, but can also be -configured to authenticate against one or more LDAP directories by -specifying them in the application's configuration file. When a user -attempts to log in, each authentication source is tried until one -matches. - -Here is a minimal example of an LDAP configuration: - - AppConfig[:authentication_sources] = [{ - :model => 'LDAPAuth', - :hostname => 'ldap.example.com', - :port => 389, - :base_dn => 'ou=people,dc=example,dc=com', - :username_attribute => 'uid', - :attribute_map => {:cn => :name}, - }] - -With this configuration, ArchivesSpace performs authentication by -connecting to `ldap://ldap.example.com:389/`, binding anonymously, -searching the `ou=people,dc=example,dc=com` tree for `uid = `. - -If the user is found, ArchivesSpace authenticates them by -binding using the password specified. Finally, the `:attribute_map` -entry specifies how LDAP attributes should be mapped to ArchivesSpace -user attributes (mapping LDAP's `cn` to ArchivesSpace's `name` in the -above example). - -Many LDAP directories don't support anonymous binding. To integrate -with such a directory, you will need to specify the username and -password of a user with permission to connect to the directory and -search for other users. Modifying the previous example for this case -looks like this: - - - AppConfig[:authentication_sources] = [{ - :model => 'LDAPAuth', - :hostname => 'ldap.example.com', - :port => 389, - :base_dn => 'ou=people,dc=example,dc=com', - :username_attribute => 'uid', - :attribute_map => {:cn => :name}, - :bind_dn => 'uid=archivesspace_auth,ou=system,dc=example,dc=com', - :bind_password => 'secretsquirrel', - }] - - -Finally, some LDAP directories enforce the use of SSL encryption. 
To -configure ArchivesSpace to connect via LDAPS, change the port as -appropriate and specify the `encryption` option: - - AppConfig[:authentication_sources] = [{ - :model => 'LDAPAuth', - :hostname => 'ldap.example.com', - :port => 636, - :base_dn => 'ou=people,dc=example,dc=com', - :username_attribute => 'uid', - :attribute_map => {:cn => :name}, - :bind_dn => 'uid=archivesspace_auth,ou=system,dc=example,dc=com', - :bind_password => 'secretsquirrel', - :encryption => :simple_tls, - }] diff --git a/customization/locales.html b/customization/locales.html new file mode 100644 index 00000000..191f5d06 --- /dev/null +++ b/customization/locales.html @@ -0,0 +1,212 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + customization/locales.md + +

+ +

+ + Report issue on Jira + customization/locales.md + +

+
+
+ +

Customizing text in ArchivesSpace

+ +

ArchivesSpace has abstracted all the labels, messages and tooltips out of the +application into the locale files, which are part of the +Rails Internationalization (I18n) API. +The locales in this directory represent the +basis of translations for use by all Archives Space applications. Each +application may then add to or override these values with their own locales files.

+ +

For a guide on managing these “i18n” files, please visit http://guides.rubyonrails.org/i18n.html

+ +

You can see the source files for both the Staff Frontend Application and +Public Application. There is also a common locale file for some values used throughout the ArchivesSpace applications.

+ +

The base translations are broken up:

+ +
    +
  • The top most file “en.yml” contains the translations for all the record labels, messages and tooltips in English
  • +
  • “enums/en.yml” contains the entries for the dynamic enumeration codes - add your translations to this file after importing your enumeration codes
  • +
+ +

These values are pulled into the views using the I18n.t() method, like I18n.t(“brand.welcome_message”).

+ +

If the value you want to override is in the common locale file (like the “digital object title” field label, for example) , you can change this by simply editing the locales/en.yml file in your ArchivesSpace distribution home directory. A restart is required to have the changes take effect.

+ +

If the value you want to change is in either the public or staff specific en.yml files, you can override these values using the plugins directory. For example, if you want to change the welcome message on the public frontend, make a file in your ArchivesSpace distribution called ‘plugins/local/public/locales/en.yml’ and put the following values:

+ +
en:
+	brand:
+	title: My Archive
+	home: Home
+ 		welcome_message: HEY HEY HEY!!
+
+ +

If you restart ArchivesSpace, these values will take effect.

+ +

If you are adding a new value you will also need to add the value into the Staff Frontend Application by clicking on the System dropdown menu and choosing Manage Controlled Value Lists. Select the list and add the value. If you restart ArchivesSpace the translation value that you set in the yml file should appear.

+ +

If you’re using a different language, simply swap out the en.yml for something else ( like fr.yml ) and update locale setting in the config.rb file ( i.e., AppConfig[:locale] = :fr )

+ +

Tooltips

+ +

To add a tooltip to a record label, simply add a new entry with “_tooltip” +appended to the label’s code. For example, to add a tooltip for the Accession’s +Title field:

+ +
en:
+  accession:
+    title: Title
+    title_tooltip: |
+        <p>The title assigned to an accession or resource. The accession title
+        need not be the same as the resource title. Moreover, a title need not
+        be expressed for the accession record, as it can be implicitly
+        inherited from the resource record to which the accession is
+        linked.</p>
+
+ +

Placeholders

+ +

For text fields or text areas, you may like to have some placeholder text to be +displayed when the field is empty (for more details see +http://www.w3.org/html/wg/drafts/html/master/forms.html#the-placeholder-attribute). +Please note while most modern browser releases support this feature, +older version will not.

+ +

To add a placeholder to a record’s text field, add a new entry of the label’s +code append with “_placeholder”. For example:

+ +
en:
+  accession:
+    title: Title
+    title_placeholder: See DACS 2.3.18-2.3.22
+
+ + +
+ +
+ + + diff --git a/customization/locales.md b/customization/locales.md deleted file mode 100644 index c021be23..00000000 --- a/customization/locales.md +++ /dev/null @@ -1,73 +0,0 @@ -# Customizing text in ArchivesSpace - -ArchivesSpace has abstracted all the labels, messages and tooltips out of the -application into the locale files, which are part of the -[Rails Internationalization (I18n)](http://guides.rubyonrails.org/i18n.html) API. -The locales in this directory represent the -basis of translations for use by all Archives Space applications. Each -application may then add to or override these values with their own locales files. - -For a guide on managing these "i18n" files, please visit http://guides.rubyonrails.org/i18n.html - -You can see the source files for both the [Staff Frontend Application](https://github.com/archivesspace/archivesspace/tree/master/frontend/config/locales) and -[Public Application](https://github.com/archivesspace/archivesspace/tree/master/public/config/locales). There is also a [common locale file](https://github.com/archivesspace/archivesspace/blob/master/common/locales/en.yml) for some values used throughout the ArchivesSpace applications. - -The base translations are broken up: - - * The top most file "en.yml" contains the translations for all the record labels, messages and tooltips in English - * "enums/en.yml" contains the entries for the dynamic enumeration codes - add your translations to this file after importing your enumeration codes - -These values are pulled into the views using the I18n.t() method, like I18n.t("brand.welcome_message"). - -If the value you want to override is in the common locale file (like the "digital object title" field label, for example) , you can change this by simply editing the locales/en.yml file in your ArchivesSpace distribution home directory. A restart is required to have the changes take effect. 
- -If the value you want to change is in either the public or staff specific en.yml files, you can override these values using the plugins directory. For example, if you want to change the welcome message on the public frontend, make a file in your ArchivesSpace distribution called 'plugins/local/public/locales/en.yml' and put the following values: - - en: - brand: - title: My Archive - home: Home - welcome_message: HEY HEY HEY!! - -If you restart ArchivesSpace, these values will take effect. - -If you are adding a new value you will also need to add the value into the Staff Frontend Application by clicking on the System dropdown menu and choosing Manage Controlled Value Lists. Select the list and add the value. If you restart ArchivesSpace the translation value that you set in the yml file should appear. - -If you're using a different language, simply swap out the en.yml for something else ( like fr.yml ) and update locale setting in the config.rb file ( i.e., AppConfig[:locale] = :fr ) - -## Tooltips - -To add a tooltip to a record label, simply add a new entry with "\_tooltip" -appended to the label's code. For example, to add a tooltip for the Accession's -Title field: - -``` -en: - accession: - title: Title - title_tooltip: | -

The title assigned to an accession or resource. The accession title - need not be the same as the resource title. Moreover, a title need not - be expressed for the accession record, as it can be implicitly - inherited from the resource record to which the accession is - linked.

-``` - -## Placeholders - -For text fields or text areas, you may like to have some placeholder text to be -displayed when the field is empty (for more details see -http://www.w3.org/html/wg/drafts/html/master/forms.html#the-placeholder-attribute). -Please note while most modern browser releases support this feature, -older version will not. - -To add a placeholder to a record's text field, add a new entry of the label's -code append with "\_placeholder". For example: - - -``` -en: - accession: - title: Title - title_placeholder: See DACS 2.3.18-2.3.22 -``` diff --git a/customization/plugins.html b/customization/plugins.html new file mode 100644 index 00000000..0f475f14 --- /dev/null +++ b/customization/plugins.html @@ -0,0 +1,419 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + customization/plugins.md + +

+ +

+ + Report issue on Jira + customization/plugins.md + +

+
+
+ +

ArchivesSpace Plug-ins

+ +

Plug-ins are a powerful feature, designed to allow you to change +most aspects of how the application behaves.

+ +

Plug-ins provide a mechanism to customize ArchivesSpace by overriding or extending functions +without changing the core codebase. As they are self-contained, they also permit the ready +sharing of packages of customization between ArchivesSpace instances.

+ +

The ArchivesSpace distribution comes with the hello_world exemplar plug-in. Please refer to its README file for a detailed description of how it is constructed and implemented.

+ +

You can find other examples in the following plugin repositories. The ArchivesSpace plugins that are officially supported and maintained by the ArchivesSpace Program Team are in archivesspace-plugins (https://github.com/archivesspace-plugins). Deprecated code which is no longer supported but has been kept for future reference is in archivesspace-deprecated (https://github.com/archivesspace-deprecated). There is an open/unmanaged GitHub repository where community members can share their code called archivesspace-labs (https://github.com/archivesspace-labs). The community developed Python library for interacting with the ArchivesSpace API, called ArchivesSnake, is managed in the archivesspace-labs repository.

+ +

Enabling plugins

+ +

Plug-ins are enabled by placing them in the plugins directory, and referencing them in the +ArchivesSpace configuration, config/config.rb. For example:

+ +
AppConfig[:plugins] = ['local', 'hello_world', 'my_plugin']
+
+ +

This configuration assumes the following directories exist:

+ +
plugins
+  hello_world
+  local
+  my_plugin
+
+ +

Note that the order that the plug-ins are listed in the :plugins configuration option +determines the order in which they are loaded by the application.

+ +

Plugin structure

+ +

The directory structure within a plug-in is similar to the structure of the core application. +The following shows the supported plug-in structure. Files contained in these directories can +be used to override or extend the behavior of the core application.

+ +
backend
+  controllers ......... backend endpoints
+  model ............... database mapping models
+  converters .......... classes for importing data
+  job_runners ......... classes for defining background jobs
+  plugin_init.rb ...... if present, loaded when the backend first starts
+  lib/bulk_import ..... bulk import processor
+frontend
+  assets .............. static assets (such as images, javascript) in the staff interface
+  controllers ......... controllers for the staff interface
+  locales ............. locale translations for the staff interface
+  views ............... templates for the staff interface
+  plugin_init.rb ...... if present, loaded when the staff interface first starts
+public
+  assets .............. static assets (such as images, javascript) in the public interface
+  controllers ......... controllers for the public interface
+  locales ............. locale translations for the public interface
+  views ............... templates for the public interface
+  plugin_init.rb ...... if present, loaded when the public interface first starts
+migrations ............ database migrations
+schemas ............... JSONModel schema definitions
+search_definitions.rb . Advanced search fields
+
+ +

Note that backend/lib/bulk_import is the only directory in backend/lib/ that is loaded by the plugin manager. Other files in backend/lib/ will not be loaded during startup.

+ +

Note that, in order to override or extend the behavior of core models and controllers, you cannot simply put your replacement with the same name in the corresponding directory path. Core models and controllers can be overridden by adding an after_initialize block to plugin_init.rb (e.g. aspace-hvd-pui).

+ +

Overriding behavior

+ +

A general rule is: to override behavior, rather than extend it, match the path +to the file that contains the behavior to be overridden.

+ +

It is not necessary for a plug-in to have all of these directories. For example, to override +some part of a locale file for the staff interface, you can just add the following structure +to the local plug-in:

+ +
plugins/local/frontend/locales/en.yml
+
+ +

More detailed information about overriding locale files is found in Customizing text in ArchivesSpace

+ +

Overriding the visual (web) presentation

+ +

You can directly override any view file in the core application by placing an erb file of the same name in the analogous path. +For example, if you want to override the appearance of the “Welcome” [home] page of the Public User Interface, you can make your changes to a file show.html.erb and place it at plugins/my_fine_plugin/public/views/welcome/show.html.erb. (Where my_fine_plugin is the name of your plugin)

+ +

Implementing a broadly-applied style or javascript change

+ +

Unless you want to write inline style or javascript (which may be practicable for a template or two), best practice is to create plugins/my_fine_plugin/public/views/layout_head.html.erb or plugins/my_fine_plugin/frontend/views/layout_head.html.erb, which contains the HTML statements to incorporate your javascript or css into the <HEAD> element of the template. Here’s an example:

+ +
    +
  • For the public interface, I want to change the size of the text in all links when the user is hovering. +
      +
    • I create plugins/my_fine_plugin/public/assets/my.css: +
            a:hover {font-size: 2em;}
      +
      +
    • +
    • I create plugins/my_fine_plugin/public/views/layout_head.html.erb, and insert: +
      <%= stylesheet_link_tag "#{@base_url}/assets/my.css", media: :all %>
      +
      +
    • +
    +
  • +
  • For the public interface, I want to add some javascript behavior such that, when the user hovers over a list item, asterisks appear +
      +
    • I create plugins/my_fine_plugin/public/assets/my.js: +
        $(function() {
      +     $( "li" ).hover(
      +       function() {
      +          $( this ).append( $( "<span> ***</span>" ) );
      +      }, function() {
      +     $( this ).find( "span:last" ).remove();
      +      }
      +    );
      +   }
      +
      +
    • +
    • I add to plugins/my_fine_plugin/public/views/layout_head.html.erb: +
       <%= javascript_include_tag "#{@base_url}/assets/my.js" %>
      +
      +

      Adding your own branding

      +
    • +
    +
  • +
+ +

Another example, to override the branding of the staff interface, add +your own template at:

+ +
plugins/local/frontend/views/site/\_branding.html.erb
+
+ +

Files such as images, stylesheets and PDFs can be made available as static resources by +placing them in an assets directory under an enabled plug-in. For example, the following file:

+ +
plugins/local/frontend/assets/my_logo.png
+
+ +

Will be available via the following URL:

+ +
http://your.frontend.domain.and:port/assets/my_logo.png
+
+ +

For example, to reference this logo from the custom branding file, use +markup such as:

+ +
 <div class="container branding">
+   <img src="<%= #{AppConfig[:frontend_proxy_prefix]} %>assets/my_logo.png" alt="My logo" />
+ </div>
+
+ +

Plugin configuration

+ +

Plug-ins can optionally contain a configuration file at plugins/[plugin-name]/config.yml. +This configuration file supports the following options:

+ +
system_menu_controller
+  The name of a controller that will be accessible via a Plug-ins menu in the System toolbar
+repository_menu_controller
+  The name of a controller that will be accessible via a Plug-ins menu in the Repository toolbar
+parents
+  [record-type]
+    name
+    cardinality
+  ...
+
+ +

system_menu_controller and repository_menu_controller specify the names of frontend controllers +that will be accessible via the system and repository toolbars respectively. A Plug-ins dropdown +will appear in the toolbars if any enabled plug-ins have declared these configuration options. The +controller name follows the standard naming conventions, for example:

+ +
repository_menu_controller: hello_world
+
+ +

Points to a controller file at plugins/hello_world/frontend/controllers/hello_world_controller.rb +which implements a controller class called HelloWorldController. When the menu item is selected +by the user, the index action is called on the controller.

+ +

Note that the URLs for plug-in controllers are scoped under plugins, so the URL for the above +example is:

+ +
http://your.frontend.domain.and:port/plugins/hello_world
+
+ +

Also note that the translation for the plug-in’s name in the Plug-ins dropdown menu is specified +in a locale file in the frontend/locales directory in the plug-in. For example, in the hello_world +example there is an English locale file at:

+ +
plugins/hello_world/frontend/locales/en.yml
+
+ +

The translation for the plug-in name in the Plug-ins dropdown menus is specified by the key label +under the plug-in, like this:

+ +
en:
+  plugins:
+    hello_world:
+      label: Hello World
+
+ +

Note that the example locale file contains other keys that specify translations for text displayed +as part of the plug-in’s user interface. Be sure to place your plug-in’s translations as shown, under +plugins.[your_plugin_name] in order to avoid accidentally overriding translations for other +interface elements. In the example above, the translation for the label key can be referenced +directly in an erb view file as follows:

+ +
<%= I18n.t("plugins.hello_world.label") %>
+
+ +

Each entry under parents specifies a record type that this plug-in provides a new subrecord for. +[record-type] is the name of the existing record type, for example accession. name is the +name of the plug-in in its role as a subrecord of this parent, for example hello_worlds. +cardinality specifies the cardinality of the plug-in records. Currently supported values are +zero-to-many and zero-to-one.

+ +

Changing search behavior

+ +

A plugin can add additional fields to the advanced search interface by +including a search_definitions.rb file at the top-level of the +plugin directory. This file can contain definitions such as the +following:

+ +
AdvancedSearch.define_field(:name => 'payment_fund_code', :type => :enum, :visibility => [:staff], :solr_field => 'payment_fund_code_u_utext')
+AdvancedSearch.define_field(:name => 'payment_authorizers', :type => :text, :visibility => [:staff], :solr_field => 'payment_authorizers_u_utext')
+
+ +

Each field defined will appear in the advanced search interface as a +searchable field. The :visibility option controls whether the field +is presented in the staff or public interface (or both), while the +:type parameter determines what sort of search is being performed. +Valid values are :text, :boolean, :date and :enum. Finally, +the :solr_field parameter controls which field is used from the +underlying index.

+ +

Adding Custom Reports

+ +

Custom reports may be added to plug-ins by adding a new report model as a subclass of AbstractReport to plugins/[plugin-name]/backend/model/, and the translations for said model to plugins/[plugin-name]/frontend/locales/[language].yml. Look to existing reports in reports subdirectory of the ArchivesSpace base directory for examples of how to structure a report model.

+ +

There are several limitations to adding reports to plug-ins, including that reports from plug-ins may only use the generic report template. ArchivesSpace only searches for report templates in the reports subdirectory of the ArchivesSpace base directory, not in plug-in directories. If you would like to implement a custom report with a custom template, consider adding the report to archivesspace/reports/ instead of archivesspace/plugins/[plugin-name]/backend/model/.

+ +

Frontend Specific Hooks

+ +

To make adding new record fields and sections to record forms a little easier via your plugin, the ArchivesSpace frontend provides a series of hooks via the frontend/config/initializers/plugin.rb module. These are as follows:

+ +
    +
  • +

    Plugins.add_search_base_facets(*facets) - add to the base facets list to include extra facets for all record searches and listing pages.

    +
  • +
  • +

    Plugins.add_search_facets(jsonmodel_type, *facets) - add facets for a particular JSONModel type to be included in searches and listing pages for that record type.

    +
  • +
  • +

    Plugins.add_resolve_field(field_name) - use this when you have added a new field/relationship and you need it to be resolved when the record is retrieved from the API.

    +
  • +
  • +

    Plugins.register_edit_role_for_type(jsonmodel_type, role) - when you add a new top level JSONModel, register it and its edit role so the listing view can determine if the “Edit” button can be displayed to the user.

    +
  • +
  • +

    Plugins.register_note_types_handler(proc) where proc handles parameters jsonmodel_type, note_types, context - allow a plugin to customize the note types shown for particular JSONModel type. For example, you can filter those that do not apply to your institution.

    +
  • +
  • +

    Plugins.register_plugin_section(section) - allows you to define a template to be inserted as a section for a given JSONModel record. A section is a type of Plugins::AbstractPluginSection which defines the source plugin, section name, the jsonmodel_types for which the section should show and any opts required by the templates at the time of render. These new sections (readonly, edit and sidebar additions) are output as part of the PluginHelper render methods.

    + +

    Plugins::AbstractPluginSection can be subclassed to allow flexible inclusion of arbitrary HTML. There are two examples provided with ArchivesSpace:

    + +
      +
    • +

      Plugins::PluginSubRecord - uses the shared/subrecord partial to output a standard styled ArchivesSpace section. opts requires the jsonmodel field to be defined.

      +
    • +
    • +

      Plugins::PluginReadonlySearch - uses the search/embedded partial to output a search listing as a section. opts requires the custom filter terms for this search to be defined.

      +
    • +
    +
  • +
+ +

Further information

+ +

Be sure to test your plug-in thoroughly as it may have unanticipated impacts on your +ArchivesSpace application.

+ + +
+ +
+ + + diff --git a/customization/plugins.md b/customization/plugins.md deleted file mode 100644 index 9c607683..00000000 --- a/customization/plugins.md +++ /dev/null @@ -1,248 +0,0 @@ -# ArchivesSpace Plug-ins - -Plug-ins are a powerful feature, designed to allow you to change -most aspects of how the application behaves. - -Plug-ins provide a mechanism to customize ArchivesSpace by overriding or extending functions -without changing the core codebase. As they are self-contained, they also permit the ready -sharing of packages of customization between ArchivesSpace instances. - -The ArchivesSpace distribution comes with the `hello_world` exemplar plug-in. Please refer to its [README file](https://github.com/archivesspace/archivesspace/blob/master/plugins/hello_world/README.md) for a detailed description of how it is constructed and implemented. - -You can find other examples in the following plugin repositories. The ArchivesSpace plugins that are officially supported and maintained by the ArchivesSpace Program Team are in archivesspace-plugins (https://github.com/archivesspace-plugins). Deprecated code which is no longer supported but has been kept for future reference is in archivesspace-deprecated (https://github.com/archivesspace-deprecated). There is an open/unmanaged GitHub repository where community members can share their code called archivesspace-labs (https://github.com/archivesspace-labs). The community developed Python library for interacting with the ArchivesSpace API, called ArchivesSnake, is managed in the archivesspace-labs repository. - -## Enabling plugins - -Plug-ins are enabled by placing them in the `plugins` directory, and referencing them in the -ArchivesSpace configuration, `config/config.rb`. 
For example: - - AppConfig[:plugins] = ['local', 'hello_world', 'my_plugin'] - -This configuration assumes the following directories exist: - - plugins - hello_world - local - my_plugin - -Note that the order that the plug-ins are listed in the `:plugins` configuration option -determines the order in which they are loaded by the application. - -## Plugin structure - -The directory structure within a plug-in is similar to the structure of the core application. -The following shows the supported plug-in structure. Files contained in these directories can -be used to override or extend the behavior of the core application. - - backend - controllers ......... backend endpoints - model ............... database mapping models - converters .......... classes for importing data - job_runners ......... classes for defining background jobs - plugin_init.rb ...... if present, loaded when the backend first starts - lib/bulk_import ..... bulk import processor - frontend - assets .............. static assets (such as images, javascript) in the staff interface - controllers ......... controllers for the staff interface - locales ............. locale translations for the staff interface - views ............... templates for the staff interface - plugin_init.rb ...... if present, loaded when the staff interface first starts - public - assets .............. static assets (such as images, javascript) in the public interface - controllers ......... controllers for the public interface - locales ............. locale translations for the public interface - views ............... templates for the public interface - plugin_init.rb ...... if present, loaded when the public interface first starts - migrations ............ database migrations - schemas ............... JSONModel schema definitions - search_definitions.rb . Advanced search fields - -**Note** that `backend/lib/bulk_import` is the only directory in `backend/lib/` that is loaded by the plugin manager. 
Other files in `backend/lib/` will not be loaded during startup. - -**Note** that, in order to override or extend the behavior of core models and controllers, you cannot simply put your replacement with the same name in the corresponding directory path. Core models and controllers can be overridden by adding an `after_initialize` block to `plugin_init.rb` (e.g. [aspace-hvd-pui](https://github.com/harvard-library/aspace-hvd-pui/blob/master/public/plugin_init.rb#L43)). - -## Overriding behavior - -A general rule is: to override behavior, rather then extend it, match the path -to the file that contains the behavior to be overridden. - -It is not necessary for a plug-in to have all of these directories. For example, to override -some part of a locale file for the staff interface, you can just add the following structure -to the local plug-in: - - plugins/local/frontend/locales/en.yml - -More detailed information about overriding locale files is found in [Customizing text in ArchivesSpace](./locales.html) - - -## Overriding the visual (web) presentation - -You can directly override any view file in the core application by placing an erb file of the same name in the analogous path. -For example, if you want to override the appearance of the "Welcome" [home] page of the Public User Interface, you can make your changes to a file `show.html.erb` and place it at `plugins/my_fine_plugin/public/views/welcome/show.html.erb`. (Where *my_fine_plugin* is the name of your plugin) - -### Implementing a broadly-applied style or javascript change - -Unless you want to write inline style or javascript (which may be practiceable for a template or two), best practice is to create `plugins/my_fine_plugin/public/views/layout_head.html.erb` or `plugins/my_fine_plugin/frontend/views/layout_head.html.erb`, which contains the HTML statements to incorporate your javascript or css into the `` element of the template. 
Here's an example: - -* For the public interface, I want to change the size of the text in all links when the user is hovering. - - I create `plugins/my_fine_plugin/public/assets/my.css`: - ```css - a:hover {font-size: 2em;} - ``` - - I create `plugins/my_fine_plugin/public/views/layout_head.html.erb`, and insert: - ```ruby - <%= stylesheet_link_tag "#{@base_url}/assets/my.css", media: :all %> - ``` -* For the public interface, I want to add some javascript behavior such that, when the user hovers over a list item, astericks appear - - I create `plugins/my_fine_plugin/public/assets/my.js`" - ```javascript - $(function() { - $( "li" ).hover( - function() { - $( this ).append( $( " ***" ) ); - }, function() { - $( this ).find( "span:last" ).remove(); - } - ); - } - ``` - - I add to `plugins/my_fine_plugin/public/views/layout_head.html.erb`: - ```ruby - <%= javascript_include_tag "#{@base_url}/assets/my.js" %> - ``` -## Adding your own branding - - -Another example, to override the branding of the staff interface, add -your own template at: - - plugins/local/frontend/views/site/\_branding.html.erb - -Files such as images, stylesheets and PDFs can be made available as static resources by -placing them in an `assets` directory under an enabled plug-in. For example, the following file: - - plugins/local/frontend/assets/my_logo.png - -Will be available via the following URL: - - http://your.frontend.domain.and:port/assets/my_logo.png - -For example, to reference this logo from the custom branding file, use -markup such as: - -
- My logo -
- - -## Plugin configuration - -Plug-ins can optionally contain a configuration file at `plugins/[plugin-name]/config.yml`. -This configuration file supports the following options: - - system_menu_controller - The name of a controller that will be accessible via a Plug-ins menu in the System toolbar - repository_menu_controller - The name of a controller that will be accessible via a Plug-ins menu in the Repository toolbar - parents - [record-type] - name - cardinality - ... - -`system_menu_controller` and `repository_menu_controller` specify the names of frontend controllers -that will be accessible via the system and repository toolbars respectively. A `Plug-ins` dropdown -will appear in the toolbars if any enabled plug-ins have declared these configuration options. The -controller name follows the standard naming conventions, for example: - - repository_menu_controller: hello_world - -Points to a controller file at `plugins/hello_world/frontend/controllers/hello_world_controller.rb` -which implements a controller class called `HelloWorldController`. When the menu item is selected -by the user, the `index` action is called on the controller. - -Note that the URLs for plug-in controllers are scoped under `plugins`, so the URL for the above -example is: - - http://your.frontend.domain.and:port/plugins/hello_world - -Also note that the translation for the plug-in's name in the `Plug-ins` dropdown menu is specified -in a locale file in the `frontend/locales` directory in the plug-in. For example, in the `hello_world` -example there is an English locale file at: - - plugins/hello_world/frontend/locales/en.yml - -The translation for the plug-in name in the `Plug-ins` dropdown menus is specified by the key `label` -under the plug-in, like this: - - en: - plugins: - hello_world: - label: Hello World - -Note that the example locale file contains other keys that specify translations for text displayed -as part of the plug-in's user interface. 
Be sure to place your plug-in's translations as shown, under -`plugins.[your_plugin_name]` in order to avoid accidentally overriding translations for other -interface elements. In the example above, the translation for the `label` key can be referenced -directly in an erb view file as follows: - - <%= I18n.t("plugins.hello_world.label") %> - -Each entry under `parents` specifies a record type that this plug-in provides a new subrecord for. -`[record-type]` is the name of the existing record type, for example `accession`. `name` is the -name of the plug-in in its role as a subrecord of this parent, for example `hello_worlds`. -`cardinality` specifies the cardinality of the plug-in records. Currently supported values are -`zero-to-many` and `zero-to-one`. - - -## Changing search behavior - -A plugin can add additional fields to the advanced search interface by -including a `search_definitions.rb` file at the top-level of the -plugin directory. This file can contain definitions such as the -following: - - AdvancedSearch.define_field(:name => 'payment_fund_code', :type => :enum, :visibility => [:staff], :solr_field => 'payment_fund_code_u_utext') - AdvancedSearch.define_field(:name => 'payment_authorizers', :type => :text, :visibility => [:staff], :solr_field => 'payment_authorizers_u_utext') - -Each field defined will appear in the advanced search interface as a -searchable field. The `:visibility` option controls whether the field -is presented in the staff or public interface (or both), while the -`:type` parameter determines what sort of search is being performed. -Valid values are `:text:`, `:boolean`, `:date` and `:enum`. Finally, -the `:solr_field` parameter controls which field is used from the -underlying index. 
- -## Adding Custom Reports - -Custom reports may be added to plug-ins by adding a new report model as a subclass of `AbstractReport` to `plugins/[plugin-name]/backend/model/`, and the translations for said model to `plugins/[plugin-name]/frontend/locales/[language].yml`. Look to existing reports in reports subdirectory of the ArchivesSpace base directory for examples of how to structure a report model. - -There are several limitations to adding reports to plug-ins, including that reports from plug-ins may only use the generic report template. ArchivesSpace only searches for report templates in the reports subdirectory of the ArchivesSpace base directory, not in plug-in directories. If you would like to implement a custom report with a custom template, consider adding the report to `archivesspace/reports/` instead of `archivesspace/plugins/[plugin-name]/backend/model/`. - - -## Frontend Specific Hooks - -To make adding new records fields and sections to record forms a little eaiser via your plugin, the ArchivesSpace frontend provides a series of hooks via the `frontend/config/initializers/plugin.rb` module. These are as follows: - -* `Plugins.add_search_base_facets(*facets)` - add to the base facets list to include extra facets for all record searches and listing pages. - -* `Plugins.add_search_facets(jsonmodel_type, *facets)` - add facets for a particular JSONModel type to be included in searches and listing pages for that record type. - -* `Plugins.add_resolve_field(field_name)` - use this when you have added a new field/relationship and you need it to be resolved when the record is retrieved from the API. - -* `Plugins.register_edit_role_for_type(jsonmodel_type, role)` - when you add a new top level JSONModel, register it and its edit role so the listing view can determine if the "Edit" button can be displayed to the user. 
- -* `Plugins.register_note_types_handler(proc)` where proc handles parameters `jsonmodel_type, note_types, context` - allow a plugin to customize the note types shown for particular JSONModel type. For example, you can filter those that do not apply to your institution. - -* `Plugins.register_plugin_section(section)` - allows you define a template to be inserted as a section for a given JSONModel record. A section is a type of `Plugins::AbstractPluginSection` which defines the source `plugin`, section `name`, the `jsonmodel_types` for which the section should show and any `opts` required by the templates at the time of render. These new sections (readonly, edit and sidebar additions) are output as part of the `PluginHelper` render methods. - - `Plugins::AbstractPluginSection` can be subclassed to allow flexible inclusion of arbitrary HTML. There are two examples provided with ArchivesSpace: - - * `Plugins::PluginSubRecord` - uses the `shared/subrecord` partial to output a standard styled ArchivesSpace section. `opts` requires the jsonmodel field to be defined. - - * `Plugins::PluginReadonlySearch` - uses the `search/embedded` partial to output a search listing as a section. `opts` requires the custom filter terms for this search to be defined. - -## Further information - -**Be sure to test your plug-in thoroughly as it may have unanticipated impacts on your -ArchivesSpace application.** diff --git a/customization/reports.html b/customization/reports.html new file mode 100644 index 00000000..c4df4ddc --- /dev/null +++ b/customization/reports.html @@ -0,0 +1,202 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + customization/reports.md + +

+ +

+ + Report issue on Jira + customization/reports.md + +

+
+
+ +

Reports

+ +

Adding a report is intended to be a fairly simple process. The requirements for creating a report are outlined below.

+ +

Adding a Report

+

Required

+
    +
  • Create a class for your report that is a subclass of AbstractReport.
  • +
  • Call register_report. If your report has any parameters, specify them here.
  • +
  • Implement query_string +
      +
    • This should be a raw SQL string
    • +
    • To prevent SQL injection, use db.literal for any user input i.e. use "select * from table where column = #{db.literal(value)}" instead of "select * from table where column = '#{value}'"
    • +
    +
  • +
  • Provide translations for column headers and the title of your report +
      +
    • They should be in yml files under language.reports.report name
    • +
    • The translation for title should be whatever you want the name of the report to be.
    • +
    • If the translation you want is already in language.reports.translation_defaults (found in the static folder) you do not need to specify it.
    • +
    • Translations specific to the individual report are given priority over translation defaults.
    • +
    +
  • +
+ +

Optional

+
    +
  • Implement your own initializer if your report has any parameters.
  • +
  • Implement fix_row in order to clean up data and add subreports. +
      +
    • Each result will be passed to fix_row as a hash
    • +
    • ReportUtils offers various class methods to simplify cleaning up data.
    • +
    • You can also add subreports here with something like row[:subreport_name] = SubreportClassName.new(self, row[:id]).get_content where row is the result as a hash which was a parameter to fix_row. See Adding a Subreport for more information on adding subreports.
    • +
    • Sometimes you will want to delete something from the result that you needed in order to generate a subreport but do not want to show up in the final report (such as id). To do this use row.delete(:id).
    • +
    +
  • +
  • Special implementation of query - The default implementation is simply db.fetch(query_string) but implementing it yourself may give you more flexibility. In the end, it needs to return a result set.
  • +
  • There is a hash called info that controls what shows up in the header at the top of the report. Examples may include total record count, total extent, or any parameters that are provided by the user for your report. Add anything you want to show up in the report header to info. Repository name will be included automatically. Be sure to provide translations for the keys you add to info.
  • +
  • after_tasks is run after fix_row executes on all the results. Implement this if you have anything that needs to get done here before the report is rendered
  • +
  • Specify identifier_field if you want to add a heading to each individual record. For instance, identifier_field might be :accession_number for a report on accessions.
  • +
  • Implement page_break to be false if you do not want a page break after each record in the PDF of the report.
  • +
  • Implement special_translation if there is anything you want to translate in a special way (i.e. it can’t be accomplished by the yml file).
  • +
+ +

Adding A Subreport

+ +

Required

+
    +
  • Create a class for your subreport that is a subclass of AbstractSubreport.
  • +
  • Create an initializer that takes in the parent report/subreport as well as any parameters you need to run the subreport (usually this is just an id from the result in the parent report/subreport). Your initializer should call super(parent_report).
  • +
  • Implement query_string. This works the same way as it does for reports.
  • +
  • Provide necessary translations.
  • +
+ +

Optional

+
    +
  • Special implementation of query
  • +
  • fix_row works just like in reports +
      +
    • note that you can add nested subreports
    • +
    +
  • +
+ + +
+ +
+ + + diff --git a/customization/reports.md b/customization/reports.md deleted file mode 100644 index 849dd79f..00000000 --- a/customization/reports.md +++ /dev/null @@ -1,43 +0,0 @@ -# Reports - -Adding a report is intended to be a fairly simple process. The requirements for creating a report are outlined below. - -## Adding a Report -### Required -- Create a class for your report that is a subclass of AbstractReport. -- Call register_report. If your report has any parameters, specify them here. -- Implement query_string - - This should be a raw SQL string - - To prevent SQL injection, use db.literal for any user input i.e. use ```"select * from table where column = #{db.literal(value)}" ``` instead of ```"select * from table where column = '#{value}'"``` -- Provide translations for column headers and the title of your report - - They should be in yml files under *language*.reports.*report name* - - The translation for title should be whatever you want the name of the report to be. - - If the translation you want is already in *language*.reports.translation_defaults (found in the static folder) you do not need to specify it. - - Translations specific to the individual report are given priority over translation defaults. - -### Optional -- Implement your own initializer if your report has any parameters. -- Implement fix_row in order to clean up data and add subreports. - - Each result will be passed to fix_row as a hash - - ReportUtils offers various class methods to simplify cleaning up data. - - You can also add subreports here with something like ```row[:subreport_name] = SubreportClassName.new(self, row[:id]).get_content``` where row is the result as a hash which was a parameter to fix_row. See [Adding a Subreport](#adding-a-subreport) for more information on adding subreports. - - Sometimes you will want to delete something from the result that you needed in order to generate a subreport but do not want to show up in the final report (such as id). 
To do this use ```row.delete(:id)```. -- Special implementation of query - The default implementation is simply ```db.fetch(query_string)``` but implementing it yourself may give you more flexibility. In the end, it needs to return a result set. -- There is a hash called info that controls what shows up in the header at the top of the report. Examples may include total record count, total extent, or any parameters that are provided by the user for your report. Add anything you want to show up in the report header to info. Repository name will be included automatically. Be sure to provide translations for the keys you add to info. -- after_tasks is run after fix_row executes on all the results. Implement this if you have anything that needs to get done here before the report is rendered -- Specify identifier_field if you want to add a heading to each individual record. For instance, identifier_field might be ```:accession_number``` for a report on accessions. -- Implement page_break to be false if you do not want a page break after each record in the PDF of the report. -- Implement special_translation if there is anything you want translate in a special way (i.e. it can't be accomplished by the yml file). - -## Adding A Subreport - -### Required -- Create a class for your subreport that is a subclass of AbstractSubreport. -- Create an initializer that takes in the parent report/subreport as well as any parameters you need to run the subreport (usually this is just an id from the result in the parent report/subreport). Your initializer should call ```super(parent_report)```. -- Implement query_string. This works the same way as it does for reports. -- Provide necessary translations. 
- -### Optional -- Special implementation of query -- fix_row works just like in reports - - note that you can add nested subreports diff --git a/customization/theming.html b/customization/theming.html new file mode 100644 index 00000000..c34ce5d9 --- /dev/null +++ b/customization/theming.html @@ -0,0 +1,271 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + customization/theming.md + +

+ +

+ + Report issue on Jira + customization/theming.md + +

+
+
+ +

Theming ArchivesSpace

+ +

Making small changes

+ +

It’s easiest to use a plugin for small changes to your site’s theme. With a plugin, +we can override default views, controllers, models, etc. without having to do a +complete rebuild of the source code. Be sure to remove the # at the beginning of +any line that you want to change. Any line that starts with a # is ignored.

+ +

Let’s say we wanted to change the branding logo on the public +interface. That can be easily changed in your config.rb file:

+ +
AppConfig[:pui_branding_img]
+
+ +

That setting is used by the file found in public/app/views/shared/_header.html.erb to display your PUI side logo. You don’t need to change that file, only the setting in your config.rb file.

+ +

You can store the image in plugins/local/public/assets/images/logo.png You’ll most likely need to create one or more of the directories.

+ +

Your AppConfig[:pui_branding_img] setting should look something like this:

+ +
AppConfig[:pui_branding_img] = '/assets/images/logo.png'
+
+ +

Alt text for the PUI branding image can and should also be supplied via:

+ +
AppConfig[:pui_branding_img_alt_text] = 'My alt text'
+
+ +

Be sure to remove the # at the beginning of +any line that you want to change. Any line that starts with a # is ignored.

+ +

If you want your image on the PUI to link out to another location, you will need to make a change to the file public/app/views/shared/_header.html.erb. The line that creates the logo just needs an a href added. You should also alter AppConfig[:pui_branding_img_alt_text] to make it clear that the image also functions as a link (e.g. AppConfig[:pui_branding_img_alt_text] = 'Back to Example College Home'). That will end up looking something like this:

+ +
<div class="col-sm-3 hidden-xs"><a href="https://example.com"><img class="logo" src="<%= asset_path(AppConfig[:pui_branding_img]) %>" alt="<%= AppConfig[:pui_branding_img_alt_text] %>" /></a></div>
+
+ +

The Staff Side logo will need a small plugin file and cannot be set in your config.rb file. This needs to be changed in the plugins/local/frontend/views/site/_branding.html.erb file. You’ll most likely need to create one or more of the directories. Then create that _branding.html.erb file and paste in the following code:

+ +
<div class="container-fluid navbar-branding">
+  <%= image_tag "archivesspace/archivesspace.small.png", :class=>"img-responsive", :alt=>"My image alt text" %>
+</div>
+
+ +

Change the "archivesspace/archivesspace.small.png" to the path to your image /assets/images/logo.png and place your logo in the plugins/local/frontend/assets/images/ directory. You’ll most likely need to create one or more of the directories.

+ +

Note: Since anything we add to plugins directory will not be precompiled by +the Rails asset pipeline, we cannot use some of the tag helpers +(like img_tag ), since that’s assuming the asset is being managed by the +asset pipeline.

+ +

Restart the application and you should see your logo in the default view.

+ +

Adding CSS rules

+ +

You can customize CSS through the plugin system too. If you don’t want to create +a whole new plugin, the easiest way is to modify the ‘local’ plugin that ships +with ArchivesSpace (it’s intended for these kinds of site-specific changes). As +long as you’ve still got ‘local’ listed in your AppConfig[:plugins] list, your +changes will get picked up.

+ +

To do that, create a file called +archivesspace/plugins/local/frontend/views/layout_head.html.erb for the staff +side or archivesspace/plugins/local/public/views/layout_head.html.erb for the +public. Then you can add the line to include the CSS in the site:

+ +
 <%= stylesheet_link_tag "#{@base_url}/assets/custom.css" %>
+
+ +

Then place your CSS in the file:

+ +
 staff side:
+ archivesspace/plugins/local/frontend/assets/custom.css
+ or public side:
+ archivesspace/plugins/local/public/assets/custom.css
+
+ +

and it will get loaded on each page.

+ +

You may also want to make changes to the main index page, or the header and +footer. Those overrides would go into the following places for the public side +of your site:

+ +
archivesspace/plugins/local/public/views/welcome/show.html.erb
+archivesspace/plugins/local/public/views/shared/_header.html.erb
+archivesspace/plugins/local/public/views/shared/_footer.html.erb
+
+ +

Heavy re-theming

+ +

If you’re wanting to really trick out your site, you could do this in a plugin +using the override methods shown above, although there are some big disadvantages +to this. The first is that assets will not be compiled by the Rails asset +pipeline. Another is that you won’t be able to take advantage of the variables +and mixins that Bootstrap and Less provide as a framework, which really helps +keep your assets well organized.

+ +

A better way to do this is to pull down a copy of the ArchivesSpace code and +build out a new theme. A good resource on how to do this is +this video . +This video covers the staff frontend UI, but the same steps can be applied to +the public UI as well.

+ +

Also become a little familiar with the +build system instructions

+ +

First, pull down a new copy of ArchivesSpace using git and be sure to checkout +a tag matching the version you’re using or wanting to use.

+ +
 $ git clone https://github.com/archivesspace/archivesspace.git
+ $ git checkout v2.5.2
+
+ +

You can start your application development server by executing:

+ +
     $ ./build/run bootstrap
+     $ ./build/run backend:devserver
+     $ ./build/run frontend:devserver
+     $ ./build/run public:devserver
+
+ +

Note: You don’t have to run all these commands all the time. The bootstrap +command really only has to be run the first time you pull down the code – +it will also take a while. You also don’t have to start the frontend or public +if you’re not working on those interfaces. Backend does have to be started for +either the public or frontend interfaces to work.

+ +

Follow the instructions in the video to create a new theme. A good way is to copy the existing default theme to a new folder and start making your updates. Be sure to take advantage of the existing variables set in the Less files to make your assets nice and organized.

+ +

Once you’ve updated your theme and have got it working, you can package your application. You can use the ./scripts/build_release to build a totally fresh AS distribution, but you don’t need to do that if you’ve simply made some minor changes to the UI. Instead, use the “./build/run public:war” to compile your assets and package a war file. You can then take this public.war file and replace your ASpace distribution war file.

+ +

Be sure to update your theme setting in the config.rb file and restart ASpace.

+ + +
+ +
+ + + diff --git a/customization/theming.md b/customization/theming.md deleted file mode 100644 index 3058f3f8..00000000 --- a/customization/theming.md +++ /dev/null @@ -1,134 +0,0 @@ -# Theming ArchivesSpace - -## Making small changes - -It's easiest to use a plugin for small changes to your site's theme. With a plugin, -we can override default views, controllers, models, etc. without having to do a -complete rebuild of the source code. Be sure to remove the `#` at the beginning of -any line that you want to change. Any line that starts with a `#` is ignored. - -Let's say we wanted to change the branding logo on the public -interface. That can be easily changed in your `config.rb` file: - -``` -AppConfig[:pui_branding_img] -``` - -That setting is used by the file found in `public/app/views/shared/_header.html.erb` to display your PUI side logo. You don't need to change that file, only the setting in your `config.rb` file. - -You can store the image in `plugins/local/public/assets/images/logo.png` You'll most likely need to create one or more of the directories. - -Your `AppConfig[:pui_branding_img]` setting should look something like this: - -``` -AppConfig[:pui_branding_img] = '/assets/images/logo.png' -``` - -Alt text for the PUI branding image can and should also be supplied via: - -``` -AppConfig[:pui_branding_img_alt_text] = 'My alt text' -```` - -Be sure to remove the `#` at the beginning of -any line that you want to change. Any line that starts with a `#` is ignored. - -If you want your image on the PUI to link out to another location, you will need to make a change to the file `public/app/views/shared/_header.html.erb`. The line that creates the logo just needs a `a href` added. You should also alter `AppConfig[:pui_branding_img_alt_text]` to make it clear that the image also functions as a link (e.g. `AppConfig[:pui_branding_img_alt_text] = 'Back to Example College Home'`). 
That will end up looking something like this: - -``` - -``` - -The Staff Side logo will need a small plugin file and cannot be set in your `config.rb` file. This needs to be changed in the `plugins/local/frontend/views/site/_branding.html.erb` file. You'll most likely need to create one or more of the directories. Then create that `_branding.html.erb` file and paste in the following code: - -``` - -``` - -Change the `"archivesspace/archivesspace.small.png"` to the path to your image `/assets/images/logo.png` and place your logo in the `plugins/local/frontend/assets/images/` directory. You'll most likely need to create one or more of the directories. - -**Note:** Since anything we add to plugins directory will not be precompiled by -the Rails asset pipeline, we cannot use some of the tag helpers -(like img_tag ), since that's assuming the asset is being managed by the -asset pipeline. - -Restart the application and you should see your logo in the default view. - -## Adding CSS rules - -You can customize CSS through the plugin system too. If you don't want to create -a whole new plugin, the easiest way is to modify the 'local' plugin that ships -with ArchivesSpace (it's intended for these kind of site-specific changes). As -long as you've still got 'local' listed in your AppConfig[:plugins] list, your -changes will get picked up. - -To do that, create a file called -`archivesspace/plugins/local/frontend/views/layout_head.html.erb` for the staff -side or `archivesspace/plugins/local/public/views/layout_head.html.erb` for the -public. Then you can add the line to include the CSS in the site: - - <%= stylesheet_link_tag "#{@base_url}/assets/custom.css" %> - -Then place your CSS in the file: - - staff side: - archivesspace/plugins/local/frontend/assets/custom.css - or public side: - archivesspace/plugins/local/public/assets/custom.css - -and it will get loaded on each page. - -You may also want to make changes to the main index page, or the header and -footer. 
Those overrides would go into the following places for the public side -of your site: - - archivesspace/plugins/local/public/views/welcome/show.html.erb - archivesspace/plugins/local/public/views/shared/_header.html.erb - archivesspace/plugins/local/public/views/shared/_footer.html.erb - -## Heavy re-theming - -If you're wanting to really trick out your site, you could do this in a plugin -using the override methods shown above, although there are some big disadvantages -to this. The first is that assets will not be compiled by the Rails asset -pipeline. Another is that you won't be able to take advantage of the variables -and mixins that Bootstrap and Less provide as a framework, which really helps -keep your assets well organized. - -A better way to do this is to pull down a copy of the ArchivesSpace code and -build out a new theme. A good resource on how to do this is -[this video](https://www.youtube.com/watch?v=Uny736mZVnk) . -This video covers the staff frontend UI, but the same steps can be applied to -the public UI as well. - -Also become a little familiar with the -[build system instructions ](../development/dev.html) - - -First, pull down a new copy of ArchivesSpace using git and be sure to checkout -a tag matching the version you're using or wanting to use. - - $ git clone https://github.com/archivesspace/archivesspace.git - $ git checkout v2.5.2 - -You can start your application development server by executing: - - $ ./build/run bootstrap - $ ./build/run backend:devserver - $ ./build/run frontend:devserver - $ ./build/run public:devserver - -**Note:** You don't have to run all these commands all the time. The bootstrap -command really only has to be run the first time your pull down the code -- -it will also take awhile. You also don't have to start the frontend or public -if you're not working on those interfaces. Backend does have to be started for -either the public or frontend interfaces to work. 
) - - -Follow the instructions in the video to create a new theme. A good way is to copy the existing default theme to a new folder and start making your updates. Be sure to take advantage of the existing variables set in the Less files to make your assets nice and organized. - -Once you've updated you theme and have got it working, you can package your application. You can use the ./scripts/build_release to build a totally fresh AS distribution, but you don't need to do that if you've simply made some minor changes to the UI. Instead, use the "./build/run public:war " to compile your assets and package a war file. You can then take this public.war file and replace your ASpace distribution war file. - -Be sure to update your theme setting in the config.rb file and restart ASpace. diff --git a/cypress/e2e/feedback.cy.js b/cypress/e2e/feedback.cy.js deleted file mode 100644 index 07e6c697..00000000 --- a/cypress/e2e/feedback.cy.js +++ /dev/null @@ -1,45 +0,0 @@ -const editBaseUrl = 'https://github.com/archivesspace/tech-docs/edit/master/'; -const issuesUrl = - 'https://archivesspace.atlassian.net/jira/software/projects/TD/issues'; - -describe('Feedback', () => { - it('includes an edit this page on GitHub link on every page', () => { - const editText = 'Edit this page on GitHub'; - let file = 'README.md'; - - cy.visit('http://localhost:4000/'); - cy.get('a') - .contains(editText) - .should('have.attr', 'href', `${editBaseUrl}${file}`) - .children() - .should('have.text', file); - - file = 'readme_develop.md'; - cy.visit('http://localhost:4000/readme_develop'); - cy.get('a') - .contains(editText) - .should('have.attr', 'href', `${editBaseUrl}${file}`) - .children() - .should('have.text', file); - }); - - it('includes a report issue on Jira link on every page', () => { - const reportText = 'Report issue on Jira'; - let file = 'README.md'; - - cy.visit('http://localhost:4000/'); - cy.get('a') - .contains(reportText) - .should('have.attr', 'href', `${issuesUrl}`) - 
.children() - .should('have.text', file); - - file = 'readme_develop.md'; - cy.visit('http://localhost:4000/readme_develop'); - cy.get('a') - .contains(reportText) - .should('have.attr', 'href', `${issuesUrl}`) - .children() - .should('have.text', file); - }); -}); diff --git a/cypress/fixtures/example.json b/cypress/fixtures/example.json deleted file mode 100644 index 02e42543..00000000 --- a/cypress/fixtures/example.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "name": "Using fixtures to represent data", - "email": "hello@cypress.io", - "body": "Fixtures are a great way to mock data for responses to routes" -} diff --git a/cypress/support/commands.js b/cypress/support/commands.js deleted file mode 100644 index 66ea16ef..00000000 --- a/cypress/support/commands.js +++ /dev/null @@ -1,25 +0,0 @@ -// *********************************************** -// This example commands.js shows you how to -// create various custom commands and overwrite -// existing commands. -// -// For more comprehensive examples of custom -// commands please read more here: -// https://on.cypress.io/custom-commands -// *********************************************** -// -// -// -- This is a parent command -- -// Cypress.Commands.add('login', (email, password) => { ... }) -// -// -// -- This is a child command -- -// Cypress.Commands.add('drag', { prevSubject: 'element'}, (subject, options) => { ... }) -// -// -// -- This is a dual command -- -// Cypress.Commands.add('dismiss', { prevSubject: 'optional'}, (subject, options) => { ... }) -// -// -// -- This will overwrite an existing command -- -// Cypress.Commands.overwrite('visit', (originalFn, url, options) => { ... 
}) \ No newline at end of file diff --git a/cypress/support/e2e.js b/cypress/support/e2e.js deleted file mode 100644 index 0e7290a1..00000000 --- a/cypress/support/e2e.js +++ /dev/null @@ -1,20 +0,0 @@ -// *********************************************************** -// This example support/e2e.js is processed and -// loaded automatically before your test files. -// -// This is a great place to put global configuration and -// behavior that modifies Cypress. -// -// You can change the location of this file or turn off -// automatically serving support files with the -// 'supportFile' configuration option. -// -// You can read more here: -// https://on.cypress.io/configuration -// *********************************************************** - -// Import commands.js using ES2015 syntax: -import './commands' - -// Alternatively you can use CommonJS syntax: -// require('./commands') \ No newline at end of file diff --git a/development/README.md b/development/README.md deleted file mode 100644 index 02a9252e..00000000 --- a/development/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -permalink: /development/ ---- - -# Information for ArchivesSpace developers and code contributors - -* [Running a development version of ArchivesSpace](./dev.html) -* [Building an ArchivesSpace release](./release.html) -* [Docker](./docker.html) -* [DB versions listed by release](./release_schema_versions.html) -* [User Interface Test Suite](./ui_test.html) -* [Upgrading Rack for ArchivesSpace](./development/jruby-rack-build.html) -* [ArchivesSpace Releases](./releases.html) -* [Using the VS Code editor for local development](./vscode.html) diff --git a/development/dev.html b/development/dev.html new file mode 100644 index 00000000..3b8705aa --- /dev/null +++ b/development/dev.html @@ -0,0 +1,592 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + development/dev.md + +

+ +

+ + Report issue on Jira + development/dev.md + +

+
+
+ +

Running a development version of ArchivesSpace

+ +

System requirements:

+ +
    +
  • Java 8, 11 (11 recommended) or 17
  • +
  • Docker & Docker Compose is optional but makes running MySQL and Solr more convenient
  • +
  • Supervisord is optional but makes running the development servers more convenient
  • +
+ +

Currently supported platforms for development:

+ +
    +
  • Linux (although generally only Ubuntu is actually used / tested)
  • +
  • Mac (x86)
  • +
+ +

Windows is not supported because of issues building gems with C extensions (such as sassc).

+ +

For Mac (arm) see https://teaspoon-consulting.com/articles/archivesspace-on-the-m1.html.

+ +

When installing Java, OpenJDK is strongly recommended. Other vendors may work, but OpenJDK is +most extensively used and tested. It is highly recommended that you use Jabba +to install Java (OpenJDK). This has proven to be a reliable way of resolving cross platform +issues (looking at you Mac :/) that have occurred via other means of installing Java.

+ +

Installing OpenJDK with jabba will look something like:

+ +
# assuming you have jabba installed
+jabba install openjdk@1.11.0-2
+jabba use openjdk@1.11.0-2
+jabba alias default openjdk@1.11.0-2 # [optional] make this the default java
+
+ +

On Linux/Ubuntu it is generally fine to install from system packages:

+ +
sudo apt install openjdk-$VERSION-jdk-headless
+# example: install 11 & 17
+sudo apt install openjdk-11-jdk-headless
+sudo apt install openjdk-17-jdk-headless
+
+# update-java-alternatives can be used to switch between versions
+sudo update-java-alternatives --list
+sudo update-java-alternatives --set $version
+
+ +

If using Docker & Docker Compose install them following the official documentation:

+ + + +

Do not use system packages or any other unofficial source as these have been found to be inconsistent with standard Docker.

+ +

The recommended way of developing ArchivesSpace is to fork the repository and clone it locally.

+ +

Note: all commands in the following instructions assume you are in the root directory of your local fork +unless otherwise specified.

+ +

Quickstart

+ +

This is an abridged reference for getting started with a limited explanation of the steps:

+ +
# Build images (required one time only for most use cases)
+docker-compose -f docker-compose-dev.yml build
+# Run MySQL and Solr in the background
+docker-compose -f docker-compose-dev.yml up --detach
+# Download the MySQL connector
+cd ./common/lib && wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.23/mysql-connector-java-8.0.23.jar && cd -
+# Download all application dependencies
+./build/run bootstrap
+# OPTIONAL: load dev database
+gzip -dc ./build/mysql_db_fixtures/accessibility.sql.gz | mysql --host=127.0.0.1 --port=3306  -u root -p123456 archivesspace
+# Setup the development database
+./build/run db:migrate
+# Clear out any existing Solr state (only needed after a database setup / restore after previous development)
+./build/run solr:reset
+# Run the development servers
+supervisord -c supervisord/archivesspace.conf
+# OPTIONAL: Run a backend (api) test (for checking setup is correct)
+./build/run backend:test -Dexample="User model"
+
+ +

Step by Step explanation

+ +

Run MySQL and Solr

+ +

ArchivesSpace development requires MySQL and Solr to be running. The easiest and +recommended way to run them is using the Docker Compose configuration provided by ArchivesSpace.

+ +

Start by building the images. This creates a custom Solr image that includes ArchivesSpace’s configuration:

+ +
docker-compose -f docker-compose-dev.yml build
+
+ +

Note: you only need to run the above command once. You would only need to rerun this command if a) +you delete the image and therefore need to recreate it, or b) you make a change to ArchivesSpace’s Solr +configuration and therefore need to rebuild the image to include the updated configuration.

+ +

Run MySQL and Solr in the background:

+ +
docker-compose -f docker-compose-dev.yml up --detach
+
+ +

By using Docker Compose to run MySQL and Solr you are guaranteed to have the correct connection settings +and don’t otherwise need to define connection settings for MySQL or Solr.

+ +

Verify that MySQL & Solr are running: docker ps. It should list the running containers:

+ +
CONTAINER ID   IMAGE                       COMMAND                  CREATED       STATUS       PORTS                               NAMES
+ec76bd09d73b   mysql:8.0                   "docker-entrypoint.s…"   8 hours ago   Up 8 hours   33060/tcp, 0.0.0.0:3307->3306/tcp   as_test_db
+30574171530f   archivesspace/solr:latest   "docker-entrypoint.s…"   8 hours ago   Up 8 hours   0.0.0.0:8984->8983/tcp              as_test_solr
+d84a6a183bb0   archivesspace/solr:latest   "docker-entrypoint.s…"   8 hours ago   Up 8 hours   0.0.0.0:8983->8983/tcp              as_dev_solr
+7df930293875   mysql:8.0                   "docker-entrypoint.s…"   8 hours ago   Up 8 hours   0.0.0.0:3306->3306/tcp, 33060/tcp   as_dev_db
+
+ +

To check the servers are online:

+ +
    +
  • MYSQL: mysql -h 127.0.0.1 -u as -pas123 archivesspace
  • +
  • SOLR: curl http://localhost:8983/solr/admin/cores
  • +
+ +

To stop and / or remove the servers:

+ +
docker-compose -f docker-compose-dev.yml stop # shutdowns the servers (data will be preserved)
+docker-compose -f docker-compose-dev.yml rm # deletes the containers (all data will be removed)
+
+ +

Advanced: running MySQL and Solr outside of Docker

+ +

You are not required to use Docker for MySQL and Solr. If you run them another way the default +requirements are:

+ +
    +
  • dev MySQL, localhost:3306 create db: archivesspace, username: as, password: as123
  • +
  • test MySQL, localhost:3307 create db: archivesspace, username: as, password: as123
  • +
  • dev Solr, localhost:8983 create archivesspace core using ArchivesSpace configuration
  • +
  • test Solr, localhost:8984, create archivesspace core using ArchivesSpace configuration
  • +
+ +

The defaults can be changed using environment variables located in the build file.

+ +

Download the MySQL connector

+ +

For licensing reasons the MySQL connector must be downloaded separately:

+ +
cd ./common/lib
+wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.23/mysql-connector-java-8.0.23.jar
+cd -
+
+ +

Run bootstrap

+ +

The bootstrap task:

+ +
 ./build/run bootstrap
+
+ +

Will bootstrap your development environment by downloading all +dependencies–JRuby, Gems, etc. This one command creates a fully +self-contained development environment where everything is downloaded +within the ArchivesSpace project build directory.

+ +

It is not necessary and generally incorrect to manually install JRuby +& bundler etc. for ArchivesSpace (whether with a version manager or +otherwise).

+ +

The self contained ArchivesSpace development environment typically does +not interact with other J/Ruby environments you may have on your system +(such as those managed by rbenv or similar).

+ +

This is the starting point for all ArchivesSpace development. You may need +to re-run this command after fetching updates, or when making changes to +Gemfiles or other dependencies such as those in the ./build/build.xml file.

+ +

Errors running bootstrap

+ +
     [java] INFO: jetty-9.4.44.v20210927; built: 2021-09-27T23:02:44.612Z; git: 8da83308eeca865e495e53ef315a249d63ba9332; jvm 11+28
+     [java] Exiting
+     [java] LoadError: no such file to load -- rails/commands
+     [java]   require at org/jruby/RubyKernel.java:974
+     [java]    <main> at script/rails:8
+
+ +
 ./build/run backend:devserver
+ ./build/run frontend:devserver
+ ./build/run public:devserver
+ ./build/run indexer
+
+ +

There have been various forms of the same LoadError. It’s a transient error +that is resolved by rerunning bootstrap.

+ +
     [java] org.jruby.Main -I uri:classloader://META-INF/jruby.home/lib/ruby/stdlib -r
+     [java] ./siteconf20220407-5224-13f6qi7.rb extconf.rb
+     [java] sh: /Library/Internet: No such file or directory
+     [java] sh: line 0: exec: /Library/Internet: cannot execute: No such file or directory
+     [java]
+     [java] extconf failed, exit code 126
+
+ +

This has been seen on Mac platforms resulting from the installation method for Java. Installing the OpenJDK via Jabba has been effective in resolving this error.

+ +

Advanced: bootstrap & the build directory

+ +

Running bootstrap will download jars to the build directory, including:

+ +
    +
  • jetty-runner
  • +
  • jruby
  • +
  • jruby-rack
  • +
+ +

Gems will be downloaded to: ./build/gems/jruby/$version/gems/.

+ +

Setup the development database

+ +

The migrate task:

+ +
./build/run db:migrate
+
+ +

Will set up the development database, creating all of the tables etc. required by the application.

+ +

There is a task for resetting the database:

+ +
./build/run db:nuke
+
+ +

Which will first delete then migrate the database.

+ +

Advanced: Loading data fixtures into dev database

+ +

When loading a database into the development MySQL instance always ensure that ArchivesSpace is not running. Stop ArchivesSpace if it is running. Run ./build/run solr:reset to clear indexer state (a more thorough explanation of this step is described below).

+ +

If you are loading a database and MySQL has already been used for development you’ll want to drop and create an empty database first.

+ +
mysql -h 127.0.0.1 -u as -pas123 -e "DROP DATABASE archivesspace"
+mysql -h 127.0.0.1 -u as -pas123 -e "CREATE DATABASE IF NOT EXISTS archivesspace DEFAULT CHARACTER SET utf8mb4"
+
+ +

Note: you can skip the above step if MySQL was just started for the first time or any time you have an empty ArchivesSpace (one where db:migrate has not been run).

+ +

Assuming you have MySQL running and an empty archivesspace database available you can proceed to restore:

+ +
gzip -dc ./build/mysql_db_fixtures/accessibility.sql.gz | mysql --host=127.0.0.1 --port=3306  -u root -p123456 archivesspace
+./build/run db:migrate
+
+ +

Note: The above instructions should work out-of-the-box. If you want to use your own database and / or have configured MySQL differently then adjust the commands as needed.

+ +

After the restore ./build/run db:migrate is run to catch any migration updates. You can now proceed to run the application dev servers, as described below, with data already populated in ArchivesSpace.

+ +

Clear out existing Solr state

+ +

The Solr reset task:

+ +
./build/run solr:reset
+
+ +

Will wipe out any existing Solr state. This is not required when setting up for the first time, but is often required after a database reset (such as after running the ./build/run db:nuke task).

+ +

More specifically what this does is submit a delete-all request to Solr and empty out the contents of the ./build/dev/indexer*_state directories, which is described below.

+ +

Run the development servers

+ +

Use Supervisord for a simpler way of running the development servers with output for all servers sent to a single terminal window:

+ +
# run all of the services
+supervisord -c supervisord/archivesspace.conf
+
+# run in api mode (backend + indexer only)
+supervisord -c supervisord/api.conf
+
+# run just the backend (useful for trying out endpoints that don't require Solr)
+supervisord -c supervisord/backend.conf
+
+ +

ArchivesSpace is started with:

+ + + +

To stop supervisord: Ctrl-c.

+ +

Advanced: running the development servers directly

+ +

Supervisord is not required, or ideal for every situation. You can run the development servers directly via build tasks:

+ +
./build/run backend:devserver # This is the REST API
+./build/run frontend:devserver # This is the staff user interface
+./build/run public:devserver # This is the public user interface
+./build/run indexer # This is the indexer (converts ASpace records to Solr Docs and ships to Solr)
+
+ +

These should be run in different terminal sessions; they do not need to be run in a specific order, and not all of them are required.

+ +

An example use case for running a server directly is to use the pry debugger.

+ +

Advanced: debugging with pry

+ +

To debug with pry you cannot use supervisord to run the application devserver, however you can mix and match:

+ +
# run the backend and indexer with supervisord
+supervisord -c supervisord/api.conf
+
+# in a separate terminal run the frontend directly
+./build/run frontend:devserver
+
+ +

Add binding.pry to set breakpoints in the code. This can also be used in views: <% binding.pry %>. Using pry you can easily inspect the request, params and in-scope instance variables that are available.

+ +

Advanced: development servers and the build directory

+ +
 ./build/run db:migrate
+
+ +

Running the development servers will create directories in ./build/dev:

+ +
    +
  • indexer_pui_state: latest timestamps for PUI indexer activity
  • +
  • indexer_state: latest timestamps for (SUI) indexer activity
  • +
  • +

    shared: background job files

    + +

    ./build/run db:nuke

    +
  • +
+ +

Note: the folders will be created as they are needed, so they may not all be present at all times.

+ +

Running the tests

+ +

ArchivesSpace uses a combination of RSpec, integration and Selenium tests.

+ +
 ./build/run travis:test
+
+ +

It’s also useful to be able to run the backend unit tests separately. To do this, run:

+ +
 ./build/run backend:test
+
+ +

You can also run a single spec file with:

+ +
 ./build/run backend:test -Dspec="myfile_spec.rb"
+
+ +

By default the tests are configured to run using a separate MySQL & Solr from the development servers. This means that the development and test environments will not interfere with each other.

+ +
# run the backend / api tests
+./build/run backend:test
+
+ +

You can also run a single spec file with:

+ +
./build/run backend:test -Dspec="myfile_spec.rb"
+
+ +

Or a single example with:

+ +
./build/run backend:test -Dexample="does something important"
+
+ +

There are specific instructions and requirements for the UI tests to work.

+ +

Advanced: tests and the build directory

+ +

Running the tests may create directories in ./build/test. These will be the same as for the development servers as described above.

+ +

Coverage reports

+ +

You can run the coverage reports using:

+ +
 ./build/run coverage
+
+ +

This runs all of the above tests in coverage mode and, when the run finishes, produces a set of HTML reports within the coverage directory in your ArchivesSpace project directory.

+ +

Linting and formatting with Rubocop

+ +

If you are editing or adding source files that you intend to contribute via a pull request, you should make sure your changes conform to the layout and style rules by running:

+ +
./build/run rubocop
+
+ +

Most errors can be auto-corrected by running:

+ +
./build/run rubocop -Dcorrect=true
+
+ +

Submitting a Pull Request

+ +

When you have code ready to be reviewed, open a pull request to ask for it to be merged into the codebase.

+ +

To help make the review go smoothly, here are some general guidelines:

+ +
    +
  • Your pull request should address a single issue. It’s better to split large or complicated PRs into discrete steps if possible. This makes review more manageable and reduces the risk of conflicts with other changes.
  • +
  • Give your pull request a brief title, referencing any JIRA or GitHub issues resolved by the pull request. Including JIRA numbers (e.g. ‘ANW-123’) explicitly in your pull request title ensures the PR will be linked to the original issue in JIRA. Similarly, referencing GitHub issue numbers (e.g. ‘Fixes #123’) will automatically close that issue when the PR is merged.
  • +
  • Fill out as much of the Pull Request template as is possible/relevant. This makes it easier to understand the full context of your PR, including any discussions or supporting documentation that went into developing the functionality or resolving the bug.
  • +
+ +

Building a distribution

+ +

See: Building an Archivesspace Release for information on building a distribution.

+ +

Generating API documentation

+ +

See: Building an Archivesspace Release for information on building the documentation.

+ + +
+ +
+ + + diff --git a/development/dev.md b/development/dev.md deleted file mode 100644 index a7056f4a..00000000 --- a/development/dev.md +++ /dev/null @@ -1,446 +0,0 @@ -# Running a development version of ArchivesSpace - -System requirements: - -- Java 8, 11 (11 recommended) or 17 -- [Docker](https://www.docker.com/) & [Docker Compose](https://docs.docker.com/compose/) is optional but makes running MySQL and Solr more convenient -- [Supervisord](http://supervisord.org/) is optional but makes running the development servers more convenient - -Currently supported platforms for development: - -- Linux (although generally only Ubuntu is actually used / tested) -- Mac (x86) - -Windows is not supported because of issues building gems with C extensions (such as sassc). - -For Mac (arm) see [https://teaspoon-consulting.com/articles/archivesspace-on-the-m1.html](https://teaspoon-consulting.com/articles/archivesspace-on-the-m1.html). - -When installing Java OpenJDK is strongly recommended. Other vendors may work, but OpenJDK is -most extensively used and tested. It is highly recommended that you use [Jabba](https://github.com/shyiko/jabba) -to install Java (OpenJDK). This has proven to be a reliable way of resolving cross platform -issues (looking at you Mac :/) that have occured via other means of installing Java. 
- -Installing OpenJDK with jabba will look something like: - -```bash -# assuming you have jabba installed -jabba install openjdk@1.11.0-2 -jabba use openjdk@1.11.0-2 -jabba alias default openjdk@1.11.0-2 # [optional] make this the default java -``` - -On Linux/Ubuntu it is generally fine to install from system packages: - -```bash -sudo apt install openjdk-$VERSION-jdk-headless -# example: install 11 & 17 -sudo apt install openjdk-11-jdk-headless -sudo apt install openjdk-17-jdk-headless - -# update-java-alternatives can be used to switch between versions -sudo update-java-alternatives --list -sudo update-java-alternatives --set $version -``` - -If using Docker & Docker Compose install them following the official documentation: - -- [https://docs.docker.com/get-docker/](https://docs.docker.com/get-docker/) -- [https://docs.docker.com/compose/install/](https://docs.docker.com/compose/install/) - -_Do not use system packages or any other unofficial source as these have been found to be inconsistent with standard Docker._ - -The recommended way of developing ArchivesSpace is to fork the repository and clone it locally. 
- -_Note: all commands in the following instructions assume you are in the root directory of your local fork -unless otherwise specified._ - -__Quickstart__ - -This is an abridged reference for getting started with a limited explanation of the steps: - -```bash -# Build images (required one time only for most use cases) -docker-compose -f docker-compose-dev.yml build -# Run MySQL and Solr in the background -docker-compose -f docker-compose-dev.yml up --detach -# Download the MySQL connector -cd ./common/lib && wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.23/mysql-connector-java-8.0.23.jar && cd - -# Download all application dependencies -./build/run bootstrap -# OPTIONAL: load dev database -gzip -dc ./build/mysql_db_fixtures/accessibility.sql.gz | mysql --host=127.0.0.1 --port=3306 -u root -p123456 archivesspace -# Setup the development database -./build/run db:migrate -# Clear out any existing Solr state (only needed after a database setup / restore after previous development) -./build/run solr:reset -# Run the development servers -supervisord -c supervisord/archivesspace.conf -# OPTIONAL: Run a backend (api) test (for checking setup is correct) -./build/run backend:test -Dexample="User model" -``` - -## Step by Step explanation - -### Run MySQL and Solr - -ArchivesSpace development requires MySQL and Solr to be running. The easiest and -recommended way to run them is using the Docker Compose configuration provided by ArchivesSpace. - -Start by building the images. This creates a custom Solr image that includes ArchivesSpace's configuration: - -```bash -docker-compose -f docker-compose-dev.yml build -``` - -_Note: you only need to run the above command once. 
You would only need to rerun this command if a) -you delete the image and therefore need to recreate it, or b) you make a change to ArchivesSpace's Solr -configuration and therefore need to rebuild the image to include the updated configuration._ - -Run MySQL and Solr in the background: - -```bash -docker-compose -f docker-compose-dev.yml up --detach -``` - -By using Docker Compose to run MySQL and Solr you are guaranteed to have the correct connection settings -and don't otherwise need to define connection settings for MySQL or Solr. - -Verify that MySQL & Solr are running: `docker ps`. It should list the running containers: - -```txt -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -ec76bd09d73b mysql:8.0 "docker-entrypoint.s…" 8 hours ago Up 8 hours 33060/tcp, 0.0.0.0:3307->3306/tcp as_test_db -30574171530f archivesspace/solr:latest "docker-entrypoint.s…" 8 hours ago Up 8 hours 0.0.0.0:8984->8983/tcp as_test_solr -d84a6a183bb0 archivesspace/solr:latest "docker-entrypoint.s…" 8 hours ago Up 8 hours 0.0.0.0:8983->8983/tcp as_dev_solr -7df930293875 mysql:8.0 "docker-entrypoint.s…" 8 hours ago Up 8 hours 0.0.0.0:3306->3306/tcp, 33060/tcp as_dev_db -``` - -To check the servers are online: - -- MYSQL: `mysql -h 127.0.0.1 -u as -pas123 archivesspace` -- SOLR: `curl http://localhost:8983/solr/admin/cores` - -To stop and / or remove the servers: - -```bash -docker-compose -f docker-compose-dev.yml stop # shutdowns the servers (data will be preserved) -docker-compose -f docker-compose-dev.yml rm # deletes the containers (all data will be removed) -``` - -__Advanced: running MySQL and Solr outside of Docker__ - -You are not required to use Docker for MySQL and Solr. 
If you run them another way the default -requirements are: - -- dev MySQL, localhost:3306 create db: archivesspace, username: as, password: as123 -- test MySQL, localhost:3307 create db: archivesspace, username: as, password: as123 -- dev Solr, localhost:8983 create archivesspace core using ArchivesSpace configuration -- test Solr, localhost:8984, create archivesspace core using ArchivesSpace configuration - -The defaults can be changed using [environment variables](https://github.com/archivesspace/archivesspace/blob/master/build/build.xml#L43-L46) located in the build file. - -### Download the MySQL connector - -For licensing reasons the MySQL connector must be downloaded separately: - -```bash -cd ./common/lib -wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.23/mysql-connector-java-8.0.23.jar -cd - -``` - -### Run bootstrap - -The bootstrap task: - - ./build/run bootstrap - -Will bootstrap your development environment by downloading all -dependencies--JRuby, Gems, etc. This one command creates a fully -self-contained development environment where everything is downloaded -within the ArchivesSpace project `build` directory. - -_It is not necessary and generally incorrect to manually install JRuby -& bundler etc. for ArchivesSpace (whether with a version manager or -otherwise)._ - -_The self contained ArchivesSpace development environment typically does -not interact with other J/Ruby environments you may have on your system -(such as those managed by rbenv or similar)._ - -This is the starting point for all ArchivesSpace development. You may need -to re-run this command after fetching updates, or when making changes to -Gemfiles or other dependencies such as those in the `./build/build.xml` file. 
- -**Errors running bootstrap** - -```txt - [java] INFO: jetty-9.4.44.v20210927; built: 2021-09-27T23:02:44.612Z; git: 8da83308eeca865e495e53ef315a249d63ba9332; jvm 11+28 - [java] Exiting - [java] LoadError: no such file to load -- rails/commands - [java] require at org/jruby/RubyKernel.java:974 - [java]
at script/rails:8 -``` - - ./build/run backend:devserver - ./build/run frontend:devserver - ./build/run public:devserver - ./build/run indexer - -There have been various forms of the same `LoadError`. It's a transient error -that is resolved by rerunning bootstrap. - -```txt - [java] org.jruby.Main -I uri:classloader://META-INF/jruby.home/lib/ruby/stdlib -r - [java] ./siteconf20220407-5224-13f6qi7.rb extconf.rb - [java] sh: /Library/Internet: No such file or directory - [java] sh: line 0: exec: /Library/Internet: cannot execute: No such file or directory - [java] - [java] extconf failed, exit code 126 -``` - -This has been seen on Mac platforms resulting from the installation method -for Java. Installing the OpenJDK via Jabba has been effective in resolving -this error. - -__Advanced: bootstrap & the build directory__ - -Running bootstrap will download jars to the build directory, including: - -- jetty-runner -- jruby -- jruby-rack - -Gems will be downloaded to: `./build/gems/jruby/$version/gems/`. - -### Setup the development database - -The migrate task: - -```bash -./build/run db:migrate -``` - -Will setup the development database, creating all of the tables etc. -required by the application. - -There is a task for resetting the database: - -```bash -./build/run db:nuke -``` - -Which will first delete then migrate the database. - -__Advanced: Loading data fixtures into dev database__ - -When loading a database into the development MySQL instance always ensure that ArchivesSpace -is **not** running. Stop ArchivesSpace if it is running. Run `./build/run solr:reset` to -clear indexer state (a more thorough explanation of this step is described below). - -If you are loading a database and MySQL has already been used for development you'll want to -drop and create an empty database first. 
- -```bash -mysql -h 127.0.0.1 -u as -pas123 -e "DROP DATABASE archivesspace" -mysql -h 127.0.0.1 -u as -pas123 -e "CREATE DATABASE IF NOT EXISTS archivesspace DEFAULT CHARACTER SET utf8mb4" -``` - -_Note: you can skip the above step if MySQL was just started for the first time or any time you -have an empty ArchivesSpace (one where `db:migrate` has not been run)._ - -Assuming you have MySQL running and an empty `archivesspace` database available you can proceed -to restore: - -```bash -gzip -dc ./build/mysql_db_fixtures/accessibility.sql.gz | mysql --host=127.0.0.1 --port=3306 -u root -p123456 archivesspace -./build/run db:migrate -``` - -_Note: The above instructions should work out-of-the-box. If you want to use your own database -and / or have configured MySQL differently then adjust the commands as needed._ - -After the restore `./build/run db:migrate` is run to catch any migration updates. You can now -proceed to run the application dev servers, as described below, with data already -populated in ArchivesSpace. - -### Clear out existing Solr state - -The Solr reset task: - -```bash -./build/run solr:reset -``` - -Will wipe out any existing Solr state. This is not required when setting -up for the first time, but is often required after a database reset (such as -after running the `./build/run db:nuke` task). 
- -_More specifically what this does is submit a delete all request to Solr and empty -out the contents of the `./build/dev/indexer*_state` directories, which is described -below._ - -### Run the development servers - -Use [Supervisord](http://supervisord.org/) for a simpler way of running the development servers with output -for all servers sent to a single terminal window: - -```bash -# run all of the services -supervisord -c supervisord/archivesspace.conf - -# run in api mode (backend + indexer only) -supervisord -c supervisord/api.conf - -# run just the backend (useful for trying out endpoints that don't require Solr) -supervisord -c supervisord/backend.conf -``` - -ArchivesSpace is started with: - -- the staff interface on [http://localhost:3000/](http://localhost:3000/) -- the PUI on [http://localhost:3001/](http://localhost:3001/) -- the API on [http://localhost:4567/](http://localhost:4567/) - -To stop supervisord: `Ctrl-c`. - -__Advanced: running the development servers directly__ - -Supervisord is not required, or ideal for every situation. You can run the development -servers directly via build tasks: - -```bash -./build/run backend:devserver # This is the REST API -./build/run frontend:devserver # This is the staff user interface -./build/run public:devserver # This is the public user interface -./build/run indexer # This is the indexer (converts ASpace records to Solr Docs and ships to Solr) -``` - -These should be run in different terminal sessions and do not need to be run -in a specific order or are all required. 
- -_An example use case for running a server directly is to use the pry debugger._ - -__Advanced: debugging with pry__ - -To debug with pry you cannot use supervisord to run the application devserver, -however you can mix and match: - -```bash -# run the backend and indexer with supervisord -supervisord -c supervisord/api.conf - -# in a separate terminal run the frontend directly -./build/run frontend:devserver -``` - -Add `binding.pry` to set breakpoints in the code. This can also be used in views: -`<% binding.pry %>`. Using pry you can easily inspect the `request`, `params` and -in scope instance variables that are available. - -__Advanced: development servers and the build directory__ - - ./build/run db:migrate - -Running the developments servers will create directories in `./build/dev`: - -- indexer_pui_state: latest timestamps for PUI indexer activity -- indexer_state: latest timestamps for (SUI) indexer activity -- shared: background job files - - ./build/run db:nuke - -_Note: the folders will be created as they are needed, so they may not all be present -at all times._ - -## Running the tests - -ArchivesSpace uses a combination of RSpec, integration and Selenium -tests. - - ./build/run travis:test - -It's also useful to be able to run the backend unit tests separately. -To do this, run: - - ./build/run backend:test - -You can also run a single spec file with: - - ./build/run backend:test -Dspec="myfile_spec.rb" - -_By default the tests are configured to run using a separate MySQL & Solr from the -development servers. 
This means that the development and test environments will not -interfere with each other._ - -```bash -# run the backend / api tests -./build/run backend:test -``` - -You can also run a single spec file with: - -```bash -./build/run backend:test -Dspec="myfile_spec.rb" -``` - -Or a single example with: - -```bash -./build/run backend:test -Dexample="does something important" -``` - -There are specific instructions and requirements for the [UI tests](ui_test.html) to work. - -__Advanced: tests and the build directory__ - -Running the tests may create directories in `./build/test`. These will be -the same as for the development servers as described above. - -## Coverage reports - -You can run the coverage reports using: - - ./build/run coverage - -This runs all of the above tests in coverage mode and, when the run -finishes, produces a set of HTML reports within the `coverage` -directory in your ArchivesSpace project directory. - -## Linting and formatting with Rubocop - -If you are editing or adding source files that you intend to contribute via a pull request, -you should make sure your changes conform to the layout and style rules by running: - - ./build/run rubocop - -Most errors can be auto-corrected by running: - - ./build/run rubocop -Dcorrect=true - - -## Submitting a Pull Request - -When you have code ready to be reviewed, open a pull request to ask for it to be -merged into the codebase. - -To help make the review go smoothly, here are some general guidelines: - -* __Your pull request should address a single issue.__ - It's better to split large or complicated PRs into discrete steps if possible. This - makes review more manageable and reduces the risk of conflicts with other changes. -* __Give your pull request a brief title, referencing any JIRA or Github issues resolved -by the pull request.__ - Including JIRA numbers (e.g. 'ANW-123') explicitly in your pull request title ensures the - PR will be linked to the original issue in JIRA. 
Similarly, referencing GitHub issue numbers - (e.g. 'Fixes #123') will automatically close that issue when the PR is merged. -* __Fill out as much of the Pull Request template as is possible/relevant.__ - This makes it easier to understand the full context of your PR, including any discussions or supporting documentation that went into developing the functionality or resolving the bug. - -## Building a distribution - -See: [Building an Archivesspace Release](release.html) for information on building a distribution. - -## Generating API documentation - -See: [Building an Archivesspace Release](release.html) for information on building the documentation. diff --git a/development/docker.html b/development/docker.html new file mode 100644 index 00000000..4db03bc7 --- /dev/null +++ b/development/docker.html @@ -0,0 +1,186 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + development/docker.md + +

+ +

+ + Report issue on Jira + development/docker.md + +

+
+
+ +

Docker

+ +

The Docker configuration is used to create automated builds on Docker Hub, which are deployed to the latest version when the build completes.

+ +

Please note:

+ +
    +
  • Docker is not supported as an install method.
  • +
  • Docker configuration is being used for internal purposes only.
  • +
  • Use of Docker by anyone else is “use at your own risk”.
  • +
  • Docker related files may be updated at anytime without warning or presence in release notes.
  • +
+ +

Custom builds

+ +

Run ArchivesSpace with MySQL, external Solr and a Web Proxy. Switch to the branch you want to build:

+ +
bash
+#if you already have running containers and want to clear them out
+docker-compose stop
+docker-compose rm
+
+#build the local image
+docker-compose build # needed whenever the branch is changed and ready to test
+docker-compose up
+
+#running specific containers
+docker-compose up -d db solr # in background
+docker-compose up app web # in foreground
+>to access running container
+docker exec -it archivesspace_app_1 bash
+
+ +

Sharing an image

+ +

To share the build image the easiest way is to create an account on Docker Hub. Next retag the image and push to the hub account:

+ +
DOCKER_ID_USER=example
+TAG=awesome-updates
+docker tag archivesspace_app:latest $DOCKER_ID_USER/archivesspace:$TAG
+docker push $DOCKER_ID_USER/archivesspace:$TAG
+
+ +

To download the image: docker pull example/archivesspace:awesome-updates.

+ +
+ + +
+ +
+ + + diff --git a/development/docker.md b/development/docker.md deleted file mode 100644 index 0d514d9a..00000000 --- a/development/docker.md +++ /dev/null @@ -1,47 +0,0 @@ -# Docker - -The [Docker](https://www.docker.com/) configuration is used to create [automated builds](https://hub.docker.com/r/archivesspace/archivesspace/) on Docker Hub, which are deployed to [the latest version](http://test.archivesspace.org) when the build completes. - -Please note: - -- Docker is not supported as an install method. -- Docker configuration is being used for internal purposes only. -- Use of Docker by anyone else is "use at your own risk". -- Docker related files may be updated at anytime without warning or presence in release notes. - -## Custom builds - -Run ArchivesSpace with MySQL, external Solr and a Web Proxy. Switch to the -branch you want to build: - -``` -bash -#if you already have running containers and want to clear them out -docker-compose stop -docker-compose rm - -#build the local image -docker-compose build # needed whenever the branch is changed and ready to test -docker-compose up - -#running specific containers -docker-compose up -d db solr # in background -docker-compose up app web # in foreground ->to access running container -docker exec -it archivesspace_app_1 bash -``` - -## Sharing an image - -To share the build image the easiest way is to create an account on [Docker Hub](https://hub.docker.com/). Next retag the image and push to the hub account: - -```bash -DOCKER_ID_USER=example -TAG=awesome-updates -docker tag archivesspace_app:latest $DOCKER_ID_USER/archivesspace:$TAG -docker push $DOCKER_ID_USER/archivesspace:$TAG -``` - -To download the image: `docker pull example/archivesspace:awesome-updates`. 
- ---- diff --git a/development/index.html b/development/index.html new file mode 100644 index 00000000..5ca0f96c --- /dev/null +++ b/development/index.html @@ -0,0 +1,151 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + development/README.md + +

+ +

+ + Report issue on Jira + development/README.md + +

+
+
+ +

Information for ArchivesSpace developers and code contributors

+ + + + +
+ +
+ + + diff --git a/development/jruby-rack-build.html b/development/jruby-rack-build.html new file mode 100644 index 00000000..25a1d728 --- /dev/null +++ b/development/jruby-rack-build.html @@ -0,0 +1,227 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + development/jruby-rack-build.md + +

+ +

+ + Report issue on Jira + development/jruby-rack-build.md + +

+
+
+ +

Upgrading Rack for ArchivesSpace

+ +
    +
  • Install local JRuby (match aspace version, currently: 9.2.12.0) and switch to it.
  • +
  • Install Maven.
  • +
  • Download jruby-rack.
  • +
+ +
git checkout 1.1-stable
+# install bundler version to match 1.1-stable Gemfile.lock
+gem install bundler --version=1.14.6
+
+ +

Should result in:

+ +
Fetching bundler-1.14.6.gem
+Successfully installed bundler-1.14.6
+Parsing documentation for bundler-1.14.6
+Installing ri documentation for bundler-1.14.6
+Done installing documentation for bundler after 5 seconds
+1 gem installed
+
+ +

Set environment to target rack version (the version being upgraded to):

+ +
export RACK_VERSION=2.2.3
+bundle
+
+ +

Should result in:

+ +
Fetching gem metadata from https://rubygems.org/.............
+Fetching version metadata from https://rubygems.org/..
+Resolving dependencies...
+Installing rake 10.4.2
+Using bundler 1.14.6
+Using diff-lcs 1.2.5
+Installing jruby-openssl 0.9.21 (java)
+Using rack 2.2.3 (was 1.6.8)
+Using rspec-core 2.14.8
+Using rspec-mocks 2.14.6
+Using appraisal 0.5.2
+Using rspec-expectations 2.14.5
+Using rspec 2.14.1
+Bundle complete! 5 Gemfile dependencies, 10 gems now installed.
+Use `bundle show [gemname]` to see where a bundled gem is installed.
+
+ +

This will have bumped the Rack version in Gemfile.lock:

+ +
diff --git a/Gemfile.lock b/Gemfile.lock
+index 493c667..f016925 100644
+--- a/Gemfile.lock
++++ b/Gemfile.lock
+@@ -6,7 +6,7 @@ GEM
+       rake
+     diff-lcs (1.2.5)
+     jruby-openssl (0.9.21-java)
+-    rack (1.6.8)
++    rack (2.2.3)
+     rake (10.4.2)
+     rspec (2.14.1)
+       rspec-core (~> 2.14.0)
+@@ -23,7 +23,7 @@ PLATFORMS
+ DEPENDENCIES
+   appraisal
+   jruby-openssl (~> 0.9.20)
+-  rack (~> 1.6.8)
++  rack (= 2.2.3)
+   rake (~> 10.4.2)
+   rspec (~> 2.14.1)
+
+ +

Build the jruby-rack jar:

+ +
bundle exec jruby -S rake clean gem SKIP_SPECS=true
+
+ +

This creates target/jruby-rack-1.1.21.jar with Rack 2.2.3.

+ +

Upload the jar to the public s3 bucket, specifying the rack version:

+ +
aws s3 cp target/jruby-rack-1.1.21.jar \
+  s3://as-public-shared-files/jruby-rack-1.1.21_rack-2.2.3.jar \
+  --profile archivesspace
+
+ +

Finally, update rack_version in the aspace build.xml file.

+ + +
+ +
+ + + diff --git a/development/jruby-rack-build.md b/development/jruby-rack-build.md deleted file mode 100644 index 60299b66..00000000 --- a/development/jruby-rack-build.md +++ /dev/null @@ -1,93 +0,0 @@ -# Upgrading Rack for ArchivesSpace - -- Install local JRuby (match aspace version, currently: 9.2.12.0) and switch to it. -- Install Maven. -- Download jruby-rack. - -``` -git checkout 1.1-stable -# install bundler version to match 1.1-stable Gemfile.lock -gem install bundler --version=1.14.6 -``` - -Should result in: - -``` -Fetching bundler-1.14.6.gem -Successfully installed bundler-1.14.6 -Parsing documentation for bundler-1.14.6 -Installing ri documentation for bundler-1.14.6 -Done installing documentation for bundler after 5 seconds -1 gem installed -``` - -Set environment to target rack version (the version being upgraded to): - -``` -export RACK_VERSION=2.2.3 -bundle -``` - -Should result in: - -``` -Fetching gem metadata from https://rubygems.org/............. -Fetching version metadata from https://rubygems.org/.. -Resolving dependencies... -Installing rake 10.4.2 -Using bundler 1.14.6 -Using diff-lcs 1.2.5 -Installing jruby-openssl 0.9.21 (java) -Using rack 2.2.3 (was 1.6.8) -Using rspec-core 2.14.8 -Using rspec-mocks 2.14.6 -Using appraisal 0.5.2 -Using rspec-expectations 2.14.5 -Using rspec 2.14.1 -Bundle complete! 5 Gemfile dependencies, 10 gems now installed. -Use `bundle show [gemname]` to see where a bundled gem is installed. 
-``` - -This will have bumped the Rack version in Gemfile.lock: - -```diff -diff --git a/Gemfile.lock b/Gemfile.lock -index 493c667..f016925 100644 ---- a/Gemfile.lock -+++ b/Gemfile.lock -@@ -6,7 +6,7 @@ GEM - rake - diff-lcs (1.2.5) - jruby-openssl (0.9.21-java) -- rack (1.6.8) -+ rack (2.2.3) - rake (10.4.2) - rspec (2.14.1) - rspec-core (~> 2.14.0) -@@ -23,7 +23,7 @@ PLATFORMS - DEPENDENCIES - appraisal - jruby-openssl (~> 0.9.20) -- rack (~> 1.6.8) -+ rack (= 2.2.3) - rake (~> 10.4.2) - rspec (~> 2.14.1) -``` - -Build the jruby-rack jar: - -``` -bundle exec jruby -S rake clean gem SKIP_SPECS=true -``` - -This creates `target/jruby-rack-1.1.21.jar` with Rack 2.2.3. - -Upload the jar to the public s3 bucket, specifying the rack version: - -```bash -aws s3 cp target/jruby-rack-1.1.21.jar \ - s3://as-public-shared-files/jruby-rack-1.1.21_rack-2.2.3.jar \ - --profile archivesspace -``` - -Finally, update `rack_version` in the aspace `build.xml` file. diff --git a/development/release.html b/development/release.html new file mode 100644 index 00000000..e84752ad --- /dev/null +++ b/development/release.html @@ -0,0 +1,387 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + development/release.md + +

+ +

+ + Report issue on Jira + development/release.md + +

+
+
+ +

Building an ArchivesSpace release

+ + + +

Clone the git repository

+ +

When building a release it is important to start from a clean repository. The +safest way of ensuring this is to clone the repo:

+ +
git clone https://github.com/archivesspace/archivesspace.git
+
+ +

Checkout the release branch and create release tag

+ +

If you are building a major or minor version (see https://semver.org), +start by creating a branch for the release and all future patch releases:

+ +
git checkout -b release-v1.0.x
+git tag v1.0.0
+
+

If you are building a patch version, just check out the existing branch and see below:

+ +
git checkout release-v1.0.x
+
+ +

Patch versions typically arise because a regression or critical bug has arisen since +the last major or minor release. We try to ensure that the “hotfix” is merged into both +master and the release branch without the need to cherry-pick commits from one branch to +the other. The reason is that cherry-picking creates a new commit (with a new commit id) +that contains identical changes, which is not optimal for the repository history.

+ +

It is therefore preferable to start from the release branch when creating a “hotfix” +that needs to be merged into both the release branch and master. The Pull Request should +then be based on the release branch. After that Pull Request has been through Code review, +QA and merged, a second Pull Request should be created to merge the updated release branch +to master.

+ +

Consider the following scenario. The current production release is v1.0.0 and a critical +bug has been discovered. In the time since v1.0.0 was released, new features have been +added to the master branch, intended for release in v1.1.0:

+ +
git checkout -b oh-no-some-migration-corrupts-some-data origin/release-v1.0.0
+( fixes problem )
+git commit -m "fix bad migration and add a migration to repair corrupted data"
+gh pr create -B release-v1.0.x --web
+( PR is reviewed and merged to the release branch)
+git checkout release-v1.0.x
+git pull
+git tag v1.0.1
+gh pr create -B master --web
+( PR is reviewed and merged to the master branch)
+
+ +

Pre-Release Steps

+ +

Run the ArchivesSpace rake tasks to check for issues

+ +

Before proceeding further, it’s a good idea to check that there aren’t missing +translations or multiple gem versions.

+ +
    +
  1. Bootstrap your current development environment on the latest master branch +by downloading all dependencies–JRuby, Gems, Solr, etc. +
    build/run bootstrap
    +
    +
  2. +
  3. Run the following checks (recommended): +
    build/run rake -Dtask=check:locales
    +build/run rake -Dtask=check:multiple_gem_versions
    +
    +
  4. +
  5. Missing locales do not need to be addressed for a Release Candidate, but +should be noted and provided prior to a full release. If multiple gem +versions are reported, that should be addressed prior to moving on.
  6. +
+ +

Build and Publish the API and Yard Docs

+ +

API docs are built using the submodule in docs/slate and Docker. +YARD docs are built using the YARD gem. At this time, they cover a small +percentage of the code and are not especially useful.

+ +

Build the API docs

+ +
    +
  1. API documentation depends on the archivesspace/slate submodule +and on Docker. Slate cannot run on JRuby. +
    git submodule init
    +git submodule update
    +
    +
  2. +
  3. Run the doc:api task to generate Slate API and Yard documentation. (Note: the +API generation requires a DB connection with standard enumeration values.) +
    ARCHIVESSPACE_VERSION=X.Y.Z APPCONFIG_DB_URL=$APPCONFIG_DB_URL build/run doc:api
    +
    +

    This generates docs/slate/source/index.html.md (Slate source document).

    +
  4. +
  5. (Optional) Run a docker container to preview API docs. +
    docker-compose -f docker-compose-docs.yml up
    +
    +

    Visit http://localhost:4568 to preview the api docs.

    +
  6. +
  7. Build the static api files. The api markdown document should already be in docs/slate/source (step 2 above). +The api markdown will be rendered to html and moved to docs/build/api. +
    docker run --rm --name slate -v $(pwd)/docs/build/api:/srv/slate/build -v $(pwd)/docs/slate/source:/srv/slate/source slatedocs/slate build
    +
    +
  8. +
+ +

Build the YARD docs

+ +
    +
  1. Build the YARD docs in the docs/build/doc directory: + shell + ./build/run doc:yardoc +
  2. +
+ +

Commit built docs and push to Github pages

+ +
    +
  1. Double check that you are on a release branch (we don’t need this stuff in master) and +commit the newly built documentation: +
    git add docs/build
    +git commit -m "release-vx.y.z api and yard documentation"
    +
    + +

    Use git subtree to push the documentation to the gh-pages branch:

    +
    git subtree push --prefix docs/build origin gh-pages
    +
    +

    Published documents should appear a short while later at: +http://archivesspace.github.io/archivesspace/api +http://archivesspace.github.io/archivesspace/doc

    + +

    Note: if the push command fails you may need to delete gh-pages in the remote repo:

    +
    git push origin :gh-pages
    +
    +
  2. +
+ +

Building a release yourself

+ +
    +
  1. Building the actual release is very simple. Run the following: +
    ./scripts/build_release vX.X.X
    +
    + +

    Replace X.X.X with the version number. This will build and package a release +in a zip file.

    +
  2. +
+ +

Building a release on Github

+ +
    +
  1. There is no need to build the release yourself. Just push your tag to Github +and trigger the release workflow: +
    git push vX.X.X
    +
    +

    Replace X.X.X with the version number. You can set the resulting release page to +“draft” using the Github API.

    +
  2. +
+ +

Create the Release with Notes

+ +

Build the release notes

+ +

As of v3.4.0, it should no longer be necessary to build release notes manually.

+ +

To manually generate release notes:

+ +
export GITHUB_TOKEN={YOUR DEPLOYMENT TOKEN ON GITHUB}
+./build/run doc:release_notes -Dcurrent_tag=v3.4.0 -Doutfile=RELEASE_NOTES.md -Dtoken=$GITHUB_TOKEN
+
+ +

Edit Release Page As Necessary

+ +

If there are any special considerations, add them to the release page manually. Special considerations +might include changes that will require 3rd party plugins to be updated or +that a full reindex is required.

+ +

Example content:

+ +
This release requires a **full reindex** of ArchivesSpace for all functionality to work
+correctly. Please follow the [instructions for reindexing](https://archivesspace.github.io/tech-docs/administration/indexes.html)
+before starting ArchivesSpace with the new version.
+
+ +

Post release updates

+ +

After a release has been put out it’s time for some maintenance before the next +cycle of development clicks into full gear. Consider the following, depending on +current team consensus:

+ +

Branches

+ +

Delete merged and stale branches in Github as appropriate.

+ +

Milestones

+ +

Close the just-released Milestone, adding a due date of today’s date. Create a +new Milestone for the anticipated next release (this can be changed later if the +version numbering is changed for some reason).

+ +

Test Servers

+ +

Review existing test servers, and request the removal of any that are no longer +needed (e.g. feature branches that have been merged for the release).

+ +

GitHub Issues

+ +

Review existing open GH issues and close any that have been resolved by +the new release (linking to a specific PR if possible). For the remaining open +issues, review if they are still a problem, apply labels, link to known JIRA +issues, and add comments as necessary/relevant.

+ +

Accessibility Scan

+ +

Run accessibility scans for both the public and staff sites and file a ticket +for any new and ongoing accessibility errors.

+ +

PR Assignments

+ +

Begin assigning queued PRs to members of the Core Committers group, making +sure to include the appropriate milestone for the anticipated next release.

+ +

Dependencies

+ +

Gems

+ +

Take a look at all the Gemfile.lock files ( in backend, frontend, public, +etc ) and review the gem versions. Pay close attention to the Rails & Friends +( ActiveSupport, ActionPack, etc ), Rack, and Sinatra versions and make sure +there have not been any security patch versions. There usually are, especially +since Rails sends fix updates rather frequently.

+ +

To update the gems, update the version in Gemfile, delete the Gemfile.lock, and +run ./build/run bootstrap to download everything. Then make sure your test +suite passes.

+ +

Once everything passes, commit your Gemfiles and Gemfile.lock files.

+ + +
+ +
+ + + diff --git a/development/release.md b/development/release.md deleted file mode 100644 index b2199e6d..00000000 --- a/development/release.md +++ /dev/null @@ -1,249 +0,0 @@ -# Building an ArchivesSpace release - -- [Pre-Release Steps](#prerelease) -- [Build the Docs](#docs) -- [Build the Release](#release) -- [Post the Release with Release Notes](#notes) -- [Post-Release Steps](#postrelease) - -## Clone the git repository - -When building a release it is important to start from a clean repository. The -safest way of ensuring this is to clone the repo: - -```shell -git clone https://github.com/archivesspace/archivesspace.git -``` - -## Checkout the release branch and create release tag - -If you are building a major or minor version (see [https://semver.org](https://semver.org)), -start by creating a branch for the release and all future patch releases: - -``` shell -git checkout -b release-v1.0.x -git tag v1.0.0 -``` -If you are building a patch version, just check out the existing branch and see below: - -``` shell -git checkout release-v1.0.x -``` - -Patch versions typically arise because a regression or critical bug has arisen since -the last major or minor release. We try to ensure that the "hotfix" is merged into both -master and the release branch without the need to cherry-pick commits from one branch to -the other. The reason is that cherry-picking creates a new commit (with a new commit id) -that contains identical changes, which is not optimal for the repository history. - -It is therefore preferable to start from the release branch when creating a "hotfix" -that needs to be merged into both the release branch and master. The Pull Request should -then be based on the release branch. After that Pull Request has been through Code review, -QA and merged, a second Pull Request should be created to merge the updated release branch -to master. - -Consider the following scenario. 
The current production release is v1.0.0 and a critical -bug has been discovered. In the time since v1.0.0 was released, new features have been -added to the master branch, intended for release in v1.1.0: - -``` shell -git checkout -b oh-no-some-migration-corrupts-some-data origin/release-v1.0.0 -( fixes problem ) -git commit -m "fix bad migration and add a migration to repair corrupted data" -gh pr create -B release-v1.0.x --web -( PR is reviewed and merged to the release branch) -git checkout release-v1.0.x -git pull -git tag v1.0.1 -gh pr create -B master --web -( PR is reviewed and merged to the master branch) -``` - -## Pre-Release Steps - -### Run the ArchivesSpace rake tasks to check for issues - -Before proceeding further, it’s a good idea to check that there aren’t missing -translations or multiple gem versions. - -1. Bootstrap your current development environment on the latest master branch - by downloading all dependencies--JRuby, Gems, Solr, etc. - ```shell - build/run bootstrap - ``` - -2. Run the following checks (recommended): - ```shell - build/run rake -Dtask=check:locales - build/run rake -Dtask=check:multiple_gem_versions - ``` - -3. Missing locales do not need to be addressed for a Release Candidate, but - should be noted and provided prior to a full release. If multiple gem - versions are reported, that should be addressed prior to moving on. - - -## Build and Publish the API and Yard Docs - -API docs are built using the submodule in `docs/slate` and Docker. -YARD docs are built using the YARD gem. At this time, they cover a small -percentage of the code and are not especially useful. - -### Build the API docs - -1. API documentation depends on the [archivesspace/slate](https://github.com/archivesspace/slate) submodule - and on Docker. Slate cannot run on JRuby. - ```shell - git submodule init - git submodule update - ``` - -2. Run the `doc:api` task to generate Slate API and Yard documentation. 
(Note: the - API generation requires a DB connection with standard enumeration values.) - ```shell - ARCHIVESSPACE_VERSION=X.Y.Z APPCONFIG_DB_URL=$APPCONFIG_DB_URL build/run doc:api - ``` - This generates `docs/slate/source/index.html.md` (Slate source document). - -3. (Optional) Run a docker container to preview API docs. - ```shell - docker-compose -f docker-compose-docs.yml up - ``` - Visit `http://localhost:4568` to preview the api docs. - -4. Build the static api files. The api markdown document should already be in `docs/slate/source` (step 2 above). - The api markdown will be rendered to html and moved to `docs/build/api`. - ```shell - docker run --rm --name slate -v $(pwd)/docs/build/api:/srv/slate/build -v $(pwd)/docs/slate/source:/srv/slate/source slatedocs/slate build - ``` - -### Build the YARD docs - -1. Build the YARD docs in the `docs/build/doc` directory: - ```shell - ./build/run doc:yardoc - ``` - -### Commit built docs and push to Github pages - -1. Double check that you are on a release branch (we don't need this stuff in master) and - commit the newly built documentation: - ```shell - git add docs/build - git commit -m "release-vx.y.z api and yard documentation" - ``` - - Use `git subtree` to push the documentation to the `gh-pages` branch: - ```shell - git subtree push --prefix docs/build origin gh-pages - ``` - Published documents should appear a short while later at: - [http://archivesspace.github.io/archivesspace/api](http://archivesspace.github.io/archivesspace/api) - [http://archivesspace.github.io/archivesspace/doc](http://archivesspace.github.io/archivesspace/doc) - - Note: if the push command fails you may need to delete `gh-pages` in the remote repo: - ```shell - git push origin :gh-pages - ``` - -## Building a release yourself - -1. Building the actual release is very simple. Run the following: - ```shell - ./scripts/build_release vX.X.X - ``` - - Replace X.X.X with the version number. 
This will build and package a release - in a zip file. - -## Building a release on Github - -1. There is no need to build the release yourself. Just push your tag to Github - and trigger the `release` workflow: - ```shell - git push vX.X.X - ``` - Replace X.X.X with the version number. You can set the resulting release page to - "draft" using the Github API. - - -## Create the Release with Notes - -### Build the release notes - -**As of v3.4.0, it should no longer necessary to build release notes manually.** - -To manually generate release notes: - -```shell -export GITHUB_TOKEN={YOUR DEPLOYMENT TOKEN ON GITHUB} -./build/run doc:release_notes -Dcurrent_tag=v3.4.0 -Doutfile=RELEASE_NOTES.md -Dtoken=$GITHUB_TOKEN -``` - -#### Edit Release Page As Neccessary - -If there are any special considerations add them to the release page manually. Special considerations -might include changes that will require 3rd party plugins to be updated or a -that a full reindex is required. - -Example content: - -```md -This release requires a **full reindex** of ArchivesSpace for all functionality to work -correctly. Please follow the [instructions for reindexing](https://archivesspace.github.io/tech-docs/administration/indexes.html) -before starting ArchivesSpace with the new version. -``` - -## Post release updates - -After a release has been put out it's time for some maintenance before the next -cycle of development clicks into full gear. Consider the following, depending on -current team consensus: - -### Branches - -Delete merged and stale branches in Github as appropriate. - -### Milestones - -Close the just-released Milestone, adding a due date of today's date. Create a -new Milestone for the anticipated next release (this can be changed later if the -version numbering is changed for some reason). - -### Test Servers - -Review existing test servers, and request the removal of any that are no longer -needed (e.g. feature branches that have been merged for the release). 
- -### GitHub Issues - -Review existing opening GH issues and close any that have been resolved by -the new release (linking to a specific PR if possible). For the remaining open -issues, review if they are still a problem, apply labels, link to known JIRA -issues, and add comments as necessary/relevant. - -### Accessibility Scan - -Run accessibility scans for both the public and staff sites and file a ticket -for any new and ongoing accessibility errors. - -### PR Assignments - -Begin assigning queued PRs to members of the Core Committers group, making -sure to include the appropriate milestone for the anticipated next release. - -### Dependencies - -#### Gems - -Take a look at all the Gemfile.lock files ( in backend, frontend, public, -etc ) and review the gem versions. Pay close attention to the Rails & Friends -( ActiveSupport, ActionPack, etc ), Rack, and Sinatra versions and make sure -there have not been any security patch versions. There usually are, especially -since Rails sends fix updates rather frequently. - -To update the gems, update the version in Gemfile, delete the Gemfile.lock, and -run ./build/run bootstrap to download everything. Then make sure your test -suite passes. - -Once everything passes, commit your Gemfiles and Gemfile.lock files. diff --git a/development/release_schema_versions.html b/development/release_schema_versions.html new file mode 100644 index 00000000..dd272e2d --- /dev/null +++ b/development/release_schema_versions.html @@ -0,0 +1,287 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + development/release_schema_versions.md + +

+ +

+ + Report issue on Jira + development/release_schema_versions.md + +

+
+
+ +

ArchivesSpace releases and database versions

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ReleaseDB Version
1.1.033
1.1.135
1.1.235
1.2.038
1.3.056
1.4.059
1.4.159
1.4.259
1.5.074
1.5.174
1.5.275
1.5.375
1.5.475
2.0.084
2.0.184
2.1.092
2.1.192
2.1.292
2.2.093
2.2.194
2.2.295
2.3.097
2.3.197
2.3.297
2.4.0100
2.4.1100
2.5.0102
2.5.1102
2.5.2108
2.6.0120
2.7.0126
2.7.1129
2.8.0135
2.8.1138
+ + + + + + + + diff --git a/development/release_schema_versions.md b/development/release_schema_versions.md deleted file mode 100644 index 8d8833e0..00000000 --- a/development/release_schema_versions.md +++ /dev/null @@ -1,38 +0,0 @@ -# ArchivesSpace releases and database versions - -| Release | DB Version | -|---------|------------| -| 1.1.0 | 33 | -| 1.1.1 | 35 | -| 1.1.2 | 35 | -| 1.2.0 | 38 | -| 1.3.0 | 56 | -| 1.4.0 | 59 | -| 1.4.1 | 59 | -| 1.4.2 | 59 | -| 1.5.0 | 74 | -| 1.5.1 | 74 | -| 1.5.2 | 75 | -| 1.5.3 | 75 | -| 1.5.4 | 75 | -| 2.0.0 | 84 | -| 2.0.1 | 84 | -| 2.1.0 | 92 | -| 2.1.1 | 92 | -| 2.1.2 | 92 | -| 2.2.0 | 93 | -| 2.2.1 | 94 | -| 2.2.2 | 95 | -| 2.3.0 | 97 | -| 2.3.1 | 97 | -| 2.3.2 | 97 | -| 2.4.0 | 100 | -| 2.4.1 | 100 | -| 2.5.0 | 102 | -| 2.5.1 | 102 | -| 2.5.2 | 108 | -| 2.6.0 | 120 | -| 2.7.0 | 126 | -| 2.7.1 | 129 | -| 2.8.0 | 135 | -| 2.8.1 | 138 | diff --git a/development/releases.html b/development/releases.html new file mode 100644 index 00000000..71ad2c20 --- /dev/null +++ b/development/releases.html @@ -0,0 +1,328 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + development/releases.md + +

+ +

+ + Report issue on Jira + development/releases.md + +

+
+
+ +

Releases:

+

3.4.0 May 24, 2023 +The schema number for this release is 172. +https://github.com/archivesspace/archivesspace/tree/v3.4.0

+ +

3.3.1 Oct 4, 2022 +The schema number for this release is 164 +https://github.com/archivesspace/archivesspace/tree/v3.3.1

+ +

3.2.0 February 4, 2022 +The schema number for this release is 159. +https://github.com/archivesspace/archivesspace/releases/download/v3.2.0/archivesspace-v3.2.0.zip

+ +

3.1.1 November 19, 2021 +The schema number for this release is 157. +https://github.com/archivesspace/archivesspace/releases/download/v3.1.0/archivesspace-v3.1.1.zip

+ +

3.1.0 September 20, 2021 +The schema number for this release is 157. +https://github.com/archivesspace/archivesspace/releases/download/v3.1.0/archivesspace-v3.1.0.zip

+ +

3.0.2 August 11, 2021
+The schema number for this release is 148.
+https://github.com/archivesspace/archivesspace/releases/download/v3.0.2/archivesspace-v3.0.2.zip

+ +

3.0.1 June 4, 2021
+The schema number for this release is 147.
+https://github.com/archivesspace/archivesspace/releases/download/v3.0.1/archivesspace-v3.0.1.zip

+ +

3.0.0 May 10, 2021
+The schema number for this release is 147.
+[Bug in Release]

+ +

2.8.1 Nov 11, 2020.
+The schema number for this release is 138.
+https://github.com/archivesspace/archivesspace/releases/download/v2.8.1/archivesspace-v2.8.1.zip

+ +

2.8.0 Jul 16, 2020.
+The schema number for this release is 135.
+https://github.com/archivesspace/archivesspace/releases/download/v2.8.0/archivesspace-v2.8.0.zip

+ +

2.7.1 Feb 14, 2020.
+The schema number for this release is 129.
+https://github.com/archivesspace/archivesspace/releases/download/v2.7.1/archivesspace-v2.7.1.zip

+ +

2.7.0 Oct 9, 2019.
+The schema number for this release is 126.
+https://github.com/archivesspace/archivesspace/releases/download/v2.7.0/archivesspace-v2.7.0.zip

+ +

2.6.0 May 30, 2019.
+The schema number for this release is 120.
+https://github.com/archivesspace/archivesspace/releases/download/v2.6.0/archivesspace-v2.6.0.zip

+ +

2.5.2 Jan 15, 2019.
+The schema number for this release is 108.
+https://github.com/archivesspace/archivesspace/releases/download/v2.5.2/archivesspace-v2.5.2.zip

+ +

2.5.1 Oct 17, 2018.
+This release includes no new database migrations.
+https://github.com/archivesspace/archivesspace/releases/download/v2.5.1/archivesspace-v2.5.1.zip

+ +

2.5.0 Aug 10, 2018.
+The schema number for this release is 102.
+https://github.com/archivesspace/archivesspace/releases/download/v2.5.0/archivesspace-v2.5.0.zip

+ +

2.4.1 Jun 22, 2018.
+This release includes no new database migrations.
+https://github.com/archivesspace/archivesspace/releases/download/v2.4.1/archivesspace-v2.4.1.zip

+ +

2.4.0 Jun 7, 2018.
+The schema number for this release is 100.
+https://github.com/archivesspace/archivesspace/releases/download/v2.4.0/archivesspace-v2.4.0.zip

+ +

2.3.2 Mar 27, 2018.
+This release includes no new database migrations.
+https://github.com/archivesspace/archivesspace/releases/download/v2.3.2/archivesspace-v2.3.2.zip

+ +

2.3.1 Feb 28, 2018.
+This release includes no new database migrations.
+https://github.com/archivesspace/archivesspace/releases/download/v2.3.1/archivesspace-v2.3.1.zip

+ +

2.3.0 Feb 5, 2018.
+The schema number for this release is 97.
+https://github.com/archivesspace/archivesspace/releases/download/v2.3.0/archivesspace-v2.3.0.zip

+ +

2.2.2 Dec 13, 2017.
+The schema number for this release is 95.
+https://github.com/archivesspace/archivesspace/releases/download/v2.2.2/archivesspace-v2.2.2.zip

+ +

2.2.0 Oct 12, 2017.
+The schema number for this release is 93.
+https://github.com/archivesspace/archivesspace/releases/download/v2.2.0/archivesspace-v2.2.0.zip

+ +

2.1.2 Sep 1, 2017.
+The schema number for this release is 92.
+https://github.com/archivesspace/archivesspace/releases/download/v2.1.2/archivesspace-v2.1.2.zip

+ +

2.1.1 Aug 16, 2017.
+The schema number for this release is 92.
+https://github.com/archivesspace/archivesspace/releases/download/v2.1.1/archivesspace-v2.1.1.zip

+ +

2.1.0 Jul 18, 2017.
+The schema number for this release is 92.
+https://github.com/archivesspace/archivesspace/releases/download/v2.1.0/archivesspace-v2.1.0.zip

+ +

2.0.1 May 2, 2017.
+The schema number for this release is 84.
+https://github.com/archivesspace/archivesspace/releases/download/v2.0.1/archivesspace-v2.0.1.zip

+ +

2.0.0 Apr 18, 2017.
+The schema number for this release is 84.
+https://github.com/archivesspace/archivesspace/releases/download/v2.0.0/archivesspace-v2.0.0.zip

+ +

1.5.4 Mar 16, 2017.
+The schema number for this release is 75.
+https://github.com/archivesspace/archivesspace/releases/download/v1.5.4/archivesspace-v1.5.4.zip

+ +

1.5.3 Feb 15, 2017.
+The schema number for this release is 75.
+https://github.com/archivesspace/archivesspace/releases/download/v1.5.3/archivesspace-v1.5.3.zip

+ +

1.5.2 Dec 8, 2016.
+The schema number for this release is 75.
+https://github.com/archivesspace/archivesspace/releases/download/v1.5.2/archivesspace-v1.5.2.zip

+ +

1.5.1 Jul 29, 2016.
+The schema number for this release is 74.
+https://github.com/archivesspace/archivesspace/releases/download/v1.5.1/archivesspace-v1.5.1.zip

+ +

1.5.0 Jul 20, 2016.
+The schema number for this release is 74.
+https://github.com/archivesspace/archivesspace/releases/download/v1.5.0/archivesspace-v1.5.0.zip

+ +

1.4.2 Oct 27, 2015.
+The schema number for this release is 59.
+https://github.com/archivesspace/archivesspace/releases/download/v1.4.2/archivesspace-v1.4.2.zip

+ +

1.4.1 Oct 13, 2015.
+The schema number for this release is 59.
+https://github.com/archivesspace/archivesspace/releases/download/v1.4.1/archivesspace-v1.4.1.zip

+ +

1.4.0 Sep 29, 2015.
+The schema number for this release is 59.
+https://github.com/archivesspace/archivesspace/releases/download/v1.4.0/archivesspace-v1.4.0.zip

+ +

1.3.0 Jun 30, 2015.
+The schema number for this release is 56.
+https://github.com/archivesspace/archivesspace/releases/download/v1.3.0/archivesspace-v1.3.0.zip

+ +

1.2.0 Mar 30, 2015.
+The schema number for this release is 38.
+https://github.com/archivesspace/archivesspace/releases/download/v1.2.0/archivesspace-v1.2.0.zip

+ +

1.1.2 Jan 21, 2015.
+The schema number for this release is 35.
+https://github.com/archivesspace/archivesspace/releases/download/v1.1.2/archivesspace-v1.1.2.zip

+ +

1.1.1 Jan 6, 2015.
+The schema number for this release is 35.
+https://github.com/archivesspace/archivesspace/archive/refs/tags/v1.1.1.zip (only source available)

+ +

1.1.0 Oct 20, 2014.
+The schema number for this release is 33.
+https://github.com/archivesspace/archivesspace/releases/download/v1.1.0/archivesspace-v1.1.0.zip

+ +

1.0.9 May 13, 2014.
+The schema number for this release is ???
+https://github.com/archivesspace/archivesspace/releases/download/v1.0.9/archivesspace-v1.0.9.zip

+ +

1.0.7.1 March 7, 2014.
+The schema number for this release is ???
+https://github.com/archivesspace/archivesspace/releases/download/v1.0.7.1/archivesspace-v1.0.7.1.zip

+ +

1.0.4 Jan 14, 2014.
+The schema number for this release is ???
+https://github.com/archivesspace/archivesspace/releases/download/v1.0.4/archivesspace-v1.0.4.zip

+ +

1.0.2 Nov 26, 2013.
+The schema number for this release is ???
+https://github.com/archivesspace/archivesspace/releases/download/v1.0.2/archivesspace-v1.0.2.zip

+ +

1.0.1 Nov 1, 2013.
+The schema number for this release is ???
+https://github.com/archivesspace/archivesspace/releases/download/v1.0.1/archivesspace-v1.0.1.zip

+ +

1.0.0 Oct 4, 2013.
+The schema number for this release is ???
+https://github.com/archivesspace/archivesspace/releases/download/v1.0.0/archivesspace-v1.0.0.zip

+ + + +
+ +
+ + + diff --git a/development/releases.md b/development/releases.md deleted file mode 100644 index c2a97f8f..00000000 --- a/development/releases.md +++ /dev/null @@ -1,191 +0,0 @@ -## Releases: -3.4.0 May 24, 2023 -The schema number for this release is 172. -https://github.com/archivesspace/archivesspace/tree/v3.4.0 - -3.3.1 Oct 4, 2022 -The schema number for this release is 164 -https://github.com/archivesspace/archivesspace/tree/v3.3.1 - -3.2.0 February 4, 2022 -The schema number for this release is 159. -https://github.com/archivesspace/archivesspace/releases/download/v3.2.0/archivesspace-v3.2.0.zip - -3.1.1 Novemver 19, 2021 -The schema number for this release is 157. -https://github.com/archivesspace/archivesspace/releases/download/v3.1.0/archivesspace-v3.1.1.zip - -3.1.0 September 20, 2021 -The schema number for this release is 157. -https://github.com/archivesspace/archivesspace/releases/download/v3.1.0/archivesspace-v3.1.0.zip - - -3.0.2 August 11, 2021 -The schema number for this release is 148. -https://github.com/archivesspace/archivesspace/releases/download/v3.0.2/archivesspace-v3.0.2.zip - - -3.0.1 June 4, 2021 -The schema number for this release is 147. -https://github.com/archivesspace/archivesspace/releases/download/v3.0.1/archivesspace-v3.0.1.zip - -3.0.0 May 10, 2021 -The schema number for this release is 147. -[Bug in Release] - -2.8.1 Nov 11, 2020. -The schema number for this release is 138. -https://github.com/archivesspace/archivesspace/releases/download/v2.8.1/archivesspace-v2.8.1.zip - -2.8.0 Jul 16, 2020. -The schema number for this release is 135. -https://github.com/archivesspace/archivesspace/releases/download/v2.8.0/archivesspace-v2.8.0.zip - -2.7.1 Feb 14, 2020. -The schema number for this release is 129. -https://github.com/archivesspace/archivesspace/releases/download/v2.7.1/archivesspace-v2.7.1.zip - -2.7.0 Oct 9, 2019. -The schema number for this release is 126. 
-https://github.com/archivesspace/archivesspace/releases/download/v2.7.0/archivesspace-v2.7.0.zip - -2.6.0 May 30, 2019. -The schema number for this release is 120. -https://github.com/archivesspace/archivesspace/releases/download/v2.6.0/archivesspace-v2.6.0.zip - -2.5.2 Jan 15, 2019. -The schema number for this release is 108. -https://github.com/archivesspace/archivesspace/releases/download/v2.5.2/archivesspace-v2.5.2.zip - -2.5.1 Oct 17, 2018. -This release includes no new database migrations. -https://github.com/archivesspace/archivesspace/releases/download/v2.5.1/archivesspace-v2.5.1.zip - -2.5.0 Aug 10, 2018. -The schema number for this release is 102. -https://github.com/archivesspace/archivesspace/releases/download/v2.5.0/archivesspace-v2.5.0.zip - -2.4.1 Jun 22, 2018. -This release includes no new database migrations. -https://github.com/archivesspace/archivesspace/releases/download/v2.4.1/archivesspace-v2.4.1.zip - -2.4.0 Jun 7, 2018. -The schema number for this release is 100. -https://github.com/archivesspace/archivesspace/releases/download/v2.4.0/archivesspace-v2.4.0.zip - -2.3.2 Mar 27, 2018. -This release includes no new database migrations. -https://github.com/archivesspace/archivesspace/releases/download/v2.3.2/archivesspace-v2.3.2.zip - -2.3.1 Feb 28, 2018. -This release includes no new database migrations. -https://github.com/archivesspace/archivesspace/releases/download/v2.3.1/archivesspace-v2.3.1.zip - -2.3.0 Feb 5, 2018. -The schema number for this release is 97. -https://github.com/archivesspace/archivesspace/releases/download/v2.3.0/archivesspace-v2.3.0.zip - -2.2.2 Dec 13, 2017. -The schema number for this release is 95. -https://github.com/archivesspace/archivesspace/releases/download/v2.2.2/archivesspace-v2.2.2.zip - -2.2.0 Oct 12, 2017. -The schema number for this release is 93. -https://github.com/archivesspace/archivesspace/releases/download/v2.2.0/archivesspace-v2.2.0.zip - -2.1.2 Sep 1, 2017. 
-The schema number for this release is 92. -https://github.com/archivesspace/archivesspace/releases/download/v2.1.2/archivesspace-v2.1.2.zip - -2.1.1 Aug 16, 2017. -The schema number for this release is 92. -https://github.com/archivesspace/archivesspace/releases/download/v2.1.1/archivesspace-v2.1.1.zip - -2.1.0 Jul 18, 2017. -The schema number for this release is 92. -https://github.com/archivesspace/archivesspace/releases/download/v2.1.0/archivesspace-v2.1.0.zip - -2.0.1 May 2, 2017. -The schema number for this release is 84. -https://github.com/archivesspace/archivesspace/releases/download/v2.0.1/archivesspace-v2.0.1.zip - -2.0.0 Apr 18, 2017. -The schema number for this release is 84. -https://github.com/archivesspace/archivesspace/releases/download/v2.0.0/archivesspace-v2.0.0.zip - -1.5.4 Mar 16, 2017. -The schema number for this release is 75. -https://github.com/archivesspace/archivesspace/releases/download/v1.5.4/archivesspace-v1.5.4.zip - -1.5.3 Feb 15, 2017. -The schema number for this release is 75. -https://github.com/archivesspace/archivesspace/releases/download/v1.5.3/archivesspace-v1.5.3.zip - -1.5.2 Dec 8, 2016. -The schema number for this release is 75. -https://github.com/archivesspace/archivesspace/releases/download/v1.5.2/archivesspace-v1.5.2.zip - -1.5.1 Jul 29, 2016. -The schema number for this release is 74. -https://github.com/archivesspace/archivesspace/releases/download/v1.5.1/archivesspace-v1.5.1.zip - -1.5.0 Jul 20, 2016. -The schema number for this release is 74. -https://github.com/archivesspace/archivesspace/releases/download/v1.5.0/archivesspace-v1.5.0.zip - -1.4.2 Oct 27, 2015. -The schema number for this release is 59. -https://github.com/archivesspace/archivesspace/releases/download/v1.4.2/archivesspace-v1.4.2.zip - -1.4.1 Oct 13, 2015. -The schema number for this release is 59. -https://github.com/archivesspace/archivesspace/releases/download/v1.4.1/archivesspace-v1.4.1.zip - -1.4.0 Sep 29, 2015. 
-The schema number for this release is 59. -https://github.com/archivesspace/archivesspace/releases/download/v1.4.0/archivesspace-v1.4.0.zip - -1.3.0 Jun 30, 2015. -The schema number for this release is 56. -https://github.com/archivesspace/archivesspace/releases/download/v1.3.0/archivesspace-v1.3.0.zip - -1.2.0 Mar 30, 2015. -The schema number for this release is 38. -https://github.com/archivesspace/archivesspace/releases/download/v1.2.0/archivesspace-v1.2.0.zip - -1.1.2 Jan 21, 2015. -The schema number for this release is 35. -https://github.com/archivesspace/archivesspace/releases/download/v1.1.2/archivesspace-v1.1.2.zip - -1.1.1 Jan 6, 2015. -The schema number for this release is 35. -https://github.com/archivesspace/archivesspace/archive/refs/tags/v1.1.1.zip (only source available) - -1.1.0 Oct 20, 2014. -The schema number for this release is 33. -https://github.com/archivesspace/archivesspace/releases/download/v1.1.0/archivesspace-v1.1.0.zip - -1.0.9 May 13, 2014. -The schema number for this release is ??? -https://github.com/archivesspace/archivesspace/releases/download/v1.0.9/archivesspace-v1.0.9.zip - -1.0.7.1 March 7, 2014. -The schema number for this release is ??? -https://github.com/archivesspace/archivesspace/releases/download/v1.0.7.1/archivesspace-v1.0.7.1.zip - -1.0.4 Jan 14, 2014. -The schema number for this release is ??? -https://github.com/archivesspace/archivesspace/releases/download/v1.0.4/archivesspace-v1.0.4.zip - -1.0.2 Nov 26, 2013. -The schema number for this release is ??? -https://github.com/archivesspace/archivesspace/releases/download/v1.0.2/archivesspace-v1.0.2.zip - -1.0.1 Nov 1, 2013. -The schema number for this release is ??? -https://github.com/archivesspace/archivesspace/releases/download/v1.0.1/archivesspace-v1.0.1.zip - -1.0.0 Oct 4, 2013. -The schema number for this release is ??? 
-https://github.com/archivesspace/archivesspace/releases/download/v1.0.0/archivesspace-v1.0.0.zip - diff --git a/development/ui_test.html b/development/ui_test.html new file mode 100644 index 00000000..8baa7669 --- /dev/null +++ b/development/ui_test.html @@ -0,0 +1,261 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + development/ui_test.md + +

+ +

+ + Report issue on Jira + development/ui_test.md + +

+
+
+ +

UI test suites

+ +

ArchivesSpace’s staff and public interfaces use Selenium to run automated browser tests. These tests can be run using Firefox via geckodriver and Chrome (either regular Chrome or headless).

+ +

Firefox is the default used in our CI workflows.

+ +

On Ubuntu 22.04 or later, the included Firefox deb package is a transition package that actually installs Firefox through snap. Snap has security restrictions that do not work with automated testing without additional configuration.

+ +

To uninstall the Firefox snap package and reinstall it as a traditional deb package on Ubuntu use:

+ +
# remove old snap firefox package (if installed)
+sudo snap remove firefox
+
+# create a keyring directory (if not existing)
+sudo install -d -m 0755 /etc/apt/keyrings
+
+# download mozilla key and add it to the keyring
+wget -q https://packages.mozilla.org/apt/repo-signing-key.gpg -O- | sudo tee /etc/apt/keyrings/packages.mozilla.org.asc > /dev/null
+
+# set high priority for the mozilla packages
+echo "deb [signed-by=/etc/apt/keyrings/packages.mozilla.org.asc] https://packages.mozilla.org/apt mozilla main" | sudo tee -a /etc/apt/sources.list.d/mozilla.list > /dev/null
+echo '
+Package: *
+Pin: origin packages.mozilla.org
+Pin-Priority: 1000
+' | sudo tee /etc/apt/preferences.d/mozilla
+
+# install firefox and geckodriver as a standard deb package
+sudo apt update && sudo apt install firefox firefox-geckodriver
+
+ +

On Mac you can use: brew install geckodriver.

+ +

To run using Chrome, you must first download the appropriate ChromeDriver +executable +and place it somewhere in your OS system path. Mac users with Homebrew may accomplish this via brew cask install chromedriver.

+ +

Please note, you must have either Firefox or Chrome installed on your system to +run these tests. Consult the Firefox WebDriver +or ChromeDriver +documentation to ensure your Selenium, driver, browser, and OS versions all match +and support each other.

+ +

Before running:

+ +

Run the bootstrap build task to configure JRuby and all required dependencies:

+ +
 $ cd ..
+ $ build/run bootstrap
+
+ +

Note: all example code assumes you are running from your ArchivesSpace project directory.

+ +

Running the tests:

+ +
#Frontend tests
+./build/run frontend:selenium # Firefox, headless
+FIREFOX_OPTS= ./build/run frontend:selenium # Firefox, no-opts = heady
+
+SELENIUM_CHROME=true ./build/run frontend:selenium # Chrome, headless
+SELENIUM_CHROME=true CHROME_OPTS= ./build/run frontend:selenium # Chrome, no-opts = heady
+
+#Public tests
+./build/run public:test # Firefox, headless
+FIREFOX_OPTS= ./build/run public:test # Firefox, no-opts = heady
+
+SELENIUM_CHROME=true ./build/run public:test # Chrome, headless
+SELENIUM_CHROME=true CHROME_OPTS= ./build/run public:test # Chrome, no-opts = heady
+
+ +

Tests can be scoped to specific files or groups:

+ +
./build/run .. -Dspec='path/to/spec/from/spec/directory' # single file
+./build/run .. -Dexample='[description from it block]' # specific block
+
+#EXAMPLES
+./build/run frontend:selenium -Dexample='Repository model'
+FIREFOX_OPTS= ./build/run frontend:selenium -Dexample='Repository model'# Firefox, heady
+
+./build/run public:test -Dspec='features/accessibility_spec.rb'
+SELENIUM_CHROME=true CHROME_OPTS= ./build/run public:test -Dspec='features/accessibility_spec.rb' # Chrome, heady
+
+ +

Tests require a backend and a frontend service to be running. To avoid the overhead of starting and stopping them while developing, you can run tests against a dev backend:

+ +
# start mysql and solr containers:
+docker-compose -f docker-compose-dev.yml up
+
+# start services:
+ supervisord -c supervisord/archivesspace.conf
+
+# run a spec using the started backend:
+ASPACE_TEST_BACKEND_URL='http://localhost:4567' ./build/run frontend:test -Dpattern="./features/events_spec.rb"
+
+# run all examples that contain "can spawn" in their description:
+./build/run frontend:test -Dpattern="./features/accessions_spec.rb" -Dexample="can spawn"
+
+ +

Note, however, that some tests are dependent on a sequence of ordered steps and may not always run cleanly in isolation. In this case, more than the example provided may be run, and/or unexpected fails may result.

+ +

Saved pages on spec failures

+

+When frontend specs fail, a screenshot and an HTML page are saved for each failed example under frontend/tmp/capybara. On the CI, a zip file will be available for each failed CI job run under Summary -> Artifacts. In order to load the assets (and not see plain html) when viewing the saved html pages, a dev server should be running locally on port 3000; see Running a development version of ArchivesSpace (https://archivesspace.github.io/tech-docs/development/dev.html).

+ +

Keeping the test database up to date

+

When calling ./build/run frontend:test to run frontend specs, the following steps happen before the actual specs run:

+ +
    +
  • All tables of the test database are dropped: ./build/run db:nuke:test
  • +
  • frontend/spec/fixtures/archivesspace-test.sql is loaded to the test database: ./build/run db:load:test
  • +
  • Any not-yet-applied migrations are run: ./build/run db:migrate:test
  • +
+ +

Updating the test database dump

+

If any migrations are being applied whenever you run one or all frontend specs, it means that the test database dump frontend/spec/fixtures/archivesspace-test.sql has stayed behind. A new test database dump can be created by running:

+ +
./build/run db:nuke:test
+./build/run db:load:test
+./build/run db:migrate:test
+./build/run db:dump:test
+
+ +

An updated frontend/spec/fixtures/archivesspace-test.sql will be created that can be committed and pushed to a Pull Request.

+ + +
+ +
+ + + diff --git a/development/ui_test.md b/development/ui_test.md deleted file mode 100644 index a5fd417a..00000000 --- a/development/ui_test.md +++ /dev/null @@ -1,124 +0,0 @@ -# UI test suites - -ArchivesSpace's staff and public interfaces use [Selenium](http://docs.seleniumhq.org/) to run automated browser tests. These tests can be run using [Firefox via geckodriver](https://firefox-source-docs.mozilla.org/testing/geckodriver/geckodriver/index.html) and [Chrome](https://sites.google.com/a/chromium.org/chromedriver/home) (either regular Chrome or headless). - -Firefox is the default used in our [CI workflows](https://github.com/archivesspace/archivesspace/actions). - -On Ubuntu 22.04 or later, the included Firefox deb package is a transition package that actually installs Firefox through [snap](https://snapcraft.io/). Snap has security restrictions that do not work with automated testing without additional configuration. - -To uninstall the Firefox snap package and reinstall it as a traditional deb package on Ubuntu use: - -```bash -# remove old snap firefox package (if installed) -sudo snap remove firefox - -# create a keyring directory (if not existing) -sudo install -d -m 0755 /etc/apt/keyrings - -# download mozilla key and add it to the keyring -wget -q https://packages.mozilla.org/apt/repo-signing-key.gpg -O- | sudo tee /etc/apt/keyrings/packages.mozilla.org.asc > /dev/null - -# set high priority for the mozilla pakcages -echo "deb [signed-by=/etc/apt/keyrings/packages.mozilla.org.asc] https://packages.mozilla.org/apt mozilla main" | sudo tee -a /etc/apt/sources.list.d/mozilla.list > /dev/null -echo ' -Package: * -Pin: origin packages.mozilla.org -Pin-Priority: 1000 -' | sudo tee /etc/apt/preferences.d/mozilla - -# install firefox and geckdriver as a standard deb package -sudo apt update && sudo apt install firefox firefox-geckodriver -``` - -On Mac you can use: `brew install geckodriver`. 
- -To run using Chrome, you must first download the appropriate [ChromeDriver -executable](https://sites.google.com/a/chromium.org/chromedriver/downloads) -and place it somewhere in your OS system path. Mac users with Homebrew may accomplish this via `brew cask install chromedriver`. - -**Please note, you must have either Firefox or Chrome installed on your system to -run these tests. Consult the [Firefox WebDriver](https://developer.mozilla.org/en-US/docs/Web/WebDriver) -or [ChromeDriver](https://sites.google.com/a/chromium.org/chromedriver/home) -documentation to ensure your Selenium, driver, browser, and OS versions all match -and support each other.** - -## Before running: - -Run the bootstrap build task to configure JRuby and all required dependencies: - - $ cd .. - $ build/run bootstrap - -Note: all example code assumes you are running from your ArchivesSpace project directory. - -## Running the tests: - -```bash -#Frontend tests -./build/run frontend:selenium # Firefox, headless -FIREFOX_OPTS= ./build/run frontend:selenium # Firefox, no-opts = heady - -SELENIUM_CHROME=true ./build/run frontend:selenium # Chrome, headless -SELENIUM_CHROME=true CHROME_OPTS= ./build/run frontend:selenium # Chrome, no-opts = heady - -#Public tests -./build/run public:test # Firefox, headless -FIREFOX_OPTS= ./build/run public:test # Firefox, no-opts = heady - -SELENIUM_CHROME=true ./build/run public:test # Chrome, headless -SELENIUM_CHROME=true CHROME_OPTS= ./build/run public:test # Chrome, no-opts = heady -``` - -Tests can be scoped to specific files or groups: - -```bash -./build/run .. -Dspec='path/to/spec/from/spec/directory' # single file -./build/run .. 
-Dexample='[description from it block]' # specific block - -#EXAMPLES -./build/run frontend:selenium -Dexample='Repository model' -FIREFOX_OPTS= ./build/run frontend:selenium -Dexample='Repository model'# Firefox, heady - -./build/run public:test -Dspec='features/accessibility_spec.rb' -SELENIUM_CHROME=true CHROME_OPTS= ./build/run public:test -Dspec='features/accessibility_spec.rb' # Chrome, heady -``` - -Test require a backend and a frontend service to be running. To ovoid the overhead of starting and stopping them while developing, you can run tests against a dev backend: - -``` -# start mysql and solr containers: -docker-compose -f docker-compose-dev.yml up - -# start services: - supervisord -c supervisord/archivesspace.conf - -# run a spec using the started backend: -ASPACE_TEST_BACKEND_URL='http://localhost:4567' ./build/run frontend:test -Dpattern="./features/events_spec.rb" - -# run all examples that contain "can spawn" in their description: -./build/run frontend:test -Dpattern="./features/accessions_spec.rb" -Dexample="can spawn" -``` - -Note, however, that some tests are dependent on a sequence of ordered steps and may not always run cleanly in isolation. In this case, more than the example provided may be run, and/or unexpected fails may result. - -### Saved pages on spec failures -When frontend specs fail, a screenshot and an html page is saved for each failed example under `frontend/tmp/capybara`. On the CI, a zip file will be available for each failed CI job run under Summary -> Artifacts. In order to load the assets (and not see plain html) when viewing the saved html pages, a dev server should be running locally on port 3000, see [Running a development version of ArchivesSpace(https://archivesspace.github.io/tech-docs/development/dev.html). 
- -### Keeping the test database up to date -When calling `./build/run frontend:test` to run frontend specs, the following steps happen before the actual specs run: - -* All tables of the test database are dropped: `./build/run db:nuke:test` -* `frontend/spec/fixtures/archivesspace-test.sql` is loaded to the test database: `./build/run db:load:test` -* Any not-yet-applied migrations are run: `./build/run db:migrate:test` - -#### Updating the test database dump -If any migrations are being applied whenever you run one or all frontend specs, it means that the test database dump `frontend/spec/fixtures/archivesspace-test.sql` has stayed behind. A new test database dump can be created by running: - -``` -./build/run db:nuke:test -./build/run db:load:test -./build/run db:migrate:test -./build/run db:dump:test -``` - -An updated `frontend/spec/fixtures/archivesspace-test.sql` will be created that can be committed and pushed to a Pull Request. diff --git a/development/vscode.html b/development/vscode.html new file mode 100644 index 00000000..a952e311 --- /dev/null +++ b/development/vscode.html @@ -0,0 +1,192 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + development/vscode.md + +

+ +

+ + Report issue on Jira + development/vscode.md + +

+
+
+ +

Using the VS Code editor for local development

+ +

ArchivesSpace provides a VS Code settings file that makes it easy for contributors using VS Code to follow the code style of the project. Using this tool chain in your editor helps fix code format and lint errors before committing files or running tests. In many cases such errors will be fixed automatically when the file being worked on is saved. Errors that can’t be fixed automatically will be highlighted with squiggly lines. Hovering your cursor over these lines will display a description of the error to help reach a solution.

+ +

Prerequisites

+ +
    +
  1. Node.js
  2. +
  3. Ruby
  4. +
  5. VS Code
  6. +
+ +

Set up VS Code

+ +

Add system dependencies

+ +
    +
  1. ESLint
  2. +
  3. Prettier
  4. +
  5. Rubocop
  6. +
  7. Stylelint
  8. +
+ +

Rubocop

+ +
gem install rubocop
+
+ +

See https://docs.rubocop.org/rubocop/installation.html for further information, including using Bundler.

+ +

ESLint, Prettier, Stylelint

+ +

Run the following command from the ArchivesSpace root directory.

+ +
npm install
+
+ +

See package.json for further details on how these tools are used in ArchivesSpace.

+ +

Add VS Code extensions

+ +

Add the following extensions via the VS Code command palette or the Extensions panel. (See this documentation for installing and managing extensions).

+ +
    +
  1. ESLint (dbaeumer.vscode-eslint)
  2. +
  3. Prettier (esbenp.prettier-vscode)
  4. +
  5. Ruby Rubocop Revised (LoranKloeze.ruby-rubocop-revived)
  6. +
  7. Stylelint (stylelint.vscode-stylelint)
  8. +
+ +

It’s important to note that since these extensions work in tandem with the VS Code settings file, these settings only impact your ArchivesSpace VS Code Workspace, not your global VS Code User settings.

+ +

The extensions should now work out of the box at this point providing error messages and autocorrecting fixable errors on file save!

+ + +
+ +
+ + + diff --git a/development/vscode.md b/development/vscode.md deleted file mode 100644 index fdcafff5..00000000 --- a/development/vscode.md +++ /dev/null @@ -1,49 +0,0 @@ -# Using the VS Code editor for local development - -ArchivesSpace provides a [VS Code settings file](https://github.com/archivesspace/archivesspace/blob/master/.vscode/settings.json) that makes it easy for contributors using VS Code to follow the code style of the project. Using this tool chain in your editor helps fix code format and lint errors _before_ committing files or running tests. In many cases such errors will be fixed automatically when the file being worked on is saved. Errors that can't be fixed automatically will be highlighted with squiggly lines. Hovering your cursor over these lines will display a description of the error to help reach a solution. - -## Prerequisites - -1. [Node.js](https://nodejs.org) -2. [Ruby](https://www.ruby-lang.org/) -3. [VS Code](https://code.visualstudio.com/) - -## Set up VS Code - -### Add system dependencies - -1. [ESLint](https://eslint.org/) -2. [Prettier](https://prettier.io/) -3. [Rubocop](https://rubocop.org/) -4. [Stylelint](https://stylelint.io/) - -#### Rubocop - -```bash -gem install rubocop -``` - -See https://docs.rubocop.org/rubocop/installation.html for further information, including using Bundler. - -#### ESLint, Prettier, Stylelint - -Run the following command from the ArchivesSpace root directory. - -```bash -npm install -``` - -See [package.json](https://github.com/archivesspace/archivesspace/blob/master/package.json) for further details on how these tools are used in ArchivesSpace. - -### Add VS Code extensions - -Add the following extensions via the VS Code command palette or the Extensions panel. (See this [documentation for installing and managing extensions](https://code.visualstudio.com/learn/get-started/extensions)). - -1. 
[ESLint](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) (dbaeumer.vscode-eslint) -2. [Prettier](https://marketplace.visualstudio.com/items?itemName=esbenp.prettier-vscode) (esbenp.prettier-vscode) -3. [Ruby Rubocop Revised](https://marketplace.visualstudio.com/items?itemName=LoranKloeze.ruby-rubocop-revived) (LoranKloeze.ruby-rubocop-revived) -4. [Stylelint](https://marketplace.visualstudio.com/items?itemName=stylelint.vscode-stylelint) (stylelint.vscode-stylelint) - -It's important to note that since these extensions work in tandem with the [VS Code settings file](https://github.com/archivesspace/archivesspace/blob/master/.vscode/settings.json), these settings only impact your ArchivesSpace VS Code Workspace, not your global VS Code User settings. - -The extensions should now work out of the box at this point providing error messages and autocorrecting fixable errors on file save! diff --git a/import_export/README.md b/import_export/README.md deleted file mode 100644 index 58fb2866..00000000 --- a/import_export/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -permalink: /import_export/ ---- - -# Importing and exporting data in ArchivesSpace - -**This section needs work, or maybe to be incorporated somewhere else** - -* [ArchivesSpace repository EAD Exporter](./ead_exporter.html) -* [ArchivesSpace XSL stylesheets](./xsl_stylesheets) diff --git a/import_export/accession_import_template.html b/import_export/accession_import_template.html new file mode 100644 index 00000000..30999298 --- /dev/null +++ b/import_export/accession_import_template.html @@ -0,0 +1,152 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + import_export/accession_import_template.md + +

+ +

+ + Report issue on Jira + import_export/accession_import_template.md + +

+
+
+ +

Accession CSV Import Template

+ +

Download

+ +

Use this CSV template for importing Accession data into ArchivesSpace.

+ +

Keep the header information. Rows below the header provide example usage but should +be deleted prior to import.

+ +

** PLEASE NOTE **

+ +

The importer requires a specific order to the columns. If you modify the +template, be sure that you keep the columns in the same sequence.

+ + +
+ +
+ + + diff --git a/import_export/accession_import_template.md b/import_export/accession_import_template.md deleted file mode 100644 index e33f2a6f..00000000 --- a/import_export/accession_import_template.md +++ /dev/null @@ -1,13 +0,0 @@ -# Accession CSV Import Template - -[Download](https://raw.githubusercontent.com/archivesspace/archivesspace/master/backend/app/exporters/examples/accession/aspace_accession_import_template.csv) - -Use this CSV template for importing Accession data into ArchivesSpace. - -Keep the header information. Rows below header provide example usage but should -be delete prior to import. - -*** PLEASE NOTE *** - -The importer requires a specific order to the columns. If you modify the -template, but sure that you keep the columns in the same sequence. diff --git a/import_export/assessment_import_template.html b/import_export/assessment_import_template.html new file mode 100644 index 00000000..3e729fbe --- /dev/null +++ b/import_export/assessment_import_template.html @@ -0,0 +1,191 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + import_export/assessment_import_template.md + +

+ +

+ + Report issue on Jira + import_export/assessment_import_template.md + +

+
+
+ +

Assessment CSV Import Template

+ +

Download

+ +

Use this CSV template for importing Assessment data into ArchivesSpace.

+ +

The first two rows define the column headers. The order of the columns doesn’t matter.

+ +

The first row specifies the section that the column belongs to. +Valid values are: basic, rating, format and conservation.

+ +

The second row specifies the field within the section for that column.

+ +

The ‘basic’ columns are fixed fields on the assessment record.

+ +

In the ‘basic’ section, ‘record’, ‘surveyed_by’ and ‘reviewer’ are repeating fields. +Simply add enough of these columns for the assessment record that has the most +linked records, surveyors or reviewers, then +leave the surplus columns blank for rows that have fewer.

+ +

The ‘basic’, ‘record’ columns should contain references to existing archival records in +the current repository. They must be of type resource, archival_object, accession or digital_object.

+ +

The reference must take the form: type_id.

+ +

It is also permitted to use ‘/’, ‘.’ or space, in place of the ‘_’. For example, the following +are valid record references (assuming the corresponding records exist in the current repository):

+ +
resource_12
+accession/5
+archival_object 2970
+
+ +

The ‘basic’, ‘surveyed_by’ and ‘reviewer’ columns must contain usernames of existing users +in the ArchivesSpace instance.

+ +

The fields for other sections refer to attribute definitions. +The importer will attempt to match the field value against a definition. +For example: ‘format’, ‘Film’ will match the ‘Film (negative, slide, or motion picture)’ definition.

+ +

If it can’t find a matching definition, or if more than one definition matches, the importer +will abort and report the problem.

+ +

The template has columns for all of the default attribute definitions. It is possible to +add repository specific definitions through the management interface. To import into +these attributes, simply add columns to your CSV. For example, if your repository has +defined an assessment rating called “Comedic Value”, add a column to your CSV with ‘rating’ +in the first row, and ‘Comedic Value’ in the second.

+ +

‘rating’ type definitions support an associated note field. To import into this field +the field must end with ‘_note’. In the example above, add another column with ‘rating’ +in the first row, and ‘Comedic Value_note’ in the second.

+ + +
+ +
+ + + diff --git a/import_export/assessment_import_template.md b/import_export/assessment_import_template.md deleted file mode 100644 index aaff7feb..00000000 --- a/import_export/assessment_import_template.md +++ /dev/null @@ -1,51 +0,0 @@ -# Assessment CSV Import Template - -[Download](https://raw.githubusercontent.com/archivesspace/archivesspace/master/backend/app/exporters/examples/assessment/aspace_assessment_import_template.csv) - -Use this CSV template for importing Assessment data into ArchivesSpace. - -The first two rows define the column headers. The order of the columns doesn't matter. - -The first row specifies the section that the column belongs to. -Valid values are: basic, rating, format and conservation. - -The second row specifies the field within the section for that column. - -The 'basic' columns are fixed fields on the assessment record. - -In the 'basic' section, 'record', 'surveyed_by' and 'reviewer' are repeating fields. -Simply add enough of these columns for the assessment record that has the most -linked records, surveyors or reviewers, then -leave the surplus columns blank for rows that have fewer. - -The 'basic', 'record' columns should contain references to existing archival records in -the current repository. They must be of type resource, archival_object, accession or digital_object. - -The reference must take the form: type_id. - -It is also permitted to use '/', '.' or space, in place of the '_'. For example, the following -are valid record references (assuming the corresponding records exist in the current repository): - - resource_12 - accession/5 - archival_object 2970 - -The 'basic', 'surveyed_by' and 'reviewer' columns must contain usernames of existing users -in the ArchivesSpace instance. - -The fields for other sections refer to attribute definitions. -The importer will attempt to match the field value against a definition. -For example: 'format', 'Film' will match the 'Film (negative, slide, or motion picture)' definition. 
- -If it can't find a matching definition, or if more than one definition matches, the importer -will abort and report the problem. - -The template has columns for all of the default attribute definitions. It is possible to -add repository specific definitions through the management interface. To import into -these attributes, simply add columns to your CSV. For example, if your repository has -defined an assessment rating called "Comedic Value", add a column to your CSV with 'rating' -in the first row, and 'Comedic Value' in the second. - -'rating' type definitions support an associated note field. To import into this field -the field must end with '_note'. In the example above, add another column with 'rating' -in the first row, and 'Comedic Value_note' in the second. diff --git a/import_export/digital_object_import_template.html b/import_export/digital_object_import_template.html new file mode 100644 index 00000000..54da0837 --- /dev/null +++ b/import_export/digital_object_import_template.html @@ -0,0 +1,152 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + import_export/digital_object_import_template.md + +

+ +

+ + Report issue on Jira + import_export/digital_object_import_template.md + +

+
+
+ +

Digital Object CSV Import Template

+ +

Download

+ +

Use this CSV template for importing Digital Object data into ArchivesSpace.

+ +

Keep the header information. Rows below the header provide example usage but should +be deleted prior to import.

+ +

** PLEASE NOTE **

+ +

The importer requires a specific order to the columns. If you modify the +template, be sure that you keep the columns in the same sequence.

+ + +
+ +
+ + + diff --git a/import_export/digital_object_import_template.md b/import_export/digital_object_import_template.md deleted file mode 100644 index 0012873d..00000000 --- a/import_export/digital_object_import_template.md +++ /dev/null @@ -1,14 +0,0 @@ -# Digital Object CSV Import Template - -[Download](https://raw.githubusercontent.com/archivesspace/archivesspace/master/backend/app/exporters/examples/digital_object/aspace_digital_object_import_template.csv) - -Use this CSV template for importing Digital Object data into ArchivesSpace. - - -Keep the header information. Rows below header provide example usage but should -be delete prior to import. - -** PLEASE NOTE ** - -The importer requires a specific order to the columns. If you modify the -template, but sure that you keep the columns in the same sequence. diff --git a/import_export/ead_exporter.html b/import_export/ead_exporter.html new file mode 100644 index 00000000..2252eb28 --- /dev/null +++ b/import_export/ead_exporter.html @@ -0,0 +1,166 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + import_export/ead_exporter.md + +

+ +

+ + Report issue on Jira + import_export/ead_exporter.md + +

+
+
+ +

ArchivesSpace repository EAD Exporter

+ +

Exports all published resource record EAD XML files associated with a single +repository into a zip archive. This zip file will be saved in the ArchivesSpace +data directory (as defined in config.rb) and include the repository id in the +filename.

+ +

Usage

+ +
./scripts/ead_export.sh user password repository_id
+
+ +

A best practice would be to put the password in a hidden file such as:

+ +
touch ~/.aspace_password
+chmod 0600 ~/.aspace_password
+vi ~/.aspace_password # enter your password
+
+ +

Then call the script like:

+ +
./scripts/ead_export.sh user $(cat /home/user/.aspace_password) repository_id
+
+ +

This way you avoid directly exposing it on the command line or in crontab etc.

+ +
+ + +
+ +
+ + + diff --git a/import_export/ead_exporter.md b/import_export/ead_exporter.md deleted file mode 100644 index 538b257a..00000000 --- a/import_export/ead_exporter.md +++ /dev/null @@ -1,31 +0,0 @@ -# ArchivesSpace repository EAD Exporter - -Exports all published resource record EAD XML files associated with a single -repository into a zip archive. This zip file will be saved in the ArchivesSpace -data directory (as defined in `config.rb`) and include the repository id in the -filename. - -Usage ------ - -``` -./scripts/ead_export.sh user password repository_id -``` - -A best practice would be to put the password in a hidden file such as: - -``` -touch ~/.aspace_password -chmod 0600 ~/.aspace_password -vi ~/.aspace_password # enter your password -``` - -Then call the script like: - -``` -./scripts/ead_export.sh user $(cat /home/user/.aspace_password) repository_id -``` - -This way you avoid directly exposing it on the command line or in crontab etc. - ---- diff --git a/import_export/index.html b/import_export/index.html new file mode 100644 index 00000000..76b5b16d --- /dev/null +++ b/import_export/index.html @@ -0,0 +1,147 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + import_export/README.md + +

+ +

+ + Report issue on Jira + import_export/README.md + +

+
+
+ +

Importing and exporting data in ArchivesSpace

+ +

This section needs work, or maybe to be incorporated somewhere else

+ + + + +
+ +
+ + + diff --git a/import_export/xsl_stylesheets.html b/import_export/xsl_stylesheets.html new file mode 100644 index 00000000..5d818241 --- /dev/null +++ b/import_export/xsl_stylesheets.html @@ -0,0 +1,155 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + import_export/xsl_stylesheets.md + +

+ +

+ + Report issue on Jira + import_export/xsl_stylesheets.md + +

+
+
+ +

ArchivesSpace XSL stylesheets

+ +

ArchivesSpace includes three stylesheets for you to transform exported data +into human-friendly formats. The stylesheets included are as follows:

+ +
    +
  • as-eac-cpf-html.xsl: Generates HTML from EAC-CPF records
  • +
  • as-ead-html.xsl: Generates HTML from EAD records
  • +
  • as-ead-pdf.xsl: Generates XSL:FO output from EAD for transformation into PDF
  • +
+ +

These stylesheets have been tested and are known to work with +Saxon 9.5.1.1 and higher.

+ +

The as-helper-functions.xsl stylesheet is required by the other three +stylesheets listed above.

+ + +
+ +
+ + + diff --git a/import_export/xsl_stylesheets.md b/import_export/xsl_stylesheets.md deleted file mode 100644 index 863fd2b6..00000000 --- a/import_export/xsl_stylesheets.md +++ /dev/null @@ -1,14 +0,0 @@ -# ArchivesSpace XSL stylesheets - -ArchivesSpace includes three stylesheets for you to transform exported data -into human-friendly formats. The stylesheets included are as follows: - -* `as-eac-cpf-html.xsl`: Generates HTML from EAC-CPF records -* `as-ead-html.xsl`: Generates HTML from EAD records -* `as-ead-pdf.xsl`: Generates XSL:FO output from EAD for transformation into PDF - -These stylesheets have been tested and are known to work with -[Saxon](http://saxonica.com/download/download_page.xml) 9.5.1.1 and higher. - -The `as-helper-functions.xsl` stylesheet is required by the other three -stylesheets listed above. diff --git a/index.html b/index.html new file mode 100644 index 00000000..8d8aff0a --- /dev/null +++ b/index.html @@ -0,0 +1,159 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + README.md + +

+ +

+ + Report issue on Jira + README.md + +

+
+
+ +

ArchivesSpace technical documentation

+ +

The technical documentation covers a range of topics of interest to those working with ArchivesSpace in different technical capacities, and is organized in order to help you find the information most appropriate to your role.

+ + + +

To suggest corrections or additions, please submit a pull request or issue report on Github

+ +

Other technical documentation resources:

+ + + + +
+ +
+ + + diff --git a/migrations/migrate_from_archivists_toolkit.html b/migrations/migrate_from_archivists_toolkit.html new file mode 100644 index 00000000..321a3de1 --- /dev/null +++ b/migrations/migrate_from_archivists_toolkit.html @@ -0,0 +1,300 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + migrations/migrate_from_archivists_toolkit.md + +

+ +

+ + Report issue on Jira + migrations/migrate_from_archivists_toolkit.md + +

+
+
+ +

Migrating Data from Archivists’ Toolkit to ArchivesSpace Using the Migration Tool

+ +

These guidelines are for migrating data from Archivists’ Toolkit 2.0 Update 16 to all ArchivesSpace 2.1.x or 2.2.x releases using the migration tool provided by ArchivesSpace. Migrations of data from earlier versions of the Archivists’ Toolkit (AT) or other versions of ArchivesSpace are not supported by these guidelines or migration tool.

+ +
+

Note: A migration from Archivists’ Toolkit to ArchivesSpace should not be run against an active production database.

+
+ +

Preparing for migration

+ +
    +
  • Make a copy of the AT instance, including the database, to be migrated and use it as the source of the migration. It is strongly recommended that you not use your AT production instance and database as the source of the migration for the simple reason of protecting the production version from any anomalies that might occur during the migration process.
  • +
  • Review your source database for the quality of the data. Look for invalid records, duplicate name and subject records, and duplicate controlled values. Irregular data will either be carried forward to the ArchivesSpace instance or, in some cases, block the migration process.
  • +
  • Select a representative sample of accession, resource, and digital object records to be examined closely when the migration is completed. Make sure to represent in the sample both the simplest and most complicated or extensive records in the overall data collection.
  • +
+ +

Notes

+ +
    +
  • An AT subject record will be set to type ‘topical’ if it does not have a valid AT type statement or its type is not one of the types in ArchivesSpace. Several other AT LookupList values are not present in ArchivesSpace. These LookupList values cannot be added during the AT migration process and will therefore need to be changed in AT prior to migration. For full details on enum (controlled value list) mappings see the data map. You can use the AT Lookup List tool to change values that will not map correctly, as specified by the data map.
  • +
  • Record audit information (created by, date created, modified by, and date modified) will not migrate from AT to ArchivesSpace. ArchivesSpace will assign new audit data to each record as it is imported into ArchivesSpace. The exception to this is that the username of the user who creates an accession record will be migrated to the accession general note field.
  • +
  • Implement an ArchivesSpace production version including the setting up of a MySQL database to migrate into. Instructions are included at Getting Started with ArchivesSpace and Running ArchivesSpace against MySQL.
  • +
+ +

Preparing for Migrating AT Data

+ +
    +
  • The migration process is iterative in nature. A migration report is generated at the end of each migration routine. The report indicates errors or issues occurring with the migration. (An example of an AT migration report is provided at the end of this document.) You should use this report to determine if any problems observed in the migration results are best remedied in the source data or in the migrated data in the ArchivesSpace instance. If you address the problems in the source data, then you can simply conduct the migration again.
  • +
  • However, once you accept the migration and address problems in the migrated data, you cannot migrate the source data again without establishing a new target ArchivesSpace instance. Migrating data to a previously migrated ArchivesSpace database may result in a great many duplicate record error messages and may cause unrecoverable damage to the ArchivesSpace database.
  • +
  • Please note, data migration can be a very memory and time intensive task due to the large number of records being transferred. As such, we recommend running the AT migration on a computer with at least 2GB of available memory.
  • +
  • Make sure your ArchivesSpace MySQL database is setup correctly, following the documentation in the ArchivesSpace README file. When creating a MySQL database, you MUST set the default character encoding for the database to be UTF8. This is particularly important if you use a MySQL client, such as Navicat, MySQL Workbench, phpMyAdmin, etc., to create the database. See Running ArchivesSpace against MySQL for more details.
  • +
  • Increase the maximum Java heap space if you are experiencing time out events. To do so: +
      +
    • Stop the current ArchivesSpace instance
    • +
    • Open in a text editor the file “archivesspace.sh” (Linux / Mac OSX) or archivesspace.bat (Windows). The file is located in the ArchivesSpace installation directory.
    • +
    • Find the text string “-Xmx512m” and change it to “-Xmx1024m”.
    • +
    • Save the file.
    • +
    • Restart the ArchivesSpace instance.
    • +
    • Restart the AT migration process.
    • +
    +
  • +
+ +

Running the Migration Tool as an AT Plugin

+ +
    +
  • Make sure that the AT instance you want to migrate from is shut down. Next, download the “scriptAT.zip” file from the at-migration release github page (https://github.com/archivesspace/at-migration/releases) and copy the file into the plugins folder of the AT instance, overwriting the one that’s already there if needed.
  • +
  • Make sure the ArchivesSpace instance that you are migrating into is up and running.
  • +
  • Restart the AT instance to load the newly installed plug-in. To run the plug-in go to the “Tools” menu, then select “Script Runtime v1.0”, and finally “ArchivesSpace Data Migrator”. This will cause the plug-in window to display.
  • +
+ +

AT migrator

+
    +
  • Change the default information in the Migrator UI: +
      +
    • Threads – Used to specify the number of clients that are used to copy Resource records simultaneously. The limit on the number of clients depends on the record size and allocated memory. A number from 4 to 6 is generally a good value to use, but can be reduced if an “Out of Memory Exception” occurs.
    • +
    • Host – The URL and port number of the ArchivesSpace backend server
    • +
    • “Copy records when done” checkbox – Used to specify that the records should +be copied once the repository check has completed.
    • +
    • Password – password for the ArchivesSpace “admin” account. The default value +of “admin” should work unless it was changed by the ArchivesSpace +administrator.
    • +
    • Reset Password – Each user account transferred has its password reset to this. +Please note that users need to change their password when they first log-in +unless LDAP is used for authentication.
    • +
    • “Specify Type of Extent Data” Radio button – If you are using the BYU Plugin, +select that option. Otherwise, leave as the default – Normal or Harvard Plugin.
    • +
    • Specify Unlinked Records to NOT Copy checkboxes – If you have name or +subject records that are not linked to accessions, resources, or digital objects, +you can choose not to migrate those to ArchivesSpace.
    • +
    • “Records to Publish?” checkboxes – Used to specify what types of records +should be published after they are migrated to ArchivesSpace.
    • +
    • Text box showing -refid_unique, -term_default – This is needed for the +functioning of the migration tool. Please do not make changes to this area.
    • +
    • Output Console – Display section for following the migration while it is running
    • +
    • View Error Log – Used to view a printout of all the errors encountered during the +migration process. This can be used while the migration process is underway as well.
    • +
    +
  • +
  • Once you have made the appropriate changes to the UI, there are three buttons to choose from to start the migration process. +
      +
    • Copy to ArchivesSpace – This starts the migration to the ArchivesSpace instance +indicated by the Host URL.
    • +
    • +

      Run Repository Check – The repository check searches for, and attempts to fix repository misalignment between Resources and linked Accession/Digital Object records. The fix applied entails copying the linked accession/digital object record to the repository of the resource record in the ArchivesSpace database (those record positions are not modified in the AT database).

      + +

      As long as accession records are not linked to multiple Resource records in different repositories, the fix will be valid. Otherwise, you will receive a warning message. For such cases, the Resource and Accession record(s) will still be migrated, but without links to one another; those links will need to be re-established in ArchivesSpace.

      + +

      This misalignment problem involves only accession and resource records and not digital object records, as accession and resource records have a many-to-many relationship. Assessments also can have a many-to-many relationship with resources, accessions, and digital objects. However, since assessments are small and quick to copy, they will simply be copied to as many repositories as needed to establish all the appropriate links.

      + +

      If the “Copy Records When Done” checkbox is selected, the records will be migrated to the ArchivesSpace instance once the check is completed.

      +
    • +
    • Continue Previous Migration – If the migration process fails, this is used to skip to the place the failed previous migration left off. This should allow the migration process of resource records to be gracefully restarted without having to clean out the ArchivesSpace backend database and start from scratch.
    • +
    +
  • +
  • For the most part, the data migration process should be automatic, with an error log being generated when completed. However, depending on the particular data, various errors may occur that would require the migration to be re-run after they have been resolved by the user. The time a migration takes to complete will depend on a number of factors (database size, network performance etc.), but can be anywhere from a couple of hours to a few days.
  • +
  • Data from the following AT modules will migrate: +
      +
    • Lookup Lists
    • +
    • Repositories
    • +
    • Locations
    • +
    • Users
    • +
    • Subjects
    • +
    • Names
    • +
    • Accessions
    • +
    • Digital Object and Digital Object Components
    • +
    • Resources and Resource Components
    • +
    • Assessments
    • +
    +
  • +
  • Data +
      +
    • Reports from the following AT modules will not migrate +
      +

      INFORMATION MISSING FROM SOURCE DOCUMENT - NEEDS REVIEW!!!

      +
      +
    • +
    +
  • +
+ +

Assessing the Migration and Cleaning Up Data

+ +

Use the migration report to assess the fidelity of the migration and to determine whether to:

+
    +
  • Fix data in the source AT instance and conduct the migration again, or
  • +
  • Fix data in the target ArchivesSpace instance.
  • +
+ +

If you select to fix the data in AT and conduct the migration again, you will need to delete all the content in the ArchivesSpace instance.

+ +

If you accept the migration in the ArchivesSpace instance, the following outlines how to check and fix your data.

+ +
    +
  • Re-establish user passwords. While user records will migrate, the passwords associated with them will not. You will need to re-assign those passwords according to the policies or conventions of your repositories.
  • +
  • Review closely the set of sample records you selected: +
      +
    • Accessions
    • +
    • Resources
    • +
    • Digital objects
    • +
    +
  • +
  • Review the following groups of records, making sure the correct number of records migrated: +
      +
    • Accessions
    • +
    • Assessments
    • +
    • Resources
    • +
    • Digital objects
    • +
    • Controlled vocabulary lists
    • +
    • Subjects
    • +
    • Agents (Name records in AT)
    • +
    • Locations
    • +
    • Collection Management Classifications
    • +
    • There may be a few extra agent records due to ArchivesSpace defaults or extra assessments if they were linked to records from more than one repository.
    • +
    +
  • +
  • In conducting the reviews, look for duplicate or incomplete records, broken links, or truncated data.
  • +
  • Take special care to check to make sure your container data and locations are correct. The model for this is significantly different between AT and ArchivesSpace (where locations are tied to a container rather than directly to a resource or accession), so this presents some challenges for migration.
  • +
  • Merge enumeration values as necessary. For instance, if you had both ‘local’ and ‘local sources’ as a source for names, it might be a good idea to merge these values.
  • +
+ + +
+ +
+ + + diff --git a/migrations/migrate_from_archivists_toolkit.md b/migrations/migrate_from_archivists_toolkit.md deleted file mode 100644 index 29a279a0..00000000 --- a/migrations/migrate_from_archivists_toolkit.md +++ /dev/null @@ -1,119 +0,0 @@ -# Migrating Data from Archivists' Toolkit to ArchivesSpace Using the Migration Tool - -These guidelines are for migrating data from Archivists' Toolkit 2.0 Update 16 to all ArchivesSpace 2.1.x or 2.2.x releases using the migration tool provided by ArchivesSpace. Migrations of data from earlier versions of the Archivists' Toolkit (AT) or other versions of ArchivesSpace are not supported by these guidelines or migration tool. - -> Note: A migration from Archivists' Toolkit to ArchivesSpace should not be run against an active production database. - -## Preparing for migration - -* Make a copy of the AT instance, including the database, to be migrated and use it as the source of the migration. It is strongly recommended that you not use your AT production instance and database as the source of the migration for the simple reason of protecting the production version from any anomalies that might occur during the migration process. -* Review your source database for the quality of the data. Look for invalid records, duplicate name and subject records, and duplicate controlled values. Irregular data will either be carried forward to the ArchivesSpace instance or, in some cases, block the migration process. -* Select a representative sample of accession, resource, and digital object records to be examined closely when the migration is completed. Make sure to represent in the sample both the simplest and most complicated or extensive records in the overall data collection. - -### Notes - -* An AT subject record will be set to type 'topical' if it does not have a valid AT type statement or its type is not one of the types in ArchivesSpace. Several other AT LookupList values are not present in ArchivesSpace. 
These LookupList values cannot be added during the AT migration process and will therefore need to be changed in AT prior to migration. For full details on enum (controlled value list) mappings see the data map. You can use the AT Lookup List tool to change values that will not map correctly, as specified by the data map. -* Record audit information (created by, date created, modified by, and date modified) will not migrate from AT to ArchivesSpace. ArchivesSpace will assign new audit data to each record as it is imported into ArchivesSpace. The exception to this is that the username of the user who creates an accession record will be migrated to the accession general note field. -* Implement an ArchivesSpace production version including the setting up of a MySQL database to migrate into. Instructions are included at [Getting Started with ArchivesSpace](../administration/getting_started.html) and [Running ArchivesSpace against MySQL](../provisioning/mysql.html). - -## Preparing for Migrating AT Data - -* The migration process is iterative in nature. A migration report is generated at the end of each migration routine. The report indicates errors or issues occurring with the migration. (An example of an AT migration report is provided at the end of this document.) You should use this report to determine if any problems observed in the migration results are best remedied in the source data or in the migrated data in the ArchivesSpace instance. If you address the problems in the source data, then you can simply conduct the migration again. -* However, once you accept the migration and address problems in the migrated data, you cannot migrate the source data again without establishing a new target ArchivesSpace instance. Migrating data to a previously migrated ArchivesSpace database may result in a great many duplicate record error messages and may cause unrecoverable damage to the ArchivesSpace database. 
-* Please note, data migration can be a very memory and time intensive task due to the large number of records being transferred. As such, we recommend running the AT migration on a computer with at least 2GB of available memory. -* Make sure your ArchivesSpace MySQL database is setup correctly, following the documentation in the ArchivesSpace README file. When creating a MySQL database, you MUST set the default character encoding for the database to be UTF8. This is particularly important if you use a MySQL client, such as Navicat, MySQL Workbench, phpMyAdmin, etc., to create the database. See [Running ArchivesSpace against MySQL](../provisioning/mysql.html) for more details. -* Increase the maximum Java heap space if you are experiencing time out events. To do so: - * Stop the current ArchivesSpace instance - * Open in a text editor the file "archivesspace.sh" (Linux / Mac OSX) or archivesspace.bat (Windows). The file is located in the ArchivesSpace installation directory. - * Find the text string "-Xmx512m" and change it to "-Xmx1024m". - * Save the file. - * Restart the ArchivesSpace instance. - * Restart the AT migration process. - -## Running the Migration Tool as an AT Plugin - -* Make sure that the AT instance you want to migrate from is shut down. Next, download the "scriptAT.zip" file from the at-migration release github page (https://github.com/archivesspace/at-migration/releases) and copy the file into the plugins folder of the AT instance, overwriting the one that's already there if needed. -* Make sure the ArchivesSpace instance that you are migrating into is up and running. -* Restart the AT instance to load the newly installed plug-in. To run the plug-in go to the "Tools" menu, then select "Script Runtime v1.0", and finally "ArchivesSpace Data Migrator". This will cause the plug-in window to display. 
- -![AT migrator](../images/at_migrator.jpg) -* Change the default information in the Migrator UI: - * **Threads** – Used to specify the number of clients that are used to copy Resource records simultaneously. The limit on the number of clients depends on the record size and allocated memory. A number from 4 to 6 is generally a good value to use, but can be reduced if an "Out of Memory Exception" occurs. - * **Host** – The URL and port number of the ArchivesSpace backend server - * **"Copy records when done" checkbox** – Used to specify that the records should -be copied once the repository check has completed. - * **Password** – password for the ArchivesSpace "admin" account. The default value -of "admin" should work unless it was changed by the ArchivesSpace -administrator. - * **Reset Password** – Each user account transferred has its password reset to this. -Please note that users need to change their password when they first log-in -unless LDAP is used for authentication. - * **"Specify Type of Extent Data" Radio button** – If you are using the BYU Plugin, -select that option. Otherwise, leave as the default – Normal or Harvard Plugin. - * **Specify Unlinked Records to NOT Copy checkboxes** – If you have name or -subject records that are not linked to accessions, resources, or digital objects, -you can choose not to migrate those to ArchivesSpace. - * **"Records to Publish?" checkboxes** – Used to specify what types of records -should be published after they are migrated to ArchivesSpace. - * **Text box showing -refid_unique, -term_default** – This is needed for the -functioning of the migration tool. Please do not make changes to this area. - * **Output Console** – Display section for following the migration while it is running - * **View Error Log** – Used to view a printout of all the errors encountered during the -migration process. This can be used while the migration process is underway as well. 
-* Once you have made the appropriate changes to the UI, there are three buttons to choose from to start the migration process. - * **Copy to ArchivesSpace** – This starts the migration to the ArchivesSpace instance -you have made the appropriate changes to the UI, there are three buttons to -indicated by the Host URL. - * **Run Repository Check** – The repository check searches for, and attempts to fix repository misalignment between Resources and linked Accession/Digital Object records. The fix applied entails copying the linked accession/digital object record to the repository of the resource record in the ArchivesSpace database (those record positions are not modified in the AT database). - - As long as accession records are not linked to multiple Resource records in different repositories, the fix will be valid. Otherwise, you will receive a warning message. For such cases, the Resource and Accession record(s) will still be migrated, but without links to one another; those links will need to be re-established in ArchivesSpace. - - This misalignment problem involves only accession and resource records and not digital object records, as accession and resource records have a many-to-many relationship. Assessments also can have a many-to-many relationship with resources, accessions, and digital objects. However, since assessments are small and quick to copy, they will simply be copied to as many repositories as needed to establish all the appropriate links. - - If the "Copy Records When Done" checkbox is selected, the records will be migrated to the ArchivesSpace instance once the check is completed. - * **Continue Previous Migration** – If the migration process fails, this is used to skip to the place the failed previous migration left off. This should allow the migration process of resource records to be gracefully restarted without having to clean out the ArchivesSpace backend database and start from scratch. 
-* For most part, the data migration process should be automatic, with an error log being generated when completed. However, depending on the particular data, various errors may occur that would require the migration to be re-run after they have been resolved by the user. The time a migration takes to complete will depend on a number of factors (database size, network performance etc.), but can be anywhere from a couple of hours to a few days. -* Data from the following AT modules will migrate: - * Lookup Lists - * Repositories - * Locations - * Users - * Subjects - * Names - * Accessions - * Digital Object and Digital Object Components - * Resources and Resource Components - * Assessments -* Data - * Reports from the following AT modules will not migrate - > INFORMATION MISSING FROM SOURCE DOCUMENT - NEEDS REVIEW!!! - -## Assessing the Migration and Cleaning Up Data - -Use the migration report to assess the fidelity of the migration and to determine whether to: -* Fix data in the source AT instance and conduct the migration again, or -* Fix data in the target ArchivesSpace instance. - -If you select to fix the data in AT and conduct the migration again, you will need to delete all the content in the ArchivesSpace instance. - -If you accept the migration in the ArchivesSpace instance, the following outlines how to check and fix your data. - -* Re-establish user passwords. While user records will migrate, the passwords associated with them will not. You will need to re-assign those passwords according to the policies or conventions of your repositories. 
-* Review closely the set of sample records you selected: - * Accessions - * Resources - * Digital objects -* Review the following groups of records, making sure the correct number of records migrated: - * Accessions - * Assessments - * Resources - * Digital objects - * Controlled vocabulary lists - * Subjects - * Agents (Name records in AT) - * Locations - * Collection Management Classifications - * There may be a few extra agent records due to ArchivesSpace defaults or extra assessments if they were linked to records from more than one repository. -* In conducting the reviews, look for duplicate or incomplete records, broken links, or truncated data. -* Take special care to check to make sure your container data and locations are correct. The model for this is significantly different between AT and ArchivesSpace (where locations are tied to a container rather than directly to a resource or accession), so this presents some challenges for migration. -* Merge enumeration values as necessary. For instance, if you had both 'local' and 'local sources' as a source for names, it might be a good idea to merge these values. diff --git a/migrations/migrate_from_archon.html b/migrations/migrate_from_archon.html new file mode 100644 index 00000000..bc637936 --- /dev/null +++ b/migrations/migrate_from_archon.html @@ -0,0 +1,375 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + migrations/migrate_from_archon.md + +

+ +

+ + Report issue on Jira + migrations/migrate_from_archon.md + +

+
+
+ +

Migrating Data from Archon to ArchivesSpace Using the Migration Tool

+ +

These guidelines are for migrating data from Archon 3.21-rev3 to ArchivesSpace 2.2.2 using the migration tool provided by ArchivesSpace. Migrations of data from earlier versions of Archon or other versions of ArchivesSpace are not supported by these guidelines or this migration tool.

+ +
+

Note: A migration from Archon to ArchivesSpace should not be run against an active production database.

+
+ +

Preparing for migration

+ +

Select a representative sample of accession, classification, collection, collection content, and digital object records to be examined closely when the migration is completed. Make sure to include both simple and more complicated or extensive records in the sample.

+ +

Review your Archon database for data quality

+ +

Accession Records

+ +
    +
  • Supply an accession date for all records, when possible. If an accession date is not +recorded in Archon, the date of 01/01/9999 will be supplied during the migration process. If you wish to change this default value, you may do so by editing the following file in the new Archon distribution, prior to running the migration: +packages/core/templates/default/accession-list.inc.php
  • +
  • Supply an identifier for all records, when possible. If an identifier is not recorded in Archon, a supplied identifier will be constructed during the migration process, consisting of the date and the truncated accession title.
  • +
+ +

Classification Records

+ +

Ensure that there are no duplicate classification titles at the same level in the classification hierarchy. If the migration tool encounters a duplicate value, some of the save operations for classifications will fail, and you will need to redo the migration.

+ +

Collection Records

+ +

If normalized dates are not recorded correctly (i.e. if the end date and begin date are reversed), they will not be migrated or may cause the migration to fail. To check for such entries, a system administrator can run the following query against the database:

+ +

SELECT ID, Title, NormalDateBegin, NormalDateEnd FROM tblCollections_Collections WHERE NormalDateBegin > NormalDateEnd;

+ +

Level/Container Manager

+ +

Review the settings to make sure that each ‘level container’ is appropriately marked with the correct values for “Intellectual Level” and “Physical Container” and that EAD Values are correctly recorded.

+ +

Level Container Manager

+ +

Failure to code level container values correctly may result in incorrect nesting of resource components in ArchivesSpace. While the following information does not need to be acted upon prior to migration, please note the following if you find that content is not nested correctly after you migrate:

+ +
    +
  • Collection content records that have a level container that is ‘Intellectual Only’ will be migrated to ArchivesSpace as resource components. Each level/container that has ‘intellectual level’ checked should have a valid value recorded in the “EAD Level” field (i.e. class, collection, file, fonds, item, otherlevel, recordgrp, series, subfonds, subgrp, subseries). These values are case sensitive, and all other values will be migrated as “otherlevel” on the collection content/resource component records to which they apply.
  • +
  • Collection content records that have a level container that is ‘Physical Only’ will be migrated to ArchivesSpace as instance records of the type ‘text’ attached to a container in ArchivesSpace. These instance/container records will be attached to the intellectual level or levels that are immediate children of the container record as it was previously expressed in Archon. If the instance/container has no children it will be attached to its parent intellectual level instead. For illustrative purposes, the following screenshots show a container record prior to and following migration. +Archon container example
  • +
  • Collection content records that have both physical and intellectual levels will be migrated as both resource components and instances. In this case the instance will be attached to the resource component.
  • +
  • Collection content records that are neither physical nor intellectual levels will be migrated as if they were ‘Intellectual Only’. This is not recommended and should be fixed prior to migration.
  • +
+ +

Collection Content Records

+ +
    +
  • If a value has not been set in the “Title” or “Inclusive Dates” field of an “intellectual” level/container in Archon, the collection content record being migrated will be supplied a title, based on its “label” value and the “level/container” type set in Archon. +Collection Content Records
  • +
  • Optionally, if a migration fails, check for collection content records that reference invalid ‘level/containers’. These records are found in the database tables, but are not visible to staff or end users and must be eliminated prior to migration. If not eliminated, the migration will fail. In order to identify these records, you should follow these steps. Be very careful. If you are uncertain what you are doing, backup the database first or speak with a systems administrator!
  • +
  • In MySQL or SQL Server, open the table titled ‘tblCollections_LevelContainers’. Note the ‘ID’ value recorded of each row (i.e. LevelContainer).
  • +
  • Run a query against tblCollections_Content to find records where the LevelID column references an invalid value. For example, if tblCollections_Level Containers holds ‘ID’ values1-6 and 8-22: +SELECT * FROM tblCollections_Content WHERE LevelContainerID > 22 OR (LevelContainerID > 6 AND LevelContainerID < 8); +This will provide a list of all records with invalid ‘LevelID’ (i.e. where a record with the primary key referenced by a foreign key cannot be found). Review this list carefully to make sure you are comfortable deleting the records, or change the LevelID to a valid integer if you wish to retain the records. If you choose to delete the records, you will need to do so directly in the database (see below.) If you choose to do the latter, you may need to take additional steps directly in the database to link these records to a valid parent content record or collection; additional instructions can be supplied upon request.
  • +
  • Run a query to delete the invalid records from the collections content table. For example: +DELETE FROM tblCollections_Content WHERE LevelContainerID > 22 OR (LevelContainerID > 6 AND LevelContainerID < 8);
  • +
  • Optionally, if the migration fails, check for ‘duplicate’ collection content records. ‘Duplicate’ records are those that occupy the same node in the collection/content hierarchy. To check for these records, run the following query in MySQL or SQL Server: SELECT ParentID, SortOrder, COUNT(*) FROM tblCollections_Content GROUP BY ParentID, SortOrder HAVING COUNT(*) > 1;
  • +
  • +

    The query above checks for records that occupy the same branch and same position in the content hierarchy. If you discover such records, the sort order value of one of the records must be changed, so that both records occupy a unique position. In order to do this, run a query that finds all records attached to the parent record, then run an update query to change the sort order of one of the offending records so that each has a unique sort order. For example if the query above returns ParentID as a ‘duplicate’ value, you would run query one with the appropriate ParentID value to identify the offending records, and query two to fix the problem: +Query one:

    + +

    SELECT ID, ParentID, SortOrder, Title FROM tblCollections_Content WHERE ParentID=8619;

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    IDParentIDSortOrderTitle
    862086191to mother
    862186191from mother
    862286193to father
    682386194from father
    + +

    Query two:

    + +

    UPDATE tblCollections_Content SET SortOrder=2 WHERE ID=8621;

    +
  • +
+ +

Preparing for Migrating Archon Data

+ +

The migration process is iterative in nature. You should plan to do several test migrations, culminating in a final migration. Typically, migration will require assistance from a system administrator.

+ +

The migration tool will connect to your Archon installation, read data from defined ‘endpoints’, and place the information in a target ArchivesSpace instance.

+ +

A migration report is generated at the end of each migration routine and can be downloaded from the application. The report indicates errors or issues occurring with the migration. Sample data from migration report is provided in Appendix A.

+ +

You should use this report to determine if any problems observed in the migration results are best remedied in the source data or in the migrated data in the ArchivesSpace instance. If you address the problems in the source data, then you can simply clear the database and conduct the migration again. However, once you accept the migration and make changes to the migrated data in ArchivesSpace, you cannot migrate the source data again without either overwriting the previous migration or establishing a new target ArchivesSpace instance.

+ +

Please note, data migration can be a very memory and time intensive task due to the large amounts of records being transferred. As such, we recommend running the Archon migration tool on a server with at least 2GB of available memory. Test migrations have run from under an hour to twelve hours or more in the case of complex and large instances of Archon.

+ +

Before starting the migration process, make sure that your current Archon installation is up to date: i.e. that you are using version 3.21 rev3. If you are on an earlier version of Archon, make a copy of the Archon instance, including the database, to be migrated and use it as the source of the migration. It is strongly recommended that you not use your Archon production instance and database as the source of the migration for the simple reason of protecting the production version from any anomalies that might occur during the migration process. Upgrade the copy of the Archon instance to version 3.21 rev3 prior to starting the migration process.

+ +

Get Archon to ArchivesSpace Migration Tool

+ +

Download the latest JAR file release from https://github.com/archivesspace-deprecated/ArchonMigrator/releases/latest. This is an executable JAR file – double click to run it.

+ +

Install ArchivesSpace Instance

+ +

Implement an ArchivesSpace production version including the setting up of a MySQL database to migrate into. Instructions are included at Getting Started with ArchivesSpace and Running ArchivesSpace against MySQL

+ +

Prepare to Launch Migration

+ +
+

Important Note: The migration process should be launched from a networked computer with a stable (i.e. wired) connection, and you should turn power save settings off on the client computer you use to launch the migration. So that the migration can proceed in an undisturbed fashion, you should not try to access the ArchivesSpace or Archon front end or public interface until after the migration has completed. If you fail to follow these instructions, the migration tool may not provide useful feedback and it will be difficult to determine how successful the migration was.

+
+ +

For the most part, the data migration process should be automatic, with errors being provided as the tool migrates and a log being made available when migration is complete. Depending on the particular data being migrated, various errors may occur. These may require the migration to be re-run after they have been resolved by the user. When this occurs, the MySQL database should be emptied by the system administrator, and the migration rerun after steps are taken to resolve the problem that caused the error.

+ +

The time that the migration takes to complete will depend on a number of factors (database size, network performance etc.), but has been known to take anywhere from a half hour to ten or twelve hours. Most of this time will probably be spent migrating collection records.

+ +

The following Archon datatypes will migrate, and all relationships that exist between these datatypes should be preserved in ArchivesSpace, except as noted in bold below. For each datatype, post-migration cleanup recommendations are provided in parentheses:

+
    +
  • Editable controlled value lists: +
      +
    • Subject sources (review post migration and merge values with ArchivesSpace defaults or functionally duplicate values, when possible)
    • +
    • Creator sources (review post migration and merge values with ArchivesSpace defaults or functionally duplicate values, when possible)
    • +
    • Extent units/types (merge functionally duplicate values)
    • Material Types
    • +
    • Container Types
    • +
    • File Types
    • +
    • Processing Priorities
    • +
    +
  • +
  • Repositories
  • +
  • User/logins (users will need to reset password)
  • +
  • Subjects (subjects of type personal, corporate, or family name are migrated as Agent records, and are linked to resources and digital objects in the subject role. Review these records and merge with duplicate agent names from creator migration, when possible.)
  • +
  • Creators/Names
  • +
  • Accessions (The migration tool will supply accession identifiers when these are blank in Archon. Review and change values, if appropriate.)
  • +
  • Digital Objects: The migration tool will generate digital object metadata records in ArchivesSpace for each digital library record that is stored in your Archon instance. For each file that has an attached digital library record, the migration tool will generate a digital object component and file instance record. In addition, the migration tool will provide a folder containing the source file you uploaded to Archon when you created the record. In order to link these files to the digital file records in ArchivesSpace, you should place the files in a single directory on a webserver. +To preserve the linkage between the file’s metadata in ArchivesSpace, you must provide the base URL to the folder where the objects will be placed. The migration tool prepends this URL to the filename to form a complete path to the object location, for each file being exported, as shown in the screenshot below. (In version 2.2.2 of ArchivesSpace, with the default digital object templates, these files will be available in the public interface by clicking a link.)
  • +
  • Locations (Controlled location records are much more granular in ArchivesSpace than in Archon. You should have a location record for each unique combination of location drop down, range, section, and shelf in Archon, and these records should be linked to top container records which are in turn linked to an instance for each collection where they apply.)
  • +
  • Resources and Resource Components (see locations, above). +Data from the following Archon modules will not migrate to ArchivesSpace
  • +
  • Books (Book data could be migrated later if a plugin is developed to support this data).
  • +
  • AVSAP/Assessments
  • +
+ +

Launch Migration Process

+ +

Make sure the ArchivesSpace instance that you are migrating into is up and running, then open up the migration tool.

+ +

Archon migrator

+ +
    +
  1. Change the default information in the migration tool user interface: +
      +
    • ArchonSource – Supply the base URL for the Archon instance.
    • +
    • Archon User – Username for an account with full administrator privileges.
    • +
    • Password – Password for that same account.
    • +
    • Download Digital Object Files checkbox – Check if you want to move any attached digital object files and supply a webpath to a web accessible folder where you intend to place the digital objects after the migration is complete.
    • +
    • Set Download Folder – Clicking this will open a file explorer that will allow you to specify the folder to which you want digital files from Archon to be downloaded.
    • +
    • Set Default Repository checkbox – Select “Set Default Repository” checkbox to set which Repository Accession and Unlinked digital objects are copied to. The default is “Based on Linked Collection,” which will copy Accession records to the same repository of any Collection records they are linked to, or the first repository if they are not. You can also select a specific repository from the drop-down list.
    • +
    • Host – The URL and port number of the ArchivesSpace backend server.
    • +
    • ASpace admin – User name for the ArchivesSpace “admin” account. The default value of “admin” should work unless it was changed by the ArchivesSpace administrator.
    • +
    • Password – Password for the ArchivesSpace “admin” account. The default value of “admin” should work unless it was changed by the ArchivesSpace administrator.
    • +
    • Reset Password – Each user account transferred has its password reset to this. Please note that users need to change their password when they first log-in unless LDAP is used for authentication.
    • +
    • Migration Options – This is needed for the functioning of the migration tool. Please do not make changes to this area.
    • +
    • Output Console – Display section for following the migration while it is running
    • +
    • View Error Log – Used to view a printout of all the errors encountered during the migration process. This can be used while the migration process is underway as well.
    • +
    +
  2. +
  3. Press the “Copy to ArchivesSpace” button to start the migration process. This starts the migration to the ArchivesSpace instance indicated by the Host URL.
  4. +
  5. If the migration process fails: Review the error message provided and /or the migration log. Fix any issues that have been identified, clear the target MySQL and try again.
  6. +
  7. When the process has completed: +
      +
    • Download the migration report.
    • +
    • Move digital objects into the folder location corresponding to the URL you provided to the migration tool.
    • +
    +
  8. +
+ +

Assessing the Migration and Cleaning Up Data

+ +
    +
  1. Use the migration report to assess the fidelity of the migration and to determine whether to fix data in the source Archon instance and conduct the migration again, or fix data in the target ArchivesSpace instance. If you select to fix data in Archon, you will need to delete all the content in the ArchivesSpace instance, then rerun the migration after clearing the ArchivesSpace database.
  2. +
  3. Review the following record types, making sure the correct number of records migrated. In conducting the reviews, look for duplicate or incomplete records, broken links, or truncated data. +
      +
    • Controlled vocabulary lists
    • +
    • Classifications
    • +
    • Accessions
    • +
    • Resources
    • +
    • Digital objects
    • +
    • Subjects (not persons, families, and corporate bodies)
    • +
    • Creators (known as Agents in ArchivesSpace)
    • +
    • Locations
    • +
    +
  4. +
  5. Review closely the set of sample records you selected, comparing data in Archon to data in ArchivesSpace.
  6. +
  7. If you accept the migration in the ArchivesSpace instance, then proceed to re-establish user passwords. While user records will migrate, the passwords associated with them will not. You will need to reassign those passwords according to the policies or conventions of your repositories.
  8. +
+ +

Appendix A: Migration Log Review

+ +

The migration log provides a description of any irregularities that take place during a migration and should be saved in a secure location, for future reference. The log contains both save errors and warnings. The warnings should be reviewed after the migration for information, for potential action.

+ +

Most warnings will not require a follow up action. For example, they may note that a supplied value has been provided to meet an ArchivesSpace data model requirement. This occurs for all collections with empty identifiers. Occasionally, warnings will indicate that there was a problem establishing a link between two records for a reason such as a resource component not being found. Warnings like this should be cause for review since they may indicate that some data was lost.

+ +

Save errors will note that a particular piece of data could not be migrated because it is not supported in the ArchivesSpace data model or for some other reason. In these cases, you should review the record in Archon and in ArchivesSpace if it was migrated at all. Oftentimes, these occur due to duplicate records (such as if you have a matching creator and person subject). If a save error occurs due to a duplicate record, this is usually okay but should still be reviewed to make sure there was no data loss. If a save error occurs for any other reason, this typically means the migration will need to be rerun (unless the record it occurred on is not needed or is easier just to migrate manually).

+ +

Typically, the migration log will record the Archon internal IDs of the original Archon object being migrated whenever a save error or warning occurs. This simplifies finding and correcting relevant records.

+ + +
+ +
+ + + diff --git a/migrations/migrate_from_archon.md b/migrations/migrate_from_archon.md deleted file mode 100644 index f8b30c8f..00000000 --- a/migrations/migrate_from_archon.md +++ /dev/null @@ -1,176 +0,0 @@ -# Migrating Data from Archon to ArchivesSpace Using the Migration Tool - -These guidelines are for migrating data from Archon 3.21-rev3 to all ArchivesSpace 2.2.2 using the migration tool provided by ArchivesSpace. Migrations of data from earlier versions of the Archon or other versions of ArchivesSpace are not supported by these guidelines or migration tool. - -> Note: A migration from Archon to ArchivesSpace should not be run against an active production database. - -## Preparing for migration - -Select a representative sample of accession, classification, collection, collection content, and digital object records to be examined closely when the migration is completed. Make sure to include both simple and more complicated or extensive records in the sample. - -Review your Archon database for data quality - -### Accession Records - -* Supply an accession date for all records, when possible. If an accession date is not -recorded in Archon, the date of 01/01/9999 will be supplied during the migration process. If you wish to change this default value, you may do so by editing the following file in the new Archon distribution, prior to running the migration: - `packages/core/templates/default/accession-list.inc.php` -* Supply an identifier for all records, when possible. If an identifier is not recorded in Archon, a supplied identifier will be constructed during the migration process, consisting of the date and the truncated accession title. - -### Classification Records - -Ensure that there are no duplicate classification titles at the same level in the classification hierarchy. If the migration tool encounters a duplicate value, some of the save operations for classifications will fail, and you will need to redo the migration. 
- -### Collection Records - -If normalized dates are not recorded correctly (i.e. if the end date and begin date are reversed), they will not be migrated or may cause the migration to fail. To check for such entries, a system administrator can run the follow query against the database: - -`SELECT ID, Title, NormalDateBegin, NormalDateEnd FROM tblCollections_Collections WHERE NormalDateBegin > NormalDateEnd;` - -### Level/Container Manager - -Review the settings to make sure that each 'level container' is appropriately marked with the correct values for "Intellectual Level" and "Physical Container" and that EAD Values are correctly recorded. - -![Level Container Manager](../images/archon_level.jpg) - -Failure to code level container values correctly may result in incorrect nesting of resource components in ArchivesSpace. While the following information does not need to be acted upon prior to migration, please note the following if you find that content is not nested correctly after you migrate: - -* Collection content records that have a level container that is 'Intellectual Only' will be migrated to ArchivesSpace as resource components. Each level/container that has 'intellectual level' checked should have a valid value recorded in the "EAD Level" field (i.e. class, collection, file, fonds, item, otherlevel, recordgrp, series, subfonds, subgrp, subseries). These values are case sensitive, and all other values will be migrated as "otherlevel" on the collection content/resource component records to which they apply. -* Collection content records that have a level container that is 'Physical Only' will be migrated to ArchivesSpace as instance records of the type 'text' attached to a container in ArchivesSpace. These instance/container records will be attached to the intellectual level or levels that are immediate children of the container record as it was previously expressed in Archon. 
If the instance/container has no children it will be attached to its parent intellectual level instead. For illustrative purposes, the following screenshots show a container record prior to and following migration. - ![Archon container example](../images/archon_container.jpg) -* Collection content records that have both physical and intellectual levels will be migrated as both resource components and instances. In this case the instance will be attached to the resource component. -* Collection content records that are neither physical nor intellectual levels will be migrated as if they were 'Intellectual Only'. This is not recommended and should be fixed prior to migration. - -### Collection Content Records - -* If a value has not been set in the "Title" or "Inclusive Dates" field of an "intellectual" level/container in Archon, the collection content record being migrated will be supplied a title, based on its "label" value and the "level/container" type set in Archon. - ![Collection Content Records](../images/archon_collection.jpg) -* Optionally, if a migration fails, check for collection content records that reference invalid 'level/containers'. These records are found in the database tables, but are not visible to staff or end users and must be eliminated prior to migration. If not eliminated, the migration will fail. In order to identify these records, you should follow these steps. **Be very careful. If you are uncertain what you are doing, backup the database first or speak with a systems administrator!** -* In MySQL or SQL Server, open the table titled 'tblCollections_LevelContainers'. Note the 'ID' value recorded of each row (i.e. LevelContainer). -* Run a query against tblCollections_Content to find records where the LevelID column references an invalid value. 
For example, if tblCollections_LevelContainers holds 'ID' values 1-6 and 8-22: - `SELECT * FROM tblCollections_Content WHERE LevelContainerID > 22 OR (LevelContainerID > 6 AND LevelContainerID < 8);` - This will provide a list of all records with invalid 'LevelID' (i.e. where a record with the primary key referenced by a foreign key cannot be found). Review this list carefully to make sure you are comfortable deleting the records, or change the LevelID to a valid integer if you wish to retain the records. If you choose to delete the records, you will need to do so directly in the database (see below.) If you choose to do the latter, you may need to take additional steps directly in the database to link these records to a valid parent content record or collection; additional instructions can be supplied upon request. -* Run a query to delete the invalid records from the collections content table. For example: - `DELETE FROM tblCollections_Content WHERE LevelContainerID > 22 OR (LevelContainerID > 6 AND LevelContainerID < 8);` -* Optionally, if the migration fails, check for 'duplicate' collection content records. 'Duplicate' records are those that occupy the same node in the collection/content hierarchy. To check for these records, run the following query in MySQL or SQL Server. - `SELECT ParentID, SortOrder, COUNT(*) FROM tblCollections_Content GROUP BY ParentID, SortOrder HAVING COUNT(*) > 1;` -* The query above checks for records that occupy the same branch and same position in the content hierarchy. If you discover such records, the sort order value of one of the records must be changed, so that both records occupy a unique position. In order to do this, run a query that finds all records attached to the parent record, then run an update query to change the sort order of one of the offending records so that each has a unique sort order. 
For example if the query above returns ParentID as a 'duplicate' value, you would run query one with the appropriate ParentID value to identify the offending records, and query two to fix the problem: - **Query one:** - - `SELECT ID, ParentID, SortOrder, Title FROM tblCollections_Content WHERE ParentID=8619;` - - ID | ParentID | SortOrder | Title - -- | -------- | --------- | ----- - 8620 | 8619 | 1 | to mother - 8621 | 8619 | 1 | from mother - 8622 | 8619 | 3 | to father - 6823 | 8619 | 4 | from father - - **Query two:** - - `UPDATE tblCollections_Content SET SortOrder=2 WHERE ID=8621;` - -## Preparing for Migrating Archon Data - -The migration process is iterative in nature. You should plan to do several test migrations, culminating in a final migration. Typically, migration will require assistance from a system administrator. - -The migration tool will connect to your Archon installation, read data from defined 'endpoints', and place the information in a target ArchivesSpace instance. - -A migration report is generated at the end of each migration routine and can be downloaded from the application. The report indicates errors or issues occurring with the migration. Sample data from migration report is provided in [Appendix A](#Appendix-A%3A-Migration-Log-Review). - -You should use this report to determine if any problems observed in the migration results are best remedied in the source data or in the migrated data in the ArchivesSpace instance. If you address the problems in the source data, then you can simply clear the database and conduct the migration again. However, once you accept the migration and make changes to the migrated data in ArchivesSpace, you cannot migrate the source data again without either overwriting the previous migration or establishing a new target ArchivesSpace instance. - -Please note, data migration can be a very memory and time intensive task due to the large amounts of records being transferred. 
As such, we recommend running the Archon migration tool on a server with at least 2GB of available memory. Test migrations have run from under an hour to twelve hours or more in the case of complex and large instances of Archon. - -Before starting the migration process, make sure that your current Archon installation is up to date: i.e. that you are using version 3.21 rev3. If you are on an earlier version of Archon, make a copy of the Archon instance, including the database, to be migrated and use it as the source of the migration. It is strongly recommended that you not use your Archon production instance and database as the source of the migration for the simple reason of protecting the production version from any anomalies that might occur during the migration process. Upgrade the copy of the Archon instance to version 3.21 rev3 prior to starting the migration process. - -### Get Archon to ArchivesSpace Migration Tool - -Download the latest JAR file release from https://github.com/archivesspace-deprecated/ArchonMigrator/releases/latest. This is an executable JAR file – double click to run it. - -### Install ArchivesSpace Instance - -Implement an ArchivesSpace production version including the setting up of a MySQL database to migrate into. Instructions are included at [Getting Started with ArchivesSpace](../administration/getting_started.html) and [Running ArchivesSpace against MySQL](../provisioning/mysql.html) - -### Prepare to Launch Migration - -> **Important Note:** The migration process should be launched from a networked computer with a stable (i.e. wired) connection, and you should turn power save settings off on the client computer you use to launch the migration. So that the migration can proceed in an undisturbed fashion, you should not try to access the ArchivesSpace or Archon front end or public interface until after the migration has completed. 
**If you fail to follow these instructions, the migration tool may not provide useful feedback and it will be difficult to determine how successful the migration was.** - -For the most part, the data migration process should be automatic, with errors being provided as the tool migrates and a log being made available when migration is complete. Depending on the particular data being migrated, various errors may occur. These may require the migration to be re-run after they have been resolved by the user. When this occurs, the MySQL database should be emptied by the system administrator, and the migration rerun after steps are taken to resolve the problem that caused the error. - -The time that the migration takes to complete will depend on a number of factors (database size, network performance etc.), but has been known to take anywhere from a half hour to ten or twelve hours. Most of this time will probably be spent migrating collection records. - -The following Archon datatypes will migrate, and all relationships that exist between these datatypes should be preserved in ArchivesSpace, except as noted in bold below. For each datatype, post-migration cleanup recommendations are provided in parentheses: -* Editable controlled value lists: - * Subject sources (review post migration and merge values with ArchivesSpace defaults or functionally duplicate values, when possible) - * Creator sources (review post migration and merge values with ArchivesSpace defaults -or functionally duplicate values, when possible) - * Extent units/types (merge functionally duplicate values) o Material Types - * Container Types - * File Types - * Processing Priorities -* Repositories -* User/logins (users will need to reset password) -* Subjects (subjects of type personal, corporate, or family name are migrated as Agent -records, and are linked to resources and digital objects in the subject role. Review these -records and merge with duplicate agent names from creator migration, when possible.) 
-* Creators/Names -* Accessions (The migration tool will supply accession identifiers when these are blank in Archon. Review and change values, if appropriate.) -* Digital Objects: The migration tool will generate digital object metadata records in ArchivesSpace for each digital library record that is stored in your Archon instance. For each file that has an attached digital library record, the migration tool will generate a digital object component and file instance record. In addition, the migration tool will provide a folder containing the source file you uploaded to Archon when you created the record. In order to link these files to the digital file records in ArchivesSpace, you should place the files in a single directory on a webserver. - **To preserve the linkage between the file's metadata in ArchivesSpace, you must provide the base URL to the folder where the objects will be placed.** The migration tool prepends this URL to the filename to form a complete path to the object location, for each file being exported, as shown in the screenshot below. (In version 2.2.2 of ArchivesSpace, with the default digital object templates, these files will be available in the public interface by clicking a link.) -* Locations (Controlled location records are much more granular in ArchivesSpace than in Archon. You should have a location record for each unique combination of location drop down, range, section, and shelf in Archon, and these records should be linked to top container records which are in turn linked to an instance for each collection where they apply.) -* Resources and Resource Components (see locations, above). -Data from the following Archon modules will not migrate to ArchivesSpace -* Books (Book data could be migrated later if a plugin is developed to support this data). -* AVSAP/Assessments - -## Launch Migration Process - -Make sure the ArchivesSpace instance that you are migrating into is up and running, then open up the migration tool. 
- -![Archon migrator](../images/archon_migrator.jpg) - -1. Change the default information in the migration tool user interface: - * ArchonSource – Supply the base URL for the Archon instance. - * Archon User – Username for an account with full administrator privileges. - * Password – Password for that same account. - * Download Digital Object Files checkbox – Check if you want to move any attached digital object files and supply a webpath to a web accessible folder where you intend to place the digital objects after the migration is complete. - * Set Download Folder – Clicking this will open a file explorer that will allow you to specify the folder to which you want digital files from Archon to be downloaded. - * Set Default Repository checkbox -- Select "Set Default Repository" checkbox to set which Repository Accession and Unlinked digital objects are copied to. The default is "Based on Linked Collection," which will copy Accession records to the same repository of any Collection records they are linked to, or the first repository if they are not. You can also select a specific repository from the drop-down list. - * Host – The URL and port number of the ArchivesSpace backend server. - * ASpace admin – User name for the ArchivesSpace "admin" account. The default value of "admin" should work unless it was changed by the ArchivesSpace administrator. - * Password – Password for the ArchivesSpace "admin" account. The default value of "admin" should work unless it was changed by the ArchivesSpace administrator. - * Reset Password – Each user account transferred has its password reset to this. Please note that users need to change their password when they first log-in unless LDAP is used for authentication. - * Migration Options – This is needed for the functioning of the migration tool. Please do not make changes to this area. 
- * Output Console – Display section for following the migration while it is running - * View Error Log – Used to view a printout of all the errors encountered during the migration process. This can be used while the migration process is underway as well. -2. Press the "Copy to ArchivesSpace" button to start the migration process. This starts the migration to the ArchivesSpace instance indicated by the Host URL. -3. If the migration process fails: Review the error message provided and /or the migration log. Fix any issues that have been identified, clear the target MySQL and try again. -4. When the process has completed: - * Download the migration report. - * Move digital objects into the folder location corresponding to the URL you provided to the migration tool. - -## Assessing the Migration and Cleaning Up Data - -1. Use the migration report to assess the fidelity of the migration and to determine whether to fix data in the source Archon instance and conduct the migration again, or fix data in the target ArchivesSpace instance. If you select to fix data in Archon, you will need to delete all the content in the ArchivesSpace instance, then rerun the migration after clearing the ArchivesSpace database. -2. Review the following record types, making sure the correct number of records migrated. In conducting the reviews, look for duplicate or incomplete records, broken links, or truncated data. - * Controlled vocabulary lists - * Classifications - * Accessions - * Resources - * Digital objects - * Subjects (not persons, families, and corporate bodies) - * Creators (known as Agents in ArchivesSpace) - * Locations -3. Review closely the set of sample records you selected, comparing data in Archon to data in ArchivesSpace. -4. If you accept the migration in the ArchivesSpace instance, then proceed to re-establish user passwords. While user records will migrate, the passwords associated with them will not. 
You will need to reassign those passwords according to the policies or conventions of your repositories. - -## Appendix A: Migration Log Review - -The migration log provides a description of any irregularities that take place during a migration and should be saved in a secure location, for future reference. The log contains both save errors and warnings. The warnings should be reviewed after the migration for information, for potential action. - -Most warnings will not require a follow up action. For example, they may note that a supplied value has been provided to meet an ArchivesSpace data model requirement. This occurs for all collections with empty identifiers. Occasionally, warnings will indicate that there was a problem establishing a link between two records for a reason such as a resource component not being found. Warnings like this should be cause for review since they may indicate that some data was lost. - -Save errors will note that a particular piece of data could not be migrated because it is not supported in the ArchivesSpace data model or for some other reason. In these cases, you should review the record in Archon and in ArchivesSpace if it was migrated at all. Oftentimes, these occur due to duplicate records (such as if you have a matching creator and person subject). If a save error occurs due to a duplicate record, this is usually okay but should still be reviewed to make sure there was no data loss. If a save error occurs for any other reason, this typically means the migration will need to be rerun (unless the record it occurred on is not needed or is easier just to migrate manually). - -Typically, the migration log will record the Archon internal IDs of the original Archon object being migrated whenever a save error or warning occurs. This simplifies finding and correcting relevant records. 
diff --git a/migrations/migration_tools.html b/migrations/migration_tools.html new file mode 100644 index 00000000..6ccf109d --- /dev/null +++ b/migrations/migration_tools.html @@ -0,0 +1,206 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + migrations/migration_tools.md + +

+ +

+ + Report issue on Jira + migrations/migration_tools.md + +

+
+
+ +

Migration tools and data mapping

+ +

Archivists’ Toolkit

+ + + +

Older information

+ + + +

Archon

+ + + +

Older information

+ + + +

Data Import and Export Maps

+ + + +

(newly reviewed) MARCXML Import Map +MARCXML Export Map

+ +

OAI-PMH-only maps

+ +

Most ArchivesSpace OAI-PMH responses are based on the export maps above, but there are a few that are only available through OAI-PMH

+ +

MODS for resources and resource components +Dublin Core for resources and resource components +DCMI Metadata Terms for resources and resource components

+ + + +
+ +
+ + + diff --git a/migrations/migration_tools.md b/migrations/migration_tools.md deleted file mode 100644 index 982a9db3..00000000 --- a/migrations/migration_tools.md +++ /dev/null @@ -1,58 +0,0 @@ -# Migration tools and data mapping - -## Archivists' Toolkit - -* [AT migration tool instructions](migrate_from_archivists_toolkit.html) -* [AT migration plugin](https://github.com/archivesspace/at-migration/releases) -* [AT migration source code](https://github.com/archivesspace/at-migration) -* [AT migration mapping (for 2.x versions of the tool and ArchivesSpace](https://github.com/archivesspace/at-migration/blob/master/docs/ATMappingDocument.xlsx) - -### Older information - -* [AT migration guidelines (for migrations using the original migration tool through version 1.4.2; only supports migrations to version 1.4.2 or lower of ArchivesSpace)](http://archivesspace.org/wp-content/uploads/2016/08/ATMigrationGuidelines-REV-20140417.pdf) -* [AT migration mapping (for migrations through version 1.4.2 or lower of the tool and ArchivesSpace)](http://archivesspace.org/wp-content/uploads/2016/08/ATMappingDocument_AT-ASPACE_BETA.xls) - -## Archon - -* [Archon migration tool instructions](migrate_from_archon.html) -* [Archon migration tool](https://github.com/archivesspace/archon-migration/releases/latest) -* [Archon migration source code](https://github.com/archivesspace/archon-migration/) -* [Archon migration mapping (for 2.x versions of the tool and ArchivesSpace)](https://docs.google.com/spreadsheets/d/13soN5djk16QYmRoSajtyAc_nBrNldyL58ViahKFJAog/edit?usp=sharing) - -### Older information - -* [refactored Archon migration plugin](https://github.com/archivesspace-deprecated/ArchonMigrator/releases) -* [information about refactoring project](https://archivesspace.atlassian.net/browse/AR-1278) -* [previous Archon migration plugin](https://github.com/archivesspace/archon-migration/releases) -* [Plugin read me 
text](https://github.com/archivesspace-deprecated/ArchonMigrator/blob/master/README.md) -* [Archon migration guidelines](http://archivesspace.org/wp-content/uploads/2016/05/Archon_Migration_Guidelines-7_13_2017.docx) -* [Archon migration mapping](http://archivesspace.org/wp-content/uploads/2016/08/ArchonSchemaMappingsPublic.xlsx) - -## Data Import and Export Maps - -* [Accession CSV Map](http://archivesspace.org/wp-content/uploads/2016/05/Accession-CSV-mapping-2013-08-05.xlsx) -* [Accession CSV Template](https://github.com/archivesspace/archivesspace/tree/master/templates) -* [Archival Objects from Excel or CSV with Load Via Spreadsheet](https://github.com/archivesspace/archivesspace/tree/master/templates) -* [Assessment CSV Template](https://github.com/archivesspace/archivesspace/tree/master/templates) -* [Digital Object CSV Map](http://archivesspace.org/wp-content/uploads/2016/08/DigitalObject-CSV-mapping-2013-02-26.xlsx) -* [Digital Object CSV Template](https://github.com/archivesspace/archivesspace/tree/master/templates) -* [Digital Objects Export Maps](http://archivesspace.org/wp-content/uploads/2016/08/ASpace-Dig-Object-Exports.xlsx) -* [EAD Import / Export Map](https://archivesspace.org/wp-content/uploads/2021/06/EAD-Import-Export-Mapping-20171030.xlsx) -* [Location Record CSV Template](https://github.com/archivesspace/archivesspace/tree/master/templates) -* (newly reviewed) [MARCXML Import Map](https://archivesspace.org/wp-content/uploads/2021/06/AS-MARC-import-mappings-2021-06-15.xlsx) -* [MARCXML Export Map](https://archivesspace.org/wp-content/uploads/2021/06/MARCXML-Export-Mapping-20130715.xlsx) -* [MARCXML Authority Import / Export Map](https://archivesspace.org/wp-content/uploads/2021/05/Agents-ASpace-to-MARCXMLMay2021.xlsx) -* [EAC-CPF Import / Export Map](https://archivesspace.org/wp-content/uploads/2021/05/Agents-ASpace-to-EAC-CPFMay2021.xlsx) - -(newly reviewed) MARCXML Import Map -MARCXML Export Map - - -### OAI-PMH-only maps - -Most 
ArchivesSpace OAI-PMH responses are based on the export maps above, but there are a few that are only available through OAI-PMH - -[MODS for resources and resource components](https://archivesspace.org/wp-content/uploads/2019/06/MODS-OAI-Export-Mapping-20190610.xlsx) -[Dublin Core for resources and resource components](https://archivesspace.org/wp-content/uploads/2019/06/DC-OAI-Export-Mapping-20190610.xlsx) -[DCMI Metadata Terms for resources and resource components](https://archivesspace.org/wp-content/uploads/2019/06/DCTerms-OAI-Export-Mapping-20190611.xlsx) - diff --git a/package-lock.json b/package-lock.json deleted file mode 100644 index 79d34377..00000000 --- a/package-lock.json +++ /dev/null @@ -1,1966 +0,0 @@ -{ - "name": "tech-docs", - "version": "0.1.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "tech-docs", - "version": "0.1.0", - "license": "ECL-2.0", - "devDependencies": { - "cypress": "^13.14.1" - }, - "engines": { - "node": ">=20.17.0" - } - }, - "node_modules/@colors/colors": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", - "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", - "dev": true, - "optional": true, - "engines": { - "node": ">=0.1.90" - } - }, - "node_modules/@cypress/request": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@cypress/request/-/request-3.0.1.tgz", - "integrity": "sha512-TWivJlJi8ZDx2wGOw1dbLuHJKUYX7bWySw377nlnGOW3hP9/MUKIsEdXT/YngWxVdgNCHRBmFlBipE+5/2ZZlQ==", - "dev": true, - "dependencies": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "http-signature": "~1.3.6", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "performance-now": "^2.1.0", - "qs": "6.10.4", 
- "safe-buffer": "^5.1.2", - "tough-cookie": "^4.1.3", - "tunnel-agent": "^0.6.0", - "uuid": "^8.3.2" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/@cypress/xvfb": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@cypress/xvfb/-/xvfb-1.2.4.tgz", - "integrity": "sha512-skbBzPggOVYCbnGgV+0dmBdW/s77ZkAOXIC1knS8NagwDjBrNC1LuXtQJeiN6l+m7lzmHtaoUw/ctJKdqkG57Q==", - "dev": true, - "dependencies": { - "debug": "^3.1.0", - "lodash.once": "^4.1.1" - } - }, - "node_modules/@cypress/xvfb/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/@types/node": { - "version": "22.5.2", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.5.2.tgz", - "integrity": "sha512-acJsPTEqYqulZS/Yp/S3GgeE6GZ0qYODUR8aVr/DkhHQ8l9nd4j5x1/ZJy9/gHrRlFMqkO6i0I3E27Alu4jjPg==", - "dev": true, - "optional": true, - "dependencies": { - "undici-types": "~6.19.2" - } - }, - "node_modules/@types/sinonjs__fake-timers": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-8.1.1.tgz", - "integrity": "sha512-0kSuKjAS0TrGLJ0M/+8MaFkGsQhZpB6pxOmvS3K8FYI72K//YmdfoW9X2qPsAKh1mkwxGD5zib9s1FIFed6E8g==", - "dev": true - }, - "node_modules/@types/sizzle": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/@types/sizzle/-/sizzle-2.3.8.tgz", - "integrity": "sha512-0vWLNK2D5MT9dg0iOo8GlKguPAU02QjmZitPEsXRuJXU/OGIOt9vT9Fc26wtYuavLxtO45v9PGleoL9Z0k1LHg==", - "dev": true - }, - "node_modules/@types/yauzl": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.3.tgz", - "integrity": "sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==", - "dev": true, - "optional": true, - 
"dependencies": { - "@types/node": "*" - } - }, - "node_modules/aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "dev": true, - "dependencies": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-colors": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", - "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "dev": true, - "dependencies": { - "type-fest": "^0.21.3" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/arch": { - "version": "2.2.0", - "resolved": 
"https://registry.npmjs.org/arch/-/arch-2.2.0.tgz", - "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/asn1": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", - "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", - "dev": true, - "dependencies": { - "safer-buffer": "~2.1.0" - } - }, - "node_modules/assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", - "dev": true, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/astral-regex": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", - "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/async": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", - "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", - "dev": true - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", - "dev": true - }, - "node_modules/at-least-node": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": 
"sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", - "dev": true, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/aws4": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.13.2.tgz", - "integrity": "sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw==", - "dev": true - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", - "dev": true, - "dependencies": { - "tweetnacl": "^0.14.3" - } - }, - "node_modules/blob-util": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/blob-util/-/blob-util-2.0.2.tgz", - "integrity": "sha512-T7JQa+zsXXEa6/8ZhHcQEW1UFfVM49Ts65uBkFL6fz2QmrElqmbajIDJvuA0tEhRe5eIjpV9ZF+0RfZR9voJFQ==", - "dev": true - }, - "node_modules/bluebird": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": 
"sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", - "dev": true - }, - "node_modules/buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "node_modules/buffer-crc32": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", - "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/cachedir": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/cachedir/-/cachedir-2.4.0.tgz", - "integrity": "sha512-9EtFOZR8g22CL7BWjJ9BUx1+A/djkofnyW3aOXZORNW2kxoUpx2h+uN2cOqwPmFhnpVmxg+KW2OjOSgChTEvsQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/call-bind": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", - "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", - "dev": true, - "dependencies": { - "es-define-property": "^1.0.0", - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", - "get-intrinsic": "^1.2.4", - "set-function-length": "^1.2.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": 
"sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==", - "dev": true - }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/chalk/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/check-more-types": { - "version": "2.24.0", - "resolved": "https://registry.npmjs.org/check-more-types/-/check-more-types-2.24.0.tgz", - "integrity": "sha512-Pj779qHxV2tuapviy1bSZNEL1maXr13bPYpsvSDB68HlYcYuhlDrmGd63i0JHMCLKzc7rUSNIrpdJlhVlNwrxA==", - "dev": true, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/ci-info": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", - "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/sibiraj-s" - } - ], - "engines": { - "node": ">=8" - } - }, - "node_modules/clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/cli-cursor": { - "version": 
"3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "dev": true, - "dependencies": { - "restore-cursor": "^3.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cli-table3": { - "version": "0.6.5", - "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", - "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", - "dev": true, - "dependencies": { - "string-width": "^4.2.0" - }, - "engines": { - "node": "10.* || >= 12.*" - }, - "optionalDependencies": { - "@colors/colors": "1.5.0" - } - }, - "node_modules/cli-truncate": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-2.1.0.tgz", - "integrity": "sha512-n8fOixwDD6b/ObinzTrp1ZKFzbgvKZvuz/TvejnLn1aQfC6r52XEx85FmuC+3HI+JM7coBRXUvNqEU2PHVrHpg==", - "dev": true, - "dependencies": { - "slice-ansi": "^3.0.0", - "string-width": "^4.2.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/colorette": { - "version": "2.0.20", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", - "integrity": 
"sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", - "dev": true - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dev": true, - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz", - "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/common-tags": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/common-tags/-/common-tags-1.8.2.tgz", - "integrity": "sha512-gk/Z852D2Wtb//0I+kRFNKKE9dIIVirjoqPoA1wJU+XePVXZfGeBpk45+A1rKO4Q43prqWBNY/MiIeRLbPWUaA==", - "dev": true, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==", - "dev": true - }, - "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/cypress": { - "version": "13.14.1", - "resolved": "https://registry.npmjs.org/cypress/-/cypress-13.14.1.tgz", - "integrity": 
"sha512-Wo+byPmjps66hACEH5udhXINEiN3qS3jWNGRzJOjrRJF3D0+YrcP2LVB1T7oYaVQM/S+eanqEvBWYc8cf7Vcbg==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "@cypress/request": "^3.0.1", - "@cypress/xvfb": "^1.2.4", - "@types/sinonjs__fake-timers": "8.1.1", - "@types/sizzle": "^2.3.2", - "arch": "^2.2.0", - "blob-util": "^2.0.2", - "bluebird": "^3.7.2", - "buffer": "^5.7.1", - "cachedir": "^2.3.0", - "chalk": "^4.1.0", - "check-more-types": "^2.24.0", - "cli-cursor": "^3.1.0", - "cli-table3": "~0.6.1", - "commander": "^6.2.1", - "common-tags": "^1.8.0", - "dayjs": "^1.10.4", - "debug": "^4.3.4", - "enquirer": "^2.3.6", - "eventemitter2": "6.4.7", - "execa": "4.1.0", - "executable": "^4.1.1", - "extract-zip": "2.0.1", - "figures": "^3.2.0", - "fs-extra": "^9.1.0", - "getos": "^3.2.1", - "is-ci": "^3.0.1", - "is-installed-globally": "~0.4.0", - "lazy-ass": "^1.6.0", - "listr2": "^3.8.3", - "lodash": "^4.17.21", - "log-symbols": "^4.0.0", - "minimist": "^1.2.8", - "ospath": "^1.2.2", - "pretty-bytes": "^5.6.0", - "process": "^0.11.10", - "proxy-from-env": "1.0.0", - "request-progress": "^3.0.0", - "semver": "^7.5.3", - "supports-color": "^8.1.1", - "tmp": "~0.2.3", - "untildify": "^4.0.0", - "yauzl": "^2.10.0" - }, - "bin": { - "cypress": "bin/cypress" - }, - "engines": { - "node": "^16.0.0 || ^18.0.0 || >=20.0.0" - } - }, - "node_modules/dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", - "dev": true, - "dependencies": { - "assert-plus": "^1.0.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/dayjs": { - "version": "1.11.13", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.13.tgz", - "integrity": "sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==", - "dev": true - }, - "node_modules/debug": { - "version": "4.3.6", 
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.6.tgz", - "integrity": "sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/define-data-property": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", - "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", - "dev": true, - "dependencies": { - "es-define-property": "^1.0.0", - "es-errors": "^1.3.0", - "gopd": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "dev": true, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", - "dev": true, - "dependencies": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": 
"sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dev": true, - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/enquirer": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.4.1.tgz", - "integrity": "sha512-rRqJg/6gd538VHvR3PSrdRBb/1Vy2YfzHqzvbhGIQpDRKIa4FgV/54b5Q1xYSxOOwKvjXweS26E0Q+nAMwp2pQ==", - "dev": true, - "dependencies": { - "ansi-colors": "^4.1.1", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/es-define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", - "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", - "dev": true, - "dependencies": { - "get-intrinsic": "^1.2.4" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "dev": true, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/eventemitter2": { - "version": "6.4.7", - "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-6.4.7.tgz", - "integrity": "sha512-tYUSVOGeQPKt/eC1ABfhHy5Xd96N3oIijJvN3O9+TsC28T5V9yX9oEfEK5faP0EFSNVOG97qtAS68GBrQB2hDg==", - "dev": true - }, - "node_modules/execa": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz", - "integrity": 
"sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==", - "dev": true, - "dependencies": { - "cross-spawn": "^7.0.0", - "get-stream": "^5.0.0", - "human-signals": "^1.1.1", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.0", - "onetime": "^5.1.0", - "signal-exit": "^3.0.2", - "strip-final-newline": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/executable": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/executable/-/executable-4.1.1.tgz", - "integrity": "sha512-8iA79xD3uAch729dUG8xaaBBFGaEa0wdD2VkYLFHwlqosEj/jT66AzcreRDSgV7ehnNLBW2WR5jIXwGKjVdTLg==", - "dev": true, - "dependencies": { - "pify": "^2.2.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "dev": true - }, - "node_modules/extract-zip": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz", - "integrity": "sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==", - "dev": true, - "dependencies": { - "debug": "^4.1.1", - "get-stream": "^5.1.0", - "yauzl": "^2.10.0" - }, - "bin": { - "extract-zip": "cli.js" - }, - "engines": { - "node": ">= 10.17.0" - }, - "optionalDependencies": { - "@types/yauzl": "^2.9.1" - } - }, - "node_modules/extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==", - "dev": true, - "engines": [ - "node >=0.6.0" - ] - }, - "node_modules/fd-slicer": { - "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", - "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", - "dev": true, - "dependencies": { - "pend": "~1.2.0" - } - }, - "node_modules/figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "dev": true, - "dependencies": { - "escape-string-regexp": "^1.0.5" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "dev": true, - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": 
"sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "dev": true, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-intrinsic": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", - "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", - "dev": true, - "dependencies": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", - "has-proto": "^1.0.1", - "has-symbols": "^1.0.3", - "hasown": "^2.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dev": true, - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/getos": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/getos/-/getos-3.2.1.tgz", - "integrity": "sha512-U56CfOK17OKgTVqozZjUKNdkfEv6jk5WISBJ8SHoagjE6L69zOwl3Z+O8myjY9MEW3i2HPWQBt/LTbCgcC973Q==", - "dev": true, - "dependencies": { - "async": "^3.2.0" - } - }, - "node_modules/getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", - "dev": true, - "dependencies": { - "assert-plus": "^1.0.0" - } - }, - "node_modules/global-dirs": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", - "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", - "dev": true, - 
"dependencies": { - "ini": "2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/gopd": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", - "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", - "dev": true, - "dependencies": { - "get-intrinsic": "^1.1.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "dev": true - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/has-property-descriptors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", - "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", - "dev": true, - "dependencies": { - "es-define-property": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-proto": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", - "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", - "dev": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-symbols": { - "version": "1.0.3", - "resolved": 
"https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "dev": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/hasown": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "dev": true, - "dependencies": { - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/http-signature": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.3.6.tgz", - "integrity": "sha512-3adrsD6zqo4GsTqtO7FyrejHNv+NgiIfAfv68+jVlFmSr9OGy7zrxONceFRLKvnnZA5jbxQBX1u9PpB6Wi32Gw==", - "dev": true, - "dependencies": { - "assert-plus": "^1.0.0", - "jsprim": "^2.0.2", - "sshpk": "^1.14.1" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/human-signals": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz", - "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==", - "dev": true, - "engines": { - "node": ">=8.12.0" - } - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/indent-string": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/ini": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", - "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", - "dev": true, - "engines": { - "node": ">=10" - } - }, - "node_modules/is-ci": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", - "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", - "dev": true, - "dependencies": { - "ci-info": "^3.2.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-installed-globally": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", - "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", - "dev": true, - "dependencies": { - "global-dirs": "^3.0.0", - "is-path-inside": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/is-stream": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", - "dev": true, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==", - "dev": true - }, - "node_modules/is-unicode-supported": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", - "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true - }, - "node_modules/isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==", - "dev": true - }, - "node_modules/jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==", - "dev": true - }, - "node_modules/json-schema": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", - "integrity": 
"sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==", - "dev": true - }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", - "dev": true - }, - "node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsprim": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-2.0.2.tgz", - "integrity": "sha512-gqXddjPqQ6G40VdnI6T6yObEC+pDNvyP95wdQhkWkg7crHH3km5qP1FsOXEkzEQwnz6gz5qGTn1c2Y52wP3OyQ==", - "dev": true, - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.4.0", - "verror": "1.10.0" - } - }, - "node_modules/lazy-ass": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/lazy-ass/-/lazy-ass-1.6.0.tgz", - "integrity": "sha512-cc8oEVoctTvsFZ/Oje/kGnHbpWHYBe8IAJe4C0QNc3t8uM/0Y8+erSz/7Y1ALuXTEZTMvxXwO6YbX1ey3ujiZw==", - "dev": true, - "engines": { - "node": "> 0.8" - } - }, - "node_modules/listr2": { - "version": "3.14.0", - "resolved": "https://registry.npmjs.org/listr2/-/listr2-3.14.0.tgz", - "integrity": "sha512-TyWI8G99GX9GjE54cJ+RrNMcIFBfwMPxc3XTFiAYGN4s10hWROGtOg7+O6u6LE3mNkyld7RSLE6nrKBvTfcs3g==", - "dev": true, - "dependencies": { - "cli-truncate": "^2.1.0", - "colorette": "^2.0.16", - "log-update": "^4.0.0", - "p-map": "^4.0.0", - "rfdc": "^1.3.0", - "rxjs": "^7.5.1", - "through": "^2.3.8", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=10.0.0" - }, - 
"peerDependencies": { - "enquirer": ">= 2.3.0 < 3" - }, - "peerDependenciesMeta": { - "enquirer": { - "optional": true - } - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, - "node_modules/lodash.once": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", - "integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==", - "dev": true - }, - "node_modules/log-symbols": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", - "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", - "dev": true, - "dependencies": { - "chalk": "^4.1.0", - "is-unicode-supported": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/log-update": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/log-update/-/log-update-4.0.0.tgz", - "integrity": "sha512-9fkkDevMefjg0mmzWFBW8YkFP91OrizzkW3diF7CpG+S2EYdy4+TVfGwz1zeF8x7hCx1ovSPTOE9Ngib74qqUg==", - "dev": true, - "dependencies": { - "ansi-escapes": "^4.3.0", - "cli-cursor": "^3.1.0", - "slice-ansi": "^4.0.0", - "wrap-ansi": "^6.2.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/log-update/node_modules/slice-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", - "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.0.0", - "astral-regex": "^2.0.0", - 
"is-fullwidth-code-point": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/slice-ansi?sponsor=1" - } - }, - "node_modules/log-update/node_modules/wrap-ansi": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", - "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", - "dev": true - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "dev": true, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dev": true, - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": 
"sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "dev": true, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/npm-run-path": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", - "dev": true, - "dependencies": { - "path-key": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/object-inspect": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", - "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", - "dev": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dev": true, - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dev": true, - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ospath": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/ospath/-/ospath-1.2.2.tgz", - "integrity": 
"sha512-o6E5qJV5zkAbIDNhGSIlyOhScKXgQrSRMilfph0clDfM0nEnBOlKlH4sWDmG95BW/CvwNz0vmm7dJVtU2KlMiA==", - "dev": true - }, - "node_modules/p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "dev": true, - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/pend": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==", - "dev": true - }, - "node_modules/performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==", - "dev": true - }, - "node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pretty-bytes": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.6.0.tgz", - "integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==", - "dev": true, - "engines": { - "node": ">=6" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/process": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", - "dev": true, - "engines": { - "node": ">= 0.6.0" - } - }, - "node_modules/proxy-from-env": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.0.0.tgz", - "integrity": "sha512-F2JHgJQ1iqwnHDcQjVBsq3n/uoaFL+iPW/eAeL7kVxy/2RrWaN4WroKjjvbsoRtv0ftelNyC01bjRhn/bhcf4A==", - "dev": true - }, - "node_modules/psl": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", - "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==", - "dev": true - }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dev": true, - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/punycode": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", - "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/qs": { - "version": "6.10.4", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.4.tgz", - "integrity": "sha512-OQiU+C+Ds5qiH91qh/mg0w+8nwQuLjM4F4M/PbmhDOoYehPh+Fb0bDjtR1sOvy7YKxvj28Y/M0PhP5uVX0kB+g==", - "dev": true, - "dependencies": { - "side-channel": "^1.0.4" - }, - "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/querystringify": { - "version": "2.2.0", - "resolved": 
"https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", - "dev": true - }, - "node_modules/request-progress": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/request-progress/-/request-progress-3.0.0.tgz", - "integrity": "sha512-MnWzEHHaxHO2iWiQuHrUPBi/1WeBf5PkxQqNyNvLl9VAYSdXkP8tQ3pBSeCPD+yw0v0Aq1zosWLz0BdeXpWwZg==", - "dev": true, - "dependencies": { - "throttleit": "^1.0.0" - } - }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", - "dev": true - }, - "node_modules/restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "dev": true, - "dependencies": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/rfdc": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.4.1.tgz", - "integrity": "sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==", - "dev": true - }, - "node_modules/rxjs": { - "version": "7.8.1", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", - "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", - "dev": true, - "dependencies": { - "tslib": "^2.1.0" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "dev": true, - 
"funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "dev": true - }, - "node_modules/semver": { - "version": "7.6.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", - "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/set-function-length": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", - "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", - "dev": true, - "dependencies": { - "define-data-property": "^1.1.4", - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", - "get-intrinsic": "^1.2.4", - "gopd": "^1.0.1", - "has-property-descriptors": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - 
"dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/side-channel": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", - "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.7", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.4", - "object-inspect": "^1.13.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true - }, - "node_modules/slice-ansi": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-3.0.0.tgz", - "integrity": "sha512-pSyv7bSTC7ig9Dcgbw9AuRNUb5k5V6oDudjZoMBSr13qpLBG7tB+zgCkARjq7xIUgdz5P1Qe8u+rSGdouOOIyQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.0.0", - "astral-regex": "^2.0.0", - "is-fullwidth-code-point": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/sshpk": { - "version": "1.18.0", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz", - "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==", - "dev": true, - "dependencies": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - }, - "bin": { - "sshpk-conv": "bin/sshpk-conv", - "sshpk-sign": "bin/sshpk-sign", - "sshpk-verify": "bin/sshpk-verify" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/string-width": { - "version": "4.2.3", - "resolved": 
"https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-final-newline": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/throttleit": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-1.0.1.tgz", - "integrity": "sha512-vDZpf9Chs9mAdfY046mcPt8fg5QSZr37hEH4TXYBnDF+izxgrbRGUAAaBvIk/fJm9aOFCGFd1EsNg5AZCbnQCQ==", - "dev": true, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": 
"sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", - "dev": true - }, - "node_modules/tmp": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.3.tgz", - "integrity": "sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w==", - "dev": true, - "engines": { - "node": ">=14.14" - } - }, - "node_modules/tough-cookie": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", - "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", - "dev": true, - "dependencies": { - "psl": "^1.1.33", - "punycode": "^2.1.1", - "universalify": "^0.2.0", - "url-parse": "^1.5.3" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/tough-cookie/node_modules/universalify": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", - "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", - "dev": true, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/tslib": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.7.0.tgz", - "integrity": "sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==", - "dev": true - }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", - "dev": true, - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, - "node_modules/tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": 
"sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==", - "dev": true - }, - "node_modules/type-fest": { - "version": "0.21.3", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/undici-types": { - "version": "6.19.8", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", - "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", - "dev": true, - "optional": true - }, - "node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/untildify": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/untildify/-/untildify-4.0.0.tgz", - "integrity": "sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/url-parse": { - "version": "1.5.10", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", - "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", - "dev": true, - "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, - "node_modules/uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", - "dev": true, - "bin": { - "uuid": 
"dist/bin/uuid" - } - }, - "node_modules/verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", - "dev": true, - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true - }, - "node_modules/yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", - "dev": true, - "dependencies": { - "buffer-crc32": "~0.2.3", - "fd-slicer": "~1.1.0" - } - } - } -} diff --git a/package.json b/package.json deleted file mode 100644 index 91a750b2..00000000 --- a/package.json +++ /dev/null @@ -1,20 
+0,0 @@ -{ - "name": "tech-docs", - "type": "module", - "version": "0.1.0", - "description": "ArchivesSpace technical documentation", - "repository": "https://github.com/archivesspace/tech-docs", - "license": "ECL-2.0", - "engines": { - "node": ">=20.17.0" - }, - "scripts": { - "test": "cypress run", - "dev": "bundle exec jekyll serve", - "build": "bundle exec jekyll build", - "start": "npm run dev" - }, - "devDependencies": { - "cypress": "^13.14.1" - } -} diff --git a/provisioning/README.md b/provisioning/README.md deleted file mode 100644 index 8a23bab9..00000000 --- a/provisioning/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -permalink: /provisioning/ ---- - -# ArchivesSpace provisioning and server configuration - -* [Running ArchivesSpace with load balancing and multiple tenants](./clustering.html) -* [Serving ArchivesSpace over subdomains](./domains.html) -* [Serving ArchivesSpace user-facing applications over HTTPS](./https.html) -* [JMeter Test Group Template](./jmeter.html) -* [Running ArchivesSpace against MySQL](./mysql.html) -* [Application monitoring with New Relic](./newrelic.html) -* [Running ArchivesSpace under a prefix](./prefix.html) -* [robots.txt](./robots.html) -* [Running ArchivesSpace with external Solr](./solr.html) -* [Tuning ArchivesSpace](./tuning.html) diff --git a/provisioning/clustering.html b/provisioning/clustering.html new file mode 100644 index 00000000..74b43514 --- /dev/null +++ b/provisioning/clustering.html @@ -0,0 +1,503 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/clustering.md + +

+ +

+ + Report issue on Jira + provisioning/clustering.md + +

+
+
+ +

Running ArchivesSpace with load balancing and multiple tenants

+ +

This document describes two aspects of running ArchivesSpace in a +clustered environment: for load-balancing purposes, and for supporting +multiple tenants (isolated installations of the system in a common +deployment environment).

+ +

The configuration described in this document is one possible approach, +but it is not intended to be prescriptive: the application layer of +ArchivesSpace is stateless, so any mechanism you prefer for load +balancing across web applications should work just as well as the one +described here.

+ +

Unless otherwise stated, it is assumed that you have root access on +your machines, and all commands are to be run as root (or with sudo).

+ +

Architecture overview

+ +

This document assumes an architecture with the following components:

+ +
    +
  • A load balancer machine running the Nginx web server
  • +
  • Two application servers, each running a full ArchivesSpace +application stack
  • +
  • A MySQL server
  • +
  • A shared NFS volume mounted under /aspace on each machine
  • +
+ +

Overview of files

+ +

The files directory in this repository (in the same directory as this +README.md) contains what will become the contents of the /aspace +directory, shared by all servers. It has the following layout:

+ +
 /aspace
+ ├── archivesspace
+ │   ├── config
+ │   │   ├── config.rb
+ │   │   └── tenant.rb
+ │   ├── software
+ │   └── tenants
+ │       └── \_template
+ │           └── archivesspace
+ │               ├── config
+ │               │   ├── config.rb
+ │               │   └── instance_hostname.rb.example
+ │               └── init_tenant.sh
+ └── nginx
+     └── conf
+         ├── common
+         │   └── server.conf
+         └── tenants
+             └── \_template.conf.example
+
+ +

The highlights:

+ +
    +
  • /aspace/archivesspace/config/config.rb – A global configuration file for all ArchivesSpace instances. Any configuration options added to this file will be applied to all tenants on all machines.
  • +
  • /aspace/archivesspace/software/ – This directory will hold the master copies of the archivesspace.zip distribution. Each tenant will reference one of the versions of the ArchivesSpace software in this directory.
  • +
  • /aspace/archivesspace/tenants/ – Each tenant will have a sub-directory under here, based on the _template directory provided. This holds the configuration files for each tenant.
  • +
  • /aspace/archivesspace/tenants/[tenant name]/config/config.rb – The global configuration file for [tenant name]. This contains tenant-specific options that should apply to all of the tenant’s ArchivesSpace instances (such as their database connection settings).
  • +
  • /aspace/archivesspace/tenants/[tenant name]/config/instance_[hostname].rb – The configuration file for a tenant’s ArchivesSpace instance running on a particular machine. This allows configuration options to be set on a per-machine basis (for example, setting different ports for different application servers)
  • +
  • /aspace/nginx/conf/common/server.conf – Global Nginx configuration settings (applying to all tenants)
  • +
  • /aspace/nginx/conf/tenants/[tenant name].conf – A tenant-specific Nginx configuration file. Used to set the URLs of each tenant’s ArchivesSpace instances.
  • +
+ +

Getting started

+ +

We’ll assume you already have the following ready to go:

+ +
    +
  • Three newly installed machines, each running RedHat (or CentOS) +Linux (we’ll refer to these as loadbalancer, apps1 and +apps2).
  • +
  • A MySQL server.
  • +
  • An NFS volume that has been mounted as /aspace on each machine. +All machines should have full read/write access to this area.
  • +
  • An area under /aspace.local which will store instance-specific +files (such as log files and Solr indexes). Ideally this is just +a directory on local disk.
  • +
  • Java 1.6 (or above) installed on each machine.
  • +
+ +

Populate your /aspace/ directory

+ +

Start by copying the directory structure from files/ into your +/aspace volume. This will contain all of the configuration files +shared between servers:

+ +
 mkdir /var/tmp/aspace/
+ cd /var/tmp/aspace/
+ unzip -x /path/to/archivesspace.zip
+ cp -av archivesspace/clustering/files/* /aspace/
+
+ +

You can do this on any machine that has access to the shared +/aspace/ volume.

+ +

Install the cluster init script

+ +

On your application servers (apps1 and apps2) you will need to +install the supplied init script:

+ +
 cp -a /aspace/aspace-cluster.init /etc/init.d/aspace-cluster
+ chkconfig --add aspace-cluster
+
+ +

This will start all configured instances when the system boots up, and +can also be used to start/stop individual instances.

+ +

Install and configure Nginx

+ +

You will need to install Nginx on your loadbalancer machine, which +you can do by following the directions at +http://nginx.org/en/download.html. Using the pre-built packages for +your platform is fine. At the time of writing, the process for CentOS +is simply:

+ +
 wget http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ rpm -i nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ yum install nginx
+
+ +

Nginx will place its configuration files under /etc/nginx/. For +now, the only change we need to make is to configure Nginx to load our +tenants’ configuration files. To do this, edit +/etc/nginx/conf.d/default.conf and add the line:

+ +
 include /aspace/nginx/conf/tenants/\*.conf;
+
+ +

Note: the location of Nginx’s main config file might vary between +systems. Another likely candidate is /etc/nginx/nginx.conf.

+ +

Download the ArchivesSpace distribution

+ +

Rather than having every tenant maintain their own copy of the +ArchivesSpace software, we put a shared copy under +/aspace/archivesspace/software/ and have each tenant instance refer +to that copy. To set this up, run the following commands on any one +of the servers:

+ +
 cd /aspace/archivesspace/software/
+ unzip -x /path/to/downloaded/archivesspace-x.y.z.zip
+ mv archivesspace archivesspace-x.y.z
+ ln -s archivesspace-x.y.z stable
+
+ +

Note that we unpack the distribution into a directory containing its +version number, and then assign that version the symbolic name +“stable”. This gives us a convenient way of referring to particular +versions of the software, and we’ll use this later on when setting up +our tenant.

+ +

We’ll be using MySQL, which means we must make the MySQL connector +library available. To do this, place it in the lib/ directory of +the ArchivesSpace package:

+ +
 cd /aspace/archivesspace/software/stable/lib
+ wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.24/mysql-connector-java-5.1.24.jar
+
+ +

Defining a new tenant

+ +

With our server setup out of the way, we’re ready to define our first +tenant. As shown in Overview of files above, each tenant has their +own directory under /aspace/archivesspace/tenants/ that holds all of +their configuration files. In defining our new tenant, we will:

+ +
    +
  • Create a Unix account for the tenant
  • +
  • Create a database for the tenant
  • +
  • Create a new set of ArchivesSpace configuration files for the +tenant
  • +
  • Set up the database
  • +
+ +

Our newly defined tenant won’t initially have any ArchivesSpace +instances, but we’ll set those up afterwards.

+ +

To complete the remainder of this process, there are a few bits of +information you will need. In particular, you will need to know:

+ +
    +
  • The identifier you will use for the tenant you will be creating. +In this example we use exampletenant.
  • +
  • Which port numbers you will use for the application’s backend, +Solr instance, staff and public interfaces. These must be free on +your application servers.
  • +
  • If running each tenant under a separate Unix account, the UID and +GID you’ll use for them (which must be free on each of your +servers).
  • +
  • The public-facing URLs for the new tenant. We’ll use +staff.example.com for the staff interface, and public.example.com +for the public interface.
  • +
+ +

Creating a Unix account

+ +

Although not strictly required, for security and ease of system +monitoring it’s a good idea to have each tenant instance running under +a dedicated Unix account.

+ +

We will call our new tenant exampletenant, so let’s create a user +and group for them now. You will need to run these commands on both +application servers (apps1 and apps2):

+ +
 groupadd --gid 2000 exampletenant
+ useradd --uid 2000 --gid 2000 exampletenant
+
+ +

Note that we specify a UID and GID explicitly to ensure they match +across machines.

+ +

Creating the database

+ +

ArchivesSpace assumes that each tenant will have their own MySQL +database. You can create this from the MySQL shell:

+ +
 create database exampletenant default character set utf8;
+ grant all on exampletenant.* to 'example'@'%' identified by 'example123';
+
+ +

In this example, we have a MySQL database called exampletenant, and +we grant full access to the user example with password example123. +Assuming our database server is db.example.com, this corresponds to +the database URL:

+ +
 jdbc:mysql://db.example.com:3306/exampletenant?user=example&password=example123&useUnicode=true&characterEncoding=UTF-8
+
+ +

We’ll make use of this URL in the following section.

+ +

Creating the tenant configuration

+ +

Each tenant has their own set of files under the +/aspace/archivesspace/tenants/ directory. We’ll define our new +tenant (called exampletenant) by copying the template set of +configurations and running the init_tenant.sh script to set them +up. We can do this on either apps1 or apps2–it only needs to be +done once:

+ +
 cd /aspace/archivesspace/tenants
+ cp -a \_template exampletenant
+
+ +

Note that we’ve named the tenant exampletenant to match the Unix +account it will run as. Later on, the startup script will use this +fact to run each instance as the correct user.

+ +

For now, we’ll just edit the configuration file for this tenant, under +exampletenant/archivesspace/config/config.rb. When you open this file you’ll see two +placeholders that need filling in: one for your database URL, which in +our case is just:

+ +
 jdbc:mysql://db.example.com:3306/exampletenant?user=example&password=example123&useUnicode=true&characterEncoding=UTF-8
+
+ +

and the other for this tenant’s search, staff and public user secrets, +which should be random, hard to guess passwords.

+ +

Adding the tenant instances

+ +

To add our tenant instances, we just need to initialize them on each +of our servers. On apps1 and apps2, we run:

+ +
 cd /aspace/archivesspace/tenants/exampletenant/archivesspace
+ ./init_tenant.sh stable
+
+ +

If you list the directory now, you will see that the init_tenant.sh +script has created a number of symlinks. Most of these refer back to +the stable version of the ArchivesSpace software we unpacked +previously, and some contain references to the data and logs +directories stored under /aspace.local.

+ +

Each server has its own configuration file that tells the +ArchivesSpace application which ports to listen on. To set this up, +make two copies of the example configuration by running the following +command on apps1 then apps2:

+ +
 cd /aspace/archivesspace/tenants/exampletenant/archivesspace
+ cp config/instance_hostname.rb.example config/instance_`hostname`.rb
+
+ +

Then edit each file to set the URLs that the instance will use. +Here’s our config/instance_apps1.example.com.rb:

+ +
 {
+   :backend_url => "http://apps1.example.com:8089",
+   :frontend_url => "http://apps1.example.com:8080",
+   :solr_url => "http://apps1.example.com:8090",
+   :indexer_url => "http://apps1.example.com:8091",
+   :public_url => "http://apps1.example.com:8081",
+ }
+
+ +

Note that the filename is important here: it must be:

+ +
 instance_[server hostname].rb
+
+ +

These URLs will determine which ports the application listens on when +it starts up, and are also used by the ArchivesSpace indexing system +to track updates across the cluster.

+ +

Starting up

+ +

As a one-off, we need to populate this tenant’s database with the +default set of tables. You can do this by running the +setup-database.sh script on either apps1 or apps2:

+ +
 cd /aspace/archivesspace/tenants/exampletenant/archivesspace
+ scripts/setup-database.sh
+
+ +

With the two instances configured, you can now use the init script to +start them up on each server:

+ +
 /etc/init.d/aspace-cluster start-tenant exampletenant
+
+ +

and you can monitor each instance’s log file under +/aspace.local/tenants/exampletenant/logs/. Once they’re started, +you should be able to connect to each instance with your web browser +at the configured URLs.

+ +

Configuring the load balancer

+ +

Our final step is configuring Nginx to accept requests for our staff +and public interfaces and forward them to the appropriate application +instance. Working on the loadbalancer machine, we create a new +configuration file for our tenant:

+ +
 cd /aspace/nginx/conf/tenants
+ cp -a _template.conf.example exampletenant.conf
+
+ +

Now open /aspace/nginx/conf/tenants/exampletenant.conf in an +editor. You will need to:

+ +
    +
  • Replace <tenantname> with exampletenant where it appears.
  • +
  • Change the server URLs to match the hostnames and ports you +configured each instance with.
  • +
  • Insert the tenant’s hostnames for each server_name entry. In +our case these are public.example.com for the public interface, and +staff.example.com for the staff interface.
  • +
+ +

Once you’ve saved your configuration, you can test it with:

+ +
 /usr/sbin/nginx -t
+
+ +

If Nginx reports that all is well, reload the configurations with:

+ +
 /usr/sbin/nginx -s reload
+
+ +

And, finally, browse to http://public.example.com/ to verify that Nginx +is now accepting requests and forwarding them to your app servers. +We’re done!

+ + +
+ +
+ + + diff --git a/provisioning/clustering.md b/provisioning/clustering.md deleted file mode 100644 index 048ce109..00000000 --- a/provisioning/clustering.md +++ /dev/null @@ -1,347 +0,0 @@ -# Running ArchivesSpace with load balancing and multiple tenants - -This document describes two aspects of running ArchivesSpace in a -clustered environment: for load-balancing purposes, and for supporting -multiple tenants (isolated installations of the system in a common -deployment environment). - -The configuration described in this document is one possible approach, -but it is not intended to be prescriptive: the application layer of -ArchivesSpace is stateless, so any mechanism you prefer for load -balancing across web applications should work just as well as the one -described here. - -Unless otherwise stated, it is assumed that you have root access on -your machines, and all commands are to be run as root (or with sudo). - - -## Architecture overview - -This document assumes an architecture with the following components: - - * A load balancer machine running the Nginx web server - * Two application servers, each running a full ArchivesSpace - application stack - * A MySQL server - * A shared NFS volume mounted under `/aspace` on each machine - - -## Overview of files - -The `files` directory in this repository (in the same directory as this -`README.md`) contains what will become the contents of the `/aspace` -directory, shared by all servers. 
It has the following layout: - - /aspace - ├── archivesspace - │   ├── config - │   │   ├── config.rb - │   │   └── tenant.rb - │   ├── software - │   └── tenants - │   └── \_template - │   └── archivesspace - │   ├── config - │   │   ├── config.rb - │   │   └── instance_hostname.rb.example - │   └── init_tenant.sh - └── nginx - └── conf - ├── common - │   └── server.conf - └── tenants - └── \_template.conf.example - - -The highlights: - - * `/aspace/archivesspace/config/config.rb` -- A global configuration file for all ArchivesSpace instances. Any configuration options added to this file will be applied to all tenants on all machines. - * `/aspace/archivesspace/software/` -- This directory will hold the master copies of the `archivesspace.zip` distribution. Each tenant will reference one of the versions of the ArchivesSpace software in this directory. - * `/aspace/archivesspace/tenants/` -- Each tenant will have a sub-directory under here, based on the `_template` directory provided. This holds the configuration files for each tenant. - * `/aspace/archivesspace/tenants/[tenant name]/config/config.rb` -- The global configuration file for [tenant name]. This contains tenant-specific options that should apply to all of the tenant's ArchivesSpace instances (such as their database connection settings). - * `/aspace/archivesspace/tenants/[tenant name]/config/instance_[hostname].rb` -- The configuration file for a tenant's ArchivesSpace instance running on a particular machine. This allows configuration options to be set on a per-machine basis (for example, setting different ports for different application servers) - * `/aspace/nginx/conf/common/server.conf` -- Global Nginx configuration settings (applying to all tenants) - * `/aspace/nginx/conf/tenants/[tenant name].conf` -- A tenant-specific Nginx configuration file. Used to set the URLs of each tenant's ArchivesSpace instances. 
- - -## Getting started - -We'll assume you already have the following ready to go: - - * Three newly installed machines, each running RedHat (or CentOS) - Linux (we'll refer to these as `loadbalancer`, `apps1` and - `apps2`). - * A MySQL server. - * An NFS volume that has been mounted as `/aspace` on each machine. - All machines should have full read/write access to this area. - * An area under `/aspace.local` which will store instance-specific - files (such as log files and Solr indexes). Ideally this is just - a directory on local disk. - * Java 1.6 (or above) installed on each machine. - - -### Populate your /aspace/ directory - -Start by copying the directory structure from `files/` into your -`/aspace` volume. This will contain all of the configuration files -shared between servers: - - mkdir /var/tmp/aspace/ - cd /var/tmp/aspace/ - unzip -x /path/to/archivesspace.zip - cp -av archivesspace/clustering/files/* /aspace/ - -You can do this on any machine that has access to the shared -`/aspace/` volume. - - -### Install the cluster init script - -On your application servers (`apps1` and `apps2`) you will need to -install the supplied init script: - - cp -a /aspace/aspace-cluster.init /etc/init.d/aspace-cluster - chkconfig --add aspace-cluster - -This will start all configured instances when the system boots up, and -can also be used to start/stop individual instances. - - -### Install and configure Nginx - -You will need to install Nginx on your `loadbalancer` machine, which -you can do by following the directions at -http://nginx.org/en/download.html. Using the pre-built packages for -your platform is fine. At the time of writing, the process for CentOS -is simply: - - wget http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm - rpm -i nginx-release-centos-6-0.el6.ngx.noarch.rpm - yum install nginx - -Nginx will place its configuration files under `/etc/nginx/`. 
For -now, the only change we need to make is to configure Nginx to load our -tenants' configuration files. To do this, edit -`/etc/nginx/conf.d/default.conf` and add the line: - - include /aspace/nginx/conf/tenants/\*.conf; - -*Note:* the location of Nginx's main config file might vary between -systems. Another likely candidate is `/etc/nginx/nginx.conf`. - - -### Download the ArchivesSpace distribution - -Rather than having every tenant maintain their own copy of the -ArchivesSpace software, we put a shared copy under -`/aspace/archivesspace/software/` and have each tenant instance refer -to that copy. To set this up, run the following commands on any one -of the servers: - - cd /aspace/archivesspace/software/ - unzip -x /path/to/downloaded/archivesspace-x.y.z.zip - mv archivesspace archivesspace-x.y.z - ln -s archivesspace-x.y.z stable - -Note that we unpack the distribution into a directory containing its -version number, and then assign that version the symbolic name -"stable". This gives us a convenient way of referring to particular -versions of the software, and we'll use this later on when setting up -our tenant. - -We'll be using MySQL, which means we must make the MySQL connector -library available. To do this, place it in the `lib/` directory of -the ArchivesSpace package: - - cd /aspace/archivesspace/software/stable/lib - wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.24/mysql-connector-java-5.1.24.jar - - -## Defining a new tenant - -With our server setup out of the way, we're ready to define our first -tenant. As shown in *Overview of files* above, each tenant has their -own directory under `/aspace/archivesspace/tenants/` that holds all of -their configuration files. 
In defining our new tenant, we will: - - * Create a Unix account for the tenant - * Create a database for the tenant - * Create a new set of ArchivesSpace configuration files for the - tenant - * Set up the database - -Our newly defined tenant won't initially have any ArchivesSpace -instances, but we'll set those up afterwards. - -To complete the remainder of this process, there are a few bits of -information you will need. In particular, you will need to know: - - * The identifier you will use for the tenant you will be creating. - In this example we use `exampletenant`. - * Which port numbers you will use for the application's backend, - Solr instance, staff and public interfaces. These must be free on - your application servers. - * If running each tenant under a separate Unix account, the UID and - GID you'll use for them (which must be free on each of your - servers). - * The public-facing URLs for the new tenant. We'll use - `staff.example.com` for the staff interface, and `public.example.com` - for the public interface. - - -### Creating a Unix account - -Although not strictly required, for security and ease of system -monitoring it's a good idea to have each tenant instance running under -a dedicated Unix account. - -We will call our new tenant `exampletenant`, so let's create a user -and group for them now. You will need to run these commands on *both* -application servers (`apps1` and `apps2`): - - groupadd --gid 2000 exampletenant - useradd --uid 2000 --gid 2000 exampletenant - -Note that we specify a UID and GID explicitly to ensure they match -across machines. - - -### Creating the database - -ArchivesSpace assumes that each tenant will have their own MySQL -database. 
You can create this from the MySQL shell: - - create database exampletenant default character set utf8; - grant all on exampletenant.* to 'example'@'%' identified by 'example123'; - -In this example, we have a MySQL database called `exampletenant`, and -we grant full access to the user `example` with password `example123`. -Assuming our database server is `db.example.com`, this corresponds to -the database URL: - - jdbc:mysql://db.example.com:3306/exampletenant?user=example&password=example123&useUnicode=true&characterEncoding=UTF-8 - -We'll make use of this URL in the following section. - - -### Creating the tenant configuration - -Each tenant has their own set of files under the -`/aspace/archivesspace/tenants/` directory. We'll define our new -tenant (called `exampletenant`) by copying the template set of -configurations and running the `init_tenant.sh` script to set them -up. We can do this on either `apps1` or `apps2`--it only needs to be -done once: - - cd /aspace/archivesspace/tenants - cp -a \_template exampletenant - -Note that we've named the tenant `exampletenant` to match the Unix -account it will run as. Later on, the startup script will use this -fact to run each instance as the correct user. - -For now, we'll just edit the configuration file for this tenant, under -`exampletenant/archivesspace/config/config.rb`. When you open this file you'll see two -placeholders that need filling in: one for your database URL, which in -our case is just: - - jdbc:mysql://db.example.com:3306/exampletenant?user=example&password=example123&useUnicode=true&characterEncoding=UTF-8 - -and the other for this tenant's search, staff and public user secrets, -which should be random, hard to guess passwords. - - - -## Adding the tenant instances - -To add our tenant instances, we just need to initialize them on each -of our servers. 
On `apps1` *and* `apps2`, we run: - - cd /aspace/archivesspace/tenants/exampletenant/archivesspace - ./init_tenant.sh stable - -If you list the directory now, you will see that the `init_tenant.sh` -script has created a number of symlinks. Most of these refer back to -the `stable` version of the ArchivesSpace software we unpacked -previously, and some contain references to the `data` and `logs` -directories stored under `/aspace.local`. - -Each server has its own configuration file that tells the -ArchivesSpace application which ports to listen on. To set this up, -make two copies of the example configuration by running the following -command on `apps1` then `apps2`: - - cd /aspace/archivesspace/tenants/exampletenant/archivesspace - cp config/instance_hostname.rb.example config/instance_`hostname`.rb - -Then edit each file to set the URLs that the instance will use. -Here's our `config/instance_apps1.example.com.rb`: - - { - :backend_url => "http://apps1.example.com:8089", - :frontend_url => "http://apps1.example.com:8080", - :solr_url => "http://apps1.example.com:8090", - :indexer_url => "http://apps1.example.com:8091", - :public_url => "http://apps1.example.com:8081", - } - -Note that the filename is important here: it must be: - - instance_[server hostname].rb - -These URLs will determine which ports the application listens on when -it starts up, and are also used by the ArchivesSpace indexing system -to track updates across the cluster. - - -### Starting up - -As a one-off, we need to populate this tenant's database with the -default set of tables. 
You can do this by running the -`setup-database.sh` script on either `apps1` or `apps2`: - - cd /aspace/archivesspace/tenants/exampletenant/archivesspace - scripts/setup-database.sh - -With the two instances configured, you can now use the init script to -start them up on each server: - - /etc/init.d/aspace-cluster start-tenant exampletenant - -and you can monitor each instance's log file under -`/aspace.local/tenants/exampletenant/logs/`. Once they're started, -you should be able to connect to each instance with your web browser -at the configured URLs. - - -## Configuring the load balancer - -Our final step is configuring Nginx to accept requests for our staff -and public interfaces and forward them to the appropriate application -instance. Working on the `loadbalancer` machine, we create a new -configuration file for our tenant: - - cd /aspace/nginx/conf/tenants - cp -a \_template.conf.example exampletenant.conf - -Now open `/aspace/nginx/conf/tenants/exampletenant.conf` in an -editor. You will need to: - - * Replace `` with `exampletenant` where it appears. - * Change the `server` URLs to match the hostnames and ports you - configured each instance with. - * Insert the tenant's hostnames for each `server_name` entry. In - our case these are `public.example.com` for the public interface, and - `staff.example.com` for the staff interface. - -Once you've saved your configuration, you can test it with: - - /usr/sbin/nginx -t - -If Nginx reports that all is well, reload the configurations with: - - /usr/sbin/nginx -s reload - -And, finally, browse to `http://public.example.com/` to verify that Nginx -is now accepting requests and forwarding them to your app servers. -We're done! 
diff --git a/provisioning/domains.html b/provisioning/domains.html new file mode 100644 index 00000000..fbf55142 --- /dev/null +++ b/provisioning/domains.html @@ -0,0 +1,222 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/domains.md + +

+ +

+ + Report issue on Jira + provisioning/domains.md + +

+
+
+ +

Serving ArchivesSpace over subdomains

+ +

This document describes how to configure ArchivesSpace and your web server to serve the application over subdomains (e.g., http://staff.myarchive.org/ and http://public.myarchive.org/), which is the recommended +practice. Separate documentation is available if you wish to serve ArchivesSpace under a prefix (e.g., http://aspace.myarchive.org/staff and +http://aspace.myarchive.org/public).

+ +
    +
  1. Configuring Your Firewall
  2. +
  3. Configuring Your Web Server + +
  4. +
  5. Configuring ArchivesSpace
  6. +
+ +

Step 1: Configuring Your Firewall

+ +

Since using subdomains negates the need for users to access the application directly on ports 8080 and 8081, these should be locked down to access by localhost only. On a Linux server, this can be done using iptables:

+ +
 iptables -A INPUT -p tcp -s localhost --dport 8080 -j ACCEPT
+ iptables -A INPUT -p tcp --dport 8080 -j DROP
+ iptables -A INPUT -p tcp -s localhost --dport 8081 -j ACCEPT
+ iptables -A INPUT -p tcp --dport 8081 -j DROP
+
+ +

Step 2: Configuring Your Web Server

+ +

Apache

+ +

The mod_proxy module is necessary for Apache to route public web traffic to ArchivesSpace’s ports as designated in your config.rb file (ports 8080 and 8081 by default).

+ +

This can be set up as a reverse proxy in the Apache configuration like so:

+ +
  <VirtualHost *:80>
+  ServerName public.myarchive.org
+  ProxyPass / http://localhost:8081/
+  ProxyPassReverse / http://localhost:8081/
+  </VirtualHost>
+
+  <VirtualHost *:80>
+  ServerName staff.myarchive.org
+  ProxyPass / http://localhost:8080/
+  ProxyPassReverse / http://localhost:8080/
+  </VirtualHost>
+
+ +

The purpose of ProxyPass is to route incoming traffic on the public URL (public.myarchive.org) to port 8081 of your server, where ArchivesSpace’s public interface sits. The purpose of ProxyPassReverse is to intercept outgoing traffic and rewrite the header to match the URL that the browser is expecting to see (public.myarchive.org).

+ +

nginx

+ +

Using nginx as a reverse proxy needs a configuration file like so:

+ +
  server {
+  listen 80;
+  listen [::]:80;
+  server_name staff.myarchive.org;
+  location / {
+      proxy_pass http://localhost:8080/;
+    }
+  }
+        server {
+  listen 80;
+  listen [::]:80;
+  server_name public.myarchive.org;
+  location / {
+      proxy_pass http://localhost:8081/;
+    }
+  }
+
+ +

Step 3: Configuring ArchivesSpace

+ +

The only configuration within ArchivesSpace that needs to occur is adding your domain names to the following lines in config.rb:

+ +
 AppConfig[:frontend_proxy_url] = 'http://staff.myarchive.org'
+ AppConfig[:public_proxy_url] = 'http://public.myarchive.org'
+
+ +

This configuration allows the staff edit links to appear on the public site to users logged in to the staff interface.

+ +

Do not change AppConfig[:public_url] or AppConfig[:frontend_url]; these must retain their port numbers in order for the application to run.

+ + +
+ +
+ + + diff --git a/provisioning/domains.md b/provisioning/domains.md deleted file mode 100644 index 7f15de98..00000000 --- a/provisioning/domains.md +++ /dev/null @@ -1,78 +0,0 @@ -# Serving ArchivesSpace over subdomains - -This document describes how to configure ArchivesSpace and your web server to serve the application over subdomains (e.g., `http://staff.myarchive.org/` and `http://public.myarchive.org/`), which is the recommended -practice. Separate documentation is available if you wish to [serve ArchivesSpace under a prefix](prefix.html) (e.g., `http://aspace.myarchive.org/staff` and -`http://aspace.myarchive.org/public`). - -1. [Configuring Your Firewall](#Step-1%3A-Configuring-Your-Firewall) -2. [Configuring Your Web Server](#Step-2%3A-Configuring-Your-Web-Server) - - [Apache](#Apache) - - [Nginx](#Nginx) -3. [Configuring ArchivesSpace](#Step-3%3A-Configuring-ArchivesSpace) - - - -## Step 1: Configuring Your Firewall - -Since using subdomains negates the need for users to access the application directly on ports 8080 and 8081, these should be locked down to access by localhost only. On a Linux server, this can be done using iptables: - - iptables -A INPUT -p tcp -s localhost --dport 8080 -j ACCEPT - iptables -A INPUT -p tcp --dport 8080 -j DROP - iptables -A INPUT -p tcp -s localhost --dport 8081 -j ACCEPT - iptables -A INPUT -p tcp --dport 8081 -j DROP - - -## Step 2: Configuring Your Web Server - -### Apache - -The [mod_proxy module](https://httpd.apache.org/docs/2.4/mod/mod_proxy.html) is necessary for Apache to route public web traffic to ArchivesSpace's ports as designated in your config.rb file (ports 8080 and 8081 by default). 
- -This can be set up as a reverse proxy in the Apache configuration like so: - - - ServerName public.myarchive.org - ProxyPass / http://localhost:8081/ - ProxyPassReverse / http://localhost:8081/ - - - - ServerName staff.myarchive.org - ProxyPass / http://localhost:8080/ - ProxyPassReverse / http://localhost:8080/ - - -The purpose of ProxyPass is to route *incoming* traffic on the public URL (public.myarchive.org) to port 8081 of your server, where ArchivesSpace's public interface sits. The purpose of ProxyPassReverse is to intercept *outgoing* traffic and rewrite the header to match the URL that the browser is expecting to see (public.myarchive.org). - -### nginx - -Using nginx as a reverse proxy needs a configuration file like so: - - server { - listen 80; - listen [::]:80; - server_name staff.myarchive.org; - location / { - proxy_pass http://localhost:8080/; - } - } - server { - listen 80; - listen [::]:80; - server_name public.myarchive.org; - location / { - proxy_pass http://localhost:8081/; - } - } - - -## Step 3: Configuring ArchivesSpace - -The only configuration within ArchivesSpace that needs to occur is adding your domain names to the following lines in config.rb: - - AppConfig[:frontend_proxy_url] = 'http://staff.myarchive.org' - AppConfig[:public_proxy_url] = 'http://public.myarchive.org' - -This configuration allows the staff edit links to appear on the public site to users logged in to the staff interface. - -Do **not** change `AppConfig[:public_url]` or `AppConfig[:frontend_url]`; these must retain their port numbers in order for the application to run. diff --git a/provisioning/https.html b/provisioning/https.html new file mode 100644 index 00000000..9328f9f6 --- /dev/null +++ b/provisioning/https.html @@ -0,0 +1,301 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/https.md + +

+ +

+ + Report issue on Jira + provisioning/https.md + +

+
+
+ +

Serving ArchivesSpace user-facing applications over HTTPS

+ +

This document describes the approach for those wishing to install +ArchivesSpace in such a manner that all end-user requests (i.e., URLs) +are served over HTTPS rather than HTTP. For the purposes of this documentation, the URLs for the staff and public interfaces will be:

+ +
    +
  • https://staff.myarchive.org - staff interface
  • +
  • https://public.myarchive.org - public interface
  • +
+ +

The configuration described in this document is one possible approach, +and to keep things simple the following are assumed:

+ +
    +
  • ArchivesSpace is running on a single Linux server
  • +
  • The server is running Apache or Nginx
  • +
  • You have obtained an SSL certificate and key from an authority
  • +
  • You have ensured that appropriate firewall ports have been opened (80 and 443).
  • +
+ +
    +
  1. Configuring the Web Server + +
  2. +
  3. Configuring ArchivesSpace
  4. +
+ +

Step 1: Configure Web Server (Apache or Nginx)

+ +

Apache

+

Information about configuring Apache for SSL can be found at http://httpd.apache.org/docs/current/ssl/ssl_howto.html. You should read +that documentation before attempting to configure SSL.

+ +

Setting up SSL

+ +

Use the NameVirtualHost and VirtualHost directives to proxy +requests to the actual application urls. This requires the use of the mod_proxy module in Apache.

+ +
 NameVirtualHost *:443
+
+ <VirtualHost *:443>
+   ServerName staff.myarchive.org
+   SSLEngine On
+   SSLCertificateFile "/path/to/your/cert.crt"
+   SSLCertificateKeyFile "/path/to/your/key.key"
+   RequestHeader set X-Forwarded-Proto "https"
+   ProxyPreserveHost On
+   ProxyPass / http://localhost:8080/
+   ProxyPassReverse / http://localhost:8080/
+ </VirtualHost>
+
+ <VirtualHost *:443>
+   ServerName public.myarchive.org
+   SSLEngine On
+   SSLCertificateFile "/path/to/your/cert.crt"
+   SSLCertificateKeyFile "/path/to/your/key.key"
+   RequestHeader set X-Forwarded-Proto "https"
+   ProxyPreserveHost On
+   ProxyPass / http://localhost:8081/
+   ProxyPassReverse / http://localhost:8081/
+ </VirtualHost>
+
+ +

You may optionally set the Set-Cookie: Secure attribute by adding Header edit Set-Cookie ^(.*)$ $1;HttpOnly;Secure. When a cookie has the Secure attribute, the user agent will include the cookie in an HTTP request only if the request is transmitted over a secure channel.

+ +

Users may encounter a warning in the browser’s console stating Cookie “archivesspace_session” does not have a proper “SameSite” attribute value. Soon, cookies without the “SameSite” attribute or with an invalid value will be treated as “Lax”. This means that the cookie will no longer be sent in third-party contexts (example from Firefox 104) or something similar. Some browsers (for example, Chrome version 80 or above) already enforce this. Standard ArchivesSpace installations should be unaffected, but if you encounter problems with integrations and/or customizations of your particular installation, the following directive may be required: Header edit Set-Cookie ^(.*)$ $1;SameSite=None;Secure. Alternatively, it may be the case that SameSite=Lax (the default) or even SameSite=Strict are more appropriate depending on your functional and/or security requirements. Please refer to https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite or other resources for more information.

+ +

Setting up Redirects

+

When running a site over HTTPS, it’s a good idea to set up a redirect to ensure any outdated HTTP requests are routed to the correct URL. This can be done through Apache as follows:

+ +
<VirtualHost *:80>
+ServerName staff.myarchive.org
+RewriteEngine On
+RewriteCond %{HTTPS} off
+RewriteRule (.*) https://staff.myarchive.org$1 [R,L]
+</VirtualHost>
+
+<VirtualHost *:80>
+ServerName public.myarchive.org
+RewriteEngine On
+RewriteCond %{HTTPS} off
+RewriteRule (.*) https://public.myarchive.org$1 [R,L]
+</VirtualHost>
+
+ +

Nginx

+ +

Information about configuring nginx for SSL can be found at http://nginx.org/en/docs/http/configuring_https_servers.html. You should read +that documentation before attempting to configure SSL.

+ +

+server {
+	listen 80;
+	listen [::]:80;
+	server_name staff.myarchive.org;
+	return 301 https://staff.myarchive.org;
+}
+
+
+server {
+  listen 443 ssl;
+  server_name staff.myarchive.org;
+  charset utf-8;
+
+  ssl_certificate     /path/to/your/fullchain.pem;
+  ssl_certificate_key /path/to/your/key.pem;
+
+  location / {
+    allow 0.0.0.0/0;
+    deny all;
+    proxy_pass http://localhost:8080;
+  }
+}
+
+server {
+	listen 80;
+	listen [::]:80;
+	server_name public.myarchive.org;
+	return 301 https://public.myarchive.org;
+}
+
+
+server {
+  listen 443 ssl;
+  server_name public.myarchive.org;
+  charset utf-8;
+
+  ssl_certificate     /path/to/your/fullchain.pem;
+  ssl_certificate_key /path/to/your/key.pem;
+
+  location / {
+    allow 0.0.0.0/0;
+    deny all;
+    proxy_pass http://localhost:8081;
+  }
+}
+
+
+ +

Step 2: Configure ArchivesSpace

+ +

The following lines need to be altered in the config.rb file:

+
AppConfig[:frontend_proxy_url] = "https://staff.myarchive.org"
+AppConfig[:public_proxy_url] = "https://public.myarchive.org"
+
+

These lines don’t need to be altered and should remain with their default values. E.g.:

+
AppConfig[:frontend_url] = "http://localhost:8080"
+AppConfig[:public_url] = "http://localhost:8081"
+AppConfig[:frontend_proxy_prefix] = proc { "#{URI(AppConfig[:frontend_proxy_url]).path}/".gsub(%r{/+$}, "/") }
+AppConfig[:public_proxy_prefix] = proc { "#{URI(AppConfig[:public_proxy_url]).path}/".gsub(%r{/+$}, "/") }
+
+ + +
+ +
+ + + diff --git a/provisioning/https.md b/provisioning/https.md deleted file mode 100644 index 44a433f6..00000000 --- a/provisioning/https.md +++ /dev/null @@ -1,155 +0,0 @@ -# Serving ArchivesSpace user-facing applications over HTTPS - -This document describes the approach for those wishing to install -ArchivesSpace in such a manner that all end-user requests (i.e., URLs) -are served over HTTPS rather than HTTP. For the purposes of this documentation, the URLs for the staff and public interfaces will be: - - * `https://staff.myarchive.org` - staff interface - * `https://public.myarchive.org` - public interface - -The configuration described in this document is one possible approach, -and to keep things simple the following are assumed: - - * ArchivesSpace is running on a single Linux server - * The server is running Apache or Nginx - * You have obtained an SSL certificate and key from an authority - * You have ensured that appropriate firewall ports have been opened (80 and 443). - -1. [Configuring the Web Server](#Step-1%3A-Configure-Web-Server-(Apache-or-Nginx)) - - [Apache](#Apache) - - [Setting up SSL](#Setting-up-SSL) - - [Setting up Redirects](#Setting-up-Redirects) - - [Nginx](#Nginx) -2. [Configuring ArchivesSpace](#Step-2%3A-Configure-ArchivesSpace) - - -## Step 1: Configure Web Server (Apache or Nginx) - -### Apache -Information about configuring Apache for SSL can be found at http://httpd.apache.org/docs/current/ssl/ssl_howto.html. You should read -that documentation before attempting to configure SSL. - -#### Setting up SSL - - -Use the `NameVirtualHost` and `VirtualHost` directives to proxy -requests to the actual application urls. This requires the use of the `mod_proxy` module in Apache. 
- - NameVirtualHost *:443 - - - ServerName staff.myarchive.org - SSLEngine On - SSLCertificateFile "/path/to/your/cert.crt" - SSLCertificateKeyFile "/path/to/your/key.key" - RequestHeader set X-Forwarded-Proto "https" - ProxyPreserveHost On - ProxyPass / http://localhost:8080/ - ProxyPassReverse / http://localhost:8080/ - - - - ServerName public.myarchive.org - SSLEngine On - SSLCertificateFile "/path/to/your/cert.crt" - SSLCertificateKeyFile "/path/to/your/key.key" - RequestHeader set X-Forwarded-Proto "https" - ProxyPreserveHost On - ProxyPass / http://localhost:8081/ - ProxyPassReverse / http://localhost:8081/ - - -You may optionally set the `Set-Cookie: Secure attribute` by adding `Header edit Set-Cookie ^(.*)$ $1;HttpOnly;Secure`. When a cookie has the Secure attribute, the user agent will include the cookie in an HTTP request only if the request is transmitted over a secure channel. - -Users may encounter a warning in the browser's console stating `Cookie “archivesspace_session” does not have a proper “SameSite” attribute value. Soon, cookies without the “SameSite” attribute or with an invalid value will be treated as “Lax”. This means that the cookie will no longer be sent in third-party contexts` (example from Firefox 104) or something similar. Some browsers (for example, Chrome version 80 or above) already enforce this. Standard ArchivesSpace installations should be unaffected, but if you encounter problems with integrations and/or customizations of your particular installation, the following directive may be required: `Header edit Set-Cookie ^(.*)$ $1;SameSite=None;Secure`. Alternatively, it may be the case that `SameSite=Lax` (the default) or even `SameSite=Strict` are more appropriate depending on your functional and/or security requirements. Please refer to https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite or other resources for more information. 
- -#### Setting up Redirects -When running a site over HTTPS, it's a good idea to set up a redirect to ensure any outdated HTTP requests are routed to the correct URL. This can be done through Apache as follows: - -``` - -ServerName staff.myarchive.org -RewriteEngine On -RewriteCond %{HTTPS} off -RewriteRule (.*) https://staff.myarchive.org$1 [R,L] - - - -ServerName public.myarchive.org -RewriteEngine On -RewriteCond %{HTTPS} off -RewriteRule (.*) https://public.myarchive.org$1 [R,L] - -``` - -### Nginx - -Information about configuring nginx for SSL can be found at http://nginx.org/en/docs/http/configuring_https_servers.html You should read -that documentation before attempting to configure SSL. - -``` - -server { - listen 80; - listen [::]:80; - server_name staff.myarchive.org; - return 301 https://staff.myarchive.org; -} - - -server { - listen 443 ssl; - server_name staff.myarchive.org; - charset utf-8; - } - - ssl_certificate /path/to/your/fullchain.pem; - ssl_certificate_key /path/to/your/key.pem - - location / { - allow 0.0.0.0/0; - deny all; - proxy_pass http://localhost:8081; - } -} - -server { - listen 80; - listen [::]:80; - server_name public.myarchive.org; - return 301 https://public.myarchive.org; -} - - -server { - listen 443 ssl; - server_name staff.myarchive.org; - charset utf-8; - } - - ssl_certificate /path/to/your/fullchain.pem; - ssl_certificate_key /path/to/your/key.pem - - location / { - allow 0.0.0.0/0; - deny all; - proxy_pass http://localhost:8080; - } -} - -``` - -## Step 2: Configure ArchivesSpace - -The following lines need to be altered in the config.rb file: -``` -AppConfig[:frontend_proxy_url] = "https://staff.myarchive.org" -AppConfig[:public_proxy_url] = "https://public.myarchive.org" -``` -These lines don't need to be altered and should remain with their default values. 
E.g.: -``` -AppConfig[:frontend_url] = "http://localhost:8080" -AppConfig[:public_url] = "http://localhost:8081" -AppConfig[:frontend_proxy_prefix] = proc { "#{URI(AppConfig[:frontend_proxy_url]).path}/".gsub(%r{/+$}, "/") } -AppConfig[:public_proxy_prefix] = proc { "#{URI(AppConfig[:public_proxy_url]).path}/".gsub(%r{/+$}, "/") } -``` diff --git a/provisioning/index.html b/provisioning/index.html new file mode 100644 index 00000000..bc671033 --- /dev/null +++ b/provisioning/index.html @@ -0,0 +1,153 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/README.md + +

+ +

+ + Report issue on Jira + provisioning/README.md + +

+
+
+ +

ArchivesSpace provisioning and server configuration

+ + + + +
+ +
+ + + diff --git a/provisioning/jmeter.html b/provisioning/jmeter.html new file mode 100644 index 00000000..da0bc9d3 --- /dev/null +++ b/provisioning/jmeter.html @@ -0,0 +1,151 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/jmeter.md + +

+ +

+ + Report issue on Jira + provisioning/jmeter.md + +

+
+
+ +

JMeter Test Group Template

+ +

Creating a test group:

+ +

Load the file ‘example_test_plan.jmx’ into JMeter and make sure the following are true for the example to run successfully:

+ +
    +
  • The backend is running on localhost port 4567
  • +
  • There is at least one repository, and its url is /repositories/2
  • +
+ +

The example will log in to the backend, store the session key as a JMeter variable, and make two basic requests, one of which will require a session key.

+ + +
+ +
+ + + diff --git a/provisioning/jmeter.md b/provisioning/jmeter.md deleted file mode 100644 index 4ed5e385..00000000 --- a/provisioning/jmeter.md +++ /dev/null @@ -1,10 +0,0 @@ -# JMeter Test Group Template - -## Creating a test group: - - Load the file 'example_test_plan.jmx' into JMeter and make sure the following are true for the example to run successfully: - - * The backend is running on localhost port 4567 - * There is at least one repository, and its url is /repositories/2 - -The example will log in to the backend, store the session key as a JMeter variable, and make two basic requests, one of which will require a session key. diff --git a/provisioning/mysql.html b/provisioning/mysql.html new file mode 100644 index 00000000..6aed0802 --- /dev/null +++ b/provisioning/mysql.html @@ -0,0 +1,219 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/mysql.md + +

+ +

+ + Report issue on Jira + provisioning/mysql.md + +

+
+
+ +

Running ArchivesSpace against MySQL

+ +

Out of the box, the ArchivesSpace distribution runs against an +embedded database, but this is only suitable for demonstration +purposes. When you are ready to starting using ArchivesSpace with +real users and data, you should switch to using MySQL. MySQL offers +significantly better performance when multiple people are using the +system, and will ensure that your data is kept safe.

+ +

ArchivesSpace is currently able to run on MySQL version 5.x & 8.x.

+ +

Download MySQL Connector

+ +

ArchivesSpace requires the +MySQL Connector for Java, +which must be downloaded separately because of its licensing agreement. +Download the Connector and place it in a location where ArchivesSpace can +find it on its classpath:

+ +
     $ cd lib
+     $ curl -Oq https://repo1.maven.org/maven2/com/mysql/mysql-connector-j/8.0.33/mysql-connector-j-8.0.33.jar
+
+ +

Note that the version of the MySQL connector may be different by the +time you read this.

+ +

Set up your MySQL database

+ +

Next, create an empty database in MySQL and grant access to a dedicated +ArchivesSpace user. The following example uses username as +and password as123.

+ +

NOTE: WHEN CREATING THE DATABASE, YOU MUST SET THE DEFAULT CHARACTER +ENCODING FOR THE DATABASE TO BE utf8. This is particularly important +if you use a MySQL client to create the database (e.g. Navicat, MySQL +Workbench, phpMyAdmin, etc.).

+ + + +

NOTE: If using AWS RDS MySQL databases, binary logging is not enabled by default and updates will fail. To enable binary logging, you must create a custom db parameter group for the database and set the log_bin_trust_function_creators = 1. See Working with DB Parameter Groups for information about RDS parameter groups. Within a MySQL session you can also try SET GLOBAL log_bin_trust_function_creators = 1;

+ +
     $ mysql -uroot -p
+
+     mysql> create database archivesspace default character set utf8mb4;
+     Query OK, 1 row affected (0.08 sec)
+
+ +

If using MySQL 5.7 and below:

+ +
     mysql> grant all on archivesspace.* to 'as'@'localhost' identified by 'as123';
+     Query OK, 0 rows affected (0.21 sec)
+
+ +

If using MySQL 8+:

+ +
     mysql> create user 'as'@'localhost' identified by 'as123';
+     Query OK, 0 rows affected (0.08 sec)
+
+     mysql> grant all privileges on archivesspace.* to 'as'@'localhost';
+     Query OK, 0 rows affected (0.21 sec)
+
+ +

Then, modify your config/config.rb file to refer to your MySQL +database. When you modify your configuration file, MAKE SURE THAT YOU +SPECIFY THAT THE CHARACTER ENCODING FOR THE DATABASE TO BE UTF-8 as shown +below:

+ +
 AppConfig[:db_url] = "jdbc:mysql://localhost:3306/archivesspace?user=as&password=as123&useUnicode=true&characterEncoding=UTF-8"
+
+ +

There is a database setup script that will create all the tables that +ArchivesSpace requires. Run this with:

+ +
scripts/setup-database.sh  # or setup-database.bat under Windows
+
+ +

You can now follow the instructions in the “Getting Started” section to start +your ArchivesSpace application.

+ +

**NOTE: For MySQL 8. MySQL 8 uses a new method (caching_sha2_password) as the default authentication plugin instead of the old mysql_native_password that MySQL 5.7 and older used. This may require starting a MySQL 8 server with the --default-authentication-plugin=mysql_native_password option. You may also be able to change the auth mechanism on a per user basis by logging into mysql and running ALTER USER 'as'@'localhost' IDENTIFIED WITH mysql_native_password BY 'as123';. Also be sure to have the LATEST MySQL Connector for Java from MySQL in your /lib/ directory for ArchivesSpace.

+ + +
+ +
+ + + diff --git a/provisioning/mysql.md b/provisioning/mysql.md deleted file mode 100644 index ab75540c..00000000 --- a/provisioning/mysql.md +++ /dev/null @@ -1,76 +0,0 @@ -# Running ArchivesSpace against MySQL - -Out of the box, the ArchivesSpace distribution runs against an -embedded database, but this is only suitable for demonstration -purposes. When you are ready to starting using ArchivesSpace with -real users and data, you should switch to using MySQL. MySQL offers -significantly better performance when multiple people are using the -system, and will ensure that your data is kept safe. - -ArchivesSpace is currently able to run on MySQL version 5.x & 8.x. - -## Download MySQL Connector - -ArchivesSpace requires the -[MySQL Connector for Java](http://dev.mysql.com/downloads/connector/j/), -which must be downloaded separately because of its licensing agreement. -Download the Connector and place it in a location where ArchivesSpace can -find it on its classpath: - - $ cd lib - $ curl -Oq https://repo1.maven.org/maven2/com/mysql/mysql-connector-j/8.0.33/mysql-connector-j-8.0.33.jar - -Note that the version of the MySQL connector may be different by the -time you read this. - - -## Set up your MySQL database - -Next, create an empty database in MySQL and grant access to a dedicated -ArchivesSpace user. The following example uses username `as` -and password `as123`. - -**NOTE: WHEN CREATING THE DATABASE, YOU MUST SET THE DEFAULT CHARACTER -ENCODING FOR THE DATABASE TO BE `utf8`.** This is particularly important -if you use a MySQL client to create the database (e.g. Navicat, MySQL -Workbench, phpMyAdmin, etc.). - - - -**NOTE: If using AWS RDS MySQL databases, binary logging is not enabled by default and updates will fail.** To enable binary logging, you must create a custom db parameter group for the database and set the `log_bin_trust_function_creators = 1`. 
See [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) for information about RDS parameter groups. Within a MySQL session you can also try `SET GLOBAL log_bin_trust_function_creators = 1;` - - - $ mysql -uroot -p - - mysql> create database archivesspace default character set utf8mb4; - Query OK, 1 row affected (0.08 sec) - -If using MySQL 5.7 and below: - - mysql> grant all on archivesspace.* to 'as'@'localhost' identified by 'as123'; - Query OK, 0 rows affected (0.21 sec) - -If using MySQL 8+: - - mysql> create user 'as'@'localhost' identified by 'as123'; - Query OK, 0 rows affected (0.08 sec) - - mysql> grant all privileges on archivesspace.* to 'as'@'localhost'; - Query OK, 0 rows affected (0.21 sec) - -Then, modify your `config/config.rb` file to refer to your MySQL -database. When you modify your configuration file, **MAKE SURE THAT YOU -SPECIFY THAT THE CHARACTER ENCODING FOR THE DATABASE TO BE `UTF-8`** as shown -below: - - AppConfig[:db_url] = "jdbc:mysql://localhost:3306/archivesspace?user=as&password=as123&useUnicode=true&characterEncoding=UTF-8" - -There is a database setup script that will create all the tables that -ArchivesSpace requires. Run this with: - - scripts/setup-database.sh # or setup-database.bat under Windows - -You can now follow the instructions in the "Getting Started" section to start -your ArchivesSpace application. - -**NOTE: For MySQL 8. MySQL 8 uses a new method (caching_sha2_password) as the default authentication plugin instead of the old mysql_native_password that MySQL 5.7 and older used. This may require starting a MySQL 8 server with the `--default-authentication-plugin=mysql_native_password` option. You may also be able to change the auth mechanism on a per user basis by logging into mysql and running `ALTER USER 'as'@'localhost' IDENTIFIED WITH mysql_native_password BY 'as123';`. 
Also be sure to have the LATEST [MySQL Connector for Java](http://dev.mysql.com/downloads/connector/j/) from MySQL in your /lib/ directory for ArchivesSpace. diff --git a/provisioning/newrelic.html b/provisioning/newrelic.html new file mode 100644 index 00000000..25a6760a --- /dev/null +++ b/provisioning/newrelic.html @@ -0,0 +1,180 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/newrelic.md + +

+ +

+ + Report issue on Jira + provisioning/newrelic.md + +

+
+
+ +

Application monitoring with New Relic

+ +

New Relic is an application performance monitoring tool (amongst other things).

+ +

To use with ArchivesSpace you must:

+ +
    +
  • Signup for an account at newrelic (there is a free tier and paid plans)
  • +
  • Edit config.rb to: +
      +
    • activate the newrelic plugin
    • +
    • add the New Relic license key
    • +
    • add an application name to identify the ArchivesSpace instance in the New Relic dashboard
    • +
    +
  • +
+ +

For example, in config.rb:

+ +
## You may have other plugins
+AppConfig[:plugins] = ['local', 'newrelic']
+
+AppConfig[:newrelic_key] = "enteryourkeyhere"
+AppConfig[:newrelic_app_name] = "ArchivesSpace"
+
+ +
    +
  • Install the New Relic agent library by initializing the plugin: +
      ## For Linux/OSX
    +   $ scripts/initialize-plugin.sh newrelic
    +
    +   ## For Windows
    +   % scripts\initialize-plugin.bat newrelic
    +
    +
  • +
  • Start, or restart ArchivesSpace to pick up the configuration.
  • +
+ +

Within a few minutes the application should be visible in the New Relic dashboard with data being collected.

+ +
+ + +
+ +
+ + + diff --git a/provisioning/newrelic.md b/provisioning/newrelic.md deleted file mode 100644 index 0d6a15e4..00000000 --- a/provisioning/newrelic.md +++ /dev/null @@ -1,35 +0,0 @@ -# Application monitoring with New Relic - -[New Relic](http://newrelic.com/) is an application performance monitoring tool (amongst other things). - -**To use with ArchivesSpace you must:** - -- Signup for an account at newrelic (there is a free tier and paid plans) -- Edit config.rb to: - - activate the `newrelic` plugin - - add the New Relic license key - - add an application name to identify the ArchivesSpace instance in the New Relic dashboard - -For example, in config.rb: - -``` -## You may have other plugins -AppConfig[:plugins] = ['local', 'newrelic'] - -AppConfig[:newrelic_key] = "enteryourkeyhere" -AppConfig[:newrelic_app_name] = "ArchivesSpace" -``` - -- Install the New Relic agent library by initializing the plugin: -``` - ## For Linux/OSX - $ scripts/initialize-plugin.sh newrelic - - ## For Windows - % scripts\initialize-plugin.bat newrelic -``` -- Start, or restart ArchivesSpace to pick up the configuration. - -Within a few minutes the application should be visible in the New Relic dashboard with data being collected. - ---- diff --git a/provisioning/prefix.html b/provisioning/prefix.html new file mode 100644 index 00000000..18d8e80f --- /dev/null +++ b/provisioning/prefix.html @@ -0,0 +1,198 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/prefix.md + +

+ +

+ + Report issue on Jira + provisioning/prefix.md + +

+
+
+ +

Running ArchivesSpace under a prefix

+ +

Important Note: Prefixes do NOT work properly in versions between 2.0.1 and 2.2.2

+ +

This document describes a simple approach for those wishing to deviate from the recommended +practice of running each user-facing ArchivesSpace application on its own subdomain, and instead +serve each application under a prefix, e.g.

+ +
http://aspace.myarchive.org/staff
+http://aspace.myarchive.org/public
+
+ +

This configuration described in this document is one possible approach, +and to keep things simple the following are assumed:

+ +
    +
  • ArchivesSpace is running on a single Linux server
  • +
  • The server is running the Apache 2.2+ webserver
  • +
+ +

Unless otherwise stated, it is assumed that you have root access on +your machines, and all commands are to be run as root (or with sudo).

+ +

Step 1: Setup proxies in your Apache configuration

+ +

The following edits can be made in the httpd.conf file itself, or in an included file:

+ +
ProxyPass /staff http://localhost:8080/staff
+ProxyPassReverse /staff http://localhost:8080/
+ProxyPass /public http://localhost:8081/public
+ProxyPassReverse /public http://localhost:8081/
+
+ +

Now restart Apache.

+ +

Step 2: Install and configure ArchivesSpace

+ +

Follow the instructions in the main README to download and install ArchivesSpace.

+ +

Open the file archivesspace/config/config.rb and add the following lines:

+ +
AppConfig[:frontend_proxy_url] = 'http://aspace.myarchive.org/staff'
+AppConfig[:public_proxy_url] = 'http://aspace.myarchive.org/public'
+
+ +

(Note: These lines should NOT begin with a ‘#’ character.)

+ +

Start ArchivesSpace.

+ +

Step 3: (Optional) Lock down ports 8080 and 8081

+ +

By default, the staff and public applications are accessible on ports 8080 and 8081

+ +
http://aspace.myarchive.org:8080
+http://aspace.myarchive.org:8081
+
+ +

Since these are not the URLs at which users should access the application, you will probably +want to close them off. See README_HTTPS for more information on closing ports using iptables.

+ + +
+ +
+ + + diff --git a/provisioning/prefix.md b/provisioning/prefix.md deleted file mode 100644 index d7f7b089..00000000 --- a/provisioning/prefix.md +++ /dev/null @@ -1,54 +0,0 @@ -# Running ArchivesSpace under a prefix - -**Important Note: Prefixes do NOT work properly in versions between 2.0.1 and 2.2.2** - -This document describes a simple approach for those wishing to deviate from the recommended -practice of running each user-facing ArchivesSpace application on its own subdomain, and instead -serve each application under a prefix, e.g. - - http://aspace.myarchive.org/staff - http://aspace.myarchive.org/public - -This configuration described in this document is one possible approach, -and to keep things simple the following are assumed: - - * ArchivesSpace is running on a single Linux server - * The server is running the Apache 2.2+ webserver - -Unless otherwise stated, it is assumed that you have root access on -your machines, and all commands are to be run as root (or with sudo). - - -## Step 1: Setup proxies in your Apache configuration - -The following edits can be made in the httpd.conf file itself, or in an included file: - - ProxyPass /staff http://localhost:8080/staff - ProxyPassReverse /staff http://localhost:8080/ - ProxyPass /public http://localhost:8081/public - ProxyPassReverse /public http://localhost:8081/ - -Now restart Apache. - -## Step 2: Install and configure ArchivesSpace - -Follow the instructions in the main README to download and install ArchivesSpace. - -Open the file `archivesspace/config/config.rb` and add the following lines: - - AppConfig[:frontend_proxy_url] = 'http://aspace.myarchive.org/staff' - AppConfig[:public_proxy_url] = 'http://aspace.myarchive.org/public' - -(Note: These lines should NOT begin with a '#' character.) - -Start ArchivesSpace. 
- -## Step 3: (Optional) Lock down ports 8080 and 8081 - -By default, the staff and public applications are accessible on ports 8080 and 8081 - - http://aspace.myarchive.org:8080 - http://aspace.myarchive.org:8081 - -Since these are not the URLs at which users should access the application, you will probably -want to close them off. See README_HTTPS for more information on closing ports using iptables. diff --git a/provisioning/robots.html b/provisioning/robots.html new file mode 100644 index 00000000..f15d21da --- /dev/null +++ b/provisioning/robots.html @@ -0,0 +1,178 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/robots.md + +

+ +

+ + Report issue on Jira + provisioning/robots.md + +

+
+
+ +

ArchivesSpace robots.txt

+ +

The easiest way to add a robots.txt to your site is simply create +one in your /config/ directly. This file will be served as a standard +robots.txt file when you start your site.

+ +

If you’re not able to do that, you can use a seperate file and your proxy.

+ +

For Apache the config would look like this:

+ +
<Location "/robots.txt">
+ SetHandler None
+ Require all granted
+</Location>
+Alias /robots.txt /var/www/robots.txt
+
+ +

nginx, more like this:

+ +
  location /robots.txt {
+    alias /var/www/robots.txt;
+  }
+
+ +

You may also add robots meta-tags to your layout_head.html.erb to be included in the header area of your site.

+ +

example:

+ +

<meta name="robots" content="noindex, nofollow">

+ +

A sensible starting point for a robots.txt file looks something like this:

+ +
Disallow: /search*
+Disallow: /inventory/*
+Disallow: /collection_organization/*
+Disallow: /repositories/*/top_containers/*
+Disallow: /check_session*
+Disallow: /repositories/*/resources/*/tree/*
+
+ + +
+ +
+ + + diff --git a/provisioning/robots.md b/provisioning/robots.md deleted file mode 100644 index 8e019d1f..00000000 --- a/provisioning/robots.md +++ /dev/null @@ -1,42 +0,0 @@ -# ArchivesSpace robots.txt - -The easiest way to add a `robots.txt` to your site is simply create -one in your `/config/` directly. This file will be served as a standard -`robots.txt` file when you start your site. - -If you're not able to do that, you can use a seperate file and your proxy. - -For Apache the config would look like this: - -``` - - SetHandler None - Require all granted - -Alias /robots.txt /var/www/robots.txt -``` - -nginx, more like this: - -``` - location /robots.txt { - alias /var/www/robots.txt; - } -``` - -You may also add robots meta-tags to your `layout_head.html.erb` to be included in the header area of your site. - -example: - -`` - -A sensible starting point for a `robots.txt` file looks something like this: - -``` -Disallow: /search* -Disallow: /inventory/* -Disallow: /collection_organization/* -Disallow: /repositories/*/top_containers/* -Disallow: /check_session* -Disallow: /repositories/*/resources/*/tree/* -``` diff --git a/provisioning/solr.html b/provisioning/solr.html new file mode 100644 index 00000000..43d726f5 --- /dev/null +++ b/provisioning/solr.html @@ -0,0 +1,331 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/solr.md + +

+ +

+ + Report issue on Jira + provisioning/solr.md + +

+
+
+ +

Running ArchivesSpace with external Solr

+ +

For ArchivesSpace > 3.1.1 this is required. For previous versions it is optional.

+ +

Supported Solr Versions

+ +

See the Solr requirement notes

+ +

Install Solr

+ +

Refer to the Solr documentation +for instructions on setting up Solr on your server.

+ +

You will download the Solr package and extract it to a folder of your choosing. Do not start Solr +until you have added the ArchivesSpace configuration files.

+ +

We strongly recommend a standalone mode installation. No support will be provided for Solr +Cloud deployments specifically (i.e. we cannot help troubleshoot Zookeeper).

+ +

Create a configset

+ +

Before running Solr you will need to +setup a configset.

+ +

Linux

+ +

Using the command line:

+ +
mkdir -p /$path/$to/$solr/server/solr/configsets/archivesspace/conf
+
+ +

Be sure to replace /$path/$to/$solr with your actual Solr location, which might be something like:

+ +
mkdir -p /opt/solr/server/solr/configsets/archivesspace/conf
+
+ +

Windows

+ +

Right click on your Solr directory and open in Windows Terminal (Powershell).

+ +
mkdir -p .\server\solr\configsets\archivesspace\conf
+
+ +

You should see something like this in response:

+ +
Directory: C:\Users\archivesspace\Projects\solr-8.10.1\server\solr\configsets\archivesspace
+Mode                 LastWriteTime         Length Name
+----                 -------------         ------ ----
+d-----        10/25/2021  12:15 PM                conf
+
+ +
+ +

Copy the ArchivesSpace Solr configuration files from the solr directory included +with the release into the $SOLR_HOME/server/solr/configsets/archivesspace/conf directory.

+ +

There should be four files:

+ +
    +
  • schema.xml
  • +
  • solrconfig.xml
  • +
  • stopwords.txt
  • +
  • synonyms.txt
  • +
+ +
ls .\server\solr\configsets\archivesspace\conf\
+
+Directory: C:\Users\archivesspace\Projects\solr-8.10.1\server\solr\configsets\archivesspace\conf
+
+Mode                 LastWriteTime         Length Name
+----                 -------------         ------ ----
+-a----        10/25/2021  12:18 PM          18291 schema.xml
+-a----        10/25/2021  12:18 PM           3046 solrconfig.xml
+-a----        10/25/2021  12:18 PM              0 stopwords.txt
+-a----        10/25/2021  12:18 PM              0 synonyms.txt
+
+ +

Note: your exact output may be slightly different.

+ +

Setup the environment

+ +

From Solr v9 ArchivesSpace requires the use of Solr modules. +We recommend using the environment variable option to specify the modules to use:

+ +
SOLR_MODULES=analysis-extras
+
+ +

This environment variable needs to be available to the Solr instance at runtime.

+ +

For instructions on how set an environment variable here are some recommended articles:

+ +
    +
  • https://www.freecodecamp.org/news/how-to-set-an-environment-variable-in-linux
  • +
  • https://www.java.com/en/download/help/path.html (includes Windows)
  • +
+ +

Setup a Solr core

+ +

With the configset in place run the command to create an ArchivesSpace core:

+ +
bin/solr start
+
+ +

Wait for Solr to start (running as a non-admin user):

+ +
.\bin\solr start
+"java version info is 11.0.12"
+"Extracted major version is 11"
+OpenJDK 64-Bit Server VM warning: JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.
+Waiting up to 30 to see Solr running on port 8983
+Started Solr server on port 8983. Happy searching!
+
+ +

You can check that Solr is running on http://localhost:8983.

+ +

Now create the core:

+ +
bin/solr create -c archivesspace -d archivesspace
+
+ +

You should see confirmation:

+ +
"java version info is 11.0.12"
+"Extracted major version is 11"
+
+Created new core 'archivesspace'
+
+ +

In the browser you should be able to access the ArchivesSpace schema.

+ +

Disable the embedded server Solr instance (optional <= 3.1.1 only)

+ +

Edit the ArchivesSpace config.rb file:

+ +
AppConfig[:enable_solr] = false
+
+ +

Note that doing this means that you will have to backup Solr manually.

+ +

Set the Solr url in your config.rb file

+ +

This config setting should point to your Solr instance:

+ +
AppConfig[:solr_url] = "http://localhost:8983/solr/archivesspace"
+
+ +

If you are not running ArchivesSpace and Solr on the same server, update +localhost to your Solr address.

+ +

By default, on startup, ArchivesSpace will check that the Solr configuration +appears to be correct and will raise an error if not. You can disable this check +by setting AppConfig[:solr_verify_checksums] = false in config.rb.

+ +

Please note: if you’re upgrading an existing installation of ArchivesSpace to use an external Solr, you will need to trigger a full re-index. +See Indexes for more details.

+ +
+ +

You can now follow the instructions in the Getting started section to start +your ArchivesSpace application.

+ +
+ +

Upgrading Solr

+ +

If you are using an older version of Solr than is recommended you may need (if called out +in release notes) or want to upgrade. Before performing an upgrade it is recommended that you review:

+ + + +

You should also review this document as the installation steps may include +instructions that were not present in the past. For example, from Solr v9 there is a +requirement to use Solr modules with instructions to configure the modules using environment +variables.

+ +

The crucial part will be ensuring that ArchivesSpace’s schema is being used for the +ArchivesSpace Solr index. The config setting AppConfig[:solr_verify_checksums] = true +will perform a check on startup that confirms this is the case, otherwise ArchivesSpace +will not be able to start up.

+ +

From ArchivesSpace 3.5+ AppConfig[:solr_verify_checksums] does not check the +solrconfig.xml file. Therefore you can make changes to it without ArchivesSpace failing +on startup. However, for an upgrade you will want to at least compare the ArchivesSpace +solrconfig.xml to the one that is in use in case there are changes that need to be made to +work with the upgraded-to version of Solr. For example the ArchivesSpace Solr v8 solrconfig.xml +will not work as is with Solr v9.

+ +

After upgrading Solr you should trigger a full re-index. Instructions for this are in +Indexes.

+ + +
+ +
+ + + diff --git a/provisioning/solr.md b/provisioning/solr.md deleted file mode 100644 index f9c0ffd0..00000000 --- a/provisioning/solr.md +++ /dev/null @@ -1,198 +0,0 @@ -# Running ArchivesSpace with external Solr - -For ArchivesSpace > 3.1.1 this is **required**. For previous versions it is optional. - -## Supported Solr Versions - -See the [Solr requirement notes](../administration/getting_started.html#solr) - -## Install Solr - -Refer to the [Solr documentation](https://solr.apache.org/guide/solr/latest/) -for instructions on setting up Solr on your server. - -You will download the Solr package and extract it to a folder of your choosing. Do not start Solr -until you have added the ArchivesSpace configuration files. - -**We strongly recommend a standalone mode installation. No support will be provided for Solr -Cloud deployments specifically (i.e. we cannot help troubleshoot Zookeeper).** - -## Create a configset - -Before running Solr you will need to -setup a [configset](https://solr.apache.org/guide/8_10/config-sets.html#configsets-in-standalone-mode). - -### Linux - -Using the command line: - -``` -mkdir -p /$path/$to/$solr/server/solr/configsets/archivesspace/conf -``` - -Be sure to replace `/$path/$to/$solr` with your actual Solr location, which might be something like: - -``` -mkdir -p /opt/solr/server/solr/configsets/archivesspace/conf -``` - -### Windows - -Right click on your Solr directory and open in Windows Terminal (Powershell). - -``` -mkdir -p .\server\solr\configsets\archivesspace\conf -``` - -You should see something like this in response: - -``` -Directory: C:\Users\archivesspace\Projects\solr-8.10.1\server\solr\configsets\archivesspace -Mode LastWriteTime Length Name ----- ------------- ------ ---- -d----- 10/25/2021 12:15 PM conf -``` - ---- - -Copy the ArchivesSpace Solr configuration files from the `solr` directory included -with the release into the `$SOLR_HOME/server/solr/configsets/archivesspace/conf` directory. 
- -There should be four files: - -- schema.xml -- solrconfig.xml -- stopwords.txt -- synonyms.txt - -``` -ls .\server\solr\configsets\archivesspace\conf\ - -Directory: C:\Users\archivesspace\Projects\solr-8.10.1\server\solr\configsets\archivesspace\conf - -Mode LastWriteTime Length Name ----- ------------- ------ ---- --a---- 10/25/2021 12:18 PM 18291 schema.xml --a---- 10/25/2021 12:18 PM 3046 solrconfig.xml --a---- 10/25/2021 12:18 PM 0 stopwords.txt --a---- 10/25/2021 12:18 PM 0 synonyms.txt -``` - -_Note: your exact output may be slightly different._ - -## Setup the environment - -From Solr v9 ArchivesSpace requires the use of [Solr modules](https://solr.apache.org/guide/solr/latest/configuration-guide/solr-modules.html). -We recommend using the environment variable option to specify the modules to use: - -``` -SOLR_MODULES=analysis-extras -``` - -This environment variable needs to be available to the Solr instance at runtime. - -For instructions on how set an environment variable here are some recommended articles: - -- https://www.freecodecamp.org/news/how-to-set-an-environment-variable-in-linux -- https://www.java.com/en/download/help/path.html (includes Windows) - -## Setup a Solr core - -With the `configset` in place run the command to create an ArchivesSpace core: - -```bash -bin/solr start -``` - -Wait for Solr to start (running as a non-admin user): - -``` -.\bin\solr start -"java version info is 11.0.12" -"Extracted major version is 11" -OpenJDK 64-Bit Server VM warning: JVM cannot use large page memory because it does not have enough privilege to lock pages in memory. -Waiting up to 30 to see Solr running on port 8983 -Started Solr server on port 8983. Happy searching! -``` - -You can check that Solr is running on [http://localhost:8983](http://localhost:8983). 
- -Now create the core: - -``` -bin/solr create -c archivesspace -d archivesspace -``` - -You should see confirmation: - -``` -"java version info is 11.0.12" -"Extracted major version is 11" - -Created new core 'archivesspace' -``` - -In the browser you should be able to access the [ArchivesSpace schema](http://localhost:8983/solr/#/archivesspace/files?file=schema.xml). - -## Disable the embedded server Solr instance (optional <= 3.1.1 only) - -Edit the ArchivesSpace config.rb file: - -``` -AppConfig[:enable_solr] = false -``` - -Note that doing this means that you will have to backup Solr manually. - -## Set the Solr url in your config.rb file - -This config setting should point to your Solr instance: - -``` -AppConfig[:solr_url] = "http://localhost:8983/solr/archivesspace" -``` - -If you are not running ArchivesSpace and Solr on the same server, update -`localhost` to your Solr address. - -By default, on startup, ArchivesSpace will check that the Solr configuration -appears to be correct and will raise an error if not. You can disable this check -by setting `AppConfig[:solr_verify_checksums] = false` in `config.rb`. - -Please note: if you're upgrading an existing installation of ArchivesSpace to use an external Solr, you will need to trigger a full re-index. -See [Indexes](../administration/indexes.html) for more details. - ---- - -You can now follow the instructions in the [Getting started](../administration/getting_started.html) section to start -your ArchivesSpace application. - ---- - -## Upgrading Solr - -If you are using an older version of Solr than is recommended you may need (if called out -in release notes) or want to upgrade. 
Before performing an upgrade it is recommended that you review: - -- [Solr upgrade notes](https://solr.apache.org/guide/solr/latest/upgrade-notes/solr-upgrade-notes.html) -- [ArchivesSpace's release notes](https://github.com/archivesspace/archivesspace/releases) - -You should also review this document as the installation steps may include -instructions that were not present in the past. For example, from Solr v9 there is a -requirement to use Solr modules with instructions to configure the modules using environment -variables. - -The crucial part will be ensuring that ArchivesSpace's schema is being used for the -ArchivesSpace Solr index. The config setting `AppConfig[:solr_verify_checksums] = true` -will perform a check on startup that confirms this is the case, otherwise ArchivesSpace -will not be able to start up. - -From ArchivesSpace 3.5+ `AppConfig[:solr_verify_checksums]` does not check the -`solrconfig.xml` file. Therefore you can make changes to it without ArchivesSpace failing -on startup. However, for an upgrade you will want to at least compare the ArchivesSpace -`solrconfig.xml` to the one that is in use in case there are changes that need to be made to -work with the upgraded-to version of Solr. For example the ArchivesSpace Solr v8 `solrconfig.xml` -will not work as is with Solr v9. - -After upgrading Solr you should trigger a full re-index. Instructions for this are in -[Indexes](../administration/indexes.html). diff --git a/provisioning/tuning.html b/provisioning/tuning.html new file mode 100644 index 00000000..1fd9993c --- /dev/null +++ b/provisioning/tuning.html @@ -0,0 +1,186 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + provisioning/tuning.md + +

+ +

+ + Report issue on Jira + provisioning/tuning.md + +

+
+
+ +

Tuning ArchivesSpace

+ +

ArchivesSpace is a stack of web applications which may require special tuning in order to run most effectively. This is especially the case for institutions with lots of data or many simultaneous users editing metadata. +Keep in mind that ArchivesSpace can be hosted on multiple servers, either in a multitenant setup or by deploying the various applications ( i.e. backend, frontend, public, solr, & indexer ) on separate servers.

+ +

Application Settings

+ +

The application itself can be tuned in numerous ways. It’s a good idea to read the configuration documentation, as there are numerous settings that can be adjusted to fit your needs.

+ +

An important thing to note is that since ArchivesSpace is a Java application, it’s possible to set the memory allocations used by the JVM. There are numerous articles on the internet full of information about what the optimal settings are, which will depend greatly on the load your server is experiencing and the hardware. It’s a good idea to monitor the application and ensure that it’s not hitting the top limits of what you’ve set as the heap.

+ +

These settings are:

+ +
    +
  • ASPACE_JAVA_XMX : Maximum heap space ( maps to Java’s Xmx, default “Xmx1024m” )
  • +
  • ASPACE_JAVA_XSS : Thread stack size ( maps to Xss, default “Xss2m” )
  • +
  • ASPACE_GC_OPTS : Options used by the Java garbage collector ( default : “-XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:NewRatio=1” )
  • +
+ +

To modify these settings, Linux users can either export an environment variable ( e.g. $ export ASPACE_JAVA_XMX=”Xmx2048m” ) or edit the archivesspace.sh startup script and modify the defaults.

+ +

Windows users must edit the archivesspace.bat file.

+ +

If you’re having trouble with errors like java.lang.OutOfMemoryError try doubling the ASPACE_JAVA_XMX. On Linux you can do this either by setting an environment variable like $ export ASPACE_JAVA_XMX="Xmx2048m" or by editing archivesspace.sh:

+ +
if [ "$ASPACE_JAVA_XMX" = "" ]; then
+    ASPACE_JAVA_XMX="-Xmx2048m"
+fi
+
+

For Windows, you’ll change archivesspace.bat:

+ +
java -Darchivesspace-daemon=yes %JAVA_OPTS% -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:NewRatio=1 -Xss2m -X
+mx2048m -Dfile.encoding=UTF-8 -cp "%GEM_HOME%\gems\jruby-rack-1.1.12\lib\*;lib\*;launcher\lib\*!JRUBY!" org.jruby.Main "la
+uncher/launcher.rb" > "logs/archivesspace.out" 2>&1
+
+ +

NOTE: THE APPLICATION WILL NOT USE THE AVAILABLE MEMORY UNLESS YOU SET THE MAXIMUM HEAP SIZE TO ALLOCATE IT For example, if your server has 4 gigs of RAM, but you haven’t adjusted the ArchivesSpace settings, you’ll only be using 1 gig.

+ +

MySQL

+ +

The ArchivesSpace application can hit a database server rather hard, since it’s a metadata rich application. There are many articles online about how to tune a MySQL database. A good place to start is try something like MySQL Tuner or Tuning Primer which can give good hints on possible tweaks to make to your MySQL server configuration.

+ +

Keep a close eye on the memory available to the server, as well as your InnoDB buffer pool.

+ +

Solr

+ +

The internet is full of many suggestions on how to optimize a Solr index. Running an external Solr index can be beneficial to the performance of ArchivesSpace, since that moves the index to its own server.

+ + +
+ +
+ + + diff --git a/provisioning/tuning.md b/provisioning/tuning.md deleted file mode 100644 index 02894e0e..00000000 --- a/provisioning/tuning.md +++ /dev/null @@ -1,49 +0,0 @@ -# Tuning ArchivesSpace - -ArchivesSpace is a stack of web applications which may require special tuning in order to run most effectively. This is especially the case for institutions with lots of data or many simultaneous users editing metadata. -Keep in mind that ArchivesSpace can be hosted on multiple server, either in a [multitenant setup](clustering.html) or by deploying the various applications ( i.e. backend, frontend, public, solr, & indexer ) on separate servers. - -## Application Settings - -The application itself can tuned in numerous ways. It’s a good idea to read the [configuration documentation](../customization/configuration.html), as there are numerous settings that can be adjusted to fit your needs. - -An important thing to note is that since ArchivesSpace is a Java application, it’s possible to set the memory allocations used by the JVM. There are numerous articles on the internet full of information about what the optimal settings are, which will depend greatly on the load your server is experiencing and the hardware. It’s a good idea to monitor the application and ensure that it’s not hitting the top limits what you’ve set as the heap. - -These settings are: - -* ASPACE_JAVA_XMX : Maximum heap space ( maps to Java’s Xmx, default "Xmx1024m" ) -* ASPACE_JAVA_XSS : Thread stack size ( maps to Xss, default "Xss2m" ) -* ASPACE_GC_OPTS : Options used by the Java garbage collector ( default : "-XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:NewRatio=1" ) - -To modify these settings, Linux users can either export an environment variable ( e.g. $ export ASPACE_JAVA_XMX="Xmx2048m" ) or edit the archivesspace.sh startup script and modify the defaults. - -Windows users must edit the archivesspace.bat file. 
- - -If you're having trouble with errors like `java.lang.OutOfMemoryError` try doubling the `ASPACE_JAVA_XMX`. On Linux you can do this either by setting an environment variable like `$ export ASPACE_JAVA_XMX="Xmx2048m"` or by editing archivsspace.sh: - -``` -if [ "$ASPACE_JAVA_XMX" = "" ]; then - ASPACE_JAVA_XMX="-Xmx2048m" -fi -``` -For Windows, you'll change archivesspace.bat: - -``` -java -Darchivesspace-daemon=yes %JAVA_OPTS% -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:NewRatio=1 -Xss2m -X -mx2048m -Dfile.encoding=UTF-8 -cp "%GEM_HOME%\gems\jruby-rack-1.1.12\lib\*;lib\*;launcher\lib\*!JRUBY!" org.jruby.Main "la -uncher/launcher.rb" > "logs/archivesspace.out" 2>&1 -``` - - -**NOTE: THE APPLICATION WILL NOT USE THE AVAILABLE MEMORY UNLESS YOU SET THE MAXIMUM HEAP SIZE TO ALLOCATE IT** For example, if your server has 4 gigs of RAM, but you haven’t adjusted the ArchivesSpace settings, you’ll only be using 1 gig. - -## MySQL - -The ArchivesSpace application can hit a database server rather hard, since it’s a metadata rich application. There are many articles online about how to tune a MySQL database. A good place to start is try something like [MySQL Tuner](http://mysqltuner.com/) or [Tuning Primer](https://rtcamp.com/tutorials/mysql/tuning-primer/) which can give good hints on possible tweaks to make to your MySQL server configuration. - -Keep a close eye on the memory available to the server, as well as your InnoDB buffer pool. - -## Solr - -The internet is full of many suggestions on how to optimize a Solr index. [Running an external Solr index](solr.html) can be beneficial to the performance of ArchivesSpace, since that moves the index to its own server. diff --git a/readme_develop.html b/readme_develop.html new file mode 100644 index 00000000..82178f77 --- /dev/null +++ b/readme_develop.html @@ -0,0 +1,187 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + readme_develop.md + +

+ +

+ + Report issue on Jira + readme_develop.md + +

+
+
+ +

Resources for developers

+ +

This information will be useful for those creating ArchivesSpace plugins, contributing to the core codebase, or integrating ArchivesSpace with other systems. Additional documentation is available for installing, configuring and maintaining an ArchivesSpace instance and to provide a technical overview of ArchivesSpace for evaluating technical requirements and capabilities.

+ +
+

If you discover errors in the documentation, or would like to suggest or contribute additional documentation, please submit an issue or pull request as appropriate.

+
+ +

Setting up a development environment

+ + +

Building integrations and plugins

+ + +

Exporting data from ArchivesSpace

+ + +

Repositories and CoC

+ + + +
+ +
+ + + diff --git a/readme_develop.md b/readme_develop.md deleted file mode 100644 index 469ca392..00000000 --- a/readme_develop.md +++ /dev/null @@ -1,38 +0,0 @@ -# Resources for developers - -This information will be useful for those creating ArchivesSpace plugins, contributing to the core codebase, or integrating ArchivesSpace withother systems. Additional documentation is available for [installing, configuring and maintaining an ArchivesSpace instance](./readme_implement.html) and to provide [a technical overview of ArchivesSpace](./readme_evaluate.html) for evaluating technical requirements and capabilities. - -> If you discover errors in the documentation, or would like to suggest or contribute additional documentation, please submit an [issue](https://github.com/archivesspace/tech-docs/issues) or [pull request](https://github.com/archivesspace/tech-docs/pulls) as appropriate. - -## Setting up a development environment -* [Running a development version of ArchivesSpace](./development/dev.html) -* [Building an ArchivesSpace release](./development/release.html) -* [Docker](./development/docker.html) -* [DB versions listed by release](./development/release_schema_versions.html) -* [User Interface Test Suite](./development/ui_test.html) -* [Upgrading Rack for ArchivesSpace](./development/jruby-rack-build.html) -* [ArchivesSpace Releases](./development/releases.html) -* [Using the VS Code editor for local development](./development/vscode.html) - -## Building integrations and plugins -* [ArchivesSpace Plug-ins](./customization/plugins.html) -* [Working with the ArchivesSpace Database](./architecture/backend/database.html) -* [Working with the ArchivesSpace API](./api) -* [Customizing text in ArchivesSpace](./customization/locales.html) -* [Theming ArchivesSpace](./customization/theming.html) -* [Managing frontend assets with Bower](./customization/bower.html) - -## Exporting data from ArchivesSpace -* [ArchivesSpace repository EAD 
Exporter](./import_export/ead_exporter.html) -* [ArchivesSpace XSL stylesheets](./import_export/xsl_stylesheets.html) -* [Creating Custom Reports](./customization/reports.html) - -## Repositories and CoC -* [Code of Conduct](https://archivesspace.org/archivesspace-code-of-conduct) -* [Main ArchivesSpace Repository](https://github.com/archivesspace/archivesspace) -* [Plugins supported by the Program Team](https://github.com/archivesspace-plugins) -* [Repository for community development projects](https://github.com/archivesspace-labs) -* [Program Team's Youtube channel](https://www.youtube.com/channel/UCxR6D-UlSx6N6UWTeqHTjzA) -* [Hudson Molonglo's Youtub channel](https://www.youtube.com/channel/UCMBmBY_CsxwJy9rJKxQrVoQ) -* [Sandbox - latest release](http://sandbox.archivesspace.org/) -* [Test Server - latest commit](http://test.archivesspace.org/) diff --git a/readme_develop.md.bak b/readme_develop.md.bak deleted file mode 100644 index 7595fd69..00000000 --- a/readme_develop.md.bak +++ /dev/null @@ -1,26 +0,0 @@ -# Resources for developers - -This information will be useful for those creating ArchivesSpace plugins, contributing to the core codebase, or integrating ArchivesSpace withother systems. Additional documentation is available for [installing, configuring and maintaining an ArchivesSpace instance](./readme_implement.md) and to provide [a technical overview of ArchivesSpace](./readme_evaluate.md) for evaluating technical requirements and capabilities. - -> If you discover errors in the documentation, or would like to suggest or contribute additional documentation, please submit an [issue](https://github.com/archivesspace/tech-docs/issues) or [pull request](https://github.com/archivesspace/tech-docs/pulls) as appropriate. 
- -## Setting up a development environment -* [Running a development version of ArchivesSpace](./development/dev.md) -* [Building an ArchivesSpace release](./development/release.md) -* [Docker](./development/docker.md) -* [DB versions listed by release](./development/release_schema_versions.md) -* [User Interface Test Suite](./development/ui_test.md) -* [Upgrading Rack for ArchivesSpace](./development/jruby-rack-build.md) - -## Building integrations and plugins -* [ArchivesSpace Plug-ins](./customization/plugins.md) -* [Working with the ArchivesSpace Database](./backend/database.md) -* [Working with the ArchivesSpace API](./api) -* [Customizing text in ArchivesSpace](./customization/locales.md) -* [Theming ArchivesSpace](./customization/theming.md) -* [Managing frontend assets with Bower](./customization/bower.md) - -## Exporting data from ArchivesSpace -* [ArchivesSpace repository EAD Exporter](./import_export/ead_exporter.md) -* [ArchivesSpace XSL stylesheets](./import_export/xsl_stylesheets.md) -* [Creating Custom Reports](./customization/reports.md) diff --git a/readme_evaluate.html b/readme_evaluate.html new file mode 100644 index 00000000..2b9329f7 --- /dev/null +++ b/readme_evaluate.html @@ -0,0 +1,178 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + readme_evaluate.md + +

+ +

+ + Report issue on Jira + readme_evaluate.md + +

+
+
+ +

ArchivesSpace technical overview

+ +

This documentation will provide implementers and administrators with the information needed to install, configure and maintain an ArchivesSpace instance. Additional documentation is available for developers and for installing, configuring and maintaining an ArchivesSpace instance.

+ +
+

If you discover errors in the documentation, or would like to suggest or contribute additional documentation, please submit an issue or pull request as appropriate.

+
+ +

Architecture and components

+ + +

Configuration and customization

+ + +

Migrating from Archivists’ Toolkit and Archon

+ + + + +
+ +
+ + + diff --git a/readme_evaluate.md b/readme_evaluate.md deleted file mode 100644 index 08347f3d..00000000 --- a/readme_evaluate.md +++ /dev/null @@ -1,31 +0,0 @@ -# ArchivesSpace technical overview - -This documentation will provide implementers and administrators with the information needed to install, configure and maintain an ArchivesSpase instance. Additional documentation is available [for developers](./readme_develop.html) and for [installing, configuring and maintaining an ArchivesSpace instance](./readme_implement.html). - -> If you discover errors in the documentation, or would like to suggest or contribute additional documentation, please submit an [issue](https://github.com/archivesspace/tech-docs/issues) or [pull request](https://github.com/archivesspace/tech-docs/pulls) as appropriate. - -## Architecture and components - * [The ArchivesSpace backend](./architecture/backend) - * [The ArchivesSpace staff interface](./architecture/frontend) - * [The ArchivesSpace public user interface](./architecture/public) - * [Background Jobs](./architecture/jobs) - * [Search indexing](./architecture/search) - * [OAI-PMH interface](./architecture/oai-pmh) - * [Languages, platforms, and included open source projects](./architecture/languages.html) - * [Working with the ArchivesSpace Database](./architecture/backend/database.html) - * [Working with the ArchivesSpace API](./api) - -## Configuration and customization - * [Running ArchivesSpace as a Unix daemon](./administration/unix_daemon.html) - * [Running ArchivesSpace as a Windows service](./administration/windows.html) - * [Running ArchivesSpace against MySQL](./provisioning/mysql.html) - * [ArchivesSpace Plug-ins](./customization/plugins.html) - * [Theming ArchivesSpace](./customization/theming.html) - * [Adding support for additional username/password-based authentication backends](./customization/authentication.html) - * [Customizing text in ArchivesSpace](./customization/locales.html) - -## Migrating from 
Archivists' Toolkit and Archon - * [Migration tools and data mapping](./migrations/migration_tools.html) - * [Archivists' Toolkit migration tool instructions](./migrations/migrate_from_archivists_toolkit.html) - * [Archon migration tool instructions](./migrations/migrate_from_archon.html) - diff --git a/readme_implement.html b/readme_implement.html new file mode 100644 index 00000000..64c6a913 --- /dev/null +++ b/readme_implement.html @@ -0,0 +1,199 @@ + + + + + + + + +tech-docs | Technical documentation for ArchivesSpace + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+

tech-docs

+ + + +

Technical documentation for ArchivesSpace

+ + +

View the Project on GitHub archivesspace/tech-docs

+ + + + + + +

+ + Edit this page on GitHub + readme_implement.md + +

+ +

+ + Report issue on Jira + readme_implement.md + +

+
+
+ +

Installing, configuring and maintaining an ArchivesSpace instance

+ +

This documentation will provide implementers and administrators with the information needed to install, configure and maintain an ArchivesSpace instance. Additional documentation is available for developers and to provide a technical overview of ArchivesSpace for evaluating technical requirements and capabilities.

+ +
+

If you discover errors in the documentation, or would like to suggest or contribute additional documentation, please submit an issue or pull request as appropriate.

+
+ +

Installation and configuration

+ + +

Customization and theming

+ + +

Maintenance

+ + +

Advanced configuration

+ + +

Migrating data from Archivists’ Toolkit and Archon

+ + +

Other migration resources

+ + + +
+ +
+ + + diff --git a/readme_implement.md b/readme_implement.md deleted file mode 100644 index 46904fcf..00000000 --- a/readme_implement.md +++ /dev/null @@ -1,47 +0,0 @@ -# Installing, configuring and maintaining an ArchivesSpace instance - -This documentation will provide implementers and administrators with the information needed to install, configure and maintain an ArchivesSpase instance. Additional documentation is available [for developers](./readme_develop.html) and to provide [a technical overview of ArchivesSpace](./readme_evaluate.html) for evaluating technical requirements and capabilities. - -> If you discover errors in the documentation, or would like to suggest or contribute additional documentation, please submit an [issue](https://github.com/archivesspace/tech-docs/issues) or [pull request](https://github.com/archivesspace/tech-docs/pulls) as appropriate. - -## Installation and configuration - * [Getting started](./administration/getting_started.html) (All Versions) - * [Configuring ArchivesSpace](./customization/configuration.html) (All Versions) - * [Running ArchivesSpace against MySQL](./provisioning/mysql.html) (All Versions) - * [Running ArchivesSpace with external Solr](./provisioning/solr.html) (Version 3.2+) - * [Running ArchivesSpace as a Unix daemon](./administration/unix_daemon.html) (All Versions) - * [Running ArchivesSpace as a Windows service](./administration/windows.html) (All Versions) - * [Adding support for additional username/password-based authentication backends](./customization/authentication.html) (All Versions) - * [Configuring LDAP authentication](./customization/ldap.html) (All Versions) - - -## Customization and theming - * [ArchivesSpace Plug-ins](./customization/plugins.html) - * [Customizing text in ArchivesSpace](./customization/locales.html) - * [Theming ArchivesSpace](./customization/theming.html) - * [Managing frontend assets with Bower](./customization/bower.html) - * [Creating Custom 
Reports](./customization/reports.html) - * [ArchivesSpace XSL stylesheets](./import_export/xsl_stylesheets.html) - -## Maintenance - * [Re-creating indexes](./administration/indexes.html) - * [Resetting passwords](./administration/passwords.html) - * [Upgrading](./administration/upgrading.html) - * [Backup and recovery](./administration/backup.html) - * [ArchivesSpace releases and database versions](./development/release_schema_versions.html) - -## Advanced configuration - * [Tuning ArchivesSpace](./provisioning/tuning.html) - * [Running ArchivesSpace with load balancing and multiple tenants](./provisioning/clustering.html) - * [Serving ArchivesSpace over subdomains](./provisioning/domains.html) - * [Serving ArchivesSpace user-facing applications over HTTPS](./provisioning/https.html) - * [Application monitoring with New Relic](./provisioning/newrelic.html) - * [Running ArchivesSpace under a prefix](./provisioning/prefix.html) - * [JMeter Test Group Template](./provisioning/jmeter.html) - -## Migrating data from Archivists' Toolkit and Archon - * [Archivists' Toolkit migration tool instructions](./migrations/migrate_from_archivists_toolkit.html) - * [Archon migration tool instructions](./migrations/migrate_from_archon.html) - -## Other migration resources - * [Migration tools and data mapping](./migrations/migration_tools.html)