diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 000000000..3ab207f24
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,4 @@
+# See https://help.github.com/articles/about-codeowners/
+
+# A Conveyal employee is required to approve PR merges
+* @conveyal/employees
diff --git a/.github/issue_template.md b/.github/issue_template.md
new file mode 100644
index 000000000..32706352d
--- /dev/null
+++ b/.github/issue_template.md
@@ -0,0 +1,21 @@
+_**NOTE:** This issue system is intended for reporting bugs and tracking progress in software development. Although this software is licensed with an open-source license, any issue opened here may not be responded to in a timely manner. [Conveyal](https://www.conveyal.com) is unable to provide technical support for custom deployments of this software unless your company has a support contract with us. Please remove this note when creating the issue._
+
+## Observed behavior
+
+Please explain what is being observed within the application here.
+
+## Expected behavior
+
+Please explain what should happen instead.
+
+## Steps to reproduce the problem
+
+Please be as specific as possible.
+
+## Any special notes on configuration used
+
+Please describe any applicable config files that were used.
+
+## Version of datatools-server and datatools-ui if applicable (exact commit hash or branch name)
+
+If using this in conjunction with [datatools-ui](https://github.com/conveyal/datatools-ui), this info can be found by clicking on the gear icon on the sidebar.
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 000000000..8452340b0
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,14 @@
+### Checklist
+
+- [ ] Appropriate branch selected _(all PRs must first be merged to `dev` before they can be merged to `master`)_
+- [ ] Any modified or new methods or classes have helpful JavaDoc and code is thoroughly commented
+- [ ] The description lists all applicable issues this PR seeks to resolve
+- [ ] The description lists any configuration setting(s) that differ from the default settings
+- [ ] All tests and CI builds passing
+- [ ] The description lists all relevant PRs included in this release _(remove this if not merging to master)_
+- [ ] e2e tests are all passing _(remove this if not merging to master)_
+- [ ] Code coverage improves or is at 100% _(remove this if not merging to master)_
+
+### Description
+
+Please explain the changes you made here and, if not immediately obvious from the code, how they resolve any referenced issues. Be sure to include all issues being resolved and any special configuration settings that are needed for the software to run properly with these changes. If merging to master, please also list the PRs that are to be included.
diff --git a/.travis.yml b/.travis.yml
index 368b8c6e6..ea6dfeb81 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,44 +1,79 @@
+dist: trusty # jdk 8 not available on xenial
language: java
-jdk:
-- oraclejdk8
+jdk:
+ - oraclejdk8
install: true
sudo: false
# Install mongoDB to perform persistence tests
-services: mongodb
+services:
+ - mongodb
+ - postgresql
+addons:
+ postgresql: 9.6
cache:
directories:
- - "$HOME/.m2"
+ - $HOME/.m2
+ - $HOME/.cache/yarn
+# Install semantic-release
+before_script:
+ - yarn global add @conveyal/maven-semantic-release semantic-release@15
+ # Create dir for GTFS+ files (used during testing)
+ - mkdir /tmp/gtfsplus
before_install:
#- sed -i.bak -e 's|https://nexus.codehaus.org/snapshots/|https://oss.sonatype.org/content/repositories/codehaus-snapshots/|g' ~/.m2/settings.xml
# set region in AWS config for S3 setup
- mkdir ~/.aws && printf '%s\n' '[default]' 'aws_access_key_id=foo' 'aws_secret_access_key=bar' 'region=us-east-1' > ~/.aws/config
- cp configurations/default/server.yml.tmp configurations/default/server.yml
+# create database for tests
+- psql -U postgres -c 'CREATE DATABASE catalogue;'
script:
# package jar
- mvn package
after_success:
-# Upload coverage reports to codecov.io
-- bash <(curl -s https://codecov.io/bash)
-# notify slack channel of build status
+ # This first codecov run uploads a report associated with the commit that triggered this build (as determined from Travis CI environment variables)
+ - bash <(curl -s https://codecov.io/bash)
+ # run maven-semantic-release to potentially create a new release of datatools-server. The flag --skip-maven-deploy is
+ # used to avoid deploying to maven central. So essentially, this just creates a release with a changelog on github.
+ #
+ # If maven-semantic-release finishes successfully and the current branch is master, upload coverage reports for the
+ # commits that maven-semantic-release generated. Since the above codecov run is associated with the commit that
+ # initiated the Travis build, the report will not be associated with the commits that maven-semantic-release performed
+ # (if it ended up creating a release and the two commits that were a part of that workflow). Therefore, if on master
+ # codecov needs to be run two more times to create codecov reports for the commits made by maven-semantic-release.
+ # See https://github.com/conveyal/gtfs-lib/issues/193.
+ #
+ # The git commands get the commit hash of the HEAD commit and the commit just before HEAD.
+ - |
+ semantic-release --prepare @conveyal/maven-semantic-release --publish @semantic-release/github,@conveyal/maven-semantic-release --verify-conditions @semantic-release/github,@conveyal/maven-semantic-release --verify-release @conveyal/maven-semantic-release --use-conveyal-workflow --dev-branch=dev --skip-maven-deploy
+ if [[ "$TRAVIS_BRANCH" = "master" ]]; then
+ bash <(curl -s https://codecov.io/bash) -C "$(git rev-parse HEAD)"
+ bash <(curl -s https://codecov.io/bash) -C "$(git rev-parse HEAD^)"
+ fi
notifications:
+ # notify slack channel of build status
slack: conveyal:WQxmWiu8PdmujwLw4ziW72Gc
before_deploy:
# get branch name of current branch for use in jar name: https://graysonkoonce.com/getting-the-current-branch-name-during-a-pull-request-in-travis-ci/
- export BRANCH=$(if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo $TRAVIS_BRANCH; else echo $TRAVIS_PULL_REQUEST_BRANCH; fi)
-# copy packaged jars over to deploy dir
+# Create directory that will contain artifacts to deploy to s3.
- mkdir deploy
+# Display contents of target directory (for logging purposes only).
+- ls target/*.jar
+# Copy packaged jars over to deploy dir.
- cp target/dt-*.jar deploy/
-- cp "target/dt-$(git describe --always).jar" "deploy/dt-latest-${BRANCH}.jar"
+# FIXME: Do not create a branch-specific jar for now. Having a jar that changes contents but keeps the same name
+# may cause confusion down the road and may be undesirable.
+# - cp "target/dt-$(git describe --always).jar" "deploy/dt-latest-${BRANCH}.jar"
deploy:
provider: s3
skip_cleanup: true
- access_key_id: AKIAJISY76KTZBNHS4SA
+ access_key_id: AKIAIWMAQP5YXWT7OZEA
secret_access_key:
- secure: a2PNYiv7kzgKxfSx6IhZxSCFBZTCjrbIAK/vmCB1KcpnlV4vTt/IL13i3u6XC8wAbUxhd6iJMtVRm4diIwmy0K7nnpp0h3cQDxYqWCmf1dHZWBJXkpurDpbfxW5G6IlL14i+EsTSCpmwalov+atOBDVyJWVGqfEYaj9c6Q1E0fiYNP3QwZQcsVuD1CRw91xzckfERwqYcz70p/hmTEPOgUwDHuyHsjFafJx+krY3mnBdRdDRLcnPavjcEtprjGkdiVbNETe3CHVNQrAVfqm187OoDA2tHTPjTFmlAdUedp4rYqLmF/WWbHZLzUkQb95FJkklx30vlwC0bIutP1TwIlr3ma5aCRFc58x3SzG07AeM+vbt/nh5A52cpdRjBnhctC2kL++QvwkJhwRy2xptl/WEd5AUagoN4ngnGzyDS4kk/taQFL0IAav5C2WH668kGyH17KNeWG/bCDd55oCvwNlppAYXH+WdbtylqiVb9Fllvs1wcIYWqqyX5zdYiyFEI8LyEQsNF/D5ekuAtLXcF25uwjNtHMjdAxQxHbAbBOeaaLwJd29os9GrKFI/2C0TVXZo2zaFLZyFaIsDHqAC+MXDBDtktimC9Uuozz7bXENCrOUBfsDEQXb46tkXLGaQNXeOhe3KwVKxlGDCsLb7iHIcdDyBm19hqUWhU3uA+dU=
+ secure: cDfIv+/+YimqsH8NvWQZy9YTqaplOwlIeEK+KEBCfsJ3DJK5sa6U4BMZCA4OMP1oTEaIxkd4Rcvj0OAYSFQVNQHtwc+1WeHobzu+MWajMNwmJYdjIvCqMFg2lgJdzCWv6vWcitNvrsYpuXxJlQOirY/4GjEh2gueHlilEdJEItBGYebQL0/5lg9704oeO9v+tIEVivtNc76K5DoxbAa1nW5wCYD7yMQ/cc9EQiMgR5PXNEVJS4hO7dfdDwk2ulGfpwTDrcSaR9JsHyoXj72kJHC9wocS9PLeeYzNAw6ctIymNIjotUf/QUeMlheBbLfTq6DKQ0ISLcD9YYOwviUMEGmnte+HCvTPTtxNbjBWPGa2HMkKsGjTptWu1RtqRJTLy19EN1WG5znO9M+lNGBjLivxHZA/3w7jyfvEU3wvQlzo59ytNMwOEJ3zvSm6r3/QmOr5BU+UHsqy5vv2lOQ9Nv10Uag11zDP1YWCoD96jvjZJsUZtW80ZweHYpDMq0vKdZwZSlbrhgHzS7vlDW7llZPUntz0SfKCjtddbRdy6T4HgsmA8EsBATfisWpmFA6roQSnYwfEZ5ooJ8IMjfOm1qGphrP1Qv8kYkqdtOyTijYErqJ3YzldjeItqaWtyD5tmHm6Wmq6XIbw4bnSfGRx9di+cG5lDEPe1tfBPCf9O5M=
# upload jars in deploy dir to bucket
bucket: datatools-builds
local-dir: deploy
acl: public_read
on:
- repo: catalogueglobal/datatools-server
+ repo: ibi-group/datatools-server
all_branches: true
diff --git a/configurations/default/env.yml.tmp b/configurations/default/env.yml.tmp
index e33b09933..eb5769962 100644
--- a/configurations/default/env.yml.tmp
+++ b/configurations/default/env.yml.tmp
@@ -1,10 +1,13 @@
+# This client ID refers to the UI client in Auth0.
AUTH0_CLIENT_ID: your-auth0-client-id
AUTH0_DOMAIN: your-auth0-domain
# Note: One of AUTH0_SECRET or AUTH0_PUBLIC_KEY should be used depending on the signing algorithm set on the client.
# It seems that newer Auth0 accounts (2017 and later) might default to RS256 (public key).
AUTH0_SECRET: your-auth0-secret # uses HS256 signing algorithm
# AUTH0_PUBLIC_KEY: /path/to/auth0.pem # uses RS256 signing algorithm
-AUTH0_TOKEN: your-auth0-token
+# This client/secret pair refers to a machine-to-machine Auth0 application used to access the Management API.
+AUTH0_API_CLIENT: your-api-client-id
+AUTH0_API_SECRET: your-api-secret-id
DISABLE_AUTH: false
OSM_VEX: http://localhost:1000
SPARKPOST_KEY: your-sparkpost-key
diff --git a/configurations/default/server.yml.tmp b/configurations/default/server.yml.tmp
index d95317b46..c29382e26 100644
--- a/configurations/default/server.yml.tmp
+++ b/configurations/default/server.yml.tmp
@@ -15,6 +15,9 @@ modules:
enabled: false
user_admin:
enabled: true
+ # Enable GTFS+ module for testing purposes
+ gtfsplus:
+ enabled: true
gtfsapi:
enabled: true
load_on_fetch: false
@@ -29,3 +32,6 @@ extensions:
enabled: true
api: http://api.transitfeeds.com/v1/getFeeds
key: your-api-key
+ # Enable MTC for testing purposes
+ mtc:
+ enabled: true
diff --git a/jmeter/README.md b/jmeter/README.md
index db8f693b2..6cbba28da 100644
--- a/jmeter/README.md
+++ b/jmeter/README.md
@@ -31,8 +31,8 @@ The test plan can be ran straight from the command line. A helper script is pro
| 1 | test plan mode | `batch`, `fetch`, `query` or `upload` | which test plan mode to use when running the jmeter script. (see notes below for more explanation of these test plan modes) |
| 2 | number of threads | an integer greater than 0 | The number of simultaneous threads to run at a time. The threads will have staggered start times 1 second apart. |
| 3 | number of loops | an integer greater than 0 | the number of loops to run. This is combined with the number of threads, so if the number of threads is 10 and the number of loops is 8, the total number of test plans to run will be 80. |
-| 4 | project name or batch csv file | string of the project name or string of file path to batch csv file | This argument is required if running the script with the `batch` test plan mode, otherwise, this argument is optional. The jmeter script will create new projects with a project name plus the current iteration number. The default name is "test project #". Also, if the s3 bucket argument is also provided, the output folder will be tarred up and with this name. |
-| 5 | s3 bucket | string of an s3 bucket | OPTIONAL. If provided, the script will tar up the output folder and attempt to upload to the specified s3 bucket. This assumes that aws credentials have been setup for use by the `aws` command line tool. |
+| 4 | project name or batch csv file | string of the project name or string of file path to batch csv file | This argument is required if running the script with the `batch` test plan mode, otherwise, this argument is optional. If in `fetch` or `upload` mode, the jmeter script will create new projects with the provided project name (or "test project" if a name is not provided) plus the current iteration number. In `fetch` or `upload` mode, the feed url and upload file are not configurable. In `fetch` mode, the url `http://documents.atlantaregional.com/transitdata/gtfs_ASC.zip` will be used to fetch the feed to create the feed version. In `upload` mode, the file `fixtures/gtfs.zip` will be uploaded to create the feed version. If in `query` mode, jmeter will try to find the project matching the provided name (as long as the project name is not "test project") or a random project will be picked if this argument is not provided. |
+| 5 | s3 bucket | string of an s3 bucket | OPTIONAL. If provided, the script will tar up the output folder and attempt to upload to the specified s3 bucket. This assumes that aws credentials have been set up for use by the `aws` command line tool. If not running in batch mode and a project name has been specified, the name of this file will be `{project name}.tar.gz`. Otherwise, the name will be `output.tar.gz`. |
Examples:
@@ -48,7 +48,7 @@ _Run the test plan in query mode 80 total times in 10 threads each completing 8
_Run in batch mode. Note that all feeds in the csv file will be processed in each loop. So in the following command, each feed in the batch.csv file would be processed 6 times. See the section below for documentation on the csv file and also see the fixtures folder for an example file._
```sh
-./run-tests.sh query 3 2 batch.csv my-s3-bucket
+./run-tests.sh batch 3 2 batch.csv my-s3-bucket
```
### Running the upload test on multiple gtfs files
@@ -124,6 +124,8 @@ This section is run under the `query` test plan mode. This script assumes that
This section is run in all test plan modes.
+1. Fetch stops and a row count of stops
+1. Make sure the number of stops matches the row count of stops
1. Fetch all routes
1. Pick a random route
1. Fetch all trips on selected route
@@ -133,6 +135,8 @@ This section is run in all test plan modes.
1. Fetch embedded stop_times from trips from a random pattern
1. Check that all stop_times have proper trip_id
1. Check that all stop_times in trips on pattern have same stop sequence as pattern
+1. Make a GraphQL request that contains a nested query of routes, patterns and stops
+1. Make sure that each route's ID is present in the nested route list of each of its patterns
## Reporting
@@ -140,4 +144,4 @@ If running this script in GUI mode, it is possible to see all results in real-ti
When running the test plan from the command line in non-gui mode, reports will be saved to the `output` folder. The outputs will contain a csv file of all requests made and an html report summarizing the results. If the test plan mode was `batch`, `fetch` or `upload` then another csv file will be written that contains a list of the elapsed time for processing the creation of a new gtfs feed version.
-The csv files can be loaded into a jmeter GUI listener to view more details.
+The csv files can be loaded into a jmeter GUI to view more details.
diff --git a/jmeter/test-script.jmx b/jmeter/test-script.jmx
index 60e890cae..4ed4ed0eb 100644
--- a/jmeter/test-script.jmx
+++ b/jmeter/test-script.jmx
@@ -99,8 +99,9 @@
-
+ ${continueBatchLoop}
+ Used to determine if this loop should be completed again by advancing to the next record in a batch csv file
@@ -645,20 +646,47 @@ if (failureMessage?.trim()) {
-
+ false// do not do batch loop in this case
vars.put("continueBatchLoop", "false")
+ Batch mode is not enabled in query mode
-
+ projectId$[*].id0
+
+
+
+
+ import groovy.json.JsonSlurper;
+
+JsonSlurper JSON = new JsonSlurper ();
+
+// if a custom project name is specified, try to find it.
+// if a project matching the name is found, then override the projectId set by the JSON extractor in the step before this.
+if (!vars.get("projectName").equals("test project")) {
+ // parse json
+ try {
+ def jsonResponse = JSON.parseText(prev.getResponseDataAsString());
+ jsonResponse.each{ project ->
+ if (project.name.equals(vars.get("projectName"))) {
+ vars.put("projectId", project.id)
+ }
+ }
+ } catch (Exception e) {
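+ // Ignore JSON parse errors and keep the projectId set by the JSON extractor in the previous step.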
+
+ }
+}
+ groovy
+
+
@@ -764,6 +792,43 @@ vars.put("continueBatchLoop", "false")
+
+ groovy
+
+
+
+ import groovy.json.JsonSlurper;
+
+def failureMessage = "";
+def jsonResponse = null;
+
+JsonSlurper JSON = new JsonSlurper ();
+
+// parse json
+try {
+ jsonResponse = JSON.parseText(prev.getResponseDataAsString());
+} catch (Exception e) {
+ failureMessage += "Invalid JSON.\n"
+}
+
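+ // Scan dailyTotalSeconds to find the date with the most service; each entry corresponds to one day starting at firstCalendarDate.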
+def curDate = jsonResponse.validationResult.firstCalendarDate.toInteger()
+def dateWithMostService = curDate
+def maxServiceSeconds = jsonResponse.validationResult.dailyTotalSeconds[0]
+
+jsonResponse.validationResult.dailyTotalSeconds.each {
+ // Update maxServiceSeconds if needed
+ if (it > maxServiceSeconds) {
+ dateWithMostService = curDate
+ maxServiceSeconds = it
+ }
+
+ // increment cur date
+ curDate += 1
+}
+
+vars.put("date", dateWithMostService.toString())
+
+ true
@@ -1114,16 +1179,18 @@ try {
failureMessage += "Invalid JSON.\n"
}
-def trips = jsonResponse.data.feed.patterns[0].trips
+def firstPattern = jsonResponse.data.feed.patterns[0]
-trips.each { trip ->
- trip.stop_times.each { stop_time ->
- if (!trip.trip_id.equals(stop_time.trip_id)) {
- failureMessage += "trip_id mismatch."
- failureMessage += "Parent trip has trip_id: " + trip.trip_id
- failureMessage += " Stop Time has stop_id: " + stop_time.stop_id
- failureMessage += " and trip_id: " + stop_time.trip_id + "\n"
- }
+if (firstPattern != null) {
+ firstPattern.trips.each { trip ->
+ trip.stop_times.each { stop_time ->
+ if (!trip.trip_id.equals(stop_time.trip_id)) {
+ failureMessage += "trip_id mismatch."
+ failureMessage += "Parent trip has trip_id: " + trip.trip_id
+ failureMessage += " Stop Time has stop_id: " + stop_time.stop_id
+ failureMessage += " and trip_id: " + stop_time.trip_id + "\n"
+ }
+ }
}
}
@@ -1153,27 +1220,97 @@ try {
failureMessage += "Invalid JSON.\n"
}
-def numStopsInPattern = jsonResponse.data.feed.patterns[0].stops.size()
def trips = jsonResponse.data.feed.patterns[0].trips
-def numStopTimesInTrip = 0
trips.each { trip ->
- numStopTimesInTrip = trip.stop_times.size()
- if (numStopTimesInTrip != numStopsInPattern) {
- failureMessage += "mismatch in number of trip stops vs number of pattern stops."
- failureMessage += "There are " + numStopsInPattern + " pattern stops"
- failureMessage += ", but there are " + numStopTimesInTrip + " stop_times"
- failureMessage += " in trip " + trip.trip_id + "\n"
- } else {
- trip.stop_times.eachWithIndex { stop_time, idx ->
- if (!stop_time.stop_id.equals(trip.stop_times[idx].stop_id)) {
- failureMessage += "stop_id mismatch."
- failureMessage += "Pattern stop list stop_id: " + trip.stop_times[idx].stop_id
- failureMessage += " at index: " + idx
- failureMessage += " Stop Time of trip " + trip.trip_id
- failureMessage += " at index: " + idx
- failureMessage += " has stop_id: " + stop_time.stop_id + "\n"
- }
+ def patternStops = jsonResponse.data.feed.patterns[0].stops
+ trip.stop_times.eachWithIndex { stop_time, idx ->
+ if (!stop_time.stop_id.equals(patternStops[idx].stop_id)) {
+ failureMessage += "stop_id mismatch."
+ failureMessage += "Pattern stop list stop_id: " + patternStops[idx].stop_id
+ failureMessage += " at index: " + idx
+ failureMessage += " Stop Time of trip " + trip.trip_id
+ failureMessage += " at index: " + idx
+ failureMessage += " has stop_id: " + stop_time.stop_id + "\n"
+ }
+ }
+}
+
+// set assertion result to fail if an error happened
+if (failureMessage?.trim()) {
+ AssertionResult.setFailureMessage(failureMessage);
+ AssertionResult.setFailure(true);
+}
+ groovy
+
+
+
+
+ true
+
+
+
+ false
+ {"query":"query nestedQuery($namespace: String) {feed (namespace: $namespace) { routes(limit: 1) { patterns(limit: -1) { route { patterns(limit: -1) { route { route_id stops (limit: -1) { stop_id }}} route_id stops (limit: -1) { stop_id }}} route_id stops(limit: -1) { stop_id }}}}", "variables": {"namespace": "${namespace}"}}
+ =
+
+
+
+
+
+
+
+ /api/manager/secure/gtfs/graphql
+ POST
+ true
+ false
+ true
+ false
+
+
+
+ This should test whether the dataloader is able to cache queries and also if queries can be combined
+
+
+
+ 1000
+ 1000.0
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+
+
+ import groovy.json.JsonSlurper;
+
+def failureMessage = "";
+def jsonResponse = null;
+
+JsonSlurper JSON = new JsonSlurper ();
+
+// parse json
+try {
+ jsonResponse = JSON.parseText(prev.getResponseDataAsString());
+} catch (Exception e) {
+ failureMessage += "Invalid JSON.\n"
+}
+
+def routes = jsonResponse.data.feed.routes
+
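+ // Each pattern in the response embeds its parent route; verify the enclosing route's ID appears in that nested route list.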
+routes.each { route ->
+ def routeId = route.route_id
+ route.patterns.each { pattern ->
+ if (pattern.route[0].route_id != routeId) {
+ failureMessage += "route id not found in nested list of the route in a pattern."
+ failureMessage += " Missing route id: " + routeId + "\n"
}
}
}
@@ -1187,8 +1324,100 @@ if (failureMessage?.trim()) {
+
+ true
+
+
+
+ false
+ {"query":"query shapesQuery($namespace: String) {feed (namespace: $namespace) {patterns(limit: -1) {shape(limit: -1) {shape_pt_lat shape_pt_lon shape_pt_sequence}}}}", "variables": {"namespace": "${namespace}"}}
+ =
+
+
+
+
+
+
+
+ /api/manager/secure/gtfs/graphql
+ POST
+ true
+ false
+ true
+ false
+
+
+
+ This tests a common query of getting all shapes of all patterns.
+
+
+
+ 1000
+ 1000.0
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+ true
+
+
+
+ false
+ {"query":"query patternsQuery($date: String, $namespace: String, $routeId: [String]) { feed (namespace: $namespace) { routes (route_id: $routeId) { route_id, route_short_name, route_long_name, patterns (limit: -1) { pattern_id, name, shape (limit: -1) { lat: shape_pt_lat lon: shape_pt_lon }, stops (limit: -1) { stop_id } trips ( date: $date, limit: -1 ) { stop_times (limit: 1) { arrival_time departure_time } } } stops (limit: -1) { location_type stop_code stop_desc stop_id stop_lat stop_lon stop_name stop_url wheelchair_boarding zone_id } } } }", "variables": {"date": "${date}", "namespace": "${namespace}", "route_id": "${randomRouteId}"}}
+ =
+
+
+
+
+
+
+
+ /api/manager/secure/gtfs/graphql
+ POST
+ true
+ false
+ true
+ false
+
+
+
+ This executes the patternsQuery which is used to display the TripsPerHourChart in datatools-ui
+
+
+
+ 1000
+ 1000.0
+
+
+
+
+
+ Content-Type
+ application/json
+
+
+
+
+
+
+ groovy
+
+
+
+ vars.put("continueBatchLoop", "true")
+
+ false
diff --git a/pom.xml b/pom.xml
index 1ac3ccf91..da5ec601d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -6,7 +6,7 @@
<groupId>com.conveyal</groupId>
<artifactId>datatools-server</artifactId>
- <version>3.2.0-SNAPSHOT</version>
+ <version>3.5.1-SNAPSHOT</version>
@@ -16,42 +16,33 @@
+ For other contributors, see https://github.com/ibi-group/datatools-server/graphs/contributors -->
Landon Reed
- lreed@conveyal.com
- Conveyal
- http://conveyal.com/
-
-
- Andrew Byrd
- abyrd@conveyal.com
- Conveyal
- http://conveyal.com/
-
-
- David Emory
- demory@conveyal.com
- Conveyal
- http://conveyal.com/
+ landon.reed@ibigroup.com
+ IBI Group
+ https://ibigroup.com/Evan Siroky
- esiroky@conveyal.com
- Conveyal
- http://conveyal.com/
+ evan.siroky@ibigroup.com
+ IBI Group
+ https://ibigroup.com/
- <connection>scm:git:https://github.com/catalogueglobal/datatools-server.git</connection>
- <developerConnection>scm:git:ssh://git@github.com/catalogueglobal/datatools-server.git</developerConnection>
- <url>https://github.com/catalogueglobal/datatools-server.git</url>
+ <connection>scm:git:https://github.com/ibi-group/datatools-server.git</connection>
+ <developerConnection>scm:git:ssh://git@github.com/ibi-group/datatools-server.git</developerConnection>
+ <url>https://github.com/ibi-group/datatools-server.git</url>
- <jackson.version>2.9.0</jackson.version>
+ <jackson.version>2.9.9</jackson.version>
+
+ <geotools.version>17.5</geotools.version>
@@ -60,6 +51,7 @@
true**/*.properties
+ logback.xmlgtfs/*public/*
@@ -129,6 +121,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
+ <version>3.1.2</version>
@@ -157,23 +150,6 @@
-
-
- maven-surefire-plugin
- 2.22.0
-
-
- org.junit.platform
- junit-platform-surefire-provider
- 1.3.1
-
-
- org.junit.jupiter
- junit-jupiter-engine
- 5.3.1
-
-
-
@@ -217,6 +193,11 @@
always
+
+ <repository>
+ <id>jitpack.io</id>
+ <url>https://jitpack.io</url>
+ </repository>
@@ -224,7 +205,7 @@
<groupId>com.sparkjava</groupId>
<artifactId>spark-core</artifactId>
- <version>2.5</version>
+ <version>2.7.2</version>
@@ -237,7 +218,7 @@
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
- <version>1.1.3</version>
+ <version>1.2.3</version>
@@ -254,11 +235,31 @@
2.1.0
-
+
+
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.12</version>
+ <scope>test</scope>
+
+
+
<groupId>com.conveyal</groupId>
<artifactId>gtfs-lib</artifactId>
- <version>4.1.0</version>
+ <version>5.0.2</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-simple</artifactId>
+ </exclusion>
+ </exclusions>
+
@@ -289,11 +290,11 @@
jackson-dataformat-yaml${jackson.version}
-
+
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
- <version>${jackson.version}</version>
+ <version>2.9.9.1</version>
@@ -321,21 +322,24 @@
org.geotoolsgt-shapefile
- 19.2
+ ${geotools.version}
-
-
+
- org.junit.jupiter
- junit-jupiter-api
- 5.3.1
- test
+ org.geotools
+ gt-metadata
+ ${geotools.version}
+
+
+ org.geotools
+ gt-api
+ ${geotools.version}com.bugsnag
- [3.0,4.0)
+ 3.3.0bugsnag
@@ -358,8 +362,32 @@
<groupId>org.hamcrest</groupId>
<artifactId>java-hamcrest</artifactId>
<version>2.0.0.0</version>
+ <scope>test</scope>
+
+
+
+ <groupId>com.github.conveyal</groupId>
+ <artifactId>java-snapshot-matcher</artifactId>
+ <version>3495b32f7b4d3f82590e0a2284029214070b6984</version>
+ <scope>test</scope>
+
+
+
+ <groupId>com.github.tomakehurst</groupId>
+ <artifactId>wiremock-standalone</artifactId>
+ <version>2.14.0</version>
+ <scope>test</scope>
+
+
+
+ <groupId>net.sf.supercsv</groupId>
+ <artifactId>super-csv</artifactId>
+ <version>2.4.0</version>
-
com.amazonaws
diff --git a/src/main/java/com/conveyal/datatools/common/status/MonitorableJob.java b/src/main/java/com/conveyal/datatools/common/status/MonitorableJob.java
index ea904da90..673008be7 100644
--- a/src/main/java/com/conveyal/datatools/common/status/MonitorableJob.java
+++ b/src/main/java/com/conveyal/datatools/common/status/MonitorableJob.java
@@ -1,15 +1,17 @@
package com.conveyal.datatools.common.status;
import com.conveyal.datatools.manager.DataManager;
+import com.google.common.collect.Sets;
import org.apache.commons.lang3.exception.ExceptionUtils;
-import org.eclipse.jetty.util.ConcurrentHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.File;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.List;
+import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
@@ -18,10 +20,11 @@
*/
public abstract class MonitorableJob implements Runnable {
private static final Logger LOG = LoggerFactory.getLogger(MonitorableJob.class);
- protected final String owner;
+ public final String owner;
// Public fields will be serialized over HTTP API and visible to the web client
public final JobType type;
+ public File file;
public String parentJobId;
public JobType parentJobType;
// Status is not final to allow some jobs to have extra status fields.
@@ -47,16 +50,18 @@ public enum JobType {
LOAD_FEED,
VALIDATE_FEED,
DEPLOY_TO_OTP,
+ EXPORT_GIS,
FETCH_PROJECT_FEEDS,
FETCH_SINGLE_FEED,
MAKE_PROJECT_PUBLIC,
PROCESS_FEED,
+ SYSTEM_JOB,
CREATE_SNAPSHOT,
EXPORT_SNAPSHOT_TO_GTFS,
CONVERT_EDITOR_MAPDB_TO_SQL,
VALIDATE_ALL_FEEDS,
- MERGE_PROJECT_FEEDS,
- MONITOR_SERVER_STATUS
+ MONITOR_SERVER_STATUS,
+ MERGE_FEED_VERSIONS
}
public MonitorableJob(String owner, String name, JobType type) {
@@ -66,27 +71,40 @@ public MonitorableJob(String owner, String name, JobType type) {
registerJob();
}
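+ /** Constructor for a job with the given owner, a default name, and unknown job type. */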
+ public MonitorableJob(String owner) {
+ this(owner, "Unnamed Job", JobType.UNKNOWN_TYPE);
+ }
+
+ /** Constructor for a usually unmonitored system job (but still something we want to conform to our model). */
+ public MonitorableJob () {
+ this("system", "System job", JobType.SYSTEM_JOB);
+ }
+
/**
* This method should never be called directly or overridden.
* It is a standard start-up stage for all monitorable jobs.
*/
private void registerJob() {
- ConcurrentHashSet<MonitorableJob> userJobs = DataManager.userJobsMap.get(this.owner);
- if (userJobs == null) {
- userJobs = new ConcurrentHashSet<>();
- }
+ Set<MonitorableJob> userJobs = DataManager.userJobsMap.get(this.owner);
+ // If there are no current jobs for the user, create a new empty set. NOTE: this should be a concurrent hash
+ // set so that it is threadsafe.
+ if (userJobs == null) userJobs = Sets.newConcurrentHashSet();
userJobs.add(this);
DataManager.userJobsMap.put(this.owner, userJobs);
}
+ public File retrieveFile () {
+ return file;
+ }
+
/**
* This method should never be called directly or overridden. It is a standard clean up stage for all
* monitorable jobs.
*/
private void unRegisterJob () {
// remove this job from the user-job map
- ConcurrentHashSet<MonitorableJob> userJobs = DataManager.userJobsMap.get(this.owner);
+ Set<MonitorableJob> userJobs = DataManager.userJobsMap.get(this.owner);
if (userJobs != null) userJobs.remove(this);
}
diff --git a/src/main/java/com/conveyal/datatools/common/utils/S3Utils.java b/src/main/java/com/conveyal/datatools/common/utils/S3Utils.java
index ecbda5f1f..e8ba09d72 100644
--- a/src/main/java/com/conveyal/datatools/common/utils/S3Utils.java
+++ b/src/main/java/com/conveyal/datatools/common/utils/S3Utils.java
@@ -24,7 +24,7 @@
import java.net.URL;
import java.util.Date;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
/**
* Created by landon on 8/2/16.
@@ -34,12 +34,17 @@ public class S3Utils {
private static final Logger LOG = LoggerFactory.getLogger(S3Utils.class);
private static final int REQUEST_TIMEOUT_MSEC = 30 * 1000;
- public static String uploadBranding(Request req, String key) throws IOException, ServletException {
+ public static String uploadBranding(Request req, String key) {
String url;
String s3Bucket = DataManager.getConfigPropertyAsText("application.data.gtfs_s3_bucket");
if (s3Bucket == null) {
- haltWithMessage(req, 400, "s3bucket is incorrectly configured on server");
+ logMessageAndHalt(
+ req,
+ 500,
+ "s3bucket is incorrectly configured on server",
+ new Exception("s3bucket is incorrectly configured on server")
+ );
}
// Get file from request
@@ -47,17 +52,19 @@ public static String uploadBranding(Request req, String key) throws IOException,
MultipartConfigElement multipartConfigElement = new MultipartConfigElement(System.getProperty("java.io.tmpdir"));
req.raw().setAttribute("org.eclipse.jetty.multipartConfig", multipartConfigElement);
}
- Part part = req.raw().getPart("file");
- String extension = "." + part.getContentType().split("/", 0)[1];
- File tempFile = File.createTempFile(key + "_branding", extension);
- InputStream inputStream;
+ String extension = null;
+ File tempFile = null;
try {
+ Part part = req.raw().getPart("file");
+ extension = "." + part.getContentType().split("/", 0)[1];
+ tempFile = File.createTempFile(key + "_branding", extension);
+ InputStream inputStream;
inputStream = part.getInputStream();
FileOutputStream out = new FileOutputStream(tempFile);
IOUtils.copy(inputStream, out);
- } catch (Exception e) {
+ } catch (IOException | ServletException e) {
e.printStackTrace();
- haltWithMessage(req, 400, "Unable to read uploaded file");
+ logMessageAndHalt(req, 400, "Unable to read uploaded file");
}
try {
@@ -71,8 +78,7 @@ public static String uploadBranding(Request req, String key) throws IOException,
.withCannedAcl(CannedAccessControlList.PublicRead));
return url;
} catch (AmazonServiceException ase) {
- ase.printStackTrace();
- haltWithMessage(req, 400, "Error uploading file to S3");
+ logMessageAndHalt(req, 500, "Error uploading file to S3", ase);
return null;
} finally {
boolean deleted = tempFile.delete();
diff --git a/src/main/java/com/conveyal/datatools/common/utils/ScheduledJob.java b/src/main/java/com/conveyal/datatools/common/utils/ScheduledJob.java
new file mode 100644
index 000000000..a71a0d405
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/common/utils/ScheduledJob.java
@@ -0,0 +1,16 @@
+package com.conveyal.datatools.common.utils;
+
+import java.util.concurrent.ScheduledFuture;
+
+/**
+ * Utility class that associates a {@link Runnable} with its {@link ScheduledFuture} for easy storage and recall.
+ */
+public class ScheduledJob {
+ public final ScheduledFuture scheduledFuture;
+ public final Runnable job;
+
+ public ScheduledJob (Runnable job, ScheduledFuture scheduledFuture) {
+ this.job = job;
+ this.scheduledFuture = scheduledFuture;
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/common/utils/Scheduler.java b/src/main/java/com/conveyal/datatools/common/utils/Scheduler.java
new file mode 100644
index 000000000..df07a6be3
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/common/utils/Scheduler.java
@@ -0,0 +1,247 @@
+package com.conveyal.datatools.common.utils;
+
+import com.conveyal.datatools.manager.jobs.FeedExpirationNotificationJob;
+import com.conveyal.datatools.manager.jobs.FetchProjectFeedsJob;
+import com.conveyal.datatools.manager.models.FeedSource;
+import com.conveyal.datatools.manager.models.FeedVersion;
+import com.conveyal.datatools.manager.models.Project;
+import com.conveyal.datatools.manager.persistence.Persistence;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+import static com.conveyal.datatools.common.utils.Utils.getTimezone;
+import static com.google.common.collect.Multimaps.synchronizedListMultimap;
+
+/**
+ * This class centralizes the logic associated with scheduling and cancelling tasks (organized as a {@link ScheduledJob})
+ * for the Data Tools application. These tasks can be auto-scheduled according to application data (e.g., feed expiration
+ * notifications based on the latest feed version's last date of service) or enabled by users (e.g., scheduling a project
+ * auto feed fetch nightly at 2AM). The jobs are tracked in {@link #scheduledJobsForFeedSources} and
+ * {@link #scheduledJobsForProjects} so that they can be cancelled at a later point in time should the associated
+ * feeds/projects be deleted or if the user changes the fetch behavior.
+ */
+public class Scheduler {
+ private static final Logger LOG = LoggerFactory.getLogger(Scheduler.class);
+
+ // Scheduled executor that handles running scheduled jobs.
+ public final static ScheduledExecutorService schedulerService = Executors.newScheduledThreadPool(1);
+ /** Stores {@link ScheduledJob} objects containing scheduled tasks keyed on the task's associated {@link FeedSource} ID. */
+ public final static ListMultimap<String, ScheduledJob> scheduledJobsForFeedSources =
+ synchronizedListMultimap(ArrayListMultimap.create());
+ /** Stores {@link ScheduledJob} objects containing scheduled tasks keyed on the task's associated {@link Project} ID. */
+ public final static ListMultimap<String, ScheduledJob> scheduledJobsForProjects =
+ synchronizedListMultimap(ArrayListMultimap.create());
+
+ /**
+ * A method to initialize all scheduled tasks upon server startup.
+ */
+ public static void initialize() {
+ LOG.info("Scheduling recurring project auto fetches");
+ for (Project project : Persistence.projects.getAll()) {
+ if (project.autoFetchFeeds) {
+ scheduleAutoFeedFetch(project, 1);
+ }
+ }
+ LOG.info("Scheduling feed expiration notifications");
+ // Get all active feed sources
+ for (FeedSource feedSource : Persistence.feedSources.getAll()) {
+ // Schedule expiration notification jobs for the latest feed version
+ scheduleExpirationNotifications(feedSource);
+ }
+ }
+
+ /**
+ * Convenience method for scheduling one-off jobs for a feed source.
+ */
+ public static ScheduledJob scheduleFeedSourceJob (FeedSource feedSource, Runnable job, long delay, TimeUnit timeUnit) {
+ ScheduledFuture scheduledFuture = schedulerService.schedule(job, delay, timeUnit);
+ ScheduledJob scheduledJob = new ScheduledJob(job, scheduledFuture);
+ scheduledJobsForFeedSources.put(feedSource.id, scheduledJob);
+ return scheduledJob;
+ }
+
+ /**
+ * Cancels and removes all scheduled jobs for a given entity id and job class. NOTE: This is intended as an internal
+ * method that should operate on one of the scheduledJobsForXYZ fields of this class. A wrapper method (such as
+ * {@link #removeProjectJobsOfType(String, Class, boolean)}) should be provided for any new entity types with
+ * scheduled jobs (e.g., if feed version-specific scheduled jobs are needed).
+ */
+ private static int removeJobsOfType(ListMultimap<String, ScheduledJob> scheduledJobs, String id, Class<?> clazz, boolean mayInterruptIfRunning) {
+ int jobsCancelled = 0;
+ // First get the list of jobs belonging to the id (e.g., all jobs related to a feed source).
+ List<ScheduledJob> jobs = scheduledJobs.get(id);
+ // Iterate over jobs, cancelling and removing only those matching the job class.
+ // Use an iterator because elements may be removed and if removed in a regular loop it could
+ // throw a java.util.ConcurrentModificationException
+ // See https://stackoverflow.com/q/8104692/269834
+ for (Iterator<ScheduledJob> iterator = jobs.iterator(); iterator.hasNext(); ) {
+ ScheduledJob scheduledJob = iterator.next();
+ if (clazz.isInstance(scheduledJob.job)) {
+ scheduledJob.scheduledFuture.cancel(mayInterruptIfRunning);
+ iterator.remove();
+ jobsCancelled++;
+ }
+ }
+ return jobsCancelled;
+ }
+
+ /**
+ * Cancels and removes all scheduled jobs for a given feed source id and job class.
+ */
+ public static void removeFeedSourceJobsOfType(String id, Class<?> clazz, boolean mayInterruptIfRunning) {
+ int cancelled = removeJobsOfType(scheduledJobsForFeedSources, id, clazz, mayInterruptIfRunning);
+ if (cancelled > 0) LOG.info("Cancelled/removed {} {} jobs for feed source {}", cancelled, clazz.getSimpleName(), id);
+ }
+
+ /**
+ * Cancels and removes all scheduled jobs for a given project id and job class.
+ */
+ public static void removeProjectJobsOfType(String id, Class<?> clazz, boolean mayInterruptIfRunning) {
+ int cancelled = removeJobsOfType(scheduledJobsForProjects, id, clazz, mayInterruptIfRunning);
+ if (cancelled > 0) LOG.info("Cancelled/removed {} {} jobs for project {}", cancelled, clazz.getSimpleName(), id);
+ }
+
+ /**
+ * Schedule or cancel auto feed fetch for a project as needed. This should be called whenever a
+ * project is created or updated. If a project is deleted, the auto feed fetch job will
+ * automatically cancel itself.
+ */
+ public static void scheduleAutoFeedFetch(Project project) {
+ // If auto fetch flag is turned on, schedule auto fetch.
+ if (project.autoFetchFeeds) Scheduler.scheduleAutoFeedFetch(project, 1);
+ // Otherwise, cancel any existing task for this id.
+ else Scheduler.removeProjectJobsOfType(project.id, FetchProjectFeedsJob.class, true);
+ }
+
+ /**
+ * Schedule an action that fetches all the feeds in the given project according to the autoFetch fields of that project.
+ * Currently feeds are not auto-fetched independently, they must be all fetched together as part of a project.
+ * This method is called when a Project's auto-fetch settings are updated, and when the system starts up to populate
+ * the auto-fetch scheduler.
+ */
+ public static void scheduleAutoFeedFetch (Project project, int intervalInDays) {
+ try {
+ // First cancel any already scheduled auto fetch task for this project id.
+ removeProjectJobsOfType(project.id, FetchProjectFeedsJob.class, true);
+
+ ZoneId timezone = getTimezone(project.defaultTimeZone);
+ LOG.info("Scheduling auto-fetch for projectID: {}", project.id);
+
+ // NOW in default timezone
+ ZonedDateTime now = ZonedDateTime.ofInstant(Instant.now(), timezone);
+
+ // Scheduled start time
+ ZonedDateTime startTime = LocalDateTime.of(
+ LocalDate.now(),
+ LocalTime.of(project.autoFetchHour, project.autoFetchMinute)
+ ).atZone(timezone);
+ LOG.info("Now: {}", now.format(DateTimeFormatter.ISO_ZONED_DATE_TIME));
+ LOG.info("Scheduled start time: {}", startTime.format(DateTimeFormatter.ISO_ZONED_DATE_TIME));
+
+ // Get diff between start time and current time
+ long diffInMinutes = (startTime.toEpochSecond() - now.toEpochSecond()) / 60;
+ // Delay is equivalent to diff or (if negative) one day plus (negative) diff.
+ long delayInMinutes = diffInMinutes >= 0
+ ? diffInMinutes
+ : 24 * 60 + diffInMinutes;
+
+ LOG.info("Auto fetch begins in {} hours and runs every {} hours", String.valueOf(delayInMinutes / 60.0), TimeUnit.DAYS.toHours(intervalInDays));
+ long intervalInMinutes = TimeUnit.DAYS.toMinutes(intervalInDays);
+ // system is defined as owner because owner field must not be null
+ FetchProjectFeedsJob fetchProjectFeedsJob = new FetchProjectFeedsJob(project, "system");
+ ScheduledFuture scheduledFuture = schedulerService.scheduleAtFixedRate(
+ fetchProjectFeedsJob,
+ delayInMinutes,
+ intervalInMinutes,
+ TimeUnit.MINUTES
+ );
+ ScheduledJob scheduledJob = new ScheduledJob(fetchProjectFeedsJob, scheduledFuture);
+ scheduledJobsForProjects.put(project.id, scheduledJob);
+ } catch (Exception e) {
+ LOG.error("Error scheduling project {} feed fetch.", project.id);
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * Schedules feed expiration notifications. This method will find the latest feed version and
+ * then schedule a one-week expiration warning notification and also a notification on the day that the
+ * feed version expires. It also cancels any existing notifications for this feed source.
+ */
+ public static void scheduleExpirationNotifications (FeedSource feedSource) {
+ // Cancel existing expiration notifications
+ removeFeedSourceJobsOfType(feedSource.id, FeedExpirationNotificationJob.class, true);
+
+ FeedVersion latest = feedSource.retrieveLatest();
+
+ if (
+ latest != null &&
+ latest.validationResult != null &&
+ latest.validationResult.lastCalendarDate != null &&
+ latest.validationResult.lastCalendarDate.isAfter(LocalDate.now())
+ ) {
+ // get parent project
+ Project parentProject = feedSource.retrieveProject();
+
+ if (parentProject == null) {
+ // parent project has been deleted, but feed source/version have not
+ // abort the setting up of the notification and figure out why the database has been
+ // allowed to devolve to this state
+ LOG.warn("The parent project for feed source {} does not exist in the database.", feedSource.id);
+ return;
+ }
+
+ // get the timezone from the parent project
+ ZoneId timezone = getTimezone(parentProject.defaultTimeZone);
+
+ // calculate feed expiration time from last service date
+ long expirationEpochSeconds = latest
+ .validationResult
+ .lastCalendarDate
+ .atTime(4, 0)
+ .atZone(timezone)
+ .toEpochSecond();
+ long curSeconds = System.currentTimeMillis() / 1000;
+ long timeUntilExpiration = expirationEpochSeconds - curSeconds;
+ long timeUntilOneWeekBeforeExpiration = timeUntilExpiration - 86400 * 7;
+
+ // schedule notification jobs and record them in the scheduled notifications
+
+ // one week warning
+ if (timeUntilOneWeekBeforeExpiration > 0) {
+ scheduleFeedSourceJob(
+ feedSource,
+ new FeedExpirationNotificationJob(feedSource.id, true),
+ timeUntilOneWeekBeforeExpiration,
+ TimeUnit.SECONDS
+ );
+ }
+
+ // actual expiration
+ scheduleFeedSourceJob(
+ feedSource,
+ new FeedExpirationNotificationJob(feedSource.id, false),
+ timeUntilExpiration,
+ TimeUnit.SECONDS
+ );
+
+ LOG.info("Scheduled feed expiration notifications for feed {}", feedSource.id);
+ }
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java b/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java
index 89a2ba3c9..f69c11f5e 100644
--- a/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java
+++ b/src/main/java/com/conveyal/datatools/common/utils/SparkUtils.java
@@ -1,5 +1,7 @@
package com.conveyal.datatools.common.utils;
+import com.bugsnag.Bugsnag;
+import com.bugsnag.Report;
import com.conveyal.datatools.manager.auth.Auth0UserProfile;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
@@ -12,13 +14,17 @@
import spark.Request;
import spark.Response;
+import javax.servlet.ServletInputStream;
import javax.servlet.ServletOutputStream;
+import javax.servlet.ServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.FileInputStream;
+import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Arrays;
+import static com.conveyal.datatools.manager.DataManager.getBugsnag;
import static com.conveyal.datatools.manager.DataManager.getConfigPropertyAsText;
import static spark.Spark.halt;
@@ -30,12 +36,13 @@ public class SparkUtils {
private static final ObjectMapper mapper = new ObjectMapper();
private static final String BASE_URL = getConfigPropertyAsText("application.public_url");
private static final int DEFAULT_LINES_TO_PRINT = 10;
+ private static final int MAX_CHARACTERS_TO_PRINT = 500;
/**
* Write out the supplied file to the Spark response as an octet-stream.
*/
public static HttpServletResponse downloadFile(File file, String filename, Request req, Response res) {
- if (file == null) haltWithMessage(req, 404, "File is null");
+ if (file == null) logMessageAndHalt(req, 404, "File is null");
HttpServletResponse raw = res.raw();
raw.setContentType("application/octet-stream");
raw.setHeader("Content-Disposition", "attachment; filename=" + filename);
@@ -49,10 +56,8 @@ public static HttpServletResponse downloadFile(File file, String filename, Reque
ByteStreams.copy(fileInputStream, outputStream);
// TODO: Is flushing the stream necessary?
outputStream.flush();
- } catch (Exception e) {
- LOG.error("Could not write file to output stream", e);
- e.printStackTrace();
- haltWithMessage(req, 500, "Error serving GTFS file", e);
+ } catch (IOException e) {
+ logMessageAndHalt(req, 500, "Could not write file to output stream", e);
}
return raw;
}
@@ -90,19 +95,40 @@ public static String formatJSON(String message, int code, Exception e) {
/**
* Wrapper around Spark halt method that formats message as JSON using {@link SparkUtils#formatJSON}.
*/
- public static void haltWithMessage(Request request, int statusCode, String message) throws HaltException {
- haltWithMessage(request, statusCode, message, null);
+ public static void logMessageAndHalt(Request request, int statusCode, String message) throws HaltException {
+ logMessageAndHalt(request, statusCode, message, null);
}
/**
- * Wrapper around Spark halt method that formats message as JSON using {@link SparkUtils#formatJSON}. Exception
+ * Wrapper around Spark halt method that formats message as JSON using {@link SparkUtils#formatJSON}.
+ * Extra logic occurs when the status code is >= 500. A Bugsnag report is created if
+ * Bugsnag is configured.
*/
- public static void haltWithMessage(
+ public static void logMessageAndHalt(
Request request,
int statusCode,
String message,
Exception e
) throws HaltException {
+ // Note that halting occurred, also print error stacktrace if applicable
+ if (e != null) e.printStackTrace();
+ LOG.info("Halting with status code {}. Error message: {}.", statusCode, message);
+
+ if (statusCode >= 500) {
+ LOG.error(message);
+
+ // create report to notify bugsnag if configured
+ Bugsnag bugsnag = getBugsnag();
+ if (bugsnag != null && e != null) {
+ // create report to send to bugsnag
+ Report report = bugsnag.buildReport(e);
+ Auth0UserProfile userProfile = request.attribute("user");
+ String userEmail = userProfile != null ? userProfile.getEmail() : "no-auth";
+ report.setUserEmail(userEmail);
+ bugsnag.notify(report);
+ }
+ }
+
JsonNode json = getObjectNode(message, statusCode, e);
String logString = null;
try {
@@ -162,13 +188,19 @@ public static void logRequestOrResponse(boolean logRequest, Request request, Res
}
if ("application/json".equals(contentType)) {
bodyString = logRequest ? request.body() : response.body();
- if (bodyString != null) {
+ if (bodyString == null) {
+ bodyString = "{body content is null}";
+ } else if (bodyString.length() > MAX_CHARACTERS_TO_PRINT) {
+ bodyString = new StringBuilder()
+ .append("body content is longer than 500 characters, printing first 500 characters here:\n")
+ .append(bodyString, 0, MAX_CHARACTERS_TO_PRINT)
+ .append("\n...and " + (bodyString.length() - MAX_CHARACTERS_TO_PRINT) + " more characters")
+ .toString();
+ } else {
// Pretty print JSON if ContentType is JSON and body is not empty
JsonNode jsonNode = mapper.readTree(bodyString);
// Add new line for legibility when printing
bodyString = "\n" + mapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonNode);
- } else {
- bodyString = "{body content is null}";
}
} else if (contentType != null) {
bodyString = String.format("\nnon-JSON body type: %s", contentType);
@@ -201,6 +233,30 @@ public static void logRequestOrResponse(
);
}
+ /**
+ * Bypass Spark's request wrapper, which always caches the request body in memory even though it may be a very large
+ * GTFS file. Also, the body of the request is the GTFS file itself (rather than multipart form data) because the
+ * multipart form handling code also caches the request body.
+ */
+ public static void copyRequestStreamIntoFile(Request req, File file) {
+ try {
+ ServletInputStream inputStream = ((ServletRequestWrapper) req.raw()).getRequest().getInputStream();
+ FileOutputStream fileOutputStream = new FileOutputStream(file);
+ // Guava's ByteStreams.copy uses a 4k buffer (no need to wrap output stream), but does not close streams.
+ ByteStreams.copy(inputStream, fileOutputStream);
+ fileOutputStream.close();
+ inputStream.close();
+ if (file.length() == 0) {
+ // Throw IO exception to be caught and returned to user via halt.
+ throw new IOException("No file found in request body.");
+ }
+ LOG.info("Saving file {} from upload", file.getName());
+ } catch (Exception e) {
+ LOG.error("Unable to open input stream from upload");
+ logMessageAndHalt(req, 500, "Unable to read uploaded file.", e);
+ }
+ }
+
private static String trimLines(String str) {
if (str == null) return "";
String[] lines = str.split("\n");
diff --git a/src/main/java/com/conveyal/datatools/common/utils/Utils.java b/src/main/java/com/conveyal/datatools/common/utils/Utils.java
new file mode 100644
index 000000000..36e016731
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/common/utils/Utils.java
@@ -0,0 +1,20 @@
+package com.conveyal.datatools.common.utils;
+
+import java.time.ZoneId;
+
+public class Utils {
+
+ /**
+ * Get the ZoneId of a String that could be a valid tzid.
+ *
+ * @param tzid The timezone identifier
+ * @return The ZoneId of the parsed timezone identifier, or "America/New_York" if tzid is invalid.
+ */
+ public static ZoneId getTimezone(String tzid) {
+ try {
+ return ZoneId.of(tzid);
+ } catch(Exception e) {
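+ // Fall back to the default timezone (America/New_York) if the identifier cannot be parsed.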
+ return ZoneId.of("America/New_York");
+ }
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/EditorLockController.java b/src/main/java/com/conveyal/datatools/editor/controllers/EditorLockController.java
index cb6fe1b73..61db70a55 100644
--- a/src/main/java/com/conveyal/datatools/editor/controllers/EditorLockController.java
+++ b/src/main/java/com/conveyal/datatools/editor/controllers/EditorLockController.java
@@ -17,7 +17,7 @@
import java.util.Map;
import java.util.concurrent.TimeUnit;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static spark.Spark.delete;
import static spark.Spark.post;
import static spark.Spark.put;
@@ -69,12 +69,12 @@ private static String lockFeed (Request req, Response res) {
} else if (!currentSession.userId.equals(userProfile.getUser_id())) {
// If the session has not expired, and another user has the active session.
LOG.warn("Edit session {} for user {} in progress for feed {}. User {} not permitted to lock feed for {} minutes.", currentSession.sessionId, currentSession.userEmail, currentSession.feedId, userProfile.getEmail(), minutesUntilExpiration);
- haltWithMessage(req, 400, getLockedFeedMessage(currentSession, minutesUntilExpiration));
+ logMessageAndHalt(req, 400, getLockedFeedMessage(currentSession, minutesUntilExpiration));
return null;
} else {
String sessionId = req.session().id();
LOG.warn("User {} is editing feed {} in another session {}. Cannot create lock for session {}", userProfile.getEmail(), feedId, currentSession.sessionId, sessionId);
- haltWithMessage(req, 400, "Warning! You are editing this feed in another session/browser tab!");
+ logMessageAndHalt(req, 400, "Warning! You are editing this feed in another session/browser tab!");
return null;
}
}
@@ -109,7 +109,7 @@ private static String maintainLock(Request req, Response res) {
if (currentSession == null) {
// If there is no current session to maintain, request that user reloads browser.
LOG.warn("No active editor session to maintain {}.", sessionId);
- haltWithMessage(req, 400, "No active session for feedId. Please refresh your browser and try editing later.");
+ logMessageAndHalt(req, 400, "No active session for feedId. Please refresh your browser and try editing later.");
return null;
} else if (!currentSession.sessionId.equals(sessionId)) {
long secondsSinceLastCheckIn = TimeUnit.MILLISECONDS.toSeconds (System.currentTimeMillis() - currentSession.lastCheckIn);
@@ -122,10 +122,10 @@ private static String maintainLock(Request req, Response res) {
// If the new current session is held by this user, give them the option to evict the current session /
// unlock the feed.
LOG.warn("User {} already has an active editor session () for feed {}.", userProfile.getEmail(), currentSession.sessionId, currentSession.feedId);
- haltWithMessage(req, 400, "Warning! You have an active editing session for this feed underway in a different browser tab.");
+ logMessageAndHalt(req, 400, "Warning! You have an active editing session for this feed underway in a different browser tab.");
} else {
LOG.warn("User {} attempted editor session for feed {} while active session underway for user {}.", userProfile.getEmail(), currentSession.feedId, currentSession.userEmail);
- haltWithMessage(req, 400, getLockedFeedMessage(currentSession, minutesUntilExpiration));
+ logMessageAndHalt(req, 400, getLockedFeedMessage(currentSession, minutesUntilExpiration));
}
return null;
} else {
diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java
index 5c6f4a922..fc84d4d6b 100644
--- a/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java
+++ b/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java
@@ -1,6 +1,7 @@
package com.conveyal.datatools.editor.controllers.api;
import com.conveyal.datatools.common.utils.S3Utils;
+import com.conveyal.datatools.common.utils.SparkUtils;
import com.conveyal.datatools.editor.controllers.EditorLockController;
import com.conveyal.datatools.manager.auth.Auth0UserProfile;
import com.conveyal.datatools.manager.models.FeedSource;
@@ -10,6 +11,7 @@
import com.conveyal.gtfs.loader.JdbcTableWriter;
import com.conveyal.gtfs.loader.Table;
import com.conveyal.gtfs.model.Entity;
+import com.conveyal.gtfs.util.InvalidNamespaceException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.commons.dbutils.DbUtils;
@@ -20,12 +22,13 @@
import spark.Response;
import javax.sql.DataSource;
+import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import static com.conveyal.datatools.common.utils.SparkUtils.formatJSON;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.conveyal.datatools.editor.controllers.EditorLockController.sessionsForFeedIds;
import static spark.Spark.delete;
import static spark.Spark.options;
@@ -84,6 +87,7 @@ private void registerRoutes() {
// Handle update useFrequency field. Hitting this endpoint will delete all trips for a pattern and update the
// useFrequency field.
if ("pattern".equals(classToLowercase)) {
+ put(ROOT_ROUTE + ID_PARAM + "/stop_times", this::updateStopTimesFromPatternStops, json::write);
delete(ROOT_ROUTE + ID_PARAM + "/trips", this::deleteTripsForPattern, json::write);
}
}
@@ -97,18 +101,20 @@ private String deleteTripsForPattern(Request req, Response res) {
// NOTE: This is a string pattern ID, not the integer ID that all other HTTP endpoints use.
String patternId = req.params("id");
if (patternId == null) {
- haltWithMessage(req, 400, "Must provide valid pattern_id");
+ logMessageAndHalt(req, 400, "Must provide valid pattern_id");
}
try {
JdbcTableWriter tableWriter = new JdbcTableWriter(Table.TRIPS, datasource, namespace);
int deletedCount = tableWriter.deleteWhere("pattern_id", patternId, true);
return formatJSON(String.format("Deleted %d.", deletedCount), 200);
+ } catch (InvalidNamespaceException e) {
+ logMessageAndHalt(req, 400, "Invalid namespace");
+ return null;
} catch (Exception e) {
- e.printStackTrace();
- haltWithMessage(req, 400, "Error deleting entity", e);
+ logMessageAndHalt(req, 500, "Error deleting entity", e);
return null;
} finally {
- LOG.info("Delete operation took {} msec", System.currentTimeMillis() - startTime);
+ LOG.info("Delete trips for pattern operation took {} msec", System.currentTimeMillis() - startTime);
}
}
@@ -120,8 +126,9 @@ private String deleteMultipleTrips(Request req, Response res) {
long startTime = System.currentTimeMillis();
String namespace = getNamespaceAndValidateSession(req);
String[] tripIds = req.queryParams("tripIds").split(",");
+ JdbcTableWriter tableWriter = null;
try {
- JdbcTableWriter tableWriter = new JdbcTableWriter(table, datasource, namespace);
+ tableWriter = new JdbcTableWriter(table, datasource, namespace);
for (String tripId: tripIds) {
// Delete each trip ID found in query param WITHOUT auto-committing.
int result = tableWriter.delete(Integer.parseInt(tripId), false);
@@ -131,13 +138,15 @@ private String deleteMultipleTrips(Request req, Response res) {
throw new SQLException(message);
}
}
- // Commit the transaction after iterating over trip IDs (because the deletes where made without autocommit).
+ // Commit the transaction after iterating over trip IDs (because the deletes were made without autocommit).
tableWriter.commit();
LOG.info("Deleted {} trips", tripIds.length);
+ } catch (InvalidNamespaceException e) {
+ logMessageAndHalt(req, 400, "Invalid namespace");
} catch (Exception e) {
- e.printStackTrace();
- haltWithMessage(req, 400, "Error deleting entity", e);
+ logMessageAndHalt(req, 500, "Error deleting entity", e);
} finally {
+ if (tableWriter != null) tableWriter.close();
LOG.info("Delete operation took {} msec", System.currentTimeMillis() - startTime);
}
return formatJSON(String.format("Deleted %d.", tripIds.length), 200);
@@ -154,37 +163,50 @@ private String deleteOne(Request req, Response res) {
JdbcTableWriter tableWriter = new JdbcTableWriter(table, datasource, namespace);
if (tableWriter.delete(id, true) == 1) {
// FIXME: change return message based on result value
- return formatJSON(String.valueOf("Deleted one."), 200);
+ return formatJSON("Deleted one.", 200);
}
} catch (Exception e) {
- e.printStackTrace();
- haltWithMessage(req, 400, "Error deleting entity", e);
+ logMessageAndHalt(req, 400, "Error deleting entity", e);
} finally {
LOG.info("Delete operation took {} msec", System.currentTimeMillis() - startTime);
}
return null;
}
+ /**
+ * For a given pattern ID, update all its trips' stop times to conform to the default travel and dwell times. This
+ * is used, for instance, when a new pattern stop is added or inserted into an existing pattern that has trips which
+ * need the updated travel times applied in bulk.
+ */
+ private String updateStopTimesFromPatternStops (Request req, Response res) {
+ long startTime = System.currentTimeMillis();
+ String namespace = getNamespaceAndValidateSession(req);
+ int patternId = getIdFromRequest(req);
+ try {
+ int beginStopSequence = Integer.parseInt(req.queryParams("stopSequence"));
+ JdbcTableWriter tableWriter = new JdbcTableWriter(table, datasource, namespace);
+ int stopTimesUpdated = tableWriter.normalizeStopTimesForPattern(patternId, beginStopSequence);
+ return SparkUtils.formatJSON("updateResult", stopTimesUpdated + " stop times updated.");
+ } catch (Exception e) {
+ logMessageAndHalt(req, 400, "Error normalizing stop times", e);
+ return null;
+ } finally {
+ LOG.info("Normalize stop times operation took {} msec", System.currentTimeMillis() - startTime);
+ }
+ }
+
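
A hedged sketch of how a client might exercise the new normalization endpoint registered above. The host, port, route prefix, pattern ID, and feed/session identifiers are all placeholders (the real path depends on ROOT_ROUTE and the configured application port), authorization headers are omitted, and the response shape is only indicative.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class NormalizeStopTimesClientSketch {
        public static void main(String[] args) throws Exception {
            HttpClient client = HttpClient.newHttpClient();
            // Hypothetical URL: PUT <editor API prefix>/pattern/:id/stop_times?stopSequence=...&sessionId=...
            // Any authorization headers required by the deployment are omitted here.
            HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:4567/api/editor/secure/pattern/12/stop_times"
                    + "?stopSequence=0&feedId=example-feed&sessionId=example-session"))
                .PUT(HttpRequest.BodyPublishers.noBody())
                .build();
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.body()); // indicative only, e.g. a JSON message noting "N stop times updated."
        }
    }
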
/**
* HTTP endpoint to upload branding image to S3 for either agency or route entities. The endpoint also handles
* updating the branding URL field to match the S3 URL.
*/
private String uploadEntityBranding (Request req, Response res) {
int id = getIdFromRequest(req);
- String url = null;
+ String url;
try {
- // FIXME: remove cast to string.
- String idAsString = String.valueOf(id);
- url = S3Utils.uploadBranding(req, String.join("_", classToLowercase, idAsString));
+ url = S3Utils.uploadBranding(req, String.format("%s_%d", classToLowercase, id));
} catch (HaltException e) {
// Do not re-catch halts thrown for exceptions that have already been caught.
- LOG.error("Halt encountered", e);
throw e;
- } catch (Exception e) {
- String message = String.format("Could not upload branding for %s id=%d", classToLowercase, id);
- LOG.error(message);
- e.printStackTrace();
- haltWithMessage(req, 400, message, e);
}
String namespace = getNamespaceAndValidateSession(req);
// Prepare json object for response. (Note: this is not the full entity object, but just the URL field).
@@ -201,9 +223,8 @@ private String uploadEntityBranding (Request req, Response res) {
preparedStatement.executeUpdate();
connection.commit();
return jsonObject.toString();
- } catch (SQLException e) {
- e.printStackTrace();
- haltWithMessage(req, 400, "Could not update branding url", e);
+ } catch (Exception e) {
+ logMessageAndHalt(req, 500, "Could not update branding url", e);
return null;
} finally {
DbUtils.closeQuietly(connection);
@@ -220,22 +241,26 @@ private String createOrUpdate(Request req, Response res) {
// Check if an update or create operation depending on presence of id param
// This needs to be final because it is used in a lambda operation below.
if (req.params("id") == null && req.requestMethod().equals("PUT")) {
- haltWithMessage(req, 400, "Must provide id");
+ logMessageAndHalt(req, 400, "Must provide id");
}
final boolean isCreating = req.params("id") == null;
String namespace = getNamespaceAndValidateSession(req);
Integer id = getIdFromRequest(req);
- // Get the JsonObject
+ // Save or update to database
try {
JdbcTableWriter tableWriter = new JdbcTableWriter(table, datasource, namespace);
+ String jsonBody = req.body();
if (isCreating) {
- return tableWriter.create(req.body(), true);
+ return tableWriter.create(jsonBody, true);
} else {
- return tableWriter.update(id, req.body(), true);
+ return tableWriter.update(id, jsonBody, true);
}
+ } catch (InvalidNamespaceException e) {
+ logMessageAndHalt(req, 400, "Invalid namespace");
+ } catch (IOException e) {
+ logMessageAndHalt(req, 400, "Invalid json", e);
} catch (Exception e) {
- e.printStackTrace();
- haltWithMessage(req, 400, "Operation failed.", e);
+ logMessageAndHalt(req, 500, "An error was encountered while trying to save to the database", e);
} finally {
String operation = isCreating ? "Create" : "Update";
LOG.info("{} operation took {} msec", operation, System.currentTimeMillis() - startTime);
@@ -252,23 +277,23 @@ private static String getNamespaceAndValidateSession(Request req) {
String sessionId = req.queryParams("sessionId");
FeedSource feedSource = Persistence.feedSources.getById(feedId);
if (feedSource == null) {
- haltWithMessage(req, 400, "Feed ID is invalid");
+ logMessageAndHalt(req, 400, "Feed ID is invalid");
}
// FIXME: Switch to using spark session IDs rather than query parameter?
// String sessionId = req.session().id();
EditorLockController.EditorSession currentSession = sessionsForFeedIds.get(feedId);
if (currentSession == null) {
- haltWithMessage(req, 400, "There is no active editing session for user.");
+ logMessageAndHalt(req, 400, "There is no active editing session for user.");
}
if (!currentSession.sessionId.equals(sessionId)) {
// This session does not match the current active session for the feed.
Auth0UserProfile userProfile = req.attribute("user");
if (currentSession.userEmail.equals(userProfile.getEmail())) {
LOG.warn("User {} already has editor session {} for feed {}. Same user cannot make edits on session {}.", currentSession.userEmail, currentSession.sessionId, feedId, req.session().id());
- haltWithMessage(req, 400, "You have another editing session open for " + feedSource.name);
+ logMessageAndHalt(req, 400, "You have another editing session open for " + feedSource.name);
} else {
LOG.warn("User {} already has editor session {} for feed {}. User {} cannot make edits on session {}.", currentSession.userEmail, currentSession.sessionId, feedId, userProfile.getEmail(), req.session().id());
- haltWithMessage(req, 400, "Somebody else is editing the " + feedSource.name + " feed.");
+ logMessageAndHalt(req, 400, "Somebody else is editing the " + feedSource.name + " feed.");
}
} else {
currentSession.lastEdit = System.currentTimeMillis();
@@ -276,7 +301,7 @@ private static String getNamespaceAndValidateSession(Request req) {
}
String namespace = feedSource.editorNamespace;
if (namespace == null) {
- haltWithMessage(req, 400, "Cannot edit feed that has not been snapshotted (namespace is null).");
+ logMessageAndHalt(req, 400, "Cannot edit feed that has not been snapshotted (namespace is null).");
}
return namespace;
}
@@ -294,15 +319,9 @@ private Integer getIdFromRequest(Request req) {
id = Integer.valueOf(req.params("id"));
} catch (NumberFormatException e) {
LOG.error("ID provided must be an integer", e);
- haltWithMessage(req, 400, "ID provided is not a number");
+ logMessageAndHalt(req, 400, "ID provided is not a number");
}
}
return id;
}
-
- // TODO add hooks
- abstract void getEntityHook(T entity);
- abstract void createEntityHook(T entity);
- abstract void updateEntityHook(T entity);
- abstract void deleteEntityHook(T entity);
}
diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorControllerImpl.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorControllerImpl.java
index fa796e887..e25e29601 100644
--- a/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorControllerImpl.java
+++ b/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorControllerImpl.java
@@ -9,24 +9,4 @@ public class EditorControllerImpl extends EditorController {
public EditorControllerImpl(String apiPrefix, Table table, DataSource dataSource){
super(apiPrefix, table, dataSource);
}
-
- @Override
- void getEntityHook(Entity entity) {
-
- }
-
- @Override
- void createEntityHook(Entity entity) {
-
- }
-
- @Override
- void updateEntityHook(Entity entity) {
-
- }
-
- @Override
- void deleteEntityHook(Entity entity) {
-
- }
}
diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/SnapshotController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/SnapshotController.java
index 48a0719ef..0069f5093 100644
--- a/src/main/java/com/conveyal/datatools/editor/controllers/api/SnapshotController.java
+++ b/src/main/java/com/conveyal/datatools/editor/controllers/api/SnapshotController.java
@@ -6,6 +6,7 @@
import com.conveyal.datatools.editor.jobs.ExportSnapshotToGTFSJob;
import com.conveyal.datatools.manager.DataManager;
import com.conveyal.datatools.manager.auth.Auth0UserProfile;
+import com.conveyal.datatools.manager.auth.Actions;
import com.conveyal.datatools.manager.controllers.api.FeedVersionController;
import com.conveyal.datatools.manager.models.FeedDownloadToken;
import com.conveyal.datatools.manager.models.FeedSource;
@@ -26,7 +27,7 @@
import static com.conveyal.datatools.common.utils.S3Utils.downloadFromS3;
import static com.conveyal.datatools.common.utils.SparkUtils.downloadFile;
import static com.conveyal.datatools.common.utils.SparkUtils.formatJobMessage;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static spark.Spark.delete;
import static spark.Spark.get;
import static spark.Spark.options;
@@ -55,9 +56,9 @@ private static Snapshot getSnapshotById(Request req, Response res) {
*/
private static Snapshot getSnapshotFromRequest(Request req) {
String id = req.params("id");
- if (id == null) haltWithMessage(req, 400, "Must provide valid snapshot ID");
+ if (id == null) logMessageAndHalt(req, 400, "Must provide valid snapshot ID");
// Check user permissions on feed source.
- FeedVersionController.requestFeedSourceById(req, "view", "feedId");
+ FeedVersionController.requestFeedSourceById(req, Actions.VIEW, "feedId");
return Persistence.snapshots.getById(id);
}
@@ -66,7 +67,7 @@ private static Snapshot getSnapshotFromRequest(Request req) {
*/
private static Collection getSnapshots(Request req, Response res) {
// Get feed source and check user permissions.
- FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, "view", "feedId");
+ FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, Actions.VIEW, "feedId");
// FIXME Do we need a way to return all snapshots?
// Is this used in GTFS Data Manager to retrieveById snapshots in bulk?
@@ -79,7 +80,7 @@ private static Collection getSnapshots(Request req, Response res) {
*/
private static String createSnapshot (Request req, Response res) throws IOException {
Auth0UserProfile userProfile = req.attribute("user");
- FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, "edit", "feedId");
+ FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, Actions.EDIT, "feedId");
// Take fields from request body for creating snapshot.
Snapshot snapshot = json.read(req.body());
// Ensure feed source ID and snapshotOf namespace is correct
@@ -104,7 +105,7 @@ private static String importFeedVersionAsSnapshot(Request req, Response res) {
Auth0UserProfile userProfile = req.attribute("user");
// Get feed version from request (and check permissions).
String feedVersionId = req.queryParams("feedVersionId");
- FeedVersion feedVersion = FeedVersionController.requestFeedVersion(req, "edit", feedVersionId);
+ FeedVersion feedVersion = FeedVersionController.requestFeedVersion(req, Actions.EDIT, feedVersionId);
FeedSource feedSource = feedVersion.parentFeedSource();
// Create and run snapshot job
Snapshot snapshot = new Snapshot("Snapshot of " + feedVersion.name, feedSource.id, feedVersion.namespace);
@@ -121,7 +122,7 @@ private static String importFeedVersionAsSnapshot(Request req, Response res) {
// FIXME: Is this method used anywhere? Can we delete?
private static Object updateSnapshot (Request req, Response res) {
// FIXME
- haltWithMessage(req, 400, "Method not implemented");
+ logMessageAndHalt(req, 400, "Method not implemented");
return null;
}
@@ -135,14 +136,14 @@ private static String restoreSnapshot (Request req, Response res) {
String id = req.params("id");
// FIXME Ensure namespace id exists in database?
// Retrieve feed source.
- FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, "edit", "feedId");
+ FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, Actions.EDIT, "feedId");
Snapshot snapshotToRestore = Persistence.snapshots.getById(id);
if (snapshotToRestore == null) {
- haltWithMessage(req, 400, "Must specify valid snapshot ID");
+ logMessageAndHalt(req, 400, "Must specify valid snapshot ID");
}
// Update editor namespace pointer.
if (snapshotToRestore.namespace == null) {
- haltWithMessage(req, 400, "Failed to restore snapshot. No namespace found.");
+ logMessageAndHalt(req, 400, "Failed to restore snapshot. No namespace found.");
}
// Preserve existing editor buffer if requested. FIXME: should the request body also contain name and comments?
boolean preserveBuffer = "true".equals(req.queryParams("preserveBuffer"));
@@ -187,7 +188,12 @@ private static Object getSnapshotToken(Request req, Response res) {
// FIXME: use new FeedStore.
if (DataManager.useS3) {
if (!FeedStore.s3Client.doesObjectExist(DataManager.feedBucket, key)) {
- haltWithMessage(req, 400, String.format("Error downloading snapshot from S3. Object %s does not exist.", key));
+ logMessageAndHalt(
+ req,
+ 500,
+ String.format("Error downloading snapshot from S3. Object %s does not exist.", key),
+ new Exception("s3 object does not exist")
+ );
}
// Return presigned download link if using S3.
return downloadFromS3(FeedStore.s3Client, DataManager.feedBucket, key, false, res);
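
For reference, a minimal sketch of the check-then-presign pattern this download path relies on, written against the AWS SDK v1 AmazonS3 client; the bucket name, key, and expiry below are assumptions, not the application's actual values.

    import com.amazonaws.HttpMethod;
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;
    import java.net.URL;
    import java.util.Date;

    public class PresignedDownloadSketch {
        public static URL presignIfPresent(AmazonS3 s3, String bucket, String key) {
            // Fail fast if the object is missing rather than handing out a dead link.
            if (!s3.doesObjectExist(bucket, key)) {
                throw new IllegalStateException("S3 object does not exist: " + key);
            }
            // Link expires after one hour; the caller redirects the browser to this URL.
            Date expiration = new Date(System.currentTimeMillis() + 60 * 60 * 1000);
            return s3.generatePresignedUrl(bucket, key, expiration, HttpMethod.GET);
        }

        public static void main(String[] args) {
            AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
            System.out.println(presignIfPresent(s3, "example-feed-bucket", "snapshots/example.zip"));
        }
    }
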
@@ -207,10 +213,10 @@ private static Snapshot deleteSnapshot(Request req, Response res) {
String id = req.params("id");
// FIXME Ensure namespace id exists in database.
// Check feed source permissions.
- FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, "edit", "feedId");
+ FeedSource feedSource = FeedVersionController.requestFeedSourceById(req, Actions.EDIT, "feedId");
// Retrieve snapshot
Snapshot snapshot = Persistence.snapshots.getById(id);
- if (snapshot == null) haltWithMessage(req, 400, "Must provide valid snapshot ID.");
+ if (snapshot == null) logMessageAndHalt(req, 400, "Must provide valid snapshot ID.");
try {
// Remove the snapshot and then renumber the snapshots
snapshot.delete();
@@ -219,8 +225,7 @@ private static Snapshot deleteSnapshot(Request req, Response res) {
// FIXME delete tables from database?
return snapshot;
} catch (Exception e) {
- e.printStackTrace();
- haltWithMessage(req, 400, "Unknown error deleting snapshot.", e);
+ logMessageAndHalt(req, 500, "Unknown error occurred while deleting snapshot.", e);
return null;
}
}
@@ -235,7 +240,7 @@ private static Object downloadSnapshotWithToken (Request req, Response res) {
FeedDownloadToken token = Persistence.tokens.getById(id);
if(token == null || !token.isValid()) {
- haltWithMessage(req, 400, "Feed download token not valid");
+ logMessageAndHalt(req, 400, "Feed download token not valid");
}
Snapshot snapshot = token.retrieveSnapshot();
diff --git a/src/main/java/com/conveyal/datatools/editor/jobs/ConvertEditorMapDBToSQL.java b/src/main/java/com/conveyal/datatools/editor/jobs/ConvertEditorMapDBToSQL.java
index 68476bd04..cbc21b453 100644
--- a/src/main/java/com/conveyal/datatools/editor/jobs/ConvertEditorMapDBToSQL.java
+++ b/src/main/java/com/conveyal/datatools/editor/jobs/ConvertEditorMapDBToSQL.java
@@ -11,6 +11,7 @@
import com.conveyal.datatools.editor.models.transit.TripPatternStop;
import com.conveyal.datatools.manager.DataManager;
import com.conveyal.datatools.manager.models.FeedSource;
+import com.conveyal.datatools.manager.models.Snapshot;
import com.conveyal.datatools.manager.persistence.Persistence;
import com.conveyal.gtfs.GTFSFeed;
import com.conveyal.gtfs.loader.FeedLoadResult;
@@ -26,8 +27,12 @@
import java.sql.JDBCType;
import java.sql.PreparedStatement;
import java.sql.SQLException;
+import java.util.Iterator;
+import java.util.List;
import static com.conveyal.gtfs.loader.DateField.GTFS_DATE_FORMATTER;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
public class ConvertEditorMapDBToSQL extends MonitorableJob {
private final String feedId;
@@ -53,6 +58,17 @@ public void jobLogic() {
LOG.warn("Not converting snapshot. Feed source Id {} does not exist in application data", feedId);
return;
}
+ Snapshot matchingSnapshot = Persistence.snapshots.getOneFiltered(
+ and(
+ eq("version", versionNumber),
+ eq(Snapshot.FEED_SOURCE_REF, feedId)
+ )
+ );
+ boolean snapshotExists = true;
+ if (matchingSnapshot == null) {
+ snapshotExists = false;
+ matchingSnapshot = new Snapshot("Imported", feedId, "mapdb_editor");
+ }
FeedTx feedTx;
// FIXME: This needs to share a connection with the snapshotter.
// Create connection for each snapshot
@@ -74,19 +90,19 @@ public void jobLogic() {
LOG.info("Converting {}.{} to SQL", feedId, versionNumber);
// Convert mapdb to SQL
FeedLoadResult convertFeedResult = convertFeed(feedId, versionNumber, feedTx);
- // Create manager snapshot for storing in feed source.
- com.conveyal.datatools.manager.models.Snapshot managerSnapshot =
- new com.conveyal.datatools.manager.models.Snapshot(
- feedId, versionNumber != null ? versionNumber : 0, "mapdb_editor", convertFeedResult);
-// managerSnapshot.dateCreated =
- LOG.info("Storing snapshot {}", managerSnapshot.id);
- Persistence.snapshots.create(managerSnapshot);
+ // Update manager snapshot with result details.
+ matchingSnapshot.snapshotOf = "mapdb_editor";
+ matchingSnapshot.namespace = convertFeedResult.uniqueIdentifier;
+ matchingSnapshot.feedLoadResult = convertFeedResult;
+ LOG.info("Storing snapshot {}", matchingSnapshot.id);
+ if (snapshotExists) Persistence.snapshots.replace(matchingSnapshot.id, matchingSnapshot);
+ else Persistence.snapshots.create(matchingSnapshot);
if (setEditorBuffer) {
// If there is no version, that indicates that this was from the editor buffer for that feedId.
// Make this snapshot the editor namespace buffer.
- LOG.info("Updating active snapshot to {}", managerSnapshot.id);
+ LOG.info("Updating active snapshot to {}", matchingSnapshot.id);
FeedSource updatedFeedSource = Persistence.feedSources.updateField(
- feedSource.id, "editorNamespace", managerSnapshot.namespace);
+ feedSource.id, "editorNamespace", matchingSnapshot.namespace);
LOG.info("Editor namespace: {}", updatedFeedSource.editorNamespace);
}
connection.commit();
@@ -173,7 +189,6 @@ private FeedLoadResult convertFeed(String feedId, Integer version, FeedTx feedTx
updateTripsStatement.setString(1, pattern.id);
updateTripsStatement.setString(2, trip.gtfsTripId);
// FIXME: Do something with the return value? E.g., rollback if it hits more than one trip.
- // FIXME: Do this in batches?
updateTripsStatement.addBatch();
batchSize += 1;
// If we've accumulated a lot of prepared statement calls, pass them on to the database backend.
@@ -185,10 +200,11 @@ private FeedLoadResult convertFeed(String feedId, Integer version, FeedTx feedTx
}
// Pattern stops table has not yet been created because pattern stops do not exist in
- // GTFSFeed. NOte, we want this table to be created regardless of whether patterns exist or not.
+ // GTFSFeed. Note, we want this table to be created regardless of whether patterns exist or not
+ // (which is why it is outside of the check for null pattern map).
Table.PATTERN_STOP.createSqlTable(connection, namespace, true);
- // Insert all trip patterns and pattern stops into database (tables have already been created FIXME pattern_stops has not yet been created).
+ // Insert all trip patterns and pattern stops into database (tables have already been created).
if (feedTx.tripPatterns != null) {
batchSize = 0;
// Handle inserting patterns
@@ -214,9 +230,8 @@ private FeedLoadResult convertFeed(String feedId, Integer version, FeedTx feedTx
insertPatternStatement.setString(6, pattern.id);
insertPatternStatement.addBatch();
batchSize += 1;
-
- int stopSequence = 1;
- // LOG.info("Inserting {} pattern stops for pattern {}", pattern.patternStops.size(), pattern.id);
+ // stop_sequence must be zero-based and incrementing to match stop_times values.
+ int stopSequence = 0;
for (TripPatternStop tripPatternStop : pattern.patternStops) {
// TripPatternStop's stop ID needs to be mapped to GTFS stop ID.
// FIXME Possible NPE?
@@ -228,7 +243,6 @@ private FeedLoadResult convertFeed(String feedId, Integer version, FeedTx feedTx
insertPatternStopStatement.setInt(5, tripPatternStop.defaultDwellTime);
insertPatternStopStatement.setInt(6, 0);
insertPatternStopStatement.setInt(7, 0);
- // FIXME: shapeDistTraveled could be null
if (tripPatternStop.shapeDistTraveled == null) {
insertPatternStopStatement.setNull(8, JDBCType.DOUBLE.getVendorTypeNumber());
} else {
@@ -336,4 +350,4 @@ private int handleBatchExecution(int batchSize, PreparedStatement ... preparedSt
return batchSize;
}
}
-}
\ No newline at end of file
+}
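
The conversion job above accumulates addBatch() calls and flushes them once the batch grows large. A self-contained sketch of that JDBC batching pattern, using an assumed table and flush threshold rather than the job's own helper methods:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class BatchInsertSketch {
        private static final int INSERT_BATCH_SIZE = 500; // assumed flush threshold

        public static void insertRows(Connection connection, String[][] rows) throws SQLException {
            connection.setAutoCommit(false);
            try (PreparedStatement statement = connection.prepareStatement(
                    "insert into example_table (a, b) values (?, ?)")) {
                int batchSize = 0;
                for (String[] row : rows) {
                    statement.setString(1, row[0]);
                    statement.setString(2, row[1]);
                    statement.addBatch();
                    // Pass accumulated statements to the database backend once the batch grows large.
                    if (++batchSize % INSERT_BATCH_SIZE == 0) statement.executeBatch();
                }
                statement.executeBatch(); // flush the remainder
                connection.commit();
            }
        }
    }
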
diff --git a/src/main/java/com/conveyal/datatools/editor/jobs/ExportSnapshotToGTFSJob.java b/src/main/java/com/conveyal/datatools/editor/jobs/ExportSnapshotToGTFSJob.java
index 1d09113b5..e81138ac5 100644
--- a/src/main/java/com/conveyal/datatools/editor/jobs/ExportSnapshotToGTFSJob.java
+++ b/src/main/java/com/conveyal/datatools/editor/jobs/ExportSnapshotToGTFSJob.java
@@ -13,7 +13,6 @@
import java.io.File;
import java.io.FileInputStream;
-import java.io.FileNotFoundException;
import java.io.IOException;
public class ExportSnapshotToGTFSJob extends MonitorableJob {
@@ -67,10 +66,10 @@ public void jobLogic() {
} else {
try {
FeedVersion.feedStore.newFeed(filename, new FileInputStream(tempFile), null);
- } catch (FileNotFoundException e) {
+ } catch (IOException e) {
+ LOG.error("Could not store feed for snapshot {}", snapshot.id);
e.printStackTrace();
status.fail("Could not export snapshot to GTFS.");
- LOG.error("Could not store feed for snapshot {}", snapshot.id);
}
}
// Delete snapshot temp file.
diff --git a/src/main/java/com/conveyal/datatools/editor/jobs/GisExport.java b/src/main/java/com/conveyal/datatools/editor/jobs/GisExport.java
deleted file mode 100644
index 664b31a70..000000000
--- a/src/main/java/com/conveyal/datatools/editor/jobs/GisExport.java
+++ /dev/null
@@ -1,216 +0,0 @@
-package com.conveyal.datatools.editor.jobs;
-
-import com.conveyal.datatools.editor.datastore.FeedTx;
-import com.conveyal.datatools.manager.models.FeedSource;
-import com.google.common.io.Files;
-import com.vividsolutions.jts.geom.Coordinate;
-import com.vividsolutions.jts.geom.GeometryFactory;
-import com.vividsolutions.jts.geom.LineString;
-import com.conveyal.datatools.editor.datastore.GlobalTx;
-import com.conveyal.datatools.editor.datastore.VersionedDataStore;
-import com.conveyal.datatools.editor.models.transit.*;
-import org.geotools.data.DataUtilities;
-import org.geotools.data.DefaultTransaction;
-import org.geotools.data.Transaction;
-import org.geotools.data.collection.ListFeatureCollection;
-import org.geotools.data.shapefile.ShapefileDataStore;
-import org.geotools.data.shapefile.ShapefileDataStoreFactory;
-import org.geotools.data.simple.SimpleFeatureCollection;
-import org.geotools.data.simple.SimpleFeatureSource;
-import org.geotools.data.simple.SimpleFeatureStore;
-import org.geotools.feature.simple.SimpleFeatureBuilder;
-import org.geotools.referencing.crs.DefaultGeographicCRS;
-import org.opengis.feature.simple.SimpleFeature;
-import org.opengis.feature.simple.SimpleFeatureType;
-import com.conveyal.datatools.editor.utils.DirectoryZip;
-
-import java.io.File;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/** Export routes or stops as a shapefile */
-public class GisExport implements Runnable {
- File file;
- Type type;
- Collection agencyIds;
-
- public GisExport(Type type, File file, Collection agencyIds) {
- this.type = type;
- this.file = file;
- this.agencyIds = agencyIds;
- }
-
- @Override
- public void run() {
- File outDir = Files.createTempDir();
- File outShp = new File(outDir, file.getName().replaceAll("\\.zip", "") + ".shp");
-
- GlobalTx gtx = VersionedDataStore.getGlobalTx();
- FeedTx atx = null;
- try {
- ShapefileDataStoreFactory dataStoreFactory = new ShapefileDataStoreFactory();
-
- Map params = new HashMap();
- params.put("url", outShp.toURI().toURL());
- params.put("create spatial index", Boolean.TRUE);
-
- ShapefileDataStore datastore = (ShapefileDataStore) dataStoreFactory.createNewDataStore(params);
- datastore.forceSchemaCRS(DefaultGeographicCRS.WGS84);
-
- SimpleFeatureType STOP_TYPE = DataUtilities.createType(
- "Stop",
- "the_geom:Point:srid=4326," +
- "name:String," +
- "code:String," +
- "desc:String," +
- "id:String," +
- "agency:String"
- );
-
- SimpleFeatureType ROUTE_TYPE = DataUtilities.createType(
- "Route", // <- the name for our feature type
- "the_geom:LineString:srid=4326," +
- "patternName:String," +
- "shortName:String," +
- "longName:String," +
- "desc:String," +
- "type:String," +
- "url:String," +
- "routeColor:String," +
- "routeTextColor:String," +
- "agency:String"
- );
-
- SimpleFeatureCollection collection;
-
- SimpleFeatureType collectionType;
-
- SimpleFeatureBuilder featureBuilder = null;
-
- List features = new ArrayList();
-
- if (type.equals(Type.STOPS)) {
- collectionType = STOP_TYPE;
- datastore.createSchema(STOP_TYPE);
- featureBuilder = new SimpleFeatureBuilder(STOP_TYPE);
-
- for (String feedId : agencyIds) {
- EditorFeed fs = gtx.feeds.get(feedId);
-
- atx = VersionedDataStore.getFeedTx(feedId);
- for (Stop s : atx.stops.values()) {
- featureBuilder.add(s.location);
- featureBuilder.add(s.stopName);
- featureBuilder.add(s.stopCode);
- featureBuilder.add(s.stopDesc);
- featureBuilder.add(s.getGtfsId());
- featureBuilder.add(fs.feedPublisherName);
- SimpleFeature feature = featureBuilder.buildFeature(null);
- features.add(feature);
- }
-
- atx.rollback();
- }
- } else if (type.equals(Type.ROUTES)) {
- collectionType = ROUTE_TYPE;
- datastore.createSchema(ROUTE_TYPE);
- featureBuilder = new SimpleFeatureBuilder(ROUTE_TYPE);
-
- GeometryFactory gf = new GeometryFactory();
-
- for (String feedId : agencyIds) {
- EditorFeed fs = gtx.feeds.get(feedId);
-
- atx = VersionedDataStore.getFeedTx(feedId);
-
- // we loop over trip patterns. Note that this will yield several lines for routes that have
- // multiple patterns. There's no real good way to reconcile the shapes of multiple patterns.
- for (TripPattern tp : atx.tripPatterns.values()) {
- LineString shape;
- if (tp.shape != null) {
- shape = tp.shape;
- } else {
- // build the shape from the stops
- Coordinate[] coords = new Coordinate[tp.patternStops.size()];
-
- for (int i = 0; i < coords.length; i++) {
- coords[i] = atx.stops.get(tp.patternStops.get(i).stopId).location.getCoordinate();
- }
-
- shape = gf.createLineString(coords);
- }
-
- Route r = atx.routes.get(tp.routeId);
-
- featureBuilder.add(shape);
- featureBuilder.add(tp.name);
- featureBuilder.add(r.routeShortName);
- featureBuilder.add(r.routeLongName);
- featureBuilder.add(r.routeDesc);
-
- if (r.routeTypeId != null)
- featureBuilder.add(gtx.routeTypes.get(r.routeTypeId).toString());
- else
- featureBuilder.add("");
-
- featureBuilder.add(r.routeUrl);
- featureBuilder.add(r.routeColor);
- featureBuilder.add(r.routeTextColor);
- featureBuilder.add(fs.feedPublisherName);
- SimpleFeature feature = featureBuilder.buildFeature(null);
- features.add(feature);
- }
-
- atx.rollback();
- }
- }
- else
- throw new IllegalStateException("Invalid type");
-
- // save the file
- collection = new ListFeatureCollection(collectionType, features);
-
- Transaction transaction = new DefaultTransaction("create");
-
- String typeName = datastore.getTypeNames()[0];
- SimpleFeatureSource featureSource = datastore.getFeatureSource(typeName);
-
- if (featureSource instanceof SimpleFeatureStore)
- {
- SimpleFeatureStore featureStore = (SimpleFeatureStore) featureSource;
-
- featureStore.setTransaction(transaction);
-
- featureStore.addFeatures(collection);
- transaction.commit();
-
- transaction.close();
- }
- else
- {
- throw new Exception(typeName + " does not support read/write access");
- }
-
- // zip the file
- DirectoryZip.zip(outDir, file);
-
- // clean up
- for (File f : outDir.listFiles()) {
- f.delete();
- }
- outDir.delete();
-
- } catch (Exception e) {
- e.printStackTrace();
- } finally {
- if (gtx != null) gtx.rollback();
- if (atx != null) atx.rollbackIfOpen();
- }
- }
-
- public static enum Type { ROUTES, STOPS };
-}
diff --git a/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGisExport.java b/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGisExport.java
deleted file mode 100755
index 5467a7c28..000000000
--- a/src/main/java/com/conveyal/datatools/editor/jobs/ProcessGisExport.java
+++ /dev/null
@@ -1,199 +0,0 @@
-package com.conveyal.datatools.editor.jobs;
-
-
-public class ProcessGisExport implements Runnable {
- @Override
- public void run() {
-
- }
-/*
- private Long _gisExportId;
-
-
- public ProcessGisExport(Long gisExportId)
- {
- this._gisExportId = gisExportId;
- }
-
- public void doJob() {
-
- String exportName = "gis_" + this._gisExportId;
-
- File outputZipFile = new File(Play.configuration.getProperty("application.publicDataDirectory"), exportName + ".zip");
-
- File outputDirectory = new File(Play.configuration.getProperty("application.publicDataDirectory"), exportName);
-
- LOG.info("outfile path:" + outputDirectory.getAbsolutePath());
-
- File outputShapefile = new File(outputDirectory, exportName + ".shp");
-
- try
- {
- GisExport gisExport = null;
-
- while(gisExport == null)
- {
- gisExport = GisExport.findById(this._gisExportId);
- Thread.sleep(1000);
-
- LOG.info("Waiting for gisExport object...");
- }
-
-
- if(!outputDirectory.exists())
- {
- outputDirectory.mkdir();
- }
-
- ShapefileDataStoreFactory com.conveyal.datatools.editor.datastoreFactory = new ShapefileDataStoreFactory();
-
- Map params = new HashMap();
- params.put("url", outputShapefile.toURI().toURL());
- params.put("create spatial index", Boolean.TRUE);
-
- ShapefileDataStore com.conveyal.datatools.editor.datastore = (ShapefileDataStore)dataStoreFactory.createNewDataStore(params);
- com.conveyal.datatools.editor.datastore.forceSchemaCRS(DefaultGeographicCRS.WGS84);
-
- SimpleFeatureType STOP_TYPE = DataUtilities.createType(
- "Stop",
- "location:Point:srid=4326," +
- "name:String," +
- "code:String," +
- "desc:String," +
- "id:String," +
- "agency:String"
- );
-
- SimpleFeatureType ROUTE_TYPE = DataUtilities.createType(
- "Route", // <- the name for our feature type
- "route:LineString:srid=4326," +
- "patternName:String," +
- "shortName:String," +
- "longName:String," +
- "desc:String," +
- "type:String," +
- "url:String," +
- "routeColor:String," +
- "routeTextColor:String," +
- "agency:String"
- );
-
- SimpleFeatureCollection collection;
-
- SimpleFeatureType collectionType;
-
- SimpleFeatureBuilder featureBuilder = null;
-
- List features = new ArrayList();
-
- if(gisExport.type.equals(GisUploadType.STOPS))
- {
- collectionType = STOP_TYPE;
- com.conveyal.datatools.editor.datastore.createSchema(STOP_TYPE);
- featureBuilder = new SimpleFeatureBuilder(STOP_TYPE);
-
- List stops = Stop.find("agency in (:ids)").bind("ids", gisExport.feeds).fetch();
-
- for(Stop s : stops)
- {
- featureBuilder.add(s.locationPoint());
- featureBuilder.add(s.stopName);
- featureBuilder.add(s.stopCode);
- featureBuilder.add(s.stopDesc);
- featureBuilder.add(s.gtfsStopId);
- featureBuilder.add(s.agency.name);
- SimpleFeature feature = featureBuilder.buildFeature(null);
- features.add(feature);
- }
- }
- else if(gisExport.type.equals(GisUploadType.ROUTES))
- {
- collectionType = ROUTE_TYPE;
- com.conveyal.datatools.editor.datastore.createSchema(ROUTE_TYPE);
- featureBuilder = new SimpleFeatureBuilder(ROUTE_TYPE);
-
- List routes = Route.find("agency in (:ids)").bind("ids", gisExport.feeds).fetch();
-
- // check for duplicates
-
- // HashMap existingRoutes = new HashMap();
-
- for(Route r : routes)
- {
-// String routeId = r.routeLongName + "_" + r.routeDesc + "_ " + r.phone.id;
-//
-// if(existingRoutes.containsKey(routeId))
-// continue;
-// else
-// existingRoutes.put(routeId, true);
-
-
- List patterns = TripPattern.find("route = ?", r).fetch();
- for(TripPattern tp : patterns)
- {
- if(tp.shape == null)
- continue;
-
- featureBuilder.add(tp.shape.shape);
- featureBuilder.add(tp.name);
- featureBuilder.add(r.routeShortName);
- featureBuilder.add(r.routeLongName);
- featureBuilder.add(r.routeDesc);
-
- if(r.routeType != null)
- featureBuilder.add(r.routeType.toString());
- else
- featureBuilder.add("");
-
- featureBuilder.add(r.routeUrl);
- featureBuilder.add(r.routeColor);
- featureBuilder.add(r.routeTextColor);
- featureBuilder.add(r.agency.name);
- SimpleFeature feature = featureBuilder.buildFeature(null);
- features.add(feature);
- }
- }
- }
- else
- throw new Exception("Unknown export type.");
-
- collection = new ListFeatureCollection(collectionType, features);
-
- Transaction transaction = new DefaultTransaction("create");
-
- String typeName = com.conveyal.datatools.editor.datastore.getTypeNames()[0];
- SimpleFeatureSource featureSource = com.conveyal.datatools.editor.datastore.getFeatureSource(typeName);
-
- if (featureSource instanceof SimpleFeatureStore)
- {
- SimpleFeatureStore featureStore = (SimpleFeatureStore) featureSource;
-
- featureStore.setTransaction(transaction);
-
- featureStore.addFeatures(collection);
- transaction.commit();
-
- transaction.close();
- }
- else
- {
- throw new Exception(typeName + " does not support read/write access");
- }
-
- DirectoryZip.zip(outputDirectory, outputZipFile);
- FileUtils.deleteDirectory(outputDirectory);
-
- gisExport.status = GisExportStatus.PROCESSED;
-
- gisExport.save();
-
- }
- catch(Exception e)
- {
- LOG.error("Unable to process GIS export: ", e.toString());
- e.printStackTrace();
- }
- }*/
-}
-
-
diff --git a/src/main/java/com/conveyal/datatools/editor/utils/FeatureAttributeFormatter.java b/src/main/java/com/conveyal/datatools/editor/utils/FeatureAttributeFormatter.java
index d787869d0..20a597661 100755
--- a/src/main/java/com/conveyal/datatools/editor/utils/FeatureAttributeFormatter.java
+++ b/src/main/java/com/conveyal/datatools/editor/utils/FeatureAttributeFormatter.java
@@ -31,14 +31,12 @@ public String format(SimpleFeature feature)
Integer fieldPosition = Integer.parseInt(sub.replace("#", ""));
- try
- {
+ try {
String attributeString = feature.getAttribute(fieldPosition).toString();
-
output = output.replace(sub, attributeString);
- }
- catch(Exception e) {
+ } catch (Exception e) {
LOG.warn("Index out of range.");
+ e.printStackTrace();
}
}
diff --git a/src/main/java/com/conveyal/datatools/editor/utils/JacksonSerializers.java b/src/main/java/com/conveyal/datatools/editor/utils/JacksonSerializers.java
index 4d4496899..e723a3500 100644
--- a/src/main/java/com/conveyal/datatools/editor/utils/JacksonSerializers.java
+++ b/src/main/java/com/conveyal/datatools/editor/utils/JacksonSerializers.java
@@ -1,27 +1,27 @@
package com.conveyal.datatools.editor.utils;
import com.conveyal.datatools.editor.models.transit.GtfsRouteType;
-import com.conveyal.datatools.editor.models.transit.TripDirection;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.DeserializationContext;
-import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer;
import com.fasterxml.jackson.databind.ser.std.StdScalarSerializer;
import com.fasterxml.jackson.databind.ser.std.StdSerializer;
import com.google.common.io.BaseEncoding;
+import org.mapdb.Fun.Tuple2;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
-import org.mapdb.Fun.Tuple2;
-
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
public class JacksonSerializers {
+ private static final Logger LOG = LoggerFactory.getLogger(JacksonSerializers.class);
private static final BaseEncoding encoder = BaseEncoding.base64Url();
public static class Tuple2Serializer extends StdScalarSerializer> {
@@ -139,17 +139,19 @@ public LocalDateDeserializer () {
@Override
public LocalDate deserialize(JsonParser jp, DeserializationContext arg1) throws IOException {
LocalDate date;
+ String dateText = jp.getText();
try {
- date = LocalDate.parse(jp.getText(), DateTimeFormatter.BASIC_ISO_DATE);
+ date = LocalDate.parse(dateText, DateTimeFormatter.BASIC_ISO_DATE);
return date;
} catch (Exception jsonException) {
// This is here to catch any loads of database dumps that happen to have the old java.util.Date
// field type in validationResult. God help us.
- System.out.println("Error parsing date value, trying legacy java.util.Date date format");
+ LOG.warn("Error parsing date value: `{}`, trying legacy java.util.Date date format", dateText);
try {
date = Instant.ofEpochMilli(jp.getValueAsLong()).atZone(ZoneOffset.UTC).toLocalDate();
return date;
} catch (Exception e) {
+ LOG.warn("Error parsing date value: `{}`", dateText);
e.printStackTrace();
}
}
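
A small standalone illustration of the two date representations the deserializer above must handle (a BASIC_ISO_DATE string versus a legacy java.util.Date epoch-millisecond value); the sample values are arbitrary.

    import java.time.Instant;
    import java.time.LocalDate;
    import java.time.ZoneOffset;
    import java.time.format.DateTimeFormatter;

    public class DateFallbackExample {
        public static void main(String[] args) {
            // Current format: a BASIC_ISO_DATE string such as "20190301".
            LocalDate fromIso = LocalDate.parse("20190301", DateTimeFormatter.BASIC_ISO_DATE);
            // Legacy format: java.util.Date serialized as epoch milliseconds.
            LocalDate fromEpoch = Instant.ofEpochMilli(1551398400000L).atZone(ZoneOffset.UTC).toLocalDate();
            System.out.println(fromIso + " / " + fromEpoch); // both print 2019-03-01
        }
    }
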
diff --git a/src/main/java/com/conveyal/datatools/manager/ConvertMain.java b/src/main/java/com/conveyal/datatools/manager/ConvertMain.java
index 00df34272..5923ec9d0 100644
--- a/src/main/java/com/conveyal/datatools/manager/ConvertMain.java
+++ b/src/main/java/com/conveyal/datatools/manager/ConvertMain.java
@@ -10,8 +10,9 @@
import com.conveyal.datatools.manager.models.FeedSource;
import com.conveyal.datatools.manager.persistence.Persistence;
import org.apache.commons.io.FileUtils;
-import org.eclipse.jetty.util.ConcurrentHashSet;
import org.mapdb.Fun;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.File;
import java.nio.charset.Charset;
@@ -50,6 +51,8 @@
*
*/
public class ConvertMain {
+ public static final Logger LOG = LoggerFactory.getLogger(ConvertMain.class);
+
// Feed ID constants for testing.
private static final String CORTLAND_FEED_ID = "c5bdff54-82fa-47ce-ad6e-3c6517563992";
public static final String AMTRAK_FEED_ID = "be5b775b-6811-4522-bbf6-1a408e7cf3f8";
@@ -73,7 +76,7 @@ public static void main(String[] args) throws Exception {
String jsonString = FileUtils.readFileToString(new File(args[3]), Charset.defaultCharset());
boolean result = DumpController.updateSnapshotMetadata(jsonString);
if (result) {
- System.out.println("Snapshot metadata update successful!");
+ LOG.info("Snapshot metadata update successful!");
}
// Done.
System.exit(0);
@@ -87,26 +90,26 @@ public static void main(String[] args) throws Exception {
boolean force = args.length > 3 && "true".equals(args[3]);
DumpController.validateAll(true, force, null);
} else {
- System.out.println("Skipping JSON load and feed version load/validation due to snapshotsOnly flag");
+ LOG.info("Skipping JSON load and feed version load/validation due to snapshotsOnly flag");
}
// STEP 3: For each feed source in MongoDB, load all snapshots (and current editor buffer) into Postgres DB.
// STEP 3A: For each snapshot/editor DB, create a snapshot Mongo object for the feed source with the FeedLoadResult.
migrateEditorFeeds();
- System.out.println("Done queueing!!!!!!!!");
+ LOG.info("Done queueing!!!!!!!!");
int totalJobs = StatusController.getAllJobs().size();
while (!StatusController.filterActiveJobs(StatusController.getAllJobs()).isEmpty()) {
// While there are still active jobs, continue waiting.
- ConcurrentHashSet activeJobs = StatusController.filterActiveJobs(StatusController.getAllJobs());
- System.out.println(String.format("%d/%d jobs still active. Checking for completion again in 5 seconds...", activeJobs.size(), totalJobs));
-// System.out.println(String.join(", ", activeJobs.stream().map(job -> job.name).collect(Collectors.toList())));
+ Set activeJobs = StatusController.filterActiveJobs(StatusController.getAllJobs());
+ LOG.info(String.format("%d/%d jobs still active. Checking for completion again in 5 seconds...", activeJobs.size(), totalJobs));
+// LOG.info(String.join(", ", activeJobs.stream().map(job -> job.name).collect(Collectors.toList())));
int jobsInExecutor = ((ThreadPoolExecutor) DataManager.heavyExecutor).getActiveCount();
- System.out.println(String.format("Jobs in thread pool executor: %d", jobsInExecutor));
- System.out.println(String.format("Jobs completed by executor: %d", ((ThreadPoolExecutor) DataManager.heavyExecutor).getCompletedTaskCount()));
+ LOG.info(String.format("Jobs in thread pool executor: %d", jobsInExecutor));
+ LOG.info(String.format("Jobs completed by executor: %d", ((ThreadPoolExecutor) DataManager.heavyExecutor).getCompletedTaskCount()));
Thread.sleep(5000);
}
long durationInMillis = System.currentTimeMillis() - startTime;
- System.out.println(String.format("MIGRATION COMPLETED IN %d SECONDS.", TimeUnit.MILLISECONDS.toSeconds(durationInMillis)));
+ LOG.info("MIGRATION COMPLETED IN {} SECONDS.", TimeUnit.MILLISECONDS.toSeconds(durationInMillis));
System.exit(0);
}
@@ -117,7 +120,7 @@ public static boolean migrateEditorFeeds (String ...feedIdsToSkip) {
long startTime = System.currentTimeMillis();
int count = 0;
int snapshotCount = gtx.snapshots.values().size();
- System.out.println(snapshotCount + " snapshots to convert");
+ LOG.info(snapshotCount + " snapshots to convert");
Set feedSourcesEncountered = new HashSet<>();
// Iterate over the provided snapshots and convert each one. Note: this will skip snapshots for feed IDs that
@@ -132,7 +135,7 @@ public static boolean migrateEditorFeeds (String ...feedIdsToSkip) {
// Only migrate the feeds that have a feed source record in the MongoDB.
if (feedIdsToSkip != null && Arrays.asList(feedIdsToSkip).contains(feedSourceId)) {
// If list of feed IDs to skip is provided and the current feed ID matches, skip it.
- System.out.println("Skipping feed. ID found in list to skip. id: " + feedSourceId);
+ LOG.info("Skipping feed. ID found in list to skip. id: " + feedSourceId);
continue;
}
if (!feedSourcesEncountered.contains(feedSource.id)) {
@@ -143,18 +146,18 @@ public static boolean migrateEditorFeeds (String ...feedIdsToSkip) {
}
ConvertEditorMapDBToSQL convertEditorMapDBToSQL = new ConvertEditorMapDBToSQL(snapshot.id.a, snapshot.id.b);
DataManager.heavyExecutor.execute(convertEditorMapDBToSQL);
- System.out.println(count + "/" + snapshotCount + " snapshot conversion queued");
+ LOG.info(count + "/" + snapshotCount + " snapshot conversion queued");
feedSourcesEncountered.add(feedSource.id);
count++;
} else {
- System.out.println("Not converting snapshot. Feed source Id does not exist in application data" + feedSourceId);
+ LOG.info("Not converting snapshot. Feed source Id does not exist in application data" + feedSourceId);
}
}
// long duration = System.currentTimeMillis() - startTime;
-// System.out.println("Converting " + snapshotCount + " snapshots took " + TimeUnit.MILLISECONDS.toMinutes(duration) + " minutes");
+// LOG.info("Converting " + snapshotCount + " snapshots took " + TimeUnit.MILLISECONDS.toMinutes(duration) + " minutes");
return true;
} catch (Exception e) {
- System.out.println("Migrating editor feeds FAILED");
+ LOG.error("Migrating editor feeds FAILED");
e.printStackTrace();
return false;
} finally {
diff --git a/src/main/java/com/conveyal/datatools/manager/DataManager.java b/src/main/java/com/conveyal/datatools/manager/DataManager.java
index ef7cfb609..ff7027931 100644
--- a/src/main/java/com/conveyal/datatools/manager/DataManager.java
+++ b/src/main/java/com/conveyal/datatools/manager/DataManager.java
@@ -3,11 +3,13 @@
import com.bugsnag.Bugsnag;
import com.conveyal.datatools.common.status.MonitorableJob;
import com.conveyal.datatools.common.utils.CorsFilter;
+import com.conveyal.datatools.common.utils.Scheduler;
import com.conveyal.datatools.editor.controllers.EditorLockController;
import com.conveyal.datatools.editor.controllers.api.EditorControllerImpl;
import com.conveyal.datatools.editor.controllers.api.SnapshotController;
import com.conveyal.datatools.manager.auth.Auth0Connection;
import com.conveyal.datatools.manager.controllers.DumpController;
+import com.conveyal.datatools.manager.controllers.api.AppInfoController;
import com.conveyal.datatools.manager.controllers.api.DeploymentController;
import com.conveyal.datatools.manager.controllers.api.FeedSourceController;
import com.conveyal.datatools.manager.controllers.api.FeedVersionController;
@@ -15,7 +17,6 @@
import com.conveyal.datatools.manager.controllers.api.NoteController;
import com.conveyal.datatools.manager.controllers.api.OrganizationController;
import com.conveyal.datatools.manager.controllers.api.ProjectController;
-import com.conveyal.datatools.manager.controllers.api.AppInfoController;
import com.conveyal.datatools.manager.controllers.api.ServerController;
import com.conveyal.datatools.manager.controllers.api.StatusController;
import com.conveyal.datatools.manager.controllers.api.UserController;
@@ -24,18 +25,17 @@
import com.conveyal.datatools.manager.extensions.transitfeeds.TransitFeedsFeedResource;
import com.conveyal.datatools.manager.extensions.transitland.TransitLandFeedResource;
import com.conveyal.datatools.manager.jobs.FeedUpdater;
-import com.conveyal.datatools.manager.models.Project;
import com.conveyal.datatools.manager.persistence.FeedStore;
import com.conveyal.datatools.manager.persistence.Persistence;
import com.conveyal.gtfs.GTFS;
-import com.conveyal.gtfs.GraphQLMain;
+import com.conveyal.gtfs.GraphQLController;
import com.conveyal.gtfs.loader.Table;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import com.google.common.collect.Sets;
import com.google.common.io.Resources;
import org.apache.commons.io.Charsets;
-import org.eclipse.jetty.util.ConcurrentHashSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import spark.utils.IOUtils;
@@ -49,15 +49,15 @@
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.conveyal.datatools.common.utils.SparkUtils.logRequest;
import static com.conveyal.datatools.common.utils.SparkUtils.logResponse;
+import static spark.Service.SPARK_DEFAULT_PORT;
import static spark.Spark.after;
import static spark.Spark.before;
import static spark.Spark.exception;
@@ -69,6 +69,7 @@
* referenced throughout the application.
*/
public class DataManager {
+ public static final String GTFS_PLUS_SUBDIR = "gtfsplus";
private static final Logger LOG = LoggerFactory.getLogger(DataManager.class);
// These fields hold YAML files that represent the server configuration.
@@ -84,13 +85,12 @@ public class DataManager {
// TODO: define type for ExternalFeedResource Strings
public static final Map feedResources = new HashMap<>();
- // Stores jobs underway by user ID.
- public static Map> userJobsMap = new ConcurrentHashMap<>();
+ /**
+ * Stores jobs underway by user ID. NOTE: any set created and stored here must be created with
+ * {@link Sets#newConcurrentHashSet()} or a similar thread-safe Set.
+ */
+ public static Map> userJobsMap = new ConcurrentHashMap<>();
- // Stores ScheduledFuture objects that kick off runnable tasks (e.g., fetch project feeds at 2:00 AM).
- public static Map autoFetchMap = new HashMap<>();
- // Scheduled executor that handles running scheduled jobs.
- public final static ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
// ObjectMapper that loads in YAML config files
private static final ObjectMapper yamlMapper = new ObjectMapper(new YAMLFactory());
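
A minimal sketch of honoring the thread-safety note on userJobsMap when registering a job for a user; the map here is a stand-in that uses String job IDs rather than MonitorableJob instances.

    import com.google.common.collect.Sets;
    import java.util.Map;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class UserJobsSketch {
        // Mirrors the manager's map shape: user ID -> thread-safe set of jobs.
        private static final Map<String, Set<String>> userJobsMap = new ConcurrentHashMap<>();

        public static void registerJob(String userId, String jobId) {
            // computeIfAbsent keeps the read-create-put sequence atomic on ConcurrentHashMap,
            // and Sets.newConcurrentHashSet() satisfies the thread-safety note above.
            userJobsMap.computeIfAbsent(userId, id -> Sets.newConcurrentHashSet()).add(jobId);
        }

        public static void main(String[] args) {
            registerJob("user-1", "job-a");
            registerJob("user-1", "job-b");
            System.out.println(userJobsMap); // {user-1=[job-a, job-b]} (iteration order may vary)
        }
    }
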
@@ -107,7 +107,9 @@ public class DataManager {
public static String commit = "";
public static boolean useS3;
- private static final String API_PREFIX = "/api/manager/";
+ public static final String API_PREFIX = "/api/manager/";
+ // Application port defaults to Spark's default.
+ public static int PORT = SPARK_DEFAULT_PORT;
private static final String GTFS_API_PREFIX = API_PREFIX + "secure/gtfs/";
private static final String EDITOR_API_PREFIX = "/api/editor/";
public static final String publicPath = "(" + API_PREFIX + "|" + EDITOR_API_PREFIX + ")public/.*";
@@ -119,14 +121,6 @@ public static void main(String[] args) throws IOException {
initializeApplication(args);
- // initialize map of auto fetched projects
- for (Project project : Persistence.projects.getAll()) {
- if (project.autoFetchFeeds) {
- ScheduledFuture scheduledFuture = ProjectController.scheduleAutoFeedFetch(project, 1);
- autoFetchMap.put(project.id, scheduledFuture);
- }
- }
-
registerRoutes();
registerExternalResources();
@@ -137,17 +131,15 @@ static void initializeApplication(String[] args) throws IOException {
loadConfig(args);
loadProperties();
- String bugsnagKey = getConfigPropertyAsText("BUGSNAG_KEY");
- if (bugsnagKey != null) {
- new Bugsnag(bugsnagKey);
- }
+ getBugsnag();
// FIXME: hack to statically load FeedStore
LOG.info(FeedStore.class.getSimpleName());
// Optionally set port for server. Otherwise, Spark defaults to 4567.
- if (getConfigProperty("application.port") != null) {
- port(Integer.parseInt(getConfigPropertyAsText("application.port")));
+ if (hasConfigProperty("application.port")) {
+ PORT = Integer.parseInt(getConfigPropertyAsText("application.port"));
+ port(PORT);
}
useS3 = "true".equals(getConfigPropertyAsText("application.data.use_s3_storage"));
@@ -160,11 +152,26 @@ static void initializeApplication(String[] args) throws IOException {
feedBucket = getConfigPropertyAsText("application.data.gtfs_s3_bucket");
bucketFolder = FeedStore.s3Prefix;
+ // create application gtfs folder if it doesn't already exist
+ new File(getConfigPropertyAsText("application.data.gtfs")).mkdirs();
+
// Initialize MongoDB storage
Persistence.initialize();
+
+ // Initialize scheduled tasks
+ Scheduler.initialize();
}
- /**
+ // Initialize Bugsnag (returns null if no BUGSNAG_KEY is configured).
+ public static Bugsnag getBugsnag() {
+ String bugsnagKey = getConfigPropertyAsText("BUGSNAG_KEY");
+ if (bugsnagKey != null) {
+ return new Bugsnag(bugsnagKey);
+ }
+ return null;
+ }
+
+ /**
* Load some properties files to obtain information about this project.
* This method reads in two files:
* - src/main/resources/.properties
@@ -205,7 +212,7 @@ static void registerRoutes() throws IOException {
CorsFilter.apply();
// Initialize GTFS GraphQL API service
// FIXME: Add user permissions check to ensure user has access to feeds.
- GraphQLMain.initialize(GTFS_DATA_SOURCE, GTFS_API_PREFIX);
+ GraphQLController.initialize(GTFS_DATA_SOURCE, GTFS_API_PREFIX);
// Register core API routes
AppInfoController.register(API_PREFIX);
ProjectController.register(API_PREFIX);
@@ -236,7 +243,6 @@ static void registerRoutes() throws IOException {
new EditorControllerImpl(EDITOR_API_PREFIX, Table.STOPS, DataManager.GTFS_DATA_SOURCE);
new EditorControllerImpl(EDITOR_API_PREFIX, Table.TRIPS, DataManager.GTFS_DATA_SOURCE);
// TODO: Add transfers.txt controller?
-// GisController.register(EDITOR_API_PREFIX);
}
// log all exceptions to system.out
@@ -312,7 +318,7 @@ static void registerRoutes() throws IOException {
// Return 404 for any API path that is not configured.
// IMPORTANT: Any API paths must be registered before this halt.
get("/api/" + "*", (request, response) -> {
- haltWithMessage(request, 404, "No API route configured for this path.");
+ logMessageAndHalt(request, 404, "No API route configured for this path.");
return null;
});
diff --git a/src/main/java/com/conveyal/datatools/manager/UpdateSQLFeedsMain.java b/src/main/java/com/conveyal/datatools/manager/UpdateSQLFeedsMain.java
index ac053f67d..4c40eaf6a 100644
--- a/src/main/java/com/conveyal/datatools/manager/UpdateSQLFeedsMain.java
+++ b/src/main/java/com/conveyal/datatools/manager/UpdateSQLFeedsMain.java
@@ -7,6 +7,7 @@
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import static com.conveyal.datatools.manager.DataManager.initializeApplication;
@@ -22,9 +23,13 @@
* Argument descriptions:
* 1. path to env.yml
* 2. path to server.yml
- * 3. update sql statement to apply to optionally filtered feeds
- * 4. field to filter feeds on
- * 5. value (corresponding to field in arg 3) to filter feeds on (omit to use NULL as value)
+ * 3. string update sql statement to apply to optionally filtered feeds (this should contain a {@link java.util.Formatter}
+ * compatible string substitution for the namespace argument).
+ * 4. string field to filter feeds on
+ * 5. string value (corresponding to the field in argument 4) to filter feeds on (omit to use NULL as the value, or
+ * comma-separate to include multiple values)
+ * 6. boolean (optional) whether to run SQL as a test run (i.e., roll back changes and do not commit). If missing, this
+ * defaults to true.
*
* Sample arguments:
*
@@ -45,8 +50,14 @@ public static void main(String[] args) throws IOException, SQLException {
String updateSql = args[2];
// The next arguments will apply a where clause to conditionally to apply the updates.
String field = args.length > 3 ? args[3] : null;
- String value = args.length > 4 ? args[4] : null;
- List failedNamespace = updateFeedsWhere(updateSql, field, value);
+ String valuesArg = args.length > 4 ? args[4] : null;
+ String[] values;
+ // Set value to null if the string value = "null".
+ if ("null".equals(valuesArg) || valuesArg == null) values = null;
+ else values = valuesArg.split(",");
+ // If test run arg is not included, default to true. Else, only set to false if value equals false.
+ boolean testRun = args.length <= 5 || !"false".equals(args[5]);
+ List failedNamespace = updateFeedsWhere(updateSql, field, values, testRun);
System.out.println("Finished!");
System.out.println("Failed namespaces: " + String.join(", ", failedNamespace));
System.exit(0);
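
Since the actual sample arguments sit outside this hunk, here is a purely hypothetical invocation matching the argument list documented above; every path, SQL statement, field, and value is illustrative only.

    public class UpdateSqlFeedsInvocationExample {
        public static void main(String[] args) throws Exception {
            // Hypothetical arguments only; the real sample arguments live in the source file.
            String[] exampleArgs = {
                "/path/to/env.yml",                                                   // 1. env config
                "/path/to/server.yml",                                                // 2. server config
                "update %s.trips set bikes_allowed = 0 where bikes_allowed is null",  // 3. SQL with %s namespace substitution
                "filename",                                                           // 4. field to filter feeds on (placeholder)
                "feed_one.zip,feed_two.zip",                                          // 5. comma-separated filter values (placeholders)
                "true"                                                                // 6. test run: changes are rolled back, not committed
            };
            com.conveyal.datatools.manager.UpdateSQLFeedsMain.main(exampleArgs);
        }
    }
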
@@ -56,40 +67,51 @@ public static void main(String[] args) throws IOException, SQLException {
*
* @param updateSql
* @param field
- * @param value
+ * @param values
* @return
* @throws SQLException
*/
- private static List updateFeedsWhere(String updateSql, String field, String value)throws SQLException {
+ private static List updateFeedsWhere(String updateSql, String field, String[] values, boolean testRun)throws SQLException {
if (updateSql == null) throw new RuntimeException("Update SQL must not be null!");
// Keep track of failed namespaces for convenient printing at end of method.
List failedNamespace = new ArrayList<>();
// Select feeds migrated from MapDB
String selectFeedsSql = "select namespace from feeds";
if (field != null) {
- // Add where clause if field is not null
+ // Add a WHERE ... IN clause if field is not null
// NOTE: if value is null, where clause will be executed accordingly (i.e., WHERE field = null)
- String operator = value == null ? "IS NULL" : "= ?";
+ String operator = values == null
+ ? "IS NULL"
+ : String.format("in (%s)", String.join(", ", Collections.nCopies(values.length, "?")));
selectFeedsSql = String.format("%s where %s %s", selectFeedsSql, field, operator);
}
Connection connection = DataManager.GTFS_DATA_SOURCE.getConnection();
- // Set auto-commit to true.
- connection.setAutoCommit(true);
+ if (!testRun) {
+ System.out.println("Auto-committing each statement");
+ // Set auto-commit to true.
+ connection.setAutoCommit(true);
+ } else {
+ System.out.println("TEST RUN. Changes will NOT be committed (a rollback occurs at the end of method).");
+ }
PreparedStatement selectStatement = connection.prepareStatement(selectFeedsSql);
- // Set filter value if not null (otherwise, IS NULL has already been populated).
- if (value != null) {
- selectStatement.setString(1, value);
+ if (values != null) {
+ // Set filter values if not null (otherwise, IS NULL has already been populated).
+ int oneBasedIndex = 1;
+ for (String value : values) {
+ selectStatement.setString(oneBasedIndex++, value);
+ }
}
System.out.println(selectStatement.toString());
ResultSet resultSet = selectStatement.executeQuery();
int successCount = 0;
while (resultSet.next()) {
+ // Use the string found in the result as the table prefix for the following update query.
String namespace = resultSet.getString(1);
- String updateLocationSql = String.format(updateSql, namespace);
+ String updateTableSql = String.format(updateSql, namespace);
Statement statement = connection.createStatement();
try {
- int updated = statement.executeUpdate(updateLocationSql);
- System.out.println(updateLocationSql);
+ System.out.println(updateTableSql);
+ int updated = statement.executeUpdate(updateTableSql);
System.out.println(String.format("Updated rows: %d", updated));
successCount++;
} catch (SQLException e) {
@@ -99,7 +121,13 @@ private static List updateFeedsWhere(String updateSql, String field, Str
}
}
System.out.println(String.format("Updated %d tables.", successCount));
- // No need to commit the transaction because of auto-commit
+ // No need to commit the transaction because of auto-commit above (in fact, "manually" committing is not
+ // permitted with auto-commit enabled).
+ if (testRun) {
+ // Rollback changes if performing a test run.
+ System.out.println("Rolling back changes...");
+ connection.rollback();
+ }
connection.close();
return failedNamespace;
}
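As a clarifying aside (not part of the patch), the snippet below sketches the two ideas used in `updateFeedsWhere` above: building a `WHERE ... IN (?, ?, ...)` clause with one placeholder per filter value, and leaving auto-commit off for a test run so all changes can be rolled back. The JDBC URL, table, and column names are hypothetical.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Collections;

public class TestRunSketch {
    public static void main(String[] args) throws SQLException {
        boolean testRun = true; // mirrors the optional 6th CLI argument
        String[] values = {"feed_a", "feed_b"}; // hypothetical filter values
        // Build "in (?, ?)" with one placeholder per value, as updateFeedsWhere does.
        String placeholders = String.join(", ", Collections.nCopies(values.length, "?"));
        String selectSql = String.format("select namespace from feeds where filename in (%s)", placeholders);
        try (Connection connection = DriverManager.getConnection("jdbc:postgresql://localhost/gtfs")) {
            connection.setAutoCommit(!testRun); // auto-commit only for real runs
            try (PreparedStatement statement = connection.prepareStatement(selectSql)) {
                for (int i = 0; i < values.length; i++) statement.setString(i + 1, values[i]);
                statement.executeQuery();
            }
            if (testRun) connection.rollback(); // discard any changes made during a test run
        }
    }
}
```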
diff --git a/src/main/java/com/conveyal/datatools/manager/auth/Actions.java b/src/main/java/com/conveyal/datatools/manager/auth/Actions.java
new file mode 100644
index 000000000..f59ebd1b2
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/auth/Actions.java
@@ -0,0 +1,9 @@
+package com.conveyal.datatools.manager.auth;
+
+/**
+ * The set of request actions that a user can take on application entities. These are checked
+ * against the requesting user's permissions to determine whether the request is authorized.
+ */
+public enum Actions {
+ CREATE, EDIT, MANAGE, VIEW
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/auth/Auth0AccessToken.java b/src/main/java/com/conveyal/datatools/manager/auth/Auth0AccessToken.java
new file mode 100644
index 000000000..2d439c283
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/auth/Auth0AccessToken.java
@@ -0,0 +1,31 @@
+package com.conveyal.datatools.manager.auth;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+
+import java.util.Date;
+
+/**
+ * This represents the Auth0 API access token that is needed for requests to the v2 Management API. This token can be
+ * retrieved by sending a request to the oauth token endpoint: https://auth0.com/docs/api/management/v2/get-access-tokens-for-production
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class Auth0AccessToken {
+ public String access_token;
+ /** Seconds until token expires. */
+ public int expires_in;
+ public String scope;
+ public String token_type;
+ /**
+ * Time when the object was instantiated. This is used in conjunction with expires_in to determine if a token is
+ * still valid.
+ */
+ @JsonIgnore
+ public long creation_time = new Date().getTime();
+
+ /** Helper method to determine the time in milliseconds since epoch that this token expires. */
+ @JsonIgnore
+ public long getExpirationTime() {
+ return this.expires_in * 1000 + this.creation_time;
+ }
+}
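To illustrate (this sketch is not in the patch), `expires_in` (seconds) and `creation_time` (milliseconds since epoch) together determine whether a cached token is still usable, which is exactly the check `Auth0Users.getApiToken` performs before reusing a token. The field values below are hypothetical.

```java
import com.conveyal.datatools.manager.auth.Auth0AccessToken;

import java.util.Date;

public class TokenExpirySketch {
    public static void main(String[] args) {
        Auth0AccessToken token = new Auth0AccessToken(); // creation_time is set on instantiation
        token.access_token = "example-token"; // hypothetical value
        token.expires_in = 86400;             // hypothetical 24-hour lifetime
        long now = new Date().getTime();
        // A token is reusable while its expiration time is still in the future.
        boolean stillValid = token.getExpirationTime() > now;
        long minutesToExpiration = (token.getExpirationTime() - now) / 1000 / 60;
        System.out.println("valid=" + stillValid + ", minutes left=" + minutesToExpiration);
    }
}
```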
diff --git a/src/main/java/com/conveyal/datatools/manager/auth/Auth0Connection.java b/src/main/java/com/conveyal/datatools/manager/auth/Auth0Connection.java
index d8c1f4ab4..80c5942ce 100644
--- a/src/main/java/com/conveyal/datatools/manager/auth/Auth0Connection.java
+++ b/src/main/java/com/conveyal/datatools/manager/auth/Auth0Connection.java
@@ -1,7 +1,6 @@
package com.conveyal.datatools.manager.auth;
import com.auth0.jwt.JWTVerifier;
-import com.auth0.jwt.JWTVerifyException;
import com.auth0.jwt.pem.PemReader;
import com.conveyal.datatools.manager.DataManager;
import com.conveyal.datatools.manager.models.FeedSource;
@@ -12,17 +11,16 @@
import spark.Request;
import java.io.IOException;
-import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.security.NoSuchProviderException;
import java.security.PublicKey;
-import java.security.SignatureException;
import java.security.spec.InvalidKeySpecException;
import java.util.Map;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.conveyal.datatools.manager.DataManager.getConfigPropertyAsText;
import static com.conveyal.datatools.manager.DataManager.hasConfigProperty;
+import static com.conveyal.datatools.manager.controllers.api.UserController.inTestingEnvironment;
/**
* This handles verifying the Auth0 token passed in the Auth header of Spark HTTP requests.
@@ -46,24 +44,39 @@ public class Auth0Connection {
* @param req Spark request object
*/
public static void checkUser(Request req) {
- if (authDisabled()) {
- // If in a development environment, assign a mock profile to request attribute and skip authentication.
- req.attribute("user", new Auth0UserProfile("mock@example.com", "user_id:string"));
+ if (authDisabled() || inTestingEnvironment()) {
+ // If in a development or testing environment, assign a mock profile of an admin user to the request
+ // attribute and skip authentication.
+ Auth0UserProfile.DatatoolsInfo adminDatatoolsInfo = new Auth0UserProfile.DatatoolsInfo();
+ adminDatatoolsInfo.setPermissions(
+ new Auth0UserProfile.Permission[]{
+ new Auth0UserProfile.Permission("administer-application", new String[]{})
+ }
+ );
+ adminDatatoolsInfo.setClientId(DataManager.getConfigPropertyAsText("AUTH0_CLIENT_ID"));
+
+ Auth0UserProfile.AppMetadata adminAppMetaData = new Auth0UserProfile.AppMetadata();
+ adminAppMetaData.setDatatoolsInfo(adminDatatoolsInfo);
+
+ Auth0UserProfile adminUser = new Auth0UserProfile("mock@example.com", "user_id:string");
+ adminUser.setApp_metadata(adminAppMetaData);
+
+ req.attribute("user", adminUser);
return;
}
// Check that auth header is present and formatted correctly (Authorization: Bearer [token]).
final String authHeader = req.headers("Authorization");
if (authHeader == null) {
- haltWithMessage(req, 401, "Authorization header is missing.");
+ logMessageAndHalt(req, 401, "Authorization header is missing.");
}
String[] parts = authHeader.split(" ");
if (parts.length != 2 || !"bearer".equals(parts[0].toLowerCase())) {
- haltWithMessage(req, 401, String.format("Authorization header is malformed: %s", authHeader));
+ logMessageAndHalt(req, 401, String.format("Authorization header is malformed: %s", authHeader));
}
// Retrieve token from auth header.
String token = parts[1];
if (token == null) {
- haltWithMessage(req, 401, "Could not find authorization token");
+ logMessageAndHalt(req, 401, "Could not find authorization token");
}
// Handle getting the verifier outside of the below verification try/catch, which is intended to catch issues
// with the client request. (getVerifier has its own exception/halt handling).
@@ -79,7 +92,7 @@ public static void checkUser(Request req) {
req.attribute("user", profile);
} catch (Exception e) {
LOG.warn("Login failed to verify with our authorization provider.", e);
- haltWithMessage(req, 401, "Could not verify user's token");
+ logMessageAndHalt(req, 401, "Could not verify user's token");
}
}
@@ -101,8 +114,7 @@ private static JWTVerifier getVerifier(Request req) {
} else throw new IllegalStateException("Auth0 public key or secret token must be defined in config (env.yml).");
} catch (IllegalStateException | NullPointerException | NoSuchAlgorithmException | IOException | NoSuchProviderException | InvalidKeySpecException e) {
LOG.error("Auth0 verifier configured incorrectly.");
- e.printStackTrace();
- haltWithMessage(req, 500, "Server authentication configured incorrectly.", e);
+ logMessageAndHalt(req, 500, "Server authentication configured incorrectly.", e);
}
}
return verifier;
@@ -136,8 +148,11 @@ private static void remapTokenValues(Map jwt) {
* tables in the database.
*/
public static void checkEditPrivileges(Request request) {
- if (authDisabled()) {
- // If in a development environment, skip privileges check.
+ if (authDisabled() || inTestingEnvironment()) {
+ // If in a development or testing environment, skip the privileges check so that any API
+ // endpoint can function without authorization.
+ // TODO: add unit tests for the checks below or address the suggestions from the PR review here:
+ // https://github.com/conveyal/datatools-server/pull/187#discussion_r262714708
return;
}
Auth0UserProfile userProfile = request.attribute("user");
@@ -151,13 +166,13 @@ public static void checkEditPrivileges(Request request) {
FeedSource feedSource = feedId != null ? Persistence.feedSources.getById(feedId) : null;
if (feedSource == null) {
LOG.warn("feedId {} not found", feedId);
- haltWithMessage(request, 400, "Must provide valid feedId parameter");
+ logMessageAndHalt(request, 400, "Must provide valid feedId parameter");
}
if (!request.requestMethod().equals("GET")) {
if (!userProfile.canEditGTFS(feedSource.organizationId(), feedSource.projectId, feedSource.id)) {
LOG.warn("User {} cannot edit GTFS for {}", userProfile.email, feedId);
- haltWithMessage(request, 403, "User does not have permission to edit GTFS for feedId");
+ logMessageAndHalt(request, 403, "User does not have permission to edit GTFS for feedId");
}
}
}
@@ -182,13 +197,13 @@ public static void checkGTFSPrivileges(Request request) {
FeedSource feedSource = feedId != null ? Persistence.feedSources.getById(feedId) : null;
if (feedSource == null) {
LOG.warn("feedId {} not found", feedId);
- haltWithMessage(request, 400, "Must provide valid feedId parameter");
+ logMessageAndHalt(request, 400, "Must provide valid feedId parameter");
}
if (!request.requestMethod().equals("GET")) {
if (!userProfile.canEditGTFS(feedSource.organizationId(), feedSource.projectId, feedSource.id)) {
LOG.warn("User {} cannot edit GTFS for {}", userProfile.email, feedId);
- haltWithMessage(request, 403, "User does not have permission to edit GTFS for feedId");
+ logMessageAndHalt(request, 403, "User does not have permission to edit GTFS for feedId");
}
}
}
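For context (a sketch of an assumption about wiring, not shown in this diff): `checkUser` and the privilege checks are written to be called from Spark before-filters, so every request to a protected path is authenticated before any controller runs. The path patterns below are hypothetical.

```java
import com.conveyal.datatools.manager.auth.Auth0Connection;

import static spark.Spark.before;

public class AuthFilterSketch {
    /** Register authentication/authorization filters ahead of the secure API routes. */
    public static void registerAuthFilters(String apiPrefix) {
        // Authenticate every request to a secure path (skipped when auth is disabled or in testing).
        before(apiPrefix + "secure/*", (request, response) -> Auth0Connection.checkUser(request));
        // Additionally check GTFS edit privileges on editor-style endpoints (hypothetical path).
        before(apiPrefix + "secure/feed/*", (request, response) -> Auth0Connection.checkEditPrivileges(request));
    }
}
```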
diff --git a/src/main/java/com/conveyal/datatools/manager/auth/Auth0UserProfile.java b/src/main/java/com/conveyal/datatools/manager/auth/Auth0UserProfile.java
index f6e21918e..dc81f72cd 100644
--- a/src/main/java/com/conveyal/datatools/manager/auth/Auth0UserProfile.java
+++ b/src/main/java/com/conveyal/datatools/manager/auth/Auth0UserProfile.java
@@ -6,6 +6,7 @@
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.ArrayList;
import java.util.List;
/**
@@ -72,6 +73,14 @@ public AppMetadata() {}
public void setDatatoolsInfo(DatatoolsInfo datatools) {
if (Auth0Connection.authDisabled()) return;
+ // Check whether the datatools field has been created yet. Newly created users have this set automatically,
+ // but when running in a test environment it won't be set, so create it here.
+ if (this.datatools == null) {
+ this.datatools = new ArrayList<>();
+ this.datatools.add(datatools);
+ return;
+ }
+
for(int i = 0; i < this.datatools.size(); i++) {
if (this.datatools.get(i).clientId.equals(DataManager.getConfigPropertyAsText("AUTH0_CLIENT_ID"))) {
this.datatools.set(i, datatools);
diff --git a/src/main/java/com/conveyal/datatools/manager/auth/Auth0Users.java b/src/main/java/com/conveyal/datatools/manager/auth/Auth0Users.java
index e30acb74f..64fd59c8f 100644
--- a/src/main/java/com/conveyal/datatools/manager/auth/Auth0Users.java
+++ b/src/main/java/com/conveyal/datatools/manager/auth/Auth0Users.java
@@ -1,25 +1,29 @@
package com.conveyal.datatools.manager.auth;
import com.conveyal.datatools.manager.DataManager;
-import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.http.HttpResponse;
+import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
+import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
-import java.net.URLEncoder;
-import java.util.Collection;
+import java.util.ArrayList;
+import java.util.Date;
import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
/**
* This class contains methods for querying Auth0 users using the Auth0 User Management API. Auth0 docs describing the
@@ -27,8 +31,17 @@
*/
public class Auth0Users {
private static final String AUTH0_DOMAIN = DataManager.getConfigPropertyAsText("AUTH0_DOMAIN");
- private static final String AUTH0_API_TOKEN = DataManager.getConfigPropertyAsText("AUTH0_TOKEN");
+ // This client/secret pair is for making requests for an API access token used with the Management API.
+ private static final String AUTH0_API_CLIENT = DataManager.getConfigPropertyAsText("AUTH0_API_CLIENT");
+ private static final String AUTH0_API_SECRET = DataManager.getConfigPropertyAsText("AUTH0_API_SECRET");
+ // This is the UI client ID which is currently used to synchronize the user permissions object between server and UI.
private static final String clientId = DataManager.getConfigPropertyAsText("AUTH0_CLIENT_ID");
+ private static final String MANAGEMENT_API_VERSION = "v2";
+ private static final String SEARCH_API_VERSION = "v3";
+ private static final String API_PATH = "/api/" + MANAGEMENT_API_VERSION;
+ public static final String USERS_API_PATH = API_PATH + "/users";
+ // Cached API token so that we do not have to request a new one each time a Management API request is made.
+ private static Auth0AccessToken cachedToken = null;
private static final ObjectMapper mapper = new ObjectMapper();
private static final Logger LOG = LoggerFactory.getLogger(Auth0Users.class);
@@ -43,32 +56,30 @@ public class Auth0Users {
private static URI getUrl(String searchQuery, int page, int perPage, boolean includeTotals) {
// always filter users by datatools client_id
String defaultQuery = "app_metadata.datatools.client_id:" + clientId;
- URIBuilder builder = new URIBuilder();
- builder.setScheme("https").setHost(AUTH0_DOMAIN).setPath("/api/v2/users");
+ URIBuilder builder = getURIBuilder();
+ builder.setPath(USERS_API_PATH);
builder.setParameter("sort", "email:1");
builder.setParameter("per_page", Integer.toString(perPage));
builder.setParameter("page", Integer.toString(page));
builder.setParameter("include_totals", Boolean.toString(includeTotals));
if (searchQuery != null) {
- builder.setParameter("search_engine", "v2");
+ builder.setParameter("search_engine", SEARCH_API_VERSION);
builder.setParameter("q", searchQuery + " AND " + defaultQuery);
}
else {
- builder.setParameter("search_engine", "v2");
+ builder.setParameter("search_engine", SEARCH_API_VERSION);
builder.setParameter("q", defaultQuery);
}
- URI uri = null;
+ URI uri;
try {
uri = builder.build();
-
+ return uri;
} catch (URISyntaxException e) {
e.printStackTrace();
return null;
}
-
- return uri;
}
/**
@@ -80,26 +91,123 @@ private static String doRequest(URI uri) {
HttpClient client = HttpClientBuilder.create().build();
HttpGet request = new HttpGet(uri);
-
- request.addHeader("Authorization", "Bearer " + AUTH0_API_TOKEN);
+ String apiToken = getApiToken();
+ if (apiToken == null) {
+ LOG.error("API access token is null, aborting Auth0 request");
+ return null;
+ }
+ request.addHeader("Authorization", "Bearer " + apiToken);
request.setHeader("Accept-Charset", charset);
- HttpResponse response = null;
+ HttpResponse response;
+
+ LOG.info("Making request: ({})", request.toString());
try {
response = client.execute(request);
} catch (IOException e) {
+ LOG.error("An exception occurred while making a request to Auth0");
e.printStackTrace();
+ return null;
}
String result = null;
+ if (response.getEntity() != null) {
+ try {
+ result = EntityUtils.toString(response.getEntity());
+ } catch (IOException e) {
+ LOG.error("An exception occurred while parsing a response from Auth0");
+ e.printStackTrace();
+ }
+ } else {
+ LOG.warn("No response body available to parse from Auth0 request");
+ }
+
+ int statusCode = response.getStatusLine().getStatusCode();
+ if(statusCode >= 300) {
+ LOG.warn(
+ "HTTP request to Auth0 returned error code >= 300: ({}). Body: {}",
+ request.toString(),
+ result != null ? result : ""
+ );
+ } else {
+ LOG.info("Successfully made request: ({})", request.toString());
+ }
+
+ return result;
+ }
+
+ /**
+ * Gets an Auth0 API access token for authenticating requests to the Auth0 Management API. This will either create
+ * a new token using the oauth token endpoint or grab a cached token that it has already created (if it has not
+ * expired). More information on setting this up is here: https://auth0.com/docs/api/management/v2/get-access-tokens-for-production
+ */
+ public static String getApiToken() {
+ long nowInMillis = new Date().getTime();
+ // If cached token has not expired, use it instead of requesting a new one.
+ if (cachedToken != null && cachedToken.getExpirationTime() > nowInMillis) {
+ long minutesToExpiration = (cachedToken.getExpirationTime() - nowInMillis) / 1000 / 60;
+ LOG.info("Using cached token (expires in {} minutes)", minutesToExpiration);
+ return cachedToken.access_token;
+ }
+ LOG.info("Getting new Auth0 API access token (cached token does not exist or has expired).");
+ // Create client and build URL.
+ HttpClient client = HttpClientBuilder.create().build();
+ URIBuilder builder = getURIBuilder();
+ String responseString;
try {
- result = EntityUtils.toString(response.getEntity());
- } catch (IOException e) {
+ // First get base url for use in audience URL param. (Trailing slash required.)
+ final String audienceUrl = builder.setPath(API_PATH + "/").build().toString();
+ URI uri = builder.setPath("/oauth/token").build();
+ // Make POST request to Auth0 for new token.
+ HttpPost post = new HttpPost(uri);
+ post.setHeader("content-type", "application/x-www-form-urlencoded");
+ List urlParameters = new ArrayList<>();
+ urlParameters.add(new BasicNameValuePair("grant_type", "client_credentials"));
+ urlParameters.add(new BasicNameValuePair("client_id", AUTH0_API_CLIENT));
+ urlParameters.add(new BasicNameValuePair("client_secret", AUTH0_API_SECRET));
+ urlParameters.add(new BasicNameValuePair("audience", audienceUrl));
+ post.setEntity(new UrlEncodedFormEntity(urlParameters));
+ HttpResponse response = client.execute(post);
+ // Read response code/entity.
+ int code = response.getStatusLine().getStatusCode();
+ responseString = EntityUtils.toString(response.getEntity());
+ if (code >= 300) {
+ LOG.error("Could not get Auth0 API token {}", responseString);
+ throw new IllegalStateException("Bad response for Auth0 token");
+ }
+ } catch (IllegalStateException | URISyntaxException | IOException e) {
e.printStackTrace();
+ return null;
}
+ // Parse API Token.
+ Auth0AccessToken auth0AccessToken;
+ try {
+ auth0AccessToken = mapper.readValue(responseString, Auth0AccessToken.class);
+ } catch (IOException e) {
+ LOG.error("Error parsing Auth0 API access token.", e);
+ return null;
+ }
+ if (auth0AccessToken.scope == null) {
+ // TODO: Somehow verify that the scope of the token supports the original request's operation? Right now
+ // we expect that the scope fully covers all of the operations handled by this application (i.e., update
+ // user, delete user, etc.), which is something that must be configured in the Auth0 dashboard.
+ LOG.error("API access token has invalid scope.");
+ return null;
+ }
+ // Cache token for later use and return token string.
+ setCachedApiToken(auth0AccessToken);
+ return getCachedApiToken().access_token;
+ }
- return result;
+ /** Set the cached API token to the input parameter. */
+ public static void setCachedApiToken(Auth0AccessToken accessToken) {
+ cachedToken = accessToken;
+ }
+
+ /** Get the cached API token. */
+ public static Auth0AccessToken getCachedApiToken() {
+ return cachedToken;
}
/**
@@ -119,48 +227,53 @@ public static String getAuth0Users(String queryString) {
return getAuth0Users(queryString, 0);
}
- /**
- * Get all users for this application (using the default search).
- */
- public static Collection getAll () {
- Collection users = new HashSet<>();
-
- // limited to the first 100
- URI uri = getUrl(null, 0, 100, false);
- String response = doRequest(uri);
- try {
- users = mapper.readValue(response, new TypeReference>(){});
- } catch (IOException e) {
- e.printStackTrace();
- }
- return users;
- }
-
/**
* Get a single Auth0 user for the specified ID.
*/
public static Auth0UserProfile getUserById(String id) {
-
- URIBuilder builder = new URIBuilder();
- builder.setScheme("https").setHost(AUTH0_DOMAIN).setPath("/api/v2/users/" + id);
+ URIBuilder builder = getURIBuilder();
+ builder.setPath(String.join("/", USERS_API_PATH, id));
URI uri = null;
try {
uri = builder.build();
-
} catch (URISyntaxException e) {
+ LOG.error("Unable to build URI to getUserById");
e.printStackTrace();
return null;
}
String response = doRequest(uri);
+ if (response == null) {
+ LOG.error("Auth0 request aborted due to issues during request.");
+ return null;
+ }
Auth0UserProfile user = null;
try {
user = mapper.readValue(response, Auth0UserProfile.class);
} catch (IOException e) {
+ LOG.error("Unable to parse user profile response from Auth0! Response: {}", response);
e.printStackTrace();
}
return user;
}
+ /**
+ * Creates a new URI builder and sets the scheme, port, and host according to whether a test environment is in effect.
+ */
+ private static URIBuilder getURIBuilder() {
+ URIBuilder builder = new URIBuilder();
+ if (AUTH0_DOMAIN.equals("your-auth0-domain")) {
+ // set items for testing purposes assuming use of a Wiremock server
+ builder.setScheme("http");
+ builder.setPort(8089);
+ builder.setHost("localhost");
+ } else {
+ // use live Auth0 domain
+ builder.setScheme("https");
+ builder.setHost(AUTH0_DOMAIN);
+ }
+ return builder;
+ }
+
/**
* Get users subscribed to a given target ID.
*/
@@ -168,11 +281,31 @@ public static String getUsersBySubscription(String subscriptionType, String targ
return getAuth0Users("app_metadata.datatools.subscriptions.type:" + subscriptionType + " AND app_metadata.datatools.subscriptions.target:" + target);
}
- /**
- * Get users belong to a specified organization.
- */
- public static String getUsersForOrganization(String organizationId) {
- return getAuth0Users("app_metadata.datatools.organizations.organization_id:" + organizationId);
+ public static Set getVerifiedEmailsBySubscription(String subscriptionType, String target) {
+ String json = getUsersBySubscription(subscriptionType, target);
+ JsonNode firstNode = null;
+ Set emails = new HashSet<>();
+ try {
+ firstNode = mapper.readTree(json);
+ } catch (IOException e) {
+ LOG.error("Subscribed users list for type={}, target={} is null or unparseable.", subscriptionType, target);
+ return emails;
+ }
+ for (JsonNode user : firstNode) {
+ if (!user.has("email")) {
+ continue;
+ }
+ String email = user.get("email").asText();
+ Boolean emailVerified = user.get("email_verified").asBoolean();
+ // only send email if address has been verified
+ if (!emailVerified) {
+ LOG.warn("Skipping user {}. User's email address has not been verified.", email);
+ } else {
+ emails.add(email);
+ }
+ }
+
+ return emails;
}
/**
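One consequence of the new cache accessors (sketched below, not part of the patch) is that code exercising `Auth0Users` can seed the token cache and avoid calling the live `oauth/token` endpoint; the field values are hypothetical.

```java
import com.conveyal.datatools.manager.auth.Auth0AccessToken;
import com.conveyal.datatools.manager.auth.Auth0Users;

public class CachedTokenSketch {
    public static void main(String[] args) {
        Auth0AccessToken fakeToken = new Auth0AccessToken();
        fakeToken.access_token = "test-token";        // hypothetical value
        fakeToken.expires_in = 3600;                  // one hour; creation_time is set on instantiation
        fakeToken.scope = "read:users update:users";  // hypothetical scope string
        // Seed the cache so getApiToken() reuses this token instead of requesting a new one.
        Auth0Users.setCachedApiToken(fakeToken);
        String token = Auth0Users.getApiToken();
        System.out.println(token); // prints "test-token" until the cached token expires
    }
}
```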
diff --git a/src/main/java/com/conveyal/datatools/manager/codec/LocalDateCodec.java b/src/main/java/com/conveyal/datatools/manager/codec/LocalDateCodec.java
index f3969f633..b999e5975 100644
--- a/src/main/java/com/conveyal/datatools/manager/codec/LocalDateCodec.java
+++ b/src/main/java/com/conveyal/datatools/manager/codec/LocalDateCodec.java
@@ -37,6 +37,7 @@ public LocalDate decode(final BsonReader reader, final DecoderContext decoderCon
date = Instant.ofEpochMilli(reader.readInt64()).atZone(ZoneOffset.UTC).toLocalDate();
return date;
} catch (Exception e) {
+ LOG.error("Error parsing date value with legacy java.util.Date date format");
e.printStackTrace();
}
}
diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/DumpController.java b/src/main/java/com/conveyal/datatools/manager/controllers/DumpController.java
index d8c4d94f9..56c526741 100644
--- a/src/main/java/com/conveyal/datatools/manager/controllers/DumpController.java
+++ b/src/main/java/com/conveyal/datatools/manager/controllers/DumpController.java
@@ -32,10 +32,11 @@
import java.util.List;
import java.util.Map;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.mongodb.client.model.Filters.and;
import static com.mongodb.client.model.Filters.eq;
-import static spark.Spark.*;
+import static spark.Spark.get;
+import static spark.Spark.post;
/**
* This class contains HTTP endpoints that should ONLY be used in controlled environments (i.e., when the application is
@@ -74,8 +75,7 @@ private static boolean getLegacy(Request req, Response response) {
try {
return loadLegacy(req.body());
} catch (IOException e) {
- e.printStackTrace();
- haltWithMessage(req, 400, "Error loading legacy JSON", e);
+ logMessageAndHalt(req, 400, "Error loading legacy JSON", e);
return false;
}
}
@@ -299,6 +299,7 @@ private static void loadLegacyFeedSource (JsonNode node) {
try {
feedSource.url = url != null && !url.equals("null") ? new URL(url.toString()) : null;
} catch (MalformedURLException e) {
+ LOG.error("Failed to create feedsource url");
e.printStackTrace();
}
diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/DeploymentController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/DeploymentController.java
index a4e873029..385db49c1 100644
--- a/src/main/java/com/conveyal/datatools/manager/controllers/api/DeploymentController.java
+++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/DeploymentController.java
@@ -1,7 +1,7 @@
package com.conveyal.datatools.manager.controllers.api;
-import com.conveyal.datatools.manager.DataManager;
import com.conveyal.datatools.common.utils.SparkUtils;
+import com.conveyal.datatools.manager.DataManager;
import com.conveyal.datatools.manager.auth.Auth0UserProfile;
import com.conveyal.datatools.manager.jobs.DeployJob;
import com.conveyal.datatools.manager.models.Deployment;
@@ -16,7 +16,6 @@
import org.eclipse.jetty.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import spark.HaltException;
import spark.Request;
import spark.Response;
@@ -30,9 +29,12 @@
import java.util.Map;
import java.util.stream.Collectors;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
-import static spark.Spark.*;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
+import static spark.Spark.delete;
import static spark.Spark.get;
+import static spark.Spark.options;
+import static spark.Spark.post;
+import static spark.Spark.put;
/**
* Handlers for HTTP API requests that affect Deployments.
@@ -52,12 +54,12 @@ private static Deployment checkDeploymentPermissions (Request req, Response res)
String deploymentId = req.params("id");
Deployment deployment = Persistence.deployments.getById(deploymentId);
if (deployment == null) {
- haltWithMessage(req, HttpStatus.BAD_REQUEST_400, "Deployment does not exist.");
+ logMessageAndHalt(req, HttpStatus.BAD_REQUEST_400, "Deployment does not exist.");
}
boolean isProjectAdmin = userProfile.canAdministerProject(deployment.projectId, deployment.organizationId());
if (!isProjectAdmin && !userProfile.getUser_id().equals(deployment.user())) {
// If user is not a project admin and did not create the deployment, access to the deployment is denied.
- haltWithMessage(req, HttpStatus.UNAUTHORIZED_401, "User not authorized for deployment.");
+ logMessageAndHalt(req, HttpStatus.UNAUTHORIZED_401, "User not authorized for deployment.");
}
return deployment;
}
@@ -111,23 +113,23 @@ private static Collection getAllDeployments (Request req, Response r
if (projectId != null) {
// Return deployments for project
Project project = Persistence.projects.getById(projectId);
- if (project == null) haltWithMessage(req, 400, "Must provide valid projectId value.");
+ if (project == null) logMessageAndHalt(req, 400, "Must provide valid projectId value.");
if (!userProfile.canAdministerProject(projectId, project.organizationId))
- haltWithMessage(req, 401, "User not authorized to view project deployments.");
+ logMessageAndHalt(req, 401, "User not authorized to view project deployments.");
return project.retrieveDeployments();
} else if (feedSourceId != null) {
// Return test deployments for feed source (note: these only include test deployments specific to the feed
// source and will not include all deployments that reference this feed source).
FeedSource feedSource = Persistence.feedSources.getById(feedSourceId);
- if (feedSource == null) haltWithMessage(req, 400, "Must provide valid feedSourceId value.");
+ if (feedSource == null) logMessageAndHalt(req, 400, "Must provide valid feedSourceId value.");
Project project = feedSource.retrieveProject();
if (!userProfile.canViewFeed(project.organizationId, project.id, feedSourceId))
- haltWithMessage(req, 401, "User not authorized to view feed source deployments.");
+ logMessageAndHalt(req, 401, "User not authorized to view feed source deployments.");
return feedSource.retrieveDeployments();
} else {
// If no query parameter is supplied, return all deployments for application.
if (!userProfile.canAdministerApplication())
- haltWithMessage(req, 401, "User not authorized to view application deployments.");
+ logMessageAndHalt(req, 401, "User not authorized to view application deployments.");
return Persistence.deployments.getAll();
}
}
@@ -156,7 +158,7 @@ private static Deployment createDeployment (Request req, Response res) {
Persistence.deployments.create(newDeployment);
return Persistence.deployments.update(newDeployment.id, req.body());
} else {
- haltWithMessage(req, 403, "Not authorized to create a deployment for project " + projectId);
+ logMessageAndHalt(req, 403, "Not authorized to create a deployment for project " + projectId);
return null;
}
}
@@ -178,10 +180,10 @@ private static Deployment createDeploymentFromFeedSource (Request req, Response
!userProfile.canAdministerProject(feedSource.projectId, feedSource.organizationId()) &&
!userProfile.getUser_id().equals(feedSource.user())
)
- haltWithMessage(req, 401, "User not authorized to perform this action");
+ logMessageAndHalt(req, 401, "User not authorized to perform this action");
if (feedSource.latestVersionId() == null)
- haltWithMessage(req, 400, "Cannot create a deployment from a feed source with no versions.");
+ logMessageAndHalt(req, 400, "Cannot create a deployment from a feed source with no versions.");
Deployment deployment = new Deployment(feedSource);
deployment.storeUser(userProfile);
@@ -206,16 +208,16 @@ private static Object updateDeployment (Request req, Response res) {
ArrayList versionsToInsert = new ArrayList<>(versions.size());
for (Document version : versions) {
if (!version.containsKey("id")) {
- haltWithMessage(req, 400, "Version not supplied");
+ logMessageAndHalt(req, 400, "Version not supplied");
}
FeedVersion feedVersion = null;
try {
feedVersion = Persistence.feedVersions.getById(version.getString("id"));
} catch (Exception e) {
- haltWithMessage(req, 404, "Version not found");
+ logMessageAndHalt(req, 404, "Version not found");
}
if (feedVersion == null) {
- haltWithMessage(req, 404, "Version not found");
+ logMessageAndHalt(req, 404, "Version not found");
}
// check that the version belongs to the correct project
if (feedVersion.parentFeedSource().projectId.equals(deploymentToUpdate.projectId)) {
@@ -243,73 +245,75 @@ private static Object updateDeployment (Request req, Response res) {
* Create a deployment bundle, and send it to the specified OTP target servers (or the specified s3 bucket).
*/
private static String deploy (Request req, Response res) {
- try {
- // Check parameters supplied in request for validity.
- Auth0UserProfile userProfile = req.attribute("user");
- String target = req.params("target");
- Deployment deployment = checkDeploymentPermissions(req, res);
- Project project = Persistence.projects.getById(deployment.projectId);
- if (project == null) haltWithMessage(req, 400, "Internal reference error. Deployment's project ID is invalid");
- // FIXME: Currently the otp server to deploy to is determined by the string name field (with special characters
- // replaced with underscores). This should perhaps be replaced with an immutable server ID so that there is
- // no risk that these values can overlap. This may be over engineering this system though. The user deploying
- // a set of feeds would likely not create two deployment targets with the same name (and the name is unlikely
- // to change often).
- OtpServer otpServer = Persistence.servers.getById(target);
- if (otpServer == null) haltWithMessage(req, 400, "Must provide valid OTP server target ID.");
- // Check that permissions of user allow them to deploy to target.
- boolean isProjectAdmin = userProfile.canAdministerProject(deployment.projectId, deployment.organizationId());
- if (!isProjectAdmin && otpServer.admin) {
- haltWithMessage(req, 401, "User not authorized to deploy to admin-only target OTP server.");
- }
- // Check that we can deploy to the specified target. (Any deploy job for the target that is presently active will
- // cause a halt.)
- if (deploymentJobsByServer.containsKey(target)) {
- // There is a deploy job for the server. Check if it is active.
- DeployJob deployJob = deploymentJobsByServer.get(target);
- if (deployJob != null && !deployJob.status.completed) {
- // Job for the target is still active! Send a 202 to the requester to indicate that it is not possible
- // to deploy to this target right now because someone else is deploying.
- String message = String.format(
- "Will not process request to deploy %s. Deployment currently in progress for target: %s",
- deployment.name,
- target);
- LOG.warn(message);
- haltWithMessage(req, HttpStatus.ACCEPTED_202, message);
- }
- }
- // Get the URLs to deploy to.
- List targetUrls = otpServer.internalUrl;
- if ((targetUrls == null || targetUrls.isEmpty()) && (otpServer.s3Bucket == null || otpServer.s3Bucket.isEmpty())) {
- haltWithMessage(req, 400, String.format("OTP server %s has no internal URL or s3 bucket specified.", otpServer.name));
- }
- // For any previous deployments sent to the server/router combination, set deployedTo to null because
- // this new one will overwrite it. NOTE: deployedTo for the current deployment will only be updated after the
- // successful completion of the deploy job.
- for (Deployment oldDeployment : Deployment.retrieveDeploymentForServerAndRouterId(target, deployment.routerId)) {
- LOG.info("Setting deployment target to null id={}", oldDeployment.id);
- Persistence.deployments.updateField(oldDeployment.id, "deployedTo", null);
+ // Check parameters supplied in request for validity.
+ Auth0UserProfile userProfile = req.attribute("user");
+ String target = req.params("target");
+ Deployment deployment = checkDeploymentPermissions(req, res);
+ Project project = Persistence.projects.getById(deployment.projectId);
+ if (project == null)
+ logMessageAndHalt(req, 400, "Internal reference error. Deployment's project ID is invalid");
+
+ // FIXME: Currently the otp server to deploy to is determined by the string name field (with special characters
+ // replaced with underscores). This should perhaps be replaced with an immutable server ID so that there is
+ // no risk that these values can overlap. This may be over-engineering this system, though. The user deploying
+ // a set of feeds would likely not create two deployment targets with the same name (and the name is unlikely
+ // to change often).
+ OtpServer otpServer = project.retrieveServer(target);
+ if (otpServer == null) logMessageAndHalt(req, 400, "Must provide valid OTP server target ID.");
+
+ // Check that permissions of user allow them to deploy to target.
+ boolean isProjectAdmin = userProfile.canAdministerProject(deployment.projectId, deployment.organizationId());
+ if (!isProjectAdmin && otpServer.admin) {
+ logMessageAndHalt(req, 401, "User not authorized to deploy to admin-only target OTP server.");
+ }
+
+ // Check that we can deploy to the specified target. (Any deploy job for the target that is presently active will
+ // cause a halt.)
+ if (deploymentJobsByServer.containsKey(target)) {
+ // There is a deploy job for the server. Check if it is active.
+ DeployJob deployJob = deploymentJobsByServer.get(target);
+ if (deployJob != null && !deployJob.status.completed) {
+ // Job for the target is still active! Send a 202 to the requester to indicate that it is not possible
+ // to deploy to this target right now because someone else is deploying.
+ String message = String.format(
+ "Will not process request to deploy %s. Deployment currently in progress for target: %s",
+ deployment.name,
+ target);
+ LOG.warn(message);
+ logMessageAndHalt(req, HttpStatus.ACCEPTED_202, message);
}
+ }
- // Execute the deployment job and keep track of it in the jobs for server map.
- DeployJob job = new DeployJob(deployment, userProfile.getUser_id(), otpServer);
- DataManager.heavyExecutor.execute(job);
- deploymentJobsByServer.put(target, job);
+ // Get the URLs to deploy to.
+ List targetUrls = otpServer.internalUrl;
+ if ((targetUrls == null || targetUrls.isEmpty()) && (otpServer.s3Bucket == null || otpServer.s3Bucket.isEmpty())) {
+ logMessageAndHalt(
+ req,
+ 400,
+ String.format("OTP server %s has no internal URL or s3 bucket specified.", otpServer.name)
+ );
+ }
- return SparkUtils.formatJobMessage(job.jobId, "Deployment initiating.");
- } catch (HaltException e) {
- throw e;
- } catch (Exception e) {
- e.printStackTrace();
- haltWithMessage(req, 400, "Could not process deployment request. Please check request parameters and OTP server target fields.");
- return null;
+ // For any previous deployments sent to the server/router combination, set deployedTo to null because
+ // this new one will overwrite it. NOTE: deployedTo for the current deployment will only be updated after the
+ // successful completion of the deploy job.
+ for (Deployment oldDeployment : Deployment.retrieveDeploymentForServerAndRouterId(target, deployment.routerId)) {
+ LOG.info("Setting deployment target to null id={}", oldDeployment.id);
+ Persistence.deployments.updateField(oldDeployment.id, "deployedTo", null);
}
+
+ // Execute the deployment job and keep track of it in the jobs for server map.
+ DeployJob job = new DeployJob(deployment, userProfile.getUser_id(), otpServer);
+ DataManager.heavyExecutor.execute(job);
+ deploymentJobsByServer.put(target, job);
+
+ return SparkUtils.formatJobMessage(job.jobId, "Deployment initiating.");
}
public static void register (String apiPrefix) {
post(apiPrefix + "secure/deployments/:id/deploy/:target", DeploymentController::deploy, json::write);
post(apiPrefix + "secure/deployments/:id/deploy/", ((request, response) -> {
- haltWithMessage(request, 400, "Must provide valid deployment target name");
+ logMessageAndHalt(request, 400, "Must provide valid deployment target name");
return null;
}), json::write);
options(apiPrefix + "secure/deployments", (q, s) -> "");
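To show how the route registered above is exercised (a hedged sketch, not part of the patch), a client posts to the deploy path with an Authorization header; the host, port, API prefix, and IDs below are hypothetical. Per the controller code, a successful request returns a job message for the new `DeployJob`, while a deploy already in progress for the same target yields HTTP 202.

```java
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;

public class DeployRequestSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClientBuilder.create().build();
        // Hypothetical URL matching "secure/deployments/:id/deploy/:target".
        HttpPost post = new HttpPost("http://localhost:4000/api/manager/secure/deployments/abc123/deploy/production");
        post.addHeader("Authorization", "Bearer <token>"); // required unless auth is disabled
        HttpResponse response = client.execute(post);
        System.out.println(response.getStatusLine().getStatusCode());
        System.out.println(EntityUtils.toString(response.getEntity()));
    }
}
```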
diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedSourceController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedSourceController.java
index 81022a90b..6882deb38 100644
--- a/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedSourceController.java
+++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedSourceController.java
@@ -1,8 +1,8 @@
package com.conveyal.datatools.manager.controllers.api;
-import com.conveyal.datatools.common.utils.SparkUtils;
import com.conveyal.datatools.manager.DataManager;
import com.conveyal.datatools.manager.auth.Auth0UserProfile;
+import com.conveyal.datatools.manager.auth.Actions;
import com.conveyal.datatools.manager.extensions.ExternalFeedResource;
import com.conveyal.datatools.manager.jobs.FetchSingleFeedJob;
import com.conveyal.datatools.manager.jobs.NotifyUsersForSubscriptionJob;
@@ -27,7 +27,7 @@
import java.util.Map;
import static com.conveyal.datatools.common.utils.SparkUtils.formatJobMessage;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.conveyal.datatools.manager.auth.Auth0Users.getUserById;
import static com.conveyal.datatools.manager.models.ExternalFeedSourceProperty.constructId;
import static spark.Spark.delete;
@@ -46,7 +46,7 @@ public class FeedSourceController {
private static ObjectMapper mapper = new ObjectMapper();
public static FeedSource getFeedSource(Request req, Response res) {
- return requestFeedSourceById(req, "view");
+ return requestFeedSourceById(req, Actions.VIEW);
}
public static Collection getAllFeedSources(Request req, Response res) {
@@ -128,12 +128,11 @@ public static FeedSource createFeedSource(Request req, Response res) {
String.format("New feed %s created in project %s.", newFeedSource.name, parentProject.name));
return newFeedSource;
} catch (Exception e) {
- LOG.error("Unknown error creating feed source", e);
- haltWithMessage(req, 400, "Unknown error encountered creating feed source", e);
+ logMessageAndHalt(req, 500, "Unknown error encountered creating feed source", e);
return null;
}
} else {
- haltWithMessage(req, 400, "Must provide project ID for feed source");
+ logMessageAndHalt(req, 403, "User not allowed to create feed source");
return null;
}
}
@@ -144,7 +143,7 @@ public static FeedSource updateFeedSource(Request req, Response res) {
// call this method just for null and permissions check
// TODO: it's wasteful to request the entire feed source here, need to factor out permissions checks. However,
// we need the URL to see if it has been updated in order to then set the lastFetched value to null.
- FeedSource formerFeedSource = requestFeedSourceById(req, "manage");
+ FeedSource formerFeedSource = requestFeedSourceById(req, Actions.MANAGE);
Document fieldsToUpdate = Document.parse(req.body());
if (fieldsToUpdate.containsKey("url") && formerFeedSource.url != null) {
// Reset last fetched timestamp if the URL has been updated.
@@ -175,14 +174,19 @@ public static FeedSource updateFeedSource(Request req, Response res) {
* FIXME: Should we reconsider how we store external feed source properties now that we are using Mongo document
* storage? This might should be refactored in the future, but it isn't really hurting anything at the moment.
*/
- public static FeedSource updateExternalFeedResource(Request req, Response res) throws IOException {
- FeedSource source = requestFeedSourceById(req, "manage");
+ public static FeedSource updateExternalFeedResource(Request req, Response res) {
+ FeedSource source = requestFeedSourceById(req, Actions.MANAGE);
String resourceType = req.queryParams("resourceType");
- JsonNode node = mapper.readTree(req.body());
+ JsonNode node = null;
+ try {
+ node = mapper.readTree(req.body());
+ } catch (IOException e) {
+ logMessageAndHalt(req, 400, "Unable to parse request body", e);
+ }
Iterator> fieldsIterator = node.fields();
ExternalFeedResource externalFeedResource = DataManager.feedResources.get(resourceType);
if (externalFeedResource == null) {
- haltWithMessage(req, 400, String.format("Resource '%s' not registered with server.", resourceType));
+ logMessageAndHalt(req, 400, String.format("Resource '%s' not registered with server.", resourceType));
}
// Iterate over fields found in body and update external properties accordingly.
while (fieldsIterator.hasNext()) {
@@ -191,7 +195,7 @@ public static FeedSource updateExternalFeedResource(Request req, Response res) t
ExternalFeedSourceProperty prop = Persistence.externalFeedSourceProperties.getById(propertyId);
if (prop == null) {
- haltWithMessage(req, 400, String.format("Property '%s' does not exist!", propertyId));
+ logMessageAndHalt(req, 400, String.format("Property '%s' does not exist!", propertyId));
}
// Hold previous value for use when updating third-party resource
String previousValue = prop.value;
@@ -200,7 +204,11 @@ public static FeedSource updateExternalFeedResource(Request req, Response res) t
propertyId, "value", entry.getValue().asText());
// Trigger an event on the external resource
- externalFeedResource.propertyUpdated(updatedProp, previousValue, req.headers("Authorization"));
+ try {
+ externalFeedResource.propertyUpdated(updatedProp, previousValue, req.headers("Authorization"));
+ } catch (IOException e) {
+ logMessageAndHalt(req, 500, "Could not update external feed source", e);
+ }
}
// Updated external properties will be included in JSON (FeedSource#externalProperties)
return source;
@@ -212,14 +220,13 @@ public static FeedSource updateExternalFeedResource(Request req, Response res) t
* FIXME: Should this just set a "deleted" flag instead of removing from the database entirely?
*/
private static FeedSource deleteFeedSource(Request req, Response res) {
- FeedSource source = requestFeedSourceById(req, "manage");
+ FeedSource source = requestFeedSourceById(req, Actions.MANAGE);
try {
source.delete();
return source;
} catch (Exception e) {
- LOG.error("Could not delete feed source", e);
- haltWithMessage(req, 400, "Unknown error deleting feed source.");
+ logMessageAndHalt(req, 500, "Unknown error occurred while deleting feed source.", e);
return null;
}
}
@@ -228,7 +235,7 @@ private static FeedSource deleteFeedSource(Request req, Response res) {
* Re-fetch this feed from the feed source URL.
*/
public static String fetch (Request req, Response res) {
- FeedSource s = requestFeedSourceById(req, "manage");
+ FeedSource s = requestFeedSourceById(req, Actions.MANAGE);
LOG.info("Fetching feed for source {}", s.name);
@@ -244,38 +251,38 @@ public static String fetch (Request req, Response res) {
/**
* Helper function returns feed source if user has permission for specified action.
* @param req spark Request object from API request
- * @param action action type (either "view" or "manage")
+ * @param action action type (either Actions.VIEW or Actions.MANAGE)
* @return feedsource object for ID
*/
- public static FeedSource requestFeedSourceById(Request req, String action) {
+ public static FeedSource requestFeedSourceById(Request req, Actions action) {
String id = req.params("id");
if (id == null) {
- haltWithMessage(req, 400, "Please specify id param");
+ logMessageAndHalt(req, 400, "Please specify id param");
}
return checkFeedSourcePermissions(req, Persistence.feedSources.getById(id), action);
}
- public static FeedSource checkFeedSourcePermissions(Request req, FeedSource feedSource, String action) {
+ public static FeedSource checkFeedSourcePermissions(Request req, FeedSource feedSource, Actions action) {
Auth0UserProfile userProfile = req.attribute("user");
Boolean publicFilter = Boolean.valueOf(req.queryParams("public")) ||
req.url().split("/api/*/")[1].startsWith("public");
// check for null feedSource
if (feedSource == null)
- haltWithMessage(req, 400, "Feed source ID does not exist");
+ logMessageAndHalt(req, 400, "Feed source ID does not exist");
String orgId = feedSource.organizationId();
boolean authorized;
switch (action) {
- case "create":
+ case CREATE:
authorized = userProfile.canAdministerProject(feedSource.projectId, orgId);
break;
- case "manage":
+ case MANAGE:
authorized = userProfile.canManageFeed(orgId, feedSource.projectId, feedSource.id);
break;
- case "edit":
+ case EDIT:
authorized = userProfile.canEditGTFS(orgId, feedSource.projectId, feedSource.id);
break;
- case "view":
+ case VIEW:
if (!publicFilter) {
authorized = userProfile.canViewFeed(orgId, feedSource.projectId, feedSource.id);
} else {
@@ -291,15 +298,15 @@ public static FeedSource checkFeedSourcePermissions(Request req, FeedSource feed
if (publicFilter){
// if feed not public and user not authorized, halt
if (!feedSource.isPublic && !authorized)
- haltWithMessage(req, 403, "User not authorized to perform action on feed source");
+ logMessageAndHalt(req, 403, "User not authorized to perform action on feed source");
// if feed is public, but action is managerial, halt (we shouldn't ever retrieveById here, but just in case)
- else if (feedSource.isPublic && action.equals("manage"))
- haltWithMessage(req, 403, "User not authorized to perform action on feed source");
+ else if (feedSource.isPublic && action.equals(Actions.MANAGE))
+ logMessageAndHalt(req, 403, "User not authorized to perform action on feed source");
}
else {
if (!authorized)
- haltWithMessage(req, 403, "User not authorized to perform action on feed source");
+ logMessageAndHalt(req, 403, "User not authorized to perform action on feed source");
}
// if we make it here, user has permission and it's a valid feedsource
diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedVersionController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedVersionController.java
index 445428f0f..058d814b8 100644
--- a/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedVersionController.java
+++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/FeedVersionController.java
@@ -3,7 +3,11 @@
import com.conveyal.datatools.common.utils.SparkUtils;
import com.conveyal.datatools.manager.DataManager;
import com.conveyal.datatools.manager.auth.Auth0UserProfile;
+import com.conveyal.datatools.manager.auth.Actions;
import com.conveyal.datatools.manager.jobs.CreateFeedVersionFromSnapshotJob;
+import com.conveyal.datatools.manager.jobs.GisExportJob;
+import com.conveyal.datatools.manager.jobs.MergeFeedsJob;
+import com.conveyal.datatools.manager.jobs.MergeFeedsType;
import com.conveyal.datatools.manager.jobs.ProcessSingleFeedJob;
import com.conveyal.datatools.manager.models.FeedDownloadToken;
import com.conveyal.datatools.manager.models.FeedSource;
@@ -14,40 +18,31 @@
import com.conveyal.datatools.manager.persistence.Persistence;
import com.conveyal.datatools.manager.utils.HashUtils;
import com.conveyal.datatools.manager.utils.json.JsonManager;
-import com.fasterxml.jackson.core.JsonFactory;
-import com.fasterxml.jackson.core.JsonGenerator;
+
import com.fasterxml.jackson.databind.JsonNode;
-import com.google.common.io.ByteStreams;
-import org.eclipse.jetty.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import spark.Request;
import spark.Response;
-import javax.servlet.ServletInputStream;
-import javax.servlet.ServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
-import java.io.ByteArrayOutputStream;
import java.io.File;
-import java.io.FileOutputStream;
import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.time.LocalDate;
-import java.time.ZoneId;
-import java.time.format.DateTimeFormatter;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
-import java.util.EnumSet;
import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.stream.Collectors;
+import java.util.HashSet;
+import java.util.Set;
-import static com.conveyal.datatools.common.status.MonitorableJob.JobType.BUILD_TRANSPORT_NETWORK;
import static com.conveyal.datatools.common.utils.S3Utils.downloadFromS3;
+import static com.conveyal.datatools.common.utils.SparkUtils.copyRequestStreamIntoFile;
import static com.conveyal.datatools.common.utils.SparkUtils.downloadFile;
import static com.conveyal.datatools.common.utils.SparkUtils.formatJobMessage;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.conveyal.datatools.manager.controllers.api.FeedSourceController.checkFeedSourcePermissions;
+import static com.mongodb.client.model.Filters.eq;
+import static com.conveyal.datatools.manager.jobs.MergeFeedsType.REGIONAL;
import static spark.Spark.delete;
import static spark.Spark.get;
import static spark.Spark.post;
@@ -55,11 +50,6 @@
public class FeedVersionController {
- // TODO use this instead of stringly typed permissions
- enum Permission {
- VIEW, MANAGE
- }
-
public static final Logger LOG = LoggerFactory.getLogger(FeedVersionController.class);
public static JsonManager json = new JsonManager<>(FeedVersion.class, JsonViews.UserInterface.class);
@@ -68,7 +58,7 @@ enum Permission {
* If you pass in ?summarized=true, don't include the full tree of validation results, only the counts.
*/
private static FeedVersion getFeedVersion (Request req, Response res) {
- return requestFeedVersion(req, "view");
+ return requestFeedVersion(req, Actions.VIEW);
}
/**
@@ -76,19 +66,19 @@ private static FeedVersion getFeedVersion (Request req, Response res) {
*/
private static Collection getAllFeedVersionsForFeedSource(Request req, Response res) {
// Check permissions and get the FeedSource whose FeedVersions we want.
- FeedSource feedSource = requestFeedSourceById(req, "view");
+ FeedSource feedSource = requestFeedSourceById(req, Actions.VIEW);
return feedSource.retrieveFeedVersions();
}
- public static FeedSource requestFeedSourceById(Request req, String action, String paramName) {
+ public static FeedSource requestFeedSourceById(Request req, Actions action, String paramName) {
String id = req.queryParams(paramName);
if (id == null) {
- haltWithMessage(req, 400, "Please specify feedSourceId param");
+ logMessageAndHalt(req, 400, "Please specify feedSourceId param");
}
return checkFeedSourcePermissions(req, Persistence.feedSources.getById(id), action);
}
- private static FeedSource requestFeedSourceById(Request req, String action) {
+ private static FeedSource requestFeedSourceById(Request req, Actions action) {
return requestFeedSourceById(req, action, "feedSourceId");
}
@@ -106,7 +96,7 @@ private static FeedSource requestFeedSourceById(Request req, String action) {
public static String createFeedVersionViaUpload(Request req, Response res) {
Auth0UserProfile userProfile = req.attribute("user");
- FeedSource feedSource = requestFeedSourceById(req, "manage");
+ FeedSource feedSource = requestFeedSourceById(req, Actions.MANAGE);
FeedVersion latestVersion = feedSource.retrieveLatest();
FeedVersion newFeedVersion = new FeedVersion(feedSource);
newFeedVersion.retrievalMethod = FeedSource.FeedRetrievalMethod.MANUALLY_UPLOADED;
@@ -115,34 +105,17 @@ public static String createFeedVersionViaUpload(Request req, Response res) {
// FIXME: Make the creation of new GTFS files generic to handle other feed creation methods, including fetching
// by URL and loading from the editor.
File newGtfsFile = new File(DataManager.getConfigPropertyAsText("application.data.gtfs"), newFeedVersion.id);
- try {
- // Bypass Spark's request wrapper which always caches the request body in memory that may be a very large
- // GTFS file. Also, the body of the request is the GTFS file instead of using multipart form data because
- // multipart form handling code also caches the request body.
- ServletInputStream inputStream = ((ServletRequestWrapper) req.raw()).getRequest().getInputStream();
- FileOutputStream fileOutputStream = new FileOutputStream(newGtfsFile);
- // Guava's ByteStreams.copy uses a 4k buffer (no need to wrap output stream), but does not close streams.
- ByteStreams.copy(inputStream, fileOutputStream);
- fileOutputStream.close();
- inputStream.close();
- if (newGtfsFile.length() == 0) {
- throw new IOException("No file found in request body.");
- }
- // Set last modified based on value of query param. This is determined/supplied by the client
- // request because this data gets lost in the uploadStream otherwise.
- Long lastModified = req.queryParams("lastModified") != null
- ? Long.valueOf(req.queryParams("lastModified"))
- : null;
- if (lastModified != null) {
- newGtfsFile.setLastModified(lastModified);
- newFeedVersion.fileTimestamp = lastModified;
- }
- LOG.info("Last modified: {}", new Date(newGtfsFile.lastModified()));
- LOG.info("Saving feed from upload {}", feedSource);
- } catch (Exception e) {
- LOG.error("Unable to open input stream from uploaded file", e);
- haltWithMessage(req, 400, "Unable to read uploaded feed");
+ copyRequestStreamIntoFile(req, newGtfsFile);
+ // Set last modified based on value of query param. This is determined/supplied by the client
+ // request because this data gets lost in the uploadStream otherwise.
+ Long lastModified = req.queryParams("lastModified") != null
+ ? Long.valueOf(req.queryParams("lastModified"))
+ : null;
+ if (lastModified != null) {
+ newGtfsFile.setLastModified(lastModified);
+ newFeedVersion.fileTimestamp = lastModified;
}
+ LOG.info("Last modified: {}", new Date(newGtfsFile.lastModified()));
// TODO: fix FeedVersion.hash() call when called in this context. Nothing gets hashed because the file has not been saved yet.
// newFeedVersion.hash();
@@ -158,7 +131,7 @@ public static String createFeedVersionViaUpload(Request req, Response res) {
LOG.warn("File deleted");
// There is no need to delete the newFeedVersion because it has not yet been persisted to MongoDB.
- haltWithMessage(req, 304, "Uploaded feed is identical to the latest version known to the database.");
+ logMessageAndHalt(req, 304, "Uploaded feed is identical to the latest version known to the database.");
}
newFeedVersion.name = newFeedVersion.formattedTimestamp() + " Upload";
@@ -185,10 +158,10 @@ private static boolean createFeedVersionFromSnapshot (Request req, Response res)
Auth0UserProfile userProfile = req.attribute("user");
// TODO: Should the ability to create a feedVersion from snapshot be controlled by the 'edit-gtfs' privilege?
- FeedSource feedSource = requestFeedSourceById(req, "manage");
+ FeedSource feedSource = requestFeedSourceById(req, Actions.MANAGE);
Snapshot snapshot = Persistence.snapshots.getById(req.queryParams("snapshotId"));
if (snapshot == null) {
- haltWithMessage(req, 400, "Must provide valid snapshot ID");
+ logMessageAndHalt(req, 400, "Must provide valid snapshot ID");
}
FeedVersion feedVersion = new FeedVersion(feedSource);
CreateFeedVersionFromSnapshotJob createFromSnapshotJob =
@@ -202,19 +175,19 @@ private static boolean createFeedVersionFromSnapshot (Request req, Response res)
* Spark HTTP API handler that deletes a single feed version based on the ID in the request.
*/
private static FeedVersion deleteFeedVersion(Request req, Response res) {
- FeedVersion version = requestFeedVersion(req, "manage");
+ FeedVersion version = requestFeedVersion(req, Actions.MANAGE);
version.delete();
return version;
}
- private static FeedVersion requestFeedVersion(Request req, String action) {
+ private static FeedVersion requestFeedVersion(Request req, Actions action) {
return requestFeedVersion(req, action, req.params("id"));
}
- public static FeedVersion requestFeedVersion(Request req, String action, String feedVersionId) {
+ public static FeedVersion requestFeedVersion(Request req, Actions action, String feedVersionId) {
FeedVersion version = Persistence.feedVersions.getById(feedVersionId);
if (version == null) {
- haltWithMessage(req, 404, "Feed version ID does not exist");
+ logMessageAndHalt(req, 404, "Feed version ID does not exist");
}
// Performs permissions checks on the feed source this feed version belongs to, and halts if permission is denied.
checkFeedSourcePermissions(req, version.parentFeedSource(), action);
@@ -222,19 +195,19 @@ public static FeedVersion requestFeedVersion(Request req, String action, String
}
private static boolean renameFeedVersion (Request req, Response res) {
- FeedVersion v = requestFeedVersion(req, "manage");
+ FeedVersion v = requestFeedVersion(req, Actions.MANAGE);
String name = req.queryParams("name");
if (name == null) {
- haltWithMessage(req, 400, "Name parameter not specified");
+ logMessageAndHalt(req, 400, "Name parameter not specified");
}
Persistence.feedVersions.updateField(v.id, "name", name);
return true;
}
- private static Object downloadFeedVersionDirectly(Request req, Response res) {
- FeedVersion version = requestFeedVersion(req, "view");
+ private static HttpServletResponse downloadFeedVersionDirectly(Request req, Response res) {
+ FeedVersion version = requestFeedVersion(req, Actions.VIEW);
return downloadFile(version.retrieveGtfsFile(), version.id, req, res);
}
@@ -242,11 +215,11 @@ private static Object downloadFeedVersionDirectly(Request req, Response res) {
* Returns credentials that a client may use to then download a feed version. Functionality
* changes depending on whether application.data.use_s3_storage config property is true.
*/
- private static Object getFeedDownloadCredentials(Request req, Response res) {
- FeedVersion version = requestFeedVersion(req, "view");
+ private static Object getDownloadCredentials(Request req, Response res) {
+ FeedVersion version = requestFeedVersion(req, Actions.VIEW);
if (DataManager.useS3) {
- // Return presigned download link if using S3.
+ // Return pre-signed download link if using S3.
return downloadFromS3(FeedStore.s3Client, DataManager.feedBucket, FeedStore.s3Prefix + version.id, false, res);
} else {
// when feeds are stored locally, single-use download token will still be used
@@ -261,29 +234,135 @@ private static Object getFeedDownloadCredentials(Request req, Response res) {
* FIXME!
*/
private static JsonNode validate (Request req, Response res) {
- FeedVersion version = requestFeedVersion(req, "manage");
- haltWithMessage(req, 400, "Validate endpoint not currently configured!");
+ FeedVersion version = requestFeedVersion(req, Actions.MANAGE);
+ logMessageAndHalt(req, 400, "Validate endpoint not currently configured!");
// FIXME: Update for sql-loader validation process?
return null;
// return version.retrieveValidationResult(true);
}
private static FeedVersion publishToExternalResource (Request req, Response res) {
- FeedVersion version = requestFeedVersion(req, "manage");
+ FeedVersion version = requestFeedVersion(req, Actions.MANAGE);
// notify any extensions of the change
- for(String resourceType : DataManager.feedResources.keySet()) {
- DataManager.feedResources.get(resourceType).feedVersionCreated(version, null);
+ try {
+ for (String resourceType : DataManager.feedResources.keySet()) {
+ DataManager.feedResources.get(resourceType).feedVersionCreated(version, null);
+ }
+ if (!DataManager.isExtensionEnabled("mtc")) {
+ // update published version ID on feed source
+ Persistence.feedSources.updateField(version.feedSourceId, "publishedVersionId", version.namespace);
+ return version;
+ } else {
+ // NOTE: If the MTC extension is enabled, the parent feed source's publishedVersionId will not be updated to the
+                // version's namespace until the FeedUpdater has successfully downloaded the feed from the shared S3 bucket.
+ Date publishedDate = new Date();
+ // Set "sent" timestamp to now and reset "processed" timestamp (in the case that it had previously been
+                // published as the active version).
+ version.sentToExternalPublisher = publishedDate;
+ version.processedByExternalPublisher = null;
+ Persistence.feedVersions.replace(version.id, version);
+ return version;
+ }
+ } catch (Exception e) {
+ logMessageAndHalt(req, 500, "Could not publish feed.", e);
+ return null;
}
- if (!DataManager.isExtensionEnabled("mtc")) {
- // update published version ID on feed source
- Persistence.feedSources.updateField(version.feedSourceId, "publishedVersionId", version.namespace);
- return version;
- } else {
- // NOTE: If the MTC extension is enabled, the parent feed source's publishedVersionId will not be updated to the
- // version's namespace until the FeedUpdater has successfully downloaded the feed from the share S3 bucket.
- return Persistence.feedVersions.updateField(version.id, "processing", true);
+ }
+
+ /**
+ * HTTP endpoint to initiate an export of a shapefile containing the stops or routes of one or
+ * more feed versions. NOTE: the job ID returned must be used by the requester to download the
+ * zipped shapefile once the job has completed.
+ */
+ private static String exportGis (Request req, Response res) throws IOException {
+ String type = req.queryParams("type");
+ Auth0UserProfile userProfile = req.attribute("user");
+ List feedIds = Arrays.asList(req.queryParams("feedId").split(","));
+ File temp = File.createTempFile("gis_" + type, ".zip");
+ // Create and run shapefile export.
+ GisExportJob gisExportJob = new GisExportJob(
+ GisExportJob.ExportType.valueOf(type),
+ temp,
+ feedIds,
+ userProfile.getUser_id()
+ );
+ DataManager.heavyExecutor.execute(gisExportJob);
+        // Do not use S3 to store the file, which should only be stored ephemerally (until the requesting
+        // user has downloaded the file).
+ FeedDownloadToken token = new FeedDownloadToken(gisExportJob);
+ Persistence.tokens.create(token);
+ return SparkUtils.formatJobMessage(gisExportJob.jobId, "Generating shapefile.");
+ }
+
+ /**
+ * Public HTTP endpoint to download a zipped shapefile of routes or stops for a set of feed
+ * versions using the job ID that was used for initially creating the exported shapes.
+ */
+ private static HttpServletResponse downloadFeedVersionGis (Request req, Response res) {
+ FeedDownloadToken token = Persistence.tokens.getOneFiltered(eq("jobId", req.params("jobId")));
+ File file = new File(token.filePath);
+ try {
+ return downloadFile(file, file.getName(), req, res);
+ } catch (Exception e) {
+ logMessageAndHalt(req, 500,
+ "Unknown error occurred while downloading feed version shapefile", e);
+ } finally {
+ if (!file.delete()) {
+ LOG.error("Could not delete shapefile {}. Storage issues may occur.", token.filePath);
+ } else {
+ LOG.info("Deleted shapefile {} following download.", token.filePath);
+ }
+ // Delete token.
+ Persistence.tokens.removeById(token.id);
+ }
+ return null;
+ }
+
+ /**
+ * HTTP controller that handles merging multiple feed versions for a given feed source, with version IDs specified
+     * in a comma-separated string in the feedVersionIds query parameter and the merge type specified in the mergeType
+     * query parameter. NOTE: the REGIONAL merge type should only be handled through {@link ProjectController#mergeProjectFeeds(Request, Response)}.
+ */
+ private static String mergeFeedVersions(Request req, Response res) {
+ String[] versionIds = req.queryParams("feedVersionIds").split(",");
+ // Try to parse merge type (null or bad value throws IllegalArgumentException).
+ MergeFeedsType mergeType;
+ try {
+ mergeType = MergeFeedsType.valueOf(req.queryParams("mergeType"));
+ if (mergeType.equals(REGIONAL)) {
+ throw new IllegalArgumentException("Regional merge type is not permitted for this endpoint.");
+ }
+ } catch (IllegalArgumentException e) {
+ logMessageAndHalt(req, 400, "Must provide valid merge type.", e);
+ return null;
}
+ // Collect versions to merge (must belong to same feed source).
+ Set versions = new HashSet<>();
+ String feedSourceId = null;
+ for (String id : versionIds) {
+ FeedVersion v = Persistence.feedVersions.getById(id);
+ if (v == null) {
+ logMessageAndHalt(req,
+ 400,
+ String.format("Must provide valid version ID. (No version exists for id=%s.)", id)
+ );
+ }
+ // Store feed source id and check other versions for matching.
+ if (feedSourceId == null) feedSourceId = v.feedSourceId;
+ else if (!v.feedSourceId.equals(feedSourceId)) {
+ logMessageAndHalt(req, 400, "Cannot merge versions with different parent feed sources.");
+ }
+ versions.add(v);
+ }
+ if (versionIds.length != 2) {
+            logMessageAndHalt(req, 400, "Exactly two feed version IDs must be provided; merging more than two versions is not currently supported.");
+ }
+ // Kick off merge feeds job.
+ Auth0UserProfile userProfile = req.attribute("user");
+ MergeFeedsJob mergeFeedsJob = new MergeFeedsJob(userProfile.getUser_id(), versions, "merged", mergeType);
+ DataManager.heavyExecutor.execute(mergeFeedsJob);
+ return SparkUtils.formatJobMessage(mergeFeedsJob.jobId, "Merging feed versions...");
}
/**
@@ -296,12 +375,12 @@ private static HttpServletResponse downloadFeedVersionWithToken (Request req, Re
if(token == null || !token.isValid()) {
LOG.error("Feed download token is invalid: {}", token);
- haltWithMessage(req, 400, "Feed download token not valid");
+ logMessageAndHalt(req, 400, "Feed download token not valid");
}
// Fetch feed version to download.
FeedVersion version = token.retrieveFeedVersion();
if (version == null) {
- haltWithMessage(req, 400, "Could not retrieve version to download");
+ logMessageAndHalt(req, 400, "Could not retrieve version to download");
}
LOG.info("Using token {} to download feed version {}", token.id, version.id);
// Remove token so that it cannot be used again for feed download
@@ -318,19 +397,22 @@ public static void register (String apiPrefix) {
// previous version of data tools.
get(apiPrefix + "secure/feedversion/:id", FeedVersionController::getFeedVersion, json::write);
get(apiPrefix + "secure/feedversion/:id/download", FeedVersionController::downloadFeedVersionDirectly);
- get(apiPrefix + "secure/feedversion/:id/downloadtoken", FeedVersionController::getFeedDownloadCredentials, json::write);
+ get(apiPrefix + "secure/feedversion/:id/downloadtoken", FeedVersionController::getDownloadCredentials, json::write);
post(apiPrefix + "secure/feedversion/:id/validate", FeedVersionController::validate, json::write);
get(apiPrefix + "secure/feedversion", FeedVersionController::getAllFeedVersionsForFeedSource, json::write);
post(apiPrefix + "secure/feedversion", FeedVersionController::createFeedVersionViaUpload, json::write);
+ post(apiPrefix + "secure/feedversion/shapes", FeedVersionController::exportGis, json::write);
post(apiPrefix + "secure/feedversion/fromsnapshot", FeedVersionController::createFeedVersionFromSnapshot, json::write);
put(apiPrefix + "secure/feedversion/:id/rename", FeedVersionController::renameFeedVersion, json::write);
+ put(apiPrefix + "secure/feedversion/merge", FeedVersionController::mergeFeedVersions, json::write);
post(apiPrefix + "secure/feedversion/:id/publish", FeedVersionController::publishToExternalResource, json::write);
delete(apiPrefix + "secure/feedversion/:id", FeedVersionController::deleteFeedVersion, json::write);
get(apiPrefix + "public/feedversion", FeedVersionController::getAllFeedVersionsForFeedSource, json::write);
- get(apiPrefix + "public/feedversion/:id/downloadtoken", FeedVersionController::getFeedDownloadCredentials, json::write);
+ get(apiPrefix + "public/feedversion/:id/downloadtoken", FeedVersionController::getDownloadCredentials, json::write);
get(apiPrefix + "downloadfeed/:token", FeedVersionController::downloadFeedVersionWithToken);
+ get(apiPrefix + "downloadshapes/:jobId", FeedVersionController::downloadFeedVersionGis, json::write);
}
}
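For reference, a minimal client-side sketch of calling the new merge endpoint registered above. The host, the /api/manager/ prefix, and the SERVICE_PERIOD merge type value are assumptions (only REGIONAL is shown to be rejected by this endpoint in the diff); the query parameters follow the controller code above.

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

// Illustrative sketch only: exercises PUT secure/feedversion/merge as registered above.
// Host, API prefix, version IDs and the mergeType value are hypothetical.
public class MergeFeedVersionsClientSketch {
    public static void main(String[] args) throws IOException {
        URL url = new URL("https://datatools.example.com/api/manager/secure/feedversion/merge"
                + "?feedVersionIds=versionA,versionB&mergeType=SERVICE_PERIOD");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setRequestProperty("Authorization", "Bearer <auth0-token>");
        // The controller responds with a job message containing the MergeFeedsJob ID,
        // which the client can then poll for status.
        System.out.println("HTTP " + conn.getResponseCode());
    }
}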
diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsPlusController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsPlusController.java
index c0038d7a0..fb9ab951b 100644
--- a/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsPlusController.java
+++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/GtfsPlusController.java
@@ -1,99 +1,91 @@
package com.conveyal.datatools.manager.controllers.api;
-import com.conveyal.datatools.common.utils.Consts;
+import com.conveyal.datatools.common.utils.SparkUtils;
import com.conveyal.datatools.manager.DataManager;
import com.conveyal.datatools.manager.auth.Auth0UserProfile;
+import com.conveyal.datatools.manager.gtfsplus.GtfsPlusValidation;
+import com.conveyal.datatools.manager.jobs.ProcessSingleFeedJob;
import com.conveyal.datatools.manager.models.FeedVersion;
import com.conveyal.datatools.manager.persistence.FeedStore;
import com.conveyal.datatools.manager.persistence.Persistence;
+import com.conveyal.datatools.manager.utils.HashUtils;
import com.conveyal.datatools.manager.utils.json.JsonUtil;
-import com.conveyal.gtfs.GTFSFeed;
import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.node.ArrayNode;
+import org.eclipse.jetty.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import spark.Request;
import spark.Response;
-import javax.servlet.MultipartConfigElement;
-import javax.servlet.ServletException;
import javax.servlet.http.HttpServletResponse;
-import javax.servlet.http.Part;
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.Serializable;
-import java.util.Arrays;
-import java.util.Collection;
import java.util.Enumeration;
import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import java.util.zip.ZipOutputStream;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.formatJobMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.copyRequestStreamIntoFile;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
+import static spark.Spark.delete;
import static spark.Spark.get;
import static spark.Spark.post;
/**
+ * This handles the GTFS+ specific HTTP endpoints, which allow for validating GTFS+ tables,
+ * downloading GTFS+ files to a client for editing, and uploading/publishing a GTFS+ zip
+ * (for example, one that has been edited) as a new feed version. Here is the workflow in sequence:
+ *
+ * 1. User uploads feed version (with or without GTFS+ tables).
+ * 2. User views validation to determine if errors need amending.
+ * 3. User makes edits (in client) and uploads modified GTFS+.
+ * 4. Once the user is satisfied with the edits, they publish the GTFS+ as a new feed version.
+ *
* Created by demory on 4/13/16.
*/
public class GtfsPlusController {
public static final Logger LOG = LoggerFactory.getLogger(GtfsPlusController.class);
- private static FeedStore gtfsPlusStore = new FeedStore("gtfsplus");
+ private static final FeedStore gtfsPlusStore = new FeedStore(DataManager.GTFS_PLUS_SUBDIR);
-
- public static Boolean uploadGtfsPlusFile (Request req, Response res) throws IOException, ServletException {
-
- //FeedSource s = FeedSource.retrieveById(req.queryParams("feedSourceId"));
+ /**
+ * Upload a GTFS+ file based on a specific feed version and replace (or create)
+ * the file in the GTFS+ specific feed store.
+ */
+ private static Boolean uploadGtfsPlusFile (Request req, Response res) {
String feedVersionId = req.params("versionid");
-
- if (req.raw().getAttribute("org.eclipse.jetty.multipartConfig") == null) {
- MultipartConfigElement multipartConfigElement = new MultipartConfigElement(System.getProperty("java.io.tmpdir"));
- req.raw().setAttribute("org.eclipse.jetty.multipartConfig", multipartConfigElement);
- }
-
- Part part = req.raw().getPart("file");
-
- LOG.info("Saving GTFS+ feed {} from upload for version " + feedVersionId);
-
-
- InputStream uploadStream;
- try {
- uploadStream = part.getInputStream();
- gtfsPlusStore.newFeed(feedVersionId, uploadStream, null);
- } catch (Exception e) {
- LOG.error("Unable to open input stream from upload");
- haltWithMessage(req, 500, "an unexpected error occurred", e);
- }
-
+ File newGtfsFile = new File(gtfsPlusStore.getPathToFeed(feedVersionId));
+ copyRequestStreamIntoFile(req, newGtfsFile);
return true;
}
+ /**
+     * Download a GTFS+ file for a specific feed version. If no edited GTFS+ file has been uploaded for the
+     * feed version, the GTFS+ tables extracted from the original feed version are returned.
+ */
private static HttpServletResponse getGtfsPlusFile(Request req, Response res) {
String feedVersionId = req.params("versionid");
LOG.info("Downloading GTFS+ file for FeedVersion " + feedVersionId);
// check for saved
File file = gtfsPlusStore.getFeed(feedVersionId);
- if(file == null) {
+ if (file == null) {
return getGtfsPlusFromGtfs(feedVersionId, req, res);
}
LOG.info("Returning updated GTFS+ data");
- return downloadGtfsPlusFile(file, req, res);
+ return SparkUtils.downloadFile(file, file.getName() + ".zip", req, res);
}
+ /**
+ * Download only the GTFS+ tables in a zip for a specific feed version.
+ */
private static HttpServletResponse getGtfsPlusFromGtfs(String feedVersionId, Request req, Response res) {
LOG.info("Extracting GTFS+ data from main GTFS feed");
FeedVersion version = Persistence.feedVersions.getById(feedVersionId);
@@ -102,13 +94,12 @@ private static HttpServletResponse getGtfsPlusFromGtfs(String feedVersionId, Req
// create a set of valid GTFS+ table names
Set gtfsPlusTables = new HashSet<>();
- for(int i = 0; i < DataManager.gtfsPlusConfig.size(); i++) {
+ for (int i = 0; i < DataManager.gtfsPlusConfig.size(); i++) {
JsonNode tableNode = DataManager.gtfsPlusConfig.get(i);
gtfsPlusTables.add(tableNode.get("name").asText());
}
try {
-
// create a new zip file to only contain the GTFS+ tables
gtfsPlusFile = File.createTempFile(version.id + "_gtfsplus", ".zip");
ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(gtfsPlusFile));
@@ -119,7 +110,7 @@ private static HttpServletResponse getGtfsPlusFromGtfs(String feedVersionId, Req
byte[] buffer = new byte[512];
while (entries.hasMoreElements()) {
final ZipEntry entry = entries.nextElement();
- if(!gtfsPlusTables.contains(entry.getName())) continue;
+ if (!gtfsPlusTables.contains(entry.getName())) continue;
// create a new empty ZipEntry and copy the contents
ZipEntry newEntry = new ZipEntry(entry.getName());
@@ -133,39 +124,16 @@ private static HttpServletResponse getGtfsPlusFromGtfs(String feedVersionId, Req
zos.closeEntry();
}
zos.close();
-
- } catch (Exception e) {
- haltWithMessage(req, 500, "an unexpected error occurred", e);
- }
-
- return downloadGtfsPlusFile(gtfsPlusFile, req, res);
- }
-
- private static HttpServletResponse downloadGtfsPlusFile(File file, Request req, Response res) {
- res.raw().setContentType("application/octet-stream");
- res.raw().setHeader("Content-Disposition", "attachment; filename=" + file.getName() + ".zip");
-
- try {
- BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(res.raw().getOutputStream());
- BufferedInputStream bufferedInputStream = new BufferedInputStream(new FileInputStream(file));
-
- byte[] buffer = new byte[1024];
- int len;
- while ((len = bufferedInputStream.read(buffer)) > 0) {
- bufferedOutputStream.write(buffer, 0, len);
- }
-
- bufferedOutputStream.flush();
- bufferedOutputStream.close();
} catch (IOException e) {
- LOG.error("An error occurred while trying to download a gtfs plus file");
- e.printStackTrace();
- haltWithMessage(req, 500, "An error occurred while trying to download a gtfs plus file", e);
+            logMessageAndHalt(req, 500, "An error occurred while trying to create the GTFS+ file", e);
}
- return res.raw();
+ return SparkUtils.downloadFile(gtfsPlusFile, gtfsPlusFile.getName() + ".zip", req, res);
}
+    /** HTTP endpoint used to return the last modified timestamp for a GTFS+ feed. Essentially, this is used as a way to
+     * determine whether any GTFS+ edits have been made to the feed version.
+ */
private static Long getGtfsPlusFileTimestamp(Request req, Response res) {
String feedVersionId = req.params("versionid");
@@ -173,35 +141,35 @@ private static Long getGtfsPlusFileTimestamp(Request req, Response res) {
File file = gtfsPlusStore.getFeed(feedVersionId);
if (file == null) {
FeedVersion feedVersion = Persistence.feedVersions.getById(feedVersionId);
- if (feedVersion != null) {
- file = feedVersion.retrieveGtfsFile();
- } else {
- haltWithMessage(req, 400, "Feed version ID is not valid");
+ if (feedVersion == null) {
+ logMessageAndHalt(req, 400, "Feed version ID is not valid");
+ return null;
}
- }
-
- if (file != null) {
- return file.lastModified();
+ return feedVersion.fileTimestamp;
} else {
- haltWithMessage(req, 404, "Feed version file not found");
- return null;
+ return file.lastModified();
}
}
- private static Boolean publishGtfsPlusFile(Request req, Response res) {
+ /**
+ * Publishes the edited/saved GTFS+ file as a new feed version for the feed source.
+ * This is the final stage in the GTFS+ validation/editing workflow described in the
+ * class's javadoc.
+ */
+ private static String publishGtfsPlusFile(Request req, Response res) {
Auth0UserProfile profile = req.attribute("user");
String feedVersionId = req.params("versionid");
LOG.info("Publishing GTFS+ for " + feedVersionId);
File plusFile = gtfsPlusStore.getFeed(feedVersionId);
- if(plusFile == null || !plusFile.exists()) {
- haltWithMessage(req, 400, "No saved GTFS+ data for version");
+ if (plusFile == null || !plusFile.exists()) {
+ logMessageAndHalt(req, 400, "No saved GTFS+ data for version");
}
FeedVersion feedVersion = Persistence.feedVersions.getById(feedVersionId);
// create a set of valid GTFS+ table names
Set gtfsPlusTables = new HashSet<>();
- for(int i = 0; i < DataManager.gtfsPlusConfig.size(); i++) {
+ for (int i = 0; i < DataManager.gtfsPlusConfig.size(); i++) {
JsonNode tableNode = DataManager.gtfsPlusConfig.get(i);
gtfsPlusTables.add(tableNode.get("name").asText());
}
@@ -209,18 +177,18 @@ private static Boolean publishGtfsPlusFile(Request req, Response res) {
File newFeed = null;
try {
-
- // create a new zip file to only contain the GTFS+ tables
+ // First, create a new zip file to only contain the GTFS+ tables
newFeed = File.createTempFile(feedVersionId + "_new", ".zip");
ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(newFeed));
- // iterate through the existing GTFS file, copying all non-GTFS+ tables
+ // Next, iterate through the existing GTFS file, copying all non-GTFS+ tables.
ZipFile gtfsFile = new ZipFile(feedVersion.retrieveGtfsFile());
final Enumeration extends ZipEntry> entries = gtfsFile.entries();
byte[] buffer = new byte[512];
while (entries.hasMoreElements()) {
final ZipEntry entry = entries.nextElement();
- if(gtfsPlusTables.contains(entry.getName()) || entry.getName().startsWith("_")) continue; // skip GTFS+ and non-standard tables
+ // skip GTFS+ and non-standard tables
+ if (gtfsPlusTables.contains(entry.getName()) || entry.getName().startsWith("_")) continue;
// create a new empty ZipEntry and copy the contents
ZipEntry newEntry = new ZipEntry(entry.getName());
@@ -250,190 +218,68 @@ private static Boolean publishGtfsPlusFile(Request req, Response res) {
in.close();
zos.closeEntry();
}
-
zos.close();
-
- } catch (Exception e) {
- e.printStackTrace();
- haltWithMessage(req, 500, "an unexpected error occurred", e);
+ } catch (IOException e) {
+ logMessageAndHalt(req, 500, "Error creating combined GTFS/GTFS+ file", e);
}
FeedVersion newFeedVersion = new FeedVersion(feedVersion.parentFeedSource());
-
+ File newGtfsFile = null;
try {
- newFeedVersion.newGtfsFile(new FileInputStream(newFeed));
- } catch (Exception e) {
+ newGtfsFile = newFeedVersion.newGtfsFile(new FileInputStream(newFeed));
+ } catch (IOException e) {
e.printStackTrace();
- haltWithMessage(req, 500, "Error creating new FeedVersion from combined GTFS/GTFS+", e);
+ logMessageAndHalt(req, 500, "Error reading GTFS file input stream", e);
}
+ if (newGtfsFile == null) {
+ logMessageAndHalt(req, 500, "GTFS input file must not be null");
+ return null;
+ }
+ newFeedVersion.originNamespace = feedVersion.namespace;
+ newFeedVersion.fileTimestamp = newGtfsFile.lastModified();
+ newFeedVersion.fileSize = newGtfsFile.length();
+ newFeedVersion.hash = HashUtils.hashFile(newGtfsFile);
- newFeedVersion.hash();
-
- // validation for the main GTFS content hasn't changed
- newFeedVersion.validationResult = feedVersion.validationResult;
- newFeedVersion.storeUser(profile);
- Persistence.feedVersions.create(newFeedVersion);
+ // Must be handled by executor because it takes a long time.
+ ProcessSingleFeedJob processSingleFeedJob = new ProcessSingleFeedJob(newFeedVersion, profile.getUser_id(), true);
+ DataManager.heavyExecutor.execute(processSingleFeedJob);
- return true;
+ return formatJobMessage(processSingleFeedJob.jobId, "Feed version is processing.");
}
- private static Collection getGtfsPlusValidation(Request req, Response res) {
+ /**
+ * HTTP endpoint that validates GTFS+ tables for a specific feed version (or its saved/edited GTFS+).
+ */
+ private static GtfsPlusValidation getGtfsPlusValidation(Request req, Response res) {
String feedVersionId = req.params("versionid");
- LOG.info("Validating GTFS+ for " + feedVersionId);
- FeedVersion feedVersion = Persistence.feedVersions.getById(feedVersionId);
-
- List issues = new LinkedList<>();
-
-
- // load the main GTFS
- // FIXME: fix gtfs+ loading/validating for sql-load
- GTFSFeed gtfsFeed = null; // feedVersion.retrieveFeed();
- // check for saved GTFS+ data
- File file = gtfsPlusStore.getFeed(feedVersionId);
- if (file == null) {
- LOG.warn("GTFS+ file not found, loading from main version GTFS.");
- file = feedVersion.retrieveGtfsFile();
- }
- int gtfsPlusTableCount = 0;
+ GtfsPlusValidation gtfsPlusValidation = null;
try {
- ZipFile zipFile = new ZipFile(file);
- final Enumeration extends ZipEntry> entries = zipFile.entries();
- while (entries.hasMoreElements()) {
- final ZipEntry entry = entries.nextElement();
- for(int i = 0; i < DataManager.gtfsPlusConfig.size(); i++) {
- JsonNode tableNode = DataManager.gtfsPlusConfig.get(i);
- if(tableNode.get("name").asText().equals(entry.getName())) {
- LOG.info("Validating GTFS+ table: " + entry.getName());
- gtfsPlusTableCount++;
- validateTable(issues, tableNode, zipFile.getInputStream(entry), gtfsFeed);
- }
- }
- }
-
+ gtfsPlusValidation = GtfsPlusValidation.validate(feedVersionId);
} catch(Exception e) {
- e.printStackTrace();
- haltWithMessage(req, 500, "an unexpected error occurred", e);
- }
- LOG.info("GTFS+ tables found: {}/{}", gtfsPlusTableCount, DataManager.gtfsPlusConfig.size());
- return issues;
- }
-
- private static void validateTable(Collection issues, JsonNode tableNode, InputStream inputStream, GTFSFeed gtfsFeed) throws IOException {
-
- String tableId = tableNode.get("id").asText();
- BufferedReader in = new BufferedReader(new InputStreamReader(inputStream));
- String line = in.readLine();
- String[] fields = line.split(",");
- List fieldList = Arrays.asList(fields);
- for (String field : fieldList) {
- field = field.toLowerCase();
- }
-
- JsonNode[] fieldNodes = new JsonNode[fields.length];
-
- JsonNode fieldsNode = tableNode.get("fields");
- for(int i = 0; i < fieldsNode.size(); i++) {
- JsonNode fieldNode = fieldsNode.get(i);
- int index = fieldList.indexOf(fieldNode.get("name").asText());
- if(index != -1) fieldNodes[index] = fieldNode;
- }
-
- int rowIndex = 0;
- while((line = in.readLine()) != null) {
- String[] values = line.split(Consts.COLUMN_SPLIT, -1);
- for(int v=0; v < values.length; v++) {
- validateTableValue(issues, tableId, rowIndex, values[v], fieldNodes[v], gtfsFeed);
- }
- rowIndex++;
+ logMessageAndHalt(req, 500, "Could not read GTFS+ zip file", e);
}
+ return gtfsPlusValidation;
}
- private static void validateTableValue(Collection issues, String tableId, int rowIndex, String value, JsonNode fieldNode, GTFSFeed gtfsFeed) {
- if(fieldNode == null) return;
- String fieldName = fieldNode.get("name").asText();
-
- if(fieldNode.get("required") != null && fieldNode.get("required").asBoolean()) {
- if(value == null || value.length() == 0) {
- issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Required field missing value"));
- }
- }
-
- switch(fieldNode.get("inputType").asText()) {
- case "DROPDOWN":
- boolean invalid = true;
- ArrayNode options = (ArrayNode) fieldNode.get("options");
- for (JsonNode option : options) {
- String optionValue = option.get("value").asText();
-
- // NOTE: per client's request, this check has been made case insensitive
- boolean valuesAreEqual = optionValue.equalsIgnoreCase(value);
-
- // if value is found in list of options, break out of loop
- if (valuesAreEqual || (!fieldNode.get("required").asBoolean() && value.equals(""))) {
- invalid = false;
- break;
- }
- }
- if (invalid) {
- issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Value: " + value + " is not a valid option."));
- }
- break;
- case "TEXT":
- // check if value exceeds max length requirement
- if(fieldNode.get("maxLength") != null) {
- int maxLength = fieldNode.get("maxLength").asInt();
- if(value.length() > maxLength) {
- issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Text value exceeds the max. length of "+maxLength));
- }
- }
- break;
- case "GTFS_ROUTE":
- if(!gtfsFeed.routes.containsKey(value)) {
- issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Route ID "+ value + " not found in GTFS"));
- }
- break;
- case "GTFS_STOP":
- if(!gtfsFeed.stops.containsKey(value)) {
- issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Stop ID "+ value + " not found in GTFS"));
- }
- break;
- case "GTFS_TRIP":
- if(!gtfsFeed.trips.containsKey(value)) {
- issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Trip ID "+ value + " not found in GTFS"));
- }
- break;
- case "GTFS_FARE":
- if(!gtfsFeed.fares.containsKey(value)) {
- issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Fare ID "+ value + " not found in GTFS"));
- }
- break;
- case "GTFS_SERVICE":
- if(!gtfsFeed.services.containsKey(value)) {
- issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Service ID "+ value + " not found in GTFS"));
- }
- break;
- }
-
- }
-
- public static class ValidationIssue implements Serializable {
- private static final long serialVersionUID = 1L;
- public String tableId;
- public String fieldName;
- public int rowIndex;
- public String description;
-
- public ValidationIssue(String tableId, String fieldName, int rowIndex, String description) {
- this.tableId = tableId;
- this.fieldName = fieldName;
- this.rowIndex = rowIndex;
- this.description = description;
+ /**
+     * HTTP endpoint to delete the GTFS+ specific edits made for a feed version. In other words, this will revert to
+     * referencing the GTFS+ tables contained in the original feed version. Note: this will not delete the feed version itself.
+ */
+ private static String deleteGtfsPlusFile(Request req, Response res) {
+ String feedVersionId = req.params("versionid");
+ File file = gtfsPlusStore.getFeed(feedVersionId);
+ if (file == null) {
+ logMessageAndHalt(req, HttpStatus.NOT_FOUND_404, "No GTFS+ file found for feed version");
+ return null;
}
+ file.delete();
+ return SparkUtils.formatJSON("message", "GTFS+ edits deleted successfully.");
}
public static void register(String apiPrefix) {
post(apiPrefix + "secure/gtfsplus/:versionid", GtfsPlusController::uploadGtfsPlusFile, JsonUtil.objectMapper::writeValueAsString);
get(apiPrefix + "secure/gtfsplus/:versionid", GtfsPlusController::getGtfsPlusFile);
+ delete(apiPrefix + "secure/gtfsplus/:versionid", GtfsPlusController::deleteGtfsPlusFile);
get(apiPrefix + "secure/gtfsplus/:versionid/timestamp", GtfsPlusController::getGtfsPlusFileTimestamp, JsonUtil.objectMapper::writeValueAsString);
get(apiPrefix + "secure/gtfsplus/:versionid/validation", GtfsPlusController::getGtfsPlusValidation, JsonUtil.objectMapper::writeValueAsString);
post(apiPrefix + "secure/gtfsplus/:versionid/publish", GtfsPlusController::publishGtfsPlusFile, JsonUtil.objectMapper::writeValueAsString);
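Taken together, the routes registered above implement the GTFS+ editing workflow described in the class javadoc. A purely illustrative sketch of the call sequence follows; the host, API prefix, and version ID are assumptions.

import java.util.Arrays;
import java.util.List;

// Illustrative only: enumerates the GTFS+ endpoints registered above in workflow order.
public class GtfsPlusWorkflowSketch {
    public static void main(String[] args) {
        String base = "https://datatools.example.com/api/manager/secure/gtfsplus/";
        String versionId = "someFeedVersionId"; // hypothetical feed version ID
        List<String> steps = Arrays.asList(
            "GET    " + base + versionId + "/validation  -> review GTFS+ validation issues",
            "GET    " + base + versionId + "             -> download GTFS+ tables for editing",
            "POST   " + base + versionId + "             -> upload the edited GTFS+ zip",
            "GET    " + base + versionId + "/timestamp   -> confirm the edited copy was saved",
            "POST   " + base + versionId + "/publish     -> publish as a new feed version (returns a job ID)",
            "DELETE " + base + versionId + "             -> discard GTFS+ edits if needed"
        );
        steps.forEach(System.out::println);
    }
}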
diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/NoteController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/NoteController.java
index 4e08f780a..b372e4d26 100644
--- a/src/main/java/com/conveyal/datatools/manager/controllers/api/NoteController.java
+++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/NoteController.java
@@ -18,7 +18,7 @@
import java.util.Collection;
import java.util.Date;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.mongodb.client.model.Filters.eq;
import static com.mongodb.client.model.Updates.push;
import static spark.Spark.get;
@@ -34,20 +34,20 @@ public class NoteController {
public static Collection getAllNotes (Request req, Response res) {
Auth0UserProfile userProfile = req.attribute("user");
- if (userProfile == null) haltWithMessage(req, 401, "User not authorized to perform this action");
+ if (userProfile == null) logMessageAndHalt(req, 401, "User not authorized to perform this action");
String typeStr = req.queryParams("type");
String objectId = req.queryParams("objectId");
if (typeStr == null || objectId == null) {
- haltWithMessage(req, 400, "Please specify objectId and type");
+ logMessageAndHalt(req, 400, "Please specify objectId and type");
}
Note.NoteType type = null;
try {
type = Note.NoteType.valueOf(typeStr);
} catch (IllegalArgumentException e) {
- haltWithMessage(req, 400, "Please specify a valid type");
+ logMessageAndHalt(req, 400, "Please specify a valid type");
}
Model model = null;
@@ -61,7 +61,7 @@ public static Collection getAllNotes (Request req, Response res) {
break;
default:
// this shouldn't ever happen, but Java requires that every case be covered somehow so model can't be used uninitialized
- haltWithMessage(req, 400, "Unsupported type for notes");
+ logMessageAndHalt(req, 400, "Unsupported type for notes");
}
FeedSource s;
@@ -78,7 +78,7 @@ public static Collection getAllNotes (Request req, Response res) {
return model.retrieveNotes();
}
else {
- haltWithMessage(req, 401, "User not authorized to perform this action");
+ logMessageAndHalt(req, 401, "User not authorized to perform this action");
}
return null;
@@ -86,7 +86,7 @@ public static Collection getAllNotes (Request req, Response res) {
public static Note createNote (Request req, Response res) throws IOException {
Auth0UserProfile userProfile = req.attribute("user");
- if(userProfile == null) haltWithMessage(req, 401, "User not authorized to perform this action");
+ if(userProfile == null) logMessageAndHalt(req, 401, "User not authorized to perform this action");
String typeStr = req.queryParams("type");
String objectId = req.queryParams("objectId");
@@ -95,7 +95,7 @@ public static Note createNote (Request req, Response res) throws IOException {
try {
type = Note.NoteType.valueOf(typeStr);
} catch (IllegalArgumentException e) {
- haltWithMessage(req, 400, "Please specify a valid type");
+ logMessageAndHalt(req, 400, "Please specify a valid type");
}
Model objectWithNote = null;
@@ -109,7 +109,7 @@ public static Note createNote (Request req, Response res) throws IOException {
break;
default:
// this shouldn't ever happen, but Java requires that every case be covered somehow so model can't be used uninitialized
- haltWithMessage(req, 400, "Unsupported type for notes");
+ logMessageAndHalt(req, 400, "Unsupported type for notes");
}
FeedSource feedSource;
@@ -159,7 +159,7 @@ public static Note createNote (Request req, Response res) throws IOException {
return note;
}
else {
- haltWithMessage(req, 401, "User not authorized to perform this action");
+ logMessageAndHalt(req, 401, "User not authorized to perform this action");
}
return null;
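NoteController's changes are limited to the haltWithMessage -> logMessageAndHalt rename applied throughout this patch. SparkUtils itself is not part of this section, so the following is only a hedged sketch of what a helper with these call-site signatures could look like, not the actual implementation.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import spark.Request;

import static spark.Spark.halt;

// Hypothetical sketch: signatures inferred from the call sites above; not the real SparkUtils code.
public class LogAndHaltSketch {
    private static final Logger LOG = LoggerFactory.getLogger(LogAndHaltSketch.class);

    public static void logMessageAndHalt(Request req, int statusCode, String message) {
        logMessageAndHalt(req, statusCode, message, null);
    }

    public static void logMessageAndHalt(Request req, int statusCode, String message, Exception e) {
        // Server errors are logged with the exception; client errors only need the message.
        if (statusCode >= 500) {
            LOG.error(message, e);
        } else {
            LOG.warn(message);
        }
        // Halt the Spark request with a simple JSON body (a real helper might also use req for request details).
        halt(statusCode, String.format("{\"result\":\"ERR\",\"message\":\"%s\",\"code\":%d}", message, statusCode));
    }
}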
diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/OrganizationController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/OrganizationController.java
index 46c6a3d3f..ddf712093 100644
--- a/src/main/java/com/conveyal/datatools/manager/controllers/api/OrganizationController.java
+++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/OrganizationController.java
@@ -16,7 +16,7 @@
import java.util.Collection;
import java.util.List;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static spark.Spark.delete;
import static spark.Spark.get;
import static spark.Spark.post;
@@ -32,7 +32,7 @@ public class OrganizationController {
public static Organization getOrganization (Request req, Response res) {
String id = req.params("id");
if (id == null) {
- haltWithMessage(req, 400, "Must specify valid organization id");
+ logMessageAndHalt(req, 400, "Must specify valid organization id");
}
Organization org = Persistence.organizations.getById(id);
return org;
@@ -49,7 +49,7 @@ public static Collection getAllOrganizations (Request req, Respons
LOG.info("returning org {}", orgs);
return orgs;
} else {
- haltWithMessage(req, 401, "Must be application admin to view organizations");
+ logMessageAndHalt(req, 401, "Must be application admin to view organizations");
}
return null;
}
@@ -61,7 +61,7 @@ public static Organization createOrganization (Request req, Response res) {
Organization org = Persistence.organizations.create(req.body());
return org;
} else {
- haltWithMessage(req, 401, "Must be application admin to view organizations");
+ logMessageAndHalt(req, 401, "Must be application admin to view organizations");
}
return null;
}
@@ -108,7 +108,7 @@ public static Organization deleteOrganization (Request req, Response res) {
Organization org = requestOrganizationById(req);
Collection organizationProjects = org.projects();
if (organizationProjects != null && organizationProjects.size() > 0) {
- haltWithMessage(req, 400, "Cannot delete organization that is referenced by projects.");
+ logMessageAndHalt(req, 400, "Cannot delete organization that is referenced by projects.");
}
Persistence.organizations.removeById(org.id);
return org;
@@ -118,16 +118,16 @@ private static Organization requestOrganizationById(Request req) {
Auth0UserProfile userProfile = req.attribute("user");
String id = req.params("id");
if (id == null) {
- haltWithMessage(req, 400, "Must specify valid organization id");
+ logMessageAndHalt(req, 400, "Must specify valid organization id");
}
if (userProfile.canAdministerApplication()) {
Organization org = Persistence.organizations.getById(id);
if (org == null) {
- haltWithMessage(req, 400, "Organization does not exist");
+ logMessageAndHalt(req, 400, "Organization does not exist");
}
return org;
} else {
- haltWithMessage(req, 401, "Must be application admin to modify organization");
+ logMessageAndHalt(req, 401, "Must be application admin to modify organization");
}
return null;
}
diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/ProjectController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/ProjectController.java
index d506c4cc3..cc4db1c1c 100644
--- a/src/main/java/com/conveyal/datatools/manager/controllers/api/ProjectController.java
+++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/ProjectController.java
@@ -1,42 +1,36 @@
package com.conveyal.datatools.manager.controllers.api;
+import com.conveyal.datatools.common.utils.Scheduler;
import com.conveyal.datatools.manager.DataManager;
import com.conveyal.datatools.manager.auth.Auth0UserProfile;
import com.conveyal.datatools.manager.jobs.FetchProjectFeedsJob;
import com.conveyal.datatools.manager.jobs.MakePublicJob;
-import com.conveyal.datatools.manager.jobs.MergeProjectFeedsJob;
+import com.conveyal.datatools.manager.jobs.MergeFeedsJob;
import com.conveyal.datatools.manager.models.FeedDownloadToken;
+import com.conveyal.datatools.manager.models.FeedSource;
import com.conveyal.datatools.manager.models.FeedVersion;
import com.conveyal.datatools.manager.models.JsonViews;
import com.conveyal.datatools.manager.models.Project;
import com.conveyal.datatools.manager.persistence.FeedStore;
import com.conveyal.datatools.manager.persistence.Persistence;
import com.conveyal.datatools.manager.utils.json.JsonManager;
-import com.fasterxml.jackson.core.JsonProcessingException;
import org.bson.Document;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import spark.Request;
import spark.Response;
-import java.io.IOException;
-import java.time.Instant;
-import java.time.LocalDate;
-import java.time.LocalDateTime;
-import java.time.LocalTime;
-import java.time.ZoneId;
-import java.time.ZonedDateTime;
-import java.time.format.DateTimeFormatter;
import java.util.Collection;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
+import java.util.HashSet;
+import java.util.Set;
import java.util.stream.Collectors;
import static com.conveyal.datatools.common.utils.S3Utils.downloadFromS3;
import static com.conveyal.datatools.common.utils.SparkUtils.downloadFile;
import static com.conveyal.datatools.common.utils.SparkUtils.formatJobMessage;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.conveyal.datatools.manager.DataManager.publicPath;
+import static com.conveyal.datatools.manager.jobs.MergeFeedsType.REGIONAL;
import static spark.Spark.delete;
import static spark.Spark.get;
import static spark.Spark.post;
@@ -59,7 +53,7 @@ public class ProjectController {
/**
* @return a list of all projects that are public or visible given the current user and organization.
*/
- private static Collection getAllProjects(Request req, Response res) throws JsonProcessingException {
+ private static Collection getAllProjects(Request req, Response res) {
Auth0UserProfile userProfile = req.attribute("user");
// TODO: move this filtering into database query to reduce traffic / memory
return Persistence.projects.getAll().stream()
@@ -90,9 +84,10 @@ private static Project createProject(Request req, Response res) {
if (organizationId == null) allowedToCreate = true;
if (allowedToCreate) {
Project newlyStoredProject = Persistence.projects.create(req.body());
+ Scheduler.scheduleAutoFeedFetch(newlyStoredProject);
return newlyStoredProject;
} else {
- haltWithMessage(req, 403, "Not authorized to create a project on organization " + organizationId);
+ logMessageAndHalt(req, 403, "Not authorized to create a project on organization " + organizationId);
return null;
}
}
@@ -102,7 +97,7 @@ private static Project createProject(Request req, Response res) {
* body.
* @return the Project as it appears in the database after the update.
*/
- private static Project updateProject(Request req, Response res) throws IOException {
+ private static Project updateProject(Request req, Response res) {
// Fetch the project once to check permissions
requestProjectById(req, "manage");
try {
@@ -111,23 +106,10 @@ private static Project updateProject(Request req, Response res) throws IOExcepti
Project updatedProject = Persistence.projects.update(id, req.body());
// Catch updates to auto-fetch params, and update the autofetch schedule accordingly.
// TODO factor out into generic update hooks, or at least separate method
- if (updateDocument.containsKey("autoFetchHour")
- || updateDocument.containsKey("autoFetchMinute")
- || updateDocument.containsKey("autoFetchFeeds")
- || updateDocument.containsKey("defaultTimeZone")) {
- // If auto fetch flag is turned on
- if (updatedProject.autoFetchFeeds) {
- ScheduledFuture fetchAction = scheduleAutoFeedFetch(updatedProject, 1);
- DataManager.autoFetchMap.put(updatedProject.id, fetchAction);
- } else {
- // otherwise, cancel any existing task for this id
- cancelAutoFetch(updatedProject.id);
- }
- }
+ Scheduler.scheduleAutoFeedFetch(updatedProject);
return updatedProject;
} catch (Exception e) {
- e.printStackTrace();
- haltWithMessage(req, 400, "Error updating project");
+ logMessageAndHalt(req, 500, "Error updating project", e);
return null;
}
}
@@ -138,7 +120,10 @@ private static Project updateProject(Request req, Response res) throws IOExcepti
private static Project deleteProject(Request req, Response res) {
// Fetch project first to check permissions, and so we can return the deleted project after deletion.
Project project = requestProjectById(req, "manage");
- project.delete();
+ boolean successfullyDeleted = project.delete();
+ if (!successfullyDeleted) {
+ logMessageAndHalt(req, 500, "Did not delete project.", new Exception("Delete unsuccessful"));
+ }
return project;
}
@@ -166,7 +151,7 @@ public static Boolean fetch(Request req, Response res) {
private static Project requestProjectById (Request req, String action) {
String id = req.params("id");
if (id == null) {
- haltWithMessage(req, 400, "Please specify id param");
+ logMessageAndHalt(req, 400, "Please specify id param");
}
return checkProjectPermissions(req, Persistence.projects.getById(id), action);
}
@@ -191,7 +176,7 @@ private static Project checkProjectPermissions(Request req, Project project, Str
// check for null project
if (project == null) {
- haltWithMessage(req, 400, "Project ID does not exist");
+ logMessageAndHalt(req, 400, "Project ID does not exist");
return null;
}
@@ -221,7 +206,7 @@ private static Project checkProjectPermissions(Request req, Project project, Str
} else {
project.feedSources = null;
if (!authorized) {
- haltWithMessage(req, 403, "User not authorized to perform action on project");
+ logMessageAndHalt(req, 403, "User not authorized to perform action on project");
return null;
}
}
@@ -235,14 +220,28 @@ private static Project checkProjectPermissions(Request req, Project project, Str
* to getFeedDownloadCredentials with the project ID to obtain either temporary S3 credentials or a download token
* (depending on application configuration "application.data.use_s3_storage") to download the zip file.
*/
- private static String downloadMergedFeed(Request req, Response res) {
+ static String mergeProjectFeeds(Request req, Response res) {
Project project = requestProjectById(req, "view");
Auth0UserProfile userProfile = req.attribute("user");
// TODO: make this an authenticated call?
- MergeProjectFeedsJob mergeProjectFeedsJob = new MergeProjectFeedsJob(project, userProfile.getUser_id());
- DataManager.heavyExecutor.execute(mergeProjectFeedsJob);
+ Set feedVersions = new HashSet<>();
+ // Get latest version for each feed source in project
+ Collection feedSources = project.retrieveProjectFeedSources();
+ for (FeedSource fs : feedSources) {
+ // check if feed version exists
+ FeedVersion version = fs.retrieveLatest();
+ if (version == null) {
+ LOG.warn("Skipping {} because it has no feed versions", fs.name);
+ continue;
+ }
+            // Add the feed source's latest version to the set of versions to merge.
+ LOG.info("Adding {} feed to merged zip", fs.name);
+ feedVersions.add(version);
+ }
+ MergeFeedsJob mergeFeedsJob = new MergeFeedsJob(userProfile.getUser_id(), feedVersions, project.id, REGIONAL);
+ DataManager.heavyExecutor.execute(mergeFeedsJob);
// Return job ID to requester for monitoring job status.
- return formatJobMessage(mergeProjectFeedsJob.jobId, "Merge operation is processing.");
+ return formatJobMessage(mergeFeedsJob.jobId, "Merge operation is processing.");
}
/**
@@ -274,11 +273,11 @@ private static boolean publishPublicFeeds(Request req, Response res) {
Auth0UserProfile userProfile = req.attribute("user");
String id = req.params("id");
if (id == null) {
- haltWithMessage(req, 400, "must provide project id!");
+ logMessageAndHalt(req, 400, "must provide project id!");
}
Project p = Persistence.projects.getById(id);
if (p == null) {
- haltWithMessage(req, 400, "no such project!");
+ logMessageAndHalt(req, 400, "no such project!");
}
// Run this as a synchronous job; if it proves to be too slow we will change to asynchronous.
new MakePublicJob(p, userProfile.getUser_id()).run();
@@ -298,80 +297,21 @@ private static Project thirdPartySync(Request req, Response res) {
String syncType = req.params("type");
if (!userProfile.canAdministerProject(proj.id, proj.organizationId)) {
- haltWithMessage(req, 403, "Third-party sync not permitted for user.");
+ logMessageAndHalt(req, 403, "Third-party sync not permitted for user.");
}
LOG.info("syncing with third party " + syncType);
if(DataManager.feedResources.containsKey(syncType)) {
- DataManager.feedResources.get(syncType).importFeedsForProject(proj, req.headers("Authorization"));
- return proj;
- }
-
- haltWithMessage(req, 404, syncType + " sync type not enabled for application.");
- return null;
- }
-
- /**
- * Schedule an action that fetches all the feeds in the given project according to the autoFetch fields of that project.
- * Currently feeds are not auto-fetched independently, they must be all fetched together as part of a project.
- * This method is called when a Project's auto-fetch settings are updated, and when the system starts up to populate
- * the auto-fetch scheduler.
- */
- public static ScheduledFuture scheduleAutoFeedFetch (Project project, int intervalInDays) {
- TimeUnit minutes = TimeUnit.MINUTES;
- try {
- // First cancel any already scheduled auto fetch task for this project id.
- cancelAutoFetch(project.id);
-
- ZoneId timezone;
try {
- timezone = ZoneId.of(project.defaultTimeZone);
- }catch(Exception e){
- timezone = ZoneId.of("America/New_York");
- }
- LOG.info("Scheduling auto-fetch for projectID: {}", project.id);
-
- // NOW in default timezone
- ZonedDateTime now = ZonedDateTime.ofInstant(Instant.now(), timezone);
-
- // Scheduled start time
- ZonedDateTime startTime = LocalDateTime.of(LocalDate.now(),
- LocalTime.of(project.autoFetchHour, project.autoFetchMinute)).atZone(timezone);
- LOG.info("Now: {}", now.format(DateTimeFormatter.ISO_ZONED_DATE_TIME));
- LOG.info("Scheduled start time: {}", startTime.format(DateTimeFormatter.ISO_ZONED_DATE_TIME));
-
- // Get diff between start time and current time
- long diffInMinutes = (startTime.toEpochSecond() - now.toEpochSecond()) / 60;
- long delayInMinutes;
- if ( diffInMinutes >= 0 ){
- delayInMinutes = diffInMinutes; // delay in minutes
+ DataManager.feedResources.get(syncType).importFeedsForProject(proj, req.headers("Authorization"));
+ } catch (Exception e) {
+ logMessageAndHalt(req, 500, "An error occurred while trying to sync", e);
}
- else{
- delayInMinutes = 24 * 60 + diffInMinutes; // wait for one day plus difference (which is negative)
- }
-
- LOG.info("Auto fetch begins in {} hours and runs every {} hours", String.valueOf(delayInMinutes / 60.0), TimeUnit.DAYS.toHours(intervalInDays));
-
- // system is defined as owner because owner field must not be null
- FetchProjectFeedsJob fetchProjectFeedsJob = new FetchProjectFeedsJob(project, "system");
- return DataManager.scheduler.scheduleAtFixedRate(fetchProjectFeedsJob,
- delayInMinutes, TimeUnit.DAYS.toMinutes(intervalInDays), minutes);
- } catch (Exception e) {
- e.printStackTrace();
- return null;
+ return proj;
}
- }
- /**
- * Cancel an existing auto-fetch job that is scheduled for the given project ID.
- * There is only one auto-fetch job per project, not one for each feedSource within the project.
- */
- private static void cancelAutoFetch(String projectId){
- Project p = Persistence.projects.getById(projectId);
- if ( p != null && DataManager.autoFetchMap.get(p.id) != null) {
- LOG.info("Cancelling auto-fetch for projectID: {}", p.id);
- DataManager.autoFetchMap.get(p.id).cancel(true);
- }
+ logMessageAndHalt(req, 404, syncType + " sync type not enabled for application.");
+ return null;
}
/**
@@ -388,7 +328,7 @@ public static void register (String apiPrefix) {
post(apiPrefix + "secure/project/:id/fetch", ProjectController::fetch, json::write);
post(apiPrefix + "secure/project/:id/deployPublic", ProjectController::publishPublicFeeds, json::write);
- get(apiPrefix + "secure/project/:id/download", ProjectController::downloadMergedFeed);
+ get(apiPrefix + "secure/project/:id/download", ProjectController::mergeProjectFeeds);
get(apiPrefix + "secure/project/:id/downloadtoken", ProjectController::getFeedDownloadCredentials, json::write);
get(apiPrefix + "public/project/:id", ProjectController::getProject, json::write);
@@ -404,7 +344,7 @@ private static Object downloadMergedFeedWithToken(Request req, Response res) {
FeedDownloadToken token = Persistence.tokens.getById(req.params("token"));
if(token == null || !token.isValid()) {
- haltWithMessage(req, 400, "Feed download token not valid");
+ logMessageAndHalt(req, 400, "Feed download token not valid");
}
Project project = token.retrieveProject();
diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/StatusController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/StatusController.java
index 355ac8fe1..ae701864f 100644
--- a/src/main/java/com/conveyal/datatools/manager/controllers/api/StatusController.java
+++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/StatusController.java
@@ -5,7 +5,7 @@
import com.conveyal.datatools.manager.auth.Auth0UserProfile;
import com.conveyal.datatools.manager.models.JsonViews;
import com.conveyal.datatools.manager.utils.json.JsonManager;
-import org.eclipse.jetty.util.ConcurrentHashSet;
+import com.google.common.collect.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import spark.Request;
@@ -17,7 +17,7 @@
import java.util.Set;
import java.util.stream.Collectors;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static spark.Spark.get;
/**
@@ -33,7 +33,7 @@ public class StatusController {
private static Set<MonitorableJob> getAllJobsRoute(Request req, Response res) {
Auth0UserProfile userProfile = req.attribute("user");
if (!userProfile.canAdministerApplication()) {
- haltWithMessage(req, 401, "User not authorized to view all jobs");
+ logMessageAndHalt(req, 401, "User not authorized to view all jobs");
}
return getAllJobs();
}
@@ -109,15 +109,16 @@ private static Set getJobsByUserId(String userId, boolean clearC
// Any active jobs will still have their status updated, so they need to be retrieved again with any status
// updates. All completed or errored jobs are in their final state and will not be updated any longer, so we
// remove them once the client has seen them.
- ConcurrentHashSet<MonitorableJob> jobsStillActive = filterActiveJobs(allJobsForUser);
+ Set<MonitorableJob> jobsStillActive = filterActiveJobs(allJobsForUser);
DataManager.userJobsMap.put(userId, jobsStillActive);
}
return allJobsForUser;
}
- public static ConcurrentHashSet<MonitorableJob> filterActiveJobs(Set<MonitorableJob> jobs) {
- ConcurrentHashSet<MonitorableJob> jobsStillActive = new ConcurrentHashSet<>();
+ public static Set<MonitorableJob> filterActiveJobs(Set<MonitorableJob> jobs) {
+ // Note: this must be a thread-safe set in case it is placed into the DataManager#userJobsMap.
+ Set<MonitorableJob> jobsStillActive = Sets.newConcurrentHashSet();
jobs.stream()
.filter(job -> !job.status.completed && !job.status.error)
.forEach(jobsStillActive::add);
diff --git a/src/main/java/com/conveyal/datatools/manager/controllers/api/UserController.java b/src/main/java/com/conveyal/datatools/manager/controllers/api/UserController.java
index 106aa676a..3f0ffe967 100644
--- a/src/main/java/com/conveyal/datatools/manager/controllers/api/UserController.java
+++ b/src/main/java/com/conveyal/datatools/manager/controllers/api/UserController.java
@@ -16,9 +16,11 @@
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPatch;
import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
@@ -30,6 +32,7 @@
import java.io.IOException;
import java.io.Serializable;
+import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
@@ -39,7 +42,8 @@
import java.util.List;
import java.util.Map;
-import static com.conveyal.datatools.common.utils.SparkUtils.haltWithMessage;
+import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
+import static com.conveyal.datatools.manager.auth.Auth0Users.USERS_API_PATH;
import static com.conveyal.datatools.manager.auth.Auth0Users.getUserById;
import static spark.Spark.delete;
import static spark.Spark.get;
@@ -51,37 +55,40 @@
*/
public class UserController {
- private static String AUTH0_DOMAIN = DataManager.getConfigPropertyAsText("AUTH0_DOMAIN");
- private static String AUTH0_CLIENT_ID = DataManager.getConfigPropertyAsText("AUTH0_CLIENT_ID");
- private static String AUTH0_API_TOKEN = DataManager.getConfigPropertyAsText("AUTH0_TOKEN");
+ private static final String AUTH0_DOMAIN = DataManager.getConfigPropertyAsText("AUTH0_DOMAIN");
+ private static final String AUTH0_CLIENT_ID = DataManager.getConfigPropertyAsText("AUTH0_CLIENT_ID");
+ public static final int TEST_AUTH0_PORT = 8089;
+ public static final String TEST_AUTH0_DOMAIN = String.format("localhost:%d", TEST_AUTH0_PORT);
private static Logger LOG = LoggerFactory.getLogger(UserController.class);
private static ObjectMapper mapper = new ObjectMapper();
- public static JsonManager<Project> json =
- new JsonManager<>(Project.class, JsonViews.UserInterface.class);
+ private static final String UTF_8 = "UTF-8";
+ public static final String DEFAULT_BASE_USERS_URL = "https://" + AUTH0_DOMAIN + USERS_API_PATH;
+ /** Users URL uses Auth0 domain by default, but can be overridden with {@link #setBaseUsersUrl(String)} for testing. */
+ private static String baseUsersUrl = DEFAULT_BASE_USERS_URL;
+ private static final JsonManager<Project> json = new JsonManager<>(Project.class, JsonViews.UserInterface.class);
/**
* HTTP endpoint to get a single Auth0 user for the application (by specified ID param). Note, this uses a different
* Auth0 API (get user) than the other get methods (user search query).
*/
- private static String getUser(Request req, Response res) throws IOException {
- String url = "https://" + AUTH0_DOMAIN + "/api/v2/users/" + URLEncoder.encode(req.params("id"), "UTF-8");
- String charset = "UTF-8";
-
- HttpGet request = new HttpGet(url);
- request.addHeader("Authorization", "Bearer " + AUTH0_API_TOKEN);
- request.setHeader("Accept-Charset", charset);
-
- HttpClient client = HttpClientBuilder.create().build();
- HttpResponse response = client.execute(request);
- String result = EntityUtils.toString(response.getEntity());
+ private static String getUser(Request req, Response res) {
+ HttpGet getUserRequest = new HttpGet(getUserIdUrl(req));
+ setHeaders(req, getUserRequest);
+ return executeRequestAndGetResult(getUserRequest, req);
+ }
- return result;
+ /**
+ * Determines whether the user controller is being run in a testing environment by checking if the users URL contains
+ * the {@link #TEST_AUTH0_DOMAIN}.
+ */
+ public static boolean inTestingEnvironment() {
+ return baseUsersUrl.contains(TEST_AUTH0_DOMAIN);
}
/**
* HTTP endpoint to get all users for the application (using a filtered search on all users for the Auth0 tenant).
*/
- private static String getAllUsers(Request req, Response res) throws IOException {
+ private static String getAllUsers(Request req, Response res) {
res.type("application/json");
int page = Integer.parseInt(req.queryParams("page"));
String queryString = filterUserSearchQuery(req);
@@ -112,7 +119,7 @@ private static String filterUserSearchQuery(Request req) {
}
return queryString;
} else {
- haltWithMessage(req, 401, "Must be application or organization admin to view users");
+ logMessageAndHalt(req, 401, "Must be application or organization admin to view users");
// Return statement cannot be reached due to halt.
return null;
}
@@ -121,10 +128,15 @@ private static String filterUserSearchQuery(Request req) {
/**
* Gets the total count of users that match the filtered user search query.
*/
- private static int getUserCount(Request req, Response res) throws IOException {
+ private static int getUserCount(Request req, Response res) {
res.type("application/json");
String queryString = filterUserSearchQuery(req);
- return Auth0Users.getAuth0UserCount(queryString);
+ try {
+ return Auth0Users.getAuth0UserCount(queryString);
+ } catch (IOException e) {
+ logMessageAndHalt(req, 500, "Failed to get user count", e);
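+ // Not reached in practice: logMessageAndHalt halts the request, so the return below merely satisfies the compiler.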
+ return 0;
+ }
}
/**
@@ -133,131 +145,81 @@ private static int getUserCount(Request req, Response res) throws IOException {
* Note, this passes a "blank" app_metadata object to the newly created user, so there is no risk of someone
* injecting permissions somehow into the create user request.
*/
- private static String createPublicUser(Request req, Response res) throws IOException {
- String url = "https://" + AUTH0_DOMAIN + "/api/v2/users";
- String charset = "UTF-8";
-
- HttpPost request = new HttpPost(url);
- request.addHeader("Authorization", "Bearer " + AUTH0_API_TOKEN);
- request.setHeader("Accept-Charset", charset);
- request.setHeader("Content-Type", "application/json");
- JsonNode jsonNode = mapper.readTree(req.body());
+ private static String createPublicUser(Request req, Response res) {
+ HttpPost createUserRequest = new HttpPost(baseUsersUrl);
+ setHeaders(req, createUserRequest);
+
+ JsonNode jsonNode = parseJsonFromBody(req);
String json = String.format("{" +
"\"connection\": \"Username-Password-Authentication\"," +
"\"email\": %s," +
"\"password\": %s," +
"\"app_metadata\": {\"datatools\": [{\"permissions\": [], \"projects\": [], \"subscriptions\": [], \"client_id\": \"%s\" }] } }",
jsonNode.get("email"), jsonNode.get("password"), AUTH0_CLIENT_ID);
- HttpEntity entity = new ByteArrayEntity(json.getBytes(charset));
- request.setEntity(entity);
-
- HttpClient client = HttpClientBuilder.create().build();
- HttpResponse response = client.execute(request);
- String result = EntityUtils.toString(response.getEntity());
- int statusCode = response.getStatusLine().getStatusCode();
- if(statusCode >= 300) haltWithMessage(req, statusCode, response.toString());
+ setRequestEntityUsingJson(createUserRequest, json, req);
- return result;
+ return executeRequestAndGetResult(createUserRequest, req);
}
/**
* HTTP endpoint to create new Auth0 user for the application.
- *
- * FIXME: This endpoint fails if the user's email already exists in the Auth0 tenant.
*/
- private static String createUser(Request req, Response res) throws IOException {
- String url = "https://" + AUTH0_DOMAIN + "/api/v2/users";
- String charset = "UTF-8";
-
- HttpPost request = new HttpPost(url);
- request.addHeader("Authorization", "Bearer " + AUTH0_API_TOKEN);
- request.setHeader("Accept-Charset", charset);
- request.setHeader("Content-Type", "application/json");
- JsonNode jsonNode = mapper.readTree(req.body());
+ private static String createUser(Request req, Response res) {
+ HttpPost createUserRequest = new HttpPost(baseUsersUrl);
+ setHeaders(req, createUserRequest);
+
+ JsonNode jsonNode = parseJsonFromBody(req);
String json = String.format("{" +
"\"connection\": \"Username-Password-Authentication\"," +
"\"email\": %s," +
"\"password\": %s," +
"\"app_metadata\": {\"datatools\": [%s] } }"
, jsonNode.get("email"), jsonNode.get("password"), jsonNode.get("permissions"));
- HttpEntity entity = new ByteArrayEntity(json.getBytes(charset));
- request.setEntity(entity);
+ setRequestEntityUsingJson(createUserRequest, json, req);
- HttpClient client = HttpClientBuilder.create().build();
- HttpResponse response = client.execute(request);
- String result = EntityUtils.toString(response.getEntity());
-
- int statusCode = response.getStatusLine().getStatusCode();
- if(statusCode >= 300) {
- // If Auth0 status shows an error, throw a halt with a reasonably intelligible message.
- LOG.error("Auth0 error encountered. Could not create user: {}", response.toString());
- String errorMessage;
- switch (statusCode) {
- case HttpStatus.CONFLICT_409:
- errorMessage = String.format("User already exists for email address %s.", jsonNode.get("email"));
- break;
- default:
- errorMessage = String.format("Error while creating user: %s.", HttpStatus.getMessage(statusCode));
- break;
- }
- haltWithMessage(req, statusCode, errorMessage);
- }
- return result;
+ return executeRequestAndGetResult(createUserRequest, req);
}
- private static Object updateUser(Request req, Response res) throws IOException {
+ private static String updateUser(Request req, Response res) {
String userId = req.params("id");
Auth0UserProfile user = getUserById(userId);
- LOG.info("Updating user {}", user.getEmail());
-
- String url = "https://" + AUTH0_DOMAIN + "/api/v2/users/" + URLEncoder.encode(userId, "UTF-8");
- String charset = "UTF-8";
+ if (user == null) {
+ logMessageAndHalt(
+ req,
+ 404,
+ String.format("Could not update user: User with id %s not found (or there are issues with the Auth0 configuration)", userId)
+ );
+ }
+ LOG.info("Updating user {}", user.getEmail());
- HttpPatch request = new HttpPatch(url);
+ HttpPatch updateUserRequest = new HttpPatch(getUserIdUrl(req));
+ setHeaders(req, updateUserRequest);
- request.addHeader("Authorization", "Bearer " + AUTH0_API_TOKEN);
- request.setHeader("Accept-Charset", charset);
- request.setHeader("Content-Type", "application/json");
+ JsonNode jsonNode = parseJsonFromBody(req);
- JsonNode jsonNode = mapper.readTree(req.body());
// JsonNode data = mapper.readValue(jsonNode.retrieveById("data"), Auth0UserProfile.DatatoolsInfo.class); //jsonNode.retrieveById("data");
JsonNode data = jsonNode.get("data");
- System.out.println(data.asText());
+
Iterator<Map.Entry<String, JsonNode>> fieldsIter = data.fields();
while (fieldsIter.hasNext()) {
Map.Entry<String, JsonNode> entry = fieldsIter.next();
- System.out.println(entry.getValue());
}
// if (!data.has("client_id")) {
// ((ObjectNode)data).put("client_id", DataManager.config.retrieveById("auth0").retrieveById("client_id").asText());
// }
String json = "{ \"app_metadata\": { \"datatools\" : " + data + " }}";
- System.out.println(json);
- HttpEntity entity = new ByteArrayEntity(json.getBytes(charset));
- request.setEntity(entity);
- HttpClient client = HttpClientBuilder.create().build();
- HttpResponse response = client.execute(request);
- String result = EntityUtils.toString(response.getEntity());
+ setRequestEntityUsingJson(updateUserRequest, json, req);
- return mapper.readTree(result);
+ return executeRequestAndGetResult(updateUserRequest, req);
}
- private static Object deleteUser(Request req, Response res) throws IOException {
- String url = "https://" + AUTH0_DOMAIN + "/api/v2/users/" + URLEncoder.encode(req.params("id"), "UTF-8");
- String charset = "UTF-8";
-
- HttpDelete request = new HttpDelete(url);
- request.addHeader("Authorization", "Bearer " + AUTH0_API_TOKEN);
- request.setHeader("Accept-Charset", charset);
-
- HttpClient client = HttpClientBuilder.create().build();
- HttpResponse response = client.execute(request);
- int statusCode = response.getStatusLine().getStatusCode();
- if(statusCode >= 300) haltWithMessage(req, statusCode, response.getStatusLine().getReasonPhrase());
-
+ private static Object deleteUser(Request req, Response res) {
+ HttpDelete deleteUserRequest = new HttpDelete(getUserIdUrl(req));
+ setHeaders(req, deleteUserRequest);
+ executeRequestAndGetResult(deleteUserRequest, req);
return true;
}
@@ -276,7 +238,7 @@ private static Object getRecentActivity(Request req, Response res) {
Auth0UserProfile.DatatoolsInfo datatools = userProfile.getApp_metadata().getDatatoolsInfo();
if (datatools == null) {
// NOTE: this condition will also occur if DISABLE_AUTH is set to true
- haltWithMessage(req, 403, "User does not have permission to access to this application");
+ logMessageAndHalt(req, 403, "User does not have permission to access to this application");
}
Auth0UserProfile.Subscription[] subscriptions = datatools.getSubscriptions();
@@ -338,6 +300,155 @@ private static Object getRecentActivity(Request req, Response res) {
return activityList;
}
+ /**
+ * Set some common headers on the request, including the API access token, which must be obtained via token request
+ * to Auth0.
+ */
+ private static void setHeaders(Request sparkRequest, HttpRequestBase auth0Request) {
+ String apiToken = Auth0Users.getApiToken();
+ if (apiToken == null) {
+ logMessageAndHalt(
+ sparkRequest,
+ 400,
+ "Failed to obtain Auth0 API token for request"
+ );
+ }
+ auth0Request.addHeader("Authorization", "Bearer " + apiToken);
+ auth0Request.setHeader("Accept-Charset", UTF_8);
+ auth0Request.setHeader("Content-Type", "application/json");
+ }
+
+ /**
+ * Safely parse the userId and create an Auth0 url.
+ *
+ * @param req The initiating request that came into datatools-server
+ */
+ private static String getUserIdUrl(Request req) {
+ try {
+ return String.format(
+ "%s/%s",
+ baseUsersUrl,
+ URLEncoder.encode(req.params("id"), "UTF-8")
+ );
+ } catch (UnsupportedEncodingException e) {
+ logMessageAndHalt(
+ req,
+ 400,
+ "Failed to encode user id",
+ e
+ );
+ }
+ return null;
+ }
+
+ /**
+ * Safely parse the request body into a JsonNode.
+ *
+ * @param req The initiating request that came into datatools-server
+ */
+ private static JsonNode parseJsonFromBody(Request req) {
+ try {
+ return mapper.readTree(req.body());
+ } catch (IOException e) {
+ logMessageAndHalt(req, 400, "Failed to parse request body", e);
+ return null;
+ }
+ }
+
+ /**
+ * Safely set the HTTP request body with a json string.
+ *
+ * @param request the outgoing HTTP post request
+ * @param json The json to set in the request body
+ * @param req The initiating request that came into datatools-server
+ */
+ private static void setRequestEntityUsingJson(HttpEntityEnclosingRequestBase request, String json, Request req) {
+ HttpEntity entity = null;
+ try {
+ entity = new ByteArrayEntity(json.getBytes(UTF_8));
+ } catch (UnsupportedEncodingException e) {
+ logMessageAndHalt(
+ req,
+ 500,
+ "Failed to set entity body due to encoding issue.",
+ e
+ );
+ }
+ request.setEntity(entity);
+ }
+
+ /**
+ * Executes and logs an outgoing HTTP request, makes sure it worked and then returns the
+ * stringified response body.
+ *
+ * @param httpRequest The outgoing HTTP request
+ * @param req The initiating request that came into datatools-server
+ */
+ private static String executeRequestAndGetResult(HttpRequestBase httpRequest, Request req) {
+ // Execute the outgoing HTTP request.
+ HttpClient client = HttpClientBuilder.create().build();
+ HttpResponse response = null;
+ try {
+ LOG.info("Making request: ({})", httpRequest.toString());
+ response = client.execute(httpRequest);
+ } catch (IOException e) {
+ LOG.error("HTTP request failed: ({})", httpRequest.toString());
+ logMessageAndHalt(
+ req,
+ 500,
+ "Failed to make external HTTP request.",
+ e
+ );
+ }
+
+ // parse response body if there is one
+ HttpEntity entity = response.getEntity();
+ String result = null;
+ if (entity != null) {
+ try {
+ result = EntityUtils.toString(entity);
+ } catch (IOException e) {
+ logMessageAndHalt(
+ req,
+ 500,
+ String.format(
+ "Failed to parse result of http request (%s).",
+ httpRequest.toString()
+ ),
+ e
+ );
+ }
+ }
+
+ int statusCode = response.getStatusLine().getStatusCode();
+ if(statusCode >= 300) {
+ LOG.error(
+ "HTTP request returned error code >= 300: ({}). Body: {}",
+ httpRequest.toString(),
+ result != null ? result : ""
+ );
+ // attempt to parse auth0 response to respond with an error message
+ String auth0Message = "An Auth0 error occurred";
+ JsonNode jsonResponse = null;
+ try {
+ jsonResponse = mapper.readTree(result);
+ } catch (IOException e) {
+ LOG.warn("Could not parse json from auth0 error message. Body: {}", result != null ? result : "");
+ e.printStackTrace();
+ }
+
+ if (jsonResponse != null && jsonResponse.has("message")) {
+ auth0Message = String.format("%s: %s", auth0Message, jsonResponse.get("message").asText());
+ }
+
+ logMessageAndHalt(req, statusCode, auth0Message);
+ }
+
+ LOG.info("Successfully made request: ({})", httpRequest.toString());
+
+ return result;
+ }
+
private static ZonedDateTime toZonedDateTime (Date date) {
return ZonedDateTime.ofInstant(date.toInstant(), ZoneOffset.UTC);
}
@@ -426,6 +537,14 @@ public FeedVersionCommentActivity(Note note, FeedSource feedSource, FeedVersion
}
}
+ /**
+ * Used to override the base url for making requests to Auth0. This is primarily used for testing purposes to set
+ * the url to something that is stubbed with WireMock.
+ */
+ public static void setBaseUsersUrl (String url) {
+ baseUsersUrl = url;
+ }
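+ // Illustrative (not part of this change): a test could stub Auth0 with WireMock and then call
+ // setBaseUsersUrl("http://" + TEST_AUTH0_DOMAIN + USERS_API_PATH) before exercising these endpoints.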
+
public static void register (String apiPrefix) {
get(apiPrefix + "secure/user/:id", UserController::getUser, json::write);
get(apiPrefix + "secure/user/:id/recentactivity", UserController::getRecentActivity, json::write);
diff --git a/src/main/java/com/conveyal/datatools/manager/extensions/ExternalFeedResource.java b/src/main/java/com/conveyal/datatools/manager/extensions/ExternalFeedResource.java
index 98e9a788a..c761449de 100644
--- a/src/main/java/com/conveyal/datatools/manager/extensions/ExternalFeedResource.java
+++ b/src/main/java/com/conveyal/datatools/manager/extensions/ExternalFeedResource.java
@@ -5,6 +5,8 @@
import com.conveyal.datatools.manager.models.FeedVersion;
import com.conveyal.datatools.manager.models.Project;
+import java.io.IOException;
+
/**
* Created by demory on 3/30/16.
*/
@@ -12,11 +14,11 @@ public interface ExternalFeedResource {
public String getResourceType();
- public void importFeedsForProject(Project project, String authHeader);
+ public void importFeedsForProject(Project project, String authHeader) throws Exception;
- public void feedSourceCreated(FeedSource source, String authHeader);
+ public void feedSourceCreated(FeedSource source, String authHeader) throws Exception;
- public void propertyUpdated(ExternalFeedSourceProperty property, String previousValue, String authHeader);
+ public void propertyUpdated(ExternalFeedSourceProperty property, String previousValue, String authHeader) throws IOException;
public void feedVersionCreated(FeedVersion feedVersion, String authHeader);
}
diff --git a/src/main/java/com/conveyal/datatools/manager/extensions/mtc/MtcFeedResource.java b/src/main/java/com/conveyal/datatools/manager/extensions/mtc/MtcFeedResource.java
index c70d83baa..99e2125b9 100644
--- a/src/main/java/com/conveyal/datatools/manager/extensions/mtc/MtcFeedResource.java
+++ b/src/main/java/com/conveyal/datatools/manager/extensions/mtc/MtcFeedResource.java
@@ -15,16 +15,27 @@
import java.io.BufferedReader;
import java.io.File;
+import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.lang.reflect.Field;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
+import java.util.Collection;
import static com.conveyal.datatools.manager.models.ExternalFeedSourceProperty.constructId;
/**
+ * This class implements the {@link ExternalFeedResource} interface for the MTC RTD database list of carriers (transit
+ * operators) and allows the Data Tools application to read and sync the list of carriers to a set of feed sources for a
+ * given project.
+ *
+ * This is generally intended as an initialization step to importing feed sources into a project; however, it should
+ * support subsequent sync requests (e.g., if new agencies are expected in the external feed resource, syncing should
+ * import those OR if feed properties are expected to have changed in the external feed resource, they should be updated
+ * accordingly in Data Tools).
+ *
* Created by demory on 3/30/16.
*/
public class MtcFeedResource implements ExternalFeedResource {
@@ -33,7 +44,7 @@ public class MtcFeedResource implements ExternalFeedResource {
private String rtdApi, s3Bucket, s3Prefix, s3CredentialsFilename;
- public static final String AGENCY_ID = "AgencyId";
+ public static final String AGENCY_ID_FIELDNAME = "AgencyId";
public static final String RESOURCE_TYPE = "MTC";
public MtcFeedResource() {
rtdApi = DataManager.getExtensionPropertyAsText(RESOURCE_TYPE, "rtd_api");
@@ -47,109 +58,86 @@ public String getResourceType() {
return RESOURCE_TYPE;
}
+ /**
+ * Fetch the list of feeds from the MTC endpoint, create any feed sources that do not match on agencyID, and update
+ * the external feed source properties.
+ */
@Override
- public void importFeedsForProject(Project project, String authHeader) {
+ public void importFeedsForProject(Project project, String authHeader) throws IOException, IllegalAccessException {
URL url;
ObjectMapper mapper = new ObjectMapper();
- // single list from MTC
+ // A single list of feeds is returned from the MTC Carrier endpoint.
try {
url = new URL(rtdApi + "/Carrier");
} catch(MalformedURLException ex) {
LOG.error("Could not construct URL for RTD API: {}", rtdApi);
- return;
+ throw ex;
}
try {
- HttpURLConnection con = (HttpURLConnection) url.openConnection();
-
- // optional default is GET
- con.setRequestMethod("GET");
-
+ HttpURLConnection conn = (HttpURLConnection) url.openConnection();
//add request header
- con.setRequestProperty("User-Agent", "User-Agent");
-
+ conn.setRequestProperty("User-Agent", "User-Agent");
// add auth header
- LOG.info("authHeader="+authHeader);
- con.setRequestProperty("Authorization", authHeader);
-
- int responseCode = con.getResponseCode();
- LOG.info("Sending 'GET' request to URL : " + url);
- LOG.info("Response Code : " + responseCode);
-
- BufferedReader in = new BufferedReader(
- new InputStreamReader(con.getInputStream()));
- String inputLine;
- StringBuffer response = new StringBuffer();
-
- while ((inputLine = in.readLine()) != null) {
- response.append(inputLine);
- }
- in.close();
+ conn.setRequestProperty("Authorization", authHeader);
- String json = response.toString();
- RtdCarrier[] results = mapper.readValue(json, RtdCarrier[].class);
- for (int i = 0; i < results.length; i++) {
- // String className = "RtdCarrier";
- // Object car = Class.forName(className).newInstance();
- RtdCarrier car = results[i];
- //LOG.info("car id=" + car.AgencyId + " name=" + car.AgencyName);
+ LOG.info("Sending 'GET' request to URL : {}", url);
+ LOG.info("Response Code : {}", conn.getResponseCode());
+ RtdCarrier[] carriers = mapper.readValue(conn.getInputStream(), RtdCarrier[].class);
+ Collection<FeedSource> projectFeedSources = project.retrieveProjectFeedSources();
+ // Iterate over carriers found in response and update properties. Also, create a feed source for any carriers
+ // found in the response that do not correspond to an agency ID found in the external feed source properties.
+ for (int i = 0; i < carriers.length; i++) {
+ RtdCarrier carrier = carriers[i];
FeedSource source = null;
- // check if a FeedSource with this AgencyId already exists
- for (FeedSource existingSource : project.retrieveProjectFeedSources()) {
+ // Check if a FeedSource with this AgencyId already exists.
+ for (FeedSource existingSource : projectFeedSources) {
ExternalFeedSourceProperty agencyIdProp;
- agencyIdProp = Persistence.externalFeedSourceProperties.getById(constructId(existingSource, this.getResourceType(), AGENCY_ID));
- if (agencyIdProp != null && agencyIdProp.value != null && agencyIdProp.value.equals(car.AgencyId)) {
- //LOG.info("already exists: " + car.AgencyId);
+ String propertyId = constructId(existingSource, this.getResourceType(), AGENCY_ID_FIELDNAME);
+ agencyIdProp = Persistence.externalFeedSourceProperties.getById(propertyId);
+ if (agencyIdProp != null && agencyIdProp.value != null && agencyIdProp.value.equals(carrier.AgencyId)) {
source = existingSource;
}
}
-
- String feedName;
- if (car.AgencyName != null) {
- feedName = car.AgencyName;
- } else if (car.AgencyShortName != null) {
- feedName = car.AgencyShortName;
- } else {
- feedName = car.AgencyId;
- }
-
+ // Feed source does not exist. Create one using carrier properties.
if (source == null) {
+ // Derive the name from carrier properties found in response.
+ String feedName = carrier.AgencyName != null
+ ? carrier.AgencyName
+ : carrier.AgencyShortName != null
+ ? carrier.AgencyShortName
+ : carrier.AgencyId;
+ // Create new feed source to store in application database.
source = new FeedSource(feedName);
+ source.projectId = project.id;
+ LOG.info("Creating feed source {} from carrier response. (Did not previously exist.)", feedName);
+ // Store the feed source if it does not already exist.
+ Persistence.feedSources.create(source);
}
- else source.name = feedName;
-
- source.projectId = project.id;
- // Store the feed source.
- Persistence.feedSources.create(source);
-
- // create / update the properties
-
- for(Field carrierField : car.getClass().getDeclaredFields()) {
- String fieldName = carrierField.getName();
- String fieldValue = carrierField.get(car) != null ? carrierField.get(car).toString() : null;
- ExternalFeedSourceProperty prop = new ExternalFeedSourceProperty(source, this.getResourceType(), fieldName, fieldValue);
- if (Persistence.externalFeedSourceProperties.getById(prop.id) == null) {
- Persistence.externalFeedSourceProperties.create(prop);
- } else {
- Persistence.externalFeedSourceProperties.updateField(prop.id, fieldName, fieldValue);
- }
- }
+ // TODO: Does any property on the feed source need to be updated from the carrier (e.g., name)?
+
+ // Create / update the properties
+ LOG.info("Updating props for {}", source.name);
+ carrier.updateFields(source);
}
} catch(Exception ex) {
LOG.error("Could not read feeds from MTC RTD API");
- ex.printStackTrace();
+ throw ex;
}
}
/**
- * Do nothing for now. Creating a new agency for RTD requires adding the AgencyId property (when it was previously
- * null. See {@link #propertyUpdated(ExternalFeedSourceProperty, String, String)}.
+ * Generate blank external feed resource properties when a new feed source is created. Creating a new agency for RTD
+ * requires adding the AgencyId property (when it was previously null). See {@link #propertyUpdated(ExternalFeedSourceProperty, String, String)}.
*/
@Override
- public void feedSourceCreated(FeedSource source, String authHeader) {
- LOG.info("Processing new FeedSource {} for RTD. (No action taken.)", source.name);
+ public void feedSourceCreated(FeedSource source, String authHeader) throws IllegalAccessException {
+ LOG.info("Processing new FeedSource {} for RTD. Empty external feed properties being generated.", source.name);
+ // Create a blank carrier and update fields (will initialize all fields to null).
+ RtdCarrier carrier = new RtdCarrier();
+ carrier.updateFields(source);
}
/**
@@ -157,13 +145,17 @@ public void feedSourceCreated(FeedSource source, String authHeader) {
* null create/register a new carrier with RTD.
*/
@Override
- public void propertyUpdated(ExternalFeedSourceProperty updatedProperty, String previousValue, String authHeader) {
+ public void propertyUpdated(
+ ExternalFeedSourceProperty updatedProperty,
+ String previousValue,
+ String authHeader
+ ) throws IOException {
LOG.info("Update property in MTC carrier table: " + updatedProperty.name);
String feedSourceId = updatedProperty.feedSourceId;
FeedSource source = Persistence.feedSources.getById(feedSourceId);
RtdCarrier carrier = new RtdCarrier(source);
- if(updatedProperty.name.equals(AGENCY_ID) && previousValue == null) {
+ if(updatedProperty.name.equals(AGENCY_ID_FIELDNAME) && previousValue == null) {
// If the property being updated is the agency ID field and it previously was null, this indicates that a
// new carrier should be written to the RTD.
writeCarrierToRtd(carrier, true, authHeader);
@@ -185,24 +177,30 @@ public void feedVersionCreated(FeedVersion feedVersion, String authHeader) {
}
// Construct agency ID from feed source and retrieve from MongoDB.
ExternalFeedSourceProperty agencyIdProp = Persistence.externalFeedSourceProperties.getById(
- constructId(feedVersion.parentFeedSource(), this.getResourceType(), AGENCY_ID)
+ constructId(feedVersion.parentFeedSource(), this.getResourceType(), AGENCY_ID_FIELDNAME)
);
if(agencyIdProp == null || agencyIdProp.value.equals("null")) {
- LOG.error("Could not read {} for FeedSource {}", AGENCY_ID, feedVersion.feedSourceId);
+ LOG.error("Could not read {} for FeedSource {}", AGENCY_ID_FIELDNAME, feedVersion.feedSourceId);
return;
}
String keyName = String.format("%s%s.zip", this.s3Prefix, agencyIdProp.value);
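+ // For example (hypothetical values): an s3Prefix of "mtc/" and an AgencyId of "AB" yield the key "mtc/AB.zip".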
- LOG.info("Pushing to MTC S3 Bucket: " + keyName);
+ LOG.info("Pushing to MTC S3 Bucket: s3://{}/{}", s3Bucket, keyName);
File file = feedVersion.retrieveGtfsFile();
- FeedStore.s3Client.putObject(new PutObjectRequest(s3Bucket, keyName, file));
+ try {
+ FeedStore.s3Client.putObject(new PutObjectRequest(s3Bucket, keyName, file));
+ } catch (Exception e) {
+ LOG.error("Could not upload feed version to s3.");
+ e.printStackTrace();
+ throw e;
+ }
}
/**
* Update or create a carrier and its properties with an HTTP request to the RTD.
*/
- private void writeCarrierToRtd(RtdCarrier carrier, boolean createNew, String authHeader) {
+ private void writeCarrierToRtd(RtdCarrier carrier, boolean createNew, String authHeader) throws IOException {
try {
ObjectMapper mapper = new ObjectMapper();
@@ -226,6 +224,7 @@ private void writeCarrierToRtd(RtdCarrier carrier, boolean createNew, String aut
LOG.info("RTD API response: {}/{}", connection.getResponseCode(), connection.getResponseMessage());
} catch (Exception e) {
LOG.error("Error writing to RTD", e);
+ throw e;
}
}
}
diff --git a/src/main/java/com/conveyal/datatools/manager/extensions/mtc/RtdCarrier.java b/src/main/java/com/conveyal/datatools/manager/extensions/mtc/RtdCarrier.java
index 87bd30cdc..920d7ee32 100644
--- a/src/main/java/com/conveyal/datatools/manager/extensions/mtc/RtdCarrier.java
+++ b/src/main/java/com/conveyal/datatools/manager/extensions/mtc/RtdCarrier.java
@@ -1,12 +1,17 @@
package com.conveyal.datatools.manager.extensions.mtc;
+import com.conveyal.datatools.manager.models.ExternalFeedSourceProperty;
import com.conveyal.datatools.manager.models.FeedSource;
import com.conveyal.datatools.manager.persistence.Persistence;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.lang.reflect.Field;
+
import static com.conveyal.datatools.manager.models.ExternalFeedSourceProperty.constructId;
/**
+ * Represents all of the properties persisted on a carrier record by the external MTC database known as RTD.
+ *
* Created by demory on 3/30/16.
*/
@@ -63,15 +68,16 @@ public class RtdCarrier {
@JsonProperty
String EditedDate;
+ /** Empty constructor needed for serialization (also used to create empty carrier). */
public RtdCarrier() {
}
/**
- * Construct an RtdCarrier given the provided feed source.
+ * Construct an RtdCarrier given the provided feed source and initialize all field values from MongoDB.
* @param source
*/
public RtdCarrier(FeedSource source) {
- AgencyId = getValueForField(source, MtcFeedResource.AGENCY_ID);
+ AgencyId = getValueForField(source, MtcFeedResource.AGENCY_ID_FIELDNAME);
AgencyPhone = getValueForField(source, "AgencyPhone");
AgencyName = getValueForField(source, "AgencyName");
RttAgencyName = getValueForField(source, "RttAgencyName");
@@ -93,9 +99,38 @@ private String getPropId(FeedSource source, String fieldName) {
}
/**
- * FIXME: Are there cases where this might throw NPEs?
+ * Get the value stored in the database for a particular field.
+ *
+ * TODO: Are there cases where this might throw NPEs?
*/
private String getValueForField (FeedSource source, String fieldName) {
return Persistence.externalFeedSourceProperties.getById(getPropId(source, fieldName)).value;
}
+
+ /**
+ * Use reflection to update (or create if field does not exist) all fields for a carrier instance and provided feed
+ * source.
+ *
+ * TODO: Perhaps we should not be using reflection, but it works pretty well here.
+ */
+ public void updateFields(FeedSource feedSource) throws IllegalAccessException {
+ // Using reflection, iterate over every field in the class.
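+ // Each declared field becomes one ExternalFeedSourceProperty whose id is derived from the feed source,
+ // the MTC resource type, and the field name (the same scheme getValueForField uses to look values up).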
+ for(Field carrierField : this.getClass().getDeclaredFields()) {
+ String fieldName = carrierField.getName();
+ String fieldValue = carrierField.get(this) != null ? carrierField.get(this).toString() : null;
+ // Construct external feed source property for field with value from carrier.
+ ExternalFeedSourceProperty prop = new ExternalFeedSourceProperty(
+ feedSource,
+ MtcFeedResource.RESOURCE_TYPE,
+ fieldName,
+ fieldValue
+ );
+ // If field does not exist, create it. Otherwise, update value.
+ if (Persistence.externalFeedSourceProperties.getById(prop.id) == null) {
+ Persistence.externalFeedSourceProperties.create(prop);
+ } else {
+ Persistence.externalFeedSourceProperties.updateField(prop.id, fieldName, fieldValue);
+ }
+ }
+ }
}
\ No newline at end of file
diff --git a/src/main/java/com/conveyal/datatools/manager/extensions/transitfeeds/TransitFeedsFeedResource.java b/src/main/java/com/conveyal/datatools/manager/extensions/transitfeeds/TransitFeedsFeedResource.java
index 421e6a314..6f30db565 100644
--- a/src/main/java/com/conveyal/datatools/manager/extensions/transitfeeds/TransitFeedsFeedResource.java
+++ b/src/main/java/com/conveyal/datatools/manager/extensions/transitfeeds/TransitFeedsFeedResource.java
@@ -41,7 +41,7 @@ public String getResourceType() {
}
@Override
- public void importFeedsForProject(Project project, String authHeader) {
+ public void importFeedsForProject(Project project, String authHeader) throws IOException {
LOG.info("Importing feeds from TransitFeeds");
URL url;
@@ -55,7 +55,7 @@ public void importFeedsForProject(Project project, String authHeader) {
url = new URL(api + "?key=" + apiKey + "&limit=100" + "&page=" + String.valueOf(count));
} catch (MalformedURLException ex) {
LOG.error("Could not construct URL for TransitFeeds API");
- return;
+ throw ex;
}
@@ -84,7 +84,7 @@ public void importFeedsForProject(Project project, String authHeader) {
in.close();
} catch (IOException ex) {
LOG.error("Could not read from Transit Feeds API");
- return;
+ throw ex;
}
String json = response.toString();
@@ -93,7 +93,7 @@ public void importFeedsForProject(Project project, String authHeader) {
transitFeedNode = mapper.readTree(json);
} catch (IOException ex) {
LOG.error("Error parsing TransitFeeds JSON response");
- return;
+ throw ex;
}
for (JsonNode feed : transitFeedNode.get("results").get("feeds")) {
@@ -144,6 +144,7 @@ public void importFeedsForProject(Project project, String authHeader) {
}
} catch (MalformedURLException ex) {
LOG.error("Error constructing URLs from TransitFeeds API response");
+ throw ex;
}
source.projectId = project.id;
diff --git a/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeedResource.java b/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeedResource.java
index 7d1404966..51ac94e64 100644
--- a/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeedResource.java
+++ b/src/main/java/com/conveyal/datatools/manager/extensions/transitland/TransitLandFeedResource.java
@@ -13,6 +13,7 @@
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
+import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Field;
import java.net.HttpURLConnection;
@@ -41,7 +42,7 @@ public String getResourceType() {
}
@Override
- public void importFeedsForProject(Project project, String authHeader) {
+ public void importFeedsForProject(Project project, String authHeader) throws IOException, IllegalAccessException {
LOG.info("Importing TransitLand feeds");
URL url = null;
ObjectMapper mapper = new ObjectMapper();
@@ -62,6 +63,7 @@ public void importFeedsForProject(Project project, String authHeader) {
url = new URL(api + "?total=true&per_page=" + perPage + "&offset=" + offset + locationFilter);
} catch (MalformedURLException ex) {
LOG.error("Error constructing TransitLand API URL");
+ throw ex;
}
try {
@@ -74,8 +76,8 @@ public void importFeedsForProject(Project project, String authHeader) {
con.setRequestProperty("User-Agent", "User-Agent");
int responseCode = con.getResponseCode();
- System.out.println("\nSending 'GET' request to URL : " + url);
- System.out.println("Response Code : " + responseCode);
+ LOG.info("Sending 'GET' request to URL : " + url);
+ LOG.info("Response Code : " + responseCode);
BufferedReader in = new BufferedReader(
new InputStreamReader(con.getInputStream()));
@@ -117,7 +119,7 @@ public void importFeedsForProject(Project project, String authHeader) {
try {
source.url = new URL(tlFeed.url);
} catch (MalformedURLException e) {
- e.printStackTrace();
+ throw e;
}
Persistence.feedSources.create(source);
LOG.info("Creating new feed source: {}", source.name);
@@ -129,7 +131,7 @@ public void importFeedsForProject(Project project, String authHeader) {
feedUrl = new URL(tlFeed.url);
Persistence.feedSources.updateField(source.id, "url", feedUrl);
} catch (MalformedURLException e) {
- e.printStackTrace();
+ throw e;
}
// FIXME: These shouldn't be separate updates.
Persistence.feedSources.updateField(source.id, "name", feedName);
@@ -149,7 +151,7 @@ public void importFeedsForProject(Project project, String authHeader) {
}
} catch (Exception ex) {
LOG.error("Error reading from TransitLand API");
- ex.printStackTrace();
+ throw ex;
}
count++;
}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/GtfsPlusValidation.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/GtfsPlusValidation.java
new file mode 100644
index 000000000..efe69dc98
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/GtfsPlusValidation.java
@@ -0,0 +1,261 @@
+package com.conveyal.datatools.manager.gtfsplus;
+
+import com.conveyal.datatools.common.utils.Consts;
+import com.conveyal.datatools.manager.DataManager;
+import com.conveyal.datatools.manager.models.FeedVersion;
+import com.conveyal.datatools.manager.persistence.FeedStore;
+import com.conveyal.datatools.manager.persistence.Persistence;
+import com.conveyal.gtfs.GTFSFeed;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import org.apache.commons.io.input.BOMInputStream;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Enumeration;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
+
+/** Generates a GTFS+ validation report for a file. */
+public class GtfsPlusValidation implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private static final Logger LOG = LoggerFactory.getLogger(GtfsPlusValidation.class);
+ private static final FeedStore gtfsPlusStore = new FeedStore(DataManager.GTFS_PLUS_SUBDIR);
+ private static final String NOT_FOUND = "not found in GTFS";
+
+ // Public fields to appear in validation JSON.
+ public final String feedVersionId;
+ /** Indicates whether GTFS+ validation applies to user-edited feed or original published GTFS feed */
+ public boolean published;
+ public long lastModified;
+ /** Issues found for this GTFS+ feed */
+ public List<ValidationIssue> issues = new LinkedList<>();
+
+ private GtfsPlusValidation (String feedVersionId) {
+ this.feedVersionId = feedVersionId;
+ }
+
+ /**
+ * Validate a GTFS+ feed and return a list of issues encountered.
+ * FIXME: For now this uses the MapDB-backed GTFSFeed class, which suggests that this should perhaps be
+ * contained within a MonitorableJob.
+ */
+ public static GtfsPlusValidation validate(String feedVersionId) throws Exception {
+ GtfsPlusValidation validation = new GtfsPlusValidation(feedVersionId);
+ if (!DataManager.isModuleEnabled("gtfsplus")) {
+ throw new IllegalStateException("GTFS+ module must be enabled in server.yml to run GTFS+ validation.");
+ }
+ LOG.info("Validating GTFS+ for " + feedVersionId);
+
+ FeedVersion feedVersion = Persistence.feedVersions.getById(feedVersionId);
+ // Load the main GTFS file.
+ // FIXME: Swap MapDB-backed GTFSFeed for use of SQL data?
+ String gtfsFeedDbFilePath = gtfsPlusStore.getPathToFeed(feedVersionId + ".db");
+ GTFSFeed gtfsFeed;
+ try {
+ // This check for existence must occur before GTFSFeed is instantiated (and the file must be discarded
+ // immediately).
+ boolean dbExists = new File(gtfsFeedDbFilePath).isFile();
+ gtfsFeed = new GTFSFeed(gtfsFeedDbFilePath);
+ if (!dbExists) {
+ LOG.info("Loading GTFS file into new MapDB file (.db).");
+ gtfsFeed.loadFromFile(new ZipFile(feedVersion.retrieveGtfsFile().getAbsolutePath()));
+ }
+ } catch (Exception e) {
+ LOG.error("MapDB file for GTFSFeed appears to be corrupted. Deleting and trying to load from zip file.", e);
+ // Error loading MapDB file. Delete and try to reload.
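+ // Both the .db file and its .p companion file are deleted below so the reload starts from a clean slate.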
+ new File(gtfsFeedDbFilePath).delete();
+ new File(gtfsFeedDbFilePath + ".p").delete();
+ LOG.info("Attempt #2 to load GTFS file into new MapDB file (.db).");
+ gtfsFeed = new GTFSFeed(gtfsFeedDbFilePath);
+ gtfsFeed.loadFromFile(new ZipFile(feedVersion.retrieveGtfsFile().getAbsolutePath()));
+ }
+
+ // check for saved GTFS+ data
+ File file = gtfsPlusStore.getFeed(feedVersionId);
+ if (file == null) {
+ validation.published = true;
+ LOG.warn("GTFS+ Validation -- Modified GTFS+ file not found, loading from main version GTFS.");
+ file = feedVersion.retrieveGtfsFile();
+ } else {
+ validation.published = false;
+ LOG.info("GTFS+ Validation -- Validating user-saved GTFS+ data (unpublished)");
+ }
+ int gtfsPlusTableCount = 0;
+ ZipFile zipFile = new ZipFile(file);
+ final Enumeration<? extends ZipEntry> entries = zipFile.entries();
+ while (entries.hasMoreElements()) {
+ final ZipEntry entry = entries.nextElement();
+ for (int i = 0; i < DataManager.gtfsPlusConfig.size(); i++) {
+ JsonNode tableNode = DataManager.gtfsPlusConfig.get(i);
+ if (tableNode.get("name").asText().equals(entry.getName())) {
+ LOG.info("Validating GTFS+ table: " + entry.getName());
+ gtfsPlusTableCount++;
+ // Skip any byte order mark that may be present. Files must be UTF-8,
+ // but the GTFS spec says that "files that include the UTF byte order mark are acceptable".
+ InputStream bis = new BOMInputStream(zipFile.getInputStream(entry));
+ validateTable(validation.issues, tableNode, bis, gtfsFeed);
+ }
+ }
+ }
+ gtfsFeed.close();
+ LOG.info("GTFS+ tables found: {}/{}", gtfsPlusTableCount, DataManager.gtfsPlusConfig.size());
+ return validation;
+ }
+
+ /**
+ * Validate a single GTFS+ table using the table specification found in gtfsplus.yml.
+ */
+ private static void validateTable(
+ Collection<ValidationIssue> issues,
+ JsonNode specTable,
+ InputStream inputStreamToValidate,
+ GTFSFeed gtfsFeed
+ ) throws IOException {
+ String tableId = specTable.get("id").asText();
+ // Read in table data from input stream.
+ BufferedReader in = new BufferedReader(new InputStreamReader(inputStreamToValidate));
+ String line = in.readLine();
+ String[] inputHeaders = line.split(",");
+ List<String> fieldList = Arrays.asList(inputHeaders);
+ JsonNode[] fieldsFound = new JsonNode[inputHeaders.length];
+ JsonNode specFields = specTable.get("fields");
+ // Iterate over spec fields and check that there are no missing required fields.
+ for (int i = 0; i < specFields.size(); i++) {
+ JsonNode specField = specFields.get(i);
+ String fieldName = specField.get("name").asText();
+ int index = fieldList.indexOf(fieldName);
+ if (index != -1) {
+ // Add spec field for each field found.
+ fieldsFound[index] = specField;
+ } else if (isRequired(specField)) {
+ // If spec field not found, check that missing field was not required.
+ issues.add(new ValidationIssue(tableId, fieldName, -1, "Required column missing."));
+ }
+ }
+ // Iterate over each row and validate each field value.
+ int rowIndex = 0;
+ int rowsWithWrongNumberOfColumns = 0;
+ while ((line = in.readLine()) != null) {
+ String[] values = line.split(Consts.COLUMN_SPLIT, -1);
+ // First, check that row has the correct number of fields.
+ if (values.length != fieldsFound.length) {
+ rowsWithWrongNumberOfColumns++;
+ }
+ // Validate each value in row. Note: we iterate over the fields and not values because a row may be missing
+ // columns, but we still want to validate that missing value (e.g., if it is missing a required field).
+ for (int f = 0; f < fieldsFound.length; f++) {
+ // If value exists for index, use that. Otherwise, default to null to avoid out of bounds exception.
+ String val = f < values.length ? values[f] : null;
+ validateTableValue(issues, tableId, rowIndex, val, fieldsFound[f], gtfsFeed);
+ }
+ rowIndex++;
+ }
+ // Add issue for wrong number of columns after processing all rows.
+ // Note: We considered adding an issue for each row, but opted for the single error approach because there's no
+ // concept of a row-level issue in the UI right now. So we would potentially need to add that to the UI
+ // somewhere. Also, there's the trouble of reporting the issue at the row level, but not really giving the user
+ // a great way to resolve the issue in the GTFS+ editor. Essentially, all of the rows with the wrong number of
+ // columns can be resolved simply by clicking the "Save and Revalidate" button -- so the resolution is more at
+ // the table level than the row level (like, for example, a bad value for a field would be).
+ if (rowsWithWrongNumberOfColumns > 0) {
+ issues.add(new ValidationIssue(tableId, null, -1, rowsWithWrongNumberOfColumns + " row(s) do not contain the same number of fields as there are headers. (File may need to be edited manually.)"));
+ }
+ }
+
+ /** Determine if a GTFS+ spec field is required. */
+ private static boolean isRequired(JsonNode specField) {
+ return specField.get("required") != null && specField.get("required").asBoolean();
+ }
+
+ /** Validate a single value for a GTFS+ table. */
+ private static void validateTableValue(
+ Collection<ValidationIssue> issues,
+ String tableId,
+ int rowIndex,
+ String value,
+ JsonNode specField,
+ GTFSFeed gtfsFeed
+ ) {
+ if (specField == null) return;
+ String fieldName = specField.get("name").asText();
+
+ if (isRequired(specField)) {
+ if (value == null || value.length() == 0) {
+ issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Required field missing value"));
+ }
+ }
+
+ switch(specField.get("inputType").asText()) {
+ case "DROPDOWN":
+ boolean invalid = true;
+ ArrayNode options = (ArrayNode) specField.get("options");
+ for (JsonNode option : options) {
+ String optionValue = option.get("value").asText();
+
+ // NOTE: per client's request, this check has been made case insensitive
+ boolean valuesAreEqual = optionValue.equalsIgnoreCase(value);
+
+ // if value is found in list of options, break out of loop
+ if (valuesAreEqual || (!isRequired(specField) && "".equals(value))) {
+ invalid = false;
+ break;
+ }
+ }
+ if (invalid) {
+ issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Value: " + value + " is not a valid option."));
+ }
+ break;
+ case "TEXT":
+ // check if value exceeds max length requirement
+ if (specField.get("maxLength") != null) {
+ int maxLength = specField.get("maxLength").asInt();
+ if (value != null && value.length() > maxLength) {
+ issues.add(new ValidationIssue(tableId, fieldName, rowIndex, "Text value exceeds the max. length of " + maxLength));
+ }
+ }
+ break;
+ case "GTFS_ROUTE":
+ if (!gtfsFeed.routes.containsKey(value)) {
+ issues.add(new ValidationIssue(tableId, fieldName, rowIndex, missingIdText(value, "Route")));
+ }
+ break;
+ case "GTFS_STOP":
+ if (!gtfsFeed.stops.containsKey(value)) {
+ issues.add(new ValidationIssue(tableId, fieldName, rowIndex, missingIdText(value, "Stop")));
+ }
+ break;
+ case "GTFS_TRIP":
+ if (!gtfsFeed.trips.containsKey(value)) {
+ issues.add(new ValidationIssue(tableId, fieldName, rowIndex, missingIdText(value, "Trip")));
+ }
+ break;
+ case "GTFS_FARE":
+ if (!gtfsFeed.fares.containsKey(value)) {
+ issues.add(new ValidationIssue(tableId, fieldName, rowIndex, missingIdText(value, "Fare")));
+ }
+ break;
+ case "GTFS_SERVICE":
+ if (!gtfsFeed.services.containsKey(value)) {
+ issues.add(new ValidationIssue(tableId, fieldName, rowIndex, missingIdText(value, "Service")));
+ }
+ break;
+ }
+
+ }
+
+ /** Construct missing ID text for validation issue description. */
+ private static String missingIdText(String value, String entity) {
+ return String.join(" ", entity, "ID", value, NOT_FOUND);
+ }
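+ // (For reference, missingIdText("123", "Route") produces "Route ID 123 not found in GTFS".)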
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/ValidationIssue.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/ValidationIssue.java
new file mode 100644
index 000000000..b835a3996
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/ValidationIssue.java
@@ -0,0 +1,19 @@
+package com.conveyal.datatools.manager.gtfsplus;
+
+import java.io.Serializable;
+
+/** A validation issue for a GTFS+ field. Use rowIndex = -1 for a table level issue. */
+public class ValidationIssue implements Serializable {
+ private static final long serialVersionUID = 1L;
+ public String tableId;
+ public String fieldName;
+ public int rowIndex;
+ public String description;
+
+ public ValidationIssue(String tableId, String fieldName, int rowIndex, String description) {
+ this.tableId = tableId;
+ this.fieldName = fieldName;
+ this.rowIndex = rowIndex;
+ this.description = description;
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/CalendarAttribute.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/CalendarAttribute.java
new file mode 100644
index 000000000..ed6c4b7f1
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/CalendarAttribute.java
@@ -0,0 +1,20 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.model.Entity;
+
+import javax.naming.OperationNotSupportedException;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public class CalendarAttribute extends Entity {
+
+ private static final long serialVersionUID = 1L;
+
+ public String service_id;
+ public String service_description;
+
+ @Override public void setStatementParameters(PreparedStatement statement, boolean setDefaultId) {
+ throw new UnsupportedOperationException(
+ "Cannot call setStatementParameters because loading a GTFS+ table into RDBMS is unsupported.");
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/Direction.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/Direction.java
new file mode 100644
index 000000000..0b83f6e9c
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/Direction.java
@@ -0,0 +1,22 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.model.Entity;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public class Direction extends Entity {
+
+ private static final long serialVersionUID = 1L;
+
+ public String route_id;
+ public int direction_id;
+ public String direction;
+
+
+ @Override
+ public void setStatementParameters(PreparedStatement statement, boolean setDefaultId) throws SQLException {
+ throw new UnsupportedOperationException(
+ "Cannot call setStatementParameters because loading a GTFS+ table into RDBMS is unsupported.");
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/FareRiderCategory.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/FareRiderCategory.java
new file mode 100644
index 000000000..133d2b60f
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/FareRiderCategory.java
@@ -0,0 +1,24 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.model.Entity;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.time.LocalDate;
+
+public class FareRiderCategory extends Entity {
+
+ private static final long serialVersionUID = 1L;
+
+ public String fare_id;
+ public int rider_category_id;
+ public double price;
+ public LocalDate expiration_date;
+ public LocalDate commencement_date;
+
+ @Override
+ public void setStatementParameters(PreparedStatement statement, boolean setDefaultId) throws SQLException {
+ throw new UnsupportedOperationException(
+ "Cannot call setStatementParameters because loading a GTFS+ table into RDBMS is unsupported.");
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/FareZoneAttribute.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/FareZoneAttribute.java
new file mode 100644
index 000000000..4d586f296
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/FareZoneAttribute.java
@@ -0,0 +1,20 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.model.Entity;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public class FareZoneAttribute extends Entity {
+
+ private static final long serialVersionUID = 1L;
+
+ public String zone_id;
+ public String zone_name;
+
+ @Override
+ public void setStatementParameters(PreparedStatement statement, boolean setDefaultId) throws SQLException {
+ throw new UnsupportedOperationException(
+ "Cannot call setStatementParameters because loading a GTFS+ table into RDBMS is unsupported.");
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/GtfsPlusTable.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/GtfsPlusTable.java
new file mode 100644
index 000000000..23031b2fd
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/GtfsPlusTable.java
@@ -0,0 +1,98 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.loader.DateField;
+import com.conveyal.gtfs.loader.DoubleField;
+import com.conveyal.gtfs.loader.IntegerField;
+import com.conveyal.gtfs.loader.ShortField;
+import com.conveyal.gtfs.loader.StringField;
+import com.conveyal.gtfs.loader.Table;
+
+import static com.conveyal.gtfs.loader.Requirement.OPTIONAL;
+import static com.conveyal.gtfs.loader.Requirement.PROPRIETARY;
+import static com.conveyal.gtfs.loader.Requirement.REQUIRED;
+
+/**
+ * This class contains GTFS+ table definitions that are based on gtfs-lib's {@link Table} constants.
+ * Currently, these are only used when operating on tables being merged within
+ * {@link com.conveyal.datatools.manager.jobs.MergeFeedsJob}. The definition of these tables can be
+ * found at https://www.transitwiki.org/TransitWiki/images/e/e7/GTFS%2B_Additional_Files_Format_Ver_1.7.pdf.
+ */
+public class GtfsPlusTable {
+ public static final Table REALTIME_ROUTES = new Table("realtime_routes", RealtimeRoute.class, PROPRIETARY,
+ new StringField("route_id", REQUIRED).isReferenceTo(Table.ROUTES),
+ new ShortField("realtime_enabled", REQUIRED, 1),
+ new StringField("realtime_routename", REQUIRED),
+ new StringField("realtime_routecode", REQUIRED)
+ );
+
+ public static final Table REALTIME_STOPS = new Table("realtime_stops", RealtimeStop.class, PROPRIETARY,
+ new StringField("trip_id", REQUIRED).isReferenceTo(Table.TRIPS),
+ new StringField("stop_id", REQUIRED).isReferenceTo(Table.STOPS),
+ new StringField("realtime_stop_id", REQUIRED)
+ ).keyFieldIsNotUnique();
+
+ public static final Table DIRECTIONS = new Table("directions", Direction.class, PROPRIETARY,
+ new StringField("route_id", REQUIRED).isReferenceTo(Table.ROUTES),
+ new ShortField("direction_id", REQUIRED, 1),
+ new StringField("direction", REQUIRED))
+ .keyFieldIsNotUnique()
+ .hasCompoundKey();
+
+ public static final Table REALTIME_TRIPS = new Table("realtime_trips", RealtimeTrip.class, PROPRIETARY,
+ new StringField("trip_id", REQUIRED).isReferenceTo(Table.TRIPS),
+ new StringField("realtime_trip_id", REQUIRED)
+ );
+
+ public static final Table STOP_ATTRIBUTES = new Table("stop_attributes", StopAttribute.class, PROPRIETARY,
+ new StringField("stop_id", REQUIRED).isReferenceTo(Table.STOPS),
+ new ShortField("accessibility_id", REQUIRED, 8),
+ new StringField("cardinal_direction", OPTIONAL),
+ new StringField("relative_position", OPTIONAL),
+ new StringField("stop_city", REQUIRED)
+ );
+
+ public static final Table TIMEPOINTS = new Table("timepoints", TimePoint.class, PROPRIETARY,
+ new StringField("trip_id", REQUIRED).isReferenceTo(Table.TRIPS),
+ new StringField("stop_id", REQUIRED).isReferenceTo(Table.STOPS)
+ ).keyFieldIsNotUnique();
+
+ public static final Table RIDER_CATEGORIES = new Table("rider_categories", RiderCategory.class, PROPRIETARY,
+ new IntegerField("rider_category_id", REQUIRED, 1, 25),
+ new StringField("rider_category_description", REQUIRED)
+ );
+
+ public static final Table FARE_RIDER_CATEGORIES = new Table("fare_rider_categories", FareRiderCategory.class, PROPRIETARY,
+ new StringField("fare_id", REQUIRED),
+ new IntegerField("rider_category_id", REQUIRED, 2, 25).isReferenceTo(RIDER_CATEGORIES),
+ new DoubleField("price", REQUIRED, 0, Double.MAX_VALUE, 2),
+ new DateField("expiration_date", OPTIONAL),
+ new DateField("commencement_date", OPTIONAL)
+ ).keyFieldIsNotUnique();
+
+ public static final Table CALENDAR_ATTRIBUTES = new Table("calendar_attributes", CalendarAttribute.class, PROPRIETARY,
+ new StringField("service_id", REQUIRED).isReferenceTo(Table.CALENDAR),
+ new StringField("service_description", REQUIRED)
+ );
+
+ public static final Table FAREZONE_ATTRIBUTES = new Table("farezone_attributes", FareZoneAttribute.class, PROPRIETARY,
+ new StringField("zone_id", REQUIRED),
+ new StringField("zone_name", REQUIRED)
+ );
+
+ /**
+ * List of tables in the order such that internal references can be appropriately checked as
+ * tables are loaded/encountered.
+ */
+ public static final Table[] tables = new Table[] {
+ REALTIME_ROUTES,
+ REALTIME_STOPS,
+ REALTIME_TRIPS,
+ DIRECTIONS,
+ STOP_ATTRIBUTES,
+ TIMEPOINTS,
+ RIDER_CATEGORIES,
+ FARE_RIDER_CATEGORIES,
+ CALENDAR_ATTRIBUTES,
+ FAREZONE_ATTRIBUTES
+ };
+}
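
A minimal usage sketch, not part of this patch: the table constants above might be resolved by file name during a merge, for example with a lookup over `GtfsPlusTable.tables` like the following. It assumes gtfs-lib's `Table` exposes a public `name` field matching the first constructor argument; if the accessor differs, substitute it accordingly.

```java
// Illustrative only: resolve a GTFS+ table spec from a file name such as "directions.txt".
// GtfsPlusTableLookup is a hypothetical helper, not a class in this patch.
import com.conveyal.datatools.manager.gtfsplus.tables.GtfsPlusTable;
import com.conveyal.gtfs.loader.Table;

public class GtfsPlusTableLookup {
    /** Returns the GTFS+ table spec whose name matches the file (minus ".txt"), or null if none does. */
    public static Table forFileName(String fileName) {
        String tableName = fileName.replaceAll("\\.txt$", "");
        for (Table table : GtfsPlusTable.tables) {
            // Assumes Table.name holds the table name passed to the constructor above.
            if (table.name.equals(tableName)) return table;
        }
        return null;
    }

    public static void main(String[] args) {
        Table directions = forFileName("directions.txt");
        System.out.println(directions == null ? "not a GTFS+ table" : "found spec: " + directions.name);
    }
}
```
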
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RealtimeRoute.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RealtimeRoute.java
new file mode 100644
index 000000000..49050a05f
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RealtimeRoute.java
@@ -0,0 +1,21 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.model.Entity;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public class RealtimeRoute extends Entity {
+ private static final long serialVersionUID = 1L;
+
+ public String route_id;
+ public int realtime_enabled;
+ public String realtime_routename;
+ public String realtime_routecode;
+
+ @Override
+ public void setStatementParameters(PreparedStatement statement, boolean setDefaultId) throws SQLException {
+ throw new UnsupportedOperationException(
+ "Cannot call setStatementParameters because loading a GTFS+ table into RDBMS is unsupported.");
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RealtimeStop.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RealtimeStop.java
new file mode 100644
index 000000000..fd61db1f3
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RealtimeStop.java
@@ -0,0 +1,21 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.model.Entity;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public class RealtimeStop extends Entity {
+
+ private static final long serialVersionUID = 1L;
+
+ public String trip_id;
+ public String stop_id;
+ public String realtime_stop_id;
+
+ @Override
+ public void setStatementParameters(PreparedStatement statement, boolean setDefaultId) throws SQLException {
+ throw new UnsupportedOperationException(
+ "Cannot call setStatementParameters because loading a GTFS+ table into RDBMS is unsupported.");
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RealtimeTrip.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RealtimeTrip.java
new file mode 100644
index 000000000..3c52ae7de
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RealtimeTrip.java
@@ -0,0 +1,20 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.model.Entity;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public class RealtimeTrip extends Entity {
+
+ private static final long serialVersionUID = 1L;
+
+ public String trip_id;
+ public String realtime_trip_id;
+
+ @Override
+ public void setStatementParameters(PreparedStatement statement, boolean setDefaultId) throws SQLException {
+ throw new UnsupportedOperationException(
+ "Cannot call setStatementParameters because loading a GTFS+ table into RDBMS is unsupported.");
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RiderCategory.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RiderCategory.java
new file mode 100644
index 000000000..d30705ee2
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/RiderCategory.java
@@ -0,0 +1,20 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.model.Entity;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public class RiderCategory extends Entity {
+
+ private static final long serialVersionUID = 1L;
+
+ public int rider_category_id;
+ public String rider_category_description;
+
+ @Override
+ public void setStatementParameters(PreparedStatement statement, boolean setDefaultId) throws SQLException {
+ throw new UnsupportedOperationException(
+ "Cannot call setStatementParameters because loading a GTFS+ table into RDBMS is unsupported.");
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/StopAttribute.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/StopAttribute.java
new file mode 100644
index 000000000..e11714f2c
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/StopAttribute.java
@@ -0,0 +1,23 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.model.Entity;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public class StopAttribute extends Entity {
+
+ private static final long serialVersionUID = 1L;
+
+ public String stop_id;
+ public int accessibility_id;
+ public String cardinal_direction;
+ public String relative_position;
+ public String stop_city;
+
+ @Override
+ public void setStatementParameters(PreparedStatement statement, boolean setDefaultId) throws SQLException {
+ throw new UnsupportedOperationException(
+ "Cannot call setStatementParameters because loading a GTFS+ table into RDBMS is unsupported.");
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/TimePoint.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/TimePoint.java
new file mode 100644
index 000000000..b1b71abf8
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/TimePoint.java
@@ -0,0 +1,20 @@
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
+import com.conveyal.gtfs.model.Entity;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public class TimePoint extends Entity {
+
+ private static final long serialVersionUID = 1L;
+
+ public String trip_id;
+ public String stop_id;
+
+ @Override
+ public void setStatementParameters(PreparedStatement statement, boolean setDefaultId) throws SQLException {
+ throw new UnsupportedOperationException(
+ "Cannot call setStatementParameters because loading a GTFS+ table into RDBMS is unsupported.");
+ }
+}
diff --git a/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/package-info.java b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/package-info.java
new file mode 100644
index 000000000..aab21a404
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/gtfsplus/tables/package-info.java
@@ -0,0 +1,16 @@
+/**
+ * This package contains classes that correspond to those found for GTFS entity types in
+ * {@link com.conveyal.gtfs.model}, but for GTFS+ entity types. It also contains
+ * {@link com.conveyal.datatools.manager.gtfsplus.tables.GtfsPlusTable}, which uses gtfs-lib's
+ * {@link com.conveyal.gtfs.loader.Table} constants to define a table specification for this set of
+ * extension tables.
+ *
+ * Note: these classes are primarily used for the MTC merge type in
+ * {@link com.conveyal.datatools.manager.jobs.MergeFeedsJob}. There may be an opportunity to also use
+ * these classes in the GTFS+ validation code path found in
+ * {@link com.conveyal.datatools.manager.controllers.api.GtfsPlusController}; however,
+ * TODO: a way to define an enum set of permitted string field values would first need to be added to support
+ * fields such as {@link com.conveyal.datatools.manager.gtfsplus.tables.StopAttribute#cardinal_direction}.
+ */
+package com.conveyal.datatools.manager.gtfsplus.tables;
+
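
To make the TODO in the package Javadoc concrete, here is a purely hypothetical sketch of an enum-set constraint for string field values. No such construct exists in gtfs-lib as of this patch; the class and method names below are invented for illustration only.

```java
// Hypothetical illustration of the enum-set idea for fields like StopAttribute#cardinal_direction.
// EnumStringSpec is not gtfs-lib API; it only shows the kind of validation such fields would need.
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class EnumStringSpec {
    final String fieldName;
    private final Set<String> permittedValues;

    public EnumStringSpec(String fieldName, String... permittedValues) {
        this.fieldName = fieldName;
        this.permittedValues = new HashSet<>(Arrays.asList(permittedValues));
    }

    /** Returns true if the raw CSV value is one of the permitted values for this field. */
    public boolean isValid(String value) {
        return permittedValues.contains(value);
    }

    public static void main(String[] args) {
        EnumStringSpec cardinalDirection = new EnumStringSpec(
            "cardinal_direction", "N", "S", "E", "W", "NE", "NW", "SE", "SW");
        System.out.println(cardinalDirection.fieldName + " permits NE: " + cardinalDirection.isValid("NE")); // true
        System.out.println(cardinalDirection.fieldName + " permits X: " + cardinalDirection.isValid("X"));   // false
    }
}
```
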
diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/CreateFeedVersionFromSnapshotJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/CreateFeedVersionFromSnapshotJob.java
index 2b6d9d294..34ec4c935 100644
--- a/src/main/java/com/conveyal/datatools/manager/jobs/CreateFeedVersionFromSnapshotJob.java
+++ b/src/main/java/com/conveyal/datatools/manager/jobs/CreateFeedVersionFromSnapshotJob.java
@@ -28,6 +28,7 @@ public CreateFeedVersionFromSnapshotJob(FeedVersion feedVersion, Snapshot snapsh
@Override
public void jobLogic() {
// Set feed version properties.
+ feedVersion.originNamespace = snapshot.namespace;
feedVersion.retrievalMethod = FeedSource.FeedRetrievalMethod.PRODUCED_IN_HOUSE;
feedVersion.name = snapshot.name + " Snapshot Export";
// FIXME: This should probably just create a new snapshot, and then validate those tables.
diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/DeployJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/DeployJob.java
index 6017b1035..8621e0b80 100644
--- a/src/main/java/com/conveyal/datatools/manager/jobs/DeployJob.java
+++ b/src/main/java/com/conveyal/datatools/manager/jobs/DeployJob.java
@@ -164,6 +164,16 @@ public void jobLogic () {
// Upload to S3, if specifically required by the OTPServer or needed for servers in the target group to fetch.
if (otpServer.s3Bucket != null || otpServer.targetGroupArn != null) {
+ if (!DataManager.useS3) {
+ String message = "Cannot upload deployment to S3. Application not configured for s3 storage.";
+ LOG.error(message);
+ status.fail(message);
+ return;
+ }
+ status.message = "Uploading to S3";
+ status.uploadingS3 = true;
+ LOG.info("Uploading deployment {} to s3", deployment.name);
+ String key = null;
try {
uploadBundleToS3();
} catch (AmazonClientException | InterruptedException e) {
diff --git a/src/main/java/com/conveyal/datatools/manager/jobs/FeedExpirationNotificationJob.java b/src/main/java/com/conveyal/datatools/manager/jobs/FeedExpirationNotificationJob.java
new file mode 100644
index 000000000..0c31a40a4
--- /dev/null
+++ b/src/main/java/com/conveyal/datatools/manager/jobs/FeedExpirationNotificationJob.java
@@ -0,0 +1,84 @@
+package com.conveyal.datatools.manager.jobs;
+
+import com.conveyal.datatools.manager.DataManager;
+import com.conveyal.datatools.manager.models.FeedSource;
+import com.conveyal.datatools.manager.models.Project;
+import com.conveyal.datatools.manager.persistence.Persistence;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import static com.conveyal.datatools.manager.auth.Auth0Users.getVerifiedEmailsBySubscription;
+import static com.conveyal.datatools.manager.utils.NotificationsUtils.sendNotification;
+
+public class FeedExpirationNotificationJob implements Runnable {
+ private static final Logger LOG = LoggerFactory.getLogger(FeedExpirationNotificationJob.class);
+ private static final String APPLICATION_URL = DataManager.getConfigPropertyAsText("application.public_url");
+
+ String feedSourceId;
+ boolean isWarningNotification;
+
+ public FeedExpirationNotificationJob(String feedSourceId, boolean isWarningNotification) {
+ this.feedSourceId = feedSourceId;
+ this.isWarningNotification = isWarningNotification;
+ }
+
+ public void run() {
+ FeedSource source = Persistence.feedSources.getById(feedSourceId);
+ Project project = source.retrieveProject();
+
+ if (project == null) {
+ // The parent project has already been deleted; this notification should have been canceled
+ // but is still scheduled for some reason. Return, as nothing further should be done.
+ return;
+ }
+
+ // build up list of emails to send expiration notifications to
+ Set<String> emails = new HashSet<>();
+
+ // get each user subscriber for feed
+ emails.addAll(getVerifiedEmailsBySubscription("feed-updated", source.id));
+
+ // get each user subscriber for feed's project
+ emails.addAll(getVerifiedEmailsBySubscription("project-updated", project.id));
+
+ if (emails.size() > 0) {
+ LOG.info(
+ String.format(
+ "Sending feed %s for feed source %s notification to %d users",
+ isWarningNotification
+ ? "expiration in one week" :
+ "final expiration",
+ source.id,
+ emails.size()
+ )
+ );
+
+ String message = String.format(
+ "The latest feed version for %s %s",
+ source.name,
+ isWarningNotification ? "expires in one week!" : "has expired!"
+ );
+
+ String feedSourceUrl = String.format("%s/feed/%s", APPLICATION_URL, source.id);
+ String text = String.format(
+ "%s\n\nView the %s feedsource here: %s.",
+ message,
+ source.name,
+ feedSourceUrl
+ );
+ String html = String.format(
+ "