diff --git a/.github/workflows/build-and-deploy.yml b/.github/workflows/build-and-deploy.yml index 831fa1f66..4b2172d91 100644 --- a/.github/workflows/build-and-deploy.yml +++ b/.github/workflows/build-and-deploy.yml @@ -24,7 +24,7 @@ env: jobs: build-and-deploy-datasets: - runs-on: ubuntu-22.04 + runs-on: ubuntu-20.04 environment: name: ${{ github.ref }} @@ -95,6 +95,7 @@ jobs: - name: "Rebuild, commit, push and make a release" if: github.ref == 'refs/heads/release' run: | + echo ${{ secrets.GH_TOKEN_NEXTSTRAIN_BOT_REPO }} | gh auth login --with-token ./scripts/rebuild --input-dir 'data/' --output-dir 'data_output/' --release --repo="${GITHUB_REPOSITORY}" - name: "Deploy dataset server" diff --git a/scripts/lib/changelog.py b/scripts/lib/changelog.py index 8ba20603d..18f05f361 100644 --- a/scripts/lib/changelog.py +++ b/scripts/lib/changelog.py @@ -39,10 +39,8 @@ def changelog_get_unreleased_section(changelog_path: str): def format_dataset_attributes_md_table(attributes): - attr_table = f"| {'attribute':20} | {'value':20} | {'value friendly':40} |\n" - attr_table += f"| {'-' * 20} | {'-' * 20} | {'-' * 40} |\n" + attr_table = f"| {'attribute':20} | {'value':40} |\n" + attr_table += f"| {'-' * 20} | {'-' * 40} |\n" for attr_name, attr_val in attributes.items(): - value = attr_val["value"] - value_friendly = dict_get(attr_val, ["valueFriendly"]) or "" - attr_table += f'| {attr_name:20} | {value:20} | {value_friendly:40} |\n' + attr_table += f'| {attr_name:20} | {attr_val:40} |\n' return attr_table diff --git a/scripts/rebuild b/scripts/rebuild index 991e0cdc4..0f257b52b 100755 --- a/scripts/rebuild +++ b/scripts/rebuild @@ -254,10 +254,13 @@ def main(): commit_hash = commit_changes(args, tag, release_infos) if args.push: + l.info("Pushing committed changes to GitHub") git_push() if args.release: + l.info("Releasing to GitHub") release_notes = aggregate_release_notes(release_infos) + l.info(f"Release notes:\n-------\n{release_notes}\n-------\nEnd of release notes\n") 
publish_to_github_releases(args, tag, commit_hash, release_notes) @@ -351,26 +354,35 @@ def prepare_dataset_release_infos(args, datasets, collection_dir, tag, updated_a def aggregate_release_notes(release_infos): - dataset_names_friendly = format_list( - unique([get_dataset_name_friendly(release_info["dataset"]) for release_info in release_infos]), - sep="\n", marker="- ", quote=False - ) - release_notes = f"This release contains changes for datasets:\n\n{dataset_names_friendly}\n\n\n" + dataset_list = format_dataset_list(release_infos) + release_notes = f"This release contains changes for datasets:\n\n{dataset_list}\n\n\n" for release_info in release_infos: release_notes += f'\n{release_info["release_notes"]}\n\n' return release_notes +def format_dataset_list(release_infos): + entries = [format_dataset_list_entry(release_info) for release_info in release_infos] + entries = unique(entries) + entries = list(sorted(entries)) + return format_list(entries, sep="\n", marker="- ", quote=False) + + +def format_dataset_list_entry(release_info): + path = dict_get_required(release_info, ['dataset', 'path']) + name = get_dataset_name_friendly(release_info['dataset']) + return f"{path} ({name})" + + def commit_changes(args, tag, release_infos): - l.info(f"Commiting changes for '{tag}'") + l.info(f"Committing changes for '{tag}'") commit_message = "chore: rebuild [skip ci]" if args.release: - dataset_names = format_list( - unique([get_dataset_name(release_info["dataset"]) for release_info in release_infos]), - sep="\n", marker="- ", quote=False - ) - commit_message = f"chore: release '{tag}'\n\nUpdated datasets:\n\n{dataset_names}" + dataset_list = format_dataset_list(release_infos) + commit_message = f"chore: release '{tag}'\n\nUpdated datasets:\n\n{dataset_list}" + + l.info(f"Commit message:\n--------\n{commit_message}\n--------\nEnd of commit message\n\n") return git_commit_all(commit_message) @@ -438,12 +450,8 @@ def create_dataset_package(args, dataset, pathogen_json, tag, 
dataset_dir): file_write("User-agent: *\nDisallow: /\n", join(args.output_dir, "robots.txt")) -def get_dataset_name(dataset): - return dict_get_required(dataset, ["attributes", "name", "value"]) - - def get_dataset_name_friendly(dataset): - return dict_get(dataset, ["attributes", "name", "valueFriendly"]) or get_dataset_name(dataset) + return dict_get_required(dataset, ["attributes", "name"]) if __name__ == '__main__':