
re-add circleci artifact fetching #76

Merged: 2 commits, Mar 7, 2024
2 changes: 1 addition & 1 deletion images/bot/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = bioconda-bot
-version = 0.0.1
+version = 0.0.2

[options]
python_requires = >=3.8
82 changes: 70 additions & 12 deletions images/bot/src/bioconda_bot/comment.py
@@ -3,6 +3,7 @@
import re

from aiohttp import ClientSession
from typing import List, Tuple
from yaml import safe_load

from .common import (
@@ -24,16 +25,23 @@
# Given a PR and commit sha, post a comment with any artifacts
async def make_artifact_comment(session: ClientSession, pr: int, sha: str) -> None:
artifacts = await fetch_pr_sha_artifacts(session, pr, sha)

comment = compose_azure_comment(artifacts["azure"] if "azure" in artifacts else [])
if len(comment) > 0:
comment += "\n\n"
comment += compose_circlci_comment(artifacts["circleci"] if "circleci" in artifacts else [])
[Review comment from a Contributor] nit: compose_circleci_comment

await send_comment(session, pr, comment)

def compose_azure_comment(artifacts: List[Tuple[str, str]]) -> str:
nPackages = len(artifacts)
comment = "## Azure\n\n"

if nPackages > 0:
comment = "Package(s) built on Azure are ready for inspection:\n\n"
comment += "Package(s) built on Azure are ready for inspection:\n\n"
comment += "Arch | Package | Zip File\n-----|---------|---------\n"
install_noarch = ""
install_linux = ""
install_osx = ""

-# Table of packages and repodata.json
+# Table of packages and zips
for URL, artifact in artifacts:
if not (package_match := re.match(r"^((.+)\/(.+)\/(.+)\/(.+\.tar\.bz2))$", artifact)):
continue
@@ -61,9 +69,9 @@ async def make_artifact_comment(session: ClientSession, pr: int, sha: str) -> None:
comment += "```\nconda install -c ./packages <package name>\n```\n"

# Table of containers
comment += "***\n\nDocker image(s) built (images are in the LinuxArtifacts zip file above):\n\n"
comment += "Package | Tag | Install with `docker`\n"
comment += "--------|-----|----------------------\n"
imageHeader = "***\n\nDocker image(s) built (images for Azure are in the LinuxArtifacts zip file above):\n\n"
imageHeader += "Package | Tag | Install with `docker`\n"
imageHeader += "--------|-----|----------------------\n"

for URL, artifact in artifacts:
if artifact.endswith(".tar.gz"):
@@ -72,18 +80,68 @@ async def make_artifact_comment(session: ClientSession, pr: int, sha: str) -> None:
package_name, tag = image_name.split(':', 1)
#image_url = URL[:-3] # trim off zip from format=
#image_url += "file&subPath=%2F{}.tar.gz".format("%2F".join(["images", '%3A'.join([package_name, tag])]))
comment += imageHeader
imageHeader = "" # only add the header for the first image
comment += f"{package_name} | {tag} | "
comment += f'<details><summary>show</summary>`gzip -dc LinuxArtifacts/images/{image_name}.tar.gz \\| docker load`\n'
comment += "\n\n"
else:
-comment = (
+comment += (
"No artifacts found on the most recent Azure build. "
-"Either the build failed, the artifacts have were removed due to age, or the recipe was blacklisted/skipped."
+"Either the build failed, the artifacts have been removed due to age, or the recipe was blacklisted/skipped."
)
-await send_comment(session, pr, comment)
+return comment

def compose_circlci_comment(artifacts: List[Tuple[str, str]]) -> str:
nPackages = len(artifacts)

# Post a comment on a given PR with its CircleCI artifacts
if nPackages < 1:
return ""

comment = "## CircleCI\n\n"
comment += "Package(s) built on CircleCI are ready for inspection:\n\n"
comment += "Arch | Package | Repodata\n-----|---------|---------\n"

# Table of packages and repodata.json
for URL, artifact in artifacts:
if not (package_match := re.match(r"^((.+)\/(.+)\/(.+\.tar\.bz2))$", URL)):
continue
url, basedir, subdir, packageName = package_match.groups()
repo_url = "/".join([basedir, subdir, "repodata.json"])
conda_install_url = basedir

if subdir == "noarch":
comment += "noarch |"
elif subdir == "linux-64":
comment += "linux-64 |"
elif subdir == "linux-aarch64":
comment += "linux-aarch64 |"
else:
comment += "osx-64 |"
comment += f" [{packageName}]({URL}) | [repodata.json]({repo_url})\n"

# Conda install examples
comment += "***\n\nYou may also use `conda` to install these:\n\n"
comment += f"```\nconda install -c {conda_install_url} <package name>\n```\n"

# Table of containers
imageHeader = "***\n\nDocker image(s) built:\n\n"
imageHeader += "Package | Tag | Install with `docker`\n"
imageHeader += "--------|-----|----------------------\n"

for URL, artifact in artifacts:
if artifact.endswith(".tar.gz"):
image_name = artifact.split("/").pop()[: -len(".tar.gz")]
if ":" in image_name:
package_name, tag = image_name.split(":", 1)
comment += imageHeader
imageHeader = "" # only add the header for the first image
comment += f"[{package_name}]({URL}) | {tag} | "
comment += f'<details><summary>show</summary>`curl -L "{URL}" \\| gzip -dc \\| docker load`</details>\n'
comment += "</details>\n"
return comment

# Post a comment on a given PR with its artifacts
async def artifact_checker(session: ClientSession, issue_number: int) -> None:
url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}"
headers = {
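For orientation, here is a minimal, self-contained sketch of how the refactored flow in this file fits together: the artifacts now arrive as a dict keyed by CI provider, each provider gets its own composer, and the two sections are joined with a blank line. The sample URLs, package names, and the `compose_section` helper are hypothetical stand-ins, not code from this PR.

```python
from typing import Dict, List, Tuple

# Hypothetical (URL, artifact path) tuples standing in for fetch_pr_sha_artifacts output.
sample_artifacts: Dict[str, List[Tuple[str, str]]] = {
    "azure": [
        ("https://dev.azure.com/example/artifact.zip",  # hypothetical URL
         "LinuxArtifacts/packages/linux-64/examplepkg-1.0-0.tar.bz2"),
    ],
    "circleci": [
        ("https://output.circle-artifacts.com/0/packages/linux-64/examplepkg-1.0-0.tar.bz2",
         "packages/linux-64/examplepkg-1.0-0.tar.bz2"),
    ],
}

def compose_section(title: str, artifacts: List[Tuple[str, str]]) -> str:
    # Simplified stand-in for compose_azure_comment / compose_circlci_comment:
    # return nothing when a provider has no artifacts, otherwise a small table.
    if not artifacts:
        return ""
    lines = [f"## {title}", "", "Arch | Package", "-----|--------"]
    for url, artifact in artifacts:
        subdir, name = artifact.split("/")[-2:]
        lines.append(f"{subdir} | [{name}]({url})")
    return "\n".join(lines)

# Mirrors the joining logic in make_artifact_comment: Azure section first,
# then a blank line, then the CircleCI section (either may be empty).
comment = compose_section("Azure", sample_artifacts.get("azure", []))
circleci = compose_section("CircleCI", sample_artifacts.get("circleci", []))
if comment and circleci:
    comment += "\n\n"
comment += circleci
print(comment)
```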
56 changes: 50 additions & 6 deletions images/bot/src/bioconda_bot/common.py
@@ -137,9 +137,40 @@ async def fetch_azure_zip_files(session: ClientSession, buildId: str) -> [(str, str)]:
def parse_azure_build_id(url: str) -> str:
return re.search("buildId=(\d+)", url).group(1)

# Find artifact zip files, download them and return their URLs and contents
async def fetch_circleci_artifacts(session: ClientSession, workflowId: str) -> [(str, str)]:
artifacts = []

url_wf = f"https://circleci.com/api/v2/workflow/{workflowId}/job"
async with session.get(url_wf) as response:
# Sometimes we get a 301 error, so there are no longer artifacts available
if response.status == 301:
return artifacts
res_wf = await response.text()

res_wf_object = safe_load(res_wf)

if len(res_wf_object["items"]) == 0:
return artifacts
else:
for job in res_wf_object["items"]:
if job["name"].startswith(f"build_and_test-"):
circleci_job_num = job["job_number"]
url = f"https://circleci.com/api/v2/project/gh/bioconda/bioconda-recipes/{circleci_job_num}/artifacts"

async with session.get(url) as response:
res = await response.text()

res_object = safe_load(res)
for artifact in res_object["items"]:
zipUrl = artifact["url"]
pkg = artifact["path"]
if zipUrl.endswith(".tar.bz2"): # (currently excluding container images) or zipUrl.endswith(".tar.gz"):
artifacts.append((zipUrl, pkg))
return artifacts

# Given a PR and commit sha, fetch a list of the artifact zip files URLs and their contents
-async def fetch_pr_sha_artifacts(session: ClientSession, pr: int, sha: str) -> List[Tuple[str, str]]:
+async def fetch_pr_sha_artifacts(session: ClientSession, pr: int, sha: str) -> Dict[str, List[Tuple[str, str]]]:
url = f"https://api.github.com/repos/bioconda/bioconda-recipes/commits/{sha}/check-runs"

headers = {
@@ -151,15 +182,28 @@ async def fetch_pr_sha_artifacts(session: ClientSession, pr: int, sha: str) -> List[Tuple[str, str]]:
res = await response.text()
check_runs = safe_load(res)

artifact_sources = {}
for check_run in check_runs["check_runs"]:
# The names are "bioconda.bioconda-recipes (test_osx test_osx)" or similar
if check_run["name"].startswith("bioconda.bioconda-recipes (test_"):
if (
"azure" not in artifact_sources and
check_run["app"]["slug"] == "azure-pipelines" and
check_run["name"].startswith("bioconda.bioconda-recipes (test_")
):
# azure builds
# The azure build ID is in the details_url as buildId=\d+
buildID = parse_azure_build_id(check_run["details_url"])
zipFiles = await fetch_azure_zip_files(session, buildID)
-return zipFiles # We've already fetched all possible artifacts
-
-return []
+artifact_sources["azure"] = zipFiles # We've already fetched all possible artifacts from Azure
elif (
"circleci" not in artifact_sources and
check_run["app"]["slug"] == "circleci-checks"
):
# Circle CI builds
workflowId = safe_load(check_run["external_id"])["workflow-id"]
zipFiles = await fetch_circleci_artifacts(session, workflowId)
artifact_sources["circleci"] = zipFiles # We've already fetched all possible artifacts from CircleCI

return artifact_sources


async def get_sha_for_status(job_context: Dict[str, Any]) -> Optional[str]:
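As a reading aid for the new `fetch_circleci_artifacts`, the sketch below shows the CircleCI v2 response fields the function actually consumes (`items[].name` and `items[].job_number` from the workflow-jobs endpoint, `items[].url` and `items[].path` from the artifacts endpoint) and applies the same selection rules offline. The payloads and the `pick_artifacts` helper are hypothetical illustrations, not captured API output.

```python
from typing import List, Tuple

# Hypothetical workflow-jobs response (GET /api/v2/workflow/{id}/job).
workflow_jobs = {
    "items": [
        {"name": "lint", "job_number": 101},
        {"name": "build_and_test-linux-64", "job_number": 102},
    ]
}

# Hypothetical artifacts response (GET /api/v2/project/gh/.../{job_number}/artifacts).
job_artifacts = {
    "items": [
        {"path": "packages/linux-64/examplepkg-1.0-0.tar.bz2",
         "url": "https://output.circle-artifacts.com/0/packages/linux-64/examplepkg-1.0-0.tar.bz2"},
        {"path": "images/examplepkg:1.0.tar.gz",
         "url": "https://output.circle-artifacts.com/0/images/examplepkg%3A1.0.tar.gz"},
    ]
}

def pick_artifacts(jobs: dict, artifacts_by_job: dict) -> List[Tuple[str, str]]:
    # Same selection rules as fetch_circleci_artifacts: only build_and_test-* jobs,
    # and only .tar.bz2 package artifacts (container images are skipped for now).
    picked: List[Tuple[str, str]] = []
    for job in jobs["items"]:
        if not job["name"].startswith("build_and_test-"):
            continue
        for artifact in artifacts_by_job.get(job["job_number"], {"items": []})["items"]:
            if artifact["url"].endswith(".tar.bz2"):
                picked.append((artifact["url"], artifact["path"]))
    return picked

print(pick_artifacts(workflow_jobs, {102: job_artifacts}))
```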
4 changes: 3 additions & 1 deletion images/bot/src/bioconda_bot/merge.py
@@ -271,7 +271,9 @@ async def upload_artifacts(session: ClientSession, pr: int) -> str:
sha: str = pr_info["head"]["sha"]

# Fetch the artifacts (a list of (URL, artifact) tuples actually)
-artifacts = await fetch_pr_sha_artifacts(session, pr, sha)
+artifactDict = await fetch_pr_sha_artifacts(session, pr, sha)
+# Merge is deprecated, so leaving as Azure only
+artifacts = artifactDict["azure"]
artifacts = [artifact for (URL, artifact) in artifacts if artifact.endswith((".gz", ".bz2"))]
assert artifacts

Expand Down
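One consequence of the new return type worth noting: `upload_artifacts` indexes `artifactDict["azure"]` directly, which assumes an Azure check run was found for the commit. A defensive variant (illustrative only, not part of this PR) could fall back to an empty list:

```python
from typing import Dict, List, Tuple

# artifactDict would come from fetch_pr_sha_artifacts; an empty dict is shown for illustration.
artifactDict: Dict[str, List[Tuple[str, str]]] = {}

# Merge only consumes Azure artifacts; .get avoids a KeyError when none were found.
artifacts = artifactDict.get("azure", [])
artifacts = [artifact for (URL, artifact) in artifacts if artifact.endswith((".gz", ".bz2"))]
print(artifacts)
```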