"""Build TSV and JSON catalogs of geospatial datasets from the AWS Open Data Registry."""

import json
import os
import shutil
import yaml
import leafmap
import pandas as pd
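# Source archive of the AWS Open Data Registry and local working paths.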
url = "https://github.com/awslabs/open-data-registry/archive/refs/heads/main.zip"
out_dir = "open-data-registry-main"
zip_file = "open-data-registry-main.zip"
max_chars = 80 # The maximum number of characters in each column
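# Remove any leftover copy of the registry from a previous run.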
if os.path.exists(out_dir):
    shutil.rmtree(out_dir)
if os.path.exists(zip_file):
    os.remove(zip_file)
# The "datasets" folder receives copies of the matching YAML files below;
# create it if it does not already exist so the copy step cannot fail.
os.makedirs("datasets", exist_ok=True)
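# Download and unzip the registry archive, then collect all dataset YAML files.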
leafmap.download_file(url, output=zip_file, unzip=True)
in_dir = os.path.join(out_dir, "datasets")
files = leafmap.find_files(in_dir, ext=".yaml")
print(f"Total number of AWS open datasets: {len(files)}")
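# Filter the registry down to geospatial datasets: keep entries whose tags overlap
# the geo-related tag list below, copy their YAML files into ./datasets, and flatten
# every resource into a record of cleaned (and optionally truncated) metadata fields.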
def generate_datasets(files, max_chars=80, sep_tags=False):
    datasets = []
    names = {}
    geo_tags = [
        "gis",
        "earth observation",
        "events",
        "mapping",
        "meteorological",
        "environmental",
        "transportation",
        "geospatial",
        "satellite imagery",
    ]
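    # Parse each dataset description, skipping entries marked as deprecated.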
    for file in files:
        dataset = {}
        with open(file, "r") as f:
            dataset = yaml.safe_load(f)
        if "Deprecated" in dataset:
            continue
        tags = dataset.get("Tags", [])
        name = dataset.get("Name", "")
        if bool(set(geo_tags) & set(tags)):
            basename = os.path.basename(file)
            out_file = os.path.join("datasets", basename)
            shutil.copy(file, out_file)
            resources = dataset.get("Resources", [])
            names[name] = len(resources)
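            # Strip Markdown link syntax and stray characters from each resource
            # description, truncating to max_chars when a limit is given.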
            for resource in resources:
                before_href = (
                    resource["Description"].split("](")[0].replace("[", "")
                )
                if len(resource["Description"].split("](")) > 1:
                    after_href = (
                        resource["Description"].split("](")[1].split(")")[1]
                    )
                else:  # No hyperlink
                    after_href = ""
                resource["Description"] = (
                    f"{before_href}{after_href}"[:max_chars]
                    .replace("\n", "")
                    .replace(".", "")
                    .replace("or [SQS", "")
                    .replace("(ORC", "")
                    .replace("[", "")
                    .replace("(2007-2014", "(2007-2014)")
                    .replace("(2007-2013", "(2007-2013)")
                    .strip()
                )
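                # Flatten the resource into a single record; when a dataset exposes
                # more than one resource, append the resource description to its name.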
                item = {}
                if names[name] > 1:
                    item["Name"] = f"{name} - {resource['Description']}"
                else:
                    item["Name"] = name
                for key in resource:
                    item[key] = resource[key]
                item["Documentation"] = (
                    dataset["Documentation"]
                    .replace("<br/>", "")
                    .replace("\n", "")[:max_chars]
                )
                item["Contact"] = (
                    dataset["Contact"]
                    .replace("<br/>", "")
                    .replace("\n", "")[:max_chars]
                )
                item["ManagedBy"] = dataset["ManagedBy"][:max_chars]
                item["UpdateFrequency"] = dataset["UpdateFrequency"][:max_chars]
                item["License"] = dataset["License"].replace("\n", "")[:max_chars]
                if sep_tags:
                    item["Tags"] = dataset["Tags"]
                else:
                    item["Tags"] = ", ".join(dataset["Tags"])
                datasets.append(item)
    print(f"Total number of geospatial datasets: {len(datasets)}")
    return datasets
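# Export a human-readable catalog (truncated fields, comma-joined tags) as TSV.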
tsv_datasets = generate_datasets(files, max_chars=max_chars, sep_tags=False)
df = pd.DataFrame(tsv_datasets)
df = df.sort_values(by="Name")
df.to_csv("aws_geo_datasets.tsv", index=False, sep="\t")
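# Export the full catalog (untruncated fields, tags kept as lists) as JSON records.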
json_datasets = generate_datasets(files, max_chars=None, sep_tags=True)
df = pd.DataFrame(json_datasets)
df = df.sort_values(by="Name")
data = json.loads(df.to_json(orient="records"))
with open("aws_geo_datasets.json", "w") as f:
    json.dump(data, f, indent=4)