wikiScrape.py
import requests
import wptools
from bs4 import BeautifulSoup  # only used in the commented-out parsing line below
import wikipedia


BASE_URL = 'https://en.wikipedia.org/w/api.php'
HEADERS = {'User-Agent': 'Mozilla/5.0'}  # needed to prevent a 403 error from Wikipedia


def getPage(name):
    # Search for article titles matching `name` via the MediaWiki OpenSearch API.
    my_atts = {
        "action": "opensearch",
        "format": "json",
        "search": name,
        "namespace": "0",
        "limit": "10",
        "utf8": True,
        "formatversion": 2,
    }
    r = requests.get(BASE_URL, headers=HEADERS, params=my_atts)
    print(r.status_code)
    # soup = BeautifulSoup(r.content, 'html.parser')
    return r.json()


def getSummary(name):
    # Print the article summary, then return the page id of the matching article.
    print(wikipedia.summary(name))
    my_atts = {
        "action": "query",
        "format": "json",
        "titles": name,
        "prop": "info",
    }
    r = requests.get(BASE_URL, headers=HEADERS, params=my_atts).json()
    # dict.keys() is not subscriptable in Python 3, so take the first key explicitly.
    return next(iter(r["query"]["pages"]))


# f = wptools.page("Ed Sheeran").get_query()
def getArtistInfo(name):
    # Parse the article's infobox with wptools (stored in page.data) and return
    # the first field that is not the image or the record labels.
    fela = wptools.page(name).get_parse()
    infobox = fela.data.get('infobox') or {}
    for info in infobox:
        if info not in ("image", "labels"):
            return infobox[info]
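

# A minimal usage sketch, not part of the original script: it assumes network
# access and that the requests, wikipedia, and wptools packages are installed.
# "Ed Sheeran" is taken from the commented-out example above.
if __name__ == "__main__":
    getPage("Ed Sheeran")                # prints the HTTP status of the OpenSearch call
    print(getSummary("Ed Sheeran"))      # prints the summary, then the page id
    print(getArtistInfo("Ed Sheeran"))   # first infobox field other than image/labels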