Scraping_Data_from_Websites.py — forked from nitinkaushik01/Data_Science_Bootcamp.
A notebook-exported walkthrough of web scraping with BeautifulSoup and pandas.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
# coding: utf-8
# ## Import Libraries
# In[87]:
import bs4 as bs
import urllib.request
# ## Store HTML source code in a variable
# In[88]:
# Fetch the page inside a context manager so the HTTP response is always
# closed, even if read() raises (the original leaked the open connection).
with urllib.request.urlopen('https://www.nytimes.com/') as response:
    src = response.read()
# In[89]:
# Parse the raw response bytes with the lxml parser backend.
bsoup = bs.BeautifulSoup(src, 'lxml')
# In[ ]:
# Dump the full parsed document.
print(bsoup)
# ## Extract Title of the web page
# In[92]:
# bsoup.title is the document's <title> tag; .text strips the markup.
print(bsoup.title.text)
# ## Extract URLs
# In[ ]:
# Print the href attribute of every anchor on the page
# (get() yields None for anchors without an href).
anchors = bsoup.find_all('a')
for anchor in anchors:
    print(anchor.get('href'))
# ## Extract Paragraphs
# In[94]:
# The first <p> element in the document.
print(bsoup.p)
# In[95]:
# Every <p> element, as a result set.
print(bsoup.find_all('p'))
# In[98]:
# Text content of the first paragraph only.
print(bsoup.find('p').get_text())
# In[99]:
# One line of text per paragraph.
ptags = bsoup.find_all('p')
for paragraph in ptags:
    print(paragraph.text)
# ## Extract Table Data
# In[100]:
# Fetch ESPN's NBA assists-leaders page; the context manager closes the
# HTTP response instead of leaking the open connection.
with urllib.request.urlopen('http://www.espn.com/nba/statistics/player/_/stat/assists/sort/avgAssists/') as response:
    src = response.read()
bsoup = bs.BeautifulSoup(src, 'lxml')
# First <table> element on the page (the statistics table).
tbl = bsoup.find('table')
# In[101]:
# Walk every row; collect each cell's text into a plain list and print it.
tbl_rows = tbl.find_all('tr')
for tr in tbl_rows:
    td = tr.find_all('td')
    row = [i.text for i in td]
    print(row)
# In[102]:
# A bare `type(row)` expression only displays in a notebook and is a no-op
# in a script — print it so the result is visible here too.
print(type(row))
# ## Extract Table Data using Pandas
# In[103]:
import pandas as pd

# read_html returns one DataFrame per <table> found on the page.
data = pd.read_html("http://www.espn.com/nba/statistics/player/_/stat/assists/sort/avgAssists/")
for frame in data:
    print(frame)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]: