爬取百度百科词条“猪八戒”及副标题
import urllib.requestimport urllib.parse
import re
from bs4 import BeautifulSoup
def main():
keyword = input('请输入关键词:')
keyword = urllib.parse.urlencode({'word':keyword}) # 看不懂
response = urllib.request.urlopen('http://baike.baidu.com/search/word?%s' % keyword) # 此行代码中的word、keyword与上面两行代码有关?
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
for each in soup.find_all(href = re.compile('item')):
content = ''.join()# 看不懂
url2 = ''.join(['http://baike.baidu.com', each['href']])# 看不懂
response2 = urllib.request.urlopen(url2)
html2 = response2.read()
soup2 = BeautifulSoup(html2, 'html.parser')
if soup2.h2:
content = ''.join()# 看不懂
content = ''.join() # 看不懂
print(content)
if __name__ == '__main__':
main()
【问题】:
1、无法正常运行获得结果
2、很多地方没看懂,尤其标注#的地方看不懂,可否帮忙加下注解,最好逐行
3、url、html、soup、h没看到有0或1,为何使用url2、html2、soup2、h2
特发帖求助 无法看懂就对了,记着,提问一定要问能正确运行的代码
你比如说我想问1+1为什么等于3 这谁能回答??? import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup
def main():
keyword = input('请输入检索关键词:')
param = urllib.parse.urlencode({'word':keyword}) # word=%E7%8C%AA%E5%85%AB%E6%88%92
url = 'https://baike.baidu.com/search/word?%s' % param # ->%E7%8C%AA%E5%85%AB%E6%88%92
#url = 'https://baike.baidu.com/item/%s' % param
#headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'} # 可隐藏headers???
req = urllib.request.Request(url)
response = urllib.request.urlopen(req)
html = response.read().decode('utf-8')
soup = BeautifulSoup(html, 'html.parser')
for each in soup.find_all(href = re.compile('item')):
content = ''.join()
url2 = ''.join(['http://baike.baidu.com', each['href'] if "%" in each['href'] else urllib.parse.quote(each['href'])])
req2 = urllib.request.Request(url2)
response2 = urllib.request.urlopen(req2)
html2 = response2.read().decode('utf-8')
soup2 = BeautifulSoup(html2, 'html.parser')
if soup2.h2:
content = ''.join()
content = ''.join()
print(content)
if __name__ == '__main__':
main()
问题:
1、上面两个URL好像都可以,为什么会这样?主要应该看哪里用哪个呢?
2、用了列表分片后,仅过滤最后一条无用记录,开头几条无法过滤,若不用列表分片或仅用[:-1]还会报错无法运行,这是为什么?
3、如果要加入伪装的header,上述headers用法是否正确?
4、输入关键词“黄埔军校”,刚开始正常抓取,快结束时会报错,不知道什么原因 本帖最后由 lzb1001 于 2022-4-9 13:36 编辑
【四个问题】
1、下面两个URL好像都可以,为什么会这样?主要应该看哪里用哪个呢?
url = '……/search/word?%s' % param
url = '……/item/%s' % param
2、用了列表分片后,仅过滤最后一条无用记录,开头几条无法过滤;若不用列表分片或仅用[:-1]还会报错无法运行,这是为什么?
3、如果要加入伪装的header,上述headers用法是否正确?
headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
4、输入关键词“黄埔军校”,刚开始正常抓取,快结束时会报如下错误,不知道什么原因
Traceback (most recent call last):
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 1319, in do_open
encode_chunked=req.has_header('Transfer-encoding'))
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1252, in request
self._send_request(method, url, body, headers, encode_chunked)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1298, in _send_request
self.endheaders(body, encode_chunked=encode_chunked)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1247, in endheaders
self._send_output(message_body, encode_chunked=encode_chunked)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1026, in _send_output
self.send(msg)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 966, in send
self.connect()
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 938, in connect
(self.host,self.port), self.timeout, self.source_address)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\socket.py", line 707, in create_connection
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\socket.py", line 752, in getaddrinfo
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
socket.gaierror: getaddrinfo failed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "D:\work\p14_92.py", line 35, in <module>
main()
File "D:\work\p14_92.py", line 26, in main
response2 = urllib.request.urlopen(req2)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 525, in open
response = self._open(req, data)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 543, in _open
'_open', req)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 503, in _call_chain
result = func(*args)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 1347, in http_open
return self.do_open(http.client.HTTPConnection, req)
File "C:\Users\dell\AppData\Local\Programs\Python\Python37\lib\urllib\request.py", line 1321, in do_open
raise URLError(err)
urllib.error.URLError: <urlopen error getaddrinfo failed>
页:
[1]