Homework question from lesson 55 of the Python course
Could anyone help? The code 小甲鱼 wrote a few years ago for scraping Baidu Baike now errors out. How should I fix it?

import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup

def test_url(soup):
    result = soup.find(text=re.compile("百度百科尚未收录词条"))
    if result:
        print(result)  # Baidu tacks a stray “ onto the end of this message; strip it off
        return False
    else:
        return True

def summary(soup):
    word = soup.h1.text
    # If there is a subtitle, print it together with the title
    if soup.h2:
        word += soup.h2.text
    # Print the title
    print(word)
    # Print the summary
    if soup.find(class_="lemma-summary"):
        print(soup.find(class_="lemma-summary").text)

def get_urls(soup):
    for each in soup.find_all(href=re.compile("view")):
        content = ''.join([each.text])
        url2 = ''.join(["http://baike.baidu.com", each["href"]])
        response2 = urllib.request.urlopen(url2)
        html2 = response2.read()
        soup2 = BeautifulSoup(html2, "html.parser")
        # If the linked entry has a subtitle, append it
        if soup2.h2:
            content = ''.join([content, soup2.h2.text])
        content = ''.join([content, " -> ", url2])
        yield content

def main():
    word = input("请输入关键词:")
    keyword = urllib.parse.urlencode({"word": word})
    response = urllib.request.urlopen("https://baike.baidu.com/search/word?%s" % keyword)
    html = response.read()
    soup = BeautifulSoup(html, "html.parser")
    if test_url(soup):
        summary(soup)
        print("下边打印相关链接:")
        each = get_urls(soup)
        while True:
            try:
                for i in range(10):
                    print(next(each))
            except StopIteration:
                break
            command = input("输入任意字符将继续打印,q退出程序:")
            if command == 'q':
                break
            else:
                continue

if __name__ == "__main__":
    main()
What error are you getting?

zltzlt posted on 2020-3-29 08:11:
> What error are you getting?
Traceback (most recent call last):
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\baidu.py", line 101, in <module>
    main()
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\baidu.py", line 90, in main
    print(next(each))
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\baidu.py", line 66, in get_urls
    response2 = urllib.request.urlopen(url2)
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\lib\urllib\request.py", line 222, in urlopen
    return opener.open(url, data, timeout)
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\lib\urllib\request.py", line 525, in open
    response = self._open(req, data)
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\lib\urllib\request.py", line 543, in _open
    '_open', req)
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\lib\urllib\request.py", line 503, in _call_chain
    result = func(*args)
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\lib\urllib\request.py", line 1345, in http_open
    return self.do_open(http.client.HTTPConnection, req)
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\lib\urllib\request.py", line 1317, in do_open
    encode_chunked=req.has_header('Transfer-encoding'))
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\lib\http\client.py", line 1244, in request
    self._send_request(method, url, body, headers, encode_chunked)
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\lib\http\client.py", line 1255, in _send_request
    self.putrequest(method, url, **skips)
  File "C:\Users\xiha\AppData\Local\Programs\Python\Python37-32\lib\http\client.py", line 1122, in putrequest
    self._output(request.encode('ascii'))
UnicodeEncodeError: 'ascii' codec can't encode characters in position 36-39: ordinal not in range(128)
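
The failure reproduces in isolation: http.client encodes the HTTP request line as ASCII, so any URL with raw Chinese characters in it raises this error (positions 36-39 would match a four-character Chinese query). A minimal repro sketch, not from the original script; the example link is hypothetical but mirrors the kind Baidu now returns:

import urllib.request

# Hypothetical link with raw Chinese in the query string
url = "http://baike.baidu.com/wikicategory/view?categoryName=恐龙大全"

try:
    urllib.request.urlopen(url)
except UnicodeEncodeError as e:
    # The request line is encoded as ASCII, so the Chinese
    # characters in the query cannot be sent as-is
    print(e)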
It turns out that when 小甲鱼 wrote this, none of the links contained Chinese characters; now some do, so urlopen chokes on them. Apply the same encoding he does for keyword when you build url2: percent-encode the Chinese characters and it works. One small change and you're done.
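
The idea in isolation (a minimal sketch; urllib.parse.quote with a safe set is an alternative to rebuilding the query with urlencode, and the sample href is just for illustration):

import urllib.parse

# Sample href of the kind that now breaks urlopen (illustrative only)
href = "/wikicategory/view?categoryName=恐龙大全"

# Percent-encode the non-ASCII characters but leave the URL structure alone
safe_href = urllib.parse.quote(href, safe=":/?=&#")

url2 = "http://baike.baidu.com" + safe_href
print(url2)
# http://baike.baidu.com/wikicategory/view?categoryName=%E6%81%90%E9%BE%99%E5%A4%A7%E5%85%A8

Here's my full modified script: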
import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup

def test_url(soup):
    result = soup.find(text=re.compile("百度百科尚未收录词条"))
    if result:
        print(result)  # Baidu tacks a stray “ onto the end of this message; strip it off
        return False
    else:
        return True

def summary(soup):
    word = soup.h1.text
    # If there is a subtitle, print it together with the title
    if soup.h2:
        word += soup.h2.text
    # Print the title
    print(word)
    # Print the summary
    if soup.find(class_="lemma-summary"):
        print(soup.find(class_="lemma-summary").text)

def get_urls(soup):
    for each in soup.find_all(href=re.compile("view")):
        content = ''.join([each.text])
        print(each.text)
        if '=' in each['href']:
            # Links like /wikicategory/view?categoryName=恐龙大全 carry raw Chinese
            # after "Name="; split it off and percent-encode it with urlencode
            links, name = each['href'].split('Name=', 2)
            keyname = urllib.parse.urlencode({"Name": name})
            key_link = links + keyname
        else:
            key_link = each['href']
        url2 = ''.join(["http://baike.baidu.com", key_link])
        response2 = urllib.request.urlopen(url2)
        html2 = response2.read()
        soup2 = BeautifulSoup(html2, "html.parser")
        # If the linked entry has a subtitle, append it
        if soup2.h2:
            content = ''.join([content, soup2.h2.text])
        content = ''.join([content, " -> ", url2])
        yield content

def main():
    word = input("请输入关键词:")
    keyword = urllib.parse.urlencode({"word": word})
    response = urllib.request.urlopen("https://baike.baidu.com/search/word?%s" % keyword)
    html = response.read()
    soup = BeautifulSoup(html, "html.parser")
    if test_url(soup):
        summary(soup)
        print("下边打印相关链接:")
        each = get_urls(soup)
        while True:
            try:
                for i in range(10):
                    print(next(each))
            except StopIteration:
                break
            command = input("输入任意字符将继续打印,q退出程序:")
            if command == 'q':
                break
            else:
                continue

if __name__ == "__main__":
    main()
Here's the output:
请输入关键词:tom
汤姆(美国动画片《猫和老鼠》中的主角)
汤姆(Tom)是米高梅公司制作的经典动画片《猫和老鼠》中的主角之一,70年代老动画中的著名卡通明星角色,是一只蓝灰和白色混合的英国短毛猫。他与小老鼠杰瑞(Jerry)之间可称得上是一对欢喜冤家,二者亦敌亦友,有时互相捣乱,有时互相帮助,有时争论不休,有时团结和谐。他每天忙碌于捉住同住在一起的杰瑞,但总是不如意,总在被耍,憨傻得可爱。生活中充满幽默搞笑,同时也具有抒情与伤情的一幕;主人通常都为两只鞋太太(又黑又胖的女人)。另外,他也爱慕漂亮的千金母猫图多盖洛。有时还被其他成员欺负,如斯派克(Spike)、布奇、莱特宁、托普斯等。
下边打印相关链接:
恐龙百科
恐龙百科 -> http://baike.baidu.com/wikicategory/view?categoryName=%E6%81%90%E9%BE%99%E5%A4%A7%E5%85%A8
多肉百科
多肉百科 -> http://baike.baidu.com/wikicategory/view?categoryName=%E5%A4%9A%E8%82%89%E6%A4%8D%E7%89%A9
里约残奥会吉祥物
里约残奥会吉祥物(里约残奥会吉祥物) -> http://baike.baidu.com/item/TOM/19885318#viewPageContent
名字
名字(名字) -> http://baike.baidu.com/item/TOM/2044473#viewPageContent
全面订单管理
全面订单管理(全面订单管理) -> http://baike.baidu.com/item/TOM/2044457#viewPageContent
移动互联网公司
移动互联网公司(移动互联网公司) -> http://baike.baidu.com/item/TOM/39488#viewPageContent
化学术语
化学术语(化学术语) -> http://baike.baidu.com/item/TOM/2044489#viewPageContent
同义词
同义词目录 -> http://baike.baidu.com/subview/71844/10028254.htm
I also think one of the hrefs on 小甲鱼's target page is missing half a quote mark, so some of the URLs don't look quite right. You can debug that yourself; I'll leave it there.

I changed mine to this and it stopped erroring:
import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup

def test_url(soup):
    result = soup.find(text=re.compile("百度百科尚未收录词条"))
    if result:
        print(result)  # Baidu tacks a stray “ onto the end of this message; strip it off
        return False
    else:
        return True

def summary(soup):
    word = soup.h1.text
    # If there is a subtitle, print it together with the title
    if soup.h2:
        word += soup.h2.text
    # Print the title
    print(word)
    # Print the summary
    if soup.find(class_="lemma-summary"):
        print(soup.find(class_="lemma-summary").text)

def get_urls(soup):
    for each in soup.find_all(href=re.compile("view")):
        content = ''.join([each.text])
        url2 = ''.join(["http://baike.baidu.com", each["href"]])
        try:
            response2 = urllib.request.urlopen(url2)
            html2 = response2.read()
            soup2 = BeautifulSoup(html2, "html.parser")
            # If the linked entry has a subtitle, append it
            if soup2.h2:
                content = ''.join([content, soup2.h2.text])
            content = ''.join([content, " -> ", url2])
            yield content
        except:
            # Silently skip any link that fails to open (e.g. raw Chinese in the URL)
            pass

def main():
    word = input("请输入关键词:")
    keyword = urllib.parse.urlencode({"word": word})
    response = urllib.request.urlopen("https://baike.baidu.com/search/word?%s" % keyword)
    html = response.read()
    soup = BeautifulSoup(html, "html.parser")
    if test_url(soup):
        summary(soup)
        print("下边打印相关链接:")
        each = get_urls(soup)
        while True:
            try:
                for i in range(10):
                    print(next(each))
            except StopIteration:
                break
            command = input("输入任意字符将继续打印,q退出程序:")
            if command == 'q':
                break
            else:
                continue

if __name__ == "__main__":
    main()
Chysial posted on 2020-3-29 10:02:
> It turns out that when 小甲鱼 wrote this, none of the links contained Chinese characters; now some do, so urlopen chokes on them. Apply the same encoding he does for keyword ...

Thanks!

fresh_python posted on 2020-3-29 10:52:
> I changed mine to this and it stopped erroring

True, it doesn't error any more, because that part just gets dropped, but it's still usable.
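
If you'd rather see which links get dropped instead of hiding every error, one option (just a sketch, not from anyone's post; safe_open is a hypothetical helper name) is to catch only UnicodeEncodeError and report the skipped URL:

import urllib.request

def safe_open(url):
    # Hypothetical helper: return the response, or None if the URL
    # contains characters urlopen cannot encode. Other errors (HTTP,
    # network) still propagate instead of being swallowed.
    try:
        return urllib.request.urlopen(url)
    except UnicodeEncodeError:
        print("skipped (non-ASCII in URL):", url)
        return None

In get_urls you would then call response2 = safe_open(url2) and move on to the next link whenever it returns None.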