|
|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
书中14.4案例2:
import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup
def main():
    """Look up a keyword on Baidu Baike and print related entries.

    Prompts the user for a keyword, fetches the Baike page for it, then
    follows every link whose href contains "item" and prints the linked
    entry's subtitle (its first <h2>, when present) and URL.

    Side effects: network I/O and printing; returns nothing.
    """
    keyword = input("请输入要查询的的关键词:")
    # urlencode percent-encodes the (possibly Chinese) keyword, so the
    # query string itself is already ASCII-safe.
    keyword = urllib.parse.urlencode({"word": keyword})
    response = urllib.request.urlopen("http://baike.baidu.com/item/word?%s" % keyword)
    html = response.read()
    soup = BeautifulSoup(html, "html.parser")

    # BS: walk every link to another Baike entry ("item" in the href).
    for each in soup.find_all(href=re.compile("item")):
        content = ''.join([each.text])
        # BUG FIX: hrefs scraped from the page may contain raw Chinese
        # characters; passing them to urlopen raised
        # UnicodeEncodeError: 'ascii' codec can't encode characters...
        # quote() percent-encodes the path so the URL is ASCII-safe.
        url2 = ''.join(["http://baike.baidu.com", urllib.parse.quote(each["href"])])
        response2 = urllib.request.urlopen(url2)
        html2 = response2.read()
        soup2 = BeautifulSoup(html2, "html.parser")
        if soup2.h2:
            content = ''.join([content, soup2.h2.text])
        content = ''.join([content, "——>", url2])
        print(content)


if __name__ == "__main__":
    main()
报错UnicodeEncodeError: 'ascii' codec can't encode characters in position 10-12: ordinal not in range(128)
求解决
事实证明输英文是没事的,输中文就不对了。所以把 URL 中带中文的部分(链接的 href 路径)进行 URL 编码就可以了
代码:
import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup


def main():
    """Search Baidu Baike for a keyword and print each related entry.

    Asks the user for a keyword, loads the matching Baike page, then
    visits every linked entry (href containing "item") and prints its
    subtitle (first <h2>, when present) followed by its URL.

    Side effects: network I/O and printing; returns nothing.
    """
    keyword = input("请输入要查询的的关键词:")
    # Percent-encode the (possibly Chinese) keyword for the query string.
    keyword = urllib.parse.urlencode({"word": keyword})
    response = urllib.request.urlopen("http://baike.baidu.com/item/word?%s" % keyword)
    html = response.read()
    soup = BeautifulSoup(html, "html.parser")

    # BS: follow every link to another Baike entry.
    for each in soup.find_all(href=re.compile("item")):
        content = ''.join([each.text])
        # URL-encode the href: scraped paths may contain Chinese
        # characters, which urlopen cannot send un-encoded
        # (UnicodeEncodeError otherwise).
        url2 = ''.join(["http://baike.baidu.com", urllib.parse.quote(each["href"])])
        response2 = urllib.request.urlopen(url2)
        html2 = response2.read()
        soup2 = BeautifulSoup(html2, "html.parser")
        if soup2.h2:
            content = ''.join([content, soup2.h2.text])
        content = ''.join([content, "——>", url2])
        print(content)


if __name__ == "__main__":
    main()
复制代码
|
|