Yes, it did change. As I recall, the original answer can only turn up the succulent and dinosaur encyclopedia entries now. If you look at the page source, you'll see those entry links use a different keyword (view became item). Below is the code I adapted for that back when I was learning; see if it works for you. The later exercise can't follow the official answer anymore either, so I'll post that one as well.
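If the page changes again someday, you can check the current link format yourself. A minimal sketch (the sample keyword here is just an illustration; any word works):

import re
import urllib.parse
import urllib.request
from bs4 import BeautifulSoup

# Fetch the search page for a sample keyword and dump the first few
# entry links; if they still start with /item/, the code below applies.
word = urllib.parse.urlencode({'word': '猪八戒'})
page = urllib.request.urlopen('http://baike.baidu.com/search/word?%s' % word).read()
for a in BeautifulSoup(page, 'html.parser').find_all(href=re.compile('item'))[:5]:
    print(a['href'])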
Problem 1:

import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup

def main():
    keyword = input('Enter a keyword: ')
    keyword = urllib.parse.urlencode({'word': keyword})
    url = 'http://baike.baidu.com/search/word?%s' % keyword
    response = urllib.request.urlopen(url)
    html = response.read()
    soup = BeautifulSoup(html, 'html.parser')

    # When the keyword is ambiguous, Baidu Baike lists every sense of it;
    # the entry links now use 'item' in their href.
    for each in soup.find_all(href=re.compile('item')):
        content = each.text
        try:
            url2 = ''.join(['http://baike.baidu.com', each['href']])
            response2 = urllib.request.urlopen(url2)
        except UnicodeEncodeError:
            # The href contains raw Chinese characters, so percent-encode
            # them before opening the URL.
            temp = each['href'].split('/')[2]
            key = urllib.parse.urlencode({'': temp}).split('=')[1]
            each['href'] = ''.join(['/item/', key])
            url2 = ''.join(['http://baike.baidu.com', each['href']])
            response2 = urllib.request.urlopen(url2)
        finally:
            html2 = response2.read().decode('utf-8')
            soup2 = BeautifulSoup(html2, 'html.parser')
            if soup2.h2:  # the sub-title that distinguishes each sense
                content = ''.join([content, soup2.h2.text])
            content = ''.join([content, ' -> ', url2])
            print(content)

if __name__ == '__main__':
    main()
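A side note on the except branch above: the urlencode-then-split trick works, but urllib.parse.quote does the same percent-encoding more directly. A minimal sketch, with a made-up href:

from urllib.parse import quote

# quote() leaves '/' alone by default, so a whole path can be encoded at once.
href = '/item/猪八戒'  # hypothetical href with raw Chinese in it
print('http://baike.baidu.com' + quote(href))
# -> http://baike.baidu.com/item/%E7%8C%AA%E5%85%AB%E6%88%92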
Problem 2:

import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup

def get_url(soup):
    # Yield one entry (text plus link) at a time, so main() can print
    # them in batches of ten.
    for each in soup.find_all(href=re.compile('item')):
        content = ''.join([each.text])
        try:
            url2 = ''.join(['http://baike.baidu.com', each['href']])
            response2 = urllib.request.urlopen(url2)
        except UnicodeEncodeError:
            # Percent-encode hrefs that contain raw Chinese characters,
            # keeping the /item/ path segment.
            temp = each['href'].split('/')[2]
            each['href'] = urllib.parse.urlencode({'': temp}).split('=')[1]
            url2 = ''.join(['http://baike.baidu.com', '/item/', each['href']])
            response2 = urllib.request.urlopen(url2)
        finally:
            html2 = response2.read()
            soup2 = BeautifulSoup(html2, 'html.parser')
            if soup2.h2:
                content = ''.join([content, soup2.h2.text])
            content = ''.join([content, ' -> ', url2])
            yield content

def main():
    keyword = input('Enter a keyword: ')
    keyword = urllib.parse.urlencode({'word': keyword})
    url = 'http://baike.baidu.com/search/word?%s' % keyword
    response = urllib.request.urlopen(url)
    html = response.read()
    soup = BeautifulSoup(html, 'html.parser')

    # '百度百科尚未收录词条' is the page text meaning the entry does not exist yet.
    result = soup.find(text=re.compile('百度百科尚未收录词条'))
    if result:
        # Strip the odd trailing quote mark from the message, then print it.
        result = result.split(' ”')[0]
        print(result)
    else:
        # The entry exists: print its title (plus sub-title) and summary.
        title = soup.h1.text
        if soup.h2:
            title += soup.h2.text
        print(title)
        if soup.find(class_='lemma-summary'):
            print(soup.find(class_='lemma-summary').text)
        print('Related links below:')
        # Print ten links at a time; a separate function with a generator
        # is the convenient way to do this.
        each = get_url(soup)
        while True:
            try:
                for i in range(10):
                    print(next(each))
            except StopIteration:
                break
            if input('Any key to continue printing entries, q to cancel: ') == 'q':
                break

if __name__ == '__main__':
    main()
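The batching loop in main() is the only tricky part of problem 2, so here is the same pattern isolated with a dummy generator, just to make the control flow easier to see:

def links():  # stand-in for get_url(soup)
    for i in range(25):
        yield 'link %d' % i

each = links()
while True:
    try:
        for i in range(10):  # one batch of ten per loop
            print(next(each))
    except StopIteration:  # generator exhausted mid-batch: stop asking
        break
    if input('Any key to continue, q to cancel: ') == 'q':
        break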