Modeled on the Douban Top 250 scraper: scraping article titles, authors and post times from the 先知 community (xz.aliyun.com)
import requests
import bs4
import re


def open_url(url):
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}
    res = requests.get(url, headers=headers)
    return res


def find_title(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # article titles
    title = []
    targets = soup.find_all('p', class_='topic-summary')
    for each in targets:
        title.append(each.a.text)
    # info (author, type, time)
    messages = []
    targets = soup.find_all('p', class_='topic-info')
    for each in targets:
        try:
            messages.append(each.a.text.split('\n').strip() + each.a.text.split('\n').strip())
        except:
            continue
    result = []
    length = len(title)
    for i in range(length):
        result.append(title[i] + messages[i] + '\n')
    return result


# find how many pages there are in total
def find_depth(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    depth = soup.find('li', class_='disable').previous_sibling.previous_sibling.text
    return int(depth)


def main():
    res = "https://xz.aliyun.com/"
    res = open_url(res)
    depth = find_depth(res)
    result = []
    for i in range(depth):
        url = res + '/?page=' + str(2 * i)
        res = open_url(url)
        result.extend(find_title(res))
    with open("先知.txt", 'w', encoding='utf-8') as file:
        for each in result:
            file.write(each)


if __name__ == "__main__":
    main()

Thewhitecrow posted on 2020-8-5 14:53:
Bro, why is it that when I copied and ran your code, the results weren't scraped completely? It only starts from 7.23; everything before that was missed.
import requests
import bs4
import re


def open_url(url):
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}
    res = requests.get(url, headers=headers)
    return res


def find_title(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # article titles
    title = []
    targets = soup.find_all('p', class_='topic-summary')
    for each in targets:
        title.append(each.a.text.strip('\n').strip(' '))
    # info (author, type, time)
    messages = []
    targets = soup.find_all('p', class_='topic-info')
    for each in targets:
        messages.append(' '.join(each.text.split()))  # collapse whitespace in the author / type / time text
    result = []
    length = len(title)
    for i in range(length):
        result.append([title[i], messages[i]])
    return result


# find how many pages there are in total
def find_depth(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    depth = soup.select('#Wrapper > div > div.span10 > div > div > div.pagination.clearfix > ul > li:nth-child(2) > a')
    depth = re.search(r'\d{3}', str(depth))
    return int(depth.group())


def main():
    res = "https://xz.aliyun.com/"
    res1 = open_url(res)
    result = []
    result.extend(find_title(res1))
    depth = find_depth(res1)
    for i in range(1, depth):
        url = res + '/?page=' + str(2 * i)
        print('Scraping page', i)
        res2 = open_url(url)
        result.extend(find_title(res2))
    with open("先知.txt", 'w', encoding='utf-8') as file:
        for each in result:
            file.write(str(each))
            file.write('\n')
            file.write('=============================================')
            file.write('\n')


if __name__ == "__main__":
    main()

OK, the earlier version didn't include the first page.
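Since each entry in result is now a [title, info] pair, file.write(str(each)) stores Python list literals in 先知.txt. If plain text output is preferred, the fields could be joined instead; a minimal sketch (the sample data is just a stand-in for what find_title returns):

# minimal sketch: one "title | author / type / time" line per article instead of a list repr
result = [['Some article title', 'someauthor / Web Security / 2020-08-05']]  # stand-in data
with open("先知.txt", 'w', encoding='utf-8') as file:
    for title_text, info_text in result:
        file.write(title_text + ' | ' + info_text + '\n')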
Thewhitecrow posted on 2020-8-4 18:21:
I'm getting AttributeError: 'NoneType' object has no attribute 'previous_sibling'. I suspect the problem is in the part that works out how many pages there are in total. Hoping someone more experienced can point me in the right direction.

Last edited by suchocolate on 2020-8-5 10:44
Looking at the page HTML, the li whose class is disable has no preceding sibling node, which is why the error says there isn't one.
I'm not that familiar with bs, so I wrote a simple version with xpath:
import requests
from lxml import etree
import re


def main():
    url = 'https://xz.aliyun.com/'
    headers = {'user-agent': 'firefox'}
    r = requests.get(url, headers=headers)
    html = etree.HTML(r.text)
    trs = html.xpath('//tr')
    for item in trs:
        title = item.xpath('normalize-space(./td/p[@class="topic-summary"]/a[@class="topic-title"]/text())')
        info = item.xpath('./td/p[@class="topic-info"]//text()')
        info = [t.strip() for t in info if t.strip(' \n/')]   # drop blank and "/" separator text nodes
        print('Title:', title)
        print('Author:', info[0] if info else '')             # assumes the author is the first remaining node
        print('Type:', info[1] if len(info) > 1 else '')      # and the category the second
        print('Posted:', re.findall(r'\d{4}(?:-\d{2}){2}', ' '.join(info)))
        print('=' * 50)


if __name__ == '__main__':
    main()
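To make the cause of the original error concrete: find() returns None when no element matches the given class, and calling .previous_sibling on None raises exactly that AttributeError. In addition, .previous_sibling often lands on a whitespace text node, which is why the Douban-style code has to chain it twice. A minimal sketch with made-up pagination markup (the class names and structure here are assumptions for illustration, not the site's real HTML):

import bs4

# made-up pagination markup, only to illustrate the failure mode
html = '''<ul class="pagination">
<li class="active"><a>1</a></li>
<li><a>2</a></li>
<li><a>166</a></li>
<li class="disabled"><a>next</a></li>
</ul>'''
soup = bs4.BeautifulSoup(html, 'html.parser')

print(soup.find('li', class_='disable'))      # None: nothing has that class, so .previous_sibling would raise

last = soup.find('li', class_='disabled')
if last is not None:                          # guard before touching attributes
    # find_previous_sibling skips the whitespace text nodes that .previous_sibling would return
    depth = int(last.find_previous_sibling('li').text)
    print(depth)                              # 166

With a guard like this, find_depth could fall back to a single page instead of crashing when the pagination element is not found.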
suchocolate posted on 2020-8-5 10:41:
Looking at the page HTML, the li whose class is disable has no preceding sibling node, which is why the error says there isn't one.

Your PyCharm setup is pretty eye-catching, by the way.

Thewhitecrow posted on 2020-8-4 18:21:
I'm getting AttributeError: 'NoneType' object has no attribute 'previous_sibling'. I suspect the problem is in the part that works out how many pages ...
import requests
import bs4
import re


def open_url(url):
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}
    res = requests.get(url, headers=headers)
    return res


def find_title(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # article titles
    title = []
    targets = soup.find_all('p', class_='topic-summary')
    for each in targets:
        title.append(each.a.text.strip('\n').strip(' '))
    # info (author, type, time)
    messages = []
    targets = soup.find_all('p', class_='topic-info')
    for each in targets:
        messages.append(' '.join(each.text.split()))  # collapse whitespace in the author / type / time text
    result = []
    length = len(title)
    for i in range(length):
        result.append([title[i], messages[i]])
    return result


# find how many pages there are in total
def find_depth(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    depth = soup.select('#Wrapper > div > div.span10 > div > div > div.pagination.clearfix > ul > li:nth-child(2) > a')
    depth = re.search(r'\d{3}', str(depth))
    return int(depth.group())


def main():
    res = "https://xz.aliyun.com/"
    res1 = open_url(res)
    depth = find_depth(res1)
    result = []
    for i in range(1, depth):
        url = res + '/?page=' + str(2 * i)
        print('Scraping page', i)
        res2 = open_url(url)
        result.extend(find_title(res2))
    with open("先知.txt", 'w', encoding='utf-8') as file:
        for each in result:
            file.write(str(each))
            file.write('\n')
            file.write('=============================================')
            file.write('\n')


if __name__ == "__main__":
    main()

The code is fixed.
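One caveat about find_depth in the version above: re.search(r'\d{3}', ...) only matches a three-digit number, so it would stop working if the total page count ever has two or four digits. A small sketch of a more tolerant extraction (the link snippet is made up to stand in for str(depth)):

import re

# made-up repr of the last-page link, standing in for str(depth) above
link = '[<a href="/?page=166">166</a>]'
m = re.search(r'page=(\d+)', link)
depth = int(m.group(1)) if m else 1    # fall back to a single page if nothing matches
print(depth)                           # 166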
suchocolate posted on 2020-8-5 10:41:
Looking at the page HTML, the li whose class is disable has no preceding sibling node, which is why the error says there isn't one.
Bro, I just noticed the class issue you pointed out and changed it as well. There's still something wrong, though: when I debug, the part that works out how many pages there are still has a syntax problem.
import requests
import bs4
import re


def open_url(url):
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}
    res = requests.get(url, headers=headers)
    return res


def find_title(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # article titles
    title = []
    targets = soup.find_all('p', class_='topic-summary')
    for each in targets:
        title.append(each.a.text)
    # info (author, type, time)
    messages = []
    targets = soup.find_all('p', class_='topic-info')
    for each in targets:
        try:
            messages.append(each.p.text.split('\n').strip() + each.p.text.split('\n').strip())
        except:
            continue
    result = []
    length = len(title)
    for i in range(length):
        result.append(title[i] + messages[i] + '\n')
    return result


# find how many pages there are in total
def find_depth(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    depth = soup.find('li', class_='disabled').previous_sibling.previous_sibling.text
    return int(depth)


def main():
    host = "https://xz.aliyun.com/"
    res = open_url(host)
    depth = find_depth(res)
    result = []
    for i in range(depth):
        url = host + '/?page=' + str(2 + i)
        res = open_url(url)
        result.extend(find_title(res))
    with open("先知.txt", 'w', encoding='utf-8') as file:
        for each in result:
            file.write(each)


if __name__ == "__main__":
    main()
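A side note on the page loop above and in the earlier versions: if ?page= is an ordinary sequential page number (which the site's pagination suggests, though this is an assumption), then str(2 + i) over range(depth) starts at page 2 and never requests page 1, and str(2 * i) only ever requests even-numbered pages. That would explain results that only reach back to a certain date. A minimal sketch of building one URL per page, from 1 through the last page:

# minimal sketch: one URL per page, assuming ?page= counts up one page at a time
host = 'https://xz.aliyun.com/'
depth = 5                                    # stand-in for find_depth()'s return value
urls = [host + '?page=' + str(page) for page in range(1, depth + 1)]
print(urls)   # ['https://xz.aliyun.com/?page=1', ..., 'https://xz.aliyun.com/?page=5']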
1q23w31 posted on 2020-8-5 14:15:
The code is fixed.
Bro, you're really good at this. I think I get it now, thanks.

suchocolate posted on 2020-8-5 10:41:
Looking at the page HTML, the li whose class is disable has no preceding sibling node, which is why the error says there isn't one.
Bro, I'll go study this one. Thanks a lot.

1q23w31 posted on 2020-8-5 14:15:
The code is fixed.
Bro, why is it that when I copied and ran your code, the results weren't scraped completely? It only starts from 7.23; everything before that was missed.

Thewhitecrow posted on 2020-8-5 14:53:
Bro, why is it that when I copied and ran your code, the results weren't scraped completely? It only starts from 7.23; everything before that was missed.

Hold on, I'll tweak the code.
1q23w31 posted on 2020-8-5 15:01:
OK, the earlier version didn't include the first page.

Thanks bro, that gives me plenty to reflect on and learn from.