Web scraper ConnectionError problem
Part of the code is shown below:
import requests
from bs4 import BeautifulSoup
from lxml import etree
from cnsenti import Sentiment
import jieba
import smtplib
from email.mime.text import MIMEText
import os
import time
def get_and_save(url):
    global save_txt  # used by the save step, which was trimmed from this post
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip',
        'DNT': '1',
        'Connection': 'close'
    }
    r = requests.get(url, headers=headers)
    r.encoding = 'utf-8'
    html = etree.HTML(r.text)  # etree.HTML() builds an XPath-parsable tree and auto-repairs malformed HTML
    result = html.xpath('//div/p/text()')  # originally '//div[@id="mainNewsContent"]/p/text()'
    # clean up the text; str() of the list escapes U+3000, so match the escaped form
    result = str(result)
    result2 = result.replace('\\u3000', '')
    #print(result2)
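    # NOTE: the post says only part of the code is shown; the actual "save"
    # step that uses save_txt was trimmed. A minimal sketch of what it might
    # look like, assuming save_txt holds a file path (an assumption, not the
    # original code):
    #with open(save_txt, 'a', encoding='utf-8') as f:
    #    f.write(result2 + '\n')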
def get_url():
    xunhuan = range(1, 11)  # placeholder: the original page range was lost from the post
    liebiao = []
    for k in xunhuan:
        print(k)
        html = 'http://guba.eastmoney.com/default,1_' + str(k) + '.html'
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip',
            'DNT': '1',
            'Connection': 'close'
        }
        page = requests.get(html, headers=headers)
        soup_obj = BeautifulSoup(page.content, 'html.parser')
        #print(soup_obj.prettify())
        for link in soup_obj.findAll('a'):  # every <a> tag
            if 'href' in link.attrs:  # that actually carries an href
                #print(link.attrs['href'])
                a = 'http://guba.eastmoney.com' + link.attrs['href']  # e.g. href='/news,000762,954300722.html'
                if 'news' in a:
                    liebiao.append(a)
    for i in liebiao:
        get_and_save(i)
        print('executed ' + i)
if __name__ == '__main__':
    get_url()
Error: ConnectionError: HTTPConnectionPool(host='guba.eastmoney.comnews,cjpl,953932572.html', port=80): Max retries exceeded with url: / (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x000001644FDBB948>: Failed to establish a new connection: getaddrinfo failed'))
It scrapes fine at first, but after a while this error shows up. I first assumed I was hitting the site too frequently, so I added time.sleep(60); it still fails the same way after a while.
I also added exception handling: after scraping for a while, several requests in a row fail with this error, then crawling goes back to normal, then several in a row fail again, over and over.
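Since manual sleeps and a bare try/except only mask genuinely transient failures, one common alternative is automatic retries with backoff at the transport level. A minimal sketch, assuming urllib3's Retry class as bundled with requests; session.get would replace the requests.get calls above:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retry = Retry(total=5, backoff_factor=1,
              status_forcelist=[429, 500, 502, 503, 504])
session.mount('http://', HTTPAdapter(max_retries=retry))
session.mount('https://', HTTPAdapter(max_retries=retry))

# then, inside get_url() / get_and_save():
# page = session.get(html, headers=headers, timeout=10)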
Is this error happening because the server has blocked me, or is there some other cause? Is there any way to fix it?
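A strong hint sits in the traceback itself: the host is guba.eastmoney.comnews,cjpl,953932572.html, that is, the base URL and the href were glued together without a slash. Some links on the page evidently use a relative href (news,cjpl,... with no leading /), so 'http://guba.eastmoney.com' + href yields a hostname that DNS cannot resolve, which is exactly the getaddrinfo failure; links whose href does start with / keep working, which would explain why the errors come and go in batches. A sketch of the inner loop using urllib.parse.urljoin, which handles both forms:

from urllib.parse import urljoin

for link in soup_obj.findAll('a'):
    if 'href' in link.attrs:
        # urljoin resolves both '/news,...' and 'news,...' against the base
        a = urljoin('http://guba.eastmoney.com/', link.attrs['href'])
        if 'news' in a:
            liebiao.append(a)

If that is the cause, sleeps and retries cannot help, since the malformed hostname will fail every time.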
One more question... has the server cast some strange spell on me for scraping too much? Every link I open now lands on the same content, whether I open it by hand or through code.
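That symptom usually means the server is redirecting requests it dislikes (rate limiting or an anti-bot landing page) to one fixed page. requests records redirects, so a quick diagnostic is to compare the final URL with the one requested; a minimal sketch using the Response.history and Response.url attributes (the variable names are assumptions matching the code above):

r = requests.get(a, headers=headers)
if r.history:  # non-empty when the server redirected the request
    print('redirected:', a, '->', r.url,
          'via', [h.status_code for h in r.history])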