爬虫的自我修养
import urllib.request

import chardet
def main():
    """Visit every URL listed in url.txt and save each page as url_<i>.txt.

    Reads url.txt (one URL per line), fetches each page with a browser-like
    User-Agent header, detects the page encoding with chardet, and writes the
    decoded text to a numbered output file.
    """
    with open("url.txt", "r") as f:
        # urls.txt holds one URL per line, so split on newlines.
        urls = f.read().splitlines()
    for i, each_url in enumerate(urls, start=1):
        # Some sites (e.g. www.douban.com) reject urllib's default
        # User-Agent with an HTTPError, so present a browser-like one.
        req = urllib.request.Request(
            each_url, headers={"User-Agent": "Mozilla/5.0"}
        )
        # Context manager ensures the HTTP response is closed.
        with urllib.request.urlopen(req) as response:
            html = response.read()
        # Detect the page encoding from the raw bytes; chardet may return
        # None (e.g. for an empty body), so fall back to UTF-8.
        encode = chardet.detect(html)["encoding"] or "utf-8"
        # GBK is a superset of GB2312; pages declared as GB2312 often
        # contain GBK-only characters, so decode with GBK instead.
        if encode == "GB2312":
            encode = "GBK"
        filename = "url_%d.txt" % i
        with open(filename, "w", encoding=encode) as each_file:
            each_file.write(html.decode(encode, "ignore"))


if __name__ == "__main__":
    main()
这是依次访问文件中每一个网址的标准答案,但是在爬取 www.douban.com 时报错,错误为:
    raise HTTPError(req.full_url, code, msg, hdrs, fp)
HTTPError
删掉这个网址就可以了,这是为什么?

回复:给请求加上 User-Agent 试试看:

for each_url in urls:
    req = urllib.request.Request(each_url)
    req.add_header('user-agent', 'firefox')
    response = urllib.request.urlopen(req)
页:
[1]