Python threw an error while I was scraping the 妹子图 (girl pics) section of jandan.net, so frustrating! Could some kind expert explain this to a beginner? Thanks!
import urllib.request
import re

def open_url(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36')
    page = urllib.request.urlopen(req)
    html = page.read().decode('UTF-8')
    return html

def get_img(html):
    p = r'<img src="([^"]+\.jpg)"'
    imglist = re.findall(p, html)
    for each in imglist:
        print(each)
    for each in imglist:
        filename = each.split('/')[-1]
        urllib.request.urlretrieve('http://' + each, filename, None)

if __name__ == '__main__':
    url = 'http://jandan.net/ooxx/MjAyMTAyMDgtOTk=#comments'
    get_img(open_url(url))
The error screenshot is below (attachment not included here):
The URL is from the recent 随手拍 (random snapshots) column on jandan.net. Thanks in advance, everyone!
I think there is something wrong with that URL.
I get a 404 Not Found here right away.
Same with my own code; my guess is the site is blocking scrapers now. Try a different site.

import requests
import re
url = "http://jandan.net/ooxx/MjAyMTAyMDgtOTk=#comments"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36",
}
req = requests.get(url, headers=headers).text
p = r'<img src="([^"]+\.jpg)"'
img_list = re.findall(p, req)
print(img_list)
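One likely reason for the 404: the path segment in that URL is just a base64-encoded date plus page number, and old pages seem to get pruned (a reply further down builds page URLs with b64encode in exactly this way). A quick standard-library check shows what the slug encodes:

import base64

slug = 'MjAyMTAyMDgtOTk='
# Decodes to '20210208-99', i.e. the 2021-02-08 listing, page 99
print(base64.b64decode(slug).decode())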
The code is fine here; the problem is on line 21, the urlretrieve call. The img src scraped from the page is protocol-relative, i.e. it already starts with //, so prepending 'http://' produces http:////... with an extra pair of slashes. Drop them and use 'http:' + each instead:
import urllib.request
import re

def open_url(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36')
    page = urllib.request.urlopen(req)
    html = page.read().decode('UTF-8')
    return html

def get_img(html):
    p = r'<img src="([^"]+\.jpg)"'
    imglist = re.findall(p, html)
    for each in imglist:
        print(each)
    for each in imglist:
        filename = each.split('/')[-1]
        # The scraped src already starts with //, so only 'http:' is needed
        urllib.request.urlretrieve('http:' + each, filename, None)

if __name__ == '__main__':
    url = 'http://jandan.net/ooxx/MjAyMTAyMDgtOTk=#comments'
    get_img(open_url(url))
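One more caveat: open_url sends a browser User-Agent, but urlretrieve goes through urllib's default opener, so the image downloads are still sent with the default Python-urllib User-Agent and a picky server may reject them. Installing a global opener makes urlretrieve send the same header too (a minimal sketch, reusing the UA string from above):

import urllib.request

# urlretrieve uses the globally installed opener, so registering a
# browser User-Agent here applies to every urlretrieve call as well.
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent',
                      'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36')]
urllib.request.install_opener(opener)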
import urllib.request as u_request
import os, re, base64, requests
header = {}
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
def url_open(url):
    html = requests.get(url, headers=header).text
    return html

def find_images(url):
    html = url_open(url)
    m = r'<img src="([^"]+\.jpg)"'
    match = re.findall(m, html)
    # Prefix each URL individually; 'http:' + match on the whole list raises TypeError
    match = ['http:' + each for each in match]
    print(match)
    return match
def save_images(folder, img_addrs):
    for each in img_addrs:
        try:
            req = u_request.Request(each, headers=header)
            response = u_request.urlopen(req)
            cat_image = response.read()
            filename = each.split('/')[-1]
            with open(filename, 'wb') as f:
                f.write(cat_image)
            # print(each)
        except OSError as error:
            print(error)
            continue
        except ValueError as error:
            print(error)
            continue
def web_link_encode(url, folder):
    for i in range(180, 200):
        string_date = '20201216-'
        string_date += str(i)
        string_date = string_date.encode('utf-8')
        str_base64 = base64.b64encode(string_date)
        page_url = url + str_base64.decode() + '=#comments'
        print(page_url)
        img_addrs = find_images(page_url)
        save_images(folder, img_addrs)

def download_the_graph(url):
    folder = 'graph'
    os.mkdir(folder)
    os.chdir(folder)
    web_link_encode(url, folder)
if __name__ == '__main__':
    url = 'http://jandan.net/pic/'
    download_the_graph(url)

import requests
from lxml import etree
from urllib.request import urlretrieve

def getit(session, url):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"
    }
    res = session.get(url, headers=headers)
    xmls = etree.HTML(res.text)
    comments = xmls.xpath("//ol[@class='commentlist']/li/div/div/div[@class='text']/p/img/@src")
    for cm in comments:
        print(cm)
        urlretrieve(url="http:" + cm, filename=cm.split("/")[-1])
    nav = xmls.xpath("//div[@class='comments']/div[@class='cp-pagenavi']/a/@href")
    if nav:
        # xpath() returns a list; take the first href instead of concatenating str + list
        next_page = "http:" + nav[0]
        getit(session, next_page)
if __name__ == '__main__':
    session = requests.Session()
    start = "http://jandan.net/ooxx/comments"
    getit(session=session, url=start)
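A side note on that last snippet (not from the original reply): because getit calls itself once per page, a long crawl can hit Python's default recursion limit (about 1000 frames). An iterative loop over the same XPaths avoids that; getit_iter below is a hypothetical name and assumes the same page structure as the reply above:

import requests
from lxml import etree
from urllib.request import urlretrieve

HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
                         "(KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"}

def getit_iter(session, url):
    # Follow the next-page link in a loop instead of recursing, so the
    # number of pages crawled is not bounded by the recursion limit.
    while url:
        res = session.get(url, headers=HEADERS)
        xmls = etree.HTML(res.text)
        for cm in xmls.xpath("//ol[@class='commentlist']/li/div/div/div[@class='text']/p/img/@src"):
            print(cm)
            urlretrieve(url="http:" + cm, filename=cm.split("/")[-1])
        nav = xmls.xpath("//div[@class='comments']/div[@class='cp-pagenavi']/a/@href")
        url = "http:" + nav[0] if nav else None  # stop when there is no next-page link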