My image-scraping crawler throws an error, asking the experts for help
import os
import urllib
import urllib.request

def open_url(url):
    # Browser-like User-Agent header so the site does not block the request
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'}
    req = urllib.request.Request(url, headers=headers)
    response = urllib.request.urlopen(req)
    html = response.read()  # .decode(encoding='UTF-8')
    return html

def get_page(html):
    html = open_url(url).decode(encoding='UTF-8')
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    return html  # BUG: returns the whole page; should return the slice html[a:b]

'''
response = urllib.request.urlopen('http://www.kuaidaili.com/ops')
html = response.read().decode(encoding='UTF-8')
return html
'''

def find_imgs(page_url):  # find each image link and return them as a list
    html = open_url(url).decode(encoding='UTF-8')  # BUG: uses the global url, not the page_url parameter
    img_adds = []
    a = html.find('img src=')
    while a != -1:
        b = html.find('.jpg', a, a + 255)  # limit the end of the search range
        if b != -1:
            img_adds.append(html)  # BUG: appends the whole page; should be html[a+9:b+4]
        else:
            b = a + 9
        a = html.find('img src=', b)
    for adds_img in img_adds:
        print(each_img)  # BUG: NameError, the loop variable is adds_img
    return img_adds

def save_imgs(filename, img_adds):
    for each in img_adds:
        f = each.spilt('/')[-1]  # BUG: 'spilt' is a typo for split
        with open(f, 'wb') as f1:
            img = open_url(each)
            f1.write(img)

def download(filename='ooxx', page=5):
    os.mkdir(filename)
    os.chdir(filename)
    global url, page_url
    url = 'http://jandan.net/ooxx'
    # page_url = open_url(url).decode(encoding='UTF-8')
    page_num = int(get_page(url))
    for i in range(page):
        page_num -= i
        get_url = url + '/' + str(page_num) + '#comments'
        img_adds = find_imgs(page_url)  # BUG: page_url is never assigned (get_url was built but never used), so this raises NameError
        save_imgs(filename, img_adds)

if __name__ == '__main__':
    download()
This is the image-scraping crawler example from 小甲鱼's tutorial; running the program raises an error.
gaomengsuijia replied on 2017-11-16 15:15:
page_url is not defined.
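That NameError is exactly what the posted code produces: in download() the loop builds get_url, but the call that follows is find_imgs(page_url), and page_url is never assigned anywhere. A minimal sketch of that single fix, keeping everything else as posted, is to build and pass the same variable:

    for i in range(page):
        page_num -= i
        page_url = url + '/' + str(page_num) + '#comments'  # was get_url, which was never used
        img_adds = find_imgs(page_url)  # pass the page URL that was just built
        save_imgs(filename, img_adds)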
Follow-up from the original poster: I added page_url=open_url(url).decode(encoding='UTF-8') under the main function, but when I run the program my computer freezes:
def download(filename='ooxx', page=5):
    os.mkdir(filename)
    os.chdir(filename)
    global url, page_url
    url = 'http://jandan.net/ooxx'
    page_url = open_url(url).decode(encoding='UTF-8')  # page_url is now the page HTML, not a URL
    page_num = int(get_page(url))
    for i in range(page):
        page_num -= i
        get_url = url + '/' + str(page_num) + '#comments'
        img_adds = find_imgs(page_url)
        save_imgs(filename, img_adds)

Some details in there are still wrong, for example the spelling.
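Assigning the page HTML to page_url only masks the NameError, and a plausible cause of the apparent freeze is the remaining bugs: find_imgs ignores its parameter, and img_adds.append(html) stores a full copy of the page on every match, so the list fills with megabytes of HTML that then get printed and written out. Below is a corrected sketch of the whole program, not the tutorial's official solution. The 'http:' prefix for protocol-relative '//...' image links is an assumption about how the site serves image addresses, and the page-URL pattern is kept exactly as in the original post:

import os
import urllib.request

def open_url(url):
    # Send a browser-like User-Agent so the site does not reject the request.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                             'AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/43.0.2357.134 Safari/537.36'}
    req = urllib.request.Request(url, headers=headers)
    response = urllib.request.urlopen(req)
    return response.read()

def get_page(url):
    # Read the current page number out of the 'current-comment-page' marker.
    html = open_url(url).decode('utf-8')
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    return html[a:b]  # return only the digits, not the whole page

def find_imgs(page_url):
    # Scan one comment page and collect every .jpg address.
    html = open_url(page_url).decode('utf-8')  # use the parameter, not a global
    img_adds = []
    a = html.find('img src=')
    while a != -1:
        b = html.find('.jpg', a, a + 255)  # limit how far to look for the extension
        if b != -1:
            img_adds.append(html[a + 9:b + 4])  # slice out just the address
        else:
            b = a + 9
        a = html.find('img src=', b)
    return img_adds

def save_imgs(img_adds):
    for each in img_adds:
        if each.startswith('//'):  # assumption: protocol-relative links
            each = 'http:' + each
        name = each.split('/')[-1]  # split, not spilt
        with open(name, 'wb') as f:
            f.write(open_url(each))

def download(filename='ooxx', page=5):
    os.mkdir(filename)
    os.chdir(filename)
    url = 'http://jandan.net/ooxx'
    page_num = int(get_page(url))
    for i in range(page):
        page_num -= i
        page_url = url + '/' + str(page_num) + '#comments'
        img_adds = find_imgs(page_url)
        save_imgs(img_adds)

if __name__ == '__main__':
    download()

With get_page returning just the digits, int(get_page(url)) works, and no globals are needed: every function takes what it uses as a parameter.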