Problem: the for loop in my code never seems to iterate
# imports
import requests
import time
import os
import threading
import parsel

if not os.path.exists('image'):
    os.mkdir('image')

base_url = 'https://anime-pictures.net/pictures/view_posts/0?lang=en'
headers = {
    'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
}
def get(url, headers, cookie=None):
    '''request the page'''
    response = requests.get(url, headers=headers)
    html_data = response.text
    return html_data
def parsel_data(html_data):
    '''extract the data'''
    selector = parsel.Selector(html_data)
    result_list = selector.xpath('//span[@class="img_block_big"]')
    for result in result_list:
        image_url = result.xpath('./a/picture/source/img/@src').extract_first()
        image_id = result.xpath('./a/picture/source/img/@id').extract_first()
        img_url = 'https:' + image_url  # build the full url by hand
        all_title = img_url
        img_data = requests.get(url=all_title, headers=headers).content
        return all_title, image_id, img_data
def save(all_title, image_id, img_data):
    '''save the data'''
    try:
        with open('image\\' + all_title, mode='wb') as f:
            f.write(img_data)
            print('saved:', image_id)
    except:
        print('save failed')
def sleep(seconds):
    '''sleep for the given number of seconds'''
    time.sleep(seconds)
for _ in range(0, 100):
    html_data = get(url=base_url, headers=headers)
    image_data = parsel_data(html_data=html_data)
    all_title = image_data[0]  # url https://xxxxxxx...
    img_id = image_data[1]     # ID
    img_data = image_data[2]   # binary data
    print(all_title, img_id, img_data)
If you debug this, you will see it keeps repeating the data for the same image. If any of you step through it, you will find the image extraction in parsel_data() does not seem to work: it always pulls out the same image's data. After debugging, I found that the return inside parsel_data() never loops and keeps handing back the same image data; the other parts look fine so far.
def parsel_data(html_data):
    '''extract the data'''
    selector = parsel.Selector(html_data)
    result_list = selector.xpath('//span[@class="img_block_big"]')
    for result in result_list:
        image_url = result.xpath('./a/picture/source/img/@src').extract_first()
        image_id = result.xpath('./a/picture/source/img/@id').extract_first()
        img_url = 'https:' + image_url  # build the full url by hand
        all_title = img_url
        img_data = requests.get(url=all_title, headers=headers).content
        return all_title, image_id, img_data
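To confirm that the XPath matching itself works and the early return is the real culprit, here is a small debug sketch (parsel_data_debug is just a hypothetical helper name used for illustration):

def parsel_data_debug(html_data):
    '''debug sketch: check how many nodes the selector actually matches'''
    selector = parsel.Selector(html_data)
    result_list = selector.xpath('//span[@class="img_block_big"]')
    print('matched nodes:', len(result_list))  # should be more than 1 if the page lists several images
    for result in result_list:
        image_url = result.xpath('./a/picture/source/img/@src').extract_first()
        print(image_url)  # a different src on each pass, because no return cuts the loop short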
First of all, I tried this on my side; possibly because of a version difference, your code is also missing a cookie.
Also, this piece of code seems to have a problem:
You wrote the return inside the loop. Inside a function, as soon as return runs, execution stops right there.
Here is a simple example you can try yourself:
names = ['Michael', 'Bob', 'Tracy']

def name():
    for name in names:
        # print(name)
        sn = name
        return sn

if __name__ == '__main__':
    a = name()
    print(a)
From this example you can see why the loop only executes once and then stops.
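For comparison, here is a minimal sketch of the same toy example rewritten as a generator (name_gen is just an illustrative name), so the loop actually walks through every element:

names = ['Michael', 'Bob', 'Tracy']

def name_gen():
    for name in names:
        # yield hands back one value and pauses the function instead of ending it
        yield name

if __name__ == '__main__':
    for n in name_gen():
        print(n)  # prints Michael, Bob, Tracy in turn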
Last edited by yjsx86 on 2020-7-21 10:50
Just turn the parsel_data function into a generator.
# imports
import requests
import time
import os
import threading
import parsel

if not os.path.exists('image'):
    os.mkdir('image')

base_url = 'https://anime-pictures.net/pictures/view_posts/0?lang=en'
headers = {
    'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
}
def get(url, headers):
    '''request the page'''
    response = requests.get(url, headers=headers)
    html_data = response.text
    return html_data
def parsel_data(html_data):
    '''extract the data'''
    selector = parsel.Selector(html_data)
    result_list = selector.xpath('//span[@class="img_block_big"]')
    for result in result_list:
        image_url = result.xpath('./a/picture/source/img/@src').extract_first()
        image_id = result.xpath('./a/picture/source/img/@id').extract_first()
        img_url = 'https:' + image_url  # build the full url by hand
        all_title = img_url
        img_data = requests.get(url=all_title, headers=headers).content
        # changed return to yield
        yield all_title, image_id, img_data
def save(all_title, image_id, img_data):
    '''save the data'''
    try:
        with open('image\\' + all_title, mode='wb') as f:
            f.write(img_data)
            print('saved:', image_id)
    except:
        print('save failed')
def sleep(seconds):
    '''sleep for the given number of seconds'''
    time.sleep(seconds)
if __name__ == '__main__':
    html_data = get(url=base_url, headers=headers)
    for image_data in parsel_data(html_data):
        all_title = image_data[0]  # url https://xxxxxxx...
        img_id = image_data[1]     # ID
        img_data = image_data[2]   # binary data
        print(all_title, img_id, img_data)
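As a follow-up, a minimal sketch of how the same loop could unpack each yielded tuple directly and pass it on to save() and sleep(), which the snippet above defines but never calls; deriving file_name from the last URL segment is my assumption, since the full URL itself is not a valid file name on Windows:

if __name__ == '__main__':
    html_data = get(url=base_url, headers=headers)
    # unpack each yielded (url, id, bytes) tuple right in the for statement
    for all_title, img_id, img_data in parsel_data(html_data):
        print(all_title, img_id)
        file_name = all_title.split('/')[-1]  # assumption: name the file after the last URL segment
        save(file_name, img_id, img_data)
        sleep(1)  # pause briefly between downloads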
yjsx86 replied at 2020-7-21 10:49:
Just turn the parsel_data function into a generator.
I did change it to yield this morning, but then I couldn't index or slice the result any more.
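Two ways around that, as a minimal sketch (assuming html_data has already been fetched as above): index each yielded tuple inside the loop, or collect the generator into a list first if you really need to slice the whole result set.

# each yielded item is still a tuple, so indexing works inside the loop
for image_data in parsel_data(html_data):
    all_title = image_data[0]
    img_id = image_data[1]
    img_data = image_data[2]

# or, if you need to slice the whole result set afterwards,
# materialise the generator into a list (this fetches everything up front)
image_list = list(parsel_data(html_data))
print(image_list[0])    # first (url, id, bytes) tuple
print(image_list[1:3])  # slicing works on the list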
yjsx86 replied at 2020-7-21 10:49:
Just turn the parsel_data function into a generator.
Thanks for the help! This thread isn't finished yet; there may be new bugs in a bit.