爬图片过程中遇到的问题
大神们,下边的代码问题出在哪里呀,就像图片中这样的,不清楚为什么走了一轮后,又跑回去了。要怎么修改呀。。from queue import Queue
import requests as rs
from bs4 import BeautifulSoup
import os
from urllib import request
import threading
lock = threading.Lock()
#构造页面的安全队列
page_queue = Queue(100)
#构造图片的安全队列
img_queue = Queue(500)
for i in range(1,20):
page_url = 'https://www.doutub.com/img_lists/new/{}'.format(i)
page_queue.put(page_url)
#定义生产者
def producer():
global page_queue,img_queue,lock
img_urls = []
j = 1
while 1:
if page_queue.empty():
break
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36 Edg/97.0.1072.62',
'Referer':'https://www.doutub.com/img_lists/new/2'}
url = page_queue.get()
res = rs.get(url,headers=headers)
content = res.content.decode('utf8')
soup = BeautifulSoup(content,'lxml')
divs = soup.find_all('div',class_='cell')
for div in divs:
try:
img_url = div.a.img['src']
img_urls.append(img_url)
except:
continue
#这个锁应该放哪里呀!!!!晕死了
lock.acquire()
for img_url in img_urls:
suffix = os.path.splitext(img_url)
img_name = str(j) + suffix
img_queue.put((img_url,img_name))
print(j)
j += 1
lock.release()
#定义消费者
def consumer():
global page_queue,img_queue,lock
while 1:
if page_queue.empty() and img_queue.empty():
break
img = img_queue.get()
url,filename = img
request.urlretrieve(url,r"C:\Users\87912\Desktop\派神\爬虫入门到实战\爬图片\斗图啦\\"+filename)
#print(filename + '下载成功')
#定义多线程
def multi_thread():
global page_queue,img_queue
#定义生产者
for i in range(5):
t = threading.Thread(target=producer)
t.start()
#定义消费者
for i in range(5):
t = threading.Thread(target=consumer)
t.start()
if __name__ == '__main__':
multi_thread()
使用队列是不需要加锁的 isdkz 发表于 2022-1-17 23:12
使用队列是不需要加锁的
可是要怎么办才能让程序按照123456....这样执行下去呀。重复了好多次123456...
页:
[1]