Multithreaded meme image scraper downloads nothing
xmpython (last edited 2020-5-3 19:52):
Could someone please take a look at my program and tell me where it goes wrong? Why does it exit on its own after running for only a few seconds, without scraping anything at all?
Without multithreading it downloads the images just fine. As soon as I add multithreading it stops working: I hit run, nothing seems to happen, the program ends by itself after a few seconds, and nothing gets downloaded. I'm about to cry.
import requests
from lxml import etree
from fake_useragent import UserAgent
from urllib import request
import threading
from queue import Queue


class Producer(threading.Thread):
    headers = {
        "User-Agent": UserAgent().chrome
    }

    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        while True:
            if self.page_queue.empty():
                break
            url = self.page_queue.get()
            self.get_url(url)

    def get_url(self, url):
        response = requests.get(url, headers=self.headers)
        e = etree.HTML(response.text)
        img_urls = e.xpath("//div[@class='page-content text-center']/div/a/img/@data-original")
        for img in img_urls:
            self.img_queue.put(img)


class Consumer(threading.Thread):
    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        while True:
            if self.img_queue.empty() and self.page_queue.empty():
                break
            img_url = self.img_queue.get()
            filename = img_url.split("/")[-1]
            request.urlretrieve(img_url, "images/" + filename, None)
            print(img_url)


def main():
    page_queue = Queue(3)
    img_queue = Queue(1000)
    for x in range(1, 4):
        page_url = "https://www.doutula.com/photo/list/?page=%d" % x
        page_queue.put(page_url)
    for x in range(5):
        t = Producer(page_queue, img_queue)
        t.start()
    for x in range(5):
        t = Consumer(page_queue, img_queue)
        t.start()


if __name__ == '__main__':
    main()
I'm a newbie and don't have permission to post images yet.
Screenshot link: https://imgchr.com/i/YpPZss
Post it with an image host:
https://imgchr.com/

liuzhengyuan posted on 2020-5-3 18:09:
Post it with an image host:
https://imgchr.com/

OK, got it.

不懂python:
1. Your queue capacity is 3, so why start 5 Producers and 5 Consumers?
2. Does the run function get executed automatically?
3. Does the main thread need to wait for the Producers and Consumers to exit?
4. Is Queue thread-safe?
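To make points 2 to 4 concrete: start() is what schedules run() in a new thread (calling run() yourself would just run it in the current thread), queue.Queue does its own locking, and join() is how the main thread waits for workers. A likely reason the original code ends with nothing downloaded is that the 5 Producers drain the 3-item page_queue almost immediately, so every Consumer can see both queues empty before a single image URL has been produced and break right away. Below is a minimal, self-contained sketch of these mechanics with made-up worker names, not code from this thread:

import threading
from queue import Queue

def square_worker(tasks, results):
    # Runs in its own thread once Thread.start() is called; queue.Queue
    # handles the locking, so get()/put() are safe across threads.
    while True:
        n = tasks.get()          # blocks until an item is available
        if n is None:            # sentinel value: time to stop
            break
        results.put(n * n)

tasks = Queue()
results = Queue()
workers = [threading.Thread(target=square_worker, args=(tasks, results)) for _ in range(3)]

for w in workers:
    w.start()                    # start() runs run()/target in a new thread
for n in range(10):
    tasks.put(n)
for _ in workers:
    tasks.put(None)              # one sentinel per worker so every loop exits
for w in workers:
    w.join()                     # the main thread waits here for the workers to finish

print(sorted(results.get() for _ in range(10)))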
suchocolate (last edited 2020-5-11 18:11):

import requests
from lxml import etree
from urllib import request
import threading
from queue import Queue


class Producer(threading.Thread):
    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.headers = {'user-agent': 'firefox'}  # changed
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        while not self.page_queue.empty():  # changed
            url = self.page_queue.get()
            self.get_url(url)

    def get_url(self, url):
        response = requests.get(url, headers=self.headers)
        e = etree.HTML(response.text)
        img_urls = e.xpath("//div[@class='page-content text-center']/div/a/img/@data-original")
        for img in img_urls:
            self.img_queue.put(img)


class Consumer(threading.Thread):
    def __init__(self, img_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.img_queue = img_queue  # changed

    def run(self):
        while not self.img_queue.empty():  # changed
            img_url = self.img_queue.get()
            filename = img_url.split("/")[-1]
            request.urlretrieve(img_url, "images/" + filename, None)
            print(img_url)


def main():
    page_queue = Queue(3)
    img_queue = Queue(1000)
    for x in range(1, 4):
        page_url = "https://www.doutula.com/photo/list/?page=%d" % x
        page_queue.put(page_url)
    t_l_p = []  # added
    t_l_c = []  # added
    for x in range(3):  # more threads than queued pages just leaves some threads idle
        t = Producer(page_queue, img_queue)
        t.start()
        t_l_p.append(t)
    for t in t_l_p:
        t.join()  # wait until all page URLs have been processed before moving on
    for x in range(3):
        t = Consumer(img_queue)
        t.start()
        t_l_c.append(t)
    for t in t_l_c:
        t.join()
    print("It's over")


if __name__ == '__main__':
    main()
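The important change above is joining the Producer threads before any Consumer starts, so the empty() check in Consumer.run() only runs once img_queue already holds every URL it is ever going to hold; the trade-off is that page fetching and image downloading no longer overlap. If hand-written Thread subclasses aren't a requirement, the standard library's ThreadPoolExecutor handles the same bookkeeping. The sketch below is only an alternative take on the same task, not suchocolate's code, and the helper names in it are made up; it also creates the images/ directory up front, which urlretrieve does not do for you:

import os
import requests
from lxml import etree
from concurrent.futures import ThreadPoolExecutor

HEADERS = {'user-agent': 'firefox'}

def get_img_urls(page_url):
    # Same XPath as the posts above: pull the lazy-loaded image URLs off one list page.
    r = requests.get(page_url, headers=HEADERS)
    return etree.HTML(r.text).xpath(
        "//div[@class='page-content text-center']/div/a/img/@data-original")

def download(img_url):
    filename = img_url.split("/")[-1]
    with open(os.path.join("images", filename), "wb") as f:
        f.write(requests.get(img_url, headers=HEADERS).content)
    print(img_url)

def main():
    os.makedirs("images", exist_ok=True)  # saving fails if the folder is missing
    pages = ["https://www.doutula.com/photo/list/?page=%d" % x for x in range(1, 4)]
    with ThreadPoolExecutor(max_workers=3) as pool:
        # Collect every image URL first, then download; consuming the lazy map()
        # results with list() also surfaces any exception instead of dropping it.
        img_urls = [u for urls in pool.map(get_img_urls, pages) for u in urls]
        list(pool.map(download, img_urls))

if __name__ == '__main__':
    main()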
Doutula has anti-scraping measures, so a multithreaded scraper will get knocked out.
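The thread doesn't say what doutula actually checks for, so treat this as a generic rate-limiting sketch with guessed numbers rather than a known fix: keep the thread count small and space out request starts instead of firing them all at once.

import time
import random
import threading
import requests

HEADERS = {'user-agent': 'firefox'}
throttle = threading.Lock()

def polite_get(url):
    # Only one thread sleeps at a time, so request starts end up spaced
    # roughly 1 to 2 seconds apart even with several worker threads running.
    with throttle:
        time.sleep(random.uniform(1.0, 2.0))
    return requests.get(url, headers=HEADERS, timeout=10)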