|  | 
 
| 
本帖最后由 Ootme 于 2020-3-3 12:53 编辑
x
马上注册,结交更多好友,享用更多功能^_^您需要 登录 才可以下载或查看,没有账号?立即注册  
 复制代码import urllib.request
import os
import random
import gevent
from gevent import monkey
monkey.patch_all()
user = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
first={0:'w',1:'x',2:'y',3:'z',4:'0',5:'1',6:'2',7:'3',8:'4',9:'5'}
second={0:'TA',1:'MT',2:'Mj',3:'Mz',4:'ND',5:'NT',6:'Nj',7:'Nz',8:'OD',9:'OT',\
        10:'MTA',11:'MTE',12:'MTI',13:'MTM',14:'MTQ',15:'MTU',\
        16:'MTY',17:'MTc',18:'MTg',19:'MTk'}
dict1={1:'MQ',2:'Mg',3:'Mw',4:'NA',5:'NQ',6:'Ng',7:'Nw',8:'OA',9:'OQ'}
ip=['114.139.34.209:8118','123.169.102.252:9999','117.91.252.141:9999','175.44.108.143:9999']
def url_open(url):
    proxy = random.choice(ip)
    proxy_support = urllib.request.ProxyHandler({'https':proxy})
    opener = urllib.request.build_opener(proxy_support)
    urllib.request.install_opener(opener)
    
    req = urllib.request.Request(url)
    req.add_header('User-Agent',user)
    response = urllib.request.urlopen(req)
    html = response.read()
    return html
def get_page(url):
    html=url_open(url).decode("utf-8")
   
    a=html.find("current-comment-page")+23    
    b=html.find("]",a)    
    return html[a:b]
        
def img_find(url):
    html=url_open(url).decode('utf-8')
    img_addr = []
    a=html.find('img src=')
    while a !=-1:
        b=html.find('.jpg',a,a+255)
        if b != -1:
            img_addr.append('http:'+html[a+9:b+4])
        else :
            b=a+9
        a=html.find('img src=',b)
    print(img_addr)
    return img_addr    
    
def save(folder,img_addr):
    os.chdir(folder)
    for each in img_addr:
        name = each.split('/')[-1]
        with open(name,'wb') as f:
            img = url_open(each)
            f.write(img)
            
    
def download(xin):
    folder,begin,page = xin
    folder = 'D:\python\jichuu\多任务\协程\\' + folder
    os.mkdir(folder)
    if begin == 'begin':
        page_num=int(get_page(url))
    else :
        page_num = begin
    
    for i in range(page):
        page_num -=i
        url = 'http://www.jandan.net/ooxx/'
        if page_num >=10:
            url =url + 'MjAyMDAxMDct'+second[page_num//10]+ first[page_num%10]+ '=#comments'
            img_addr = img_find(url)
        else :
            url =url + 'MjAyMDAxMDct'+ dict1[page_num%10]+ '=#comments'
            img_addr = img_find(url)
        save(folder,img_addr)
def main():
    g1=gevent.spawn(download,('ooxx1',180,2))
    g2=gevent.spawn(download,('ooxx2',180,2))
    g1.join()
    g2.join()
    
 
if __name__ == '__main__':
    main()
 前面应该没什么重要的,应该是最后的main函数出了问题
 为什么我下载的两页一样的图片,两个文件夹的图片数量不一样
 两个线程结束的时间为什么会不一样啊。。
 | 
 |