shiyouroc 发表于 2022-7-20 18:34:21

堆糖爬虫

因为本人能力问题,爬图片不能爬全。
而且,也没有多线程。

from urllib.request import *
import random
import os
from urllib.error import *
import re
import time
f_img = []
url = input('请输入网址')
head ={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.48 Safari/537.36 Edg/104.0.1293.25'}
html = None

def get_html(url):
    global html
    url =url
    head ={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.48 Safari/537.36 Edg/104.0.1293.25'}
    req = Request(url,None,head)
    response = urlopen(req)
    html = response.read().decode('utf-8')
   


def find_img():
    global html
    global f_img
   
    while True:
      
      
      xx = re.search(r'data-iid="" src="*"',html)
      yy = re.search(r'thumb\.400_0\.jpg" height',html)
      
      if yy == None:
            break;
      else:
            xxx = str(yy)
            xxx = xxx
            yyy = str(xx)
            yyy = yyy
            yyy = int(yyy)
            xxx = int(xxx)
            xxx = xxx+15
            y = html
            print(y)
            savemm(y,y)
            html = html

def savemm(url,y):
    y = url
    y = y+str(time.time())
    y = y+'.jpg'
    try:
      url = url
      iplist = ['58.221.154.46:9091','112.14.40.137:9091','223.94.85.131:9091','120.237.144.200:9091','183.222.217.168:9091','123.180.189.135:9091','222.179.155.90:9091','183.239.62.251:9091','221.181.238.59:9091','112.14.40.137:9091','59.56.142.185:9091','222.77.85.15:9091']
      head ={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.48 Safari/537.36 Edg/104.0.1293.25'}
      x = random.choice(iplist)
      proxy_support = ProxyHandler({'http':x})
      opener = build_opener(proxy_support)
      opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.65 Safari/537.36')]
      install_opener(opener)
      response = urlopen(url)
      html = response.read()
      
      with open(y,'wb') as f:
            f.write(html)
            f.close()
    except:
      savemm(url,y)
      
      
   
      

get_html(url)

find_img()
页: [1]
查看完整版本: 堆糖爬虫