二农戏猪` 发表于 2020-8-11 18:24:38

【求助】煎蛋爬妹子图程序出现不知名错误

import urllib.request
import os
import base64
import re

def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read()
    print(html)
   
    return html

   
def get_page(url):
    html = url_open(url).decode('utf-8')

    a = r'<span class="current-comment-page">\[(\d+)\]</span>'
    page_list = re.findall(a,html)

    for each in page_list:
      t = each

    return t

      
def base_num(page_num):
    times = '20200811-' + str(page_num)
    s = times.encode('utf-8')
    sw = base64.b64encode(s)
    str_sw = str(sw)
    string = str_sw.split("'")[-1]
    print (string)
    return string


def find_pic_url(url):
    html = url_open(url).decode('utf-8')
    pic_url_list = []


    a = r'<img src="([^"]+\.jpg)"'
    b = re.findall(a,html)
   
    if not len(b):
      pic_url_list.append(b)

    for each in pic_url_list:
      print(each)

      
def save_pic(folder, pic_url):
    pass

def download_mm(folder = 'mm', pages = 10):
    #创建一个文件夹
    os.mkdir(folder)
    os.chdir(folder)

    url = 'http://jandan.net/ooxx/'
    #获取页码
    page_num = int(get_page(url))

    for i in range(pages):
      page_num -=i
      #base64加密
      x = base_num(page_num)
      #获取页码地址
      page_url = url + x + '#comments'
      #获取图片具体地址并保存成列表
      pic_url = find_pic_url(page_url)
      save_pic(folder, pic_url)

      
if __name__ == '__main__':
    download_mm()


我这个到底哪里出错我也拿不准,求大神

1q23w31 发表于 2020-8-11 18:29:04

报错信息是啥

weiter 发表于 2020-8-11 18:34:36

煎蛋的妹子图还有???
弹幕上都说煎蛋的图没了……

Twilight6 发表于 2020-8-11 18:35:48



是因为你没有把创建的文件夹删除吧,把你代码下 mm 文件夹删除即可,然后代码就正常运行了


二农戏猪` 发表于 2020-8-11 20:26:38

Twilight6 发表于 2020-8-11 18:35
是因为你没有把创建的文件夹删除吧,把你代码下 mm 文件夹删除即可,然后代码就正常运行了

不是,我知道这个错误 它一直刷新Squeezed text (884lines)

二农戏猪` 发表于 2020-8-11 20:27:54

1q23w31 发表于 2020-8-11 18:29
报错信息是啥

Squeezed text (884lines)

1q23w31 发表于 2020-8-11 20:29:22

二农戏猪` 发表于 2020-8-11 20:27
Squeezed text (884lines)

点开它

二农戏猪` 发表于 2020-8-11 20:35:57

weiter 发表于 2020-8-11 18:34
煎蛋的妹子图还有???
弹幕上都说煎蛋的图没了……

有 叫随手拍

二农戏猪` 发表于 2020-8-11 20:36:49

1q23w31 发表于 2020-8-11 20:29
点开它

我就卡了

1q23w31 发表于 2020-8-11 21:17:05

二农戏猪` 发表于 2020-8-11 20:36
我就卡了

import urllib.request
import os
import base64
import re


def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent',
                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read()
    #print(html)

    return html


def get_page(url):
    html = url_open(url).decode('utf-8')

    a = r'<span class="current-comment-page">\[(\d+)\]</span>'
    page_list = re.findall(a, html)

    for each in page_list:
      t = each

    return t


def base_num(page_num):
    times = '20200811-' + str(page_num)
    s = times.encode('utf-8')
    sw = base64.b64encode(s)
    str_sw = str(sw)
    string = str_sw.split("'")[-1]
    print(string)
    return string


def find_pic_url(url):
    html = url_open(url).decode('utf-8')
    pic_url_list = []

    a = r'<img src="([^"]+\.jpg)"'
    b = re.findall(a, html)

    if not len(b):
      pic_url_list.append(b)

    for each in pic_url_list:
      print(each)


def save_pic(folder, pic_url):
    pass


def download_mm(folder='mm', pages=10):
    # 创建一个文件夹
    os.mkdir(folder)
    os.chdir(folder)

    url = 'http://jandan.net/ooxx/'
    # 获取页码
    page_num = int(get_page(url))

    for i in range(pages):
      page_num -= i
      # base64加密
      x = base_num(page_num)
      # 获取页码地址
      page_url = url + x + '#comments'
      # 获取图片具体地址并保存成列表
      pic_url = find_pic_url(page_url)
      save_pic(folder, pic_url)


if __name__ == '__main__':
    download_mm()

看起来,你的代码没完成,程序没问题,继续写吧(已修改)

二农戏猪` 发表于 2020-8-11 21:23:23

1q23w31 发表于 2020-8-11 21:17
看起来,你的代码没完成,程序没问题,继续写吧(已修改)

好,我试试,谢谢啦

二农戏猪` 发表于 2020-8-11 21:31:22

1q23w31 发表于 2020-8-11 21:17
看起来,你的代码没完成,程序没问题,继续写吧(已修改)

还是不行啊{:5_102:}

1q23w31 发表于 2020-8-11 21:31:56

二农戏猪` 发表于 2020-8-11 21:31
还是不行啊

??

二农戏猪` 发表于 2020-8-11 21:34:56

1q23w31 发表于 2020-8-11 21:31
??

我写完了 还是没下载

1q23w31 发表于 2020-8-11 21:35:30

二农戏猪` 发表于 2020-8-11 21:34
我写完了 还是没下载

把写完的发一下

二农戏猪` 发表于 2020-8-11 21:35:59

1q23w31 发表于 2020-8-11 21:35
把写完的发一下

import urllib.request
import os
import base64
import re

def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read()
    print(html)
   
    return html

   
def get_page(url):
    html = url_open(url).decode('utf-8')

    a = r'<span class="current-comment-page">\[(\d+)\]</span>'
    page_list = re.findall(a,html)

    for each in page_list:
      t = each

      return t

      
def base_num(page_num):
    times = '20200811-' + str(page_num)
    s = times.encode('utf-8')
    sw = base64.b64encode(s)
    str_sw = str(sw)
    string = str_sw.split("'")[-1]
   
    return string


def find_pic_url(url):
    html = url_open(url).decode('utf-8')
    pic_url_list = []


    a = r'<img src="([^"]+\.jpg)"'
    b = re.findall(a,html)
   
    if not len(b):
      pic_url_list.append(b)

    return pic_url_list

      
def save_pic(folder, pic_url_list):
    for each in pic_url_list:
      filename = each.split('/')[-1]
      with open(filename, 'wb') as f:
            img = open_url(each)
            f.write(img)

def download_mm(folder = 'mm', pages = 10):
    #创建一个文件夹
    os.mkdir(folder)
    os.chdir(folder)

    url = 'http://jandan.net/ooxx/'
    #获取页码
    page_num = int(get_page(url))

    for i in range(pages):
      page_num -=i
      #base64加密
      x = base_num(page_num)
      #获取页码地址
      page_url = url + x + '#comments'
      #获取图片具体地址并保存成列表
      pic_url = find_pic_url(page_url)
      save_pic(folder, pic_url_list)

      
if __name__ == '__main__':
    download_mm()

二农戏猪` 发表于 2020-8-11 21:41:19

1q23w31 发表于 2020-8-11 21:35
把写完的发一下

iimport urllib.request
import os
import base64
import re

def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read()
    print(html)
   
    return html

   
def get_page(url):
    html = url_open(url).decode('utf-8')

    a = r'<span class="current-comment-page">\[(\d+)\]</span>'
    page_list = re.findall(a,html)

    for each in page_list:
      t = each

    return t

      
def base_num(page_num):
    times = '20200811-' + str(page_num)
    s = times.encode('utf-8')
    sw = base64.b64encode(s)
    str_sw = str(sw)
    string = str_sw.split("'")[-1]
   
    return string


def find_pic_url(url):
    html = url_open(url).decode('utf-8')
    pic_url = []


    a = r'<img src="([^"]+\.jpg)"'
    b = re.findall(a,html)
   
    if not len(b):
      pic_url.append(b)

    return pic_url

      
def save_pic(folder, pic_url):
    for each in pic_url:
      filename = each.split('/')[-1]
      with open(filename, 'wb') as f:
            img = open_url(each)
            f.write(img)

def download_mm(folder = 'mm', pages = 10):
    #创建一个文件夹
    os.mkdir(folder)
    os.chdir(folder)

    url = 'http://jandan.net/ooxx/'
    #获取页码
    page_num = int(get_page(url))

    for i in range(pages):
      page_num -=i
      #base64加密
      x = base_num(page_num)
      #获取页码地址
      page_url = url + x + '#comments'
      #获取图片具体地址并保存成列表
      pic_url = find_pic_url(page_url)
      save_pic(folder, pic_url)

      
if __name__ == '__main__':
    download_mm()

二农戏猪` 发表于 2020-8-11 21:43:21

import urllib.request
import os
import base64
import re

def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read()
    print(html)
   
    return html

   
def get_page(url):
    html = url_open(url).decode('utf-8')

    a = r'<span class="current-comment-page">\[(\d+)\]</span>'
    page_list = re.findall(a,html)

    for each in page_list:
      t = each

    return t

      
def base_num(page_num):
    times = '20200811-' + str(page_num)
    s = times.encode('utf-8')
    sw = base64.b64encode(s)
    str_sw = str(sw)
    string = str_sw.split("'")[-1]
   
    return string


def find_pic_url(url):
    html = url_open(url).decode('utf-8')
    pic_url = []


    a = r'<img src="([^"]+\.jpg)"'
    b = re.findall(a,html)
   
    if not len(b):
      pic_url.append(b)

    return pic_url

      
def save_pic(folder, pic_url):
    for each in pic_url:
      filename = each.split('/')[-1]
      with open(filename, 'wb') as f:
            img = open_url(each)
            f.write(img)

def download_mm(folder = 'mm', pages = 10):
    #创建一个文件夹
    os.mkdir(folder)
    os.chdir(folder)

    url = 'http://jandan.net/ooxx/'
    #获取页码
    page_num = int(get_page(url))

    for i in range(pages):
      page_num -=i
      #base64加密
      x = base_num(page_num)
      #获取页码地址
      page_url = url + x + '#comments'
      #获取图片具体地址并保存成列表
      pic_url = find_pic_url(page_url)
      save_pic(folder, pic_url)

      
if __name__ == '__main__':
    download_mm()

二农戏猪` 发表于 2020-8-11 21:48:33

需要审核

二农戏猪` 发表于 2020-8-11 21:50:25

1q23w31 发表于 2020-8-11 21:35
把写完的发一下

import urllib.request
import os
import base64
import re

def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read()
    print(html)
   
    return html

   
def get_page(url):
    html = url_open(url).decode('utf-8')

    a = r'<span class="current-comment-page">\[(\d+)\]</span>'
    page_list = re.findall(a,html)

    for each in page_list:
      t = each

      return t

      
def base_num(page_num):
    times = '20200811-' + str(page_num)
    s = times.encode('utf-8')
    sw = base64.b64encode(s)
    str_sw = str(sw)
    string = str_sw.split("'")[-1]
   
    return string


def find_pic_url(url):
    html = url_open(url).decode('utf-8')
    pic_url = []


    a = r'<img src="([^"]+\.jpg)"'
    b = re.findall(a,html)
   
    if not len(b):
      pic_url.append(b)

    return pic_url

      
def save_pic(folder, pic_url):
    for each in pic_url:
      filename = each.split('/')[-1]
      with open(filename, 'wb') as f:
            img = open_url(each)
            f.write(img)

def download_mm(folder = 'mm', pages = 10):
    #创建一个文件夹
    os.mkdir(folder)
    os.chdir(folder)

    url = 'http://jandan.net/ooxx/'
    #获取页码
    page_num = int(get_page(url))

    for i in range(pages):
      page_num -=i
      #base64加密
      x = base_num(page_num)
      #获取页码地址
      page_url = url + x + '#comments'
      #获取图片具体地址并保存成列表
      pic_url = find_pic_url(page_url)
      save_pic(folder, pic_url)

      
if __name__ == '__main__':
    download_mm()
页: [1] 2
查看完整版本: 【求助】煎蛋爬妹子图程序出现不知名错误