君子好逑 发表于 2020-8-20 18:53:58

爬虫

import urllib.request
import re


# Output file for the scraped novel.  An explicit UTF-8 encoding makes the
# output independent of the platform's locale default, which may be unable
# to encode the GBK-decoded chapter text (e.g. an ASCII/C locale on Linux).
f = open('无敌真寂寞.txt', 'w', encoding='utf-8')


def get_html(url):
    """Fetch *url* and return the page body decoded as GBK.

    Undecodable bytes are dropped ('ignore') because the site's pages
    occasionally contain bytes outside the GBK range.

    Fix: the original never closed the HTTP response; ``with`` now
    guarantees the socket is released even if ``read()`` raises.
    """
    headers = {
        'Accept-Language': 'zh-CN',
        'Cache-Control': 'no-cache',
        'Connection': 'Keep-Alive',
        # A desktop browser UA so the site does not reject the request.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363'
    }

    req = urllib.request.Request(url=url, headers=headers)
    with urllib.request.urlopen(req) as response:
        raw = response.read()
    return raw.decode("gbk", 'ignore')

def get_text(html):
    """Extract paragraph text from *html*, echo it to stdout and append it to f.

    Each paragraph in the page ends with ``<br><br>`` followed by four
    ``&nbsp;`` entities (32 characters in total), which is what the
    regex anchors on and what gets trimmed off afterwards.
    """
    paragraph_re = re.compile(".*?<br><br>&nbsp;&nbsp;&nbsp;&nbsp;")
    for paragraph in paragraph_re.findall(html):
        paragraph = paragraph[:-32]  # strip the trailing break/indent markup
        print('   ', end='')
        # Leading indent entities still present -> keep only the text after
        # the last ';' (the tail of the final entity); otherwise use as-is.
        if '&nbsp;&nbsp;&nbsp;&nbsp;' in paragraph:
            line = paragraph.split(';')[-1]
        else:
            line = paragraph
        print(line)
        f.write(line)
        f.write('\n')

def get_title(html):
    """Return the chapter title from the first ``<h1>...</h1>`` tag in *html*.

    Returns an empty string when no ``<h1>`` tag is found.

    Fix: the original contained a no-op ``title = title`` and returned the
    whole ``findall()`` list, which made the later ``f.write(title)`` call
    in ``main`` fail with TypeError (write expects a str, not a list).
    """
    matches = re.findall('<h1>(.*?)</h1>', html)
    return matches[0] if matches else ''

def get_next(html):
    """Return the absolute URL of the next chapter parsed from *html*.

    The next-chapter link is the anchor that follows the "章节列表"
    (chapter list) anchor in the page's navigation row.
    """
    base = 'https://www.biduo.cc'
    nav_re = re.compile(r'.*?">上一章</a> &larr; <a href=".*?">章节列表</a> .*?; <a href="(.*?)"')
    candidates = nav_re.findall(html)
    # Use the last occurrence, matching the original pop() behaviour;
    # raises IndexError when the navigation row is missing.
    return base + candidates[-1]


def main():
    """Scrape consecutive chapters, following each page's next-chapter link.

    Prompts for how many chapters to fetch, starts from the hard-coded
    first-chapter URL, and appends title + body of each chapter to the
    module-level file ``f``.
    """
    number = int(input('请输入您想获取的章节总数:'))
    url = 'https://www.biduo.cc/biquge/39_39888/c13353637.html'
    for _ in range(number):
        html = get_html(url)
        title = get_title(html)
        # get_title historically returned the raw findall() list; coerce to
        # a plain string so the f.write(title) call below cannot raise
        # TypeError regardless of which version of get_title is in use.
        if not isinstance(title, str):
            title = title[0] if title else ''
        print(title)
        next_address = get_next(html)
        f.write(title)
        f.write('\n')
        f.write('\n')
        get_text(html)
        url = next_address
        f.write('\n')
        f.write('\n')

    f.close()

# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
爬的小说叫无敌真寂寞,喜欢看小说的可以试一试这个程序。有大佬要是把程序改进了能给我发一波吗,让我观摩观摩

小甲鱼的铁粉 发表于 2020-8-20 18:58:58

{:10_275:}

1q23w31 发表于 2020-8-20 19:17:33

import urllib.request
import re


# Output file for the scraped novel.  An explicit UTF-8 encoding makes the
# output independent of the platform's locale default, which may be unable
# to encode the GBK-decoded chapter text (e.g. an ASCII/C locale on Linux).
f = open('无敌真寂寞.txt', 'w', encoding='utf-8')


def get_html(url):
    """Fetch *url* and return the page body decoded as GBK.

    Undecodable bytes are dropped ('ignore') because the site's pages
    occasionally contain bytes outside the GBK range.

    Fix: the original never closed the HTTP response; ``with`` now
    guarantees the socket is released even if ``read()`` raises.
    """
    headers = {
        'Accept-Language': 'zh-CN',
        'Cache-Control': 'no-cache',
        'Connection': 'Keep-Alive',
        # A desktop browser UA so the site does not reject the request.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363'
    }

    req = urllib.request.Request(url=url, headers=headers)
    with urllib.request.urlopen(req) as response:
        raw = response.read()
    return raw.decode("gbk", 'ignore')

def get_text(html):
    """Extract paragraph text from *html* and append one line per paragraph to f.

    Each paragraph in the page ends with ``<br><br>`` followed by four
    ``&nbsp;`` entities (32 characters in total), which is what the
    regex anchors on and what gets trimmed off afterwards.
    """
    paragraph_re = re.compile(".*?<br><br>&nbsp;&nbsp;&nbsp;&nbsp;")
    for raw in paragraph_re.findall(html):
        raw = raw[:-32]  # strip the trailing break/indent markup
        # Leading indent entities still present -> keep only the text after
        # the last ';' (the tail of the final entity); otherwise use as-is.
        line = raw.split(';')[-1] if '&nbsp;&nbsp;&nbsp;&nbsp;' in raw else raw
        f.write(line)
        f.write('\n')

def get_title(html):
    """Return the chapter title from the first ``<h1>...</h1>`` tag in *html*.

    Returns an empty string when no ``<h1>`` tag is found.

    Fix: the original contained a no-op ``title = title`` and returned the
    whole ``findall()`` list, which made the later ``f.write(title)`` call
    in ``main`` fail with TypeError (write expects a str, not a list).
    """
    matches = re.findall('<h1>(.*?)</h1>', html)
    return matches[0] if matches else ''

def get_next(html):
    """Return the absolute URL of the next chapter parsed from *html*.

    The next-chapter link is the anchor that follows the "章节列表"
    (chapter list) anchor in the page's navigation row.
    """
    base = 'https://www.biduo.cc'
    nav_re = re.compile(r'.*?">上一章</a> &larr; <a href=".*?">章节列表</a> .*?; <a href="(.*?)"')
    candidates = nav_re.findall(html)
    # Use the last occurrence, matching the original pop() behaviour;
    # raises IndexError when the navigation row is missing.
    return base + candidates[-1]


def main():
    """Scrape consecutive chapters, following each page's next-chapter link.

    Prompts for how many chapters to fetch, starts from the hard-coded
    first-chapter URL, and appends title + body of each chapter to the
    module-level file ``f``.  Prints a completion message when done.
    """
    number = int(input('请输入您想获取的章节总数:'))
    url = 'https://www.biduo.cc/biquge/39_39888/c13353637.html'
    for _ in range(number):
        html = get_html(url)
        title = get_title(html)
        # get_title historically returned the raw findall() list; coerce to
        # a plain string so the f.write(title) call below cannot raise
        # TypeError regardless of which version of get_title is in use.
        if not isinstance(title, str):
            title = title[0] if title else ''
        next_address = get_next(html)
        f.write(title)
        f.write('\n' * 2)
        get_text(html)
        url = next_address
        f.write('\n' * 2)
    f.close()
    print('已完成')

# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()

运行显示精简
页: [1]
查看完整版本: 爬虫