故里 发表于 2021-10-21 16:58:11

我这个代码为什么没反应啊

import json
import requests
from requests.exceptions import RequestException
import re
import time

def get_one_page(url):
    """Fetch *url* and return the decoded page body as a str.

    Returns None when the request fails or the server does not answer 200.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36'
        }
        # A timeout keeps a stalled connection from hanging the script forever.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            # BUG fix: 'ANSI' is a Windows-only codec alias and raises
            # LookupError on other platforms. stats.gov.cn pages of this era
            # are GB2312/GBK, and 'gbk' is a portable superset of it.
            return response.content.decode('gbk', errors='replace')
        return None
    except RequestException:
        return None

url = 'http://www.stats.gov.cn/tjsj/pcsj/rkpc/6rp/left.htm'

html = get_one_page(url)
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'lxml')

# Collect the link text and the absolute download URL of every non-.htm
# (i.e. spreadsheet) link in the page's <ul>/<li> navigation tree.
data_name_list = []
data_xls_list = []
pre_url = 'http://www.stats.gov.cn/tjsj/pcsj/rkpc/6rp/'
for ul in soup.find_all('ul'):
    for li in ul.find_all(name='li'):
        a = li.a
        # 数据格式为xls, 去掉其他的 — keep only links whose href does not
        # end in 'm' (filters the .htm navigation pages out).
        # Also guard against <a> tags that carry no href attribute.
        if a is not None and 'href' in a.attrs and a.attrs['href'][-1] != 'm':
            data_name_list.append(li.get_text())
            data_xls_list.append(pre_url + a.attrs['href'])

# BUG fix: `import urllib` alone does not load the `request` submodule;
# urllib.request.urlretrieve would raise AttributeError.
import urllib.request
import os

path = 'C:\\Users\\'
for i, url in enumerate(data_xls_list):
    print(url)
    # BUG fix: the original concatenated the whole list with '.xls'
    # (`data_name_list + '.xls'` -> TypeError). Index the i-th name instead.
    filename = os.path.join(path, data_name_list[i] + '.xls')
    urllib.request.urlretrieve(url, filename)

大马强 发表于 2021-10-21 18:43:30

if a != None and a.attrs['href'][-1] != 'm':
你这是想爬啥?

大马强 发表于 2021-10-21 18:51:44

大马强 发表于 2021-10-21 18:43
你这是想爬啥?

你这个判断导致后面的一个都拿不到

大马强 发表于 2021-10-21 18:54:09

def get_one_page(url):
    """Download *url* and return its body decoded to text.

    Gives back None on any network error or non-200 status, so callers
    can test the result instead of catching exceptions.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36'
    }
    try:
        # Bound the wait so an unresponsive server cannot block forever.
        response = requests.get(url, headers=headers, timeout=10)
    except RequestException:
        return None
    if response.status_code != 200:
        return None
    # BUG fix: 'ANSI' only exists as a codec alias on Windows; decode with
    # 'gbk' (superset of the site's GB2312 encoding) for portability.
    return response.content.decode('gbk', errors='replace')


url = 'http://www.stats.gov.cn/tjsj/pcsj/rkpc/6rp/left.htm'

html = get_one_page(url)

soup = BeautifulSoup(html, 'lxml')

# Gather link captions and absolute URLs from the first <ul> only
# (the original `break` after the first <ul> is preserved).
data_name_list = []
data_xls_list = []
pre_url = 'http://www.stats.gov.cn/tjsj/pcsj/rkpc/6rp/'
for ul in soup.find_all('ul'):
    for li in ul.find_all(name='li'):
        # BUG fix: the original wrapped `a = li.a['href']` in a bare
        # try/except pass, which leaves `a` unbound on the first failure
        # and holding the PREVIOUS item's href on later ones. Guard
        # explicitly instead of silently swallowing the error.
        if li.a is None or 'href' not in li.a.attrs:
            continue
        href = li.a['href']
        # Keep links ending in 'm' (.htm), matching the reply's selection.
        if href[-1] == 'm':
            data_name_list.append(li.get_text())
            data_xls_list.append(pre_url + href)
    break

path = 'C:\\Users\\'
for name, url in zip(data_name_list, data_xls_list):
    print(url)
    # BUG fix: `data_name_list + '.xls'` added a list to a str -> TypeError.
    # Pair each name with its URL via zip and build the filename from it.
    filename = os.path.join(path, name + '.xls')
    urllib.request.urlretrieve(url, filename)
页: [1]
查看完整版本: 我这个代码为什么没反应啊