L嘉 发表于 2020-8-3 15:53:26

怎样才能让我爬取下来的数据保存到EXCEL表或者CSV文件里呢

# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 13:43:56 2020

Scrape community (xiaoqu) names and location info from ke.com listing
pages and print them, one "title, positionInfo" pair per line.

@author: Administrator
"""

import os, re
import requests
import random
import time
from bs4 import BeautifulSoup
import xlrd


# Pool of desktop browser User-Agent strings; one is chosen at random per
# run so the requests look less like an automated client.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
UA = random.choice(user_agent_list)  # pick one random UA string from the pool
headers = {'User-Agent': UA}  # build the request headers around the chosen UA

# Listing base URL; the 1-based page number is appended after 'pg'.
url = 'https://cd.ke.com/xiaoqu/damian/pg'

for x in range(1, 3):

    # Random pause between page fetches to avoid hammering the server.
    time.sleep(random.randint(2, 5))

    with requests.get(url + str(x) + 'ddo22p7', headers=headers, timeout=5) as response:

        soup = BeautifulSoup(response.text, 'lxml')

        # print(soup.title.text)  # print the page title (debug)

        li_list = soup.find('ul', class_='listContent').find_all('li')
        for li_quick in li_list:

            try:
                # Community name.
                title = li_quick.find('div', class_='title').a.get_text().strip()

                # Location / position info.
                positionInfo = li_quick.find('div', class_='positionInfo').get_text().strip()

            except AttributeError:
                # find() returned None (item lacks the expected markup); skip it.
                continue
            else:
                # BUG FIX: the original used `finally:` here, which also runs
                # after the `continue` above — printing a stale pair from the
                # previous item, or raising NameError if the very first item
                # failed. `else:` runs only when extraction succeeded.
                print(title, ',', positionInfo)

Twilight6 发表于 2020-8-3 16:12:50



这样吧,直接写入 csv 了,代码中多余的空格也帮你替换掉了,试试看(注意:如果 Excel 打开出现乱码,那么通过笔记本打开即可):

# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 13:43:56 2020

Scrape community (xiaoqu) names and location info from ke.com listing
pages and write them to data.csv as "title,positionInfo" rows, also
echoing each row to stdout.

@author: Administrator
"""

import os, re
import requests
import random
import time
from bs4 import BeautifulSoup
import xlrd

# Pool of desktop browser User-Agent strings; one is chosen at random per
# run so the requests look less like an automated client.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
UA = random.choice(user_agent_list)  # pick one random UA string from the pool
headers = {'User-Agent': UA}  # build the request headers around the chosen UA

# Listing base URL; the 1-based page number is appended after 'pg'.
url = 'https://cd.ke.com/xiaoqu/damian/pg'

# BUG FIX: the original opened data.csv in 'w' mode INSIDE the page loop,
# so every page truncated the rows written for the previous page — only the
# last page's data survived. Open it once, before the loop, and let the
# `with` block guarantee it is closed even if a request raises.
# NOTE(review): if Excel shows mojibake, encoding='utf-8-sig' would add a
# BOM that Excel recognizes — confirm before changing.
with open('data.csv', 'w', encoding='utf-8') as file:  # change the file name as you like
    for x in range(1, 3):

        # Random pause between page fetches to avoid hammering the server.
        time.sleep(random.randint(2, 5))

        with requests.get(url + str(x) + 'ddo22p7', headers=headers, timeout=5) as response:

            soup = BeautifulSoup(response.text, 'lxml')

            # print(soup.title.text)  # print the page title (debug)

            li_list = soup.find('ul', class_='listContent').find_all('li')
            for li_quick in li_list:

                try:
                    # Community name.
                    title = li_quick.find('div', class_='title').a.get_text().strip()

                    # Location info, collapsed to comma-separated fields.
                    positionInfo = ','.join(li_quick.find('div', class_='positionInfo').get_text().replace('\n','').replace(' ','').replace('/','').split())

                except AttributeError:
                    # find() returned None (item lacks the expected markup); skip it.
                    continue
                else:
                    # BUG FIX: the original used `finally:` here, which also
                    # runs after the `continue` above — writing a stale row
                    # from the previous item, or raising NameError if the very
                    # first item failed. `else:` runs only on success.
                    file.write(title + ',' + positionInfo + '\n')
                    print(title, ',', positionInfo)

L嘉 发表于 2020-8-3 17:22:06

Twilight6 发表于 2020-8-3 16:12
这样吧,直接写入 csv 了,代码中多余的空格也帮你替换掉了,试试看(注意:如果 Excel 打开出现乱码, ...

太感谢你了,5星好评,但是还有最后一个问题请教你一下,我爬取的目标有62个,但是获取下来的资料怎么少了很多呢

Twilight6 发表于 2020-8-3 17:27:56

L嘉 发表于 2020-8-3 17:22
太感谢你了,5星好评,但是还有最后一个问题请教你一下,我爬取的目标有62个,但是获取下来的资料怎么少 ...

哈哈,我的错,我不小心把 打开文件放 for 循环内了,但是只爬到 49 个欸:

# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 13:43:56 2020

Scrape community (xiaoqu) names and location info from ke.com listing
pages and write them to data.csv as "title,positionInfo" rows, also
echoing each row to stdout.

@author: Administrator
"""

import os, re
import requests
import random
import time
from bs4 import BeautifulSoup
import xlrd

# Pool of desktop browser User-Agent strings; one is chosen at random per
# run so the requests look less like an automated client.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
UA = random.choice(user_agent_list)  # pick one random UA string from the pool
headers = {'User-Agent': UA}  # build the request headers around the chosen UA

# Listing base URL; the 1-based page number is appended after 'pg'.
url = 'https://cd.ke.com/xiaoqu/damian/pg'

# Open the output file once, before the page loop; the `with` block
# guarantees it is closed even if a request raises (the original's manual
# file.close() was skipped on any exception).
with open('data.csv', 'w', encoding='utf-8') as file:  # change the file name as you like
    for x in range(1, 3):

        # Random pause between page fetches to avoid hammering the server.
        time.sleep(random.randint(2, 5))

        with requests.get(url + str(x) + 'ddo22p7', headers=headers, timeout=5) as response:

            soup = BeautifulSoup(response.text, 'lxml')

            # print(soup.title.text)  # print the page title (debug)

            li_list = soup.find('ul', class_='listContent').find_all('li')
            for li_quick in li_list:
                try:
                    # Community name.
                    title = li_quick.find('div', class_='title').a.get_text().strip()

                    # Location info, collapsed to comma-separated fields.
                    positionInfo = ','.join(li_quick.find('div', class_='positionInfo').get_text().replace('\n','').replace(' ','').replace('/','').split())

                except AttributeError:
                    # find() returned None (item lacks the expected markup); skip it.
                    continue
                else:
                    # BUG FIX: the original used `finally:` here, which also
                    # runs after the `continue` above — writing a stale row
                    # from the previous item, or raising NameError if the very
                    # first item failed. `else:` runs only on success.
                    # (This duplicate/missing-row behavior is the likely cause
                    # of the "62 expected, 49 scraped" mismatch in the thread.)
                    file.write(title + ',' + positionInfo + '\n')
                    print(title, ',', positionInfo)

L嘉 发表于 2020-8-3 19:55:44

Twilight6 发表于 2020-8-3 17:27
哈哈,我的错,我不小心把 打开文件放 for 循环内了,但是只爬到 49 个欸:

好的 感谢大神给我这个菜鸟教学{:5_106:}
页: [1]
查看完整版本: 怎样才能让我爬取下来的数据保存到EXCEL表或者CSV文件里呢