from selenium import webdriver  # browser automation
from bs4 import BeautifulSoup
import re
import time
import requests
import os

# Send a regular-browser User-Agent so the image requests are less likely to be blocked
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36"
}

# Launch Firefox (adjust the geckodriver path to your own machine)
#Firefox_options = Options()
#Firefox_options.add_argument('--headless')  # run the browser without a visible window
browser = webdriver.Firefox(executable_path="C:\\Program Files\\Mozilla Firefox\\geckodriver.exe")  #, options=Firefox_options)

# Open the file that lists the search keywords to collect
csv_str = open('采集文件.csv', 'r')
# Read it line by line
list_d = csv_str.readlines()

# Process each keyword
for str_list_num in list_d:
    str_list = str_list_num.strip('\n')
    url = "https://www.amazon.ca/s?k=" + str_list + "&ref=nb_sb_noss"  # search URL for the keyword
    # Open the search results page
    time.sleep(3)
    browser.get(url)
    # Click through to the "Paperback" result
    browser.find_element_by_xpath("//*[contains(text(),'Paperback')]").click()
    # To expose the full-size image links, click the "see more images" thumbnail first
    time.sleep(2)
    browser.find_element_by_xpath('//span[@class="thumb-text thumb"]').click()
    # Grab the rendered page source as a string
    response = browser.page_source
    time.sleep(5)
    soup = BeautifulSoup(response, 'html.parser')
    # Extract the images
    # Count how many thumbnails the image viewer shows
    num_phohos = soup.find(class_="a-column a-span4 ig-thumbs a-span-last")
    num_phoho = num_phohos.find_all('div', class_="ig-thumb-image")
    mun = len(num_phoho) - 1
    # Pull the main image block and collect its .jpg URLs
    dl = soup.find(class_="a-column a-span8 a-text-center ig-main-image")
    dl_str = str(dl)
    pattern = re.findall(r'https://[^\s]*?\.jpg', dl_str)
    # Create a folder to store the downloaded images
    #os.makedirs(os.path.split(os.path.realpath(__file__))[0] + '\\采集到的图片')
    os.makedirs(os.path.split(os.path.realpath(__file__))[0] + '\\采集到的图片\\' + str_list)
    IMAGE_PATH = os.path.split(os.path.realpath(__file__))[0] + '\\采集到的图片\\' + str_list + '\\'
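    # Note: os.makedirs raises FileExistsError if the folder already exists;
    # passing exist_ok=True, e.g. os.makedirs(path, exist_ok=True), would make reruns safe.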
    time.sleep(3)
    # Download the images visible on the first view
    for d in pattern:
        photos_url = requests.get(d, headers=headers)
        pic = photos_url.content
        # Image data must be written in binary ('wb') mode
        photo = open(IMAGE_PATH + "%s.jpg" % pattern.index(d), 'wb')
        # Write the binary content
        photo.write(pic)
        # Close the file
        photo.close()
    # Remaining images: click through each additional thumbnail
    n = 1
    for i in range(mun):
        browser.find_element_by_xpath('//img[@class="ig-thumb-inner "]').click()
        time.sleep(3)
        # Download the image now shown
        response_str = browser.page_source  # re-read the page source after the click
        soup_url = BeautifulSoup(response_str, 'html.parser')
        dl_url = soup_url.find(class_="a-column a-span8 a-text-center ig-main-image")
        dl_str = str(dl_url)
        pattern_str = re.findall(r'https://[^\s]*?\.jpg', dl_str)
        for d_str in pattern_str:
            photos_url_str = requests.get(d_str, headers=headers)
            pic_str = photos_url_str.content
            # Image data must be written in binary ('wb') mode
            photo = open(IMAGE_PATH + str(n) + ".jpg", 'wb')
            # Write the binary content
            photo.write(pic_str)
            # Close the file
            photo.close()
            n = n + 1
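
The two download loops above repeat the same fetch-open-write-close sequence. As a minimal sketch of how that could be factored out (the name save_image and the timeout value are my own additions, not part of the original script), a small helper might look like this:

import requests

def save_image(img_url, file_path, headers):
    # Download one image URL and write it to file_path in binary mode
    resp = requests.get(img_url, headers=headers, timeout=30)
    if resp.status_code == 200:           # only save successful responses
        with open(file_path, 'wb') as f:  # 'with' closes the file automatically
            f.write(resp.content)

# The first download loop could then become, for example:
# for i, d in enumerate(pattern):
#     save_image(d, IMAGE_PATH + "%s.jpg" % i, headers)

Using enumerate() instead of pattern.index(d) also avoids reusing the same filename when the page lists the same .jpg URL more than once.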