Crawler upgrade: it can now collect the page URLs of all European and American movies, simply by iterating over the site's sequential page-number structure.
# -*- coding: utf-8 -*-
__author__ = 'chennan'
import requests
import re
from bs4 import BeautifulSoup
import csv
def getHtml(url):  # fetch the page and return the raw response bytes
    res = requests.get(url)
    # res.text would guess the charset and mangle the GBK-encoded pages;
    # return the raw bytes and let BeautifulSoup decode them below
    html = res.content
    return html
def getPageUrl(html):  # parse the page and collect the movie detail-page links
    bsObj = BeautifulSoup(html, 'html.parser', from_encoding='gbk')
    reg = re.compile(r'/html/gndy/\w{4}/\d{8}/\d{4,10}\.html')
    pages = bsObj.find_all('a', {'href': reg})
    return pages
if __name__ == '__main__':
    preurl = 'http://www.ygdy8.net/html/gndy/oumei/'
    # the matched hrefs already begin with /html/gndy/..., so only the
    # site root is prepended (prefixing /html/gndy again would double the path)
    siteurl = 'http://www.ygdy8.net'
    csvFile = open('I:/编程学习/spider/movie.csv', 'w', newline='', encoding='utf-8-sig')
    sheet = csv.writer(csvFile)
    sheet.writerow(('Movie title', 'Intro and download page URL'))
    for count in range(1, 160):
        # every listing page has the form
        # http://www.ygdy8.net/html/gndy/oumei/list_7_N.html, where N is 1~159
        url = preurl + 'list_7_' + str(count) + '.html'
        html = getHtml(url)
        pages = getPageUrl(html)
        for page in pages:
            sheet.writerow((page.get_text(), siteurl + page['href']))
    csvFile.close()
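As a quick sanity check on the href pattern, a snippet like the one below can be run on its own; the sample paths are illustrative assumptions rather than hrefs copied from the live site, and search() is used because that is how BeautifulSoup applies a compiled regex to an attribute value.
import re
reg = re.compile(r'/html/gndy/\w{4}/\d{8}/\d{4,10}\.html')
samples = [
    '/html/gndy/dyzz/20160101/50000.html',  # hypothetical detail page: should match
    '/html/gndy/oumei/list_7_2.html',       # a listing page: should not match
]
for href in samples:
    print(href, '->', bool(reg.search(href)))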