My first crawler!!!!! So happy.
Sharing it for the experts to critique. I haven't learned regular expressions yet, so everything is done with plain string operations.
I decided against scraping pictures of girls (I'm a gentleman, after all), so I scraped one of 鱼哥's threads instead (don't hit me).
One question: how do you all handle those special symbols??
import urllib.request as ur

illegal = {'\xa0': ' ', '<br />': '', '<div class="quote">': '', '<blockquote>': '"',
           '</blockquote>': '"', '</div>': '', '<div>': '', ' ': ' ', '0xaf': ''}
data = []

def find_comment(content):
    global data
    account = []
    comments = []
    head = content.find('<div id="postlist" class="pl bm">')
    tail = content.find('<div id="postlistreply" class="pl">', head)
    p = content.find('<div id="post_', head, tail)
    while p != -1:
        t = content.find('<div id="post_', p + 1, tail)
        temp = content.find('class="xw1"', p + 1, t)
        temp = content.find('>', temp + 1, t)
        temp_t = content.find('</a>', temp + 1, t)
        name = content[temp + 1:temp_t]
        account.append(name)
        temp = content.find('<td class="t_f" id="postmessage_', temp_t + 1, t) + 40
        temp_t = content.find('</td>', temp + 1, t)
        comment = content[temp:temp_t]
        comments.append(comment)
        p = content.find('<div id="post_', t + 1, tail)
    num = len(account)
    for i in range(0, num):
        data.append([account[i], comments[i]])

def filt():
    global data
    for each in data:
        if '<strong>回复' in each[1]:
            t = each[1].find('#', 111)
            repeated_num = each[1][111:t]
            t1 = each[1].find('<i>', t) + 3
            t2 = each[1].find('</i>', t1)
            repeated_name = each[1][t1:t2]
            t = each[1].find('</strong>', t) + 9
            each[1] = '回复了' + repeated_num + '楼用户' + repeated_name + '的评论' + each[1][t:]
        for i in illegal:
            each[1] = each[1].replace(i, illegal[i])
        while '<img src=' in each[1]:
            p = each[1].find('<img src=')
            t = each[1].find('/>', p) + 2
            each[1] = each[1][:p] + '<图片表情>' + each[1][t:]

def save_comment(data, page_p):
    global f
    f.write('=' * 20 + '第%d页' % page_p + '=' * 20 + '\n\n')
    for each in data:
        f.write('用户名:' + each[0] + ' 评论道' + '\n' + each[1] + '\n\n')
    print('第%d页评论已爬取完毕' % page_p)
    return

def get_page(page_p):
    url_head = 'https://fishc.com.cn/thread-1053-'
    url_tail = '-1.html'
    page_url = url_head + str(page_p) + url_tail
    response = ur.urlopen(page_url)
    content = response.read()
    content = content.decode('gbk', 'ignore')
    return content

f = open('comments.txt', 'w')
page_start = int(input("请输入需要爬取起始页数:"))
page_end = int(input("请输入需要爬取终止页数:"))
page_p = page_start
while page_p <= page_end:
    content = get_page(page_p)
    find_comment(content)
    filt()
    save_comment(data, page_p)
    data = []
    page_p += 1
f.close()
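A note on the special-symbols question above: besides a hand-maintained replacement table, the standard library can decode HTML entities, and opening the output file with an explicit encoding avoids most of the encode errors. A minimal sketch, not what the code above does (the clean helper name is just for illustration):

import html

def clean(text):
    # html.unescape turns entities such as &nbsp; and &amp; back into characters
    text = html.unescape(text)
    # a non-breaking space still looks odd in a text file, so normalize it
    return text.replace('\xa0', ' ')

# an explicit UTF-8 encoding on the output file sidesteps ASCII/GBK encode errors
f = open('comments.txt', 'w', encoding='utf-8')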
jy02618370 posted on 2019-10-14 17:18:
    Could you add comments to the code, so that newcomers like us can study and learn from it?
Here you go, here you go!
First time being the expert.
Regular expressions would actually avoid a lot of these problems, but I hadn't learned them yet when I wrote this.
Here it is:
import urllib.request as ur

illegal = {'\xa0': ' ', '<br />': '', '<div class="quote">': '', '<blockquote>': '"',
           '</blockquote>': '"', '</div>': '', '<div>': '', ' ': ' ', '0xaf': ''}
# Substrings that need special handling. Because of the site's encoding, some of
# these characters cannot be encoded for output and would raise an error, so
# they are replaced (or dropped) before saving.
data = []
# data holds the [username, comment] pairs for the current page

def find_comment(content):
    # find_comment locates the comments on a page and stores
    # [username, comment] pairs in the global list data
    global data
    account = []   # list of usernames
    comments = []  # list of comment bodies
    # Inspecting the page source shows the comments are nested as:
    # postlist / postlistreply / post_<number>
    head = content.find('<div id="postlist" class="pl bm">')
    tail = content.find('<div id="postlistreply" class="pl">', head)
    p = content.find('<div id="post_', head, tail)
    while p != -1:
        # the loop ends once no further comment is found
        t = content.find('<div id="post_', p + 1, tail)
        temp = content.find('class="xw1"', p + 1, t)
        temp = content.find('>', temp + 1, t)
        temp_t = content.find('</a>', temp + 1, t)
        name = content[temp + 1:temp_t]
        account.append(name)
        # record the username
        temp = content.find('<td class="t_f" id="postmessage_', temp_t + 1, t) + 40
        # +40 skips the rest of the opening <td> tag (the post id and closing ">")
        temp_t = content.find('</td>', temp + 1, t)
        comment = content[temp:temp_t]
        comments.append(comment)
        # record the comment body
        p = content.find('<div id="post_', t + 1, tail)
        # jump to the next post_<number> tag
    num = len(account)
    for i in range(0, num):
        data.append([account[i], comments[i]])
        # store the pair in the global list data

def filt():
    # Would be called filter, but Python already defines filter(), hence filt.
    # Removes the illegal characters from every comment.
    global data
    for each in data:
        if '<strong>回复' in each[1]:
            # If the comment is a reply, rewrite it in the form:
            # 回复了 <floor> 楼用户 <name> 的评论 <comment text>
            t = each[1].find('#', 111)
            repeated_num = each[1][111:t]   # floor number (fixed offset in this page's HTML)
            t1 = each[1].find('<i>', t) + 3
            t2 = each[1].find('</i>', t1)
            repeated_name = each[1][t1:t2]  # name of the quoted user
            t = each[1].find('</strong>', t) + 9
            each[1] = '回复了' + repeated_num + '楼用户' + repeated_name + '的评论' + each[1][t:]
        for i in illegal:
            each[1] = each[1].replace(i, illegal[i])
            # str.replace already replaces every occurrence, so no extra loop is needed
        while '<img src=' in each[1]:
            # replace every embedded image with the placeholder <图片表情>
            p = each[1].find('<img src=')
            t = each[1].find('/>', p) + 2
            each[1] = each[1][:p] + '<图片表情>' + each[1][t:]

def save_comment(data, page_p):
    # save_comment writes one page of comments to the output file
    global f
    f.write('=' * 20 + '第%d页' % page_p + '=' * 20 + '\n\n')
    for each in data:
        f.write('用户名:' + each[0] + ' 评论道' + '\n' + each[1] + '\n\n')
    print('第%d页评论已爬取完毕' % page_p)
    return

def get_page(page_p):
    # get_page builds the URL for page page_p and downloads its HTML
    url_head = 'https://fishc.com.cn/thread-1053-'
    url_tail = '-1.html'
    page_url = url_head + str(page_p) + url_tail
    response = ur.urlopen(page_url)
    content = response.read()
    content = content.decode('gbk', 'ignore')
    return content
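# Side note (not part of the original logic): rather than hard-coding 'gbk',
# the page's charset can usually be read from the HTTP response headers.
# response.headers is a standard email.message.Message object, so a sketch:
def detect_charset(response, default='gbk'):
    # get_content_charset() returns the charset named in Content-Type, or None
    return response.headers.get_content_charset() or default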
f = open('comments.txt', 'w')
page_start = int(input("请输入需要爬取起始页数:"))
page_end = int(input("请输入需要爬取终止页数:"))
page_p = page_start
while page_p <= page_end:
    # fetch page → find comments → filter → save → move to the next page
    content = get_page(page_p)
    find_comment(content)
    filt()
    save_comment(data, page_p)
    data = []
    # reset data so the next page does not re-save the comments already written
    page_p += 1
f.close()
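As for the regex remark above: for comparison, extracting the same fields could look roughly like this with the re module (a sketch only; find_comment_re is a made-up name, and the patterns assume the same markup the find() calls above search for, untested against the live page):

import re

def find_comment_re(content):
    # the username sits inside an <a class="xw1" ...>name</a> tag, and the
    # comment body inside a <td class="t_f" id="postmessage_...">...</td> cell
    names = re.findall(r'class="xw1"[^>]*>(.*?)</a>', content)
    bodies = re.findall(r'<td class="t_f" id="postmessage_\d+">(.*?)</td>',
                        content, re.S)
    return list(zip(names, bodies))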