import requests
from lxml import etree
import asyncio
import aiohttp
import aiofiles


async def down_load(href, title):
    async with aiohttp.ClientSession() as session:
        async with session.get("https://www.17k.com" + href) as resp:
            resp.encoding = "utf-8"
            dic = await resp.text()
            tree = etree.HTML(dic)
            page_content = tree.xpath("/html/body/div[4]/div[2]/div[2]/div[1]/div[2]")[0]
            lp = len(page_content) - 6
            page_content = page_content[:lp]
            for i in page_content:
                content = i.xpath("./text()")[0]
                async with aiofiles.open("ibook/" + title, 'a', encoding='utf-8') as f:
                    await f.write(content)


async def get_page(url):
    resp = requests.get(url)
    resp.encoding = "utf-8"
    tree = etree.HTML(resp.text)
    dd = tree.xpath("/html/body/div[5]/dl/dd")[0]
    task = []
    for a in range(len(dd)):
        href = dd[a].xpath("./@href")[0]
        title = str(dd[a].xpath("./@title")[0]).split(":", 1)[0].replace("字数", "").replace("\r", "")
        task.append(asyncio.create_task(down_load(href, title)))
    await asyncio.wait(task)


if __name__ == '__main__':
    url = "https://www.17k.com/list/3318557.html"
    loop = asyncio.get_event_loop()
    loop.run_until_complete(get_page(url))
This is a program that uses asynchronous coroutines to crawl a novel from a website, but when I run it, it keeps raising this error:
dic = await resp.text()
return self._body.decode(encoding, errors=errors) # type: ignore
return codecs.charmap_decode(input,errors,decoding_table)
UnicodeDecodeError: 'charmap' codec can't decode byte 0x8d in position 400: character maps to <undefined>
Most of the novel does get downloaded; it's just missing a few chapters. I can't work out where the problem is. Could someone experienced take a look? Thanks!
Reply from 2012277033:
There are a few problems here. One is how the response is read for parsing: resp.text() is a coroutine, so it has to be awaited. Another is the file writing: the open mode matters, because with "w" every reopen overwrites the file and only the last line ends up recorded, so it needs append mode. The last is an encoding problem: since it's unclear which encoding is actually at fault, I just skip the undecodable bytes with errors="ignore" (a minimal repro of that error is sketched below).
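For reference, a minimal sketch of the decode failure itself. The assumption here is that the script ran on Windows, where the fallback text codec is cp1252, which is implemented by the 'charmap' codec named in the traceback:

# Hypothetical repro: byte 0x8d is undefined in cp1252, so decoding it
# raises the same "'charmap' codec can't decode byte 0x8d" error.
raw = b"\x8d\xe5\xa5\xbd"           # stray 0x8d followed by the UTF-8 bytes for "好"
try:
    raw.decode("cp1252")
except UnicodeDecodeError as e:
    print(e)                        # 'charmap' codec can't decode byte 0x8d ...

# Either decode with the right codec, or drop the bytes that cannot decode:
print(raw.decode("utf-8", errors="ignore"))   # prints "好"; the 0x8d is skipped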
With those parts changed, it looks like this:
async def down_load(href, title):
    async with aiohttp.ClientSession() as session:
        async with session.get("https://www.17k.com" + href) as resp:
            # resp.text() must be awaited; errors="ignore" skips bytes the decoder
            # cannot handle. (Assigning resp.encoding is a requests idiom that
            # aiohttp ignores, so the encoding is given to the parser instead.)
            tree = etree.HTML(await resp.text(errors="ignore"),
                              parser=etree.HTMLParser(encoding='utf-8'))
            page_content = tree.xpath(
                "/html/body/div[4]/div[2]/div[2]/div[1]/div[2]")
            # skip empty pages
            if page_content:
                page_content = page_content[0]
            else:
                return
            lp = len(page_content) - 6
            page_content = page_content[:lp]
            for i in page_content:
                # skip blank lines
                if i.xpath("./text()"):
                    content = i.xpath("./text()")[0]
                else:
                    continue
                # open in append mode; with "w" each reopen would truncate the
                # file and only the last line would survive
                async with aiofiles.open("ibook/" + title.replace(' ', "_"), 'a+', encoding='utf-8') as f:
                    # add a newline so each paragraph stays on its own line
                    await f.write(content + '\n')


async def get_page(url):
    resp = requests.get(url)
    resp.encoding = "utf-8"
    tree = etree.HTML(resp.text)
    dd = tree.xpath("/html/body/div[5]/dl/dd")[0]
    task = []
    for a in range(len(dd)):
        href = dd[a].xpath("./@href")[0]
        title = str(dd[a].xpath("./@title")[0]).split(":", 1)[0].replace("字数", "").replace("\r", "")
        task.append(asyncio.create_task(down_load(href, title)))
    await asyncio.wait(task)


if __name__ == '__main__':
    url = "https://www.17k.com/list/3318557.html"
    loop = asyncio.get_event_loop()
    loop.run_until_complete(get_page(url))
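To see the open-mode point in isolation, a small standalone sketch (the file names demo_w.txt and demo_a.txt are made up for the demo):

# Reopening with "w" truncates the file each time, so only the last write survives:
for line in ("chapter 1", "chapter 2"):
    with open("demo_w.txt", "w", encoding="utf-8") as f:
        f.write(line + "\n")
print(open("demo_w.txt", encoding="utf-8").read())   # only "chapter 2"

# Reopening with "a" (or "a+") appends, which is what the per-paragraph writes need:
for line in ("chapter 1", "chapter 2"):
    with open("demo_a.txt", "a", encoding="utf-8") as f:
        f.write(line + "\n")
print(open("demo_a.txt", encoding="utf-8").read())   # both lines

Separately, on Python 3.7+ the get_event_loop()/run_until_complete pair at the bottom can be written more simply as asyncio.run(get_page(url)).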