Early web-scraping practice: saving a FishC forum page
This only works on pages the account can access, and it saves only the first page. It is mainly practice, plus a way to keep some quick-reference material on hand; newcomers who need it can change the username, password, and URL themselves.
```python
from urllib.parse import urljoin
from pathlib import Path
import requests
from fake_useragent import UserAgent, errors
from bs4 import BeautifulSoup


# Fetch the page
def get_info(session: requests.Session, url):
    response = session.get(url)
    return response


# Save the page
def save_info(session: requests.Session, response: requests.get):
    soup = BeautifulSoup(response.content, 'lxml')
    p = Path('.')
    path_name = soup.find('title').text
    p.joinpath(path_name).mkdir(parents=True, exist_ok=True)
    # Save images
    img_info = soup.find_all('img')
    for img in img_info:
        try:
            img_url = urljoin('https://fishc.com.cn/', img['src'])
            img_response = session.get(img_url)
            img_name = img['src'].split('/')[-1].split('?').strip()
            img['src'] = f'./{path_name}'
            with open(f'{path_name}/{img_name}', 'wb') as file:
                print(f'Saving image -> {img_name}')
                file.write(img_response.content)
        except Exception as e:
            print(f'Image save error: {e}')
    # Save CSS
    css_info = soup.find_all('link', {'rel': 'stylesheet'})
    for css in css_info:
        try:
            css_url = urljoin('https://fishc.com.cn/', css['href'])
            css_response = session.get(css_url)
            css_name = css['href'].split('/')[-1].split('?').strip()
            css['href'] = f'./{path_name}'
            with open(f'{path_name}/{css_name}', 'wb') as file:
                print(f'Saving CSS -> {css_name}')
                file.write(css_response.content)
        except Exception as e:
            print(f'CSS save error: {e}')
    # Save the HTML
    with open(f'{p}/{path_name}.html', 'wb') as file:
        file.write(response.content)
    print('Page saved')


if __name__ == '__main__':
    try:
        url = 'https://fishc.com.cn/thread-183975-1-1.html'  # replaceable URL
        url_login = 'https://fishc.com.cn/member.php?mod=logging&action=login&loginsubmit=yes&infloat=yes&lssubmit=yes&inajax=1'
        session = requests.Session()
        header = {'User-Agent': UserAgent().random}
        session.headers = header
        login_info = {'username': 'yourname',
                      'password': 'yourpassword'}
        session.post(url_login, data=login_info)
        response = get_info(session, url)
        save_info(session, response)
    except errors.FakeUserAgentError as e:
        print(f'FakeUserAgentError: {e}')
    except requests.exceptions.RequestException as e:
        print(f'RequestException: {e}')
```
Your code already looks quite good and does save FishC forum pages. A few places need changes, though:
1. Mind the casing of the `fake_useragent` import: the correct form is `from fake_useragent import UserAgent, errors`.
2. `response` is a `requests.Response` object, not a `requests.get` object, so the parameter annotation in `save_info` should be `response: requests.Response`.
3. When building the image and CSS URLs, read the attributes from the BeautifulSoup `Tag` objects with `.get('src')` and `.get('href')` (plain subscripting raises `KeyError` when the attribute is missing), and join them against `response.url` rather than a hard-coded base.
4. When saving the images and CSS, derive the filename correctly: the original chain `split('/')[-1].split('?').strip()` raises `AttributeError`, because `str.split` returns a list. `os.path.basename` extracts the filename portion of the URL instead; see the sketch after this list for a variant that also strips the query string.
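One caveat on `os.path.basename`: applied to a full URL it keeps any query string, so a cache-buster like `?v=2` would become part of the saved filename (and `?` is not even a legal filename character on Windows). A minimal sketch of a more robust helper, with the `filename_from_url` name chosen purely for illustration:

```python
import os
from urllib.parse import urlsplit


def filename_from_url(url: str) -> str:
    # urlsplit separates the path from the query string, so a
    # trailing '?v=2' never ends up in the filename.
    return os.path.basename(urlsplit(url).path)


print(filename_from_url('https://fishc.com.cn/static/image/logo.png?v=2'))  # -> logo.png
```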
Here is the modified code:
```python
from urllib.parse import urljoin
import os
import requests
from fake_useragent import UserAgent, errors
from bs4 import BeautifulSoup


# Fetch the page
def get_info(session: requests.Session, url):
    response = session.get(url)
    return response


# Save the page
def save_info(session: requests.Session, response: requests.Response):
    soup = BeautifulSoup(response.content, 'lxml')
    title = soup.find('title').text
    path_name = title.strip()
    os.makedirs(path_name, exist_ok=True)
    # Save images
    img_info = soup.find_all('img')
    for img in img_info:
        try:
            img_url = urljoin(response.url, img.get('src'))
            img_response = session.get(img_url)
            img_name = os.path.basename(img_url)
            with open(f'{path_name}/{img_name}', 'wb') as file:
                print(f'Saving image -> {img_name}')
                file.write(img_response.content)
        except Exception as e:
            print(f'Image save error: {e}')
    # Save CSS
    css_info = soup.find_all('link', {'rel': 'stylesheet'})
    for css in css_info:
        try:
            css_url = urljoin(response.url, css.get('href'))
            css_response = session.get(css_url)
            css_name = os.path.basename(css_url)
            with open(f'{path_name}/{css_name}', 'wb') as file:
                print(f'Saving CSS -> {css_name}')
                file.write(css_response.content)
        except Exception as e:
            print(f'CSS save error: {e}')
    # Save the HTML
    with open(f'{path_name}.html', 'wb') as file:
        file.write(response.content)
    print('Page saved')


if __name__ == '__main__':
    try:
        url = 'https://fishc.com.cn/thread-183975-1-1.html'
        url_login = 'https://fishc.com.cn/member.php?mod=logging&action=login&loginsubmit=yes&infloat=yes&lssubmit=yes&inajax=1'
        session = requests.Session()
        header = {'User-Agent': UserAgent().random}
        session.headers = header
        login_info = {'username': 'yourname',
                      'password': 'yourpassword'}
        session.post(url_login, data=login_info)
        response = get_info(session, url)
        save_info(session, response)
    except errors.FakeUserAgentError as e:
        print(f'FakeUserAgentError: {e}')
    except requests.exceptions.RequestException as e:
        print(f'RequestException: {e}')
```
The code above reflects these changes; you can adjust and extend it further as needed. Hope this helps!
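One thing worth noting: the revised code drops the original attempt to rewrite the `src`/`href` attributes, so the saved HTML still points at the remote resources rather than the downloaded copies. A minimal sketch of how that rewrite could look, assuming the same directory layout; the `localize` helper is illustrative, not part of the original code:

```python
import os
from pathlib import Path
from urllib.parse import urljoin, urlsplit
import requests
from bs4 import BeautifulSoup


def localize(session: requests.Session, response: requests.Response, out_dir: str) -> None:
    # Download every image and stylesheet, then point the tags at the
    # local copies so the saved HTML renders offline.
    soup = BeautifulSoup(response.content, 'lxml')
    Path(out_dir).mkdir(parents=True, exist_ok=True)
    targets = [(tag, 'src') for tag in soup.find_all('img')]
    targets += [(tag, 'href') for tag in soup.find_all('link', {'rel': 'stylesheet'})]
    for tag, attr in targets:
        ref = tag.get(attr)
        if not ref:
            continue
        res_url = urljoin(response.url, ref)
        name = os.path.basename(urlsplit(res_url).path)
        if not name:
            continue
        with open(f'{out_dir}/{name}', 'wb') as file:
            file.write(session.get(res_url).content)
        tag[attr] = f'./{out_dir}/{name}'  # rewrite the tag to the local file
    # Write the modified markup instead of the raw response bytes
    with open(f'{out_dir}.html', 'w', encoding='utf-8') as file:
        file.write(str(soup))
```

Called as `localize(session, response, path_name)` in place of `save_info`, this writes an HTML file whose images and styles resolve against the downloaded files.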
The reply above comes from ChatGPT (FishC official API). If it did not fully resolve your question, please follow up.