import requests
from multiprocessing import Pool
from bs4 import BeautifulSoup


def download_page(url):
    # Fetch one page; the timeout keeps a stalled request from hanging a worker.
    response = requests.get(url, timeout=30)
    if response.status_code == 200:
        return response.content
    else:
        print(f"Error downloading page: {url}")
        return None
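

# A hedged variant of download_page with simple retries, assuming transient
# network errors are worth retrying; the retry count, timeout, and linear
# backoff values are arbitrary example numbers, not part of the original script.
import time

def download_page_with_retries(url, retries=3, backoff=2.0):
    for attempt in range(retries):
        try:
            response = requests.get(url, timeout=30)
            if response.status_code == 200:
                return response.content
        except requests.RequestException:
            pass  # fall through to the backoff sleep and retry
        if attempt < retries - 1:
            time.sleep(backoff * (attempt + 1))  # wait a little longer each try
    print(f"Error downloading page after {retries} attempts: {url}")
    return None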


def extract_number_of_pages(initial_page_content):
    # Implement the logic to extract the total page count from the
    # initial page here; it will vary with the site's structure.
    soup = BeautifulSoup(initial_page_content, 'html.parser')
    total_pages = 10  # Replace this placeholder with real extraction logic
    return total_pages
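

# A minimal sketch of that extraction step, assuming the site renders numbered
# pagination links such as <a class="page-numbers">3</a>; the CSS selector is
# a hypothetical placeholder and must be adapted to the real markup.
def extract_number_of_pages_example(initial_page_content):
    soup = BeautifulSoup(initial_page_content, 'html.parser')
    # Keep only the pagination links whose text is a plain page number.
    numbers = [int(a.get_text(strip=True))
               for a in soup.select('a.page-numbers')
               if a.get_text(strip=True).isdigit()]
    # With no pagination links found, assume the site has a single page.
    return max(numbers) if numbers else 1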


def generate_page_urls(base_url, total_pages):
    # Page 1 was already fetched, so build URLs for pages 2..total_pages.
    return [f"{base_url}/page/{i}" for i in range(2, total_pages + 1)]


if __name__ == "__main__":
    base_url = "https://example.com"  # Replace with the base URL of the site to download
    initial_page_url = f"{base_url}/page/1"
    initial_page_content = download_page(initial_page_url)
    if initial_page_content is None:
        raise SystemExit("Could not download the initial page; aborting.")
    total_pages = extract_number_of_pages(initial_page_content)
    page_urls = generate_page_urls(base_url, total_pages)
    # Download the remaining pages in parallel, one worker process per CPU core by default.
    with Pool() as pool:
        pages_content = pool.map(download_page, page_urls)
    # pages_content now holds the content of every downloaded page from page 2
    # onward (entries are None for pages that failed to download).
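

# Note: these downloads are I/O-bound, so a thread pool is often a lighter
# alternative to process-based multiprocessing (no pickling, cheaper startup).
# A sketch of that swap under the same interface; the worker count of 8 is an
# arbitrary example value.
from concurrent.futures import ThreadPoolExecutor

def download_pages_threaded(urls, max_workers=8):
    # executor.map preserves input order, matching pool.map's behavior above.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        return list(executor.map(download_page, urls))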