爬取网页问题
import requests
import bs4
def open_url(url):
    """Fetch *url* and return the `requests` Response object.

    A desktop-browser User-Agent is sent so the site does not reject
    the request as coming from a script.

    :param url: absolute URL to fetch.
    :return: the ``requests.Response`` for the GET request.
    """
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"
    }
    # timeout prevents the script from hanging forever on a stalled connection
    r = requests.get(url, headers=headers, timeout=10)
    return r
def find_data(soup):
    """Print and return the href of every video card in *soup*.

    Scans for ``<div class="spread-module">`` blocks (the bilibili video
    cards) and prints the link of each one.

    :param soup: a ``bs4.BeautifulSoup`` of the channel page.
    :return: list of href strings (the original printed only and
             returned ``None``; returning the list lets callers use it).
    """
    hrefs = []
    for div in soup.find_all('div', class_='spread-module'):
        a = div.a
        # Defensive: skip cards without a link instead of raising
        # AttributeError / KeyError on malformed markup.
        if a is None or 'href' not in a.attrs:
            continue
        print(a['href'])
        hrefs.append(a['href'])
    return hrefs
def main():
    """Fetch the bilibili game-channel page and print its video links.

    NOTE(review): the video cards on this page appear to be rendered
    client-side by JavaScript, so the static HTML fetched here may
    contain no 'spread-module' divs and nothing gets printed — confirm
    against the site; the JSON API is the usual workaround.
    """
    game_url = 'https://www.bilibili.com/v/game/'
    res = open_url(game_url)
    soup = bs4.BeautifulSoup(res.text, 'lxml')
    finded_data = find_data(soup)
    # Kept for debugging — dump the raw HTML to inspect what was fetched:
    # with open('bilibiliweb.txt', 'w', encoding=res.encoding) as f:
    #     f.write(res.text)


if __name__ == '__main__':
    main()
想把下图里的a标签的地址打印出来,
结果什么都没有
求解{:10_277:} '''with open('bilibiliweb.txt','w',encoding=res.encoding) as f:
f.write(res.text)'''
为什么要加三引号? Marcus-Liu 发表于 2020-7-24 16:08
'''with open('bilibiliweb.txt','w',encoding=res.encoding) as f:
f.write(res.text)'''
为什 ...
注释起来,待会用 你爬取的网页是经过 .js 渲染后生成的数据,推荐使用 selenium 模块渲染后再进行爬取。 我这有 selenium 模块使用的例子,如有需要请参照:
import urllib.request
import re
from selenium import webdriver
from selenium.webdriver import ChromeOptions
def search(music=None, singer=None):
    """Search music.163.com for *music*, optionally filtered by *singer*.

    The search results are generated by JavaScript, so the page is
    rendered with selenium first; (song_id, title, artist) triples are
    then scraped from the page source and the user is offered a download
    of each hit.

    :param music: song title to search for (takes priority over singer).
    :param singer: artist name; also used to filter the result list.
    :raises ValueError: when neither argument is given.
    """
    if not music and not singer:
        raise ValueError('参数错误!!')
    name = music if music else singer

    # Configure a Chrome instance pointing at the bundled browser binary.
    CLOUD_CHROME_OPTION = ChromeOptions()
    CLOUD_CHROME_OPTION.binary_location = '.\\plug-in\\Google\\Chrome\\Application\\chrome.exe'
    # CLOUD_CHROME_OPTION.add_argument('--headless')     # headless mode
    # CLOUD_CHROME_OPTION.add_argument('--disable-gpu')  # goes with headless
    driver = webdriver.Chrome(options=CLOUD_CHROME_OPTION, executable_path=r'chromedriver.exe')
    driver.get('https://music.163.com/#/search/m/?s=' + name)
    # The result table lives inside an iframe on the search page.
    driver.switch_to.frame('g_iframe')
    music_list = re.findall(r'<a href="/song\?id=(\d*)"><b title="(.*?)">', driver.page_source)
    singer_list = re.findall(r'<a href="/artist\?id=\d*">(.*?)</a>', driver.page_source)
    # BUG FIX: quit the browser once the page source is captured
    # (the original left it running).
    driver.quit()

    # BUG FIX: the original appended `music_list + (singer_list,)` —
    # list + tuple raises TypeError and ignores the loop index; each
    # entry must pair the i-th song with the i-th artist.  Likewise the
    # singer filter must look at singer_list[i], not the whole list.
    download_list = []
    for i in range(len(music_list)):
        if singer and singer not in singer_list[i]:
            continue
        download_list.append(music_list[i] + (singer_list[i],))
    print(download_list)

    down_load_sign = input(r'开始下载吗?:')
    if down_load_sign == 'Y':
        # BUG FIX: each entry is a (song_id, title, artist) tuple; the
        # original formatted the whole tuple as both the name and the id.
        for song_id, title, artist in download_list:
            print(f'{title}-{artist}.mp3正在下载!')
            urllib.request.urlretrieve(
                f'http://music.163.com/song/media/outer/url?id={song_id}.mp3',
                f'C:\\CLOUD Artification Intelligence\\clouddata\\Sound\\{title}-{artist}.mp3')
            print(f'{title}-{artist}.mp3下载完成!')
if __name__ == '__main__':
    # BUG FIX: the original looped forever re-downloading the same
    # hard-coded song ('大鱼').  Prompt per iteration and stop when the
    # user submits nothing.
    while True:
        music_name = input('请输入歌曲名:')
        singer_name = input('请输入歌手名:')
        if not music_name and not singer_name:
            break
        search(music_name, singer_name)
import requests
import re
# Browser-like headers; the referer is required so the API accepts the
# request as coming from the game-channel page.
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36",
    "referer": "https://www.bilibili.com/v/game/",
}
# The video list is served by this JSON API, not by the HTML page
# (which is rendered client-side by JavaScript).
url = "https://api.bilibili.com/x/web-interface/dynamic/region"
# BUG FIX: for a GET request the query arguments belong in `params=`,
# not `data=` (the POST body), and the original key "&_" was a typo for
# "_".  The jsonp/callback parameters are dropped so the server returns
# plain JSON that json.loads() can parse directly.
data = {
    "ps": "10",   # page size: number of videos per request
    "rid": "17",  # region id of the game channel
    "_": "1595579753063",  # cache-busting timestamp
}
res = requests.get(url, headers=headers, params=data, timeout=10).text
之后自己去看看json模块 xiaosi4081 发表于 2020-7-24 16:50
之后自己去看看json模块
这个有点难啊,能解释一下原理吗{:10_266:}
页:
[1]