How do I skip unreachable urls when crawling a list of urls with a for loop?
As the title says.
Source code:
import urllib.request

def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('user-agent',
                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36')
    response = urllib.request.urlopen(req)    # pass the Request object so the header is actually sent
    html = response.read()
    return html
def find_m3u8url(url):
    html = url_open(url).decode('utf-8')
    m3u8url_dict = {}
    url_a = html.find('file: "http:')
    if url_a != -1:
        url_a += 12    # len('file: "http:') == 12, so url_a now points just past 'http:'
        url_b = html.find('.m3u8', url_a, url_a + 300)
        if url_b != -1:
            # percent-encode the url, which may contain Chinese characters
            m3u8url = 'http:' + urllib.request.quote(html[url_a:url_b]) + '.m3u8'
            print(m3u8url)
            m3u8url_dict[url] = m3u8url
    return m3u8url_dict
def save(m3u8url_dict):
    for each in m3u8url_dict:
        word = m3u8url_dict[each]
        with open('m3u8urls.txt', 'a') as f:
            f.write(each)
            f.write('\n')
            f.write(word)
            f.write('\n')
            f.write('\n')
with open('edu_urls.txt') as f:    # text mode: urlopen wants str, not bytes
    for url in f:
        url = url.strip()          # drop the trailing newline
        url_open(url)
        m3u8url_dict = find_m3u8url(url)
        save(m3u8url_dict)
It is this final loop that fails:
with open('edu_urls.txt') as f:
    for url in f:
        url = url.strip()
        url_open(url)
        m3u8url_dict = find_m3u8url(url)
        save(m3u8url_dict)
The file edu_urls.txt contains some urls that cannot be reached, and as soon as the loop hits one of them, urlopen raises an error and the whole script stops. How can I skip the url that fails and carry on with the next one?
Any help would be appreciated~~~~~

Cool_Breeze replied:

In url_open(url), use

try:
    ...
except ...:
    ...

and return False on failure, or simply pass.
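Concretely, that suggestion might look like this (a minimal sketch that keeps the original url_open signature; returning False is just one possible sentinel, and every caller then has to check for it):

import urllib.request

def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('user-agent', 'Mozilla/5.0')
    try:
        response = urllib.request.urlopen(req)
    except Exception:
        return False    # signal failure instead of crashing the whole loop
    return response.read()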
三一王 replied:

Cool_Breeze posted on 2020-12-24 15:45:
    In url_open(url), use try ... except ...

So something like this:
with open('edu_urls.txt') as f:
    for url in f:
        url = url.strip()
        try:
            url_open(url)
            m3u8url_dict = find_m3u8url(url)
            save(m3u8url_dict)
        except:
            pass
Is this what you meant? It runs, but produces no results at all.

Found the problem, thanks~~~
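For what it's worth, a bare except: pass is also why nothing told you what went wrong: it swallows every error, including bugs in find_m3u8url itself. A small variation of the same loop (it assumes the functions defined above; traceback is from the standard library) at least reports what was skipped:

import traceback

with open('edu_urls.txt') as f:
    for url in f:
        url = url.strip()
        try:
            m3u8url_dict = find_m3u8url(url)
            save(m3u8url_dict)
        except Exception:
            print(f'skipped {url}:')
            traceback.print_exc()    # show why this url failed instead of failing silently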
三一王 posted on 2020-12-24 16:00:
    Is this what you meant? It runs, but produces no results at all.
def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('user-agent',
                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36')
    try:
        response = urllib.request.urlopen(req)
    except Exception:
        print(f'{url} Fail')
        return None    # tell the caller this url could not be fetched
    html = response.read()
    return html
def find_m3u8url(url):
    html = url_open(url)
    if html is None:    # check for failure before trying to decode
        return None
    html = html.decode('utf-8')
    m3u8url_dict = {}
    url_a = html.find('file: "http:')
    if url_a != -1:
        url_a += 12
        url_b = html.find('.m3u8', url_a, url_a + 300)
        if url_b != -1:
            # percent-encode the url, which may contain Chinese characters
            m3u8url = 'http:' + urllib.request.quote(html[url_a:url_b]) + '.m3u8'
            print(m3u8url)
            m3u8url_dict[url] = m3u8url
    return m3u8url_dict
with open('edu_urls.txt') as f:
    for url in f:
        url = url.strip()
        m3u8url_dict = find_m3u8url(url)
        if m3u8url_dict is None:
            continue    # skip the unreachable url and move on to the next one
        save(m3u8url_dict)
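If you would rather not swallow every Exception, you can catch only the network-related errors instead. A sketch under that assumption (fetch_or_none is a hypothetical helper name, and the 10-second timeout is arbitrary):

import socket
import urllib.error
import urllib.request

def fetch_or_none(url, timeout=10):
    # Return the page bytes, or None if the url is unreachable.
    req = urllib.request.Request(url, headers={'user-agent': 'Mozilla/5.0'})
    try:
        return urllib.request.urlopen(req, timeout=timeout).read()
    except (urllib.error.URLError, socket.timeout) as e:
        # URLError also covers HTTPError and DNS/connection failures;
        # socket.timeout covers a server that accepts but never answers
        print(f'skipping {url}: {e}')
        return None

Catching the narrower exceptions means a genuine bug in your own parsing code still surfaces instead of being silently treated as an unreachable url.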