萌新淘宝定向爬取商品信息遇到了问题
import requests
import re


def getHTMLText(url):
    """Fetch *url* and return the decoded page text, or '' on any failure.

    A cookie (left blank here — paste your logged-in Taobao cookie) and a
    browser User-Agent are sent so Taobao serves the real search page
    instead of a login redirect.
    """
    # BUG FIX: the original paste dropped the `kv =` assignment, leaving the
    # headers dict as an orphaned expression and `kv` undefined below.
    kv = {"cookie": '',
          "user-agent": "Mozilla/5.0"}
    try:
        r = requests.get(url, headers=kv, timeout=30)
        r.raise_for_status()              # 4xx/5xx -> HTTPError -> except below
        r.encoding = r.apparent_encoding  # decode using the sniffed charset
        return r.text
    except requests.RequestException:     # narrowed from a bare `except:`
        # Best-effort: the caller treats '' as "page unavailable".
        return ""
def parsePage(ilt, html):
    """Parse Taobao search-result HTML, appending [price, title] pairs to *ilt*.

    Prices and titles are pulled out of the page's embedded JSON with
    regexes; *ilt* is mutated in place and nothing is returned.
    """
    # BUG FIX: the original called .split() on the whole findall() result
    # (lists have no .split), passed no argument to ilt.append(), and used
    # eval() on scraped text (a code-injection risk).  Each match is now
    # split individually and the surrounding quotes stripped.  The broad
    # try/except that hid those errors is gone — main() already guards
    # each page fetch.
    plt = re.findall(r'"view_price":"[\d.]*"', html)
    tlt = re.findall(r'"raw_title":".*?"', html)
    for price_match, title_match in zip(plt, tlt):
        price = price_match.split(':', 1)[1].strip('"')
        title = title_match.split(':', 1)[1].strip('"')
        ilt.append([price, title])
def printGoodsList(ilt):
    """Print a numbered table of the [price, title] pairs collected in *ilt*."""
    tplt = "{:4}\t{:8}\t{:16}"
    print(tplt.format("序号", "价格", "商品名"))
    # BUG FIX: the original printed `g` twice instead of the price (g[0])
    # and the title (g[1]); enumerate replaces the manual counter.
    for count, g in enumerate(ilt, start=1):
        print(tplt.format(count, g[0], g[1]))
def main():
    """Crawl `depth` pages of Taobao search results for `goods` and print them."""
    goods = '书包'
    depth = 2  # number of result pages; each Taobao page holds 44 items
    start_url = 'http://s.taobao.com/search?q=' + goods
    # Annotated to satisfy the type checker, which was the poster's actual
    # error: "Need type annotation for 'infoList'".
    infoList: list = []
    for i in range(depth):
        try:
            url = start_url + '&s=' + str(44 * i)
            html = getHTMLText(url)
            parsePage(infoList, html)
        except Exception:  # best-effort: one failed page shouldn't abort the crawl
            continue
    printGoodsList(infoList)
main()
# Poster's note (translated): "Above is the code.  I did look up and fill in
# the cookie when running it, but it reports: line 38: Need type annotation
# for 'infoList' (hint: infoList: List[<type>] = ...)."


def getHTMLText(url):
    """Fetch *url* and return the decoded page text, or '' on any failure."""
    # Poster's correction: the `kv =` assignment had been accidentally
    # deleted together with the cookie value in the first paste.
    kv = {"cookie": '',
          "user-agent": "Mozilla/5.0"}
    try:
        r = requests.get(url, headers=kv, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:  # narrowed from a bare `except:`
        return ""
# Poster's note (translated): "Sorry — just now I deleted `kv = {}` along
# with the cookie."
你代码发的有点乱,缩进也不对
import requests
import re
def getHTMLText(url):
    """Fetch *url* and return the decoded page text, or '' on failure.

    Sends a (user-supplied) cookie and a browser User-Agent so Taobao
    returns the real search page instead of a login redirect.
    """
    kv = {"cookie": '',  # paste your logged-in Taobao cookie string here
          "user-agent": "Mozilla/5.0"}
    try:
        r = requests.get(url, headers=kv, timeout=30)
        r.raise_for_status()              # 4xx/5xx -> HTTPError -> except below
        r.encoding = r.apparent_encoding  # decode using the sniffed charset
        return r.text
    except requests.RequestException:     # narrowed from a bare `except:`
        # Best-effort: the caller treats '' as "page unavailable".
        return ""
def parsePage(ilt, html):
    """Parse Taobao search-result HTML, appending [price, title] pairs to *ilt*.

    Prices and titles are pulled out of the page's embedded JSON with
    regexes; *ilt* is mutated in place and nothing is returned.
    """
    # BUG FIX: the original called .split() on the whole findall() result
    # (lists have no .split), passed no argument to ilt.append(), and used
    # eval() on scraped text (a code-injection risk).  Each match is now
    # split individually and the surrounding quotes stripped.  The broad
    # try/except that hid those errors is gone — main() already guards
    # each page fetch.
    plt = re.findall(r'"view_price":"[\d.]*"', html)
    tlt = re.findall(r'"raw_title":".*?"', html)
    for price_match, title_match in zip(plt, tlt):
        price = price_match.split(':', 1)[1].strip('"')
        title = title_match.split(':', 1)[1].strip('"')
        ilt.append([price, title])
def printGoodsList(ilt):
    """Print a numbered table of the [price, title] pairs collected in *ilt*."""
    tplt = "{:4}\t{:8}\t{:16}"
    print(tplt.format("序号", "价格", "商品名"))
    # BUG FIX: the original printed `g` twice instead of the price (g[0])
    # and the title (g[1]); enumerate replaces the manual counter.
    for count, g in enumerate(ilt, start=1):
        print(tplt.format(count, g[0], g[1]))
def main():
    """Crawl `depth` pages of Taobao search results for `goods` and print them."""
    goods = '书包'
    depth = 2  # number of result pages; each Taobao page holds 44 items
    start_url = 'http://s.taobao.com/search?q=' + goods
    # Annotated to satisfy the type checker, which was the poster's actual
    # error: "Need type annotation for 'infoList'".
    infoList: list = []
    for i in range(depth):
        try:
            url = start_url + '&s=' + str(44 * i)
            html = getHTMLText(url)
            parsePage(infoList, html)
        except Exception:  # best-effort: one failed page shouldn't abort the crawl
            continue
    printGoodsList(infoList)


if __name__ == "__main__":  # guard so importing the module doesn't start a crawl
    main()
页:
[1]