|
马上注册,结交更多好友,享用更多功能^_^
您需要 登录 才可以下载或查看,没有账号?立即注册
x
本帖最后由 cyy6666 于 2021-2-16 03:15 编辑
- import requests
- import re
- import json
- import random
def url_open(url, keyword, s):
    """Fetch one Taobao search-result page, sorted by sales (descending).

    url: the search endpoint, keyword: the query string, s: result offset
    (Taobao pages hold 44 items, so page N starts at s = N * 44).
    Returns the requests.Response object.
    """
    query = {'q': keyword, 'sort': 'sale-desc', 's': str(s)}
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75',
        'referer': 'https://s.taobao.com/',
        # NOTE(review): must be filled in with a fresh logged-in cookie,
        # otherwise Taobao serves a login page instead of results.
        'cookie': '',
    }
    response = requests.get(url, headers=request_headers, params=query)
    return response
def get_ip():
    """Scrape a free-proxy listing page and return a list of 'ip:port' strings.

    NOTE(review): the target URL is blank ('') — presumably the author removed
    the proxy-list site before posting; requests will raise MissingSchema
    until a real URL is supplied.
    """
    ua_only = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75'}
    page = requests.get('', headers=ua_only)
    # Match a dotted-quad IPv4 address followed by ':port' (anything up to '@').
    pattern = r'(?:(?:25[0-5]|2[0-4]\d|[01]{0,1}\d{0,1}\d)\.){3}(?:25[0-5]|2[0-4]\d|[01]{0,1}\d{0,1}\d):[^@]{1,}'
    return re.findall(pattern, page.text)
def proxy_url_open(url, ip, keyword, s):
    """Fetch one Taobao search-result page through a random proxy.

    url: the search endpoint; ip: list of 'ip:port' proxy strings (from
    get_ip()); keyword: the query string; s: result offset (44 per page).
    Retries forever with a freshly chosen proxy whenever the request fails,
    so a list with no working proxy will loop indefinitely.
    Returns the requests.Response object.
    """
    params = {'q': keyword, 'sort': 'sale-desc', 's': str(s)}
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75',
        'referer': 'https://s.taobao.com/',
        'cookie': '',
    }
    proxies = {'http': random.choice(ip)}
    while True:
        try:
            r = requests.get(url, headers=headers, params=params, proxies=proxies)
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit and made the retry loop impossible to break with Ctrl-C.
        # Only network-level failures should trigger a proxy rotation.
        except requests.RequestException:
            print('wait')
            proxies = {'http': random.choice(ip)}
        else:
            return r
def _parse_page_config(text):
    """Extract and decode the ``g_page_config`` JSON blob from a search page.

    Returns the parsed dict, or None when the blob is missing (typically
    because the cookie expired and Taobao served a login page instead).
    """
    m = re.search(r'g_page_config = (.{,});\n', text)
    if m is None:
        return None
    return json.loads(m.group(1))


def _count_sales(auctions, key, count):
    """Return *count* plus the sales figure of every listing whose title contains *key*."""
    for item in auctions:
        if key in item['title']:
            # Was r'\d{,}' (== \d*), which matches the empty string when
            # view_sales does not start with a digit and then crashes int('').
            m = re.search(r'\d+', item['view_sales'])
            if m:
                count += int(m.group())
    return count


def taobao():
    """Interactively tally Taobao sales for listings matching a keyword.

    Prompts for proxy usage, a search keyword, and a title-filter keyword,
    then walks every result page accumulating the sales count and printing
    a running 'page:total' line per page.
    """
    proxy = int(input("1使用代理;0不使用代理:"))
    keyword = input('搜索关键词:')
    key = input('统计关键词:')
    url = 'https://s.taobao.com/search'
    ip = get_ip()
    count = 0

    def fetch(offset):
        # Route through a proxy only when the user asked for one.
        if proxy == 0:
            return url_open(url, keyword, offset)
        return proxy_url_open(url, ip, keyword, offset)

    page = _parse_page_config(fetch(0).text)
    pages = int(page['mods']['pager']['data']['totalPage'])
    print(pages)
    auctions = page['mods']['itemlist']['data']['auctions']
    for i in range(1, pages):
        count = _count_sales(auctions, key, count)
        print(str(i) + ':' + str(count))
        page = _parse_page_config(fetch(i * 44).text)
        auctions = page['mods']['itemlist']['data']['auctions']
    # Tally the final page (the loop stops one page short on purpose).
    count = _count_sales(auctions, key, count)
    print(str(pages) + ':' + str(count))


if __name__ == '__main__':
    taobao()
复制代码
层次分析
- import re
- import json
def geten(le):
    """Return the tree marker for a leaf entry: *le* spaces then '-'."""
    indent = ' ' * le
    return indent + '-'
def getex(le):
    """Return the tree marker for an expandable (dict) entry: *le* spaces then '+'."""
    indent = ' ' * le
    return indent + '+'
def find(ta, le):
    """Print the key hierarchy of nested dict *ta*, one key per line.

    Leaf keys are prefixed with geten(le) ('-'), dict-valued keys with
    getex(le) ('+'), and each nested dict is recursed into one indent
    level deeper.

    ta: the (nested) dict to walk; le: current indentation depth.
    """
    for name in ta:
        child = ta[name]
        # isinstance instead of `type(...) is not dict`: idiomatic, and it
        # also recurses correctly into dict subclasses (e.g. OrderedDict).
        if isinstance(child, dict):
            print(getex(le) + name)
            find(child, le + 1)
        else:
            print(geten(le) + name)
def taobaotest():
    """Load a saved Taobao search page from taobao.txt and print its key tree.

    Reads the page dump, pulls out the ``g_page_config`` JSON blob, and
    hands the parsed dict to find() for hierarchical display.
    """
    with open('taobao.txt', 'r', encoding='utf-8') as fh:
        match = re.search(r'g_page_config = (.{,});\n', fh.read())
    data = json.loads(match.group(1))
    find(data, 0)


if __name__ == '__main__':
    taobaotest()
复制代码
现在淘宝需要cookie,需要我们登录账号,刷新页面就可以获取cookie,cookie很快就会失效,需要退出账号重新登录
更新cookie还挺麻烦的,不知道大佬们有什么好方法
本来想发图的,好像没有这个选项
新人,请大佬指教 |
|