cuishitian77
Posted on 2018-2-6 03:24:53
Taking a look, hehe
hellopyth
Posted on 2018-2-6 22:52:10
Here to learn
回不去的鱼
Posted on 2018-2-6 23:33:07
Here to learn
hldn
Posted on 2018-2-7 14:44:47
Learning
ccksam
Posted on 2018-2-7 21:10:26
wow genius!
leyanqz
Posted on 2018-2-7 23:39:55
Really nice!
picklove
Posted on 2018-2-8 00:23:31
66666666666
1109353812
Posted on 2018-2-8 20:02:42
666
Ojia
Posted on 2018-2-10 16:38:22
Reply
xujingyu
Posted on 2018-2-10 21:42:17
lww31538
Posted on 2018-2-12 00:33:13

import os
import re

import requests


def get_urls(url, regex):
    """Return absolute URLs for every href inside the page area matched by regex."""
    urls = []
    base_url = 'http://desk.zol.com.cn'
    content = requests.get(url).text
    area = re.search(regex, content, re.S).group(0)
    tails = re.findall(r'href="(.*?)"', area)
    for tail in tails:
        urls.append(base_url + tail)
    return urls


def download_picture(url, count):
    """Fetch the full-size image on a detail page and save it as pic/<count>.<ext>."""
    target_dir = 'pic'
    if os.path.exists(target_dir):
        if not os.path.isdir(target_dir):
            # A plain file is squatting on the name; replace it with a directory.
            os.remove(target_dir)
            os.mkdir(target_dir)
    else:
        os.mkdir(target_dir)
    content = requests.get(url).text
    picture_url = re.search(r'<img id="bigImg" src="(.*?)"', content).group(1)
    picture = requests.get(picture_url).content
    suffix = re.sub(r'.*\.', '.', picture_url)  # keep the original file extension
    with open('pic/' + str(count) + suffix, 'wb') as f:
        f.write(picture)


def spider(url, count):
    """Walk one list page: visit each album, download every picture, return the new count."""
    # Album list; \s* allows for whitespace inside the class attribute.
    regex1 = r'<ul class="pic-list2\s*clearfix">.*?</ul>'
    # Thumbnail strip inside an album page.
    regex2 = r'<ul id="showImg".*?</ul>'
    urls = get_urls(url, regex1)
    for each_url in urls:
        picture_urls = get_urls(each_url, regex2)
        for each_picture_url in picture_urls:
            download_picture(each_picture_url, count)
            print('Downloading picture ' + str(count))
            count += 1
    return count


def get_next_page_url(url):
    """Follow the 'next page' link of a list page."""
    base_url = 'http://desk.zol.com.cn'
    content = requests.get(url).text
    tail = re.search(r'<a id="pageNext" href="(.*?)"', content).group(1)
    return base_url + tail


if __name__ == '__main__':
    url = 'http://desk.zol.com.cn/meinv/'
    count = 1
    count = spider(url, count)
    while True:
        key = input('Input y/Y to download the next page, or anything else to exit: ')
        if re.match(r'Y', key, re.I):
            url = get_next_page_url(url)
            count = spider(url, count)
        else:
            break
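
One caveat for anyone trying this today: desk.zol.com.cn may reject bare requests that carry no browser-style headers. A minimal sketch of a fetch wrapper you could drop in; the `fetch` helper and the User-Agent string are my own additions for illustration, not part of the script above:

import requests

# Example browser-style header; the exact UA string is an assumption,
# any reasonably recent one should work.
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}

def fetch(url):
    # Send the shared headers, bound the wait with a timeout, and fail
    # loudly on 4xx/5xx instead of regex-parsing an error page.
    response = requests.get(url, headers=HEADERS, timeout=10)
    response.raise_for_status()
    return response

Swapping each bare requests.get(url) in the script for fetch(url), then reading .text or .content from the result as before, leaves the rest of the code unchanged.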
koalary
Posted on 2018-2-12 17:11:59
Great!
石头大大叔
Posted on 2018-2-24 14:10:50
Here to learn~
VIPLV
Posted on 2018-2-25 16:30:31
Looking for an explanation
mlc123
Posted on 2018-2-25 16:47:56
Thanks for sharing
Echolulu
Posted on 2018-2-25 18:06:26
k
来自他方
Posted on 2018-2-25 18:07:02
Learning...
不爱洗澡的阿毛
Posted on 2018-2-25 18:11:00
Thanks for sharing
lii4562
Posted on 2018-2-25 18:25:36
zhnega
keyi198707
Posted on 2018-2-26 15:10:36
Here to learn