# python抓取网页图片示例 (example: scraping web images with a Python crawler)
#-*- encoding: utf-8 -*-
'''
created on 2014-4-24
@author: leon wong
'''
import urllib2
import urllib
import re
import time
import os
import uuid
#获取二级页面url
def findurl2(html):
    """Extract second-level gallery-page URLs from a listing page's HTML.

    Two URL shapes used by tuchong.com are matched:
      - http://tuchong.com/<uid>/<post>/
      - http://<user>.tuchong.com/<post>/  (excluding the photos. image subdomain)

    Returns the matched URLs deduplicated, preserving first-occurrence order.
    """
    # Dots are escaped so '.' cannot match arbitrary characters (the original
    # pattern used bare dots); the (?<!photos) lookbehind skips the
    # image-hosting subdomain.
    pattern = r'http://tuchong\.com/\d+/\d+/|http://\w+(?<!photos)\.tuchong\.com/\d+/'
    matches = re.findall(pattern, html)
    # Deduplicate while keeping the order in which URLs first appeared
    # (O(n) instead of the original sort(key=list.index), which is O(n^2)).
    seen = set()
    unique_urls = []
    for url in matches:
        if url not in seen:
            seen.add(url)
            unique_urls.append(url)
    return unique_urls
#获取html文本
def gethtml(url):
    """Fetch *url* and return its body decoded from UTF-8."""
    response = urllib2.urlopen(url)
    raw = response.read()
    return raw.decode('utf-8')
#下载图片到本地
def download(html_page, pageno):
    """Find every full-size .jpg URL in *html_page* and save each one under
    d:\\tuchong\\<YYYY-M-D>\\<pageno>\\ with a uuid-based file name.

    Returns the result of the last urlretrieve call, or None when the page
    contains no matching images.
    """
    # Folder name is today's date, e.g. "2014-4-24".
    now = time.localtime(time.time())
    foldername = "%d-%d-%d" % (now.tm_year, now.tm_mon, now.tm_mday)
    # Full-size images live on the photos subdomain under an /f/ path.
    # Dots are escaped so they match literally.
    img_pattern = r'http://photos\.tuchong\.com/.+/f/.+\.jpg'
    imglist = re.findall(img_pattern, html_page)
    print(imglist)
    # BUG FIX: the original had 'download_img = none' (lowercase), a NameError.
    download_img = None
    # Destination directory is the same for every image on this page, so it is
    # computed once outside the loop.
    picpath = os.path.join('d:\\tuchong', foldername, str(pageno))
    for imgurl in imglist:
        if not os.path.exists(picpath):
            os.makedirs(picpath)
        # uuid1-based name avoids collisions between downloads.
        target = os.path.join(picpath, str(uuid.uuid1()) + '.jpg')
        print("the photos location is:" + target)
        # Download the image to the target path.
        download_img = urllib.urlretrieve(imgurl, target)
        # Be polite to the server: pause between downloads.
        time.sleep(1)
        print(imgurl)
    return download_img
# def callback(blocknum, blocksize, totalsize):
#     '''Progress callback for urlretrieve.
#     @blocknum:  number of data blocks downloaded so far
#     @blocksize: size of one data block
#     @totalsize: total size of the remote file
#     '''
#     print str(blocknum), str(blocksize), str(totalsize)
#     if blocknum * blocksize >= totalsize:
#         print 'download finished'
def quitit():
    # Say goodbye, then terminate the script with a success status.
    print("bye!")
    exit(0)
if __name__ == '__main__':
    # Interactive entry point: ask for a listing-page number, validate it,
    # then scrape every gallery linked from that page.
    print(''' *****************************************
** welcome to spider for tuchong **
** created on 2014-4-24 **
** @author: leon wong **
*****************************************''')
    pageno = raw_input("input the page number you want to scratch (1-100),please input 'quit' if you want to quit>")
    # Re-prompt until the input is an integer in [1, 100]; 'quit' exits.
    # BUG FIX: the original check only rejected values > 100, so it accepted 0
    # even though the prompt promises 1-100.
    while not pageno.isdigit() or not 1 <= int(pageno) <= 100:
        if pageno == 'quit':
            quitit()
        print("param is invalid , please try again.")
        pageno = raw_input("input the page number you want to scratch >")
    # Scrape the portrait ('人像') tag listing page on tuchong.com.
    html = gethtml("http://tuchong.com/tags/%e4%ba%ba%e5%83%8f/?page=" + str(pageno))
    detllst = findurl2(html)
    for detail in detllst:
        html2 = gethtml(detail)
        download(html2, pageno)
    print("finished.")