python抓取网页图片并放到指定文件夹
程序员文章站
2022-06-11 10:36:58
...
python抓取网页图片并放到指定文件夹
复制代码 代码如下:
# -*- coding=utf-8 -*-
import urllib2
import urllib
import socket
import os
import re
def Docment():
print u'把文件存在E:\Python\图(请输入数字或字母)'
h=raw_input()
path=u'E:\Python\图'+str(h)
if not os.path.exists(path):
os.makedirs(path)
return path
def getallurl(html):
    """Return every single-quoted href ending in .htm found in *html*.

    Only matches the exact pattern a href='...htm' (single quotes);
    double-quoted attributes are not captured.
    """
    pattern = re.compile(r"a href='(.*?\.htm)'")
    return pattern.findall(html)
def getHTML(url):
url=url
req_header = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
req_timeout = 20
html='cuowu'
try:
req = urllib2.Request(url,None,req_header)
resp = urllib2.urlopen(req,None,req_timeout)
html = resp.read()
except urllib2.URLError as e:
print e.message
except socket.timeout as e:
getHTML(url,fu)
return html
def getImg(html,path):
reg = r'img class=IMG_show border=0 src=(.*?\.jpg)'
imgre= re.compile(reg)
imgList = re.findall(imgre, html)
if imgList:
print 'ghasghg',path
for imgurl in imgList:
print imgurl
content2=urllib2.urlopen(imgurl).read()
with open(path+'/'+imgurl[-7:],'wb') as code:
code.write(content2)
else:
return 0
def getallurl(html):
    """Collect all single-quoted hrefs ending in .htm from *html*.

    NOTE(review): this re-binds a getallurl defined earlier in the file;
    the two definitions are identical, so the duplication is harmless but
    redundant.
    """
    return re.findall(r"a href='(.*?\.htm)'", html)
j=1
i=0
print u'请输入网址:'
ul=raw_input()
print u'开始下载'
print u'第'+str(j)+u'页'
html=getHTML(ul)
allList=getallurl(html)
path=Docment()
getImg(html,path)
while i for lis in allList:
l=lis[i]
url=r'http://www.umei.cc/p/gaoqing/rihan/'+lis
i=i+1
j=j+1
html=getHTML(url)
getImg(html,path)
print u'第'+str(j)+u'页'
else:
print u'下载完毕'
# -*- coding=utf-8 -*-
import urllib2
import urllib
import socket
import os
import re
def Docment():
print u'把文件存在E:\Python\图(请输入数字或字母)'
h=raw_input()
path=u'E:\Python\图'+str(h)
if not os.path.exists(path):
os.makedirs(path)
return path
def getallurl(html):
    """Extract every single-quoted .htm href from *html* as a list."""
    matcher = re.compile(r"a href='(.*?\.htm)'")
    return matcher.findall(html)
def getHTML(url):
url=url
req_header = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
req_timeout = 20
html='cuowu'
try:
req = urllib2.Request(url,None,req_header)
resp = urllib2.urlopen(req,None,req_timeout)
html = resp.read()
except urllib2.URLError as e:
print e.message
except socket.timeout as e:
getHTML(url,fu)
return html
def getImg(html,path):
reg = r'img class=IMG_show border=0 src=(.*?\.jpg)'
imgre= re.compile(reg)
imgList = re.findall(imgre, html)
if imgList:
print 'ghasghg',path
for imgurl in imgList:
print imgurl
content2=urllib2.urlopen(imgurl).read()
with open(path+'/'+imgurl[-7:],'wb') as code:
code.write(content2)
else:
return 0
def getallurl(html):
    """Return the list of single-quoted .htm hrefs found in *html*.

    NOTE(review): duplicate of the getallurl defined earlier in this
    file; the re-definition is redundant but behaviorally identical.
    """
    return re.findall(r"a href='(.*?\.htm)'", html)
j=1
i=0
print u'请输入网址:'
ul=raw_input()
print u'开始下载'
print u'第'+str(j)+u'页'
html=getHTML(ul)
allList=getallurl(html)
path=Docment()
getImg(html,path)
while i
l=lis[i]
url=r'http://www.umei.cc/p/gaoqing/rihan/'+lis
i=i+1
j=j+1
html=getHTML(url)
getImg(html,path)
print u'第'+str(j)+u'页'
else:
print u'下载完毕'
声明:本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有涉嫌抄袭侵权的内容,请联系admin@php.cn核实处理。
相关文章
相关视频
专题推荐
-
独孤九贱-php全栈开发教程
全栈 170W+
主讲:Peter-Zhu 轻松幽默、简短易学,非常适合PHP学习入门
-
玉女心经-web前端开发教程
入门 80W+
主讲:灭绝师太 由浅入深、明快简洁,非常适合前端学习入门
-
天龙八部-实战开发教程
实战 120W+
主讲:西门大官人 思路清晰、严谨规范,适合有一定web编程基础学习
上一篇: 深度优先搜索
下一篇: 钱还没挣够,已经掉价了
网友评论
文明上网理性发言,请遵守 新闻评论服务协议
我要评论