A Simple Application of Python's Scrapy Crawler Framework


load_mzitu\mzitu\items.py

# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class MzituItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()       # gallery (image set) title, later used as the folder name
    image_url = scrapy.Field()  # list of image URLs belonging to the set
    url = scrapy.Field()        # gallery page URL, later sent as the Referer
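
For reference, the spider (meizi.py, shown further below) fills these fields roughly as follows; the values here are only illustrative, and the two URLs are the sample links quoted later in this article:

item = MzituItem()
item['name'] = u'gallery title'                                  # folder name for the set
item['url'] = 'http://www.mzitu.com/156427'                      # gallery page, sent as Referer
item['image_url'] = ['http://i.meizitu.net/2018/10/13c02.jpg']   # image URLs in the set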

load_mzitu\mzitu\middlewares.py

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class MzituSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

# Anti-hotlinking middleware: attach the Referer header the image server expects
class MeiZiTu(object):

    def process_request(self, request, spider):
        '''Set the headers for the request (attach the Referer)
        :param request: the request being processed
        :param spider: the spider object
        :return: None
        '''
        referer = request.meta.get('referer', None)
        if referer:
            request.headers['referer'] = referer
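
The middleware above only adds a Referer header when the request's meta carries one. A minimal sketch of how a request hands the referer over (this mirrors what the pipeline below does; the URL is just the sample gallery link quoted later in the article):

# Any Request whose meta contains 'referer' gets the header attached by
# MeiZiTu.process_request before the download happens.
yield Request(img_url, meta={'referer': 'http://www.mzitu.com/156427'})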

load_mzitu\mzitu\pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Image pipeline
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy import Request
import re
import urllib
import uuid

class MzituPipeline(ImagesPipeline):
    # Override the media request generation
    def get_media_requests(self, item, info):
        """
        :param item: the item yielded by meizi.py
        :param info:
        :return:
        """
        # item['image_url'] is the list of image URLs returned by meizi.py

        for img_url in item['image_url']:
            # item['url'] is the gallery page URL (a single string) returned by meizi.py;
            # it is passed along as the Referer for the anti-hotlinking middleware
            referer = item['url']
            x = uuid.uuid1()
            # Also save a copy of the gallery page (named with a uuid); note that this fetches
            # the HTML page itself, not an image
            urllib.urlretrieve(referer, r'F:\scrapy\MZITU\%s.jpg' % x)
            # These Requests are handled by the pipeline (file_path decides the storage path);
            # meta passes the item and referer along to file_path
            yield Request(img_url, meta={'item': item, 'referer': referer})

    # Override the file path
    def file_path(self, request, response=None, info=None):
        """
        :param request: each image download request in the pipeline
        :param response:
        :param info:
        :return: the per-gallery storage path, passed on to item_completed via results
        """
        # Receive the item passed in via get_media_requests, i.e. the item from meizi.py
        item = request.meta['item']
        # The gallery name (used as the folder name)
        file_name = item['name']
        # The name may contain characters that are illegal in Windows folder names, so clean it
        file_name = strip(file_name)
        # Derive each image's file name from the last segment of its URL
        img_name = request.url.split('/')[-1]
        # Combine them into the concrete storage path for this image
        file_img_path = 'full/{}/{}'.format(file_name, img_name)
        # print file_img_path
        return file_img_path

    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        return item

def strip(path):
    """
    :param path: folder name that needs cleaning
    :return: the string with characters illegal in Windows folder names removed
    """
    # re.sub(pattern, repl, string, count=0, flags=0) replaces every substring matched by the
    # pattern; count must be a non-negative integer, and the default 0 means replace all matches.
    # If nothing matches, the string is returned unchanged.
    # Illegal characters are replaced with the empty string ''
    path = re.sub(r'[?\\*|“<>:/]', '', str(path))
    return path


if __name__=="__main__":
    #没什么意思 测试下strip好不好用
    a = '我是一个?\*|“<>:/错误的字符串'
    print(strip(a))
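
To make the resulting layout concrete, here is roughly what file_path returns for one of the sample image URLs from this article (the gallery name is made up); the image then ends up under IMAGES_STORE, which is configured in settings.py below:

# Illustrative only: with item['name'] = 'some-gallery' the image is stored as
# <IMAGES_STORE>/full/some-gallery/13c02.jpg
img_url = 'http://i.meizitu.net/2018/10/13c02.jpg'
img_name = img_url.split('/')[-1]                          # '13c02.jpg'
file_img_path = 'full/{}/{}'.format(strip('some-gallery'), img_name)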

load_mzitu\mzitu\settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for mzitu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'mzitu'

SPIDER_MODULES = ['mzitu.spiders']
NEWSPIDER_MODULE = 'mzitu.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mzitu (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'mzitu.middlewares.MzituSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'mzitu.middlewares.MeiZiTu':5,
}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'mzitu.pipelines.MzituPipeline': 1,
}
IMAGES_STORE = 'F:/scrapy/load_mzitu/mztu'
# Avoid re-downloading images fetched within the last 10 days
IMAGES_EXPIRES = 10
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

load_mzitu\mzitu\spiders\meizi.py

# -*- coding: utf-8 -*-
import scrapy

from mzitu.items import MzituItem
from scrapy.spiders import CrawlSpider,Rule
from scrapy.linkextractors import LinkExtractor
### Important: do NOT define a parse() method here, otherwise parser_item will never be called,
### because CrawlSpider already uses parse() by default to handle the first responses.
# The goal of this spider is to find every URL matching the rule, follow it, and collect the image links
class MeiziSpider(CrawlSpider):
    name = "meizi"
    allowed_domains = ["mzitu.com"]
    start_urls = ['http://www.mzitu.com/']
    # A class-level list shared by all callbacks, used to collect the image URLs
    img_urls = []
    # Crawl rule: follow every URL matching the pattern below ('allow' permits, 'deny' excludes),
    # starting from the source of the start_urls page and then recursively from followed pages
    rules = (
        Rule(LinkExtractor(allow=(r'http://www.mzitu.com/\d{6}',), deny=(r'http://www.mzitu.com/\s',)),
             callback='parser_item', follow=True),
    )

    def parser_item(self, response):
        item = MzituItem()
        # Get the gallery name. extract_first(default="N/A") would return the first element of the
        # xpath result (or "N/A" if nothing matched); string slicing is used here instead.
        name = response.selector.xpath('/html/body/div[2]/div[1]/div[1]/text()[3]').extract()
        item['name'] = name[0][3:-1]
        item['url'] = response.url
        all_page = response.selector.xpath('/html/body/div[2]/div[1]/div[4]/a[5]/span/text()').extract()
        for page in range(1, int(all_page[0]) + 1):
            # Build the URL of each photo page in the gallery
            url = response.url + '/' + str(page)
            yield scrapy.Request(url=url, callback=self.img_url)
        # Once the requests above have been processed, img_url (below) has appended every image URL
        # to the shared list, so put the list into the item
        item['image_url'] = self.img_urls
        yield item

    def img_url(self, response):
        urls = response.selector.xpath('/html/body/div[2]/div[1]/div[3]/p/a/img/@src').extract()
        # Some pages hold more than one image, so loop over everything found on the page
        print "--------------------------------------------------\n"
        for img_url in urls:
            # x = uuid.uuid1()
            with open("data.txt", "a+") as f:
                f.write(img_url + "\n")
            print img_url + "    ===>>download"
            self.img_urls.append(img_url)
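
A quick way to see what the Rule above will follow: gallery pages are the six-digit URLs, such as the sample link quoted in begin.py below. A minimal standalone check (illustrative, not part of the project):

import re
# Prints True: http://www.mzitu.com/156427 matches the gallery pattern used in the Rule
print re.match(r'http://www.mzitu.com/\d{6}', 'http://www.mzitu.com/156427') is not None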

load_mzitu\begin.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/10/30 19:28
# @Author  : Yu
# @Site    : 
# @File    : begin.py
from scrapy import cmdline
cmdline.execute(["scrapy","crawl","meizi"])

#http://i.meizitu.net/2015/05/29m02.jpg
#http://www.mzitu.com/156427
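
begin.py is only a convenience wrapper; running it is equivalent to executing the crawl from the project root:

scrapy crawl meizi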

After running begin.py, every image link the spider scrapes is written to data.txt.
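
Each line of data.txt is a single image URL, for example (using the sample links that appear elsewhere in this article):

http://i.meizitu.net/2015/05/29m02.jpg
http://i.meizitu.net/2018/10/13c02.jpg
http://i.meizitu.net/2018/11/06c35.jpg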

load_mzitu\download.py

import urllib2
import os


#img_url = "https://p.ssl.qhimg.com/dm/48_48_100/t017aee03b28107657b.jpg"

img_url="http://i.meizitu.net/2018/10/13c02.jpg"
img="http://i.meizitu.net/2018/11/06c35.jpg"

my_headers={
    "User-Agent":"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Mobile Safari/537.36",
   'Referer':'http://m.mzitu.com/15309'}

def read_txt(path):
    mt = []
    with open(path, 'r') as f:
        for t in f.readlines():
            mt.append(t)
    return mt

def down_load(img,my_headers):
    request =  urllib2.Request(url=img, headers=my_headers)
    response = urllib2.urlopen(request)
    pic=response.read()
    path_name=img.split('/')[-1]
    path_name=path_name.replace('\n','')
    new_name="pic/" + path_name
    
    if not os.path.exists(new_name):
        with open("%s" % new_name, "wb") as f:
            f.write(pic)
        
    print "downloading with urllib"
    
def main():
    path = "data.txt"
    mt = read_txt(path)
    print len(mt)       # total number of links
    print len(set(mt))  # number of unique links
    for f in mt:
        print f
        down_load(f, my_headers)
    print 'finish'.center(40, '-')

if __name__ == "__main__":
    main()

Running it downloads every image listed in data.txt into the local pic/ directory.
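
main() prints both len(mt) and len(set(mt)), so duplicate URLs in data.txt are visible but are still downloaded one by one; if you prefer to skip duplicates, a small variation of the loop (illustrative) is:

# De-duplicate the URL list before downloading
for f in sorted(set(mt)):
    print f
    down_load(f, my_headers)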

For full details, see GitHub.
