Scraping Autohome images with Scrapy: advanced pipeline techniques

1: Downloading images without Scrapy's built-in image pipeline

The spider:

# -*- coding: utf-8 -*-
import scrapy
from car_spi.items import CarSpiItem


class CarSpider(scrapy.Spider):
    name = 'car'
    allowed_domains = ['car.autohome.com.cn']
    start_urls = ['https://car.autohome.com.cn/pic/series/5146.html#pvareaid=2042214']

    def parse(self, response):
        ui_boxs = response.xpath('//div[@class="uibox"]')
        for ui_box in ui_boxs:
            title = ui_box.xpath('.//div[@class="uibox-title"]/a/text()').get()
            urls = ui_box.xpath('.//ul/li/a/img/@src').getall()
            urls = list(map(lambda url: response.urljoin(url), urls))  # apply the lambda to every element; map() returns a map object, hence the list() wrapper
            item = CarSpiItem(title=title, urls=urls)
            yield item
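
The map/lambda line above can equally be written as a list comprehension, which skips the list(map(...)) wrapping:

urls = [response.urljoin(url) for url in urls]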

pipelines:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
from urllib import request

class CarSpiPipeline(object):
    def __init__(self):
        # Create an images/ directory next to the project package.
        self.path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images')
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    def process_item(self, item, spider):
        title = item['title']
        urls = item['urls']

        # One sub-directory per category title.
        title_path = os.path.join(self.path, title)
        if not os.path.exists(title_path):
            os.makedirs(title_path)
        for url in urls:
            # Use the last underscore-separated segment of the URL as the file name.
            image_name = url.split('_')[-1]
            request.urlretrieve(url, os.path.join(title_path, image_name))
        return item
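
Note that urllib's request.urlretrieve is a blocking call, so the images are downloaded one at a time inside the pipeline while Scrapy's asynchronous engine waits. This is why this hand-written approach is slower than the built-in ImagesPipeline shown in part 2.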

items:

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class CarSpiItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    urls = scrapy.Field()

settings:

# -*- coding: utf-8 -*-

# Scrapy settings for car_spi project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import os

BOT_NAME = 'car_spi'

SPIDER_MODULES = ['car_spi.spiders']
NEWSPIDER_MODULE = 'car_spi.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'car_spi (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'car_spi.middlewares.CarSpiSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'car_spi.middlewares.CarSpiDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
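# The integer assigned to each pipeline is its order (0-1000); lower values run first.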
ITEM_PIPELINES = {
    'car_spi.pipelines.CarSpiPipeline': 300,
    # 'scrapy.pipelines.images.ImagesPipeline': 1,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []

2: Using Scrapy's built-in ImagesPipeline

Switching to the built-in ImagesPipeline only requires changes in three places:
1. Add two fields to the Item: file_urls and files; for image downloads use image_urls and images instead.
2. Set the download path in settings: FILES_STORE for files, IMAGES_STORE for images.
3. Change ITEM_PIPELINES: scrapy.pipelines.files.FilesPipeline: 1 for files,
or scrapy.pipelines.images.ImagesPipeline: 1 for images.

Finally, rename the urls field in the spider to image_urls. (Once ITEM_PIPELINES is switched to the built-in ImagesPipeline, the CarSpiPipeline class written earlier is never called, so it needs no changes.)

The spider:

# -*- coding: utf-8 -*-
import scrapy
from car_spi.items import CarSpiItem


class CarSpider(scrapy.Spider):
    name = 'car'
    allowed_domains = ['car.autohome.com.cn']
    start_urls = ['https://car.autohome.com.cn/pic/series/5146.html#pvareaid=2042214']

    def parse(self, response):
        ui_boxs = response.xpath('//div[@class="uibox"]')
        for ui_box in ui_boxs:
            title = ui_box.xpath('.//div[@class="uibox-title"]/a/text()').get()
            urls = ui_box.xpath('.//ul/li/a/img/@src').getall()
            urls = list(map(lambda url: response.urljoin(url), urls))  # apply the lambda to every element; map() returns a map object, hence the list() wrapper
            item = CarSpiItem(title=title, image_urls=urls)
            yield item

items:

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class CarSpiItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
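
After the downloads complete, the built-in pipeline populates the images field with one dict per downloaded image, containing (at least) the keys url, path and checksum.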

settings:

# -*- coding: utf-8 -*-

# Scrapy settings for car_spi project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import os

BOT_NAME = 'car_spi'

SPIDER_MODULES = ['car_spi.spiders']
NEWSPIDER_MODULE = 'car_spi.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'car_spi (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'car_spi.middlewares.CarSpiSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'car_spi.middlewares.CarSpiDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # 'car_spi.pipelines.CarSpiPipeline': 300,  # the hand-written pipeline; slower
    'scrapy.pipelines.images.ImagesPipeline': 1,  # Scrapy's built-in image download pipeline
}
IMAGES_STORE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images')
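# By default the ImagesPipeline saves each image as full/<sha1-of-url>.jpg under
# IMAGES_STORE; part 3 overrides this layout to group images by title.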

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images')  # careful: this is NOT the image download path

3: Overriding the ImagesPipeline's storage-path method to save downloaded images into per-category folders.

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
from urllib import request
from car_spi.settings import IMAGES_STORE
from scrapy.pipelines.images import ImagesPipeline


class CarSpiPipeline(object):
    def __init__(self):
        self.path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images')
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    def process_item(self, item, spider):
        title = item['title']
        urls = item['urls']

        title_path = os.path.join(self.path, title)
        if not os.path.exists(title_path):
            os.makedirs(title_path)
        for url in urls:
            image_name = url.split('_')[-1]
            request.urlretrieve(url, os.path.join(title_path, image_name))
        return item


class CarImagesPipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        # Called before the download requests are sent; in fact, this method is
        # what creates those download requests.
        request_objs = super(CarImagesPipeline, self).get_media_requests(item, info)
        # Attach the item to each request so that file_path() can read its title.
        for request_obj in request_objs:
            request_obj.item = item
        return request_objs

    def file_path(self, request, response=None, info=None):
        # Called when an image is about to be stored; returns the image's storage path.
        path = super(CarImagesPipeline, self).file_path(request, response, info)
        title = request.item.get('title')
        images_store = IMAGES_STORE
        title_path = os.path.join(images_store, title)
        if not os.path.exists(title_path):
            os.makedirs(title_path)
        # The default path looks like 'full/<sha1>.jpg'; strip the 'full/' prefix.
        image_name = path.replace("full/", "")
        image_path = os.path.join(title_path, image_name)
        return image_path
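
As an aside: since Scrapy 2.4, file_path() also receives the item directly through an item keyword argument, so attaching it to the request in get_media_requests() is no longer necessary. A minimal sketch under that assumption (not needed on older versions):

import os
from scrapy.pipelines.images import ImagesPipeline


class CarImagesPipeline(ImagesPipeline):
    # Sketch assuming Scrapy >= 2.4, where file_path() receives the item directly.
    def file_path(self, request, response=None, info=None, *, item=None):
        path = super().file_path(request, response=response, info=info, item=item)
        # Return a path relative to IMAGES_STORE; the files store creates the
        # per-title directories on its own, so no os.makedirs() is required.
        return os.path.join(item['title'], os.path.basename(path))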
The pipeline section of settings:
ITEM_PIPELINES = {
    # 'car_spi.pipelines.CarSpiPipeline': 300,  # the hand-written pipeline; slower
    # 'scrapy.pipelines.images.ImagesPipeline': 1,  # Scrapy's built-in image pipeline
    'car_spi.pipelines.CarImagesPipeline': 1,  # the pipeline with the overridden methods
}
IMAGES_STORE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images')
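
With everything in place, run the spider from the project root with the standard Scrapy command scrapy crawl car; the downloaded images end up grouped under images/<title>/.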