Scraping Jianshu with Scrapy and Selenium
spider:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from js_spi.items import ArticleItem


class JsSpider(CrawlSpider):
    name = 'js'
    allowed_domains = ['jianshu.com']
    start_urls = ['https://www.jianshu.com/']

    rules = (
        # Article URLs look like /p/<12-character slug>; follow every one found.
        Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}.*'), callback='parse_page', follow=True),
    )

    def parse_page(self, response):
        title = response.xpath('//section[@class="ouvJEz"]/h1/text()').get()
        author = response.xpath('//span[@class="FxYr8x"]/a/text()').get()
        edit_time = response.xpath('//div[@class="s-dsoj"]//time/text()').get()
        content = response.xpath('//article[@class="_2rhmJa"]').getall()
        fav_count = response.xpath('//span[@class="_1LOh_5"]/text()').get()
        # An article can carry several topic tags; join them with '|'.
        text_type = '|'.join(response.xpath('//a[@class="_3s5t0Q _1OhGeD"]/span/text()').getall())
        item = ArticleItem(title=title,
                           author=author,
                           edit_time=edit_time,
                           content=content,
                           fav_count=fav_count,
                           text_type=text_type)
        yield item
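The class names in those XPath expressions (ouvJEz, FxYr8x, _2rhmJa, and so on) are generated by Jianshu's front-end build and change whenever the site is redeployed, so treat the selectors as a snapshot. A slightly more defensive variant of parse_page (my sketch, not from the original post) skips pages where the title selector no longer matches instead of yielding items full of None:

    def parse_page(self, response):
        title = response.xpath('//section[@class="ouvJEz"]/h1/text()').get()
        if title is None:
            # Selector missed: the markup has probably changed. Log and bail
            # out rather than emitting an empty item.
            self.logger.warning('title selector matched nothing on %s', response.url)
            return
        yield ArticleItem(
            title=title,
            author=response.xpath('//span[@class="FxYr8x"]/a/text()').get(),
            edit_time=response.xpath('//div[@class="s-dsoj"]//time/text()').get(),
            content=response.xpath('//article[@class="_2rhmJa"]').getall(),
            fav_count=response.xpath('//span[@class="_1LOh_5"]/text()').get(),
            text_type='|'.join(response.xpath('//a[@class="_3s5t0Q _1OhGeD"]/span/text()').getall()),
        )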
item:
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class ArticleItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    author = scrapy.Field()
    edit_time = scrapy.Field()
    content = scrapy.Field()
    fav_count = scrapy.Field()
    text_type = scrapy.Field()
    # article_id = scrapy.Field()
    # origin_url = scrapy.Field()
middleware:
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

import time

from scrapy import signals
from scrapy.http.response.html import HtmlResponse
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException


class SeleniumDownloadMiddleware(object):
    def __init__(self):
        self.driver = webdriver.Chrome(executable_path=r'C:\ChromeDriver\chromedriver.exe')

    def process_request(self, request, spider):
        self.driver.get(request.url)
        time.sleep(1)
        # Click the "show more" button repeatedly until it disappears.
        # find_element_by_class_name raises NoSuchElementException once the
        # button is gone, which is what actually ends the loop; the original
        # `if not show_more: break` could never fire, because a found
        # WebElement is always truthy.
        while True:
            try:
                show_more = self.driver.find_element_by_class_name('H7E3vT')
            except NoSuchElementException:
                break
            show_more.click()
            time.sleep(0.3)
        # Hand the fully rendered page back to Scrapy as an HtmlResponse.
        return HtmlResponse(url=request.url,
                            body=self.driver.page_source,
                            request=request,
                            encoding='utf-8',
                            status=200)
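One thing this middleware never does is quit Chrome, so a browser process is left behind after every crawl. A minimal sketch of the same class with a clean shutdown, using Scrapy's standard from_crawler hook and the spider_closed signal (my addition, not part of the original post; it reuses the imports from the file above):

    class SeleniumDownloadMiddleware(object):
        def __init__(self):
            self.driver = webdriver.Chrome(executable_path=r'C:\ChromeDriver\chromedriver.exe')

        @classmethod
        def from_crawler(cls, crawler):
            # Scrapy calls this to build the middleware; use it to subscribe to signals.
            middleware = cls()
            crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
            return middleware

        def spider_closed(self, spider):
            # Shut the browser down when the crawl finishes.
            self.driver.quit()

        # process_request is unchanged from the version above.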
pipeline:
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

from scrapy.exporters import JsonLinesItemExporter


class JsSpiPipeline(object):
    def open_spider(self, spider):
        self.js = open('./js.json', 'wb')
        self.exporter = JsonLinesItemExporter(self.js, ensure_ascii=False, encoding='utf-8')

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item

    def close_spider(self, spider):
        self.js.close()
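JsonLinesItemExporter writes one JSON object per line, so the output can be consumed incrementally without loading the whole file. Reading it back is straightforward (a small sketch; js.json is the path hard-coded in the pipeline above):

    import json

    with open('./js.json', encoding='utf-8') as f:
        for line in f:
            article = json.loads(line)  # one scraped article per line
            print(article['title'], article['author'])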
settings:
# -*- coding: utf-8 -*-
# Scrapy settings for js_spi project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'js_spi'
SPIDER_MODULES = ['js_spi.spiders']
NEWSPIDER_MODULE = 'js_spi.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'js_spi (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#     'js_spi.middlewares.SeleniumDownloadMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'js_spi.middlewares.SeleniumDownloadMiddleware': 543,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'js_spi.pipelines.JsSpiPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
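With the middleware and pipeline registered, the crawl is started with `scrapy crawl js` from the project root. A small runner script built on Scrapy's CrawlerProcess works too (a sketch; it assumes the spider module lives at js_spi/spiders/js.py):

    # run.py -- programmatic equivalent of `scrapy crawl js`
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    from js_spi.spiders.js import JsSpider  # assumed module path

    if __name__ == '__main__':
        process = CrawlerProcess(get_project_settings())
        process.crawl(JsSpider)
        process.start()  # blocks until the crawl finishes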