Python: A Walkthrough of Crawling Baidu COVID-19 Data with the Scrapy Framework
Preface
Out of idle curiosity, I wrote a crawler to fetch Baidu's COVID-19 data. Disclaimer: this is for research only. Also, the page will likely keep adding anti-scraping measures, so the XPath expressions below may need adjusting.
GitHub repository: code repository
This article is built mainly on the Scrapy framework.
Environment Setup
Just a few quick recommendations here.
Plugin Recommendation
First, a recommended Google Chrome extension: XPath Helper, which lets you check whether an XPath expression is correct directly in the browser.
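If you prefer to sanity-check an XPath from Python instead of in the browser, here is a minimal sketch using Scrapy's own Selector against saved page source. The file name page.html is hypothetical, and the class name is the one the spider uses later in this article; Baidu regenerates such class names between page releases, so treat it as an assumption:

from scrapy.selector import Selector

# page source saved from the browser (hypothetical file name)
with open('page.html', encoding='utf-8') as f:
    sel = Selector(text=f.read())

# the nationwide-summary XPath used later in this article; the class name is version-specific
print(sel.xpath("//div[contains(@class, 'virussummarysix_1-1-317_2zjjbj')]/text()").getall())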
Crawl Target
The page to crawl: the live-updating COVID-19 map (实时更新:新型冠状病毒肺炎疫情地图).
The crawl targets are the nationwide figures plus the figures for each province.
Project Creation
Create the project with the scrapy command:
scrapy startproject yqsj
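This generates the standard Scrapy project layout, which the rest of the article fills in file by file:

yqsj/
    scrapy.cfg
    yqsj/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py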
webdriver Deployment
I won't go over this again here; you can follow the deployment steps from my earlier article: Python 详解通过Scrapy框架实现爬取****全站热榜标题热词流程 (a walkthrough of crawling a site's trending titles with Scrapy).
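Once chromedriver is installed, a quick smoke test confirms it is wired up. This is a minimal sketch; the options and the executable_path mirror the ones the spider uses below, so adjust the path to wherever your chromedriver actually lives:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
# same driver path the spider uses below; change it to your own install location
browser = webdriver.Chrome(chrome_options=chrome_options,
                           executable_path="e:\\chromedriver_win32\\chromedriver.exe")
browser.get('https://voice.baidu.com/act/newpneumonia/newpneumonia#tab0')
print(browser.title)  # should print the epidemic-map page title
browser.quit()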
Project Code
Time to write some code. First, look at how the Baidu page presents the province data.
The page only shows the full province table after you click an "expand all" span, so when extracting the page source we need to open the page in an automated browser and click that button first (the downloader middleware below does exactly this). With that direction set, let's proceed step by step.
Item Definition
Define two item classes, YqsjProvinceItem and YqsjChinaItem, for the per-province data and the nationwide data respectively.
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy


class YqsjProvinceItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    location = scrapy.Field()
    new = scrapy.Field()
    exist = scrapy.Field()
    total = scrapy.Field()
    cure = scrapy.Field()
    dead = scrapy.Field()


class YqsjChinaItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # currently confirmed (现有确诊)
    exist_diagnosis = scrapy.Field()
    # asymptomatic (无症状)
    asymptomatic = scrapy.Field()
    # current suspected cases (现有疑似)
    exist_suspecte = scrapy.Field()
    # current severe cases (现有重症)
    exist_severe = scrapy.Field()
    # cumulative confirmed (累计确诊)
    cumulative_diagnosis = scrapy.Field()
    # imported from overseas (*输入)
    overseas_input = scrapy.Field()
    # cumulative cured (累计治愈)
    cumulative_cure = scrapy.Field()
    # cumulative deaths (累计死亡)
    cumulative_dead = scrapy.Field()
Middleware Definition
After the page opens, the middleware needs to click "expand all" once, as mentioned above.
Full code:
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
from scrapy.http import HtmlResponse
from selenium.common.exceptions import TimeoutException
from selenium.webdriver import ActionChains
import time


class YqsjSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class YqsjDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        # return None
        try:
            spider.browser.get(request.url)
            spider.browser.maximize_window()
            time.sleep(2)
            # click the "expand all" span so the full province table is rendered
            spider.browser.find_element_by_xpath("//*[@id='nationtable']/div/span").click()
            # ActionChains(spider.browser).click(searchButtonElement)
            time.sleep(5)
            return HtmlResponse(url=spider.browser.current_url, body=spider.browser.page_source,
                                encoding="utf-8", request=request)
        except TimeoutException as e:
            print('Timeout exception: {}'.format(e))
            spider.browser.execute_script('window.stop()')
        finally:
            spider.browser.close()

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
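One detail worth noting: the finally block in process_request closes the browser window as soon as the first response has been captured. With a single start URL that is fine, but if you extend the spider to more pages you would keep the browser alive in the middleware and shut it down when the crawl ends instead. A minimal sketch of that variant, assuming the close() call is removed from process_request:

import scrapy

class YqsjSpider(scrapy.Spider):
    name = 'yqsj'
    # ... __init__ and parse as in the spider below ...

    def closed(self, reason):
        # Scrapy calls this hook once the crawl finishes; quit the shared browser here
        self.browser.quit()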
Defining the Spider
The spider extracts the nationwide figures and the per-province figures. Full code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time   : 2021/11/7 22:05
# @Author : 至尊宝
# @Site   : 
# @File   : baidu_yq.py

import scrapy
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

from yqsj.items import YqsjChinaItem, YqsjProvinceItem


class YqsjSpider(scrapy.Spider):
    name = 'yqsj'
    # allowed_domains = ['blog.****.net']
    start_urls = ['https://voice.baidu.com/act/newpneumonia/newpneumonia#tab0']
    china_xpath = "//div[contains(@class, 'virussummarysix_1-1-317_2zjjbj')]/text()"
    province_xpath = "//*[@id='nationtable']/table/tbody/tr[{}]/td/text()"
    province_xpath_1 = "//*[@id='nationtable']/table/tbody/tr[{}]/td/div/span/text()"

    def __init__(self):
        chrome_options = Options()
        chrome_options.add_argument('--headless')  # run Chrome in headless mode
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--no-sandbox')
        self.browser = webdriver.Chrome(chrome_options=chrome_options,
                                        executable_path="e:\\chromedriver_win32\\chromedriver.exe")
        self.browser.set_page_load_timeout(30)

    def parse(self, response, **kwargs):
        country_info = response.xpath(self.china_xpath)
        yq_china = YqsjChinaItem()
        yq_china['exist_diagnosis'] = country_info[0].get()
        yq_china['asymptomatic'] = country_info[1].get()
        yq_china['exist_suspecte'] = country_info[2].get()
        yq_china['exist_severe'] = country_info[3].get()
        yq_china['cumulative_diagnosis'] = country_info[4].get()
        yq_china['overseas_input'] = country_info[5].get()
        yq_china['cumulative_cure'] = country_info[6].get()
        yq_china['cumulative_dead'] = country_info[7].get()
        yield yq_china

        # iterate over the province rows (rows 1 through 34)
        for x in range(1, 35):
            path = self.province_xpath.format(x)
            path1 = self.province_xpath_1.format(x)
            province_info = response.xpath(path)
            province_name = response.xpath(path1)
            yq_province = YqsjProvinceItem()
            yq_province['location'] = province_name.get()
            yq_province['new'] = province_info[0].get()
            yq_province['exist'] = province_info[1].get()
            yq_province['total'] = province_info[2].get()
            yq_province['cure'] = province_info[3].get()
            yq_province['dead'] = province_info[4].get()
            yield yq_province
Pipeline: Writing the Results to a Text File
The pipeline writes the results out in a fixed text format. Full code:
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

from yqsj.items import YqsjChinaItem, YqsjProvinceItem


class YqsjPipeline:
    def __init__(self):
        self.file = open('result.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        if isinstance(item, YqsjChinaItem):
            self.file.write(
                "国内疫情\n现有确诊\t{}\n无症状\t{}\n现有疑似\t{}\n现有重症\t{}\n累计确诊\t{}\n*输入\t{}\n累计治愈\t{}\n累计死亡\t{}\n".format(
                    item['exist_diagnosis'],
                    item['asymptomatic'],
                    item['exist_suspecte'],
                    item['exist_severe'],
                    item['cumulative_diagnosis'],
                    item['overseas_input'],
                    item['cumulative_cure'],
                    item['cumulative_dead']))
        if isinstance(item, YqsjProvinceItem):
            self.file.write(
                "省份:{}\t新增:{}\t现有:{}\t累计:{}\t治愈:{}\t死亡:{}\n".format(
                    item['location'],
                    item['new'],
                    item['exist'],
                    item['total'],
                    item['cure'],
                    item['dead']))
        return item

    def close_spider(self, spider):
        self.file.close()
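A side note on the file handling: opening result.txt in __init__ works, but Scrapy also provides an open_spider hook that is the symmetric counterpart of close_spider and is the more conventional place to acquire the file handle. A minimal sketch of that alternative:

class YqsjPipeline:
    def open_spider(self, spider):
        # called once when the spider starts, mirroring close_spider at shutdown
        self.file = open('result.txt', 'w', encoding='utf-8')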
Settings Changes
Use this as a reference and adjust to your own needs:
# Scrapy settings for yqsj project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'yqsj'

SPIDER_MODULES = ['yqsj.spiders']
NEWSPIDER_MODULE = 'yqsj.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'yqsj (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36'
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'yqsj.middlewares.YqsjSpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'yqsj.middlewares.YqsjDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'yqsj.pipelines.YqsjPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
Verifying the Results
Now run the spider and take a look at the result file, result.txt.
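From the project root, start the crawl and then open the output file:

scrapy crawl yqsj
cat result.txt   # on Windows: type result.txt

Each run overwrites result.txt with one nationwide block followed by one line per province, in the format defined by the pipeline above.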
Summary
Emmmm, this was written for fun while bored, so there is not much to summarize.
A quote to share:
"Cultivating the mind is itself a form of cultivation. Favorable circumstances temper your strength, adversity tempers your heart; neither can be skipped." (《剑来》)
If this article was useful to you, don't hold back on the likes. Thank you.