Python web scraping: the Scrapy framework
Workflow for using the Scrapy framework
Open the Terminal in PyCharm.
Create the crawler project
scrapy startproject project_name (choose your own name)
The project is created to carry out the crawling task; it contains the various Python files (settings.py, pipelines.py, the spiders package, and so on). A concrete command sequence for the project used in this article follows these steps.
Create the spider file
Inside the project, the spider file is created under the spiders directory:
scrapy genspider spider_name domain
Run the spider
scrapy crawl spider_name (this is the spider's name attribute; with genspider it matches the file name)
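For example, for the project and spider used in the rest of this article (myscrapyproject4, musicspider, domain htqyy.com), the sequence would look roughly like this; genspider puts musicspider.py under myscrapyproject4/spiders/:
scrapy startproject myscrapyproject4
cd myscrapyproject4
scrapy genspider musicspider htqyy.com
scrapy crawl musicspider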
Spider file code (musicspider.py)
import scrapy


class MusicspiderSpider(scrapy.Spider):
    name = 'musicspider'
    allowed_domains = ['htqyy.com']
    start_urls = ['http://www.htqyy.com/genre/1']

    def parse(self, response):
        # Filter the data with XPath expressions; each call returns a SelectorList
        music_name_list = response.xpath("//li[@class='mItem']//span[@class='title']//a//@title")
        artist_name_list = response.xpath("//li[@class='mItem']//span[@class='artistName']//a//@title")
        album_name_list = response.xpath("//li[@class='mItem']//span[@class='albumName']//a//@title")
        play_count_list = response.xpath("//li[@class='mItem']//span[@class='playCount']")

        # Build a dict and yield the filtered data to the pipeline
        item_value = {}
        item_value["music_name_list"] = music_name_list
        item_value["artist_name_list"] = artist_name_list
        item_value["album_name_list"] = album_name_list
        item_value["play_count_list"] = play_count_list
        yield item_value

        # Build the next page URL and feed it back into parse() until page 7
        page = int(response.url[-1]) + 1
        if page <= 7:
            next_url = response.url[:-1] + str(page)
            print(next_url)
            yield scrapy.Request(next_url, callback=self.parse)
Printing the response produces a lot of log output; add LOG_LEVEL = "WARNING" to the settings file so that only warnings and errors are logged and the printed response is no longer buried in log lines.
In the spider file, the data is filtered with XPath expressions on the response; response.xpath() is built into Scrapy, so the from lxml import etree line is not actually needed.
For example, to get the title attribute of every a tag whose class is song:
res_list = response.xpath("//a[@class='song']/@title")
The result is a SelectorList; each element in it is a Selector object.
Iterate over res_list and call get() on each Selector to obtain the data you want as a string; getall() also works and, when there are several matches, returns them all as a list of strings.
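A minimal sketch of the two extraction styles inside parse() (variable names are illustrative):
res_list = response.xpath("//a[@class='song']/@title")
first_title = res_list.get()       # first match as a string, or None if nothing matched
all_titles = res_list.getall()     # every match, as a list of strings
for sel in res_list:
    print(sel.get())               # each Selector also has its own get()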
The scraped data can be processed and saved in the pipeline (pipelines.py). Before the pipeline can process anything, the ITEM_PIPELINES section in the settings file has to be enabled. What the spider yields to the pipeline is normally a dict (or an Item object; an Item-based alternative is sketched after the loop below), so build a dict and put each extracted data string into it as a value:
for i in res_list:
    res_dict = {}
    res_dict["title"] = i.get()
    yield res_dict
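This article sticks to plain dicts. Scrapy also accepts Item objects, which declare their fields up front; a minimal sketch, where SongItem and its field are illustrative names rather than part of this project:
import scrapy

class SongItem(scrapy.Item):
    title = scrapy.Field()

# in parse(): yield SongItem(title=i.get())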
settings.py file
# Scrapy settings for myscrapyproject4 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'myscrapyproject4'
SPIDER_MODULES = ['myscrapyproject4.spiders']
NEWSPIDER_MODULE = 'myscrapyproject4.spiders'
LOG_LEVEL = "WARNING"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'myscrapyproject4 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'myscrapyproject4.middlewares.Myscrapyproject4SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'myscrapyproject4.middlewares.Myscrapyproject4DownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'myscrapyproject4.pipelines.Myscrapyproject4Pipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
pipelines.py: processing and saving the data
import re

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class Myscrapyproject4Pipeline:
    def __init__(self):
        # Open the output file once; utf-8 so the Chinese titles are written safely
        self.f = open(r"C:\Users\Administrator\PycharmProjects\pythonProject\myscrapyproject4\myscrapyproject4\歌曲详情.txt",
                      "a", encoding="utf-8")

    def process_item(self, item, spider):
        # Process the data passed over from the spider
        num = len(item['music_name_list'])
        for i in range(num):
            music_name_str = item['music_name_list'][i].get()
            artist_name_str = item['artist_name_list'][i].get()
            album_name_str = item['album_name_list'][i].get()
            play_count_str = item['play_count_list'][i].get()
            # playCount was selected as a whole <span>, so pull the number out of its markup
            play_count_str2 = re.findall(r"\d+\.?\d*", play_count_str)[0]
            song_str = music_name_str + " " + artist_name_str + " " + album_name_str + " " + play_count_str2
            self.f.write(song_str + "\n")
            print(song_str + " saving...")
        self.f.write("-------------------------------\n")
        return item

    def close_spider(self, spider):
        self.f.close()
        print("Crawl finished")
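As a design note: the file is opened in __init__ above; Scrapy pipelines also provide an open_spider(self, spider) hook that runs once when the spider starts, which is another common place to open the file. A minimal sketch of that variant (path shortened for the example):
class Myscrapyproject4Pipeline:
    def open_spider(self, spider):
        # runs once when the spider starts
        self.f = open("歌曲详情.txt", "a", encoding="utf-8")

    def close_spider(self, spider):
        # runs once when the spider finishes
        self.f.close()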