Using the Scrapy framework to crawl Python job listings from 51job and analyze them

example.py — the main spider file; most of the crawling logic lives here

# -*- coding: utf-8 -*-
import scrapy
# import pyecharts
from ..items import Scrapy3Item
class ExampleSpider(scrapy.Spider):
    name = 'example'
    # allowed_domains = ['example.com']
    start_urls = ['https://search.51job.com/list/000000,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare=']
    count = 0
    def parse(self, response):
        # Links and titles of each job posting on the search-result list page.
        # Note: the class predicate needs the @ prefix and text() needs parentheses.
        company_href = response.xpath('//*[@id="resultList"]/div[@class="el"]/p/span/a/@href').extract()
        title_main = response.xpath('//*[@id="resultList"]/div[@class="el"]/p/span/a/text()').extract()
        for href, title in zip(company_href, title_main):
            # Pass the list-page title along in meta (unused below; the detail
            # page h1 is extracted instead).
            yield scrapy.Request(href, callback=self.parse_self, meta={'title': title.strip()})
        # Follow the "next page" link; only crawl the first few pages.
        next_page = response.xpath('//li[@class="bk"]')[1].xpath('./a/@href').extract_first()
        if next_page and self.count < 2:
            self.count += 1
            yield scrapy.Request(next_page, callback=self.parse)

    def parse_self(self, response):
        # Job title, salary, and company name from the detail page header.
        company_postion = response.xpath('//div[@class="tHeader tHjob"]//div[@class="in"]//div[@class="cn"]//h1/text()').extract()[0].replace('\t', '').strip()
        company_salary = response.xpath('//div[@class="tHeader tHjob"]//div[@class="in"]//div[@class="cn"]//strong/text()').extract()[0].strip()
        corporate_name = response.xpath('//div[@class="tHeader tHjob"]//div[@class="in"]//div[@class="cn"]//p[@class="cname"]/a[1]/text()').extract()[0].strip()
        print(company_postion)
        print(corporate_name)
        print(company_salary)
        item = Scrapy3Item()
        item['company_postion'] = company_postion
        item['corporate_name'] = corporate_name
        item['company_salary'] = company_salary
        yield item
# /html/body/div[3]/div[2]/div[2]/div/div[1]/h1
# //*[@id="resultList"]/div[4]/p/span/a
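The crawl only dumps raw rows into jobsa.csv; the analysis the title promises (and the commented-out pyecharts import hints at) is not shown here. Below is a minimal sketch of one way to summarize the salaries from that CSV. It assumes 51job salary strings look like '1-1.5万/月' (a salary range in 万 CNY per month); the parse_salary helper is hypothetical, and the column names match the item fields above.

# analyze.py — a minimal sketch, assuming salaries are formatted like '1-1.5万/月'
import csv
import re

def parse_salary(text):
    """Return the midpoint monthly salary in CNY, or None if unparseable."""
    m = re.match(r'([\d.]+)-([\d.]+)万/月', text)
    if not m:
        return None
    low, high = float(m.group(1)), float(m.group(2))
    return (low + high) / 2 * 10000

salaries = []
with open('jobsa.csv', encoding='utf-8') as f:
    for row in csv.DictReader(f):
        s = parse_salary(row['company_salary'])
        if s is not None:
            salaries.append(s)

if salaries:
    print('jobs parsed:', len(salaries))
    print('average monthly salary: %.0f CNY' % (sum(salaries) / len(salaries)))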

items.py (this is the file the spider imports via from ..items import Scrapy3Item)

import scrapy


class Scrapy3Item(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    corporate_name=scrapy.Field()
    company_salary=scrapy.Field()
    company_postion=scrapy.Field()

settings.py — add the following to the configuration

ROBOTSTXT_OBEY = False  # do not obey robots.txt
DOWNLOAD_DELAY = 2  # delay between downloads, to avoid being blocked for crawling too fast
FEED_FORMAT = 'csv'  # export format
FEED_URI = 'jobsa.csv'  # file the scraped items are saved to
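
Note that on newer Scrapy releases (2.1 and later), FEED_FORMAT/FEED_URI are deprecated in favor of the single FEEDS setting; a minimal equivalent would be:

# Equivalent export config on Scrapy >= 2.1
FEEDS = {
    'jobsa.csv': {'format': 'csv'},
}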

run.py — typing the crawl command in a terminal every time is tedious, so define a runner script

from scrapy.cmdline import execute
execute('scrapy crawl example'.split())
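
An alternative sketch that starts the spider through Scrapy's own API rather than the command line (assuming it is run from inside the project, so the project settings can be located):

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())
process.crawl('example')  # spider name, as defined in ExampleSpider.name
process.start()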

The other generated project files are not used for now.
