
Zhaopin (智联招聘) job crawler


                                         Straight to the code; the crawler's approach is explained in the comments.

import requests
from lxml import etree
import time
import re

'''
    1. Requirements analysis
        Fields to collect: title, gsmc (company name), gz (salary), addr (location),
        jy (experience), xl (education), fuli (welfare)
        Entry URL: https://www.zhaopin.com/

    2. Page analysis
        All job category tags: //div[@class='zp-jobNavigater-pop-list']/a
        Job detail list: https://sou.zhaopin.com/?jl=489&kw=Java%E5%BC%80%E5%8F%91&kt=3
    3. Code implementation

'''
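
# For reference, the parsing in get_job_info below assumes the fe-api JSON
# response is shaped roughly like this (field names inferred from the key
# accesses in the code; the live payload may differ as the API evolves):
# {
#   "data": {
#     "numFound": 1234,
#     "results": [
#       {
#         "city":       {"items": [{"name": "..."}]},
#         "company":    {"name": "...", "size": {"name": "..."}, "type": {"name": "..."}},
#         "eduLevel":   {"name": "..."},
#         "emplType":   "...",
#         "jobName":    "...",
#         "jobType":    {"display": "..."},
#         "salary":     "...",
#         "welfare":    ["...", "..."],
#         "updateDate": "...",
#         "workingExp": {"name": "..."}
#       }
#     ]
#   }
# }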


# 1. Get the job category tags from the home page
def get_job_tag(url):

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }

    response = requests.get(url, headers=headers).text
    # print(response)
    # Parse the page source
    HTML = etree.HTML(response)
    # Extract the text of every job category tag
    job_tag = HTML.xpath("//div[@class='zp-jobNavigater-pop-list']/a/text()")
    return job_tag
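
# Illustrative usage (the actual tags depend on the live home page):
#   get_job_tag('https://www.zhaopin.com/')  ->  ['Java开发', 'PHP', 'Web前端', ...]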


# Get job listings from the search API
def get_job_info(url, start, kw):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    info_html = requests.get(url.format(start, kw), headers=headers).json()
    job_dict = {}
    for i in info_html['data']['results']:
        job_dict['city'] = i['city']['items'][0]['name']
        job_dict['company_name'] = i['company']['name']
        job_dict['company_size'] = i['company']['size']['name']
        job_dict['companyType'] = i['company']['type']['name']
        job_dict['eduLevel'] = i['eduLevel']['name']
        job_dict['emplType'] = i['emplType']
        job_dict['jobname'] = i['jobName']
        job_dict['jobType'] = i['jobType']['display']
        job_dict['salary'] = i['salary']
        job_dict['welfare'] = i['welfare']
        job_dict['updateDate'] = i['updateDate']
        job_dict['workingExp'] = i['workingExp']['name']
        # print(job_dict)
        # print(i)
        # Deduplicate, then clean and save
        if unique_data(job_dict):
            job_dict = clear_data(job_dict)
            save_data(job_dict)
    return info_html['data']['numFound']

# Filter out duplicate records (keyed on company + job name)
companyList = []
jobNameList = []
def unique_data(data):
    if (data['jobname'] in jobNameList) and (data['company_name'] in companyList):
        return False
    else:
        companyList.append(data['company_name'])
        jobNameList.append(data['jobname'])
        return data
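
# A leaner equivalent (sketch, not wired in above): track (company, jobname)
# pairs in a set so membership checks are O(1) instead of scanning two lists:
#
#   seen = set()
#   def unique_data(data):
#       key = (data['company_name'], data['jobname'])
#       if key in seen:
#           return False
#       seen.add(key)
#       return data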

# Data cleaning
def clear_data(data):
    # Join the welfare list into a single '/'-separated string
    data['welfare'] = '/'.join([str(i) for i in data['welfare']])
    # Strip Chinese characters from company_size (e.g. '100-299人' -> '100-299')
    pattern = re.compile(r'[\u4E00-\u9FA5]+')
    data['company_size'] = pattern.sub('', data['company_size'])
    return data


# Save one record, appending to zlzp.txt
def save_data(data):
    data = '::'.join([str(i) for i in data.values()])
    print(data)
    with open('zlzp.txt', 'a+', encoding='utf-8') as file:
        file.write(data + '\n')
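
# Each saved line is the record's values '::'-joined in insertion order,
# e.g. (entirely made-up values):
#   北京::某某科技有限公司::100-299::民营::本科::全职::Java开发工程师::软件/互联网开发::10K-15K::五险一金/年终奖::2022-05-01::3-5年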


# Main entry point
if __name__ == '__main__':
    '''
     1. Request the home page
    '''
    starturl = 'https://www.zhaopin.com/'
    job_tag_list = get_job_tag(starturl)
    # print(job_tag_list)

    '''
     2. Fetch the paginated job list
    '''
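    # Illustrative: with start=0 and kw='Java开发', the infourl below formats to
    # https://fe-api.zhaopin.com/c/i/sou?start=0&pageSize=60&cityId=489&...&kw=Java开发&kt=3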

    start = 0
    page = 1
    infourl = 'https://fe-api.zhaopin.com/c/i/sou?start={0}&pageSize=60&cityId=489&workExperience=-1&education=-1&companyType=-1&employmentType=-1&jobWelfareTag=-1&kw={1}&kt=3'
    while True:
        numFound = get_job_info(infourl, start, job_tag_list[0])
        print('Page {0}'.format(page))
        if start < numFound:
            start += 60
            page += 1
            time.sleep(0.5)
        else:
            break
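
If a structured file is preferred over the '::'-joined text, save_data can be
swapped for a small CSV variant (a sketch using Python's standard csv module;
save_data_csv and its path default are hypothetical names, and a header row is
left out for brevity):

import csv

def save_data_csv(data, path='zlzp.csv'):
    # Append one row per job record, in the same field order as job_dict
    with open(path, 'a+', newline='', encoding='utf-8') as file:
        csv.writer(file).writerow(data.values())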
