Scrapy Project in Practice: Crawling User Profiles from a Community Site
This article walks through a Scrapy project that crawls user profiles from the SegmentFault community. The full source for each file of the project is listed below.
get_cookies.py
from selenium import webdriver
from pymongo import MongoClient
# from segmentfault import settings
import time
import settings


class GetCookies(object):
    def __init__(self):
        # Set up the webdriver options
        self.opt = webdriver.ChromeOptions()
        # self.opt.add_argument("--headless")
        # Load the account list
        self.user_list = settings.USER_LIST
        # Set up the MongoDB connection
        self.client = MongoClient(settings.MONGO_URI)
        self.db = self.client[settings.MONGO_DB]
        self.collection = self.db["cookies"]

    def get_cookies(self, username, password):
        """
        Log in with the given account and return the session cookies.
        :param username:
        :param password:
        :return: cookies
        """
        # Create the driver with the options configured above
        driver = webdriver.Chrome(
            executable_path="/Users/hank/scrapy/segmentfault/segmentfault/chromedriver",
            options=self.opt)
        driver.get("https://segmentfault.com/user/login")
        driver.find_element_by_name("username").send_keys(username)
        driver.find_element_by_name("password").send_keys(password)
        driver.find_element_by_xpath("//button[@type='submit']").click()
        time.sleep(2)
        driver.get("https://segmentfault.com/u/luwangmeilun/users/following")
        # Grab the page cookies after logging in
        cookies = driver.get_cookies()
        driver.quit()
        return cookies

    def format_cookies(self, cookies):
        """
        driver.get_cookies() returns a list of dicts such as:
        [{'domain': 'segmentfault.com', 'httpOnly': False, 'name': 'PHPSESSID',
          'path': '/', 'secure': False, 'value': 'web2~5grmfa89j12eksub8hja3bvaq4'},
         {'domain': '.segmentfault.com', 'expiry': 1581602940, 'httpOnly': False,
          'name': 'Hm_lvt_e23800c454aa573c0ccb16b52665ac26', 'path': '/',
          'secure': False, 'value': '1550066940'},
         {'domain': '.segmentfault.com', 'httpOnly': False,
          'name': 'Hm_lpvt_e23800c454aa573c0ccb16b52665ac26', 'path': '/',
          'secure': False, 'value': '1550066940'},
         {'domain': '.segmentfault.com', 'expiry': 1550067000, 'httpOnly': False,
          'name': '_gat', 'path': '/', 'secure': False, 'value': '1'},
         {'domain': '.segmentfault.com', 'expiry': 1550153340, 'httpOnly': False,
          'name': '_gid', 'path': '/', 'secure': False,
          'value': 'GA1.2.783265084.1550066940'},
         {'domain': '.segmentfault.com', 'expiry': 1613138940, 'httpOnly': False,
          'name': '_ga', 'path': '/', 'secure': False,
          'value': 'GA1.2.1119166665.1550066940'}]
        Only the name and value of each entry are needed.
        :param cookies:
        :return: dict mapping cookie name to value
        """
        c = dict()
        for item in cookies:
            c[item['name']] = item['value']
        return c

    def save(self):
        print("Start fetching cookies....")
        # Log in with each account in the list and collect its cookies
        for username, password in self.user_list:
            cookies = self.get_cookies(username, password)
            f_cookies = self.format_cookies(cookies)
            print("insert cookie:{}".format(f_cookies))
            # Insert the formatted cookies into MongoDB
            self.collection.insert_one(f_cookies)
            # s = db[self.collection].find()
            # for i in s:
            #     print(i)


if __name__ == '__main__':
    cookies = GetCookies()
    for i in range(20):
        cookies.save()
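Before the spider consumes them, it is worth confirming that the cookies actually landed in MongoDB. A minimal check, assuming MongoDB is running at the MONGO_URI configured in settings.py below ('localhost:27017') and that save() has been run at least once:

from pymongo import MongoClient

# Assumes the MONGO_URI / MONGO_DB values from settings.py
client = MongoClient('localhost:27017')
collection = client['segmentfault']['cookies']

# Each document is one flat {cookie name: value} mapping built by format_cookies()
for doc in collection.find().limit(3):
    print(doc)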
items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class SegmentfaultItem(scrapy.Item):
    # define the fields for your item here like:
    # Profile fields
    # Name
    name = scrapy.Field()
    # Reputation
    rank = scrapy.Field()
    # School
    school = scrapy.Field()
    # Major
    majors = scrapy.Field()
    # Company
    company = scrapy.Field()
    # Job title
    job = scrapy.Field()
    # Blog
    blog = scrapy.Field()

    # Social activity fields
    # Number of users followed
    following = scrapy.Field()
    # Number of followers
    fans = scrapy.Field()
    # Number of answers
    answers = scrapy.Field()
    # Number of questions
    questions = scrapy.Field()
    # Number of articles
    articles = scrapy.Field()
    # Number of lives (talks)
    lives = scrapy.Field()
    # Number of badges
    badges = scrapy.Field()

    # Skill fields
    # Number of likes received
    like = scrapy.Field()
    # Skill tags
    skills = scrapy.Field()
    # Registration date
    register_date = scrapy.Field()

    # Q&A statistics
    # Highest score among the user's answers
    answers_top_score = scrapy.Field()
    # Title of the question with the top-voted answer
    answers_top_title = scrapy.Field()
    # Tags of the question with the top-voted answer
    answers_top_tags = scrapy.Field()
    # Body of the question with the top-voted answer
    answers_top_question = scrapy.Field()
    # Body of the top-voted answer itself
    answers_top_content = scrapy.Field()
pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import pymongo


class SegmentfaultPipeline(object):
    # MongoDB collection name
    collection_name = 'userinfo'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    # Read the MongoDB connection info configured in settings.py via the crawler
    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB', 'segmentfault')
        )

    # Connect to MongoDB when the spider starts
    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    # Close the MongoDB connection when the spider stops
    def close_spider(self, spider):
        self.client.close()

    # Save each item into the database
    def process_item(self, item, spider):
        self.db[self.collection_name].insert_one(dict(item))
        return item
settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for segmentfault project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'segmentfault'

SPIDER_MODULES = ['segmentfault.spiders']
NEWSPIDER_MODULE = 'segmentfault.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 100

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 32
# CONCURRENT_REQUESTS_PER_IP = 32

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

RETRY_ENABLED = False
REDIRECT_ENABLED = False
DOWNLOAD_TIMEOUT = 5

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'segmentfault.middlewares.SegmentfaultSpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    # 'segmentfault.middlewares.SegmentfaultHttpProxyMiddleware': 543,
    'segmentfault.middlewares.SegmentfaultUserAgentMiddleware': 643,
    'segmentfault.middlewares.SegmentfaultCookiesMiddleware': 743,
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    # 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'segmentfault.pipelines.SegmentfaultPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# MongoDB configuration
MONGO_URI = 'localhost:27017'
MONGO_DB = 'segmentfault'

# Account list
USER_LIST = [
    ("798549150@qq.com", "guoqing1010"),
    ("learnscrapy@163.com", "guoqing1010"),
]

# Proxy list
PROXY_LIST = [
    'http://115.182.212.169:8080',
    'http://121.61.25.149:9999',
    'http://180.118.247.189:9000',
    'http://115.151.3.12:9999',
    'http://183.154.213.160:9000',
    'http://113.128.9.106:9999',
    'http://124.42.68.152:90',
    'http://49.70.48.50:9999',
    'http://113.128.11.172:9999',
    'http://111.177.177.40:9999',
    'http://59.62.83.253:9999',
    'http://39.107.84.185:8123',
    'http://124.94.195.107:9999',
    'http://111.177.160.132:9999',
    'http://120.25.203.182:7777'
]

USER_AGENT_LIST = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
    'Opera/8.0 (Windows NT 5.1; U; en)',
    'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
    'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.3.4000 Chrome/30.0.1599.101 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36'
]
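Note that the None entries in DOWNLOADER_MIDDLEWARES disable Scrapy's built-in proxy and user-agent middlewares so the custom ones take over. To confirm which values are actually in effect, a minimal sketch, assuming it is run from the project root so that scrapy.cfg is found:

from scrapy.utils.project import get_project_settings

settings = get_project_settings()
print(settings.get('CONCURRENT_REQUESTS'))  # 100
print(settings.get('DOWNLOAD_TIMEOUT'))     # 5
print(settings.get('MONGO_URI'))            # localhost:27017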
userinfo.py
# -*- coding: utf-8 -*-
import scrapy
import time
from scrapy import Request
from pymongo import MongoClient
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from segmentfault.items import SegmentfaultItem


class UserinfoSpider(CrawlSpider):
    name = 'userinfo'
    allowed_domains = ['segmentfault.com']
    start_urls = ['https://segmentfault.com/u/mybigbigcat/users/following']

    rules = (
        # Follow user profile pages and parse them
        Rule(LinkExtractor(allow=r'/u/\w+$'), callback='parse_item', follow=True),
        # Follow "followed" list pages to collect more profile URLs
        # Rule(LinkExtractor(allow=r'/users/followed$'), follow=True),
        # Follow "following" list pages to collect more profile URLs
        Rule(LinkExtractor(allow=r'/users/following$'), follow=True),
        # Follow pagination links
        # Rule(LinkExtractor(allow=r'/users/[followed|following]?page=\d+'), follow=True),
    )

    def start_requests(self):
        # Fetch one cookie document from MongoDB and attach it to the first request
        client = MongoClient(self.crawler.settings['MONGO_URI'])
        db = client[self.crawler.settings['MONGO_DB']]
        cookies_collection = db.cookies
        # Take a single cookie
        cookies = cookies_collection.find_one()
        # The 'Hm_lpvt_...' cookie holds the current time as a 10-digit
        # unix timestamp, so refresh it
        cookies['Hm_lpvt_e23800c454aa573c0ccb16b52665ac26'] = str(int(time.time()))
        return [Request("https://segmentfault.com",
                        cookies=cookies,
                        meta={'cookiejar': 1},
                        callback=self.after_login)]

    # After logging in, start crawling from start_urls
    def after_login(self, response):
        for url in self.start_urls:
            return self.make_requests_from_url(url)

    # def after_login(self, response):
    #     yield Request(self.start_urls[0],
    #                   meta={'cookiejar': response.meta['cookiejar']},
    #                   callback=self.parse_item)

    def parse_item(self, response):
        """
        Parse a user profile page.
        :param response:
        :return:
        """
        item = SegmentfaultItem()
        # Profile section
        profile_head = response.css('.profile__heading')
        # Name
        item['name'] = profile_head.css('h2[class*=name]::text').re_first(r'\w+')
        # Reputation
        item['rank'] = profile_head.css('.profile__rank-btn > span::text').extract_first()
        # School and major
        school_info = profile_head.css('.profile__school::text').extract()
        if school_info:
            # School
            item['school'] = school_info[0]
            # Major
            item['majors'] = school_info[1].strip()
        else:
            item['school'] = ''
            item['majors'] = ''
        # Company and job title
        company_info = profile_head.css('.profile__company::text').extract()
        if company_info:
            # Company
            item['company'] = company_info[0]
            # Job title
            item['job'] = company_info[1].strip()
        else:
            item['company'] = ''
            item['job'] = ''
        # Personal blog
        item['blog'] = profile_head.css('a[class*=other-item-link]::attr(href)').extract_first()

        # Activity panel
        profile_active = response.xpath("//div[@class='col-md-2']")
        # Number of users followed
        item['following'] = profile_active.css('div[class*=info] a > .h5::text').re(r'\d+')[0]
        # Number of followers
        item['fans'] = profile_active.css('div[class*=info] a > .h5::text').re(r'\d+')[1]
        # Number of answers
        item['answers'] = profile_active.css('a[href*=answer] .count::text').re_first(r'\d+')
        # Number of questions
        item['questions'] = profile_active.css('a[href*=questions] .count::text').re_first(r'\d+')
        # Number of articles
        item['articles'] = profile_active.css('a[href*=articles] .count::text').re_first(r'\d+')
        # Number of lives
        item['lives'] = profile_active.css('a[href*=lives] .count::text').re_first(r'\d+')
        # Number of badges
        item['badges'] = profile_active.css('a[href*=badges] .count::text').re_first(r'\d+')
        # URL of the badge detail page
        badge_url = profile_active.css('a[href*=badges]::attr(href)').extract_first()

        # Skill panel
        profile_skill = response.xpath("//div[@class='col-md-3']")
        # Skill tags
        item['skills'] = profile_skill.css('.tag::text').re(r'\w+')
        # Likes received
        item['like'] = profile_skill.css('.authlist').re_first(r'获得 (\d+) 次点赞')
        # Registration date
        item['register_date'] = profile_skill.css('.profile__skill--other p::text').extract_first()
        # if register_time:
        #     item['register_date'] = ''.join(re.findall(r'\d+', register_time))
        # else:
        #     item['register_date'] = ''

        # Output panel
        profile_work = response.xpath("//div[@class='col-md-7']")
        # Highest score among the user's answers
        item['answers_top_score'] = profile_work.css('#navanswer .label::text').re_first(r'\d+')
        # Title of the question with the top-voted answer
        item['answers_top_title'] = profile_work.css('#navanswer div[class*=title-warp] > a::text').extract_first()
        # URL of that question
        answer_url = profile_work.css('#navanswer div[class*=title-warp] > a::attr(href)').extract_first()

        # Hand the URLs that still need crawling, together with the item,
        # to the next callback
        request = scrapy.Request(
            # Question detail page URL
            url=response.urljoin(answer_url),
            meta={
                # Carry the item along
                'item': item,
                # Badge page URL
                'badge_url': response.urljoin(badge_url)},
            # Continue in parse_answer
            callback=self.parse_answer)
        yield request

    def parse_answer(self, response):
        # Retrieve the item passed along
        item = response.meta['item']
        # Retrieve the badge page URL passed along
        badge_url = response.meta['badge_url']
        # Question tags
        item['answers_top_tags'] = response.css('.question__title--tag .tag::text').re(r'\w+')
        # Collect the strings that make up the question body
        question_content = response.css('.widget-question__item p').re(r'>(.*?)<')
        # Join them and store in the item
        item['answers_top_question'] = ''.join(question_content)
        # Collect the strings that make up the answer body
        answer_content = response.css('.qa-answer > article .answer').re(r'>(.*?)<')
        # Join them and store in the item
        item['answers_top_content'] = ''.join(answer_content)
        # After the question page, continue with the badge page,
        # passing the updated item along
        request = scrapy.Request(url=badge_url,
                                 meta={'item': item},
                                 callback=self.parse_badge)
        yield request

    def parse_badge(self, response):
        item = response.meta['item']
        badge_name = response.css('span.badge span::text').extract()
        badge_count = response.css('span[class*=badges-count]::text').re(r'\d+')
        name_count = {}
        for i in range(len(badge_count)):
            name_count[badge_name[i]] = badge_count[i]
        item['badges'] = name_count
        yield item
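Note the hand-off pattern here: parse_item fills in the profile fields, then threads the partially built item through response.meta to parse_answer and on to parse_badge, and only the final callback yields it, so the pipeline receives exactly one complete document per user.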
middlewares.py
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

import random
import re
import datetime
import logging
import time

import scrapy
from scrapy.conf import settings
from pymongo import MongoClient

logger = logging.getLogger(__name__)


class SegmentfaultSpiderMiddleware(object):
    """
    Normalize the three registration-date formats stored in the item:
    1. 注册于 2015年12月12日  (registered on a full date)
    2. 注册于 3 天前          (registered N days ago)
    3. 注册于 5 小时前        (registered N hours ago)
    """

    def process_spider_output(self, response, result, spider):
        """
        Called on the spider's output; normalizes register_date in each item.
        :param response:
        :param result: iterable containing the items
        :param spider:
        :return: items with a normalized registration date
        """
        for item in result:
            # Only touch scrapy.Item objects
            if isinstance(item, scrapy.Item):
                # Current time
                now = datetime.datetime.now()
                register_date = item['register_date']
                logger.info("Raw registration date: {}".format(register_date))
                # Extract the digits, e.g. '注册于2015年12月12日' => '20151212'
                day = ''.join(re.findall(r'\d+', register_date))
                # More than 4 digits means the full-date form '注册于2015年12月12日'
                if len(day) > 4:
                    date = day
                # '时' in the string means the hours-ago form '注册于8小时前'
                elif '时' in register_date:
                    d = now - datetime.timedelta(hours=int(day))
                    date = d.strftime("%Y%m%d")
                # The remaining case is the days-ago form '注册于3天前'
                else:
                    d = now - datetime.timedelta(days=int(day))
                    date = d.strftime("%Y%m%d")
                # Store the normalized value
                item['register_date'] = date
            yield item


class SegmentfaultHttpProxyMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    def __init__(self):
        self.proxy_list = settings['PROXY_LIST']

    def process_request(self, request, spider):
        proxy = random.choice(self.proxy_list)
        logger.info('Using proxy: {}'.format(proxy))
        request.meta['proxy'] = proxy


class SegmentfaultUserAgentMiddleware(object):
    def __init__(self):
        self.useragent_list = settings['USER_AGENT_LIST']

    def process_request(self, request, spider):
        user_agent = random.choice(self.useragent_list)
        # logger.info('Using User-Agent: {}'.format(user_agent))
        request.headers['User-Agent'] = user_agent


class SegmentfaultCookiesMiddleware(object):
    client = MongoClient(settings['MONGO_URI'])
    db = client[settings['MONGO_DB']]
    collection = db['cookies']

    def get_cookies(self):
        """
        Pick a random cookie document.
        :return:
        """
        cookies = random.choice([cookie for cookie in self.collection.find()])
        # Drop the unneeded "_id" and "_gat" keys
        cookies.pop('_id')
        cookies.pop('_gat')
        # Refresh "Hm_lpvt_..." with the current timestamp
        cookies['Hm_lpvt_e23800c454aa573c0ccb16b52665ac26'] = str(int(time.time()))
        return cookies

    def remove_cookies(self, cookies):
        """
        Delete an expired cookie document.
        :param cookies:
        :return:
        """
        # Pop a random key/value pair from the dict; the result is a tuple
        i = cookies.popitem()
        # Delete the cookie document that contains it
        try:
            logger.info("Removing cookies {}".format(cookies))
            self.collection.remove({i[0]: i[1]})
        except Exception as e:
            logger.info("No such cookies: {}".format(cookies))

    def process_request(self, request, spider):
        """
        Attach a cookie to every request.
        :param request:
        :param spider:
        :return:
        """
        cookies = self.get_cookies()
        request.cookies = cookies

    def process_response(self, request, response, spider):
        """
        When a session expires the site redirects to the login page; in that
        case attach a fresh cookie and put the request back into the scheduler.
        :param request:
        :param response:
        :param spider:
        :return:
        """
        if response.status in [301, 302]:
            logger.info("Redirect response: {}".format(response))
            redirect_url = response.headers['Location']
            if b'/user/login' in redirect_url:
                logger.info("Cookies expired")
                # The request failed, so fetch a new cookie, attach it, skip
                # the remaining middlewares, and reschedule the request
                new_cookie = self.get_cookies()
                logger.info("Got new cookie: {}".format(new_cookie))
                # Remove the stale cookies
                self.remove_cookies(request.cookies)
                request.cookies = new_cookie
                return request
        return response
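The date normalization in SegmentfaultSpiderMiddleware is easy to sanity-check in isolation. A standalone sketch of the same three cases from the class docstring (the helper function name is hypothetical, not part of the project code):

import re
import datetime

def normalize_register_date(register_date):
    # Hypothetical standalone version of the logic in
    # SegmentfaultSpiderMiddleware.process_spider_output
    now = datetime.datetime.now()
    day = ''.join(re.findall(r'\d+', register_date))
    if len(day) > 4:              # '注册于 2015年12月12日' -> '20151212'
        return day
    elif '时' in register_date:   # '注册于 5 小时前' -> N hours back
        return (now - datetime.timedelta(hours=int(day))).strftime("%Y%m%d")
    else:                         # '注册于 3 天前' -> N days back
        return (now - datetime.timedelta(days=int(day))).strftime("%Y%m%d")

print(normalize_register_date('注册于 2015年12月12日'))  # 20151212
print(normalize_register_date('注册于 3 天前'))
print(normalize_register_date('注册于 5 小时前'))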
run.py
from scrapy import cmdline
# from segmentfault.get_cookies import GetCookies
from get_cookies import GetCookies

if __name__ == '__main__':
    cookies = GetCookies()
    cookies.save()
    name = 'userinfo'
    cmd = 'scrapy crawl {}'.format(name)
    cmdline.execute(cmd.split())
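Running run.py first refreshes the cookie pool via GetCookies.save() and then hands control to the Scrapy command line, so it is equivalent to executing get_cookies.py followed by scrapy crawl userinfo from the project root.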
This concludes the walkthrough of the Scrapy project for crawling community user profiles. For more on crawling user data with Scrapy, search previous articles or browse the related articles below; your continued support is appreciated!