selenium + ajax抓取英雄联盟全部英雄的详细信息及多线程保存全部皮肤图片到本地
程序员文章站
2022-06-09 18:14:00
摘要:本文给出用 selenium + ajax 抓取英雄联盟全部英雄详细信息、并用多线程把全部皮肤图片保存到本地的爬虫代码。运行前需要在本地开启 MongoDB 服务端,并安装谷歌浏览器及与其版本对应的 chromedriver 驱动文件。
爬虫代码如下:
运行需要本地开启mongo服务器端,安装有谷歌浏览器及selenium对应版本的驱动文件
#coding=gbk
from selenium import webdriver
from lxml import etree
import re
import json
import pymongo
class Lol_spider(object):
    """Scrape every LoL hero's details (role, stat bars, skills, skins) and
    store one document per hero in MongoDB.

    Requires a local MongoDB server and a chromedriver matching the installed
    Chrome version.
    """

    # Path to the chromedriver executable; adjust for your environment.
    DRIVER_PATH = "D:/chromedriver_win32/chromedriver.exe"

    def __init__(self):
        # Shared browser instance used for the hero list page and the
        # per-hero detail pages (opened as extra tabs in run()).
        self.driver = webdriver.Chrome(executable_path=self.DRIVER_PATH)

    def get_html_text(self, url):
        """Navigate the shared browser to ``url`` and return the rendered HTML."""
        self.driver.get(url)
        return self.driver.page_source

    def get_detail_page_url(self, html_text):
        """Extract every hero detail-page URL from the list-page HTML.

        Returns a list of absolute URLs under https://lol.qq.com/data/.
        """
        html = etree.HTML(html_text)
        li_list = html.xpath("//ul[@class='imgtextlist']/li")
        detail_urls = []
        for li in li_list:
            url = li.xpath("./a/@href")[0]
            detail_urls.append("https://lol.qq.com/data/" + url)
        return detail_urls

    # NOTE(review): method name keeps the original misspelling ("detali")
    # so existing callers are unaffected.
    def parse_detali_page(self, text, hero_id):
        """Parse one hero detail page into a dict of hero attributes.

        ``hero_id`` is forwarded to the AJAX endpoint that serves the
        skill/skin data (it is not present in the page HTML itself).
        """
        html = etree.HTML(text)
        item = {}
        # Hero name.
        item["name"] = html.xpath("//a[@class='here']/text()")[0]
        # Hero role/class tag.
        item["employment"] = html.xpath("//div[@class='defail-tags']/span/text()")[0]
        # The four ability bars encode their value as a number inside the
        # inline style attribute; the first digit run is the rating.
        item["physical_attack"] = re.search(r"\d+", html.xpath("//i[@class='up up1']/@style")[0]).group()
        item["magic_attack"] = re.search(r"\d+", html.xpath("//i[@class='up up2']/@style")[0]).group()
        item["defense_ability"] = re.search(r"\d+", html.xpath("//i[@class='up up3']/@style")[0]).group()
        item["operation_difficulty"] = re.search(r"\d+", html.xpath("//i[@class='up up4']/@style")[0]).group()
        # Skills and skins are loaded via AJAX, fetched separately.
        item["skills"], item["skins"] = self.get_skillAndSkin(hero_id)
        return item

    def get_skillAndSkin(self, hero_id):
        """Fetch the AJAX-loaded skill and skin data for ``hero_id``.

        Returns a ``(skills, skins)`` tuple: ``skills`` is a list of dicts
        (key, name, description), ``skins`` a list of ``(name, image_url)``
        tuples.
        """
        # Endpoint discovered via the browser's network panel.
        url = "https://game.gtimg.cn/images/lol/act/img/js/hero/" + hero_id + ".js"
        # A separate browser is used because the endpoint rejects plain
        # requests.get. Always quit it afterwards -- the original code
        # leaked one Chrome process per hero.
        browser = webdriver.Chrome(executable_path=self.DRIVER_PATH)
        try:
            browser.get(url)
            # page_source is already a str; the original's
            # .encode().decode("utf-8") round-trip was a no-op.
            data = browser.page_source
        finally:
            browser.quit()
        # Strip the surrounding HTML tags, keeping only the JSON payload.
        data = re.search(r'{"hero.+"}', data, re.S).group()
        # FIX: json.loads() no longer accepts an ``encoding`` argument
        # (removed in Python 3.9); the original also misspelled the value
        # as "ute-8". Plain loads() is correct for a str input.
        py_data = json.loads(data)
        skill_data = py_data["spells"]
        skin_data = py_data["skins"]
        # Collect skill key, name and description for every spell.
        skills = []
        for s_d in skill_data:
            sk_item = {}
            sk_item["spell_Key"] = s_d["spellKey"]
            sk_item["name"] = s_d["name"]
            sk_item["description"] = s_d["description"]
            skills.append(sk_item)
        # Collect (name, main image url) for every skin.
        skins = []
        for skin_d in skin_data:
            skin_item = (skin_d["name"], skin_d["mainImg"])
            skins.append(skin_item)
        return skills, skins

    def save_data(self, item):
        """Insert one hero document into the LOL.legend_info collection."""
        mongo_client = pymongo.MongoClient()
        try:
            mongo_col = mongo_client["LOL"]["legend_info"]
            mongo_col.insert_one(item)
        finally:
            # Close the connection even if the insert fails.
            mongo_client.close()
        print(item["name"] + " obtain sucessfully")

    def run(self):
        """Crawl the hero list page, then every detail page, saving each hero."""
        list_page_url = "https://lol.qq.com/data/info-heros.shtml"
        try:
            list_page_html = self.get_html_text(list_page_url)
            detail_urls = self.get_detail_page_url(list_page_html)
            for d_u in detail_urls:
                # The hero id (first digit run in the URL) keys the AJAX endpoint.
                hero_id = re.search(r"\d+", d_u).group()
                # Open the detail page in a new tab and switch to it.
                js = 'window.open("' + d_u + '")'
                self.driver.execute_script(js)
                self.driver.switch_to.window(self.driver.window_handles[1])
                # NOTE(review): page_source is read immediately after the tab
                # opens; an explicit wait may be needed on slow connections.
                item = self.parse_detali_page(self.driver.page_source, hero_id)
                self.save_data(item)
                # Close the detail tab and return to the list page tab.
                self.driver.close()
                self.driver.switch_to.window(self.driver.window_handles[0])
        finally:
            # Don't leak the browser process if anything above raises.
            self.driver.quit()
if __name__ == '__main__':
    # Build the spider and crawl every hero.
    spider = Lol_spider()
    spider.run()
保存到mongo如下:
利用多线程将全部英雄的皮肤保存到本地代码如下
import pymongo
import queue
import requests
import time
import re
import threading
# Worker: drain (name, url) pairs from the queue and save each image locally.
def download_img():
    """Download skin images from ``url_name_q`` until the queue drains.

    FIX: the original looped on ``url_name_q.not_empty``, which is a
    ``threading.Condition`` object and therefore always truthy -- workers
    blocked forever on ``get()`` once the queue was empty. A bounded
    ``get(timeout=...)`` lets each worker exit cleanly instead.
    """
    while True:
        try:
            # Give up after a few idle seconds so the thread can terminate.
            u_n = url_name_q.get(timeout=5)
        except queue.Empty:
            break
        try:
            skin_name, img_url = u_n[0], u_n[1]
            # '/' is illegal in file names; strip it from the skin name.
            file_name = "./data/" + re.sub(r"/", "", skin_name) + ".jpg"
            # Some skins have an empty mainImg URL; skip those.
            if img_url != "":
                # FIX: pass the module-level headers (defined but unused in
                # the original) and bound the request with a timeout.
                resp = requests.get(img_url, headers=headers, timeout=10)
                with open(file_name, "wb") as fp:
                    fp.write(resp.content)
                print(skin_name + "save into local successfully")
        finally:
            # Always account for the item so url_name_q.join() can return.
            url_name_q.task_done()
# --- main script: load each hero's skin (name, url) pairs from MongoDB
# and download them with a pool of worker threads. ---

# Read only the "skins" field of every hero document.
client = pymongo.MongoClient()
data = list(client["LOL"]["legend_info"].find({}, {"_id": 0, "skins": 1}))
# FIX: the original never closed the Mongo connection.
client.close()

# Flatten the per-hero skin lists into one work queue of (name, url) pairs.
# FIX: the queue is now unbounded -- the original Queue(3000) was filled
# before any consumer started, so put() would deadlock past 3000 items.
url_name_q = queue.Queue()
for hero_doc in data:
    for name_url in hero_doc["skins"]:
        url_name_q.put(name_url)

# Browser-like request headers, used by the download workers.
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36"
}

# Start 10 daemon workers, then block until every queued item is processed.
# FIX: the original threads were non-daemon and never joined, so the
# process could hang forever after the queue drained.
for _ in range(10):
    worker = threading.Thread(target=download_img)
    worker.daemon = True
    worker.start()
url_name_q.join()
下载结果如下:
本文地址:https://blog.csdn.net/cyj5201314/article/details/107633352