Scraping Data from 糗事百科 (Qiushibaike)
from lxml import etree
import requests


class Qiushi():
    def __init__(self, page1, page2):
        self.page1 = page1
        self.page2 = page2

    def __call__(self, *args, **kwargs):
        self.run()

    def run(self):
        # Build the URL for each page in the requested range
        for i in range(self.page1, self.page2 + 1):
            base_url = "http://www.qiushibaike.com/8hr/page{}/".format(str(i))
            # Request headers
            headers = {
                "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36"
            }
            response = requests.get(base_url, headers=headers)
            html = response.text
            html_xml = etree.HTML(html)
            self.get_data(html_xml)

    def get_data(self, html):
        # Each <li> under the recommend-article block holds one post
        li_list = html.xpath('.//div[@class="recommend-article"]//li')
        for li in li_list:
            # Image links
            pic = li.xpath('.//img//@src')
            # Post title / topic text
            main = li.xpath('.//a[@class="recmd-content"]//text()')
            # The <span> texts hold the likes, comments and author's nickname
            info = li.xpath('.//span/text()')
            # Number of likes
            try:
                like_num = info[0]
            except IndexError:
                like_num = '0'
            # Number of comments
            try:
                say_num = info[-3]
            except IndexError:
                say_num = '0'
            # Author's nickname
            name = info[-1]
            dic = {
                "视频名称": main,       # post title
                "用户名": name,         # username
                "评论数量": say_num,    # comment count
                "喜欢数量": like_num,   # like count
                "图片链接": pic         # image links
            }
            self.write_json(str(dic))

    def write_json(self, new_list):
        # Open the file and append one record per call;
        # the ./json文件 directory must already exist.
        with open('./json文件/record.json', 'a+', encoding='utf-8') as f:
            f.write(new_list + '\n')


if __name__ == '__main__':
    page1 = int(input('请输入你想爬取的开始页:'))  # start page to scrape
    page2 = int(input('请输入你想爬取的结束页:'))  # end page to scrape
    download = Qiushi(page1, page2)
    download()
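Note that write_json stores str(dic), so each line in record.json is a Python dict literal (single quotes) rather than strict JSON, and json.loads will not parse it directly. A minimal sketch of reading the records back with ast.literal_eval, assuming the same file path as above:

import ast

# Read back the records written by write_json; each line is a Python
# dict literal produced by str(dic), so ast.literal_eval can parse it.
records = []
with open('./json文件/record.json', encoding='utf-8') as f:
    for line in f:
        line = line.strip()
        if line:
            records.append(ast.literal_eval(line))

for r in records:
    print(r["用户名"], r["喜欢数量"], r["评论数量"])

If strict JSON output is preferred, writing json.dumps(dic, ensure_ascii=False) instead of str(dic) in write_json would make each line loadable with json.loads.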