Scraping a Novel with bs4
# Requirement: scrape every chapter title and chapter body of the novel "Romance of the Three Kingdoms" (三国演义)
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    # Fetch and parse the table-of-contents (home) page
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
    }
    url = 'https://www.shicimingju.com/book/sanguoyanyi.html'
    response = requests.get(url=url, headers=headers)
    response.encoding = response.apparent_encoding
    page_text = response.text

    # Parse the chapter titles and detail-page URLs out of the home page
    # 1. Instantiate a BeautifulSoup object and load the page source into it
    soup = BeautifulSoup(page_text, 'lxml')
    # Select the <li> elements that hold each chapter title and its detail-page URL
    li_list = soup.select('.book-mulu > ul > li')
    fp = open('./sanguo.txt', 'w', encoding='utf-8')
    for li in li_list:
        title = li.a.string
        detail_url = 'https://www.shicimingju.com' + li.a['href']
        # Request the detail page and parse the chapter content out of it.
        # To avoid garbled text, set response.encoding = response.apparent_encoding:
        # apparent_encoding is a fallback guessed from the response body itself,
        # so it automatically picks an encoding that matches the actual content.
        detail_response = requests.get(url=detail_url, headers=headers)
        detail_response.encoding = detail_response.apparent_encoding
        detail_page_text = detail_response.text
        # Parse the chapter content out of the detail page
        detail_soup = BeautifulSoup(detail_page_text, 'lxml')
        div_tag = detail_soup.find('div', class_='chapter_content')
        # The chapter text has now been extracted
        content = div_tag.text
        fp.write(title + ":" + content + '\n')
        print(title, 'scraped successfully...')
    fp.close()
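A note on the encoding fix used above: requests derives response.encoding from the Content-Type header, and for text responses that declare no charset it falls back to ISO-8859-1, which is what produces garbled Chinese text; apparent_encoding is instead guessed from the response body by a charset-detection library. A minimal sketch (a quick check, not part of the original script) to compare the two values for this site:

    import requests

    url = 'https://www.shicimingju.com/book/sanguoyanyi.html'
    resp = requests.get(url)
    print(resp.encoding)           # taken from the HTTP headers; may be ISO-8859-1 or None
    print(resp.apparent_encoding)  # guessed from the body, usually the encoding the page really uses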
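The CSS selector '.book-mulu > ul > li' pulls each chapter entry out of the catalogue div. If you prefer bs4's find/find_all API, an equivalent lookup (assuming the same page layout) would be:

    # Equivalent to soup.select('.book-mulu > ul > li'), assuming the same page structure
    mulu_div = soup.find('div', class_='book-mulu')
    li_list = mulu_div.find_all('li')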