Batch Crawling News for Multiple Companies
1 Requirements
Batch-crawl news for multiple companies and store the results persistently.
2 Code Implementation
import requests
import re

def baidu(company):
    # Fetch the Baidu News search results page for this company
    url = 'https://www.baidu.com/s?rtt=4&tn=news&word=' + company
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0'}
    source = requests.get(url=url, headers=headers).text
    # Extract the news titles
    p_title = '<h3 class="news-title_1YtI1">.*?<!--s-text-->(.*?)<!--/s-text--></a></h3>'
    title = re.findall(p_title, source, re.S)
    # Extract the news URLs
    p_href = '<h3 class="news-title_1YtI1"><a href="(.*?)" target="_blank"'
    href = re.findall(p_href, source)
    # Clean the data and append it to the report file (persistent storage)
    file1 = open('./数据挖掘报告.txt', 'a', encoding='utf-8')
    file1.write(company + '数据挖掘完毕!' + '\n' + '\n')
    for index in range(len(title)):
        title[index] = re.sub('<.*?>', '', title[index]).strip()  # strip leftover HTML tags and whitespace
        file1.write(str(index + 1) + '.' + title[index] + '(' + href[index] + ')' + '\n')
    file1.write('————————————————————' + '\n' + '\n')
    file1.close()

companies = ['华能信托', '百度集团', '阿里巴巴']
for company in companies:
    baidu(company)
    print(company + '百度新闻爬取成功!')
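One practical refinement for batch runs: if a single request fails (a network error, an anti-crawling block, or a page-structure change that leaves the regexes with no matches), the plain loop stops and the remaining companies are never crawled. Below is a minimal sketch, assuming the baidu() function defined above; the 3-second pause between requests is an assumed value, not something the article specifies.

import time

companies = ['华能信托', '百度集团', '阿里巴巴']
for company in companies:
    try:
        baidu(company)  # function defined in the code above
        print(company + '百度新闻爬取成功!')
    except Exception as e:
        # a failed company is reported but does not abort the rest of the batch
        print(company + ' failed: ' + str(e))
    time.sleep(3)  # assumed delay to be gentler on the server

Because the report file is opened in append mode inside baidu(), re-running the script adds new results to 数据挖掘报告.txt rather than overwriting earlier ones.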