python爬取新闻门户网站的示例
程序员文章站
2022-06-23 10:29:56
项目地址:https://github.com/python3spiders/allnewsspider如何使用每个文件夹下的代码就是对应平台的新闻爬虫 py 文件直接运行 pyd 文件需...
项目地址:
https://github.com/python3spiders/allnewsspider
如何使用
每个文件夹下的代码就是对应平台的新闻爬虫
- py 文件直接运行
- pyd 文件需要通过 import 引入后调用,假设文件名为 pengpai_news_spider.pyd
将 pyd 文件下载到本地,新建项目,把 pyd 文件放进去
项目根目录下新建 runner.py,写入以下代码即可运行并抓取
import pengpai_news_spider

pengpai_news_spider.main()
示例代码
百度新闻
# -*- coding: utf-8 -*-
# Baidu News search crawler.
# NOTE(review): the original author advises opening the Baidu search engine
# in a browser first if requests start failing (anti-bot workaround).
import csv
import os
from datetime import datetime, timedelta
from random import randint
from time import sleep


def parsetime(unformatedtime):
    """Normalize a Baidu relative timestamp to 'YYYY-mm-dd HH:MM'.

    Baidu shows recent results as 'N分钟前' (N minutes ago) or 'N小时前'
    (N hours ago); any other string is returned unchanged.

    :param unformatedtime: raw time string scraped from a result entry
    :return: absolute 'YYYY-mm-dd HH:MM' string, or the input unchanged
    """
    if '分钟' in unformatedtime:
        minute = unformatedtime[:unformatedtime.find('分钟')]
        delta = timedelta(minutes=int(minute))
        # BUG FIX: was '%y-%m-%d %h:%m' — lowercase %h/%m are abbreviated
        # month / month number, not hour/minute.
        return (datetime.now() - delta).strftime('%Y-%m-%d %H:%M')
    elif '小时' in unformatedtime:
        hour = unformatedtime[:unformatedtime.find('小时')]
        delta = timedelta(hours=int(hour))
        return (datetime.now() - delta).strftime('%Y-%m-%d %H:%M')
    else:
        return unformatedtime


def dealhtml(html):
    """Extract title/source/time/summary from one parsed result page.

    Prints each entry and appends the rows to the CSV named by the
    module-global ``filename`` (set by :func:`dospider`).

    :param html: an lxml element tree of one Baidu News result page
    """
    results = html.xpath('//div[@class="result-op c-container xpath-log new-pmd"]')
    savedata = []
    for result in results:
        title = result.xpath('.//h3/a')[0]
        title = title.xpath('string(.)').strip()
        summary = result.xpath('.//span[@class="c-font-normal c-color-text"]')[0]
        summary = summary.xpath('string(.)').strip()
        # ./ selects direct children only; .// selects all descendants
        infos = result.xpath('.//div[@class="news-source"]')[0]
        # BUG FIX: the original bound this value to a local named
        # `datetime`, shadowing the datetime class imported above.
        source = infos.xpath(".//span[last()-1]/text()")[0]
        pub_time = infos.xpath(".//span[last()]/text()")[0]
        pub_time = parsetime(pub_time)
        print('标题', title)
        print('来源', source)
        print('时间', pub_time)
        print('概要', summary)
        print('\n')
        savedata.append({
            'title': title,
            'source': source,
            'time': pub_time,
            'summary': summary
        })
    with open(filename, 'a+', encoding='utf-8-sig', newline='') as f:
        writer = csv.writer(f)
        for row in savedata:
            writer.writerow([row['title'], row['source'], row['time'], row['summary']])


headers = {
    'user-agent': 'mozilla/5.0 (windows nt 10.0; win64; x64) applewebkit/537.36 (khtml, like gecko) chrome/75.0.3770.142 safari/537.36',
    'referer': 'https://www.baidu.com/s?rtt=1&bsst=1&cl=2&tn=news&word=%b0%d9%b6%c8%d0%c2%ce%c5&fr=zhidao'
}

url = 'https://www.baidu.com/s'

params = {
    'ie': 'utf-8',
    'medium': 0,
    # rtt=4 sorts by time, rtt=1 sorts by relevance ("focus")
    'rtt': 1,
    'bsst': 1,
    'rsv_dl': 'news_t_sk',
    'cl': 2,
    'tn': 'news',
    'rsv_bp': 1,
    'oq': '',
    'rsv_btype': 't',
    'f': 8,
}


def dospider(keyword, sortby='focus'):
    """Crawl Baidu News search results for *keyword* into '<keyword>.csv'.

    :param keyword: search keyword
    :param sortby: sort order — 'focus' (relevance, default) or 'time'
    """
    # Third-party imports are local so the pure helpers above remain
    # importable without requests/lxml installed.
    import requests
    from lxml import etree

    global filename
    filename = '{}.csv'.format(keyword)
    # Write the CSV header row only once per keyword file.
    if not os.path.exists(filename):
        with open(filename, 'w+', encoding='utf-8-sig', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['title', 'source', 'time', 'summary'])
    params['wd'] = keyword
    if sortby == 'time':
        params['rtt'] = 4
    response = requests.get(url=url, params=params, headers=headers)
    # BUG FIX: the parser entry point is etree.HTML (uppercase);
    # etree.html does not exist.
    html = etree.HTML(response.text)
    dealhtml(html)
    # Total result count, e.g. '找到相关资讯约1,234篇' → 1234.
    total = html.xpath('//div[@id="header_top_bar"]/span/text()')[0]
    total = total.replace(',', '')
    total = int(total[7:-1])
    pagenum = total // 10
    for page in range(1, pagenum):
        print('第 {} 页\n\n'.format(page))
        headers['referer'] = response.url
        params['pn'] = page * 10
        response = requests.get(url=url, headers=headers, params=params)
        html = etree.HTML(response.text)
        dealhtml(html)
        # Random pause between pages to avoid triggering rate limiting.
        sleep(randint(2, 4))


if __name__ == "__main__":
    dospider(keyword='马保国', sortby='focus')
以上就是python爬取新闻门户网站的示例的详细内容,更多关于python爬取新闻门户网站的资料请关注其它相关文章!
下一篇: 滴滴看见角落里的人