
Simulating a Browser to Defeat Anti-Crawler Measures: Fetching Bank Information

The script below downloads the list of banking institutions from the CBRC website, rotating User-Agent headers so the requests look like they come from ordinary browsers, and writes each bank's name and official site URL to a file.
import random
import re

from urllib.request import urlopen, Request
from urllib.error import URLError


def get_content(url):
    """Fetch the page content, simulating a browser to defeat anti-crawler checks."""
    # Rotate through several User-Agent strings so that no single browser
    # identity gets blocked for requesting too frequently.
    user_agents = [
        "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0",
        "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19",
        "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0",
    ]
    # The User-Agent header can be set when the Request is instantiated,
    # e.g. Request(url, headers={'User-Agent': user_agent}), or added
    # afterwards with the add_header method.
    reqObj = Request(url)
    reqObj.add_header('User-Agent', random.choice(user_agents))
    try:
        # URLError is raised by urlopen, not by constructing the Request,
        # so the network call itself must sit inside the try block.
        content = urlopen(reqObj).read().decode('utf-8').replace('\t', ' ')
    except URLError as e:
        print(e)
        return None
    return content
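
For comparison, the same rotating User-Agent idea can be expressed with the third-party requests library. This is a minimal sketch, assuming requests is installed (get_content_requests is a hypothetical name, not part of the original script; it reuses the random module imported above):

import requests  # third-party: pip install requests

def get_content_requests(url):
    """Fetch a page with a randomly chosen User-Agent, mirroring get_content."""
    headers = {'User-Agent': random.choice([
        "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0",
        "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0",
    ])}
    try:
        resp = requests.get(url, headers=headers, timeout=10)
        resp.raise_for_status()  # raise on 4xx/5xx responses
        return resp.text
    except requests.RequestException as e:
        print(e)
        return None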


def parser_content(content):
    """Parse the page content, extracting each bank's name and official URL."""
    pattern = r'<a href="(.*)" target="_blank"  style="color:#08619D">\s+(.*)\s+</a>'
    bankinfos = re.findall(pattern, content)
    if not bankinfos:
        raise Exception("No bank information matching the pattern was found")
    return bankinfos
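
To sanity-check the regular expression, it can be run against a hand-written fragment shaped like the anchors on the CBRC page (the fragment and bank name below are made-up examples, not real page content; note the two spaces before style=, which the pattern expects):

sample = ('<a href="http://www.example-bank.com/" target="_blank"  '
          'style="color:#08619D">\n 某某银行\n</a>')
print(parser_content(sample))
# -> [('http://www.example-bank.com/', '某某银行')]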

def main():
    url = "http://www.cbrc.gov.cn/chinese/jrjg/index.html"
    content = get_content(url)
    # get_content returns None on a network error; bail out instead of
    # passing None on to the parser.
    if content is None:
        return
    bankinfos = parser_content(content)
    with open('doc/bankinfo.txt', 'w') as f:
        # Each match looks like ('http://www.cdb.com.cn/', '国家开发银行\r'),
        # so strip trailing whitespace from the bank name.
        for bank in bankinfos:
            name = bank[1].rstrip()
            url = bank[0]
            # Only write the entry to the file if the bank's URL is a
            # well-formed link according to the regex below.
            pattern = r'^((https|http|ftp|rtsp|mms)?:\/\/)\S+'
            if re.search(pattern, url):
                f.write('%s: %s\n' % (name, url))
            else:
                print("%s has no official website" % name)
        print("Finished writing....")


if __name__ == '__main__':
    main()
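
One practical caveat: open('doc/bankinfo.txt', 'w') fails with FileNotFoundError if the doc directory does not yet exist. A small guard at the top of main() avoids this; os.makedirs with exist_ok is standard library (Python 3.2+):

import os

os.makedirs('doc', exist_ok=True)  # create the output directory if it is missing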