
My first scraper after teaching myself Python, and the lessons learned


Back in my freshman year I got into Touhou. While browsing the popularity rankings I came across a few interesting posts that scraped Pixiv tags and analyzed them: submission counts turned out to be roughly positively correlated with character popularity, which left a deep impression on me. Later I saw another interesting article on Zhihu that used a Python scraper to collect tags from E-Hentai. Then a few more things happened, and I ended up with the idea of scraping Tieba members and running some analysis on them.

After finishing Python, and inspired by a few experts, I wanted to do an interest analysis of user groups. I chose Tieba because it is easy to scrape and not very complex. The original goal was to scrape the forums followed by the members of several Tieba forums and use regression analysis to determine whether the audiences of two topics overlap. But plenty of things only became clear in hindsight: anti-scraping measures, hangs caused by DNS resolution, the double escaping of regular expressions, the huge number of bots on Tieba, users who hide their activity, telling astroturfers apart (。﹏。*), and the fact that an account can have no level at all, or a level above 15 ( _ _)ノ| In one sentence: I fixed bugs until I dropped.
So many ideas will have to wait for version 3, including the most important one, the audience-overlap analysis.
I started writing this project right after finishing the tutorial at www.runoob.com/python3. Along the way I learned a lot of new things and reviewed some old ones, and the project gradually became a sort of experimental platform: all kinds of odds and ends got thrown in, so it is messy. There are versions 1 and 2, and version 3 is being planned; it will have to wait until my exams are over. The comments are written very casually (probably only I can read them), and since all my programming is self-taught the structure is chaotic. Please go easy on me.
For today I will just post the code; everything else can wait.

The program is split into three files:
Download_Page.py: makes the requests and downloads the data
analyse.py: parses the page content
statisitic.py: analyzes the data (I only found out later that the standard library already has a statistics module)

Problems I ran into while writing it:

  1. I had not investigated the Tieba environment carefully enough (the mass of bot accounts, how to store the data), so as soon as version 1 was finished I had to start writing the next one.
  2. Escapes in a regular expression go through two rounds of translation: once as a Python string literal and once as the regex itself. The simple fix is to prefix the pattern with r (a raw string), which skips the string-literal round of escaping; see the sketch after this list.
  3. Watch out for a site's anti-scraping measures; calling sleep to reduce the load on the server is a cheap way to lower the risk.
  4. Sometimes, rather than spending time defeating anti-scraping measures, it is better to shorten the run with multiple threads, split the work into batches, and make the crawl interruptible so it finishes quickly.
  5. Requests can hang forever: https://blog.csdn.net/pilipala6868/article/details/80712195 (passing a timeout helps; also shown in the sketch below).
  6. The source shown by the browser's F12 debugger can differ from the actual source!! What F12 shows is the DOM after the browser has processed the page!!
  7. print(sys._getframe().f_lineno) outputs the current line number. I never ended up using it, but it should be useful when the program hangs.
    That's all I can remember for now.
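A minimal sketch of points 2, 5 and 7 above, using a made-up pattern and a placeholder URL for illustration:

import re
import sys
import urllib.request as ur

# Point 2: without the r prefix every backslash is consumed twice,
# once by the string literal and once by the regex engine.
pattern_escaped = "\\d+\\.\\d+"   # works, but doubles every backslash
pattern_raw     = r"\d+\.\d+"     # raw string: what you type is what re gets
assert re.findall(pattern_escaped, "pi is 3.14") == re.findall(pattern_raw, "pi is 3.14")

# Point 7: prints the line number of this very statement (CPython-specific)
print("now at line", sys._getframe().f_lineno)

# Point 5: urlopen without a timeout can block forever on a dead connection;
# pass one and catch the failure instead of letting the crawl freeze.
try:
    with ur.urlopen("http://tieba.baidu.com", timeout=10) as f:
        page = f.read()
except OSError as e:    # URLError and socket.timeout are both OSError subclasses
    print("request failed or timed out:", e)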

Main function:

import logging
import analyse
import Download_Page
import statisitic

if __name__=="__main__":
    # log to a file so a long crawl leaves a record even if the console is lost
    logger = logging.getLogger(__name__)
    logger.setLevel(level=logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('对于百度的爬虫v2.0.log')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    A=Download_Page.Mother_Frist('http://tieba.baidu.com/bawu2/platform/listMemberInfo?word=%E7%F7%C2%B6%C5%B5',True)
    #A.main_fun(A.LastPn+1)
    #1

    i=1
    while(i<=A.LastPn):
        analyse.way_1(str(i))
        i+=1
    logger.info('bad_member:%d',analyse.Bad_member)
    #2
    AE=statisitic.main_judge(last_pn=A.LastPn)     #4 for test

    B=statisitic.After_Effect()
    B.get_statistics_sort("weight","weight")
    input("weight ok?\n")           #pause

    B.get_statistics_sort("total_num","total_num")
    input("total_num ok?\n")
    B.get_statistics_sort("robot","robot")
    logger.info('robot_page:%d',statisitic.robot_page)
    logger.info('bad_page:%d',statisitic.bad_page)

    #3

Connection requests and data download (Download_Page.py):

import analyse
import re
import os, sys
import urllib.request as ur
import time
import random
from urllib.parse import quote
import string
import json

class Mother_Frist(analyse.Father_Ana2):

    def __init__(self,mainurl,re_boot=False):
        if re_boot:
            self.read_overview()
        else:
            self.name="チルノ"
            self.base="http://tieba.baidu.com"

            self.LastPn=0
            self.Num_member=0          # number of entries in this class's Li_subclass
            self.Error_NoName=0
            self.Bad_member=0          # already inherited once; this is a redefinition

            self.str_pn=""
            self.mainurl=mainurl
            self.Li_subclass={}

        return None


    def main_fun(self,pn=1):
        '''get forum members for deeper analysis
        '''
        suffix="&pn="

        while True:

            self.str_pn=str(pn)      # precompute once to save time
            try:
                os.makedirs( self.str_pn )
            except FileExistsError:
                pass
            finally:
                retval = os.getcwd()
                ur.urlretrieve(self.mainurl+suffix+self.str_pn,"Mother"+self.str_pn+".html")

                if not self.ana_member("Mother"+self.str_pn+".html"):
                    break
            os.chdir(retval)
            self.LastPn=pn
            pn+=1
            self.write_overview()
            # anti-anti-spider: randomized pauses to reduce server load
            time.sleep(random.randint(1,30))
            if pn%5==0:
                time.sleep(5)
            if pn%10==0:
                time.sleep(5)
        return pn
    

    def ana_member(self,sequence):
        '''use analyse's functions to extract each member page's URL
        '''
        try:
            with open(sequence, 'r', encoding='GBK') as fr:
                self.passage=fr.read()
        except UnicodeDecodeError:
            with open(sequence, 'r', encoding='UTF-8') as fr:
                self.passage=fr.read()

        self.find_bulk(self.passage,"<div class=\"forum_info_section member_wrap clearfix bawu-info\">.*</span></div></span>       </div>")
        self.find_Lobject(self.bulk,"<span class=\"member.*?\">.*?</span>")
        if self.list_object==[]:
            return False    # the last page may have been reached
        else:
            self.list_ReAssign()
            return True


    def Download_html(self):
        '''discarded and unusable
        '''
        os.chdir(os.getcwd()+"\\"+self.str_pn)
        # multiprocessing was removed because of anti-spider measures
        for i,j in enumerate(self.list_object):
            self.get_link(j)
            self.get_title(j)
            time.sleep(random.randint(1,5))

            if  self.title=="":
                self.Error_NoName+=1
                ur.urlretrieve(self.base+self.link,"Error_NoName"+str(self.Error_NoName)+".html")
            else:
                self.Li_subclass[self.title]=self.link
                ur.urlretrieve(self.base+self.link,self.title+".html")   # was "the_name", an undefined variable
        return os.getcwd()

    def list_ReAssign(self):
        os.chdir(os.getcwd()+"\\"+self.str_pn)

        for j in self.list_object:
            self.get_title(j)
            self.get_link(j)

            req = ur.Request(self.base+self.link)
            req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36',)
            with ur.urlopen(req,timeout=50) as f:
                raw=f.read()        # read once; a second read() would return empty bytes
                try:
                    self.passage=raw.decode('utf-8')
                except UnicodeDecodeError:
                    self.passage=raw.decode('gbk')
            if not self.find_bulk(self.passage,"<div class=\"userinfo_shortcut\">.*?</div>"):
                self.Bad_member+=1
                continue
            # Tieba 404; I should have planned how to handle these beforehand
            self.get_link(self.bulk)    # the regex picks the first match by default

            time.sleep(random.randint(1,5))

            if  self.title=="":
                self.Error_NoName+=1
                self.link = quote("https:"+self.link,safe=string.printable)
                self.Li_subclass["Error_NoName"+str(self.Error_NoName)]=self.link
                # the https: scheme is missing from the raw link; I don't know why browsers resolve it anyway
                ur.urlretrieve(self.link,"Error_NoName"+str(self.Error_NoName)+".html")
            else:
                self.link = quote("https:"+self.link,safe=string.printable)
                self.Li_subclass[self.title]=self.link
                # same missing scheme; also, Chinese in a URL is fine but must be percent-encoded
                ur.urlretrieve(self.link,self.title+".html")
        return os.getcwd()



    def discard_write_overview(self):
        '''old write system, no longer used
        '''
        with open("record.ini", 'w', encoding='utf-8') as fw:
            fw.write("name:"+self.name+"\n")
            fw.write("mainurl:"+self.mainurl+"\n")
            fw.write("LastPn:"+str(self.LastPn)+"\n")
            fw.write("Li_subclass:"+str(self.Li_subclass)+"\n")
            fw.write("Num_member:"+str(self.Num_member)+"\n")
            fw.write("Bad_member:"+str(self.Bad_member)+"\n")
            fw.write("Error_no_name:"+str(self.Error_NoName)+"\n")

    def write_overview(self):
        '''checkpoint the whole object state as JSON so a crawl can resume
        '''
        with open("record.ini", 'w', encoding='utf-8') as fw:
            json.dump(self.__dict__,fw)
            return True

    def read_overview(self):
        '''restore the object state saved by write_overview
        '''
        with open("record.ini", 'r', encoding='utf-8') as fr:
            self.__dict__=json.load(fr)
            return True


def Get_FristPage(filename,target):
    '''better not to use: parses the old plain-text record format, not the JSON one
    '''
    li=[]
    with open(filename,"r+",encoding="utf-8") as fr:
        li=fr.readlines()
    for i in li:
        if i.find(target)==0:
            return int(i[len(target):])


if __name__=="__main__":
    # for test

    A=Mother_Frist('http://tieba.baidu.com/bawu2/platform/listMemberInfo?word=%E7%F7%C2%B6%C5%B5')
    A.main_fun()
    A.write_overview()      # write_FristPage no longer exists; the JSON overview replaced it
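The write_overview/read_overview pair above is in effect a checkpoint mechanism: the object's whole __dict__ is dumped to record.ini after every page, and constructing Mother_Frist with re_boot=True reloads it, so an interrupted crawl can resume where it stopped. A standalone sketch of the same idea (the class and attribute names here are illustrative, not the project's):

import json

class Checkpointed:
    def __init__(self, resume=False, path="record.ini"):
        self.path = path
        if resume:
            # restore everything the previous run saved
            with open(path, 'r', encoding='utf-8') as fr:
                self.__dict__.update(json.load(fr))
        else:
            self.last_page = 0    # progress marker, like LastPn above

    def save(self):
        # dump the full instance state; only JSON-serializable attributes survive
        with open(self.path, 'w', encoding='utf-8') as fw:
            json.dump(self.__dict__, fw)

# save after each unit of work, resume after a crash:
job = Checkpointed()
job.last_page += 1
job.save()
restored = Checkpointed(resume=True)
assert restored.last_page == 1

One caveat: dumping __dict__ wholesale fails as soon as any attribute is not JSON-serializable (an open file, a compiled regex), so everything kept on the object has to stay plain data.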

Data analysis (statisitic.py):

import analyse
import Download_Page
import os
import json

robot_page=0
bad_page=0

# ignored: name, u_id (unknown type)
# counted: how often each forum name appears, and how many accounts of each level it has

class statistics:
    # class-level defaults; they are shadowed by the instance attributes set in __init__
    title=""                     # anchor pointer
    dict_level={i:0 for i in range(0,19)}
    total_num=0
    weight=-1
    robot=0
    robot_level={i:0 for i in range(0,19)}
    robot_weight=-1

    def __init__(self):
        self.title=""
        self.dict_level={i:0 for i in range(0,19)}
        self.robot_level={i:0 for i in range(0,19)}
        self.weight=-1
        self.total_num=0
        return None

    def cal_weight(self):
        # weight = sum of (level * count); account levels act as vote weights
        self.weight=0
        self.robot_weight=0     # was left at -1, which skewed the sum by one
        for i,j in self.dict_level.items():
            self.weight+=i*j
        for i,j in self.robot_level.items():
            self.robot_weight+=i*j

        return self.dict_level

    def add_level(self,level):
        if level>=0 and level<=18:
            self.dict_level[level]+=1
            self.total_num+=1
        else:
            raise IndexError("level too high or too low!")
        return self.dict_level[level]

    def add_robot(self,level):
        if level>=0 and level<=18:
            self.robot_level[level]+=1
            self.robot+=1
        else:
            raise IndexError("level too high or too low!")
        return self.robot_level[level]    # was returning dict_level by mistake

    def storge(self,file_object):
        '''the file must be opened with "a+" for writing
        '''
        file_object.write(json.dumps(self.__dict__))
        file_object.write("\n")
        return True

    def get_storge(self,file_object):
        '''the file must be opened with "r" for reading
        '''
        temp_str=file_object.readline()
        if temp_str=="":
            return False
        else:
            # note: JSON turns the integer keys of dict_level into strings on reload
            self.__dict__=json.loads(temp_str.rstrip("\n"))
            return True

class After_Effect(statistics):

    def __init__(self, *args, **kwargs):
        self.main_dict={}
        return super().__init__(*args, **kwargs)

    def fc_print1(self,j):
        print("{1:<5}{0}".format(j.title,j.weight))
        with open("统计1.txt","a+") as fa:
            fa.write("{1:<5}{0}\n".format(j.title,j.weight))    # newline so records do not run together

    def fc_print2(self,j):
        print("{1:<5}{0}".format(j.title,j.total_num))
        with open("统计2.txt","a+") as fa:
            fa.write("{1:<5}{0}\n".format(j.title,j.total_num))

    def fc_print3(self,j):
        print("{1:<5}{0}".format(j.title,j.robot))
        with open("统计3.txt","a+") as fa:
            fa.write("{1:<5}{0}\n".format(j.title,j.robot))

    def get_statistics_sort(self,kind="weight",FileName="statisitic_sort"):
        """
        kind --- weight, total_num or robot
        """
        Frist=statistics()
        lis=[]
        Temp_dict={}    # same objects as lis, but grouped with the chosen metric as key
        with open("statisitic.json","r") as fr:
            while Frist.get_storge(fr):
                lis.append(Frist)
                Frist=statistics()
        for i in lis:
            if kind=="weight":
                fc=self.fc_print1       # to be improved
                try:
                    Temp_dict[i.weight].append(i)
                except KeyError:
                    Temp_dict[i.weight]=[]
                    Temp_dict[i.weight].append(i)
            elif kind=="total_num":
                fc=self.fc_print2
                try:
                    Temp_dict[i.total_num].append(i)
                except KeyError:
                    Temp_dict[i.total_num]=[]
                    Temp_dict[i.total_num].append(i)
            elif kind=="robot":
                fc=self.fc_print3
                try:
                    Temp_dict[i.robot].append(i)
                except KeyError:
                    Temp_dict[i.robot]=[]
                    Temp_dict[i.robot].append(i)
            else:
                raise TypeError("kind not recognised!")
        unsort=list(Temp_dict.keys())
        unsort.sort()
        with open(FileName+".json","w+") as fw:
            for n in unsort:
                for j in Temp_dict[n]:
                    fc(j)
                    j.storge(fw)

    def statistics_sort(self,kind="weight",FileName="statisitic_sort"):
        """
        kind --- weight, total_num or robot
        """
        lis=[]
        Temp_dict={}    # same objects as lis, but grouped with the chosen metric as key
        lis=list(self.main_dict.values())
        for i in lis:
            if kind=="weight":
                try:
                    Temp_dict[i.weight].append(i)
                except KeyError:
                    Temp_dict[i.weight]=[]
                    Temp_dict[i.weight].append(i)
            elif kind=="total_num":
                try:
                    Temp_dict[i.total_num].append(i)
                except KeyError:
                    Temp_dict[i.total_num]=[]
                    Temp_dict[i.total_num].append(i)
            elif kind=="robot":
                try:
                    Temp_dict[i.robot].append(i)
                except KeyError:
                    Temp_dict[i.robot]=[]
                    Temp_dict[i.robot].append(i)
            else:
                raise TypeError("kind not recognised!")
        unsort=list(Temp_dict.keys())
        unsort.sort()
        with open(FileName+".json","w+") as fw:
            for n in unsort:
                for j in Temp_dict[n]:
                    print("{1:<5}{0}".format(j.title,j.weight))
                    j.storge(fw)

def main_judge(dir_taget=os.curdir,last_pn=-1):
    os.chdir(dir_taget)

    if last_pn==-1:
        last_pn=Download_Page.Get_FristPage("record.ini","LastPn:")   # module name was misspelled as Downlowd_Page

    # note to self: settle on whether indices start at 1 or 0
    folder_num=1
    kind_dic=After_Effect()        # a dedicated class would have been wasteful
    new_forum=statistics()
    lis=[]              # return list of the way_2 function
    global robot_page
    global bad_page

    while(folder_num!=last_pn):    # note: folder last_pn itself is never processed
        os.chdir(str(folder_num))
        length=len(analyse.search_file(os.curdir,".html"))
        lis=analyse.way_2("report.json")
        if lis is False:
            folder_num+=1
            bad_page+=1
            os.chdir(os.pardir)
            continue
        if len(set(i.title for i in lis))>4*length:          # a set (not a tuple) counts the distinct titles
            for j in lis:
                if j.title in kind_dic.main_dict:
                    kind_dic.main_dict[j.title].add_level(j.level)
                else:
                    kind_dic.main_dict[j.title]=new_forum
                    new_forum.title=j.title
                    new_forum.add_level(j.level)
                    new_forum=statistics()

        else:
            robot_page+=1
            for j in lis:
                if j.title in kind_dic.main_dict:
                    kind_dic.main_dict[j.title].add_robot(j.level)
                else:
                    kind_dic.main_dict[j.title]=new_forum
                    new_forum.title=j.title
                    new_forum.add_robot(j.level)
                    new_forum=statistics()
        folder_num+=1
        os.chdir(os.pardir)
    with open("statisitic.json","w+") as fw:
        for n in kind_dic.main_dict.values():
            n.cal_weight()
            n.storge(fw)
    return kind_dic



if __name__=="__main__":
    main_judge(last_pn=459)     #4 for test
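A note on the robot filter in main_judge: a folder's records are treated as coming from real users only when the number of distinct forum titles exceeds four times the number of member pages in that folder, on the assumption that genuine users follow a varied set of forums while bot farms concentrate on the same few. A minimal restatement of that check (the names records and member_count are illustrative, not the project's):

def looks_like_robot_page(records, member_count, ratio=4):
    '''Heuristic: too few distinct forum titles per member suggests bots.'''
    distinct_titles = {r.title for r in records}    # a set drops duplicate titles
    return len(distinct_titles) <= ratio * member_count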