A Walkthrough of a Python Script for Batch-Boosting cnblogs (博客园) Page Views
I was bored this morning... got up at 7 and suddenly felt like writing a view-count booster, so let's get to it.
This is for testing only — I don't recommend actually inflating your view counts.
The idea is very simple: step one, collect proxy IPs; step two, simulate visits through them.
Collecting HTTP proxy IPs
There are plenty of proxy-IP sources online, both paid and free — for example the free list at www.kuaidaili.com, which is what the script below uses.
Whichever site you pick, what we need is the same: crawl it for the IPs and port numbers and collect them in one place.
How to crawl depends on each site's structure. On kuaidaili, the IP and port sit in <td> tags, so bs4 (BeautifulSoup) is enough. Here is the script:
# collect proxy IPs
def get_proxy_ip():
    print("==========批量提取ip刷博客园访问量 by 卿=========")
    print("        blogs:https://www.cnblogs.com/-qing-/")
    print("                 started!")
    global proxy_list
    proxy_list = []
    url = "https://www.kuaidaili.com/free/inha/"
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "accept-encoding": "gzip, deflate, sdch, br",
        "accept-language": "zh-CN,zh;q=0.8",
        "cache-control": "max-age=0",
        "connection": "keep-alive",
        "cookie": "channelid=0; sid=1561681200472193; _ga=ga1.2.762166746.1561681203; _gid=ga1.2.971407760.1561681203; _gat=1; hm_lvt_7ed65b1cc4b810e9fd37959c9bb51b31=1561681203; hm_lpvt_7ed65b1cc4b810e9fd37959c9bb51b31=1561681203",
        "host": "www.kuaidaili.com",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0",
        "referrer policy": "no-referrer-when-downgrade",
    }
    for i in range(1, 100):
        url = "https://www.kuaidaili.com/free/inha/" + str(i)
        html = requests.get(url=url, headers=headers).content
        soup = BeautifulSoup(html, 'html.parser')
        ip_list = ''
        port_list = ''
        protocol_list = ''
        # on this site the IP and port are in <td> tags marked data-title="IP" / data-title="PORT"
        for ip in soup.find_all('td'):
            data_title = ip.get('data-title') or ''
            if "IP" in data_title:
                ip_list = ip.get_text()      # the IP
            if "PORT" in data_title:
                port_list = ip.get_text()    # the port
            if ip_list != '' and port_list != '':
                proxy = ip_list + ":" + port_list
                ip_list = ''
                port_list = ''
                proxy_list.append(proxy)
        iv_main()           # visit the article through every proxy scraped from this page
        time.sleep(2)
        proxy_list = []
This puts the scraped IP:port pairs into the proxy_list list.
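Free proxies die quickly, so it can be worth filtering the list before spending requests on the article. A minimal sketch, not part of the original script — the test URL (httpbin.org) and the 5-second timeout are just illustrative choices:

import requests

def filter_alive(proxy_list, test_url="http://httpbin.org/ip", timeout=5):
    # keep only the proxies that answer a simple GET within `timeout` seconds
    alive = []
    for p in proxy_list:
        try:
            requests.get(test_url, proxies={"http": "http://" + p}, timeout=timeout)
            alive.append(p)
        except requests.RequestException:
            pass
    return alive

# usage: proxy_list = filter_alive(proxy_list)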
Simulating visits to the cnblogs article
This part is simple: loop over the proxy list above and request the article with the requests module.
def iv_main():
    # visit the target article once through every proxy currently in proxy_list
    proxies = {}
    requests.packages.urllib3.disable_warnings()
    #proxy_ip = random.choice(proxy_list)
    url = 'https://www.cnblogs.com/-qing-/p/11080845.html'
    for proxy_ip in proxy_list:
        headers2 = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'accept-encoding': 'gzip, deflate, sdch, br',
            'accept-language': 'zh-CN,zh;q=0.8',
            'cache-control': 'max-age=0',
            'cookie': '__gads=id=8c6fd85d91262bb1:t=1561554219:s=alni_mzwz0cmkqjk-l19drx5dpdtyvp63q; _gat=1; _ga=ga1.2.359634670.1561535095; _gid=ga1.2.1087331661.1561535095',
            'if-modified-since': 'fri, 28 jun 2019 02:10:23 gmt',
            'referer': 'https://www.cnblogs.com/',
            'upgrade-insecure-requests': '1',
            'user-agent': random.choice(user_agent_list),
        }
        proxies['http'] = proxy_ip
        #user_agent = random.choice(user_agent_list)
        try:
            r = requests.get(url, headers=headers2, proxies=proxies, verify=False)  # verify: whether to validate the server's SSL certificate
            print("[*]" + proxy_ip + " 访问成功!")
        except:
            print("[-]" + proxy_ip + " 访问失败!")
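One detail about the proxies argument: requests picks a proxy by matching the scheme of the target URL against the keys of the proxies dict, and the values are normally full proxy URLs. The loop above only sets proxies['http'], but the article URL is served over HTTPS, so an 'https' entry is what actually routes those requests through the proxy. A hedged variant of the request inside the loop — the scheme prefixes, the 'https' entry, and the timeout are my additions, not the original's:

proxies = {
    "http":  "http://" + proxy_ip,   # used for http:// targets
    "https": "http://" + proxy_ip,   # used for https:// targets such as the cnblogs URL
}
r = requests.get(url, headers=headers2, proxies=proxies, verify=False, timeout=10)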
It's best to attach a random User-Agent header to each request:
user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
    "Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3",
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
]
Optimizing and putting it together
This can be optimized a little by feeding the work to a few threads via a queue (although, because of Python's GIL, it doesn't buy much here).
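The combined script below simply starts several threads that each run the whole scrape-and-visit routine. Since the text mentions a queue, here is a rough worker-pool sketch along those lines — entirely my illustration, not the author's code, and scrape_and_visit is a hypothetical placeholder for "scrape one proxy page, then visit the article through its proxies":

import queue
import threading

def scrape_and_visit(page):
    # hypothetical placeholder: scrape kuaidaili page `page`, then visit the article via each proxy found
    pass

page_queue = queue.Queue()
for page in range(1, 100):            # the free-list pages to scrape
    page_queue.put(page)

def worker():
    while True:
        try:
            page = page_queue.get_nowait()
        except queue.Empty:
            return                    # no pages left, this worker exits
        scrape_and_visit(page)
        page_queue.task_done()

threads = [threading.Thread(target=worker) for _ in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()

With a shared queue each page is scraped exactly once, instead of every thread crawling all 99 pages as in the version below.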
The final, assembled script:
# -*- coding:utf-8 -*-
#by 卿
#blog:https://www.cnblogs.com/-qing-/
import requests
from bs4 import BeautifulSoup
import re
import time
import random
import threading

print("==========批量提取ip刷博客园访问量 by 卿=========")
print("        blogs:https://www.cnblogs.com/-qing-/")
print("                 started!")

user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
    "Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3",
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
]

def iv_main():
    # visit the target article once through every proxy currently in proxy_list
    proxies = {}
    requests.packages.urllib3.disable_warnings()
    #proxy_ip = random.choice(proxy_list)
    url = 'https://www.cnblogs.com/-qing-/p/11080845.html'
    for proxy_ip in proxy_list:
        headers2 = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'accept-encoding': 'gzip, deflate, sdch, br',
            'accept-language': 'zh-CN,zh;q=0.8',
            'cache-control': 'max-age=0',
            'cookie': '__gads=id=8c6fd85d91262bb1:t=1561554219:s=alni_mzwz0cmkqjk-l19drx5dpdtyvp63q; _gat=1; _ga=ga1.2.359634670.1561535095; _gid=ga1.2.1087331661.1561535095',
            'if-modified-since': 'fri, 28 jun 2019 02:10:23 gmt',
            'referer': 'https://www.cnblogs.com/',
            'upgrade-insecure-requests': '1',
            'user-agent': random.choice(user_agent_list),
        }
        proxies['http'] = proxy_ip
        #user_agent = random.choice(user_agent_list)
        try:
            r = requests.get(url, headers=headers2, proxies=proxies, verify=False)  # verify: whether to validate the server's SSL certificate
            print("[*]" + proxy_ip + " 访问成功!")
        except:
            print("[-]" + proxy_ip + " 访问失败!")

# collect proxy IPs
def get_proxy_ip():
    global proxy_list
    proxy_list = []
    url = "https://www.kuaidaili.com/free/inha/"
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "accept-encoding": "gzip, deflate, sdch, br",
        "accept-language": "zh-CN,zh;q=0.8",
        "cache-control": "max-age=0",
        "connection": "keep-alive",
        "cookie": "channelid=0; sid=1561681200472193; _ga=ga1.2.762166746.1561681203; _gid=ga1.2.971407760.1561681203; _gat=1; hm_lvt_7ed65b1cc4b810e9fd37959c9bb51b31=1561681203; hm_lpvt_7ed65b1cc4b810e9fd37959c9bb51b31=1561681203",
        "host": "www.kuaidaili.com",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0",
        "referrer policy": "no-referrer-when-downgrade",
    }
    for i in range(1, 100):
        url = "https://www.kuaidaili.com/free/inha/" + str(i)
        html = requests.get(url=url, headers=headers).content
        soup = BeautifulSoup(html, 'html.parser')
        ip_list = ''
        port_list = ''
        protocol_list = ''
        # IP and port sit in <td> tags marked data-title="IP" / data-title="PORT"
        for ip in soup.find_all('td'):
            data_title = ip.get('data-title') or ''
            if "IP" in data_title:
                ip_list = ip.get_text()      # the IP
            if "PORT" in data_title:
                port_list = ip.get_text()    # the port
            if ip_list != '' and port_list != '':
                proxy = ip_list + ":" + port_list
                ip_list = ''
                port_list = ''
                proxy_list.append(proxy)
        iv_main()           # visit the article through every proxy scraped from this page
        time.sleep(2)
        proxy_list = []

th = []
th_num = 10
for x in range(th_num):
    t = threading.Thread(target=get_proxy_ip)
    th.append(t)
for x in range(th_num):
    th[x].start()
for x in range(th_num):
    th[x].join()
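One thing to be aware of in the combined script: all ten threads run get_proxy_ip, and each one reassigns and appends to the same global proxy_list, so they can overwrite each other's results mid-run; the original simply lives with that. A minimal sketch of serializing access with a threading.Lock — my own addition, not part of the author's code:

import threading

proxy_list = []
proxy_list_lock = threading.Lock()

def add_proxy(proxy):
    # hypothetical helper: serialize appends so threads don't clobber the shared list
    with proxy_list_lock:
        proxy_list.append(proxy)

Calling add_proxy(proxy) instead of proxy_list.append(proxy) inside get_proxy_ip keeps the appends consistent; a cleaner alternative is to give each thread its own local list (or its own page range) and merge at the end.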
Results
That's all for this article. I hope it helps with your studies, and thank you for your support.