Sample code for crawling Xici proxies with Python multithreading
Xici Proxy (xicidaili) was a Chinese free IP proxy list. Since the site has shut down, I'm releasing my original code for everyone to study.
Mirror address:
First, find all the <tr> tags along with the tags whose class="odd", and extract them.
Then walk through every <td> tag inside each <tr> and keep only the cells at positions [1, 2, 5, 9] (the IP, port, protocol type, and verify-time columns); skip everything else.
With that, the code to scrape a single page can be written; the extracted rows are saved to a file.
import re
import requests
from bs4 import BeautifulSoup

head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36"}

if __name__ == "__main__":
    ip_list = []
    fp = open("spideraddr.json", "a+", encoding="utf-8")
    url = "https://www.blib.cn/url/xcdl.html"
    request = requests.get(url=url, headers=head)
    soup = BeautifulSoup(request.content, "lxml")
    # Match both the plain <tr> rows and the rows marked class="odd"
    data = soup.find_all(name="tr", attrs={"class": re.compile("|[^odd]")})
    for item in data:
        soup_proxy = BeautifulSoup(str(item), "lxml")
        proxy_list = soup_proxy.find_all(name="td")
        # Keep only columns 1, 2, 5, 9: IP, port, protocol type, verify time
        for i in [1, 2, 5, 9]:
            ip_list.append(proxy_list[i].string)
        print("[+] Crawled row: {} saved".format(ip_list))
        fp.write(str(ip_list) + '\n')
        ip_list.clear()
After crawling, the rows are appended to spideraddr.json. (Despite the extension, each line is a Python list literal rather than strict JSON.)
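For reference, each saved line is a four-element list in [IP, port, protocol, verify-time] column order. An illustrative line (the values here are made up) looks like:

['119.101.112.31', '9999', 'HTTP', '20-03-13 12:21']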
Finally, a second piece of code converts each record into the dictionary format that an SSR proxy tool (and requests itself) can consume directly, e.g. {'http': 'http://119.101.112.31:9999'}.
if __name__ == "__main__":
    result = []
    fp = open("spideraddr.json", "r")
    data = fp.readlines()
    for item in data:
        dic = {}
        # Each line is a list literal: [ip, port, protocol, verify_time]
        read_line = eval(item.replace("\n", ""))
        protocol = read_line[2].lower()
        if protocol == "http":
            dic[protocol] = "http://" + read_line[0] + ":" + read_line[1]
        else:
            dic[protocol] = "https://" + read_line[0] + ":" + read_line[1]
        result.append(dic)
    print(result)
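Since this is exactly the shape requests expects, a converted entry can be passed straight to the proxies parameter. A minimal sketch, assuming one of the scraped proxies is still alive and using httpbin.org (not part of the original article) to echo the originating IP:

import requests

# One entry from the converted result (illustrative address)
proxies = {'http': 'http://119.101.112.31:9999'}
try:
    # httpbin.org/ip echoes the caller's IP, so the output shows whether the proxy took effect
    response = requests.get("http://httpbin.org/ip", proxies=proxies, timeout=5)
    print(response.text)
except requests.RequestException:
    print("[-] proxy unreachable")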
The complete multithreaded version is shown below.
import re
import threading
import argparse
from queue import Queue

import requests
from bs4 import BeautifulSoup

head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36"}

class AgentSpider(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        ip_list = []
        fp = open("spideraddr.json", "a+", encoding="utf-8")
        while not self._queue.empty():
            url = self._queue.get()
            try:
                request = requests.get(url=url, headers=head)
                soup = BeautifulSoup(request.content, "lxml")
                data = soup.find_all(name="tr", attrs={"class": re.compile("|[^odd]")})
                for item in data:
                    soup_proxy = BeautifulSoup(str(item), "lxml")
                    proxy_list = soup_proxy.find_all(name="td")
                    # Columns 1, 2, 5, 9: IP, port, protocol type, verify time
                    for i in [1, 2, 5, 9]:
                        ip_list.append(proxy_list[i].string)
                    print("[+] Crawled row: {} saved".format(ip_list))
                    fp.write(str(ip_list) + '\n')
                    ip_list.clear()
            except Exception:
                pass

def StartThread(count):
    queue = Queue()
    threads = []
    for item in range(1, int(count) + 1):
        url = "https://www.xicidaili.com/nn/{}".format(item)
        queue.put(url)
        print("[+] Generated crawl URL {}".format(url))
    for item in range(count):
        threads.append(AgentSpider(queue))
    for t in threads:
        t.start()
    for t in threads:
        t.join()

# Conversion function: turn saved rows into requests-style proxy dictionaries
def ConversionAgentIP(filename):
    result = []
    fp = open(filename, "r")
    data = fp.readlines()
    for item in data:
        dic = {}
        read_line = eval(item.replace("\n", ""))
        protocol = read_line[2].lower()
        if protocol == "http":
            dic[protocol] = "http://" + read_line[0] + ":" + read_line[1]
        else:
            dic[protocol] = "https://" + read_line[0] + ":" + read_line[1]
        result.append(dic)
    return result

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--page", dest="page", help="number of pages to crawl")
    parser.add_argument("-f", "--file", dest="file", help="convert the crawl results (spideraddr.json) to proxy format")
    args = parser.parse_args()
    if args.page:
        StartThread(int(args.page))
    elif args.file:
        dic = ConversionAgentIP(args.file)
        for item in dic:
            print(item)
    else:
        parser.print_help()
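Assuming the script is saved as xici_spider.py (the filename is arbitrary), a typical session crawls a few pages first and then converts the dump:

python xici_spider.py -p 5                 # crawl pages 1-5 and append rows to spideraddr.json
python xici_spider.py -f spideraddr.json   # print each record as a requests-style proxy dict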
That concludes the sample code for crawling Xici proxies with Python multithreading. For more material on multithreaded proxy crawling in Python, see the other related articles!