
Web Crawler


1. Initial version
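(The examples below assume Apache HttpClient 4.x, org.apache.httpcomponents:httpclient, and the Jsoup HTML parser, org.jsoup:jsoup, are on the classpath.)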

package com.zyjl.crawler;

import java.io.IOException;

import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * A class that crawls network resources (web crawler).
 * Level 1: fetch a page with HttpClient, parse it with Jsoup, and list every URL on it.
 * @author Administrator
 *
 */
public class StartCrawler01 {
	
	/**
	 * Parse a URL: fetch the content behind it with HttpClient.
	 * @param url the URL to fetch
	 * @param realDir the base directory used to resolve relative links
	 */
	public static void parseUrl(String url, String realDir) {
		CloseableHttpClient httpClient = HttpClients.createDefault(); // obtain an HttpClient instance
		HttpGet httpGet = new HttpGet(url); // use a GET request
		CloseableHttpResponse response = null;
		try {
			response = httpClient.execute(httpGet); // execute the request
			HttpEntity entity = response.getEntity(); // get the response body
//			System.out.println(entity.getContentType().toString()); // Content-Type: text/html
//			System.out.println(entity.getContentType().getName());  // Content-Type
//			System.out.println(entity.getContentType().getValue()); // text/html
			// only text/html responses contain links, so only they need further parsing
			if (entity.getContentType() != null && "text/html".equals(entity.getContentType().getValue())) {
				String pageContent = EntityUtils.toString(entity, "utf-8"); // read the page as a string
//				System.out.println("Page content: " + pageContent);
				parsePageContent(pageContent, realDir); // parse the page with the Jsoup crawler framework
			}
		} catch (ClientProtocolException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			try {
				// release the response and client resources
				if (response != null) {
					response.close();
				}
				if (httpClient != null) {
					httpClient.close();
				}
			} catch (IOException e) {
				e.printStackTrace();
			}
		}
	}
	
	/**
	 * Parse the page content with the Jsoup HTML parser.
	 * @param pageContent the raw HTML of the page
	 * @param realDir the base directory used to resolve relative links
	 */
	public static void parsePageContent(String pageContent, String realDir) {
		Document doc = Jsoup.parse(pageContent); // build the DOM tree of the page
		Elements aEles = doc.select("a"); // select every <a> tag in the DOM
		for (Element aEle : aEles) {
			String aUrl = aEle.attr("href"); // read the href attribute of the <a> tag
			System.out.println("URL:" + realDir + aUrl);
		}
	}

	/**
	 * Program entry point.
	 * @param args
	 */
	public static void main(String[] args) {
		String url="http://central.maven.org/maven2/HTTPClient/HTTPClient/";
		parseUrl(url,url);
	}

}
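As a side note, Jsoup can also fetch pages by itself, which collapses the HttpClient plumbing above into a few lines. The following is a minimal sketch of that variant, not part of the original program; the a[href] selector and the 10-second timeout are assumptions.

import java.io.IOException;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class JsoupOnlyCrawler {

	public static void main(String[] args) throws IOException {
		String url = "http://central.maven.org/maven2/HTTPClient/HTTPClient/";
		// Jsoup.connect() downloads and parses the page in one step (10 s timeout assumed)
		Document doc = Jsoup.connect(url).timeout(10000).get();
		for (Element aEle : doc.select("a[href]")) { // every <a> tag that has an href
			// absUrl() resolves the href against the page's base URL
			System.out.println("URL:" + aEle.absUrl("href"));
		}
	}
}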

2. First optimization: filtering

package com.zyjl.crawler;

import java.io.IOException;
import java.util.LinkedList;
import java.util.Queue;

import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * A class that crawls network resources (web crawler).
 * Level 2: filter out unwanted URLs and parse the remaining ones iteratively.
 * @author Administrator
 *
 */
public class StartCrawler02 {
	// A queue is first-in, first-out (a stack is first-in, last-out)
	public static String[] excludeUrls = new String[] { ".pom", ".xml", ".md5", ".sha1", ".asc", ".gz", ".zip", "../" }; // URL suffixes to filter out
	public static Queue<String> waitForCrawlerUrls = new LinkedList<String>(); // URLs waiting to be crawled again
	public static long total = 0; // counts the target links found so far
	
	/**
	 * Parse URLs: take each queued URL and fetch its content with HttpClient.
	 */
	public static void parseUrl() {
		while (waitForCrawlerUrls.size() > 0) {
			String URL = waitForCrawlerUrls.poll(); // take the head of the queue and remove it
			CloseableHttpClient httpClient = HttpClients.createDefault(); // obtain an HttpClient instance
			HttpGet httpGet = new HttpGet(URL); // use a GET request
			CloseableHttpResponse response = null;
			try {
				response = httpClient.execute(httpGet); // execute the request
				HttpEntity entity = response.getEntity(); // get the response body
				// only text/html responses contain links, so only they need further parsing
				if (entity.getContentType() != null && "text/html".equals(entity.getContentType().getValue())) {
					String pageContent = EntityUtils.toString(entity, "utf-8"); // read the page as a string
					//System.out.println("Page content: " + pageContent);
					parsePageContent(pageContent, URL); // parse the page with the Jsoup crawler framework
				}
			} catch (ClientProtocolException e) {
				e.printStackTrace();
			} catch (IOException e) {
				e.printStackTrace();
			} finally {
				try {
					// release the response and client resources
					if (response != null) {
						response.close();
					}
					if (httpClient != null) {
						httpClient.close();
					}
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}
	
	/**
	 * Parse the page content with the Jsoup HTML parser.
	 * @param pageContent the raw HTML of the page
	 * @param realDir the base directory used to resolve relative links
	 */
	public static void parsePageContent(String pageContent, String realDir) {
		Document doc = Jsoup.parse(pageContent); // build the DOM tree of the page
		Elements aEles = doc.select("a"); // select every <a> tag in the DOM
		long i = 0; // counts the links read from the current page
		for (Element aEle : aEles) {
			String aHref = aEle.attr("href"); // read the href attribute of the <a> tag
			String URL = realDir + aHref; // the resolved link

			if (i == 0) {
				System.out.println("\r" + "Starting to crawl a new page..." + "\r");
			}

			System.out.println("Read link #" + (++i) + ": " + URL);
			/**
			 * Each link falls into one of three cases:
			 * 1: a target link (a .jar file)
			 * 2: a link that must be filtered out
			 * 3: a link that must be crawled again (a subdirectory)
			 */
			if (URL == null || URL.equals("")) continue; // skip empty URLs
			boolean f = true; // assume the link is acceptable
			for (String excludeUrl : excludeUrls) { // check every suffix that must be filtered out
				if (URL.endsWith(excludeUrl)) { // the suffix matches, so drop the link
					f = false;
					break;
				}
			}
			if (!f) continue; // case 2: filtered out, do not crawl it
			if (URL.endsWith(".jar")) { // case 1: a target link
				System.err.println("\r" + "Target link #" + (++total) + ", URL:" + URL + "\r");
			} else { // case 3: queue the link to be crawled again
				addUrl(URL);
			}
		}
	}
	
	/**
	 * Add a URL to the crawler queue so it is crawled later.
	 * @param URL
	 */
	private static void addUrl(String URL) {
		System.out.println("Link " + URL + " was added to the crawler queue");
		waitForCrawlerUrls.add(URL);
	}
	
	/**
	 * Initialization: seed the queue and start crawling.
	 */
	private static void init() {
		String URL = "http://central.maven.org/maven2/HTTPClient/HTTPClient/";
		addUrl(URL);
		addUrl("http://central.maven.org/maven2/commons-cli/commons-cli/");
		parseUrl();
	}

	/**
	 * Program entry point.
	 * @param args
	 */
	public static void main(String[] args) {
		init();
	}

}
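The crawl order here comes entirely from the FIFO queue mentioned in the field comment: poll() always removes the URL that was added first, so directories are visited breadth-first. A minimal standalone sketch of that behavior (the strings are placeholders):

import java.util.LinkedList;
import java.util.Queue;

public class QueueFifoDemo {

	public static void main(String[] args) {
		Queue<String> queue = new LinkedList<String>();
		queue.add("first");
		queue.add("second");
		queue.add("third");
		// poll() returns elements in insertion order: first, second, third
		while (queue.size() > 0) {
			System.out.println(queue.poll());
		}
	}
}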

3. Performance optimization

package com.zyjl.crawler;

import java.io.IOException;
import java.net.SocketTimeoutException;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * A class that crawls network resources (web crawler).
 * Level 3: use an asynchronous thread pool to speed up the crawl.
 * @author Administrator
 *
 */
public class StartCrawler03 {

	// A queue is first-in, first-out (a stack is first-in, last-out);
	// a ConcurrentLinkedQueue is used because several pool threads poll it at once
	public static String[] excludeUrls = new String[] { ".pom", ".xml", ".md5", ".sha1", ".asc", ".gz", ".zip", "../" }; // URL suffixes to filter out
	public static Queue<String> waitForCrawlerUrls = new ConcurrentLinkedQueue<String>(); // URLs waiting to be crawled again
	public static AtomicLong total = new AtomicLong(0); // counts the target links found so far (thread-safe)
	public static boolean exeFlag = true; // keep draining the crawler queue while true
		
	/**
	 * Parse URLs: pool threads take each queued URL and fetch its content with HttpClient.
	 */
	public static void parseUrl() {
		ExecutorService service = Executors.newFixedThreadPool(10); // a pool of 10 worker threads
		while (exeFlag) { // the crawler queue is still being processed
			if (waitForCrawlerUrls.size() > 0) { // there are URLs waiting to be crawled
				// run the parsing logic on a pool thread
				service.execute(new Runnable() {
					public void run() {
						while (waitForCrawlerUrls.size() > 0) { // drain the queue
							String URL = waitForCrawlerUrls.poll(); // take the head of the queue and remove it
							if (URL == null) continue; // another thread emptied the queue first
							CloseableHttpClient httpClient = HttpClients.createDefault(); // obtain an HttpClient instance
							HttpGet httpGet = new HttpGet(URL); // use a GET request
							// connect timeout: 8 seconds; socket (response) timeout: 10 seconds
							RequestConfig config = RequestConfig.custom().setConnectTimeout(8000).setSocketTimeout(10000).build();
							httpGet.setConfig(config);
							CloseableHttpResponse response = null;
							try {
								response = httpClient.execute(httpGet); // execute the request
								HttpEntity entity = response.getEntity(); // get the response body
								// only text/html responses contain links, so only they need further parsing
								if (entity.getContentType() != null && "text/html".equals(entity.getContentType().getValue())) {
									String pageContent = EntityUtils.toString(entity, "utf-8"); // read the page as a string
									//System.out.println("Page content: " + pageContent);
									parsePageContent(pageContent, URL); // parse the page with the Jsoup crawler framework
								}
							} catch (SocketTimeoutException e) {
								System.err.println("The request took too long!");
								addUrl(URL); // put the URL back on the crawler queue and retry later
							} catch (ClientProtocolException e) {
								e.printStackTrace();
							} catch (IOException e) {
								e.printStackTrace();
							} finally {
								try {
									// release the response and client resources
									if (response != null) {
										response.close();
									}
									if (httpClient != null) {
										httpClient.close();
									}
								} catch (IOException e) {
									e.printStackTrace();
								}
							}
						}
					}
				});
			} else { // the queue is currently empty
				// count the threads still running crawl tasks
				if (((ThreadPoolExecutor) service).getActiveCount() == 0) { // no thread is working, so stop
					exeFlag = false;
					break;
				}
			}
			try {
				// sleep 1.5 s so the worker threads have time to parse their pages
				// and refill the queue; without the pause the loop spins and
				// floods the pool with duplicate tasks
				Thread.sleep(1500);
			} catch (InterruptedException e) {
				e.printStackTrace();
			}
		}
		service.shutdown(); // no new tasks will be submitted
	}
		
		
	/**
	 * Parse the page content with the Jsoup HTML parser.
	 *
	 * 1: collect every link on the page
	 * 2: filter out the links that do not meet the requirements
	 * 3: keep the target links and queue the rest for another crawl pass
	 * @param pageContent the raw HTML of the page
	 * @param realDir the base directory used to resolve relative links
	 */
	public static void parsePageContent(String pageContent, String realDir) {
		// 1: collect every link on the page
		Document doc = Jsoup.parse(pageContent); // build the DOM tree of the page
		Elements aEles = doc.select("a"); // select every <a> tag in the DOM
		long i = 0; // counts the links read from the current page
		for (Element aEle : aEles) {
			String aHref = aEle.attr("href"); // read the href attribute of the <a> tag
			String URL = realDir + aHref; // the resolved link

			if (i == 0) {
				System.out.println("\r" + "Starting to crawl a new page..." + "\r");
			}
			System.out.println("Read link #" + (++i) + ": " + URL);

			// 2: filter out the links that do not meet the requirements
			if (URL == null || URL.equals("")) continue; // skip empty URLs
			boolean f = true; // assume the link is acceptable
			for (String excludeUrl : excludeUrls) { // check every suffix that must be filtered out
				if (URL.endsWith(excludeUrl)) { // the suffix matches, so drop the link
					f = false;
					break;
				}
			}
			if (!f) continue; // filtered out, do not crawl it
			// 3: keep the target links and queue the rest for another crawl pass
			if (URL.endsWith(".jar")) { // a target link
				System.err.println("\r" + "Target link #" + total.incrementAndGet() + ", URL:" + URL + "\r");
			} else { // queue the link to be crawled again
				addUrl(URL);
			}
		}
	}
		
		
	/**
	 * Add a URL to the crawler queue so it is crawled later.
	 * @param URL
	 */
	private static void addUrl(String URL) {
		waitForCrawlerUrls.add(URL);
		System.out.println("Link " + URL + " was added to the crawler queue");
	}
		
		
	/**
	 * Initialization: seed the queue and start crawling.
	 */
	private static void init() {
		String URL = "http://central.maven.org/maven2/HTTPClient/HTTPClient/";
		addUrl(URL);
		addUrl("http://central.maven.org/maven2/commons-cli/commons-cli/");
		parseUrl();
	}

		
	/**
	 * Program entry point.
	 * @param args
	 */
	public static void main(String[] args) {
		init();
	}
	
}
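The loop above stops by polling getActiveCount(). An alternative shutdown pattern is to stop submitting and then block with awaitTermination until the queued tasks finish; a minimal sketch of that pattern, separate from the crawler (the task bodies are placeholders):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class PoolShutdownDemo {

	public static void main(String[] args) throws InterruptedException {
		ExecutorService service = Executors.newFixedThreadPool(10);
		for (int i = 0; i < 100; i++) {
			final int taskId = i;
			service.execute(new Runnable() {
				public void run() {
					System.out.println("task " + taskId + " ran on " + Thread.currentThread().getName());
				}
			});
		}
		service.shutdown(); // stop accepting new tasks; queued tasks still run
		// block until every queued task has finished, up to a one-minute limit
		if (!service.awaitTermination(1, TimeUnit.MINUTES)) {
			service.shutdownNow(); // interrupt whatever is still running
		}
	}
}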
