lucene-使用CJKTokenizer分词
二分法分词
package busetoken;
import org.apache.lucene.analysis.cjk.CJKTokenizer;
import org.apache.lucene.analysis.Token;
import java.io.IOException;
import java.io.StringReader;
public class UseCjk {
    /**
     * Demonstrates Lucene's CJKTokenizer (overlapping-bigram segmentation for
     * CJK text): tokenizes a sample Chinese sentence and prints every token
     * followed by '|'.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        String s="编码规范从根本上解决了程序维护员的难题;规范的编码阅读和理解起来更容易,也可以快速的不费力气的借鉴别人的编码。对将来维护你编码的人来说,你的编码越优化,他们就越喜欢你的编码,理解起来也就越快。";
        CJKTokenizer cjk = new CJKTokenizer(new StringReader(s));
        try {
            Token t;
            // Lucene 2.x pull-style API: next() returns null when exhausted.
            while ((t = cjk.next()) != null) {
                System.out.print(t.termText() + "|");
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                // Closing the tokenizer also closes the underlying StringReader.
                cjk.close();
            } catch (IOException ignored) {
                // best-effort cleanup; nothing useful to do here
            }
        }
    }
}
效果如下
编码|码规|规范|范从|从根|根本|本上|上解|解决|决了|了程|程序|序维|维护|护员|员的|的难|难题|规范|范的|的编|编码|码阅|阅读|读和|和理|理解|解起|起来|来更|更容|容易|也可|可以|以快|快速|速的|的不|不费|费力|力气|气的|的借|借鉴|鉴别|别人|人的|的编|编码|对将|将来|来维|维护|护你|你编|编码|码的|的人|人来|来说|你的|的编|编码|码越|越优|优化|他们|们就|就越|越喜|喜欢|欢你|你的|的编|编码|理解|解起|起来|来也|也就|就越|越快|
建立索引
package bindex;
import java.io.File;
import tool.FileText;
import tool.FileList;
import java.io.*;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.store.LockObtainFailedException;
public class FileIndexer {
    /**
     * Builds a Lucene index over every file returned by FileList.getFiles("htmls"),
     * analyzed with CJKAnalyzer (bigram segmentation). Each document carries three
     * fields: "name" (stored + tokenized), "content" (stored + tokenized) and
     * "path" (stored, not indexed). The index is written to the "indexes" directory.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        String indexPath = "indexes";
        IndexWriter indexWriter = null;
        try {
            indexWriter = new IndexWriter(indexPath, new CJKAnalyzer());
            String[] files = FileList.getFiles("htmls");
            for (int i = 0; i < files.length; i++) {
                File f = new File(files[i]);
                String name = f.getName();
                String content = FileText.getText(f);
                String path = f.getPath();
                Document doc = new Document();
                doc.add(new Field("name", name, Field.Store.YES, Field.Index.TOKENIZED));
                doc.add(new Field("content", content, Field.Store.YES, Field.Index.TOKENIZED));
                doc.add(new Field("path", path, Field.Store.YES, Field.Index.NO));
                indexWriter.addDocument(doc);
                // NOTE: path already ends with the file name, so this prints it twice
                // (kept for compatibility with the documented sample output).
                System.out.println("File:" + path + name + " indexed!");
            }
            System.out.println("OK!");
        } catch (CorruptIndexException e) {
            e.printStackTrace();
        } catch (LockObtainFailedException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Always close the writer: otherwise a failure mid-indexing leaks the
            // writer and leaves the index write-lock held, blocking later runs.
            if (indexWriter != null) {
                try {
                    indexWriter.close();
                } catch (IOException ignored) {
                    // best-effort cleanup
                }
            }
        }
    }
}
效果
File:htmls\wy\360安全中心 - 360安全卫士官网 - 360安全卫士下载免费杀毒木马查杀杀木马电脑安全恶意软件查杀流氓软件查杀.htm360安全中心 - 360安全卫士官网 - 360安全卫士下载免费杀毒木马查杀杀木马电脑安全恶意软件查杀流氓软件查杀.htm indexed!
File:htmls\腾讯首页11.htm腾讯首页11.htm indexed!
OK!
搜索
package bindex;
import java.io.IOException;
import java.lang.StringBuffer;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.document.*;
public class BindexSearcher {
    /**
     * Runs a TermQuery for the phrase "卫士" against the "content" field of the
     * index stored in "indexes", then prints each hit's "name" field and a
     * preview (up to 100 chars) of its "content" field.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        String indexPath = "indexes";
        String searchField = "content";
        String searchPhrase = "卫士";
        StringBuffer sb = new StringBuffer("");
        IndexSearcher searcher = null;
        try {
            searcher = new IndexSearcher(indexPath);
            Term t = new Term(searchField, searchPhrase);
            Query q = new TermQuery(t);
            Hits hs = searcher.search(q);
            int num = hs.length();
            for (int i = 0; i < num; i++) {
                Document doc = hs.doc(i);
                Field fname = doc.getField("name");
                Field fcontent = doc.getField("content");
                sb.append("name:\n");
                sb.append(fname.stringValue() + "\n");
                sb.append("content:\n");
                String content = fcontent.stringValue();
                // Guard: documents shorter than 100 chars would make a bare
                // substring(0, 100) throw StringIndexOutOfBoundsException.
                sb.append(content.substring(0, Math.min(100, content.length())) + "\n");
                sb.append("------------" + "\n");
            }
            System.out.println(sb);
        } catch (CorruptIndexException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Close the searcher even when search/printing fails.
            if (searcher != null) {
                try {
                    searcher.close();
                } catch (IOException ignored) {
                    // best-effort cleanup
                }
            }
        }
    }
}
效果
name:
360安全中心 - 360安全卫士官网 - 360安全卫士下载免费杀毒木马查杀杀木马电脑安全恶意软件查杀流氓软件查杀.htm
content:
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3c.org/TR/1999/REC-html4
------------
上一篇: lucene-QueryParser