Lucene 4.7 (1) Full-Text Search: Creating the Index from Database Content
package org.apache.lucene.demo;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Date;
import java.util.List;
import java.util.Map;
import org.apache.commons.dbutils.DbUtils;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import thtf.ebuilder.website.search.DBIndex;
import thtf.ebuilder.website.services.HTMLServices;
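// Note: BufferedReader, InputStreamReader and LongField are imported only for the
// commented-out file-indexing code kept further down for reference.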
/** Indexes database records into a Lucene index.
 * <p>
 * Adapted from the Lucene IndexFiles command-line demo: instead of walking a
 * directory of text files, it reads rows from the up_info table and adds one
 * document per row.
 */
public class IndexFiles {

  private IndexFiles() {}

  /** Reads rows from the database and writes the Lucene index. */
  public static void main(String[] args) {
    // DBIndex._$ appears to be a project-specific singleton that supplies the
    // index directory and the Analyzer shared by indexing and searching.
    String indexPath = DBIndex._$.getIndexFile().toString();
    boolean add = true;
    Date start = new Date();
    try {
      Directory dir = FSDirectory.open(new File(indexPath));
      IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_47, DBIndex._$.analyzer);
      if (add) {
        // CREATE rebuilds the index from scratch.
        iwc.setOpenMode(OpenMode.CREATE);
      } else {
        // CREATE_OR_APPEND adds to or updates an existing index.
        iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
      }
      IndexWriter writer = new IndexWriter(dir, iwc);
      indexDocs(writer);
      // NOTE: if you want to maximize search performance,
      // you can optionally call forceMerge here. This can be
      // a terribly costly operation, so generally it's only
      // worth it when your index is relatively static (ie
      // you're done adding documents to it):
      //
      // writer.forceMerge(1);
      writer.close();
      Date end = new Date();
      System.out.println(end.getTime() - start.getTime() + " total milliseconds");
    } catch (IOException e) {
      System.out.println(" caught a " + e.getClass() +
          "\n with message: " + e.getMessage());
    }
  }
  /**
   * Reads rows from the up_info table and adds one Lucene document per row to
   * the given writer. With OpenMode.CREATE the documents are simply added;
   * otherwise updateDocument is used so that an existing document with the same
   * info_id is replaced.
   *
   * @param writer Writer to the index where the database rows will be stored
   * @throws IOException If there is a low-level I/O error
   */
  static void indexDocs(IndexWriter writer)
      throws IOException {
    try {
      // Fetch up to 500 rows from the database. queryToMapList appears to be a
      // project-specific helper that returns one Map per row, keyed by column
      // name; it is not part of the Apache Commons DbUtils API imported above.
      List list = new DbUtils().queryToMapList("select info_id,info_title,info_content from up_info limit 500");
      for (int i = 0; i < list.size(); i++) {
        Map map = (Map) list.get(i);
        // make a new, empty document for this row
        Document doc = new Document();
        // Numeric primary key, stored so it can be read back from search results.
        Field info_id = new IntField("info_id", Integer.valueOf(String.valueOf(map.get("info_id"))), Field.Store.YES);
        doc.add(info_id);
        // StringField is indexed as a single, untokenized term; if the title should be
        // searchable word by word, a TextField would be the more usual choice.
        Field info_title = new StringField("info_title", map.get("info_title") == null ? "" : map.get("info_title").toString(), Field.Store.YES);
        doc.add(info_title);
        // Body text: strip HTML markup (project helper), then tokenize, index and store it.
        Field info_content = new TextField("info_content", map.get("info_content") == null ? "" : HTMLServices.clearHTMLToString(map.get("info_content").toString()), Field.Store.YES);
        doc.add(info_content);
        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
          // New index: no older copy of this document can exist, so just add it.
          writer.addDocument(doc);
        } else {
          // Replace any existing document with the same info_id. Note that info_id is
          // indexed as a numeric IntField, so a plain string Term like this may not
          // match the trie-encoded terms in Lucene 4.x; indexing the id additionally
          // as a StringField and using that field as the update key is the safer pattern.
          writer.updateDocument(new Term("info_id", map.get("info_id") == null ? "1" : map.get("info_id").toString()), doc);
        }
      }
      writer.commit();
      // The commented-out code below is left over from the original file-based
      // IndexFiles demo (it refers to variables such as file and fis that do not
      // exist in this method) and is kept only for reference.
      // // Add the path of the file as a field named "path". Use a
      // // field that is indexed (i.e. searchable), but don't tokenize
      // // the field into separate words and don't index term frequency
      // // or positional information:
      // Field pathField = new StringField("path", file.getPath(), Field.Store.YES);
      // doc.add(pathField);
      //
      // // Add the last modified date of the file a field named "modified".
      // // Use a LongField that is indexed (i.e. efficiently filterable with
      // // NumericRangeFilter). This indexes to milli-second resolution, which
      // // is often too fine. You could instead create a number based on
      // // year/month/day/hour/minutes/seconds, down the resolution you require.
      // // For example the long value 2011021714 would mean
      // // February 17, 2011, 2-3 PM.
      // doc.add(new LongField("modified", file.lastModified(), Field.Store.NO));
      //
      // // Add the contents of the file to a field named "contents". Specify a Reader,
      // // so that the text of the file is tokenized and indexed, but not stored.
      // // Note that FileReader expects the file to be in UTF-8 encoding.
      // // If that's not the case searching for special characters will fail.
      // BufferedReader _content=new BufferedReader(new InputStreamReader(fis, "UTF-8"));
      // System.out.println(_content);
      // doc.add(new TextField("contents", _content));
      //
      // if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
      // // New index, so we just add the document (no old document can be there):
      // System.out.println("adding " + file);
      // writer.addDocument(doc);
      // } else {
      // // Existing index (an old copy of this document may have been indexed) so
      // // we use updateDocument instead to replace the old one matching the exact
      // // path, if present:
      // System.out.println("updating " + file);
      // writer.updateDocument(new Term("path", file.getPath()), doc);
      // }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
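The database access above is hidden behind the project's DbUtils().queryToMapList(...) helper. For readers who do not have the thtf.ebuilder classes, the sketch below shows roughly what such a helper typically does with plain JDBC; the class name SimpleQueryHelper and the jdbcUrl/user/password parameters are placeholders, and this is only an assumption about the helper's behavior, not its actual implementation.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical stand-in for the project's DbUtils.queryToMapList helper:
// runs a SELECT and returns one Map per row, keyed by column name.
public class SimpleQueryHelper {
  public static List<Map<String, Object>> queryToMapList(String jdbcUrl, String user,
      String password, String sql) throws SQLException {
    List<Map<String, Object>> rows = new ArrayList<Map<String, Object>>();
    try (Connection conn = DriverManager.getConnection(jdbcUrl, user, password);
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(sql)) {
      ResultSetMetaData meta = rs.getMetaData();
      while (rs.next()) {
        Map<String, Object> row = new LinkedHashMap<String, Object>();
        for (int c = 1; c <= meta.getColumnCount(); c++) {
          row.put(meta.getColumnLabel(c), rs.getObject(c));
        }
        rows.add(row);
      }
    }
    return rows;
  }
}

Once the index has been written, it can be checked with a minimal search. Searching is presumably the topic of a later installment of this series, so the following is only a quick verification sketch: the class name SearchCheck and the keyword "测试" are examples, and it assumes the same DBIndex._$ helper supplies the index path and the analyzer that were used for indexing.

import java.io.File;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import thtf.ebuilder.website.search.DBIndex;

public class SearchCheck {
  public static void main(String[] args) throws Exception {
    // Open the index that IndexFiles just wrote.
    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(DBIndex._$.getIndexFile().toString())));
    IndexSearcher searcher = new IndexSearcher(reader);
    // Parse a query against the tokenized info_content field, using the same analyzer as indexing.
    QueryParser parser = new QueryParser(Version.LUCENE_47, "info_content", DBIndex._$.analyzer);
    Query query = parser.parse("测试");  // example keyword
    TopDocs results = searcher.search(query, 10);
    System.out.println(results.totalHits + " matching documents");
    for (ScoreDoc sd : results.scoreDocs) {
      Document doc = searcher.doc(sd.doc);
      System.out.println(doc.get("info_id") + " : " + doc.get("info_title"));
    }
    reader.close();
  }
}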