正常 sql 查詢時:name like '%繼中%' 想必大家一定明白這樣不會走索引的,因此在大資料量級別下查詢響應時間會很慢,對吧,因為資料庫在一行行掃呢。所以我們自然會想到怎樣能讓它走索引?
解決方案之一:lucene出來了。
其實它就是幫你把文章拆分成若干個關鍵詞,這樣以便按關鍵詞查詢時能通過關鍵詞直接查詢來鎖定哪些文章匹配該關鍵詞並快速返回。說再直白點,就是 sql語句的查詢不用like ,而是 name ='繼中',這樣就走索引了,所以就快了而已。
下面來說正題,spring框架下配置lucene,lucene版本:3.0.3,直接上程式碼,通過程式碼我來分享下各行的作用
mvc-config.xml:
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:mvc="http://www.springframework.org/schema/mvc"
    xmlns:context="http://www.springframework.org/schema/context"
    xmlns:util="http://www.springframework.org/schema/util"
    xsi:schemaLocation="
        http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
        http://www.springframework.org/schema/mvc http://www.springframework.org/schema/mvc/spring-mvc-3.0.xsd
        http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-3.0.xsd
        http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util-3.0.xsd"
    default-autowire="byName">

    <!-- Serve String @ResponseBody payloads as UTF-8 so Chinese text is not mangled. -->
    <bean class="org.springframework.web.servlet.mvc.annotation.AnnotationMethodHandlerAdapter">
        <property name="messageConverters">
            <list>
                <bean class="org.springframework.http.converter.StringHttpMessageConverter">
                    <property name="supportedMediaTypes">
                        <list><value>text/plain;charset=UTF-8</value></list>
                    </property>
                </bean>
            </list>
        </property>
    </bean>

    <context:component-scan base-package="com.jizhong" />
    <mvc:annotation-driven/>

    <bean class="org.springframework.web.servlet.view.InternalResourceViewResolver">
        <property name="prefix" value="/" />
        <property name="suffix" value=".jsp" />
    </bean>

    <!-- ===================== LUCENE SEARCH CONFIG ===================== -->

    <!-- Maximum number of indexed tokens per field: unlimited.
         FIX: UNLIMITED is a static constant of the inner class
         IndexWriter.MaxFieldLength, not a class itself, so the original
         class="org.apache.lucene.index.IndexWriter.MaxFieldLength.UNLIMITED"
         fails with ClassNotFoundException at context startup. Expose the
         constant through util:constant, with the inner class referenced via '$'. -->
    <util:constant id="MAXFIELDLENGTH2"
        static-field="org.apache.lucene.index.IndexWriter$MaxFieldLength.UNLIMITED" />

    <!-- Analyzer shared by the IndexWriter and the QueryParsers; paoding gives
         good Chinese word segmentation. -->
    <bean id="luceneAnalyzer" class="net.paoding.analysis.analyzer.PaodingAnalyzer" />

    <!-- Index directory on the local filesystem. The path is hard-coded here;
         consider externalizing it via a PropertyPlaceholderConfigurer. -->
    <bean id="luceneDirectory" class="org.apache.lucene.store.SimpleFSDirectory">
        <constructor-arg>
            <bean class="java.io.File">
                <constructor-arg value="D:\\common\\hahaha" />
            </bean>
        </constructor-arg>
    </bean>

    <!-- IndexWriter(Directory, Analyzer, create=false, MaxFieldLength):
         create=false appends to an existing index. Constructor args are
         addressed by index so resolution does not depend on debug symbols
         (the original mixed one name="create" with positional args). -->
    <bean id="indexWriter" class="org.apache.lucene.index.IndexWriter">
        <constructor-arg index="0" ref="luceneDirectory" />
        <constructor-arg index="1" ref="luceneAnalyzer" />
        <constructor-arg index="2" value="false" />
        <constructor-arg index="3" ref="MAXFIELDLENGTH2" />
    </bean>

    <!-- Read side: an IndexSearcher over the same directory. -->
    <bean id="indexSearcher" class="org.apache.lucene.search.IndexSearcher">
        <constructor-arg ref="luceneDirectory" />
    </bean>
</beans>
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:mvc="http://www.springframework.org/schema/mvc"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:util="http://www.springframework.org/schema/util"
xsi:schemaLocation="
http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
http://www.springframework.org/schema/mvc http://www.springframework.org/schema/mvc/spring-mvc-3.0.xsd
http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-3.0.xsd
http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util-3.0.xsd"
default-autowire="byName" >
<bean class="org.springframework.web.servlet.mvc.annotation.AnnotationMethodHandlerAdapter">
<property name="messageConverters">
<list>
<bean class = "org.springframework.http.converter.StringHttpMessageConverter">
<property name = "supportedMediaTypes">
<list><value>text/plain;charset=UTF-8</value></list>
</property>
</bean>
</list>
</property>
</bean>
<context:component-scan base-package="com.jizhong" />
<mvc:annotation-driven/>
<bean class="org.springframework.web.servlet.view.InternalResourceViewResolver">
<property name="prefix" value="/" />
<property name="suffix" value=".jsp" />
</bean>
<!-- LUCENE SEARCH CONFIG -->
<!-- 設定欄位內容長度,這裡不做限定 -->
<bean id="MAXFIELDLENGTH2" class="org.apache.lucene.index.IndexWriter.MaxFieldLength.UNLIMITED" />
<!-- set your analyzer, to be used by the IndexWriter and QueryParser ,關於分詞器,因為我們主要進行中文搜尋,所以要選擇好點的中文分詞器,我選擇了paoding-->
<bean id="luceneAnalyzer" class="net.paoding.analysis.analyzer.PaodingAnalyzer">
</bean>
<!-- set your Lucene directory -->
<!-- in this case I am pulling the location from a properties file -->
<!-- also, using the SimpleFSLockFactory ,資料檔案存放位置設定-->
<bean id="luceneDirectory" class="org.apache.lucene.store.SimpleFSDirectory" >
<constructor-arg>
<bean class="java.io.File">
<constructor-arg value="D:\\common\\hahaha" />
</bean>
</constructor-arg>
</bean>
<!-- now you're ready to define the IndexWriter,這裡建立 IndexWriter並引入相關bean-->
<bean id="indexWriter" class="org.apache.lucene.index.IndexWriter">
<constructor-arg ref="luceneDirectory" />
<constructor-arg ref="luceneAnalyzer" />
<constructor-arg name="create" value="false" />
<constructor-arg ref="MAXFIELDLENGTH2" />
</bean>
<!-- define the IndexSearcher ,這裡建立IndexSearcher-->
<bean id="indexSearcher" class="org.apache.lucene.search.IndexSearcher">
<constructor-arg ref="luceneDirectory" />
</bean>
</beans>
以上是spring配置檔案中關於lucene的程式碼片段,看起來是不是很簡單?
我們繼續看程式碼
package com.jizhong.mmmmm.controller;
import java.io.IOException;
import java.io.StringReader;
import javax.servlet.http.HttpServletRequest;
import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.NumericField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.Version;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.ui.ModelMap;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
// Controller exposing Lucene index CRUD + search demo endpoints
// (Lucene 3.0.3, paoding Chinese analyzer, beans wired byName from mvc-config.xml).
// NOTE(review): handler threads replace the shared 'searcher' field without
// synchronization — not thread-safe under concurrent requests; confirm this is
// acceptable for a demo.
@Controller
public class LuceneController {
private static Logger logger = Logger.getLogger(LuceneController.class);
// required=false keeps the context bootable when the Lucene beans are absent;
// the author noted that byName autowiring fails at startup otherwise.
@Autowired(required = false)
private Analyzer myAnalyzer;
@Autowired(required = false)
private IndexWriter indexWriter;
@Autowired(required = false)
private IndexSearcher searcher;
// GET search.do — demo: AND search across the name and level3 fields.
@RequestMapping(value = "search.do", method = RequestMethod.GET)
public String testsSearch(HttpServletRequest request, ModelMap modelMap) throws Exception {
search();
return "test";
}
// GET idSearch.do — demo: exact lookup by the id field.
@RequestMapping(value = "idSearch.do", method = RequestMethod.GET)
public String idSearch(HttpServletRequest request, ModelMap modelMap) throws Exception {
idSearch();
return "test";
}
// GET moreSearch.do — demo: two multi-field queries combined in a BooleanQuery.
@RequestMapping(value = "moreSearch.do", method = RequestMethod.GET)
public String moreSearch(HttpServletRequest request, ModelMap modelMap) throws Exception {
searchMore();
return "test";
}
// GET create.do — bulk-indexes one batch of demo documents.
@RequestMapping(value = "create.do", method = RequestMethod.GET)
public String testsCreate(HttpServletRequest request, ModelMap modelMap) throws Exception {
create("整形值新增");
// create(request.getParameter("name"));
return "test";
}
// GET delete.do?id=... — deletes documents whose "id" term equals the parameter.
@RequestMapping(value = "delete.do", method = RequestMethod.GET)
public String delete(HttpServletRequest request, ModelMap modelMap) throws Exception {
delete("id", request.getParameter("id"));
return "test";
}
// GET optimize.do — merges index segments into fewer, larger ones.
@RequestMapping(value = "optimize.do", method = RequestMethod.GET)
public String optimize(HttpServletRequest request, ModelMap modelMap) throws Exception {
indexWriter.optimize();// expensive segment merge — run occasionally, never per request
return "test";
}
// GET update.do — re-indexes the document whose "id" term equals 1999991.
// Lucene's updateDocument is delete-then-add under the hood, so the full set of
// fields must be written again even when only one field logically changes.
@RequestMapping(value = "update.do", method = RequestMethod.GET)
public String update(HttpServletRequest request, ModelMap modelMap) throws Exception {
    Document replacement = new Document();
    replacement.add(new Field("id", String.valueOf(1999991), Store.YES, Index.NOT_ANALYZED));
    replacement.add(new Field("name", 555555 + "555555" + 555555, Store.YES, Index.ANALYZED));
    replacement.add(new Field("level1", String.valueOf(555555), Store.YES, Index.NOT_ANALYZED));
    replacement.add(new Field("level2", String.valueOf(555555), Store.YES, Index.NOT_ANALYZED));
    replacement.add(new Field("level3", String.valueOf(555555), Store.YES, Index.NOT_ANALYZED));
    replacement.add(new Field("brand_id", String.valueOf(555555 + 100000), Store.YES, Index.NOT_ANALYZED));
    indexWriter.updateDocument(new Term("id", "1999991"), replacement);
    // Any index mutation stays invisible until committed.
    indexWriter.commit();
    return "test";
}
// Deletes every document whose <field> term exactly equals <text>, then commits.
private void delete(String field, String text) throws CorruptIndexException, IOException {
    indexWriter.deleteDocuments(new Term(field, text));
    indexWriter.commit();
}
// Bulk-loads one batch of 10 000 demo documents (ids m*10000 .. m*10000+9999, m=604).
// 'string' is a fragment embedded into every "name" value so the batch is searchable.
// NOTE(review): "id" is written as a trie-encoded NumericField while every other
// field is a plain string; Term-level lookups (new Term("id", "...")) will NOT
// match numeric fields — the author's own comment recommends plain strings instead.
public void create(String string) throws Exception {
    long begin = System.currentTimeMillis();
    for (int m = 604; m < 605; m++) {
        for (int i = m * 10000; i < (m + 1) * 10000; i++) {
            Document doc = new Document();
            NumericField field = new NumericField("id", 6, Field.Store.YES, false);
            field.setIntValue(i);
            doc.add(field);
            // Only the fuzzy-searched field is analyzed; exact-match fields are not.
            doc.add(new Field("name", i + string + i, Store.YES, Index.ANALYZED));
            doc.add(new Field("level1", String.valueOf(3), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
            doc.add(new Field("level2", String.valueOf(2), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
            doc.add(new Field("level3", String.valueOf(1), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
            doc.add(new Field("brand_id", String.valueOf(i + 100000), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
            doc.add(new Field("hehe", String.valueOf(i + 100000), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
            indexWriter.addDocument(doc);
        }
        // Use the class logger instead of System.out (consistency fix: the declared
        // log4j logger was otherwise unused).
        logger.info("indexed batch " + m);
    }
    indexWriter.commit(); // documents become searchable only after commit
    logger.info("create cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");
}
// AND search: name must contain the analyzed keyword "整形" AND level3 must be "1"
// (MUST + MUST == logical AND). Results sorted by id descending, top 53 returned.
public void search() throws Exception {
    long begin = System.currentTimeMillis();
    String[] queryString = { "整形", "1" };   // values, position-matched to fields below
    String[] fields = { "name", "level3" };
    BooleanClause.Occur[] clauses = { BooleanClause.Occur.MUST, BooleanClause.Occur.MUST };
    Query query = MultiFieldQueryParser.parse(Version.LUCENE_30, queryString, fields, clauses, myAnalyzer);
    // A commit since this reader was opened makes it stale; reopen so freshly
    // indexed documents are visible. FIX: close the superseded reader — the
    // original left it open, leaking file handles on every refresh.
    IndexReader readerNow = searcher.getIndexReader();
    if (!readerNow.isCurrent()) {
        IndexReader reopened = readerNow.reopen();
        if (reopened != readerNow) {
            readerNow.close();
        }
        searcher = new IndexSearcher(reopened);
    }
    logger.info("maxDoc:" + searcher.maxDoc());
    Sort sort = new Sort();
    sort.setSort(new SortField("id", SortField.INT, true)); // highest id first
    TopDocs topDocs = searcher.search(query, null, 53, sort);
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        Document doc = searcher.doc(scoreDoc.doc);
        logger.info("id:" + doc.get("id"));
        logger.info("name:" + doc.get("name"));
        logger.info("level3:" + doc.get("level3"));
        logger.info("new field:" + doc.get("hehe"));
    }
    logger.info("search cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");
}
// Exact lookup on the "id" field for the literal value 4040011.
// NOTE(review): create() indexes "id" as a trie-encoded NumericField, which a
// plain QueryParser term will not match — verify against how the target index
// was built before relying on this.
private void idSearch() throws ParseException, CorruptIndexException, IOException {
    long begin = System.currentTimeMillis();
    QueryParser qp = new QueryParser(Version.LUCENE_30, "id", myAnalyzer);
    Query query = qp.parse("4040011");
    // FIX: close the superseded reader after reopen — the original leaked it.
    IndexReader readerNow = searcher.getIndexReader();
    if (!readerNow.isCurrent()) {
        IndexReader reopened = readerNow.reopen();
        if (reopened != readerNow) {
            readerNow.close();
        }
        searcher = new IndexSearcher(reopened);
    }
    TopDocs topDocs = searcher.search(query, null, 53);
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        Document doc = searcher.doc(scoreDoc.doc);
        logger.info("id:" + doc.get("id"));
        logger.info("name:" + doc.get("name"));
        logger.info("level3:" + doc.get("level3"));
        logger.info("new field:" + doc.get("hehe"));
    }
    logger.info("search cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");
}
// Composite search: two multi-field SHOULD queries (name OR level2 within each),
// combined with MUST + MUST, i.e. (q1) AND (q2). Sorted by id descending.
public void searchMore() throws Exception {
    long begin = System.currentTimeMillis();
    String[] queryStringOne = { "kkk", "222222" };
    String[] queryStringTwo = { "99980", "222222" };
    String[] fields = { "name", "level2" };
    BooleanClause.Occur[] clauses = { BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD };
    Query queryOne = MultiFieldQueryParser.parse(Version.LUCENE_30, queryStringOne, fields, clauses, myAnalyzer);
    Query queryTwo = MultiFieldQueryParser.parse(Version.LUCENE_30, queryStringTwo, fields, clauses, myAnalyzer);
    BooleanQuery booleanQuery = new BooleanQuery();
    booleanQuery.add(queryOne, BooleanClause.Occur.MUST);
    booleanQuery.add(queryTwo, BooleanClause.Occur.MUST);
    // FIX: close the superseded reader after reopen — the original leaked it.
    IndexReader readerNow = searcher.getIndexReader();
    if (!readerNow.isCurrent()) {
        IndexReader reopened = readerNow.reopen();
        if (reopened != readerNow) {
            readerNow.close();
        }
        searcher = new IndexSearcher(reopened);
    }
    logger.info("maxDoc:" + searcher.maxDoc());
    Sort sort = new Sort();
    sort.setSort(new SortField("id", SortField.INT, true));
    TopDocs topDocs = searcher.search(booleanQuery, null, 53, sort);
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        Document doc = searcher.doc(scoreDoc.doc);
        logger.info("id:" + doc.get("id"));
        logger.info("name:" + doc.get("name"));
        logger.info("level3:" + doc.get("level3"));
        logger.info("new field:" + doc.get("hehe"));
    }
    logger.info("search cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");
}
// GET result.do — debug endpoint: logs how the analyzer tokenizes "愛國者mp3".
@RequestMapping(value = "result.do", method = RequestMethod.GET)
public void getAnalyzerResult() throws IOException {
    StringReader reader = new StringReader("愛國者mp3");
    TokenStream ts = myAnalyzer.tokenStream("name", reader);
    // FIX: addAttribute already returns the live attribute instance, so keep it
    // instead of discarding it and re-fetching via getAttribute on every token.
    TermAttribute term = ts.addAttribute(TermAttribute.class);
    try {
        while (ts.incrementToken()) {
            logger.info(term.term());
        }
    } finally {
        // FIX: the original never closed the TokenStream, leaking analysis resources.
        ts.close();
    }
}
import java.io.IOException;
import java.io.StringReader;
import javax.servlet.http.HttpServletRequest;
import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.NumericField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.Version;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.ui.ModelMap;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
@Controller
public class LuceneController {
private static Logger logger = Logger.getLogger(LuceneController.class);
@Autowired(required = false)//這裡我寫了required = false,需要時再引入,不寫的話會報錯,大家有更好解決方案請留言哈
private Analyzer myAnalyzer;
@Autowired(required = false)
private IndexWriter indexWriter;
@Autowired(required = false)
private IndexSearcher searcher;
@RequestMapping(value = "search.do", method = RequestMethod.GET)
public String testsSearch(HttpServletRequest request, ModelMap modelMap) throws Exception {
search();
return "test";
}
@RequestMapping(value = "idSearch.do", method = RequestMethod.GET)
public String idSearch(HttpServletRequest request, ModelMap modelMap) throws Exception {
idSearch();
return "test";
}
@RequestMapping(value = "moreSearch.do", method = RequestMethod.GET)
public String moreSearch(HttpServletRequest request, ModelMap modelMap) throws Exception {
searchMore();
return "test";
}
@RequestMapping(value = "create.do", method = RequestMethod.GET)
public String testsCreate(HttpServletRequest request, ModelMap modelMap) throws Exception {
create("整形值新增");
// create(request.getParameter("name"));
return "test";
}
@RequestMapping(value = "delete.do", method = RequestMethod.GET)
public String delete(HttpServletRequest request, ModelMap modelMap) throws Exception {
delete("id", request.getParameter("id"));
return "test";
}
@RequestMapping(value = "optimize.do", method = RequestMethod.GET)
public String optimize(HttpServletRequest request, ModelMap modelMap) throws Exception {
indexWriter.optimize();//優化索引方法,不建議經常呼叫,會很耗時,隔段時間調優下即可
return "test";
}
//關於更新一個文件要注意一點,雖然它提供了updateDocument,但我覺得他是先刪再加,所以大家要把所以值都寫上,雖然可能只更新一個欄位
@RequestMapping(value = "update.do", method = RequestMethod.GET)
public String update(HttpServletRequest request, ModelMap modelMap) throws Exception {
Term term = new Term("id", "1999991");
Document doc = new Document();
doc.add(new Field("id", String.valueOf(1999991), Store.YES, Index.NOT_ANALYZED));
doc.add(new Field("name", 555555 + "555555" + 555555, Store.YES, Index.ANALYZED));
doc.add(new Field("level1", String.valueOf(555555), Store.YES, Index.NOT_ANALYZED));
doc.add(new Field("level2", String.valueOf(555555), Store.YES, Index.NOT_ANALYZED));
doc.add(new Field("level3", String.valueOf(555555), Store.YES, Index.NOT_ANALYZED));
doc.add(new Field("brand_id", String.valueOf(555555 + 100000), Store.YES, Index.NOT_ANALYZED));
indexWriter.updateDocument(term, doc);
indexWriter.commit();//凡是涉及到索引變化的動作都要提交才能生效
return "test";
}
//delete,沒啥說的哈
private void delete(String field, String text) throws CorruptIndexException, IOException {
Term term1 = new Term(field, text);
indexWriter.deleteDocuments(term1);
indexWriter.commit();
}
public void create(String string) throws Exception {
long begin = System.currentTimeMillis();
for (int m = 604; m < 605; m++) {
for (int i = m * 10000; i < (m + 1) * 10000; i++) {
Document doc = new Document();
// doc.add(new Field("id", String.valueOf(i), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
NumericField field = new NumericField("id", 6, Field.Store.YES, false);
field.setIntValue(i);
doc.add(field);//這裡不建議這樣寫,無論什麼格式都以字串形式灌入資料最好,否則會因為不匹配而查不到,經驗之談哈,如下面這樣:
doc.add(new Field("name", i + string + i, Store.YES, Index.ANALYZED));//關於索引策略,建議需要模糊查詢欄位進行分詞策略,其他則不分詞
doc.add(new Field("level1", String.valueOf(3), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("level2", String.valueOf(2), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("level3", String.valueOf(1), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("brand_id", String.valueOf(i + 100000), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("hehe", String.valueOf(i + 100000), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
indexWriter.addDocument(doc);
}
System.out.println(m);
}
indexWriter.commit();
System.out.println("create cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");
}
//這裡的查詢是說:搜尋name欄位關鍵詞為“整形的”,level3欄位值為1的內容,兩者條件是 'and'的關係
public void search() throws Exception {
long begin = System.currentTimeMillis();
String[] queryString = { "整形", "1" };//注意欄位與值要一一對應哦,同下
String[] fields = { "name", "level3" };////注意欄位與值要一一對應哦,同上
BooleanClause.Occur[] clauses = { BooleanClause.Occur.MUST, BooleanClause.Occur.MUST };//這裡就是 and 的關係,詳細策略看文件哈
Query query = MultiFieldQueryParser.parse(Version.LUCENE_30, queryString, fields, clauses, myAnalyzer);
IndexReader readerNow = searcher.getIndexReader();
//這個判斷很重要,就是當我們剛灌入了資料就希望查詢出來,因為前者寫索引時關閉了reader,所以我們現在查詢時要開啟它
if (!readerNow.isCurrent()) {
searcher = new IndexSearcher(readerNow.reopen());
}
System.out.println(searcher.maxDoc());
Sort sort = new Sort();
sort.setSort(new SortField("id", SortField.INT, true));
TopDocs topDocs = searcher.search(query, null, 53, sort);//排序策略
// TopDocs topDocs = searcher.search(query, 50);
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
Document doc = searcher.doc(scoreDoc.doc);
System.out.println("id:" + doc.get("id"));
System.out.println("name:" + doc.get("name"));
System.out.println("level3:" + doc.get("level3"));
System.out.println("new field:" + doc.get("hehe"));
}
System.out.println("search cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");
}
private void idSearch() throws ParseException, CorruptIndexException, IOException {
long begin = System.currentTimeMillis();
QueryParser qp = new QueryParser(Version.LUCENE_30, "id", myAnalyzer);
Query query = qp.parse("4040011");
IndexReader readerNow = searcher.getIndexReader();
if (!readerNow.isCurrent()) {
searcher = new IndexSearcher(readerNow.reopen());
}
TopDocs topDocs = searcher.search(query, null, 53);
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
Document doc = searcher.doc(scoreDoc.doc);
System.out.println("id:" + doc.get("id"));
System.out.println("name:" + doc.get("name"));
System.out.println("level3:" + doc.get("level3"));
System.out.println("new field:" + doc.get("hehe"));
}
System.out.println("search cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");
}
public void searchMore() throws Exception {
long begin = System.currentTimeMillis();
String[] queryStringOne = { "kkk", "222222" };
String[] queryStringTwo = { "99980", "222222" };
String[] fields = { "name", "level2" };
BooleanClause.Occur[] clauses = { BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD };
Query queryOne = MultiFieldQueryParser.parse(Version.LUCENE_30, queryStringOne, fields, clauses, myAnalyzer);
Query queryTwo = MultiFieldQueryParser.parse(Version.LUCENE_30, queryStringTwo, fields, clauses, myAnalyzer);
BooleanQuery booleanQuery = new BooleanQuery();
booleanQuery.add(queryOne, BooleanClause.Occur.MUST);
booleanQuery.add(queryTwo, BooleanClause.Occur.MUST);
IndexReader readerNow = searcher.getIndexReader();
if (!readerNow.isCurrent()) {
searcher = new IndexSearcher(readerNow.reopen());
}
System.out.println(searcher.maxDoc());
Sort sort = new Sort();
sort.setSort(new SortField("id", SortField.INT, true));
TopDocs topDocs = searcher.search(booleanQuery, null, 53, sort);
// TopDocs topDocs = searcher.search(query, 50);
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
Document doc = searcher.doc(scoreDoc.doc);
System.out.println("id:" + doc.get("id"));
System.out.println("name:" + doc.get("name"));
System.out.println("level3:" + doc.get("level3"));
System.out.println("new field:" + doc.get("hehe"));
}
System.out.println("search cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");
}
@RequestMapping(value = "result.do", method = RequestMethod.GET)
public void getAnalyzerResult() throws IOException {
StringReader reader = new StringReader("愛國者mp3");
TokenStream ts = myAnalyzer.tokenStream("name", reader);
ts.addAttribute(TermAttribute.class);
while (ts.incrementToken()) {
TermAttribute ta = ts.getAttribute(TermAttribute.class);
System.out.println(ta.term());
}
}
}
- paoding-analysis.jar (128 KB)
- lucene-core-3.0.3.jar (1009.9 KB)