Hibernate provides full-text indexing through the Hibernate Search module, which is very handy. This post gives a brief introduction to how to use it.
1. Add the dependencies to pom.xml:
```xml
<dependency>
    <groupId>org.hibernate</groupId>
    <artifactId>hibernate-search-orm</artifactId>
    <version>${hibernate-search.version}</version>
</dependency>
<dependency>
    <groupId>org.apache.lucene</groupId>
    <artifactId>lucene-analyzers-smartcn</artifactId>
    <version>${lucene.version}</version>
</dependency>
<dependency>
    <groupId>org.apache.lucene</groupId>
    <artifactId>lucene-queryparser</artifactId>
    <version>${lucene.version}</version>
</dependency>
<dependency>
    <groupId>org.apache.lucene</groupId>
    <artifactId>lucene-analyzers-phonetic</artifactId>
    <version>${lucene.version}</version>
</dependency>
```
2. Configure the storage path of the search index in the Hibernate configuration:
```xml
<bean id="sessionFactory" class="org.springframework.orm.hibernate4.LocalSessionFactoryBean"
      destroy-method="destroy">
    <property name="dataSource" ref="poolingDataSource" />
    <property name="configLocation">
        <value>classpath:hibernate.cfg.xml</value>
    </property>
    <property name="hibernateProperties">
        <props>
            <prop key="hibernate.dialect">${hibernate.dialect}</prop>
            <!-- Booleans can be easily used in expressions by declaring HQL query
                 substitutions in Hibernate configuration -->
            <prop key="hibernate.query.substitutions">true 'Y', false 'N'</prop>
            <!-- http://ehcache.org/documentation/integrations/hibernate -->
            <!-- http://www.tutorialspoint.com/hibernate/hibernate_caching.htm -->
            <prop key="hibernate.cache.use_second_level_cache">true</prop>
            <prop key="hibernate.cache.region.factory_class">org.hibernate.cache.ehcache.EhCacheRegionFactory</prop>
            <!-- Hibernate only caches single persistent objects loaded via load(); to cache
                 result sets returned by findAll(), list(), iterate(), createCriteria(),
                 createQuery() etc., hibernate.cache.use_query_cache must be set to true -->
            <prop key="hibernate.cache.use_query_cache">true</prop>
            <prop key="net.sf.ehcache.configurationResourceName">ehcache-hibernate.xml</prop>
            <!-- Hibernate Search index directory -->
            <prop key="hibernate.search.default.indexBase">indexes/</prop>
        </props>
    </property>
</bean>
```
3. Annotate each class that should be searchable with @Indexed, and each searchable field with @Field. Enum fields usually do not need to go through an Analyzer (Analyze.NO), while free-text fields do. When projection (returning only selected fields from the index) is not needed, there is no need to store the actual data in the index (Store.NO). Different analyzers, together with their corresponding token filters, can be defined with @AnalyzerDef:
```java
@Indexed
@AnalyzerDef(
    name = "enTopicAnalyzer",
    charFilters = {
        @CharFilterDef(factory = HTMLStripCharFilterFactory.class)
    },
    tokenizer = @TokenizerDef(factory = StandardTokenizerFactory.class),
    filters = {
        @TokenFilterDef(factory = StandardFilterFactory.class),
        @TokenFilterDef(factory = StopFilterFactory.class),
        @TokenFilterDef(factory = PhoneticFilterFactory.class, params = {
            @Parameter(name = "encoder", value = "DoubleMetaphone")
        }),
        @TokenFilterDef(factory = SnowballPorterFilterFactory.class, params = {
            @Parameter(name = "language", value = "English")
        })
    }
)
public class Topic {
    ......

    @Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
    @Analyzer(definition = "enTopicAnalyzer")
    private String title;

    ......

    @Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
    @Analyzer(definition = "enTopicAnalyzer")
    private String content;

    ......

    @Enumerated(EnumType.STRING)
    @Field(index = Index.YES, analyze = Analyze.NO, store = Store.NO,
           bridge = @FieldBridge(impl = EnumBridge.class))
    private TopicStatus status;

    ...
}
```
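The fields above use Store.NO because the entities are loaded from the database anyway. If projection were needed, the field would have to be stored (Store.YES) and the query would declare the projected fields. A minimal sketch under that assumption, reusing the query style shown later in this post:

```java
// Sketch only: assumes "title" was indexed with store = Store.YES on Topic.
FullTextSession fullTextSession = Search.getFullTextSession(getSession());
QueryBuilder qb = fullTextSession.getSearchFactory()
        .buildQueryBuilder().forEntity(Topic.class).get();
org.apache.lucene.search.Query luceneQuery =
        qb.keyword().onField("title").matching("hibernate").createQuery();

FullTextQuery hibernateQuery =
        fullTextSession.createFullTextQuery(luceneQuery, Topic.class);
// Return only the stored "title" field instead of loading full entities.
hibernateQuery.setProjection("title");
List<Object[]> rows = hibernateQuery.list(); // each row is an Object[] with the projected values
```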
4. Build the index for existing data programmatically:
```java
ApplicationContext context = new ClassPathXmlApplicationContext("spring-resources.xml");
SessionFactory sessionFactory = (SessionFactory) context.getBean("sessionFactory");
Session sess = sessionFactory.openSession();
FullTextSession fullTextSession = Search.getFullTextSession(sess);
try {
    fullTextSession.createIndexer().startAndWait();
} catch (InterruptedException e) {
    LOG.error(e.getMessage(), e);
} finally {
    fullTextSession.close();
}
((AbstractApplicationContext) context).close();
```
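For larger data sets the mass indexer can also be tuned. A minimal sketch of what the createIndexer() call inside the try block could look like; all values are illustrative, not recommendations:

```java
// Inside the same try/catch as above.
fullTextSession.createIndexer(Topic.class)
        .batchSizeToLoadObjects(25)   // entities loaded per batch
        .threadsToLoadObjects(4)      // parallel entity-loading threads
        .cacheMode(CacheMode.IGNORE)  // skip the second-level cache while indexing
        .startAndWait();
```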
5. Open a FullTextSession, build a query, and fetch the results matching the query conditions:
```java
FullTextSession fullTextSession = Search.getFullTextSession(getSession());
QueryBuilder queryBuilder = fullTextSession.getSearchFactory()
        .buildQueryBuilder().forEntity(Show.class).get();
org.apache.lucene.search.Query luceneQuery = null;
luceneQuery = queryBuilder.keyword()
        // .wildcard()
        .onFields("title", "content").matching(query.getKeyword())
        // .matching("*" + query.getKeyword() + "*")
        .createQuery();
FullTextQuery hibernateQuery = fullTextSession.createFullTextQuery(
        luceneQuery, Show.class);
return hibernateQuery.list();
```
Notes:
1. During one test run I modified a value object and added a new indexed field, but forgot to rebuild the index. The unit tests passed, yet production failed.
2. The search is not yet very powerful; for example, searching for 测 may not return documents that contain 测试.
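One workaround for this kind of partial match is a wildcard query, hinted at by the commented-out .wildcard() lines in the query code above. A minimal sketch, reusing the queryBuilder from the earlier example; keep in mind that wildcard terms are not analyzed and that leading wildcards are slow:

```java
// Sketch: match any title/content containing the keyword as a substring.
org.apache.lucene.search.Query luceneQuery = queryBuilder.keyword()
        .wildcard()
        .onFields("title", "content")
        .matching("*" + query.getKeyword() + "*")
        .createQuery();
```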
Chinese word segmentation
Hibernate Search is built on Lucene, so any Chinese analyzer that works with Lucene can be used for Chinese word segmentation; commonly used ones include paoding, IKAnalyzer, mmseg4j and so on. The default analyzer in Hibernate Search is org.apache.lucene.analysis.standard.StandardAnalyzer, which splits Chinese text character by character and clearly does not meet our needs.
This section shows how to configure a Chinese analyzer in Hibernate, using the SmartChineseAnalyzer that ships with Lucene. It can be wired in three ways: globally in the Hibernate configuration file, per searched class, or per individual field. The first two are shown below, followed by a sketch of the per-field variant.
Configuring it in the Hibernate configuration:
```xml
<property name="hibernate.search.analyzer">org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer</property>
```
Configuring the Chinese analyzer on a searched class:
```java
@Indexed
@Analyzer(impl = SmartChineseAnalyzer.class)
```
In both cases the corresponding Maven dependency is required:
```xml
<dependency>
    <groupId>org.apache.lucene</groupId>
    <artifactId>lucene-analyzers-smartcn</artifactId>
    <version>${lucene.version}</version>
</dependency>
```
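For completeness, the per-field variant mentioned above looks roughly like this; a minimal sketch, with an illustrative title field:

```java
@Indexed
public class Topic {

    // Only this field uses the Chinese analyzer; other fields keep the default analyzer.
    @Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
    @Analyzer(impl = SmartChineseAnalyzer.class)
    private String title;

    ...
}
```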
Multi-condition queries
Hibernate Search can combine multiple conditions into a single multi-condition query. Here is a quick look at one way to do it in practice.
If only a single condition is involved, the query is straightforward:

```java
luceneQuery = queryBuilder.keyword().onFields("title", "content").matching(query.getKeyword()).createQuery();
```

For an AND combination of conditions you need must() on a boolean junction; for an OR combination you need should(). Here is a must() example (a should() sketch follows it):
```java
// must: the keyword has to match title or content
MustJunction term = queryBuilder.bool().must(queryBuilder.keyword()
        .onFields("title", "content")
        .matching(query.getKeyword()).createQuery());
// must not: exclude the given status
term.must(queryBuilder.keyword()
        .onField("status")
        .matching(query.getExcludeStatus()).createQuery()).not();
```
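For the OR case, should() is used instead of must(). A minimal sketch, assuming the same queryBuilder and an illustrative "tag" field:

```java
// OR query: a hit must match the keyword in title/content OR in the tag field.
org.apache.lucene.search.Query orQuery = queryBuilder.bool()
        .should(queryBuilder.keyword()
                .onFields("title", "content")
                .matching(query.getKeyword()).createQuery())
        .should(queryBuilder.keyword()
                .onField("tag")
                .matching(query.getKeyword()).createQuery())
        .createQuery();
```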
A complete example:
```java
private FullTextQuery findByKeywordQuery(TopicQuery query) {
    FullTextSession fullTextSession = Search.getFullTextSession(getSession());
    QueryBuilder queryBuilder = fullTextSession.getSearchFactory()
            .buildQueryBuilder().forEntity(Topic.class).get();
    org.apache.lucene.search.Query luceneQuery = null;
    if (null == query.getStatus() && null == query.getUsername()
            && null == query.getExcludeStatus()) {
        luceneQuery = queryBuilder.keyword()
                // .wildcard()
                .onFields("title", "content").matching(query.getKeyword())
                // .matching("*" + query.getKeyword() + "*")
                .createQuery();
        if (LOG.isDebugEnabled()) {
            LOG.debug("create clean keyword search query: " + luceneQuery.toString());
        }
    } else {
        MustJunction term = queryBuilder.bool().must(queryBuilder.keyword()
                .onFields("title", "content")
                .matching(query.getKeyword()).createQuery());
        if (null != query.getStatus()) {
            term.must(queryBuilder.keyword()
                    // .wildcard()
                    .onField("status")
                    .matching(query.getStatus()).createQuery());
        }
        if (null != query.getExcludeStatus()) {
            term.must(queryBuilder.keyword()
                    .onField("status")
                    .matching(query.getExcludeStatus()).createQuery()).not();
        }
        if (null != query.getUsername()) {
            term.must(queryBuilder.keyword()
                    // .wildcard()
                    .onField("owner.username")
                    .ignoreFieldBridge()
                    .matching(query.getUsername()).createQuery());
        }
        luceneQuery = term.createQuery();
        if (LOG.isDebugEnabled()) {
            LOG.debug("create complicated keyword search query: " + luceneQuery.toString());
        }
    }
    // BooleanQuery
    FullTextQuery hibernateQuery = fullTextSession.createFullTextQuery(
            luceneQuery, Topic.class);
    return hibernateQuery;
}
```
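A usage sketch of the method above; the paging values are illustrative. FullTextQuery supports the usual Hibernate paging calls, and getResultSize() returns the total hit count without loading every match:

```java
FullTextQuery hibernateQuery = findByKeywordQuery(query);
hibernateQuery.setFirstResult(0);   // offset
hibernateQuery.setMaxResults(20);   // page size
List<Topic> topics = hibernateQuery.list();
int totalHits = hibernateQuery.getResultSize(); // total matches across all pages
```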