Lucene grouping: grouping the results and querying the grouped data

The code below queries the data after grouping. Note that no pagination is done here: both groupOffset and withinGroupOffset stay at 0.
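If paging were needed, groupOffset would page over the groups returned by the first pass and withinGroupOffset would page over the documents inside each group, while topNGroups and maxDocsPerGroup have to be large enough to cover the requested page. A minimal sketch, assuming hypothetical page parameters (groupPage, groupsPerPage, docPage, docsPerGroupPage) that do not appear in the actual method below:

class GroupPagingSketch {
	public static void main(String[] args) {
		int groupPage = 1, groupsPerPage = 10;       // hypothetical: which page of groups, groups per page
		int docPage = 0, docsPerGroupPage = 20;      // hypothetical: which page inside a group, docs per page

		int groupOffset = groupPage * groupsPerPage;                 // would go into the first pass's getTopGroups(groupOffset, true)
		int topNGroups = groupOffset + groupsPerPage;                // first pass must keep at least this many groups
		int withinGroupOffset = docPage * docsPerGroupPage;          // would go into the second pass's getTopGroups(withinGroupOffset)
		int maxDocsPerGroup = withinGroupOffset + docsPerGroupPage;  // second pass must keep at least this many docs per group

		System.out.println(groupOffset + " / " + topNGroups + " / " + withinGroupOffset + " / " + maxDocsPerGroup);
	}
}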

/**
	 * Get the grouped search results.
	 * @return a map from group value (the category) to the list of hits in that group
	 * @throws Exception
	 */
	public Map<String, List<Map<Object, Object>>> getGroupSearchData() throws Exception {

		int groupOffset = 0;          // no paging over the groups
		
		int maxDocsPerGroup = 100;    // keep at most 100 hits per group
		
		int withinGroupOffset = 0;    // no paging inside a group
		
		int topNGroups = 10;          // collect the top 10 groups

		// First pass: collect the top-N group values of the CATEGORY field, ranked by relevance
		TermFirstPassGroupingCollector c1 = new TermFirstPassGroupingCollector(Constant.CATEGORY, Sort.RELEVANCE, topNGroups);

		boolean cacheScores = true;

		double maxCacheRAMMB = 4.0;

		// Cache the hits (and scores) of the first pass so the second pass can replay them
		// instead of re-running the query, as long as they fit in maxCacheRAMMB
		CachingCollector cachedCollector = CachingCollector.create(c1, cacheScores, maxCacheRAMMB);

		AAnalyzer analyzer = new AAnalyzer(true,false);
		
		BooleanQuery query = this.searchParamter.getBooleanQuery();
		
		SysContext context = SysContext.getInstance();
		
		IndexSearcher search = context.getSearcher();
		
		// Run the query once; the hits are cached and forwarded to the first-pass collector
		search.search(query, cachedCollector);

		Collection<SearchGroup<String>> topGroups = c1.getTopGroups(groupOffset, true);
		
		if (topGroups == null) {
			
			return new HashMap<String, List<Map<Object, Object>>>(0);
		}
		
		// Second pass: for the selected groups, collect up to maxDocsPerGroup documents per group
		TermSecondPassGroupingCollector c2 = new TermSecondPassGroupingCollector(Constant.CATEGORY, topGroups, Sort.RELEVANCE, Sort.RELEVANCE, maxDocsPerGroup, true, true, true);
		
		// Replay the cached hits into the second-pass collector; if the cache overflowed,
		// fall back to running the query a second time
		if (cachedCollector.isCached()) {
			
			cachedCollector.replay(c2);
			
		} else {
			
			context.getSearcher().search(query, c2);
		}
		
		TopGroups<String> tg = c2.getTopGroups(withinGroupOffset);
		
		// Synonym dictionary
		SynonymDic synDic = this.searchParamter.getAnalyzer().getSynDic();
		
		// Highlight the terms that match the query in red
		SimpleHTMLFormatter sh = new SimpleHTMLFormatter("<font color=\"red\">", "</font>", synDic.getSynonym());
		
		Highlighter high = new Highlighter(sh, new QueryScorer(query));
		
		high.setTextFragmenter(new SimpleFragmenter(100));
		
		GroupDocs<String>[] gds = tg.groups;
		
		Document doc = null;
		
		Map<String, List<Map<Object, Object>>> ret = new HashMap<String, List<Map<Object, Object>>>(gds.length);
		
		List<Map<Object, Object>> list;
		
		Map<Object, Object> map;
		
		IndexUtil iu = new IndexUtil();
		
		for (GroupDocs<String> gd : gds) {
			
			/**
			 * These are the hits inside the current group; loop over them and add each one to the list
			 */
			ScoreDoc [] docs = gd.scoreDocs;
			
			list = new ArrayList<Map<Object, Object>>(gd.totalHits);
			
			for (ScoreDoc scoreDoc : docs) {
				
				map = new HashMap<Object, Object>();
				
				doc = context.getSearcher().doc(scoreDoc.doc);
				
				String content = iu.getNoHtml(true,doc.get(Constant.CONTENT));
				
				TokenStream tokenStream = analyzer.tokenStream(Constant.CONTENT,new StringReader(content));
				
				String tmpStr = high.getBestFragment(tokenStream, content);
				
				if (tmpStr != null) {
					
					content = tmpStr;
					
				} else{
					
					content = iu.getContent(content, 100); // content.substring(0, content.length() >= 100 ? 100 : content.length());
				}
				
				String title = iu.getNoHtml(true,doc.get(Constant.TITLE));
				
				tokenStream = analyzer.tokenStream(Constant.TITLE, new StringReader(title));
				
				String tmpName = high.getBestFragment(tokenStream, title);
				
				if (tmpName != null) {
					
					title = tmpName;
					
				} else{
					
					title = iu.getContent(title, 100); // title.substring(0, title.length() >= 100 ? 100 : title.length());
				}
				
				tokenStream.close();
				
				map.put(Constant.ID, doc.get(Constant.ID));
				map.put(Constant.TITLE, title);
				map.put(Constant.CONTENT, content);
				map.put(Constant.PATH, doc.get(Constant.PATH));
				map.put(Constant.OUTSIDEURL, doc.get(Constant.OUTSIDEURL));
				map.put(Constant.INDEX_TYPE, doc.get(Constant.INDEX_TYPE));
				map.put(Constant.ATTACH_TYPE, doc.get(Constant.ATTACH_TYPE));
				map.put(Constant.CATEGORY, doc.get(Constant.CATEGORY));
				list.add(map);
				
			}
			ret.put(gd.groupValue, list);
			
		}
		// Release the searcher now that we are done with it
		context.releaseSearcher(search);
		
		return ret;

	}
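The method returns a Map<String, List<Map<Object, Object>>> keyed by the CATEGORY group value. A minimal usage sketch, assuming it is called from the same object that exposes getGroupSearchData (the loop and output below are illustrative only, not part of the original post):

	Map<String, List<Map<Object, Object>>> grouped = getGroupSearchData();
	
	for (Map.Entry<String, List<Map<Object, Object>>> entry : grouped.entrySet()) {
		// One entry per category, holding the highlighted hits of that group
		System.out.println("Category: " + entry.getKey() + " (" + entry.getValue().size() + " hits)");
		for (Map<Object, Object> hit : entry.getValue()) {
			System.out.println("  " + hit.get(Constant.TITLE) + " -> " + hit.get(Constant.PATH));
		}
	}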

Reposted from laccp.iteye.com/blog/1576860