目测没问题啊
断点看下,程序进入Indexer了吗?
解决方案 »
- Struts2.1.6 ognl.NoSuchPropertyException: org.apache.struts2.dispatcher.Servle
- jsp:include的使用
- 大家好,我又来问问题了,关于MyEclipse6.5的安装
- 关于IE的兼容
- hibernate3 原生sql查询mysql数据库 longtext类型如何处理?
- 急!!! 急!!!!! 急!!!!!!!請各位高手賜教,onmouseover,onmouseout 函數如何調用action 急! 期盼!!!!!!!!
- SSH 关于 Spring 注入的问题
- 有人用过liferay开发portlet嘛,进来帮帮忙。
- weblogic启动提示java.lang.OutOfMemoryError <<no stack trace available>>!
- com.vaadin.terminal.gwt.server.WebApplicationContext这个是什么包,在哪里下载
- 【菜鸟求助】SSH中怎么从JSP页面往后台传值呢
- ResultSet.get Date()取不到值!
我试过在lia.meetlucene里新建一个类,可以调用;然后把Indexer的代码整个复制过来就又ClassNotFound了……
确认之后,对照下面的代码,找一下问题所在:
package com.xnch.lucenesearch.internet;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.miscellaneous.LimitTokenCountAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.Scorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

import com.xnch.lucenesearch.bean.CourseInfo;

/**
 * Demo for Lucene 4.2: builds an index of {@link CourseInfo} sample records in
 * an in-memory {@code RAMDirectory}, copies the finished index to disk, then
 * searches it and prints hits with HTML-highlighted fragments.
 *
 * <p>Indexing and query parsing both use the IK Chinese analyzer.
 */
public class RAM_FSTest {

    // Names of the fields stored in each index document.
    public final String COURSE_ID = "courseId";
    public final String COURSE_NAME = "courseName";
    public final String USERNAME = "userName";
    public final String COURSE_INTRODU = "course_introdu";
    public final String UPLOAD_DATE = "upload_date";

    /** Chinese word-segmenting analyzer shared by indexing and searching. */
    Analyzer analyzer = new IKAnalyzer();
    IndexWriter writer = null;
    // NOTE(review): this field shadows the class name
    // org.apache.lucene.store.RAMDirectory; kept as-is for compatibility, but
    // renaming it to ramDirectory would avoid the confusion.
    Directory RAMDirectory = null;
    Directory fsDirectory = null;

    /** On-disk location the RAM index is flushed to. */
    public final static String indexPath = "F:\\javadata\\javawebpro\\lucene\\luceneIndex1";

    public static void main(String[] args) throws Exception {
        RAM_FSTest rf = new RAM_FSTest();
        System.out.println(rf.RAMDirectory);
        rf.buildIndex();
        System.out.println(rf.RAMDirectory);
        // xuwen 程序员 x1@#
        String content = "是可以避免的 ";
        rf.search(content);
        System.out.println(rf.RAMDirectory);
        // new RAM_FSTest().deleteDoc("1");
    }

    /**
     * Indexes the sample documents into an in-memory directory and then
     * copies the finished index to disk via {@link #saveIndexToFile()}.
     * I/O failures are logged and swallowed (demo code).
     */
    public void buildIndex() {
        RAMDirectory = new RAMDirectory();
        // BUG FIX: the original built a local IndexWriterConfig + merge policy
        // here and then never used them — the writer was constructed with
        // getConfig() instead. The dead configuration has been removed so
        // getConfig() is the single source of truth.
        try {
            writer = new IndexWriter(RAMDirectory, getConfig());
            try {
                writer.addDocuments(getData());
            } finally {
                // BUG FIX: close in finally so the writer (and its lock) is
                // released even when addDocuments() throws.
                writer.close();
            }
            saveIndexToFile();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Copies the in-memory index into the on-disk directory at
     * {@link #indexPath}, then closes both directories.
     */
    public void saveIndexToFile() {
        try {
            fsDirectory = FSDirectory.open(new File(indexPath));
            IndexWriter fsIndexWriter = new IndexWriter(fsDirectory, getConfig());
            try {
                fsIndexWriter.addIndexes(new Directory[] { RAMDirectory });
            } finally {
                // BUG FIX: always release the disk writer, even on failure.
                fsIndexWriter.close();
            }
            fsDirectory.close();
            RAMDirectory.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Builds a fresh {@code IndexWriterConfig}: IK analyzer, log-byte-size
     * merge policy (merge factor 10), small RAM buffer, and CREATE open mode
     * (each run rebuilds the index from scratch).
     */
    public IndexWriterConfig getConfig() {
        LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();
        // Merge once 10 segments accumulate.
        mergePolicy.setMergeFactor(10);
        // BUG FIX: the original wrapped the analyzer in
        // LimitTokenCountAnalyzer(new IKAnalyzer(), 1), which indexes only the
        // FIRST token of every field — any search past the first word silently
        // finds nothing. Raised to a generous cap; drop the wrapper entirely
        // if no per-field token limit is actually needed.
        LimitTokenCountAnalyzer limitTokenCountAnalyzer =
                new LimitTokenCountAnalyzer(new IKAnalyzer(), 10000);
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_42,
                limitTokenCountAnalyzer);
        config.setInfoStream(System.out);
        config.setMergePolicy(mergePolicy);
        config.setMaxBufferedDocs(9);
        config.setOpenMode(OpenMode.CREATE);
        return config;
    }

    /**
     * Searches the on-disk index across courseId / courseName / userName,
     * replaces matched field values with red-highlighted fragments, and
     * prints each hit.
     *
     * @param content raw query string, parsed with a phrase slop of 3
     * @throws Exception on parse or I/O failure (demo code; callers see all)
     */
    public void search(String content) throws Exception {
        String[] fields = { COURSE_ID, COURSE_NAME, USERNAME };
        QueryParser queryParse = new MultiFieldQueryParser(Version.LUCENE_42,
                fields, analyzer);
        queryParse.setPhraseSlop(3);
        Query query = queryParse.parse(content);

        Directory directory = FSDirectory.open(new File(indexPath));
        DirectoryReader directoryReader = DirectoryReader.open(directory);
        try {
            IndexSearcher isearcher = new IndexSearcher(directoryReader);
            Formatter formatter = new SimpleHTMLFormatter("<font color='red'>",
                    "</font>");
            Scorer scorer = new QueryScorer(query);
            Highlighter highlighter = new Highlighter(formatter, scorer);
            // Fragments are capped at 100 characters.
            Fragmenter fragmenter = new SimpleFragmenter(100);
            highlighter.setTextFragmenter(fragmenter);

            TopDocs topDocs = isearcher.search(query, null, 1000);
            System.out.println("总共有[" + topDocs.totalHits + "]条匹配结果");
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                // Fetch the stored document by its internal id.
                Document doc = isearcher.doc(scoreDoc.doc);

                String hc = highlighter.getBestFragment(analyzer, COURSE_NAME,
                        doc.get(COURSE_NAME));
                if (hc != null) {
                    // BUG FIX: the original called hc.substring(...) and
                    // discarded the result — strings are immutable, so the
                    // truncation never happened. Assign it back.
                    int endIndex = Math.min(50, hc.length());
                    hc = hc.substring(0, endIndex);
                    ((Field) doc.getField(COURSE_NAME)).setStringValue(hc);
                }
                String hcUserName = highlighter.getBestFragment(analyzer,
                        USERNAME, doc.get(USERNAME));
                if (hcUserName != null) {
                    ((Field) doc.getField(USERNAME)).setStringValue(hcUserName);
                }
                String hcCourseId = highlighter.getBestFragment(analyzer,
                        COURSE_ID, doc.get(COURSE_ID));
                if (hcCourseId != null) {
                    ((Field) doc.getField(COURSE_ID)).setStringValue(hcCourseId);
                }
                printDocumentInfo(doc);
            }
        } finally {
            // BUG FIX: close reader and directory even when search/highlight
            // throws (the method declares "throws Exception").
            directoryReader.close();
            directory.close();
        }
    }

    /**
     * Deletes the document whose courseId term equals {@code id} from the
     * on-disk index.
     *
     * @param id courseId value of the document to delete
     */
    public void deleteDoc(String id) {
        try {
            Directory dir = FSDirectory.open(new File(indexPath));
            IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_42,
                    analyzer);
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
            IndexWriter writer = new IndexWriter(dir, iwc);
            try {
                writer.deleteDocuments(new Term(COURSE_ID, id));
                writer.commit();
            } finally {
                // BUG FIX: release the writer even if delete/commit fails.
                writer.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Prints the stored courseId / courseName / userName of one hit. */
    public void printDocumentInfo(Document doc) {
        System.out.println(COURSE_ID + ":" + doc.get(COURSE_ID));
        System.out.println(COURSE_NAME + ":" + doc.get(COURSE_NAME));
        System.out.println(USERNAME + ":" + doc.get(USERNAME));
    }

    /**
     * Converts a {@link CourseInfo} into a Lucene {@link Document}.
     * Non-static inner class: it reads the enclosing instance's field-name
     * constants.
     */
    public final class DocumentUtils {
        public Document getDocument(CourseInfo courseInfo) {
            Document doc = new Document();
            // StringField: indexed as a single un-tokenized term (good for
            // exact-match ids and usernames); TextField: analyzed full text.
            Field fieldCourseId = new StringField(COURSE_ID,
                    String.valueOf(courseInfo.getCourseId()), Field.Store.YES);
            Field fieldUsrName = new StringField(USERNAME,
                    courseInfo.getUserName(), Field.Store.YES);
            doc.add(fieldCourseId);
            doc.add(fieldUsrName);
            doc.add(new TextField(COURSE_NAME, courseInfo.getCourseName(),
                    Field.Store.YES));
            return doc;
        }
    }

    /**
     * Builds the five sample documents fed to
     * {@link IndexWriter#addDocuments(Iterable)}.
     */
    public Iterable<? extends Iterable<? extends IndexableField>> getData() {
        ArrayList<Document> array = new ArrayList<Document>();
        DocumentUtils utils = new DocumentUtils();
        array.add(utils.getDocument(newCourse(1, "我的英语书", "xunianchong")));
        array.add(utils.getDocument(newCourse(2, "java高级编程", "zijinhua")));
        array.add(utils.getDocument(newCourse(3, "高级程序员之路", "xuwen")));
        array.add(utils.getDocument(newCourse(4, "成人高考英语", "i,am,in,hubei")));
        array.add(utils.getDocument(newCourse(5,
                "中华人民午在湾岛及所有附属各岛屿、澎湖列万两", "x1@#")));
        return array;
    }

    /** Small factory for the repetitive CourseInfo setup in getData(). */
    private static CourseInfo newCourse(int id, String name, String userName) {
        CourseInfo courseInfo = new CourseInfo();
        courseInfo.setCourseId(id);
        courseInfo.setCourseName(name);
        courseInfo.setUserName(userName);
        return courseInfo;
    }
}
jar包加载没问题,Indexer和Searcher都可以直接运行,但是Servlet调用就出问题。
我总觉得不是Indexer这些类的问题,因为貌似根本没有进去……
import出错应该不会吧?这个工程下只有一个lia.meetlucene啊……
servlet:
import java.io.IOException;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import lia.meetlucene.*;
Indexer:
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;//unused
import java.io.InputStreamReader;
import java.io.IOException;
import java.util.Date;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;//unused
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;//unused
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;//unused
可是jar包中引用到的那些类,你确定也有么
就是link的时候没出现问题么?
lia.meetlucene里面引用了lucene相关的jar包,直接跑Indexer.main和Searcher.main都是没问题的,可以建索引也可以搜索,这个应该可以说明lucene这一块是没问题的吧。
servlet只有上面发的doPost里的代码,没有引用jar包,只import了lia.meetlucene以及servlet需要的那些东西。