在线等!怒求各位大神!导入了对应的包,也生成了字节码文件,但还是ClassNotFoundException啊啊啊!! 目测没问题啊断点看下,程序进入Indexer了吗? 解决方案 » 免费领取超大流量手机卡,每月29元包185G流量+100分钟通话, 中国电信官方发货 没有,进去就有两个输出,都没有输出出来。我试过在lia.meetlucene里新建一个类,可以调用;然后把Indexer的代码整个复制过来就又ClassNotFound了…… 谢谢,不过基本都是import never used和The word 'Servlet' is not correctly spelled之类的…… 首先确认确认一下jar包是否全部加载。确认之后,对照下面的代码,找一下问题所在:package com.xnch.lucenesearch.internet;import java.io.File;import java.io.IOException;import java.util.ArrayList;import org.apache.lucene.analysis.Analyzer;import org.apache.lucene.analysis.miscellaneous.LimitTokenCountAnalyzer;import org.apache.lucene.document.Document;import org.apache.lucene.document.Field;import org.apache.lucene.document.StringField;import org.apache.lucene.document.TextField;import org.apache.lucene.index.DirectoryReader;import org.apache.lucene.index.IndexWriter;import org.apache.lucene.index.IndexWriterConfig;import org.apache.lucene.index.IndexableField;import org.apache.lucene.index.LogByteSizeMergePolicy;import org.apache.lucene.index.LogMergePolicy;import org.apache.lucene.index.Term;import org.apache.lucene.index.IndexWriterConfig.OpenMode;import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;import org.apache.lucene.queryparser.classic.QueryParser;import org.apache.lucene.search.Filter;import org.apache.lucene.search.IndexSearcher;import org.apache.lucene.search.Query;import org.apache.lucene.search.ScoreDoc;import org.apache.lucene.search.TopDocs;import org.apache.lucene.search.highlight.Formatter;import org.apache.lucene.search.highlight.Fragmenter;import org.apache.lucene.search.highlight.Highlighter;import org.apache.lucene.search.highlight.QueryScorer;import org.apache.lucene.search.highlight.Scorer;import org.apache.lucene.search.highlight.SimpleFragmenter;import org.apache.lucene.search.highlight.SimpleHTMLFormatter;import org.apache.lucene.store.Directory;import org.apache.lucene.store.FSDirectory;import org.apache.lucene.store.RAMDirectory;import 
org.apache.lucene.util.Version;import org.wltea.analyzer.lucene.IKAnalyzer;import com.xnch.lucenesearch.bean.CourseInfo;public class RAM_FSTest { public final String COURSE_ID = "courseId"; public final String COURSE_NAME = "courseName"; public final String USERNAME = "userName"; public final String COURSE_INTRODU = "course_introdu"; public final String UPLOAD_DATE = "upload_date"; Analyzer analyzer = new IKAnalyzer(); IndexWriter writer = null; Directory RAMDirectory = null; Directory fsDirectory = null; public final static String indexPath = "F:\\javadata\\javawebpro\\lucene\\luceneIndex1"; public static void main(String[] args) throws Exception { RAM_FSTest rf = new RAM_FSTest(); System.out.println(rf.RAMDirectory); rf.buildIndex(); System.out.println(rf.RAMDirectory); // xuwen 程序员 x1@# String content = "是可以避免的 "; rf.search(content); System.out.println(rf.RAMDirectory); // new RAM_FSTest().deleteDoc("1"); } public void buildIndex() { RAMDirectory = new RAMDirectory(); LogMergePolicy mergePolicy = new LogByteSizeMergePolicy(); // 达到3个文件时就和合并 mergePolicy.setMergeFactor(10); IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_42, analyzer); config.setInfoStream(System.out); config.setMergePolicy(mergePolicy); config.setMaxBufferedDocs(9); try { writer = new IndexWriter(RAMDirectory, getConfig()); writer.addDocuments(getData()); writer.close(); saveIndexToFile(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } public void saveIndexToFile() { try { fsDirectory = FSDirectory.open(new File(indexPath)); IndexWriter fsIndexWriter = new IndexWriter(fsDirectory, getConfig()); fsIndexWriter.addIndexes(new Directory[] { RAMDirectory }); fsIndexWriter.close(); fsDirectory.close(); RAMDirectory.close(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } public IndexWriterConfig getConfig() { LogMergePolicy mergePolicy = new LogByteSizeMergePolicy(); // 达到3个文件时就和合并 
mergePolicy.setMergeFactor(10); LimitTokenCountAnalyzer limitTokenCountAnalyzer=new LimitTokenCountAnalyzer(new IKAnalyzer(), 1); IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_42, limitTokenCountAnalyzer); config.setInfoStream(System.out); config.setMergePolicy(mergePolicy); config.setMaxBufferedDocs(9); config.setOpenMode(OpenMode.CREATE); return config; } // 搜索 public void search(String content) throws Exception { String queryString = content; String[] fields = { COURSE_ID, COURSE_NAME, USERNAME }; QueryParser queryParse = new MultiFieldQueryParser(Version.LUCENE_42, fields, analyzer); queryParse.setPhraseSlop(3); Query query = queryParse.parse(queryString); Directory directory = FSDirectory.open(new File(indexPath)); DirectoryReader directoryReader = DirectoryReader.open(directory); IndexSearcher isearcher = new IndexSearcher(directoryReader); Filter filter = null; Formatter formatter = new SimpleHTMLFormatter("<font color='red'>", "</font>"); /* * Term term = new Term(USERNAME, content); query = new TermQuery(term); */ Scorer scorer = new QueryScorer(query); Highlighter highlighter = new Highlighter(formatter, scorer); Fragmenter fragmenter = new SimpleFragmenter(100);// 截取的字符长度,最长100 highlighter.setTextFragmenter(fragmenter); TopDocs topDocs = isearcher.search(query, null, 1000); System.out.println("总共有[" + topDocs.totalHits + "]条匹配结果"); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { int docsn = scoreDoc.doc;// 文档内部编号 Document doc = isearcher.doc(docsn); // 根据编号取出相应的文档 String hc = highlighter.getBestFragment(analyzer, COURSE_NAME, doc .get(COURSE_NAME)); if (hc != null) { int endIndex = Math.min(50, hc.length());// 谁小取谁,取最小的 hc.substring(0, endIndex); ((Field) doc.getField(COURSE_NAME)).setStringValue(hc); } String hcUserName = highlighter.getBestFragment(analyzer, USERNAME, doc.get(USERNAME)); if (hcUserName != null) { ((Field) doc.getField(USERNAME)).setStringValue(hcUserName); } String hcCourseId = highlighter.getBestFragment(analyzer, 
COURSE_ID, doc.get(COURSE_ID)); if (hcCourseId != null) { ((Field) doc.getField(COURSE_ID)).setStringValue(hcCourseId); } printDocumentInfo(doc); } directoryReader.close(); directory.close(); } /** * 根据id删除文档 * * @param id */ public void deleteDoc(String id) { try { Directory dir = FSDirectory.open(new File(indexPath)); IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_42, analyzer); iwc.setOpenMode(OpenMode.CREATE_OR_APPEND); IndexWriter writer = new IndexWriter(dir, iwc); writer.deleteDocuments(new Term(COURSE_ID, id)); writer.commit(); writer.close(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } public void printDocumentInfo(Document doc) { // TODO Auto-generated method stub /* * Field f = doc.getField("name"); f.readerValue() */ System.out.println(COURSE_ID + ":" + doc.get(COURSE_ID)); System.out.println(COURSE_NAME + ":" + doc.get(COURSE_NAME)); System.out.println(USERNAME + ":" + doc.get(USERNAME)); } public final class DocumentUtils { public Document getDocument(CourseInfo courseInfo) { Document doc = new Document(); // Field fieldCourseId = TextField(COURSE_ID, // courseInfo.getCourseId(), Store.YES); // StoredField 仅仅存储,没有索引的 // Field fieldCourseId = new StoredField(COURSE_ID, // courseInfo.getCourseId()); // intField LongField 这样字段用于排序和过滤 // 作为一个整体,不分词索引 Field fieldCourseId = new StringField(COURSE_ID, String .valueOf(courseInfo.getCourseId()), Field.Store.YES); Field fieldUsrName = new StringField(USERNAME, courseInfo .getUserName(), Field.Store.YES); doc.add(fieldCourseId); doc.add(fieldUsrName); doc.add(new TextField(COURSE_NAME, courseInfo.getCourseName(), Field.Store.YES)); return doc; } } public Iterable<? extends Iterable<? 
extends IndexableField>> getData() { ArrayList<Document> array = new ArrayList<Document>(); CourseInfo courseInfo = null; courseInfo = new CourseInfo(); courseInfo.setCourseId(1); courseInfo.setCourseName("我的英语书"); courseInfo.setUserName("xunianchong"); array.add(new DocumentUtils().getDocument(courseInfo)); courseInfo = new CourseInfo(); courseInfo.setCourseId(2); courseInfo.setCourseName("java高级编程"); courseInfo.setUserName("zijinhua"); array.add(new DocumentUtils().getDocument(courseInfo)); courseInfo = new CourseInfo(); courseInfo.setCourseId(3); courseInfo.setCourseName("高级程序员之路"); courseInfo.setUserName("xuwen"); array.add(new DocumentUtils().getDocument(courseInfo)); courseInfo = new CourseInfo(); courseInfo.setCourseId(4); courseInfo.setCourseName("成人高考英语"); courseInfo.setUserName("i,am,in,hubei"); array.add(new DocumentUtils().getDocument(courseInfo)); courseInfo = new CourseInfo(); courseInfo.setCourseId(5); courseInfo.setCourseName("中华人民午在湾岛及所有附属各岛屿、澎湖列万两"); courseInfo.setUserName("x1@#"); array.add(new DocumentUtils().getDocument(courseInfo)); return array; }} 谢谢,我看一下。jar包加载没问题,Indexer和Searcher都可以直接运行,但是Servlet调用就出问题。我总觉得不是Indexer这些类的问题,因为貌似根本没有进去…… 修改完后台重启下服务器,另外你import的会不会引用错了。好多unused import 谢谢,试过重启服务器N次…………import出错应该不会吧?这个工程下只有一个lia.meetlucene啊……servlet:import java.io.IOException;import javax.servlet.ServletConfig;import javax.servlet.ServletException;import javax.servlet.annotation.WebServlet;import javax.servlet.http.HttpServlet;import javax.servlet.http.HttpServletRequest;import javax.servlet.http.HttpServletResponse;import lia.meetlucene.*;Indexer:import java.io.BufferedReader;import java.io.File;import java.io.FileInputStream;import java.io.FileReader;//unusedimport java.io.InputStreamReader;import java.io.IOException;import java.util.Date;import org.apache.lucene.analysis.Analyzer;import org.apache.lucene.analysis.cjk.CJKAnalyzer;//unusedimport org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;import 
org.apache.lucene.analysis.standard.StandardAnalyzer;//unusedimport org.apache.lucene.document.Document;import org.apache.lucene.document.Field;import org.apache.lucene.document.StringField;import org.apache.lucene.document.TextField;import org.apache.lucene.index.IndexWriter;import org.apache.lucene.index.IndexWriterConfig;import org.apache.lucene.index.IndexWriterConfig.OpenMode;import org.apache.lucene.store.Directory;import org.apache.lucene.store.FSDirectory;import org.apache.lucene.util.Version;import org.wltea.analyzer.lucene.IKAnalyzer;//unused 所需要的类也许jar包中确实有可是jar包中引用到的那些类,你确定也有么就是link的时候没出现问题么? 谢谢,我比较菜可能不太理解你的意思,情况是这样的……lia.meetlucene里面引用了lucene相关的jar包,直接跑Indexer.main和Searcher.main都是没问题的,可以建索引也可以搜索,这个应该可以说明lucene这一块是没问题的吧。servlet只有上面发的doPost里的代码,没有引用jar包,只import了lia.meetlucene以及servlet需要的那些东西。 撸主看下WEB-INFO/lib下是否有对应的jar包 果然没有!buildpath里面貌似设置错了!也没有导入到lib下面!跪谢! 问个webservice接口的问题,有人会吗? jax-ws中对外暴露的服务都是单例吗? 疑惑 转篇文章,大家怎么看[转]仅仅Spring+hibernate系统架构为什么不可取!!! 倾囊求教:运行时查看变量值的问题 100分求一代码(照片的上传) tomcat的缺省运行目录咋的了 java参数传递的时候是传值呢?还是传递参数了, 新做一个巨简单sample . jsp+servlet+ejb(session+entity) . 适合新新手 !!!!!! 急!请教ejb高手 【菜鸟求助】SSH中怎么从JSP页面往后台传值呢 ResultSet.get Date()取不到值!
我试过在lia.meetlucene里新建一个类,可以调用;然后把Indexer的代码整个复制过来就又ClassNotFound了……
确认之后,对照下面的代码,找一下问题所在:
package com.xnch.lucenesearch.internet;import java.io.File;
import java.io.IOException;
import java.util.ArrayList;import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.miscellaneous.LimitTokenCountAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.Scorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;
import com.xnch.lucenesearch.bean.CourseInfo;

/**
 * Demo for Lucene 4.2: builds an index in a {@link RAMDirectory}, flushes it
 * to an on-disk {@link FSDirectory}, then runs a highlighted multi-field
 * search over the persisted index using the IK Chinese analyzer.
 */
public class RAM_FSTest {

    /** Index field names. */
    public final String COURSE_ID = "courseId";
    public final String COURSE_NAME = "courseName";
    public final String USERNAME = "userName";
    public final String COURSE_INTRODU = "course_introdu";
    public final String UPLOAD_DATE = "upload_date";

    Analyzer analyzer = new IKAnalyzer();
    IndexWriter writer = null;
    Directory RAMDirectory = null;
    Directory fsDirectory = null;

    /** On-disk location of the persisted index. */
    public final static String indexPath = "F:\\javadata\\javawebpro\\lucene\\luceneIndex1";

    public static void main(String[] args) throws Exception {
        RAM_FSTest rf = new RAM_FSTest();
        System.out.println(rf.RAMDirectory);
        rf.buildIndex();
        System.out.println(rf.RAMDirectory);
        // Other sample queries: xuwen 程序员 x1@#
        String content = "是可以避免的 ";
        rf.search(content);
        System.out.println(rf.RAMDirectory);
        // new RAM_FSTest().deleteDoc("1");
    }

    /**
     * Indexes the demo documents into a fresh in-memory directory, then
     * persists the result to disk via {@link #saveIndexToFile()}.
     * (The original also built a second, never-used IndexWriterConfig here;
     * that dead code has been removed — the writer uses {@link #getConfig()}.)
     */
    public void buildIndex() {
        RAMDirectory = new RAMDirectory();
        try {
            writer = new IndexWriter(RAMDirectory, getConfig());
            try {
                writer.addDocuments(getData());
            } finally {
                // Close even if addDocuments fails, so the directory is usable.
                writer.close();
            }
            saveIndexToFile();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Copies the in-memory index into the on-disk directory, then closes the
     * writer and both directories.
     */
    public void saveIndexToFile() {
        try {
            fsDirectory = FSDirectory.open(new File(indexPath));
            IndexWriter fsIndexWriter = new IndexWriter(fsDirectory, getConfig());
            try {
                fsIndexWriter.addIndexes(new Directory[] { RAMDirectory });
            } finally {
                fsIndexWriter.close();
            }
            fsDirectory.close();
            RAMDirectory.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Builds a fresh writer configuration. {@code OpenMode.CREATE} wipes any
     * existing index at the target directory.
     *
     * <p>NOTE(review): the token limit of 1 means only the FIRST token of each
     * field is ever indexed, which would make most searches return nothing —
     * confirm this limit is intentional before relying on search results.
     */
    public IndexWriterConfig getConfig() {
        LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();
        // Merge segments once 10 have accumulated.
        mergePolicy.setMergeFactor(10);
        LimitTokenCountAnalyzer limitTokenCountAnalyzer =
                new LimitTokenCountAnalyzer(new IKAnalyzer(), 1);
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_42,
                limitTokenCountAnalyzer);
        config.setInfoStream(System.out);
        config.setMergePolicy(mergePolicy);
        config.setMaxBufferedDocs(9);
        config.setOpenMode(OpenMode.CREATE);
        return config;
    }

    /**
     * Searches the courseId/courseName/userName fields for {@code content}
     * and prints each hit with {@code <font color='red'>} highlighting
     * applied to the matched terms.
     *
     * @param content raw query string, parsed by MultiFieldQueryParser
     * @throws Exception on query-parse or I/O failure
     */
    public void search(String content) throws Exception {
        String[] fields = { COURSE_ID, COURSE_NAME, USERNAME };
        QueryParser queryParse = new MultiFieldQueryParser(Version.LUCENE_42,
                fields, analyzer);
        queryParse.setPhraseSlop(3);
        Query query = queryParse.parse(content);

        Directory directory = FSDirectory.open(new File(indexPath));
        DirectoryReader directoryReader = DirectoryReader.open(directory);
        try {
            IndexSearcher isearcher = new IndexSearcher(directoryReader);
            Formatter formatter = new SimpleHTMLFormatter("<font color='red'>",
                    "</font>");
            Scorer scorer = new QueryScorer(query);
            Highlighter highlighter = new Highlighter(formatter, scorer);
            // Highlighted fragments are capped at 100 characters.
            Fragmenter fragmenter = new SimpleFragmenter(100);
            highlighter.setTextFragmenter(fragmenter);

            TopDocs topDocs = isearcher.search(query, null, 1000);
            System.out.println("总共有[" + topDocs.totalHits + "]条匹配结果");
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                int docsn = scoreDoc.doc; // internal document id
                Document doc = isearcher.doc(docsn);

                String hc = highlighter.getBestFragment(analyzer, COURSE_NAME,
                        doc.get(COURSE_NAME));
                if (hc != null) {
                    // BUG FIX: the original called hc.substring(...) and
                    // discarded the result (String is immutable), so the
                    // 50-char cap on the highlighted name never applied.
                    int endIndex = Math.min(50, hc.length());
                    hc = hc.substring(0, endIndex);
                    ((Field) doc.getField(COURSE_NAME)).setStringValue(hc);
                }
                String hcUserName = highlighter.getBestFragment(analyzer,
                        USERNAME, doc.get(USERNAME));
                if (hcUserName != null) {
                    ((Field) doc.getField(USERNAME)).setStringValue(hcUserName);
                }
                String hcCourseId = highlighter.getBestFragment(analyzer,
                        COURSE_ID, doc.get(COURSE_ID));
                if (hcCourseId != null) {
                    ((Field) doc.getField(COURSE_ID)).setStringValue(hcCourseId);
                }
                printDocumentInfo(doc);
            }
        } finally {
            // Close the reader/directory even when parsing a hit fails.
            directoryReader.close();
            directory.close();
        }
    }

    /**
     * Deletes every document whose courseId field equals {@code id}.
     *
     * @param id value of the courseId field to delete
     */
    public void deleteDoc(String id) {
        try {
            Directory dir = FSDirectory.open(new File(indexPath));
            IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_42,
                    analyzer);
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
            IndexWriter writer = new IndexWriter(dir, iwc);
            try {
                writer.deleteDocuments(new Term(COURSE_ID, id));
                writer.commit();
            } finally {
                writer.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Prints the stored id, name, and user of one hit to stdout. */
    public void printDocumentInfo(Document doc) {
        System.out.println(COURSE_ID + ":" + doc.get(COURSE_ID));
        System.out.println(COURSE_NAME + ":" + doc.get(COURSE_NAME));
        System.out.println(USERNAME + ":" + doc.get(USERNAME));
    }

    /** Converts a {@link CourseInfo} bean into a Lucene {@link Document}. */
    public final class DocumentUtils {
        public Document getDocument(CourseInfo courseInfo) {
            Document doc = new Document();
            // StringField: indexed as one un-analyzed token, and stored —
            // suitable for exact-match ids and usernames.
            Field fieldCourseId = new StringField(COURSE_ID,
                    String.valueOf(courseInfo.getCourseId()), Field.Store.YES);
            Field fieldUsrName = new StringField(USERNAME,
                    courseInfo.getUserName(), Field.Store.YES);
            doc.add(fieldCourseId);
            doc.add(fieldUsrName);
            // TextField: analyzed, so the course name is full-text searchable.
            doc.add(new TextField(COURSE_NAME, courseInfo.getCourseName(),
                    Field.Store.YES));
            return doc;
        }
    }

    /**
     * Builds the five demo documents that get indexed.
     * (In Lucene 4.x a Document is an Iterable of IndexableField, so the
     * return type matches IndexWriter.addDocuments.)
     */
    public Iterable<? extends Iterable<? extends IndexableField>> getData() {
        ArrayList<Document> array = new ArrayList<Document>();
        DocumentUtils utils = new DocumentUtils();

        CourseInfo courseInfo = new CourseInfo();
        courseInfo.setCourseId(1);
        courseInfo.setCourseName("我的英语书");
        courseInfo.setUserName("xunianchong");
        array.add(utils.getDocument(courseInfo));

        courseInfo = new CourseInfo();
        courseInfo.setCourseId(2);
        courseInfo.setCourseName("java高级编程");
        courseInfo.setUserName("zijinhua");
        array.add(utils.getDocument(courseInfo));

        courseInfo = new CourseInfo();
        courseInfo.setCourseId(3);
        courseInfo.setCourseName("高级程序员之路");
        courseInfo.setUserName("xuwen");
        array.add(utils.getDocument(courseInfo));

        courseInfo = new CourseInfo();
        courseInfo.setCourseId(4);
        courseInfo.setCourseName("成人高考英语");
        courseInfo.setUserName("i,am,in,hubei");
        array.add(utils.getDocument(courseInfo));

        courseInfo = new CourseInfo();
        courseInfo.setCourseId(5);
        courseInfo.setCourseName("中华人民午在湾岛及所有附属各岛屿、澎湖列万两");
        courseInfo.setUserName("x1@#");
        array.add(utils.getDocument(courseInfo));

        return array;
    }
}
jar包加载没问题,Indexer和Searcher都可以直接运行,但是Servlet调用就出问题。
我总觉得不是Indexer这些类的问题,因为貌似根本没有进去……
import出错应该不会吧?这个工程下只有一个lia.meetlucene啊……
servlet:
import java.io.IOException;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import lia.meetlucene.*;
Indexer:
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;//unused
import java.io.InputStreamReader;
import java.io.IOException;
import java.util.Date;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;//unused
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;//unused
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;//unused
可是jar包中引用到的那些类,你确定也有么
就是link的时候没出现问题么?
lia.meetlucene里面引用了lucene相关的jar包,直接跑Indexer.main和Searcher.main都是没问题的,可以建索引也可以搜索,这个应该可以说明lucene这一块是没问题的吧。
servlet只有上面发的doPost里的代码,没有引用jar包,只import了lia.meetlucene以及servlet需要的那些东西。