SolrDispatchFilter filter walkthrough

Reposted from: [url]http://blog.csdn.net/zyb243380456/article/details/7217434[/url]
<1>. What SolrDispatchFilter does when it is instantiated
Filters Solr-related requests and locates solrHome
[SolrDispatchFilter -> SolrResourceLoader -> Config]

<2>. What SolrDispatchFilter does when it is initialized
An inner class helps initialize the CoreContainer
[CoreContainer.Initializer -> CoreContainer -> CoreAdminHandler -> CoreContainer]
Initializes the SolrCore container and loads every SolrCore instance into the CoreContainer

<3>. What SolrDispatchFilter does while filtering
[SolrDispatchFilter is where every full-text search request is handled, including reads and writes against the index as well as performance tuning and control of the index]

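For context, the filter is wired into the web application in web.xml. The snippet below is only a rough sketch (the filter-name and the init-param value are illustrative, not taken from this post); the stock Solr web.xml maps the filter to every request with /*, and the "path-prefix" and "solrconfig-filename" init-params are the ones read in init() further down.

<filter>
  <filter-name>SolrRequestFilter</filter-name>
  <filter-class>org.apache.solr.servlet.SolrDispatchFilter</filter-class>
  <!-- optional: only handle requests under this prefix (read as "path-prefix" in init()) -->
  <init-param>
    <param-name>path-prefix</param-name>
    <param-value>/test</param-value>
  </init-param>
</filter>
<filter-mapping>
  <filter-name>SolrRequestFilter</filter-name>
  <url-pattern>/*</url-pattern>
</filter-mapping>
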
1.SolrDispatchFilter.java
/** This class is a standard filter. It inspects every request path and maps it to a
 * handler defined in solrconfig.xml. Over the filter's lifecycle it is instantiated by
 * the container, initialized, performs filtering, and is finally destroyed by the
 * container via destroy().
 * Solr is loaded into the web container the moment this filter is created, which is
 * why the filter is the focus here.
 */
protected CoreContainer cores;/** A container that holds multiple SolrCore (index) instances */
protected String pathPrefix = null;/** The request path prefix this filter will handle; the path is relative to the web context, i.e. it does not include the http://localhost:8080/myapp/ [...] part */
protected String abortErrorMessage = null;
protected String solrConfigFilename = null;
protected final Map<SolrConfig, SolrRequestParsers> parsers = new WeakHashMap<SolrConfig, SolrRequestParsers>();/** A cache of request parsers, keyed by SolrConfig */
protected final SolrRequestParsers adminRequestParser;/** A request parser dedicated to parsing admin request URLs */

/** This no-argument constructor replaces the implicit default constructor, so it is the one the container calls */
public SolrDispatchFilter() {
try {
adminRequestParser = new SolrRequestParsers( /** The parser manager; it owns the collection of request parsers */
new Config(
null,"solr",new InputSource(
new ByteArrayInputStream("<root/>".getBytes("UTF-8"))),"") );
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,e);
}
}


/** This is the filter's init() method; it reads its own settings from web.xml.
 * When the container calls init() on the filter instance, a couple of important objects
 * are created. One is the inner class CoreContainer.Initializer, which in turn
 * instantiates the CoreContainer. The CoreContainer is built from solr.xml, a
 * configuration file that lives in the solrHome directory. That file tells the
 * CoreContainer how many index (core) instances it will hold and how to manage them.
 * The file is read as a stream and queried with XPath to pull out each setting, e.g.:
 *
 * try {
 *   SolrResourceLoader loader = new SolrResourceLoader(null);
 *   InputSource cfgis = new InputSource(new FileInputStream(new File("F:/solr/solr.xml")));
 *   Config cfg = new Config(loader, null, cfgis, null); // Config parses a configuration file internally via XPath
 *   String s = cfg.get("solr/cores/@adminPath");
 *   // reads the text of the @adminPath attribute from:
 *   // <solr persistent="false">
 *   //   <cores adminPath="/admin/cores" defaultCoreName="collection1">
 *   //     <core name="collection1" instanceDir="." />
 *   //   </cores>
 *   // </solr>
 *   System.out.println(s);
 * } catch (Exception e) {
 *   e.printStackTrace();
 * }
 *
 * The key point is that the CoreContainer is instantiated from solr.xml, which can be
 * the default file or one you supply yourself.
 */
/** Instantiates the CoreContainer and loads all SolrCore instances into it */
public void init(FilterConfig config) throws ServletException
{
log.info("SolrDispatchFilter.init()");

boolean abortOnConfigurationError = true;
CoreContainer.Initializer init = createInitializer();/** Inner class used to initialize the CoreContainer */
try {
/** Read this filter's settings from the web application context */
this.pathPrefix = config.getInitParameter( "path-prefix" );/** An optional path prefix can be configured on the filter so that only matching requests are handled */
init.setSolrConfigFilename(config.getInitParameter("solrconfig-filename"));/** Specifies the name of the Solr config file to use */

this.cores = init.initialize();/** Actually initializes the CoreContainer, creates the instance, and loads every SolrCore instance into the container */
abortOnConfigurationError = init.isAbortOnConfigurationError();
log.info("user.dir=" + System.getProperty("user.dir"));
}
catch( Throwable t ) {
/** By catching this throwable here, the filter keeps running */
log.error( "Could not start Solr. Check solr/home property", t);
SolrConfig.severeErrors.add( t );
SolrCore.log( t );
}

/** Severe configuration errors can optionally be ignored instead of aborting */
if( abortOnConfigurationError && SolrConfig.severeErrors.size() > 0 ) {
StringWriter sw = new StringWriter();
PrintWriter out = new PrintWriter( sw );
out.println( "Severe errors in solr configuration.\n" );
out.println( "Check your log files for more detailed information on what may be wrong.\n" );
out.println( "If you want solr to continue after configuration errors, change: \n");
out.println( " <abortOnConfigurationError>false</abortOnConfigurationError>\n" );
out.println( "in "+init.getSolrConfigFilename()+"\n" );

for( Throwable t : SolrConfig.severeErrors ) {
out.println( "-------------------------------------------------------------" );
t.printStackTrace( out );
}
out.flush();

// Servlet containers behave slightly differently if you throw an exception during
// initialization. Resin will display that error for every page, jetty prints it in
// the logs, but continues normally. (We will see a 404 rather than the real error)
// rather than leave the behavior undefined, let's cache the error and spit it out
// for every request.
abortErrorMessage = sw.toString();
//throw new ServletException( abortErrorMessage );
}

log.info("SolrDispatchFilter.init() done");
}

/** Method to override to change how CoreContainer initialization is performed. */
protected CoreContainer.Initializer createInitializer() {
return new CoreContainer.Initializer();
}

/** When the filter is destroyed, also release the file streams of every index instance */
public void destroy() {
if (cores != null) {
cores.shutdown();
cores = null;
}
}

/** Filters the requests that need full-text search and serves them */
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
if( abortErrorMessage != null ) {
((HttpServletResponse)response).sendError( 500, abortErrorMessage );
return;
}

if( request instanceof HttpServletRequest) {
HttpServletRequest req = (HttpServletRequest)request;
HttpServletResponse resp = (HttpServletResponse)response;
SolrRequestHandler handler = null;/** The request handler */
SolrQueryRequest solrReq = null; /** The query request */
SolrCore core = null; /** The SolrCore instance that owns the requested resource */
String corename = ""; /** The name of the SolrCore instance */
try {
/** Store our CoreContainer in the request */
req.setAttribute("org.apache.solr.CoreContainer", cores);
String path = req.getServletPath();/** For a URL such as http://localhost:8080/collection2/admin/ this path is "/collection2/admin/" */
if( req.getPathInfo() != null ) {
// this lets you handle /update/commit when /update is a servlet
path += req.getPathInfo();
}
/** For a path such as http://localhost:8080/mysolr/test/select/..., the "/test" prefix must be stripped first so that Solr can see the search part of the URL */
if( pathPrefix != null && path.startsWith( pathPrefix ) ) {
path = path.substring( pathPrefix.length() );
}

/** Get the management path configured for the CoreContainer */
String alternate = cores.getManagementPath();
if (alternate != null && path.startsWith(alternate)) {
path = path.substring(0, alternate.length());
}
// unused feature ?
int idx = path.indexOf( ':' );
if( idx > 0 ) {
// save the portion after the ':' for a 'handler' path parameter
path = path.substring( 0, idx );
}

/** Check whether this is a request for the cores admin page */
if( path.equals( cores.getAdminPath() ) ) {
handler = cores.getMultiCoreHandler();
solrReq = adminRequestParser.parse(null,path, req);
handleAdminRequest(req, response, handler, solrReq);
return;
}
else {
/** Otherwise, try to pick the SolrCore name out of the path */
idx = path.indexOf( "/", 1 );
if( idx > 1 ) {
/** Read the SolrCore instance name from the path */
corename = path.substring( 1, idx );
core = cores.getCore(corename);
if (core != null) {
path = path.substring( idx );/** Strip the SolrCore name off the path */
}
}
if (core == null) {
corename = "";
core = cores.getCore("");
}
}

/** We have a valid SolrCore instance */
if( core != null ) {
final SolrConfig config = core.getSolrConfig();
/** Get a request parser for this core from the cache, or create one and cache it */
SolrRequestParsers parser = null;
parser = parsers.get(config);
if( parser == null ) {
parser = new SolrRequestParsers(config);
parsers.put(config, parser );/** this loads every request parser configured for this SolrCore instance */
}

/** If no request handler has been chosen yet, resolve one from the request path */
if( handler == null && path.length() > 1 ) { /** don't match "" or "/" as a valid path */
handler = core.getRequestHandler( path );
/** No handler yet, but the dispatcher is allowed to handle /select */
if( handler == null && parser.isHandleSelect() ) {
if( "/select".equals( path ) || "/select/".equals( path ) ) {/** this is a search request */
solrReq = parser.parse( core, path, req );/** build the query request object */
String qt = solrReq.getParams().get( CommonParams.QT );/** the query type, i.e. the name of the request handler to use */
handler = core.getRequestHandler( qt );/** look up the request handler by that name */
if( handler == null ) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "unknown handler: "+qt);/** no request handler is registered under that name */
}
}
}
}

// With a valid handler and a valid core...
if( handler != null ) {
// if not a /select, create the request
if( solrReq == null ) {
solrReq = parser.parse( core, path, req );
}

final Method reqMethod = Method.getMethod(req.getMethod());/** the HTTP method of the request */
HttpCacheHeaderUtil.setCacheControlHeader(config, resp, reqMethod);
// unless we have been explicitly told not to, do cache validation
// if we fail cache validation, execute the query
if (config.getHttpCachingConfig().isNever304() ||
!HttpCacheHeaderUtil.doCacheHeaderValidation(solrReq, req, reqMethod, resp)) {
SolrQueryResponse solrRsp = new SolrQueryResponse();
/* even for HEAD requests, we need to execute the handler to
* ensure we don't get an error (and to make sure the correct
* QueryResponseWriter is selected and we get the correct
* Content-Type)
*/
/** The last step of every Solr operation */
this.execute( req, handler, solrReq, solrRsp );/** reads from or writes to the index using the request object, the handler resolved from the request parameters, the response object, and the HTTP method */
HttpCacheHeaderUtil.checkHttpCachingVeto(solrRsp, resp, reqMethod);
// add info to http headers
//TODO: See SOLR-232 and SOLR-267.
/*try {
NamedList solrRspHeader = solrRsp.getResponseHeader();
for (int i=0; i<solrRspHeader.size(); i++) {
((javax.servlet.http.HttpServletResponse) response).addHeader(("Solr-" + solrRspHeader.getName(i)), String.valueOf(solrRspHeader.getVal(i)));
}
} catch (ClassCastException cce) {
log.log(Level.WARNING, "exception adding response header log information", cce);
}*/
QueryResponseWriter responseWriter = core.getQueryResponseWriter(solrReq);/** writes the result back to the client through the response stream */
writeResponse(solrRsp, response, responseWriter, solrReq, reqMethod);
}
return; // we are done with a valid handler
}
// otherwise (we have a core), let's ensure the core is in the SolrCore request attribute so
// a servlet/jsp can retrieve it
else {
req.setAttribute("org.apache.solr.SolrCore", core);
// Modify the request so each core gets its own /admin
if( path.startsWith( "/admin" ) ) {
req.getRequestDispatcher( pathPrefix == null ? path : pathPrefix + path ).forward( request, response );
return;
}
}
}
log.debug("no handler or core retrieved for " + path + ", follow through...");
}
catch (Throwable ex) {
sendError( (HttpServletResponse)response, ex );
return;
}
finally {
if( solrReq != null ) {
solrReq.close();
}
if (core != null) {
core.close();
}
}
}

/** Let the web application handle all other requests */
chain.doFilter(request, response);
}
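As a concrete illustration of the path handling above, here is a minimal sketch with hypothetical values (the core name and URL are made up for the example) of how doFilter() splits a request path into a core name and a handler path:

// request: http://localhost:8080/solr/collection1/select?q=*:*
String path = "/collection1/select";        // servletPath (+ pathInfo), any pathPrefix already stripped
int idx = path.indexOf( "/", 1 );           // the '/' that follows the core name
String corename = path.substring( 1, idx ); // "collection1" -> cores.getCore(corename)
path = path.substring( idx );               // "/select" -> core.getRequestHandler(path);
                                            // if that is null and handleSelect is on, the qt parameter picks the handler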


2.Config.java
/** Config corresponds to all of Solr's key configuration files and to how they are loaded.
This constructor initializes an important class, SolrResourceLoader, as well as Config itself.
*/
public Config(SolrResourceLoader loader, String name, InputSource is, String prefix) throws ParserConfigurationException, IOException, SAXException
{
if( loader == null ) {
loader = new SolrResourceLoader( null );/** the SolrResourceLoader constructor then locates solrHome, the directory that holds the index and the key configuration files */
}
this.loader = loader;
this.name = name;
this.prefix = (prefix != null && !prefix.endsWith("/"))? prefix + '/' : prefix;
try {
javax.xml.parsers.DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();

if (is == null) {
is = new InputSource(loader.openConfig(name));
is.setSystemId(SystemIdResolver.createSystemIdFromResourceName(name));
}

// only enable xinclude, if a SystemId is available
if (is.getSystemId() != null) {
try {
dbf.setXIncludeAware(true);
dbf.setNamespaceAware(true);
} catch(UnsupportedOperationException e) {
log.warn(name + " XML parser doesn't support XInclude option");
}
}

final DocumentBuilder db = dbf.newDocumentBuilder();
db.setEntityResolver(new SystemIdResolver(loader));
db.setErrorHandler(xmllog);
try {
doc = db.parse(is);
} finally {
// some XML parsers are broken and don't close the byte stream (but they should according to spec)
IOUtils.closeQuietly(is.getByteStream());
}
DOMUtil.substituteProperties(doc, loader.getCoreProperties());
} catch (ParserConfigurationException e) {
SolrException.log(log, "Exception during parsing file: " + name, e);
throw e;
} catch (SAXException e) {
SolrException.log(log, "Exception during parsing file: " + name, e);
throw e;
} catch( SolrException e ){
SolrException.log(log,"Error in "+name,e);
throw e;
}
}
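To make the prefix argument concrete, here is a minimal usage sketch (the file path is hypothetical) modeled on how SolrConfig is built in section 6: relative XPath expressions are resolved under the prefix, which is why SolrConfig can read "query/..." paths that live under the <config> root.

SolrResourceLoader loader = new SolrResourceLoader(null);
InputSource is = new InputSource(new FileInputStream(new File("F:/solr/conf/solrconfig.xml")));
Config cfg = new Config(loader, "solrconfig", is, "/config/");
// "query/useColdSearcher" is evaluated as /config/query/useColdSearcher,
// exactly the way SolrConfig reads its settings in section 6 below.
boolean cold = cfg.getBool("query/useColdSearcher", false);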

3.SolrResourceLoader.java
public SolrResourceLoader( String instanceDir, ClassLoader parent, Properties coreProperties )
{
if( instanceDir == null ) {
this.instanceDir = SolrResourceLoader.locateSolrHome();/** this method locates the solrHome directory */
} else{
this.instanceDir = normalizeDir(instanceDir);
}
log.info("Solr home set to '" + this.instanceDir + "'");

this.classLoader = createClassLoader(null, parent);
addToClassLoader("./lib/", null);

this.coreProperties = coreProperties;
}

/**
 * Determines the solrhome from the environment.
 * Tries JNDI (java:comp/env/solr/home) then system property (solr.solr.home);
 * if both fail, defaults to solr/
 * @return the instance directory name
 */
/**
 * Finds the solrhome based on looking up the value in one of three places:
 * <ol>
 * <li>JNDI: via java:comp/env/solr/home</li>
 * <li>The system property solr.solr.home</li>
 * <li>Look in the current working directory for a solr/ directory</li>
 * </ol>
 *
 * The return value is normalized. Normalization essentially means it ends in a trailing slash.
 * @return A normalized solrhome
 * @see #normalizeDir(String)
 */
public static String locateSolrHome() {
String home = null;
// Try JNDI
try {
Context c = new InitialContext();
home = (String)c.lookup("java:comp/env/"+project+"/home");
log.info("Using JNDI solr.home: "+home );
} catch (NoInitialContextException e) {
log.info("JNDI not configured for "+project+" (NoInitialContextEx)");
} catch (NamingException e) {
log.info("No /"+project+"/home in JNDI");
} catch( RuntimeException ex ) {
log.warn("Odd RuntimeException while testing for JNDI: " + ex.getMessage());
}

// Now try system property
if( home == null ) {
String prop = project + ".solr.home";
home = System.getProperty(prop);
if( home != null ) {
log.info("using system property "+prop+": " + home );
}
}

// if all else fails, try
if( home == null ) {
home = project + '/';
log.info(project + " home defaulted to '" + home + "' (could not find system property or JNDI)");
}
return normalizeDir( home );
}
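A minimal sketch of the second lookup (assuming an embedded or test setup rather than the stock web.xml): project is the constant "solr", so the system property consulted is solr.solr.home, and the result is normalized to end with a trailing slash. The directory value is illustrative only.

// No JNDI entry is configured here, so the lookup falls through to the system property.
System.setProperty("solr.solr.home", "F:/solr");
String home = SolrResourceLoader.locateSolrHome();  // -> "F:/solr/"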




4.SolrRequestParsers.java
This class initializes all of the request parsers and keeps them in a map.
public SolrRequestParsers( Config globalConfig )
{
long uploadLimitKB = 1048;
if( globalConfig == null ) {
uploadLimitKB = Long.MAX_VALUE;
enableRemoteStreams = true;
handleSelect = true;
}
else {
uploadLimitKB = globalConfig.getInt(
"requestDispatcher/requestParsers/@multipartUploadLimitInKB", (int)uploadLimitKB );

enableRemoteStreams = globalConfig.getBool(
"requestDispatcher/requestParsers/@enableRemoteStreaming", false );

// Let this filter take care of /select?xxx format
handleSelect = globalConfig.getBool(
"requestDispatcher/@handleSelect", handleSelect );
}

MultipartRequestParser multi = new MultipartRequestParser( uploadLimitKB );
RawRequestParser raw = new RawRequestParser();
standard = new StandardRequestParser( multi, raw );

// I don't see a need to have this publicly configured just yet
// adding it is trivial
parsers = new HashMap<String, SolrRequestParser>();
parsers.put( MULTIPART, multi );
parsers.put( RAW, raw );
parsers.put( SIMPLE, new SimpleRequestParser() );
parsers.put( STANDARD, standard );
parsers.put( "", standard );
}
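The three XPath expressions above read the requestDispatcher section of solrconfig.xml. A rough sketch of that fragment (the attribute values are illustrative, not the author's configuration):

<requestDispatcher handleSelect="true">
  <requestParsers enableRemoteStreaming="false" multipartUploadLimitInKB="2048" />
</requestDispatcher>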

5.CoreContainer.java
/** The code that loads the index cores when the container is initialized */
/**
 * Loads solr.xml, which lists all of the available SolrCore configurations.
 *
 * @param dir the home directory that holds all of the resource files
 * @param cfgis an input source for the solr.xml file
 * @throws ParserConfigurationException if parsing solr.xml fails
 * @throws IOException on file read/write errors
 * @throws SAXException on SAX parsing errors
 */
public void load(String dir, InputSource cfgis)
throws ParserConfigurationException, IOException, SAXException {
this.loader = new SolrResourceLoader(dir);
solrHome = loader.getInstanceDir();/** locate the home directory that holds all of the resource files */
Config cfg = new Config(loader, null, cfgis, null);/** build the Config object from the cfgis input source */
String dcoreName = cfg.get("solr/cores/@defaultCoreName", null);/** solr.xml is queried via XPath */
if(dcoreName != null) {
defaultCoreName = dcoreName;
}
persistent = cfg.getBool( "solr/@persistent", false );
libDir = cfg.get( "solr/@sharedLib", null);
adminPath = cfg.get( "solr/cores/@adminPath", null );
shareSchema = cfg.getBool("solr/cores/@shareSchema", false );
if(shareSchema){
indexSchemaCache = new ConcurrentHashMap<String ,IndexSchema>();
}
adminHandler = cfg.get("solr/cores/@adminHandler", null );
managementPath = cfg.get("solr/cores/@managementPath", null );

if (libDir != null) {
File f = FileUtils.resolvePath(new File(dir), libDir);
log.info( "loading shared library: "+f.getAbsolutePath() );
libLoader = SolrResourceLoader.createClassLoader(f, null);
}

if (adminPath != null) {
if (adminHandler == null) {
coreAdminHandler = new CoreAdminHandler(this);/** create the handler that manages the CoreContainer itself */
} else {
coreAdminHandler = this.createMultiCoreHandler(adminHandler);
}
}

try {
containerProperties = readProperties(cfg, ((NodeList) cfg.evaluate("solr", XPathConstants.NODESET)).item(0));
} catch (Throwable e) {
SolrConfig.severeErrors.add(e);
SolrException.logOnce(log,null,e);
}

/** Before walking over each SolrCore, check that the names are acceptable */
/** and make duplicate names across multiple SolrCore instances easy to detect */
{ /** local scope, so the variable names below are not reused */
NodeList nodes = (NodeList)cfg.evaluate("solr/cores/core/@name", /** the list of all SolrCore names */
XPathConstants.NODESET);
Set<String> names = new HashSet<String>();/** a HashSet guards against duplicate names */
for (int i=0; i<nodes.getLength(); i++) {/** loop over the SolrCore names */
String name = DOMUtil.getText(nodes.item(i));
if (names.contains(name)) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Multiple cores found with same name: " +
name);
}
names.add(name);/** add the name to the set */
}
}
/** Optional per-core properties, e.g. {solr.core.schemaName=schema.xml, solr.core.configName=solrconfig.xml, solr.core.instanceDir=.\, solr.core.dataDir=F:\\solr\.\data\, solr.core.name=} */
/** Now walk over each SolrCore's configuration node */
/** i.e. every core element, like: <core name="collection1" instanceDir="." /> */
NodeList nodes = (NodeList)cfg.evaluate("solr/cores/core", XPathConstants.NODESET);
for (int i=0; i<nodes.getLength(); i++) {
Node node = nodes.item(i);
try {
String name = DOMUtil.getAttr(node, "name", null);/** the SolrCore instance name */
if (null == name) {/** a core with no name is an error */
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Each core in solr.xml must have a 'name'");
}
if (name.equals(defaultCoreName)){/** the name matches the configured default core name */
/** so the default core is registered under the empty string */
name="";
}

/** A CoreDescriptor fully describes a SolrCore's configuration and every parameter needed to create the instance */
CoreDescriptor p = new CoreDescriptor(this, name, DOMUtil.getAttr(node, "instanceDir", null));

/** Handle the optional settings */
String opt = DOMUtil.getAttr(node, "config", null);
if(solrConfigFilenameOverride != null && name.equals("")) {
p.setConfigName(solrConfigFilenameOverride);
} else if (opt != null) {
p.setConfigName(opt);
}
opt = DOMUtil.getAttr(node, "schema", null);
if (opt != null) {
p.setSchemaName(opt);
}
opt = DOMUtil.getAttr(node, "properties", null);
if (opt != null) {
p.setPropertiesName(opt);
}
opt = DOMUtil.getAttr(node, CoreAdminParams.DATA_DIR, null);
if (opt != null) {
p.setDataDir(opt);
}

p.setCoreProperties(readProperties(cfg, node));

SolrCore core = create(p);
register(name, core, false);
}
catch (Throwable ex) {
SolrConfig.severeErrors.add( ex );
SolrException.logOnce(log,null,ex);
}
}
}
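Putting the attributes read above together, a rough sketch of a multi-core solr.xml (the core names, instanceDir values and dataDir are illustrative): persistent, sharedLib, adminPath, defaultCoreName, shareSchema, adminHandler and managementPath come from the <solr> and <cores> elements, while name, instanceDir, config, schema, properties and dataDir come from each <core>.

<solr persistent="false" sharedLib="lib">
  <cores adminPath="/admin/cores" defaultCoreName="collection1">
    <core name="collection1" instanceDir="collection1" />
    <core name="collection2" instanceDir="collection2" dataDir="F:/solr/collection2/data" />
  </cores>
</solr>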

6.SolrConfig.java
/**
 *SolrConfig.java       corresponds to solrconfig.xml
 *SolrIndexConfig.java  the configuration used at indexing time
 *IndexSchema.java      the object behind the index field (schema) configuration file
 *Config schemaConf = new Config(loader, "schema", is, "/schema/"); // Config is used to parse any of the xml configuration files
 *Document document = schemaConf.getDocument();
 *final XPath xpath = schemaConf.getXPath();
 *SolrQueryAnalyzer corresponds to Lucene's QueryParser
 *dataDir specifies where the index itself is stored
 */

SolrConfig(SolrResourceLoader loader, String name, InputSource is)
throws ParserConfigurationException, IOException, SAXException {
super(loader, name, is, "/config/");
initLibs();
luceneMatchVersion = getLuceneVersion("luceneMatchVersion", Version.LUCENE_24);
defaultIndexConfig = new SolrIndexConfig(this, null, null);
mainIndexConfig = new SolrIndexConfig(this, "mainIndex", defaultIndexConfig);
reopenReaders = getBool("mainIndex/reopenReaders", true);

booleanQueryMaxClauseCount = getInt("query/maxBooleanClauses", BooleanQuery.getMaxClauseCount());
log.info("Using Lucene MatchVersion: " + luceneMatchVersion);

filtOptEnabled = getBool("query/boolTofilterOptimizer/@enabled", false);
filtOptCacheSize = getInt("query/boolTofilterOptimizer/@cacheSize",32);
filtOptThreshold = getFloat("query/boolTofilterOptimizer/@threshold",.05f);

useFilterForSortedQuery = getBool("query/useFilterForSortedQuery", false);
queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1));
queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE);
enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false);


filterCacheConfig = CacheConfig.getConfig(this, "query/filterCache");
queryResultCacheConfig = CacheConfig.getConfig(this, "query/queryResultCache");
documentCacheConfig = CacheConfig.getConfig(this, "query/documentCache");
CacheConfig conf = CacheConfig.getConfig(this, "query/fieldValueCache");
if (conf == null) {
Map<String,String> args = new HashMap<String,String>();
args.put("name","fieldValueCache");
args.put("size","10000");
args.put("initialSize","10");
args.put("showItems","-1");
conf = new CacheConfig(FastLRUCache.class, args, null);
}
fieldValueCacheConfig = conf;
unlockOnStartup = getBool("mainIndex/unlockOnStartup", false);
useColdSearcher = getBool("query/useColdSearcher",false);
dataDir = get("dataDir", null);
if (dataDir != null && dataDir.length()==0) dataDir=null;

userCacheConfigs = CacheConfig.getMultipleConfigs(this, "query/cache");

org.apache.solr.search.SolrIndexSearcher.initRegenerators(this);

hashSetInverseLoadFactor = 1.0f / getFloat("//HashDocSet/@loadFactor",0.75f);
hashDocSetMaxSize= getInt("//HashDocSet/@maxSize",3000);

pingQueryParams = readPingQueryParams(this);

httpCachingConfig = new HttpCachingConfig(this);

Node jmx = getNode("jmx", false);
if (jmx != null) {
jmxConfig = new JmxConfiguration(true, get("jmx/@agentId", null), get(
"jmx/@serviceUrl", null));
} else {
jmxConfig = new JmxConfiguration(false, null, null);
}
maxWarmingSearchers = getInt("query/maxWarmingSearchers",Integer.MAX_VALUE);

loadPluginInfo(SolrRequestHandler.class,"requestHandler",true, true);
loadPluginInfo(QParserPlugin.class,"queryParser",true, true);
loadPluginInfo(QueryResponseWriter.class,"queryResponseWriter",true, true);
loadPluginInfo(ValueSourceParser.class,"valueSourceParser",true, true);
loadPluginInfo(SearchComponent.class,"searchComponent",true, true);
loadPluginInfo(QueryConverter.class,"queryConverter",true, true);

// this is hackish, since it picks up all SolrEventListeners,
// regardless of when/how/why they are used (or even if they are
// declared outside of the appropriate context) but there's no nice
// way around that in the PluginInfo framework
loadPluginInfo(SolrEventListener.class, "//listener",false, true);

loadPluginInfo(DirectoryFactory.class,"directoryFactory",false, true);
loadPluginInfo(IndexDeletionPolicy.class,"mainIndex/deletionPolicy",false, true);
loadPluginInfo(IndexReaderFactory.class,"indexReaderFactory",false, true);
loadPluginInfo(UpdateRequestProcessorChain.class,"updateRequestProcessorChain",false, false);

//TODO deprecated remove it later
loadPluginInfo(SolrHighlighter.class,"highlighting",false, false);
if( pluginStore.containsKey( SolrHighlighter.class.getName() ) )
log.warn( "Deprecated syntax found. <highlighting/> should move to <searchComponent/>" );

updateHandlerInfo = loadUpdatehandlerInfo();

Config.log.info("Loaded SolrConfig: " + name);

// TODO -- at solr 2.0. this should go away
config = this;
}
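For orientation, a rough sketch of the <query> fragment of solrconfig.xml that the cache and query lookups above read (element and attribute names follow the XPath expressions in the constructor; the values are common defaults, not the author's configuration). Because SolrConfig passes "/config/" as the prefix, "query/filterCache" resolves to /config/query/filterCache.

<config>
  <query>
    <maxBooleanClauses>1024</maxBooleanClauses>
    <filterCache class="solr.FastLRUCache" size="512" initialSize="512" autowarmCount="0"/>
    <queryResultCache class="solr.LRUCache" size="512" initialSize="512" autowarmCount="0"/>
    <documentCache class="solr.LRUCache" size="512" initialSize="512"/>
    <enableLazyFieldLoading>true</enableLazyFieldLoading>
    <queryResultWindowSize>20</queryResultWindowSize>
    <useColdSearcher>false</useColdSearcher>
    <maxWarmingSearchers>2</maxWarmingSearchers>
  </query>
</config>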