A Noob's ElasticSearch Source Code Analysis —— Startup Flow (Part 2)

When reprinting, please credit the original source: http://www.54tianzhisheng.cn/2018/08/12/es-code03/
Background
The previous article covered the first part of the ES startup flow: the main-method entry point and the creation of the environment and configuration that Elasticsearch needs to run. The next step is to create the node for that environment.
Creating the Node
Let's look at the code that creates a new node. (There is a lot of code here, over 300 lines, and it is hard to split apart, so the key points are annotated directly as comments in the code.)
public Node(Environment environment) {
this(environment, Collections.emptyList()); // delegates to the constructor below
}

protected Node(final Environment environment, Collection<Class<? extends Plugin>> classpathPlugins) {
final List<Closeable> resourcesToClose = new ArrayList<>(); // register everything we need to release in the case of an error
boolean success = false;
{
// use temp logger just to say we are starting. we can't use it later on because the node name might not be set
Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(environment.settings()));
logger.info("initializing ...");
}
try {
originalSettings = environment.settings();
Settings tmpSettings = Settings.builder().put(environment.settings())
.put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build();

// create the node environment as soon as possible, to recover the node id and enable logging
try {
nodeEnvironment = new NodeEnvironment(tmpSettings, environment); // 1. create the node environment: node name, node ID, shard information, storage metadata, and the memory prepared for the node to use
resourcesToClose.add(nodeEnvironment);
} catch (IOException ex) {
throw new IllegalStateException("Failed to create node environment", ex);
}
final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings);
final String nodeId = nodeEnvironment.nodeId();
tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId);
final Logger logger = Loggers.getLogger(Node.class, tmpSettings);
// this must be captured after the node name is possibly added to the settings
final String nodeName = NODE_NAME_SETTING.get(tmpSettings);
if (hadPredefinedNodeName == false) {
logger.info("node name derived from node ID [{}]; set [{}] to override", nodeId, NODE_NAME_SETTING.getKey());
} else {
logger.info("node name [{}], node ID [{}]", nodeName, nodeId);
}

// 2. log JVM information
final JvmInfo jvmInfo = JvmInfo.jvmInfo();
logger.info(
"version[{}], pid[{}], build[{}/{}/{}/{}], OS[{}/{}/{}], JVM[{}/{}/{}/{}]",
Version.displayVersion(Version.CURRENT, Build.CURRENT.isSnapshot()),
jvmInfo.pid(), Build.CURRENT.flavor().displayName(),
Build.CURRENT.type().displayName(), Build.CURRENT.shortHash(),
Build.CURRENT.date(), Constants.OS_NAME, Constants.OS_VERSION,
Constants.OS_ARCH,Constants.JVM_VENDOR,Constants.JVM_NAME,
Constants.JAVA_VERSION,Constants.JVM_VERSION);
logger.info("JVM arguments {}", Arrays.toString(jvmInfo.getInputArguments()));
// check whether the current version is a pre-release (snapshot) build
warnIfPreRelease(Version.CURRENT, Build.CURRENT.isSnapshot(), logger);
// ... (code omitted)
this.pluginsService = new PluginsService(tmpSettings, environment.configFile(), environment.modulesFile(), environment.pluginsFile(), classpathPlugins); // 3. load the corresponding modules and plugins via PluginsService
this.settings = pluginsService.updatedSettings();
localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId());

// create the environment based on the finalized (processed) view of the settings
// this is just to makes sure that people get the same settings, no matter where they ask them from
this.environment = new Environment(this.settings, environment.configFile());
Environment.assertEquivalent(environment, this.environment);

final List<ExecutorBuilder<?>> executorBuilders = pluginsService.getExecutorBuilders(settings); // thread pool builders

final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0]));
resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
// adds the context to the DeprecationLogger so that it does not need to be injected everywhere
DeprecationLogger.setThreadContext(threadPool.getThreadContext());
resourcesToClose.add(() -> DeprecationLogger.removeThreadContext(threadPool.getThreadContext()));

final List<Setting<?>> additionalSettings = new ArrayList<>(pluginsService.getPluginSettings()); // additional settings contributed by plugins
final List<String> additionalSettingsFilter = new ArrayList<>(pluginsService.getPluginSettingsFilter());
for (final ExecutorBuilder<?> builder : threadPool.builders()) {
// 4. register some additional settings exposed by the thread pool builders
additionalSettings.addAll(builder.getRegisteredSettings());
}
client = new NodeClient(settings, threadPool); // 5. create a node client

// 6. create and cache a series of modules, e.g. NodeModule, ClusterModule, IndicesModule, ActionModule, GatewayModule, SettingsModule, RepositoriesModule, ScriptModule, AnalysisModule
final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool);
final ScriptModule scriptModule = new ScriptModule(settings, pluginsService.filterPlugins(ScriptPlugin.class));
AnalysisModule analysisModule = new AnalysisModule(this.environment, pluginsService.filterPlugins(AnalysisPlugin.class));
// this is as early as we can validate settings at this point. we already pass them to ScriptModule as well as ThreadPool so we might be late here already
final SettingsModule settingsModule = new SettingsModule(this.settings, additionalSettings, additionalSettingsFilter);
scriptModule.registerClusterSettingsListeners(settingsModule.getClusterSettings());
resourcesToClose.add(resourceWatcherService);
final NetworkService networkService = new NetworkService(
getCustomNameResolvers(pluginsService.filterPlugins(DiscoveryPlugin.class)));
List<ClusterPlugin> clusterPlugins = pluginsService.filterPlugins(ClusterPlugin.class);
final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool, ClusterModule.getClusterStateCustomSuppliers(clusterPlugins));
clusterService.addStateApplier(scriptModule.getScriptService());
resourcesToClose.add(clusterService);
final IngestService ingestService = new IngestService(settings, threadPool, this.environment, scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class));
final DiskThresholdMonitor listener = new DiskThresholdMonitor(settings, clusterService::state, clusterService.getClusterSettings(), client);
final ClusterInfoService clusterInfoService = newClusterInfoService(settings, clusterService, threadPool, client,
listener::onNewInfo);
final UsageService usageService = new UsageService(settings);

ModulesBuilder modules = new ModulesBuilder();
// plugin modules must be added here, before others or we can get crazy injection errors...
for (Module pluginModule : pluginsService.createGuiceModules()) {
modules.add(pluginModule);
}
final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool, clusterInfoService);
ClusterModule clusterModule = new ClusterModule(settings, clusterService, clusterPlugins, clusterInfoService);
modules.add(clusterModule);
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
modules.add(indicesModule);

SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class));
CircuitBreakerService circuitBreakerService = createCircuitBreakerService(settingsModule.getSettings(),
settingsModule.getClusterSettings());
resourcesToClose.add(circuitBreakerService);
modules.add(new GatewayModule());

PageCacheRecycler pageCacheRecycler = createPageCacheRecycler(settings);
BigArrays bigArrays = createBigArrays(pageCacheRecycler, circuitBreakerService);
resourcesToClose.add(bigArrays);
modules.add(settingsModule);
List<NamedWriteableRegistry.Entry> namedWriteables = Stream.of(
NetworkModule.getNamedWriteables().stream(),
indicesModule.getNamedWriteables().stream(),
searchModule.getNamedWriteables().stream(),
pluginsService.filterPlugins(Plugin.class).stream()
.flatMap(p -> p.getNamedWriteables().stream()),
ClusterModule.getNamedWriteables().stream())
.flatMap(Function.identity()).collect(Collectors.toList());
final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables);
NamedXContentRegistry xContentRegistry = new NamedXContentRegistry(Stream.of(
NetworkModule.getNamedXContents().stream(),
searchModule.getNamedXContents().stream(),
pluginsService.filterPlugins(Plugin.class).stream()
.flatMap(p -> p.getNamedXContent().stream()),
ClusterModule.getNamedXWriteables().stream())
.flatMap(Function.identity()).collect(toList()));
modules.add(new RepositoriesModule(this.environment, pluginsService.filterPlugins(RepositoryPlugin.class), xContentRegistry));
final MetaStateService metaStateService = new MetaStateService(settings, nodeEnvironment, xContentRegistry);
final IndicesService indicesService = new IndicesService(settings, pluginsService, nodeEnvironment, xContentRegistry,
analysisModule.getAnalysisRegistry(), clusterModule.getIndexNameExpressionResolver(), indicesModule.getMapperRegistry(), namedWriteableRegistry,threadPool, settingsModule.getIndexScopedSettings(), circuitBreakerService, bigArrays, scriptModule.getScriptService(),client, metaStateService);

Collection<Object> pluginComponents = pluginsService.filterPlugins(Plugin.class).stream()
.flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService,scriptModule.getScriptService(), xContentRegistry, environment, nodeEnvironment,namedWriteableRegistry).stream())
.collect(Collectors.toList());

ActionModule actionModule = new ActionModule(false, settings, clusterModule.getIndexNameExpressionResolver(),
settingsModule.getIndexScopedSettings(), settingsModule.getClusterSettings(), settingsModule.getSettingsFilter(),threadPool, pluginsService.filterPlugins(ActionPlugin.class), client, circuitBreakerService, usageService);
modules.add(actionModule);

// 7. get the RestController, which handles Elasticsearch's various REST commands (which Elasticsearch calls actions), such as _cat, _all, _cat/health, _clusters
final RestController restController = actionModule.getRestController();
final NetworkModule networkModule = new NetworkModule(settings, false, pluginsService.filterPlugins(NetworkPlugin.class),threadPool, bigArrays, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, xContentRegistry,networkService, restController);
Collection<UnaryOperator<Map<String, MetaData.Custom>>> customMetaDataUpgraders =
pluginsService.filterPlugins(Plugin.class).stream()
.map(Plugin::getCustomMetaDataUpgrader)
.collect(Collectors.toList());
Collection<UnaryOperator<Map<String, IndexTemplateMetaData>>> indexTemplateMetaDataUpgraders =
pluginsService.filterPlugins(Plugin.class).stream()
.map(Plugin::getIndexTemplateMetaDataUpgrader)
.collect(Collectors.toList());
Collection<UnaryOperator<IndexMetaData>> indexMetaDataUpgraders = pluginsService.filterPlugins(Plugin.class).stream()
.map(Plugin::getIndexMetaDataUpgrader).collect(Collectors.toList());
final MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(customMetaDataUpgraders, indexTemplateMetaDataUpgraders);
final MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, xContentRegistry, indicesModule.getMapperRegistry(), settingsModule.getIndexScopedSettings(), indexMetaDataUpgraders);
final GatewayMetaState gatewayMetaState = new GatewayMetaState(settings, nodeEnvironment, metaStateService, metaDataIndexUpgradeService, metaDataUpgrader);
new TemplateUpgradeService(settings, client, clusterService, threadPool, indexTemplateMetaDataUpgraders);
final Transport transport = networkModule.getTransportSupplier().get();
Set<String> taskHeaders = Stream.concat(
pluginsService.filterPlugins(ActionPlugin.class).stream().flatMap(p -> p.getTaskHeaders().stream()),
Stream.of("X-Opaque-Id")
).collect(Collectors.toSet());
final TransportService transportService = newTransportService(settings, transport, threadPool,
networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), taskHeaders);
final ResponseCollectorService responseCollectorService = new ResponseCollectorService(this.settings, clusterService);
final SearchTransportService searchTransportService = new SearchTransportService(settings, transportService,
SearchExecutionStatsCollector.makeWrapper(responseCollectorService));
final Consumer<Binder> httpBind;
final HttpServerTransport httpServerTransport;
if (networkModule.isHttpEnabled()) {
httpServerTransport = networkModule.getHttpServerTransportSupplier().get();
httpBind = b -> {
b.bind(HttpServerTransport.class).toInstance(httpServerTransport);
};
} else {
httpBind = b -> {
b.bind(HttpServerTransport.class).toProvider(Providers.of(null));
};
httpServerTransport = null;
}

final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry,networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(),clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class),clusterModule.getAllocationService());

this.nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(),transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(),httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter(), responseCollectorService,searchTransportService);

final SearchService searchService = newSearchService(clusterService, indicesService, threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase(),responseCollectorService);

final List<PersistentTasksExecutor<?>> tasksExecutors = pluginsService
.filterPlugins(PersistentTaskPlugin.class).stream()
.map(p -> p.getPersistentTasksExecutor(clusterService, threadPool, client))
.flatMap(List::stream)
.collect(toList());

final PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(settings, tasksExecutors);
final PersistentTasksClusterService persistentTasksClusterService =
new PersistentTasksClusterService(settings, registry, clusterService);
final PersistentTasksService persistentTasksService = new PersistentTasksService(settings, clusterService, threadPool, client);

// 8. bind the instances that handle the various services; this is the core part that lets Elasticsearch serve all of these services
modules.add(b -> {
b.bind(Node.class).toInstance(this);
b.bind(NodeService.class).toInstance(nodeService);
b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry);
b.bind(PluginsService.class).toInstance(pluginsService);
b.bind(Client.class).toInstance(client);
b.bind(NodeClient.class).toInstance(client);
b.bind(Environment.class).toInstance(this.environment);
b.bind(ThreadPool.class).toInstance(threadPool);
b.bind(NodeEnvironment.class).toInstance(nodeEnvironment);
b.bind(ResourceWatcherService.class).toInstance(resourceWatcherService);
b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
b.bind(BigArrays.class).toInstance(bigArrays);
b.bind(ScriptService.class).toInstance(scriptModule.getScriptService());
b.bind(AnalysisRegistry.class).toInstance(analysisModule.getAnalysisRegistry());
b.bind(IngestService.class).toInstance(ingestService);
b.bind(UsageService.class).toInstance(usageService);
b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
b.bind(MetaDataUpgrader.class).toInstance(metaDataUpgrader);
b.bind(MetaStateService.class).toInstance(metaStateService);
b.bind(IndicesService.class).toInstance(indicesService);
b.bind(SearchService.class).toInstance(searchService);
b.bind(SearchTransportService.class).toInstance(searchTransportService);
b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings, searchService::createReduceContext));
b.bind(Transport.class).toInstance(transport);
b.bind(TransportService.class).toInstance(transportService);
b.bind(NetworkService.class).toInstance(networkService);
b.bind(UpdateHelper.class).toInstance(new UpdateHelper(settings, scriptModule.getScriptService()));
b.bind(MetaDataIndexUpgradeService.class).toInstance(metaDataIndexUpgradeService);
b.bind(ClusterInfoService.class).toInstance(clusterInfoService);
b.bind(GatewayMetaState.class).toInstance(gatewayMetaState);
b.bind(Discovery.class).toInstance(discoveryModule.getDiscovery());
{
RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings());
processRecoverySettings(settingsModule.getClusterSettings(), recoverySettings);
b.bind(PeerRecoverySourceService.class).toInstance(new PeerRecoverySourceService(settings, transportService,
indicesService, recoverySettings));
b.bind(PeerRecoveryTargetService.class).toInstance(new PeerRecoveryTargetService(settings, threadPool,
transportService, recoverySettings, clusterService));
}
httpBind.accept(b);
pluginComponents.stream().forEach(p -> b.bind((Class) p.getClass()).toInstance(p));
b.bind(PersistentTasksService.class).toInstance(persistentTasksService);
b.bind(PersistentTasksClusterService.class).toInstance(persistentTasksClusterService);
b.bind(PersistentTasksExecutorRegistry.class).toInstance(registry);
});
injector = modules.createInjector();

// TODO hack around circular dependencies problems in AllocationService
clusterModule.getAllocationService().setGatewayAllocator(injector.getInstance(GatewayAllocator.class));

List<LifecycleComponent> pluginLifecycleComponents = pluginComponents.stream()
.filter(p -> p instanceof LifecycleComponent)
.map(p -> (LifecycleComponent) p).collect(Collectors.toList());

// 9. use Guice to inject the various modules and services (xxxService) into the Elasticsearch environment
pluginLifecycleComponents.addAll(pluginsService.getGuiceServiceClasses().stream().map(injector::getInstance).collect(Collectors.toList()));
resourcesToClose.addAll(pluginLifecycleComponents);
this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents);
client.initialize(injector.getInstance(new Key<Map<GenericAction, TransportAction>>() {}), () -> clusterService.localNode().getId(), transportService.getRemoteClusterService());

if (NetworkModule.HTTP_ENABLED.get(settings)) { // if http.enabled is set in elasticsearch.yml (default true), initialize the RestHandlers
logger.debug("initializing HTTP handlers ...");
actionModule.initRestHandlers(() -> clusterService.state().nodes()); // initialize the RestHandlers that parse cluster commands such as _cat/, _cat/health
}
// 10. initialization complete
logger.info("initialized");

success = true;
} catch (IOException ex) {
throw new ElasticsearchException("failed to bind service", ex);
} finally {
if (!success) {
IOUtils.closeWhileHandlingException(resourcesToClose);
}
}
}
That is a lot of code, so here is a summary of what it actually does (the lines responsible for each step are also marked in the code above):
1. Create the node environment: node name, node ID, shard information, storage metadata, and the memory prepared for the node to use
2. Log JVM information
3. Load the corresponding modules and plugins via PluginsService (you can see which modules exist under the modules directory)
4. Register some additional settings
5. Create a node client
6. Create and cache a series of modules, such as NodeModule, ClusterModule, IndicesModule, ActionModule, GatewayModule, SettingsModule, RepositoriesModule, ScriptModule, AnalysisModule
7. Get the RestController, which handles Elasticsearch's various REST commands (Elasticsearch calls them actions), such as _cat, _all, _cat/health, _clusters
8. Bind the instances that handle the various services (see the Guice sketch right after this list)
9. Use Guice to inject the modules and services (xxxService) into the Elasticsearch environment
10. Finish initialization (log "initialized")
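Steps 8 and 9 boil down to Guice dependency injection: every service instance is bound in a module with toInstance(...), and later looked up again with injector.getInstance(...). Below is a minimal sketch of that pattern, assuming the com.google.inject dependency is on the classpath; DemoService is a made-up class used only for illustration, not an Elasticsearch type.

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;

// Hypothetical service, standing in for the many xxxService instances created in the Node constructor.
class DemoService {
    void start() {
        System.out.println("DemoService started");
    }
}

public class GuiceBindingSketch {
    public static void main(String[] args) {
        // The instance is created up front, just like ES builds its services inside the Node constructor.
        DemoService demoService = new DemoService();

        // Mirrors modules.add(b -> b.bind(XxxService.class).toInstance(xxxService)) followed by createInjector().
        Injector injector = Guice.createInjector(new AbstractModule() {
            @Override
            protected void configure() {
                bind(DemoService.class).toInstance(demoService);
            }
        });

        // Later, e.g. in Node#start(), the bound instance is fetched back and started.
        injector.getInstance(DemoService.class).start();
    }
}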
Explaining the JarHell error
The previous article on setting up the source-reading environment mentioned that compiling the ES source with JDK 1.8 hits the following exception:
org.elasticsearch.bootstrap.StartupException: java.lang.IllegalStateException: jar hell!
This is caused by the following code in the setup method:
try {
// look for jar hell
final Logger logger = ESLoggerFactory.getLogger(JarHell.class);
JarHell.checkJarHell(logger::debug);
} catch (IOException | URISyntaxException e) {
throw new BootstrapException(e);
}
So if you compile with JDK 1.8, you need to comment out all of the code related to this check before the build succeeds.
I tried compiling with JDK 10 myself and this error did not appear.
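Conceptually, the jar hell check walks every element on the classpath and fails as soon as the same class file shows up in more than one place. The sketch below is a rough simplification of that idea, not the real JarHell implementation (it only inspects .jar elements and ignores directories, multi-release jars, and the other cases ES handles):

import java.io.File;
import java.io.IOException;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

public class JarHellSketch {
    public static void main(String[] args) throws IOException {
        // class file name -> the jar that first provided it
        Map<String, String> seen = new HashMap<>();
        String[] classpath = System.getProperty("java.class.path").split(File.pathSeparator);
        for (String element : classpath) {
            if (!element.endsWith(".jar")) {
                continue; // simplified: only jar files are inspected here
            }
            try (JarFile jar = new JarFile(element)) {
                Enumeration<JarEntry> entries = jar.entries();
                while (entries.hasMoreElements()) {
                    String name = entries.nextElement().getName();
                    if (!name.endsWith(".class")) {
                        continue;
                    }
                    String previous = seen.put(name, element);
                    if (previous != null && !previous.equals(element)) {
                        // this is the "jar hell!" situation the startup check complains about
                        throw new IllegalStateException("jar hell! class " + name
                                + " found in both " + previous + " and " + element);
                    }
                }
            }
        }
        System.out.println("no duplicate classes found on the classpath");
    }
}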
Actually starting the ES node
Back in the static init method of Bootstrap from earlier, the next step is to actually start the elasticsearch node:
INSTANCE.start(); // calls the start method below

private void start() throws NodeValidationException {
node.start(); // actually starts the Elasticsearch node
keepAliveThread.start();
}
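The keepAliveThread here is worth a note: node.start() returns fairly quickly, so Bootstrap keeps a non-daemon thread blocked on a latch to stop the JVM from exiting once main returns, and releases it from a shutdown hook. The following is a simplified sketch of that pattern; the names are illustrative rather than the exact Bootstrap fields.

import java.util.concurrent.CountDownLatch;

public class KeepAliveSketch {
    public static void main(String[] args) {
        final CountDownLatch keepAliveLatch = new CountDownLatch(1);

        // A non-daemon thread that simply blocks; as long as it is alive, the JVM keeps running.
        Thread keepAliveThread = new Thread(() -> {
            try {
                keepAliveLatch.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // give up and let the thread die
            }
        }, "keepAlive-sketch");
        keepAliveThread.setDaemon(false);

        // When the process is asked to shut down (e.g. Ctrl-C), release the latch so the thread can exit.
        Runtime.getRuntime().addShutdownHook(new Thread(keepAliveLatch::countDown));

        keepAliveThread.start();
        System.out.println("main is done, but the process stays alive until the latch is released");
    }
}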
Next, let's look at the source of the node.start() method called inside that start method:
public Node start() throws NodeValidationException {
if (!lifecycle.moveToStarted()) {
return this;
}

Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
logger.info("starting ...");
pluginLifecycleComponents.forEach(LifecycleComponent::start);

// 1. use Guice to obtain the modules and services registered above
// Starting the Node really means starting each component inside it; each instance's start method is called in turn, as follows:
injector.getInstance(MappingUpdatedAction.class).setClient(client);
injector.getInstance(IndicesService.class).start();
injector.getInstance(IndicesClusterStateService.class).start();
injector.getInstance(SnapshotsService.class).start();
injector.getInstance(SnapshotShardsService.class).start();
injector.getInstance(RoutingService.class).start();
injector.getInstance(SearchService.class).start();
nodeService.getMonitorService().start();

final ClusterService clusterService = injector.getInstance(ClusterService.class);

final NodeConnectionsService nodeConnectionsService = injector.getInstance(NodeConnectionsService.class);
nodeConnectionsService.start();
clusterService.setNodeConnectionsService(nodeConnectionsService);

injector.getInstance(ResourceWatcherService.class).start();
injector.getInstance(GatewayService.class).start();
Discovery discovery = injector.getInstance(Discovery.class);
clusterService.getMasterService().setClusterStatePublisher(discovery::publish);

// Start the transport service now so the publish address will be added to the local disco node in ClusterService
TransportService transportService = injector.getInstance(TransportService.class);
transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class));
transportService.start();
assert localNodeFactory.getNode() != null;
assert transportService.getLocalNode().equals(localNodeFactory.getNode())
: "transportService has a different local node than the factory provided";
final MetaData onDiskMetadata;
try {
// we load the global state here (the persistent part of the cluster state stored on disk) to
// pass it to the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state.
if (DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings)) { // check the settings to see whether this node is a master or data node
onDiskMetadata = injector.getInstance(GatewayMetaState.class).loadMetaState();
} else {
onDiskMetadata = MetaData.EMPTY_META_DATA;
}
assert onDiskMetadata != null : "metadata is null but shouldn't"; // this is never null
} catch (IOException e) {
throw new UncheckedIOException(e);
}
validateNodeBeforeAcceptingRequests(new BootstrapContext(settings, onDiskMetadata), transportService.boundAddress(), pluginsService
.filterPlugins(Plugin
.class)
.stream()
.flatMap(p -> p.getBootstrapChecks().stream()).collect(Collectors.toList()));

// 2. join the current node to a cluster and start it
clusterService.addStateApplier(transportService.getTaskManager());
// start after transport service so the local disco is known
discovery.start(); // start before cluster service so that it can set initial state on ClusterApplierService
clusterService.start();
assert clusterService.localNode().equals(localNodeFactory.getNode())
: "clusterService has a different local node than the factory provided";
transportService.acceptIncomingRequests();
discovery.startInitialJoin();
// tribe nodes don't have a master so we shouldn't register an observer
final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings);
if (initialStateTimeout.millis() > 0) {
final ThreadPool thread = injector.getInstance(ThreadPool.class);
ClusterState clusterState = clusterService.state();
ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext());
if (clusterState.nodes().getMasterNodeId() == null) {
logger.debug("waiting to join the cluster. timeout [{}]", initialStateTimeout);
final CountDownLatch latch = new CountDownLatch(1);
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {latch.countDown(); }

@Override
public void onClusterServiceClose() {
latch.countDown();
}

@Override
public void onTimeout(TimeValue timeout) {
logger.warn("timed out while waiting for initial discovery state - timeout: {}",
initialStateTimeout);
latch.countDown();
}
}, state -> state.nodes().getMasterNodeId() != null, initialStateTimeout);

try {
latch.await();
} catch (InterruptedException e) {
throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state");
}
}
}

if (NetworkModule.HTTP_ENABLED.get(settings)) {
injector.getInstance(HttpServerTransport.class).start();
}

if (WRITE_PORTS_FILE_SETTING.get(settings)) {
if (NetworkModule.HTTP_ENABLED.get(settings)) {
HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
writePortsFile("http", http.boundAddress());
}
TransportService transport = injector.getInstance(TransportService.class);
writePortsFile("transport", transport.boundAddress());
}

logger.info("started");

pluginsService.filterPlugins(ClusterPlugin.class).forEach(ClusterPlugin::onNodeStarted);

return this;
}
The code above mainly does two things:
1. Use Guice to obtain the modules and services registered earlier, then start every component inside the node (by calling each instance's start method; see the lifecycle sketch right after this list)
2. Log that the node has started
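All of these components share the same lifecycle guard seen at the top of Node#start(): moveToStarted() flips an internal state machine, and the method simply returns if the component is already started. Below is a rough, simplified sketch of that idea using only the JDK; it is not the actual org.elasticsearch.common.component.Lifecycle class.

import java.util.concurrent.atomic.AtomicReference;

// Simplified stand-in for the Lifecycle state machine used by Node and other components.
class SimpleLifecycle {
    enum State { INITIALIZED, STARTED, STOPPED }

    private final AtomicReference<State> state = new AtomicReference<>(State.INITIALIZED);

    // Returns false when the component is already started, so callers can bail out early.
    boolean moveToStarted() {
        return state.compareAndSet(State.INITIALIZED, State.STARTED)
                || state.compareAndSet(State.STOPPED, State.STARTED);
    }

    State current() {
        return state.get();
    }
}

public class LifecycleSketch {
    public static void main(String[] args) {
        SimpleLifecycle lifecycle = new SimpleLifecycle();
        if (!lifecycle.moveToStarted()) {
            return; // mirrors the early "return this;" at the top of Node#start()
        }
        System.out.println("component started, state = " + lifecycle.current());
        // a second call does nothing, the state machine is already STARTED
        System.out.println("second moveToStarted() = " + lifecycle.moveToStarted());
    }
}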
Summary
This article stitched together the rough startup flow: creating the node and then actually starting the ES node. Because there is a lot of material it was split into two parts, so the details are skipped for now; once the startup-flow articles are done we will dig into individual details separately.
Related articles
1. Why would a noob read the ElasticSearch source code?
2. A Noob's ElasticSearch Source Code Analysis —— Environment Setup
3. A Noob's ElasticSearch Source Code Analysis —— Startup Flow (Part 1)
4. A Noob's ElasticSearch Source Code Analysis —— Startup Flow (Part 2)
5. Elasticsearch series (1): Comparing Elasticsearch's default analyzer with Chinese analyzers, and how to use them
6. Elasticsearch series (2): A getting-started guide to building a full-text search Elasticsearch cluster
7. Elasticsearch series (3): ElasticSearch cluster monitoring
8. Elasticsearch series (4): Monitoring a single ElasticSearch node
9. Elasticsearch series (5): Setting up an ELK real-time log analysis platform
10. How to remote-debug ElasticSearch in IDEA
