
Usage of the org.apache.solr.common.cloud.ZkStateReader Class, with Code Examples

Reposted · Author: 知者 · Updated: 2024-03-15 12:18:40

This article collects code examples for the Java class org.apache.solr.common.cloud.ZkStateReader and shows how the class is used in practice. The examples are drawn from selected projects on GitHub, Stack Overflow, Maven, and similar sources, and should serve as a useful reference. Details of the ZkStateReader class:

Package path: org.apache.solr.common.cloud.ZkStateReader
Class name: ZkStateReader

About ZkStateReader

ZkStateReader maintains a client-side view of the SolrCloud cluster state stored in ZooKeeper: collections, shards (slices), replicas, live nodes, aliases, and cluster properties.
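
To make this concrete, here is a minimal quick-start sketch that reads the cluster state with a standalone ZkStateReader, in the spirit of the test-framework example further down. It assumes a SolrJ version (roughly 6.x, matching most snippets below) that offers the three-argument constructor and getCollectionsMap(); the ZooKeeper address and timeouts are placeholder values.

import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkStateReader;

public class ZkStateReaderQuickStart {
  public static void main(String[] args) throws Exception {
    // Placeholder ZooKeeper address and timeouts; adjust for your cluster.
    String zkHost = "localhost:9983";
    int zkClientTimeout = 30000;   // ZooKeeper session timeout (ms)
    int zkConnectTimeout = 30000;  // ZooKeeper connect timeout (ms)

    try (ZkStateReader reader = new ZkStateReader(zkHost, zkClientTimeout, zkConnectTimeout)) {
      // Register watchers and pull the initial cluster state from ZooKeeper.
      reader.createClusterStateWatchersAndUpdate();
      ClusterState clusterState = reader.getClusterState();
      System.out.println("Live nodes:  " + clusterState.getLiveNodes());
      System.out.println("Collections: " + clusterState.getCollectionsMap().keySet());
    }
  }
}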

Code Examples

Code example source: thinkaurelius/titan

// Fragment of a "wait for recoveries to finish" style check; code elided by the source site is marked with "...".
ZkStateReader zkStateReader = server.getZkStateReader();
try {
  boolean cont = true;
  // ... (outer retry loop elided)
    zkStateReader.updateClusterState(true);
    ClusterState clusterState = zkStateReader.getClusterState();
    Map<String, Slice> slices = clusterState.getSlicesMap(collection);
    // The reference being checked comes first, the error message second.
    Preconditions.checkNotNull(slices, "Could not find collection:" + collection);
    // ... (iteration over the replicas of each slice elided)
        String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
        // Compare against the enum's string form; the state property is a plain string.
        if ((state.equals(Replica.State.RECOVERING.toString()) || state.equals(Replica.State.DOWN.toString()))
            && clusterState.liveNodesContain(shard.getValue().getStr(
            ZkStateReader.NODE_NAME_PROP))) {
          sawLiveRecovering = true;

Code example source: BroadleafCommerce/BroadleafCommerce

// Fragment: verifying that the PRIMARY and REINDEX collections are both reachable through Solr aliases.
CloudSolrClient reindexCloudClient = (CloudSolrClient) solrConfiguration.getReindexServer();
try {
  primaryCloudClient.connect();
  Aliases aliases = primaryCloudClient.getZkStateReader().getAliases();
  Map<String, String> aliasCollectionMap = aliases.getCollectionAliasMap();
  if (aliasCollectionMap == null || !aliasCollectionMap.containsKey(primaryCloudClient.getDefaultCollection())
      || !aliasCollectionMap.containsKey(reindexCloudClient.getDefaultCollection())) {
    throw new IllegalStateException("Could not determine the PRIMARY or REINDEX "
        // ... (exception message and the rest of the method truncated in the source snippet)

Code example source: thinkaurelius/titan

/**
 * Checks if the collection has already been created in Solr.
 */
private static boolean checkIfCollectionExists(CloudSolrClient server, String collection) throws KeeperException, InterruptedException {
  ZkStateReader zkStateReader = server.getZkStateReader();
  zkStateReader.updateClusterState(true);
  ClusterState clusterState = zkStateReader.getClusterState();
  return clusterState.getCollectionOrNull(collection) != null;
}
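
For context, here is a self-contained sketch of the same existence check, written against the SolrJ 5.x-era API that the Titan snippet targets (new CloudSolrClient(zkHost) and updateClusterState(boolean)); the ZooKeeper address and collection name are placeholders, not values from the original project.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkStateReader;

public class CollectionExistsCheck {
  public static void main(String[] args) throws Exception {
    // Placeholder ZooKeeper address and collection name.
    try (CloudSolrClient client = new CloudSolrClient("localhost:9983")) {
      client.connect(); // establish the ZooKeeper session before using the ZkStateReader
      ZkStateReader reader = client.getZkStateReader();
      reader.updateClusterState(true); // refresh the cached cluster state
      ClusterState clusterState = reader.getClusterState();
      boolean exists = clusterState.getCollectionOrNull("my_collection") != null;
      System.out.println("Collection exists: " + exists);
    }
  }
}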

Code example source: thinkaurelius/titan

@Override
public void clearStorage() throws BackendException {
  try {
    if (mode!=Mode.CLOUD) throw new UnsupportedOperationException("Operation only supported for SolrCloud");
    logger.debug("Clearing storage from Solr: {}", solrClient);
    ZkStateReader zkStateReader = ((CloudSolrClient) solrClient).getZkStateReader();
    zkStateReader.updateClusterState(true);
    ClusterState clusterState = zkStateReader.getClusterState();
    for (String collection : clusterState.getCollections()) {
      logger.debug("Clearing collection [{}] in Solr",collection);
      UpdateRequest deleteAll = newUpdateRequest();
      deleteAll.deleteByQuery("*:*");
      solrClient.request(deleteAll, collection);
    }
  } catch (SolrServerException e) {
    logger.error("Unable to clear storage from index due to server error on Solr.", e);
    throw new PermanentBackendException(e);
  } catch (IOException e) {
    logger.error("Unable to clear storage from index due to low-level I/O error.", e);
    throw new PermanentBackendException(e);
  } catch (Exception e) {
    logger.error("Unable to clear storage from index due to general error.", e);
    throw new PermanentBackendException(e);
  }
}

Code example source: org.apache.solr/solr-solrj

// Fragment of a polling loop that waits until the requested collection(s) report healthy replicas; elided code is marked with "...".
while (!success && System.nanoTime() < timeout) {
 success = true;
 ClusterState clusterState = zkStateReader.getClusterState();
 if (clusterState != null) {
  Map<String, DocCollection> collections = null;
  if (collection != null) {
   collections = Collections.singletonMap(collection, clusterState.getCollection(collection));
  } else {
   collections = clusterState.getCollectionsMap();
  }
  // ... (iteration over each collection's slices and replicas elided)
     for (Replica replica : replicas) {
      boolean live = clusterState.liveNodesContain(replica.getNodeName());
      if (live) {
       // ... (per-replica activity check elided)
  // ... (sleep between retries elided)
   } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted");

Code example source: com.hynnet/solr-solrj

// Heavily abridged fragment of CloudSolrClient's stale-state handling (_stateVer_ checks and retries); elided code is marked with "...".
// ... (method signature elided)
    throws SolrServerException, IOException {
 connect(); // important to call this before you start working with the ZkStateReader
 boolean isAdmin = ADMIN_PATHS.contains(request.getPath());
 if (collection != null && !isAdmin) { // don't do _stateVer_ checking for admin requests
  Set<String> requestedCollectionNames = getCollectionNames(getZkStateReader().getClusterState(), collection);
  // ... (loop over the requested collections elided)
   DocCollection coll = getDocCollection(getZkStateReader().getClusterState(), requestedCollection, null);
   int collVer = coll.getZNodeVersion();
   if (coll.getStateFormat() > 1) {
    // ...
 resp = sendRequest(request, collection);
 // ... (the response may end with a map of collections whose cached state is out of date)
  resp.remove(resp.size() - 1);
  Map invalidStates = (Map) o;
  for (Object invalidEntries : invalidStates.entrySet()) {
   Map.Entry e = (Map.Entry) invalidEntries;
   getDocCollection(getZkStateReader().getClusterState(), (String) e.getKey(), (Integer) e.getValue());
  }
 // ... (on error, inspect the root cause and refresh any collection state that changed in ZooKeeper)
 Throwable rootCause = SolrException.getRootCause(exc);
   ((SolrException) rootCause).code() : SolrException.ErrorCode.UNKNOWN.code;
   wasCommError) {
  for (DocCollection ext : requestedCollections) {
   DocCollection latestStateFromZk = getDocCollection(zkStateReader.getClusterState(), ext.getName(), null);
   if (latestStateFromZk.getZNodeVersion() != ext.getZNodeVersion()) {

Code example source: com.hynnet/solr-solrj

// Abridged fragment of CloudSolrClient's direct-update path: resolve aliases, build a per-shard URL map, then fan out requests; elided code is marked with "...".
Aliases aliases = zkStateReader.getAliases();
if (aliases != null) {
 Map<String, String> collectionAliases = aliases.getCollectionAliasMap();
 // ... (translate the collection name through its alias, if any)
DocCollection col = getDocCollection(clusterState, collection, null);
Map<String, List<String>> urlMap = buildUrlMap(col);
if (urlMap == null) {
 // ... (routing is not possible; the caller falls back to a non-routed request)
NamedList<Throwable> exceptions = new NamedList<>();
NamedList<NamedList> shardResponses = new NamedList<>();
// ... (submit one request per route and collect the futures)
  final Future<NamedList<?>> responseFuture = entry.getValue();
  try {
   shardResponses.add(url, responseFuture.get());
  } catch (InterruptedException e) {
   Thread.currentThread().interrupt();
   // ...
// ... (requests that cannot be routed to a specific shard are sent separately)
  shardResponses.add(urlList.get(0), rsp.getResponse());
 } catch (Exception e) {
  throw new SolrException(ErrorCode.SERVER_ERROR, urlList.get(0), e);
 }
RouteResponse rr = condenseResponse(shardResponses, (long) ((end - start) / 1000000));
rr.setRouteResponses(shardResponses);
rr.setRoutes(routes);

Code example source: com.hynnet/solr-solrj

public Map getClusterProps(){
 Map result = null;
 try {
  if(getZkClient().exists(ZkStateReader.CLUSTER_PROPS, true)){
   result = (Map) Utils.fromJSON(getZkClient().getData(ZkStateReader.CLUSTER_PROPS, null, new Stat(), true)) ;
  } else {
   result= new LinkedHashMap();
  }
  return result;
 } catch (Exception e) {
  throw new SolrException(ErrorCode.SERVER_ERROR,"Error reading cluster properties",e) ;
 }
}
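
As a usage note: getClusterProps() is a public method on ZkStateReader, so callers can read cluster-wide defaults directly. A minimal sketch, assuming reader is an already-connected ZkStateReader (the fallback value "http" is an assumption, not something defined by this method):

// Sketch: read a cluster property from an already-connected ZkStateReader named "reader".
Map props = reader.getClusterProps();
Object urlScheme = props.getOrDefault(ZkStateReader.URL_SCHEME, "http");
System.out.println("Cluster urlScheme: " + urlScheme);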

Code example source: org.apache.solr/solr-solrj

@Override
public String getDatabaseProductVersion() throws SQLException {
 // Returns the version for the first live node in the Solr cluster.
 SolrQuery sysQuery = new SolrQuery();
 sysQuery.setRequestHandler("/admin/info/system");
 CloudSolrClient cloudSolrClient = this.connection.getClient();
 Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes();
 SolrClient solrClient = null;
 for (String node : liveNodes) {
  try {
   String nodeURL = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(node);
   solrClient = new Builder(nodeURL).build();
   QueryResponse rsp = solrClient.query(sysQuery);
   return String.valueOf(((SimpleOrderedMap) rsp.getResponse().get("lucene")).get("solr-spec-version"));
  } catch (SolrServerException | IOException ignore) {
   return "";
  } finally {
   if (solrClient != null) {
    try {
     solrClient.close();
    } catch (IOException ignore) {
     // Don't worry about failing to close the Solr client
    }
   }
  }
 }
 // If no version found just return empty string
 return "";
}

Code example source: com.hynnet/solr-solrj

// Abridged fragment of CloudSolrClient#sendRequest: try a routed update first, otherwise build a URL list from the cluster state; elided code is marked with "...".
protected NamedList<Object> sendRequest(SolrRequest request, String collection)
    throws SolrServerException, IOException {
 connect();
 ClusterState clusterState = zkStateReader.getClusterState();
 // ... (for update requests, attempt a routed direct update first)
   NamedList<Object> response = directUpdate((AbstractUpdateRequest) request, collection, clusterState);
   if (response != null) {
    return response;
   }
 // ... (for admin-path requests, target every live node)
  Set<String> liveNodes = clusterState.getLiveNodes();
  for (String liveNode : liveNodes) {
   theUrlList.add(zkStateReader.getBaseUrlForNodeName(liveNode));
  }
 // ... (for other requests, resolve the collection names and gather their slices)
  Set<String> collectionNames = getCollectionNames(clusterState, collection);
  if (collectionNames.size() == 0) {
   throw new SolrException(ErrorCode.BAD_REQUEST,
     "Could not find collection: " + collection);
  }
  // ...
   ClientUtils.addSlices(slices, collectionName, routeSlices, true);
  // ... (keep only replicas hosted on live nodes when building the URL list)
  Set<String> liveNodes = clusterState.getLiveNodes();
  // ... (if the URL list ends up empty, drop stale cache entries and fail)
    if (s != null) collectionStateCache.remove(s);
   throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Could not find a healthy node to handle the request.");

Code example source: com.hynnet/solr-solrj

// Abridged fragment of ZkStateReader#setClusterProperty: update (or create) the /clusterprops.json node; elided code is marked with "...".
// ... (validation of the property name elided)
 throw new SolrException(ErrorCode.BAD_REQUEST, "Not a known cluster property " + propertyName);
Stat s = new Stat();
try {
 if (getZkClient().exists(CLUSTER_PROPS, true)) {
  int v = 0;
  Map properties = (Map) Utils.fromJSON(getZkClient().getData(CLUSTER_PROPS, null, s, true));
  if (propertyValue == null) {
   // ... (remove the property, then write the node back at the observed version)
   getZkClient().setData(CLUSTER_PROPS, Utils.toJSON(properties), s.getVersion(), true);
  } else {
   // ... (set the property, then write the node back at the observed version)
   getZkClient().setData(CLUSTER_PROPS, Utils.toJSON(properties), s.getVersion(), true);
  }
 } else {
  // The node does not exist yet: create it with just this property.
  Map properties = new LinkedHashMap();
  properties.put(propertyName, propertyValue);
  getZkClient().create(CLUSTER_PROPS, Utils.toJSON(properties), CreateMode.PERSISTENT, true);
 }
} catch (Exception ex) {
 log.error("Error updating path " + CLUSTER_PROPS, ex);
 throw new SolrException(ErrorCode.SERVER_ERROR, "Error updating cluster property " + propertyName, ex);

Code example source: org.apache.solr/solr-solrj

private void getCheckpoints() throws IOException {
 this.checkpoints = new HashMap<>();
 ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
 Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
 ClusterState clusterState = zkStateReader.getClusterState();
 Set<String> liveNodes = clusterState.getLiveNodes();
 for(Slice slice : slices) {
  String sliceName = slice.getName();
  long checkpoint;
  if(initialCheckpoint > -1) {
   checkpoint = initialCheckpoint;
  } else {
   checkpoint = getCheckpoint(slice, liveNodes);
  }
  this.checkpoints.put(sliceName, checkpoint);
 }
}

Code example source: com.hynnet/solr-solrj

public static DocCollection getCollectionLive(ZkStateReader zkStateReader,
  String coll) {
 String collectionPath = getCollectionPath(coll);
 try {
  Stat stat = new Stat();
  byte[] data = zkStateReader.getZkClient().getData(collectionPath, null, stat, true);
  ClusterState state = ClusterState.load(stat.getVersion(), data,
    Collections.<String> emptySet(), collectionPath);
  ClusterState.CollectionRef collectionRef = state.getCollectionStates().get(coll);
  return collectionRef == null ? null : collectionRef.get();
 } catch (KeeperException.NoNodeException e) {
  log.warn("No node available : " + collectionPath, e);
  return null;
 } catch (KeeperException e) {
  throw new SolrException(ErrorCode.BAD_REQUEST,
    "Could not load collection from ZK:" + coll, e);
 } catch (InterruptedException e) {
  Thread.currentThread().interrupt();
  throw new SolrException(ErrorCode.BAD_REQUEST,
    "Could not load collection from ZK:" + coll, e);
 }
}

Code example source: org.apache.solr/solr-test-framework

// Abridged test-framework fragment: read the cluster state with a standalone ZkStateReader, then check replica state and document counts; elided code is marked with "...".
try (ZkStateReader zk = new ZkStateReader(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT,
    AbstractZkTestCase.TIMEOUT)) {
 zk.createClusterStateWatchersAndUpdate();
 clusterState = zk.getClusterState();
 final DocCollection docCollection = clusterState.getCollectionOrNull(DEFAULT_COLLECTION);
 slices = (docCollection != null) ? docCollection.getSlicesMap() : null;
 // ... (if the collection is missing, fail with a message listing the known collections)
   + DEFAULT_COLLECTION + " in " + clusterState.getCollectionsMap().keySet());
}
// ... (elsewhere: query each jetty whose replica is ACTIVE and hosted on a live node)
 ZkStateReader zkStateReader = cloudClient.getZkStateReader();
 long count = 0;
 final Replica.State currentState = Replica.State.getState(cjetty.info.getStr(ZkStateReader.STATE_PROP));
 if (currentState == Replica.State.ACTIVE
     && zkStateReader.getClusterState().liveNodesContain(cjetty.info.getStr(ZkStateReader.NODE_NAME_PROP))) {
  SolrQuery query = new SolrQuery("*:*");
  query.set("distrib", false);
  // ... (accumulation of per-replica counts elided)
// ... (finally, compare against the control collection's count)
SolrQuery query = new SolrQuery("*:*");
assertEquals("Doc Counts do not add up", controlCount,
    cloudClient.query(query).getResults().getNumFound());

Code example source: org.apache.jackrabbit/oak-solr-core

// Abridged fragment: upload a config set and create the collection if it does not exist yet; elided code is marked with "...".
private void createCollectionIfNeeded(CloudSolrClient cloudSolrServer) throws SolrServerException {
  String solrCollection = remoteSolrServerConfiguration.getSolrCollection();
  ZkStateReader zkStateReader = cloudSolrServer.getZkStateReader();
  SolrZkClient zkClient = zkStateReader.getZkClient();
  log.debug("creating {} collection if needed", solrCollection);
  try {
    if (zkClient.isConnected() && !zkClient.exists("/configs/" + solrCollection, true)) {
      String solrConfDir = remoteSolrServerConfiguration.getSolrConfDir();
      Path dir;
      // ... (resolution of "dir" from solrConfDir elided)
      cloudSolrServer.uploadConfig(dir, solrCollection);
      // ... (construction of the collection-creation request "req" elided)
      cloudSolrServer.request(req);

Code example source: com.ngdata/hbase-indexer-common

// Abridged "wait for recoveries to finish" fragment: flag replicas that are still recovering on live nodes; elided code is marked with "...".
if (verbose) System.out.println("-");
boolean sawLiveRecovering = false;
ClusterState clusterState = zkStateReader.getClusterState();
final DocCollection docCollection = clusterState.getCollectionOrNull(collection);
if (docCollection == null) throw new IllegalStateException("Could not find collection:" + collection);
Map<String, Slice> slices = docCollection.getSlicesMap();
// ... (iteration over each slice's replicas; optional verbose logging of replica state and liveness)
    + shard.getValue().getStr(ZkStateReader.STATE_PROP)
    + " live:"
    + clusterState.liveNodesContain(shard.getValue().getNodeName()));
  final Replica.State state = shard.getValue().getState();
  if ((state == Replica.State.RECOVERING || state == Replica.State.DOWN || state == Replica.State.RECOVERY_FAILED)
      && clusterState.liveNodesContain(shard.getValue().getStr(ZkStateReader.NODE_NAME_PROP))) {
   sawLiveRecovering = true;
// ... (if the timeout is reached while replicas are still recovering, dump diagnostics and fail)
   Diagnostics.logThreadDumps("Gave up waiting for recovery to finish.  THREAD DUMP:");
   try {
    zkStateReader.getZkClient().printLayoutToStdOut();
   } catch (KeeperException | InterruptedException e) {
    throw new RuntimeException(e);

Code example source: org.apache.solr/solr-solrj

// Abridged fragment of the aliases update loop: write the modified aliases at the expected ZooKeeper version, retrying on conflicts; elided code is marked with "...".
final Stat stat = getZkClient().setData(ALIASES, modAliasesJson, curAliases.getZNodeVersion(), true);
setIfNewer(Aliases.fromJSON(modAliasesJson, stat.getVersion()));
return;
// ... (on timeout while retrying:)
 throw new SolrException(ErrorCode.SERVER_ERROR, "Timed out trying to update aliases! " +
   "Either zookeeper or this node may be overloaded.");
// ... (after too many version conflicts:)
throw new SolrException(ErrorCode.SERVER_ERROR, "Too many successive version failures trying to update aliases");

Code example source: com.cloudera.search/search-mr

public DocCollection extractDocCollection(String zkHost, String collection) {
 if (collection == null) {
  throw new IllegalArgumentException("collection must not be null");
 }
 SolrZkClient zkClient = getZkClient(zkHost);
 
 try (ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
  try {
   // first check for alias
   collection = checkForAlias(zkClient, collection);
   zkStateReader.createClusterStateWatchersAndUpdate();
  } catch (Exception e) {
   throw new IllegalArgumentException("Cannot find expected information for SolrCloud in ZooKeeper: " + zkHost, e);
  }
  
  try {
   return zkStateReader.getClusterState().getCollection(collection);
  } catch (SolrException e) {
   throw new IllegalArgumentException("Cannot find collection '" + collection + "' in ZooKeeper: " + zkHost, e);
  }
 } finally {
  zkClient.close();
 }    
}
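
A hedged usage sketch of the method above: it simply walks the returned DocCollection and prints each shard's leader. The ZooKeeper address and collection name are placeholders, and the call assumes extractDocCollection is accessible from the calling code.

// Sketch: enumerate the shard leaders of the extracted collection.
DocCollection docCollection = extractDocCollection("zk1.example.com:2181", "my_collection");
for (Slice slice : docCollection.getSlices()) {
  Replica leader = slice.getLeader();
  System.out.println(slice.getName() + " -> " + (leader != null ? leader.getCoreUrl() : "no leader"));
}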

Code example source: org.apache.solr/solr-test-framework

// Abridged test-framework fragment: wait until all replicas of a shard are up, polling the cluster state; elided code is marked with "...".
ZkStateReader zkr = cloudClient.getZkStateReader();
zkr.forceUpdateCollection(testCollectionName); // force the state to be fresh
ClusterState cs = zkr.getClusterState();
Collection<Slice> slices = cs.getCollection(testCollectionName).getActiveSlices();
assertTrue(slices.size() == shards);
boolean allReplicasUp = false;
long waitMs = 0L;
long maxWaitMs = maxWaitSecs * 1000L;
Replica leader = null;
ZkShardTerms zkShardTerms = new ZkShardTerms(testCollectionName, shardId, cloudClient.getZkStateReader().getZkClient());
while (waitMs < maxWaitMs && !allReplicasUp) {
 cs = cloudClient.getZkStateReader().getClusterState();
 assertNotNull(cs);
 final DocCollection docCollection = cs.getCollectionOrNull(testCollectionName);
 assertNotNull("No collection found for " + testCollectionName, docCollection);
 Slice shard = docCollection.getSlice(shardId);
 // ... (per-replica state checks, leader lookup, sleep, and waitMs bookkeeping elided)

Code example source: org.apache.solr/solr-test-framework

protected int getTotalReplicas(String collection) {
 ZkStateReader zkStateReader = cloudClient.getZkStateReader();
 DocCollection coll = zkStateReader.getClusterState().getCollectionOrNull(collection);
 if (coll == null) return 0;  // support for when collection hasn't been created yet
 int cnt = 0;
 for (Slice slices : coll.getSlices()) {
  cnt += slices.getReplicas().size();
 }
 return cnt;
}
