
Usage and Code Examples of the org.I0Itec.zkclient.ZkConnection Class

Repost | Author: 知者 | Updated: 2024-03-16 12:32:40

This article collects code examples for the Java class org.I0Itec.zkclient.ZkConnection and shows how the class is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the ZkConnection class are as follows:
Package path: org.I0Itec.zkclient.ZkConnection
Class name: ZkConnection

About ZkConnection

No description is provided.
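
Although the original article leaves this section blank, the examples below make the class's role clear: ZkConnection is the zkclient library's IZkConnection implementation. It wraps the raw org.apache.zookeeper.ZooKeeper handle, is constructed from a ZooKeeper connect string (optionally with a session timeout), and is then passed to a ZkClient or to Kafka's ZkUtils. The following minimal sketch illustrates that pattern; the connect string, timeout values, and the ZkConnectionExample class name are illustrative assumptions rather than code from any of the quoted projects.

import java.util.List;

import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.ZkConnection;
import org.I0Itec.zkclient.serialize.BytesPushThroughSerializer;

public class ZkConnectionExample {
  public static void main(String[] args) {
    // Connect string and timeout values are illustrative.
    ZkConnection zkConnection = new ZkConnection("127.0.0.1:2181", 30000);
    // ZkClient establishes the connection and layers serialization, retries and watches on top of it.
    ZkClient zkClient = new ZkClient(zkConnection, 30000, new BytesPushThroughSerializer());
    try {
      // Simple sanity check: list the children of the root znode.
      List<String> children = zkClient.getChildren("/");
      System.out.println("Children of /: " + children);
    } finally {
      // Closing the ZkClient also closes the wrapped ZkConnection.
      zkClient.close();
    }
  }
}

The Kafka-oriented examples that follow use the same construction, only with ZKStringSerializer$.MODULE$ as the serializer and ZkUtils layered on top.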

Code Examples

Code example source: apache/incubator-gobblin

ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), false);
int partitions = ConfigUtils.getInt(config, KafkaWriterConfigurationKeys.PARTITION_COUNT, KafkaWriterConfigurationKeys.PARTITION_COUNT_DEFAULT);
int replication = ConfigUtils.getInt(config, KafkaWriterConfigurationKeys.REPLICATION_COUNT, KafkaWriterConfigurationKeys.PARTITION_COUNT_DEFAULT);

Code example source: hopshadoop/hopsworks

if (zk == null || !zk.getState().isConnected()) {
  if (zk != null) {
    zk.close();
  }
  zk = new ZooKeeper(settings.getZkConnectStr(),
      sessionTimeoutMs, new ZookeeperWatcher());
  for (String topicName : zkTopics) {
    if (zkConnection == null) {
      zkConnection = new ZkConnection(settings.getZkConnectStr());
    }
    zkConnection.close();
    // ... remainder of the loop body is truncated in the original excerpt
  }
}

Code example source: hopshadoop/hopsworks

ZkClient zkClient = new ZkClient(getIp(settings.getZkConnectStr()).getHostName(),
    Settings.ZOOKEEPER_SESSION_TIMEOUT_MS, Settings.ZOOKEEPER_CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$);
ZkConnection zkConnection = new ZkConnection(settings.getZkConnectStr());
ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, false);
// ... topic administration via zkUtils elided in the excerpt, then clean up:
zkClient.close();
try {
  zkConnection.close();
} catch (InterruptedException ex) {
  Logger.getLogger(KafkaFacade.class.getName()).log(Level.SEVERE, null, ex);
}

Code example source: com.101tec/zkclient

public void writeData(String path, byte[] data) throws KeeperException, InterruptedException {
  writeData(path, data, -1);
}

Code example source: hopshadoop/hopsworks

zkClient = new ZkClient(getIp(settings.getZkConnectStr()).getHostName(),
    Settings.ZOOKEEPER_SESSION_TIMEOUT_MS, Settings.ZOOKEEPER_CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$);
zkConnection = new ZkConnection(settings.getZkConnectStr());
for (ProjectTopics topic : topics) {
  zkConnection.close();
  // ... remainder of the loop body is truncated in the original excerpt
}

Code example source: com.github.sgroschupf/zkclient

public void writeData(String path, byte[] data) throws KeeperException, InterruptedException {
  writeData(path, data, -1);
}

Code example source: apache/drill

public static void createTopicHelper(final String topicName, final int partitions) {
 Properties topicProps = new Properties();
 topicProps.put(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime");
 topicProps.put(TopicConfig.RETENTION_MS_CONFIG, "-1");
 ZkUtils zkUtils = new ZkUtils(zkClient,
   new ZkConnection(embeddedKafkaCluster.getZkServer().getConnectionString()), false);
 AdminUtils.createTopic(zkUtils, topicName, partitions, 1,
   topicProps, RackAwareMode.Disabled$.MODULE$);
 org.apache.kafka.common.requests.MetadataResponse.TopicMetadata fetchTopicMetadataFromZk =
   AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils);
 logger.info("Topic Metadata: " + fetchTopicMetadataFromZk);
}

Code example source: hopshadoop/hopsworks

ZkClient zkClient = new ZkClient(getIp(settings.getZkConnectStr()).getHostName(),
    Settings.ZOOKEEPER_SESSION_TIMEOUT_MS, Settings.ZOOKEEPER_CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$);
ZkConnection zkConnection = new ZkConnection(settings.getZkConnectStr());
ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, false);
// ... topic administration via zkUtils elided in the excerpt, then clean up:
zkClient.close();
try {
  zkConnection.close();
} catch (InterruptedException ex) {
  LOG.log(Level.SEVERE, null, ex.getMessage());
}

Code example source: linkedin/cruise-control

public BrokerFailureDetector(KafkaCruiseControlConfig config,
               LoadMonitor loadMonitor,
               Queue<Anomaly> anomalies,
               Time time,
               KafkaCruiseControl kafkaCruiseControl) {
 String zkUrl = config.getString(KafkaCruiseControlConfig.ZOOKEEPER_CONNECT_CONFIG);
 ZkConnection zkConnection = new ZkConnection(zkUrl, 30000);
 _zkClient = new ZkClient(zkConnection, 30000, new ZkStringSerializer());
 // Do not support secure ZK at this point.
 _zkUtils = new ZkUtils(_zkClient, zkConnection, false);
 _failedBrokers = new HashMap<>();
 _failedBrokersZkPath = config.getString(KafkaCruiseControlConfig.FAILED_BROKERS_ZK_PATH_CONFIG);
 _loadMonitor = loadMonitor;
 _anomalies = anomalies;
 _time = time;
 _kafkaCruiseControl = kafkaCruiseControl;
 _allowCapacityEstimation = config.getBoolean(KafkaCruiseControlConfig.ANOMALY_DETECTION_ALLOW_CAPACITY_ESTIMATION_CONFIG);
}

Code example source: apache/incubator-gobblin

ZkClient zkClient = new ZkClient(liveZookeeper, sessionTimeoutMs, connectionTimeoutMs,
    ZKStringSerializer$.MODULE$); // sessionTimeoutMs and connectionTimeoutMs are placeholders; the original excerpt starts mid-constructor
boolean isSecureKafkaCluster = false;
ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(liveZookeeper), isSecureKafkaCluster);

Code example source: apache/incubator-gobblin

ZkClient zkClient = new ZkClient(zookeeperConnect, sessionTimeoutMs, connectionTimeoutMs,
    ZKStringSerializer$.MODULE$); // sessionTimeoutMs and connectionTimeoutMs are placeholders; the original excerpt starts mid-constructor
boolean isSecureKafkaCluster = false;
ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), isSecureKafkaCluster);

Code example source: apache/drill

@BeforeClass
public static void initKafka() throws Exception {
 synchronized (TestKafkaSuit.class) {
  if (initCount.get() == 0) {
   ZookeeperTestUtil.setZookeeperSaslTestConfigProps();
   System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, ClassLoader.getSystemResource(LOGIN_CONF_RESOURCE_PATHNAME).getFile());
   embeddedKafkaCluster = new EmbeddedKafkaCluster();
   Properties topicProps = new Properties();
   zkClient = new ZkClient(embeddedKafkaCluster.getZkServer().getConnectionString(), SESSION_TIMEOUT, CONN_TIMEOUT, ZKStringSerializer$.MODULE$);
   ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(embeddedKafkaCluster.getZkServer().getConnectionString()), false);
   AdminUtils.createTopic(zkUtils, TestQueryConstants.JSON_TOPIC, 1, 1, topicProps, RackAwareMode.Disabled$.MODULE$);
   org.apache.kafka.common.requests.MetadataResponse.TopicMetadata fetchTopicMetadataFromZk = AdminUtils
     .fetchTopicMetadataFromZk(TestQueryConstants.JSON_TOPIC, zkUtils);
   logger.info("Topic Metadata: " + fetchTopicMetadataFromZk);
   KafkaMessageGenerator generator = new KafkaMessageGenerator(embeddedKafkaCluster.getKafkaBrokerList(),
     StringSerializer.class);
   generator.populateJsonMsgIntoKafka(TestQueryConstants.JSON_TOPIC, NUM_JSON_MSG);
  }
  initCount.incrementAndGet();
  runningSuite = true;
 }
 logger.info("Initialized Embedded Zookeeper and Kafka");
}

Code example source: apache/incubator-druid

String zkHosts = config.getZookeeperHosts();
zkClient = new ZkClient(zkHosts, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
zkUtils = new ZkUtils(zkClient, new ZkConnection(zkHosts, sessionTimeoutMs), false);
if (config.manageKafkaTopic()) {
  int numPartitions = 1;
  // ... topic creation is truncated in the original excerpt
}

Code example source: apache/incubator-druid

String zkHosts = config.getZookeeperHosts();
zkClient = new ZkClient(zkHosts, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
zkUtils = new ZkUtils(zkClient, new ZkConnection(zkHosts, sessionTimeoutMs), false);
if (config.manageKafkaTopic()) {
  int numPartitions = 4;
  // ... topic creation is truncated in the original excerpt
}

Code example source: confluentinc/kafka-streams-examples

/**
  * Delete a Kafka topic.
  *
  * @param topic The name of the topic.
  */
 public void deleteTopic(String topic) {
  log.debug("Deleting topic {}", topic);
  ZkClient zkClient = new ZkClient(
    zookeeperConnect(),
    DEFAULT_ZK_SESSION_TIMEOUT_MS,
    DEFAULT_ZK_CONNECTION_TIMEOUT_MS,
    ZKStringSerializer$.MODULE$);
  boolean isSecure = false;
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure);
  AdminUtils.deleteTopic(zkUtils, topic);
  zkClient.close();
 }

Code example source: confluentinc/kafka-streams-examples

/**
 * Create a Kafka topic with the given parameters.
 *
 * @param topic       The name of the topic.
 * @param partitions  The number of partitions for this topic.
 * @param replication The replication factor for (partitions of) this topic.
 * @param topicConfig Additional topic-level configuration settings.
 */
public void createTopic(String topic,
            int partitions,
            int replication,
            Properties topicConfig) {
 log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
   topic, partitions, replication, topicConfig);
 // Note: You must initialize the ZkClient with ZKStringSerializer.  If you don't, then
 // createTopic() will only seem to work (it will return without error).  The topic will exist in
 // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
 // topic.
 ZkClient zkClient = new ZkClient(
   zookeeperConnect(),
   DEFAULT_ZK_SESSION_TIMEOUT_MS,
   DEFAULT_ZK_CONNECTION_TIMEOUT_MS,
   ZKStringSerializer$.MODULE$);
 boolean isSecure = false;
 ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure);
 AdminUtils.createTopic(zkUtils, topic, partitions, replication, topicConfig, RackAwareMode.Enforced$.MODULE$);
 zkClient.close();
}

Code example source: org.apache.helix/helix-core

public ZkClient(String zkServers, int sessionTimeout, int connectionTimeout,
  PathBasedZkSerializer zkSerializer, String monitorType, String monitorKey) {
 this(new ZkConnection(zkServers, sessionTimeout), connectionTimeout, zkSerializer, monitorType,
   monitorKey);
}

Code example source: org.apache.helix/helix-core

public ZkClient(final String zkServers, final int sessionTimeout, final int connectionTimeout,
  final ZkSerializer zkSerializer, final long operationRetryTimeout) {
 this(new ZkConnection(zkServers, sessionTimeout), connectionTimeout, zkSerializer,
   operationRetryTimeout);
}

Code example source: org.apache.helix/helix-core

public ZkClient(String zkServers, int sessionTimeout, int connectionTimeout,
  ZkSerializer zkSerializer) {
 this(new ZkConnection(zkServers, sessionTimeout), connectionTimeout, zkSerializer);
}

Code example source: org.apache.helix/helix-core

public ZkClient(String zkServers, int sessionTimeout, int connectionTimeout,
  PathBasedZkSerializer zkSerializer) {
 this(new ZkConnection(zkServers, sessionTimeout), connectionTimeout, zkSerializer);
}
