
Usage and Code Examples of the kafka.utils.ZkUtils.apply() Method


This article collects Java code examples for the kafka.utils.ZkUtils.apply() method and shows how ZkUtils.apply() is used in practice. The examples are drawn from selected open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as useful references. Details of the ZkUtils.apply() method:
Package: kafka.utils
Class: ZkUtils
Method: apply

About ZkUtils.apply

The source page provides no description for this method. Judging from the examples below, ZkUtils.apply() is a static factory that returns a ZkUtils instance, either from a ZooKeeper connection string together with a session timeout, a connection timeout, and a ZooKeeper-security flag, or from an existing ZkClient plus the security flag.
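The following is a minimal sketch of the two overloads seen in the examples below. The ZooKeeper address localhost:2181 and the 30000 ms timeouts are illustrative assumptions, not values from the original article; note that ZkUtils is part of Kafka's old internal Scala code, and newer Kafka versions use the AdminClient API instead.

import org.I0Itec.zkclient.ZkClient;

import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;

public class ZkUtilsApplyExample {

 public static void main(String[] args) {
  // Overload 1: build ZkUtils from a ZooKeeper connection string,
  // session timeout (ms), connection timeout (ms), and a security flag.
  // localhost:2181 and 30000 ms are illustrative values only.
  ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, false);
  try {
   System.out.println("Brokers in cluster: " + zkUtils.getAllBrokersInCluster().size());
  } finally {
   zkUtils.close();
  }

  // Overload 2: wrap an existing ZkClient, e.g. when the caller already manages one.
  ZkClient zkClient = new ZkClient("localhost:2181", 30000, 30000, ZKStringSerializer$.MODULE$);
  ZkUtils fromClient = ZkUtils.apply(zkClient, false);
  try {
   // ... use fromClient ...
  } finally {
   fromClient.close();
  }
 }
}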

Code examples

Code example from: linkedin/cruise-control

public static ZkUtils createZkUtils(String zkConnect) {
 return ZkUtils.apply(zkConnect, ZK_SESSION_TIMEOUT, ZK_CONNECTION_TIMEOUT, IS_ZK_SECURITY_ENABLED);
}

Code example from: OryxProject/oryx

/**
 * @param zkServers Zookeeper server string: host1:port1[,host2:port2,...]
 * @param topic topic to check for existence
 * @return {@code true} if and only if the given topic exists
 */
public static boolean topicExists(String zkServers, String topic) {
 ZkUtils zkUtils = ZkUtils.apply(zkServers, ZK_TIMEOUT_MSEC, ZK_TIMEOUT_MSEC, false);
 try {
  return AdminUtils.topicExists(zkUtils, topic);
 } finally {
  zkUtils.close();
 }
}

Code example from: OryxProject/oryx

/**
 * @param zkServers Zookeeper server string: host1:port1[,host2:port2,...]
 * @param topic topic to delete, if it exists
 */
public static void deleteTopic(String zkServers, String topic) {
 ZkUtils zkUtils = ZkUtils.apply(zkServers, ZK_TIMEOUT_MSEC, ZK_TIMEOUT_MSEC, false);
 try {
  if (AdminUtils.topicExists(zkUtils, topic)) {
   log.info("Deleting topic {}", topic);
   AdminUtils.deleteTopic(zkUtils, topic);
   log.info("Deleted Zookeeper topic {}", topic);
  } else {
   log.info("No need to delete topic {} as it does not exist", topic);
  }
 } finally {
  zkUtils.close();
 }
}

Code example from: OryxProject/oryx

/**
 * @param zkServers Zookeeper server string: host1:port1[,host2:port2,...]
 * @param topic topic to create (if not already existing)
 * @param partitions number of topic partitions
 * @param topicProperties optional topic config properties
 */
public static void maybeCreateTopic(String zkServers,
                  String topic,
                  int partitions,
                  Properties topicProperties) {
 ZkUtils zkUtils = ZkUtils.apply(zkServers, ZK_TIMEOUT_MSEC, ZK_TIMEOUT_MSEC, false);
 try {
  if (AdminUtils.topicExists(zkUtils, topic)) {
   log.info("No need to create topic {} as it already exists", topic);
  } else {
   log.info("Creating topic {} with {} partition(s)", topic, partitions);
   try {
    AdminUtils.createTopic(
      zkUtils, topic, partitions, 1, topicProperties, RackAwareMode.Enforced$.MODULE$);
    log.info("Created topic {}", topic);
   } catch (TopicExistsException re) {
    log.info("Topic {} already exists", topic);
   }
  }
 } finally {
  zkUtils.close();
 }
}

Code example from: apache/flink

public ZkUtils getZkUtils() {
  LOG.info("In getZKUtils:: zookeeperConnectionString = {}", zookeeperConnectionString);
  ZkClient creator = new ZkClient(zookeeperConnectionString, Integer.valueOf(standardProps.getProperty("zookeeper.session.timeout.ms")),
      Integer.valueOf(standardProps.getProperty("zookeeper.connection.timeout.ms")), new ZooKeeperStringSerializer());
  return ZkUtils.apply(creator, false);
}

Code example from: apache/flink

public ZkUtils getZkUtils() {
  ZkClient creator = new ZkClient(zookeeperConnectionString, Integer.valueOf(standardProps.getProperty("zookeeper.session.timeout.ms")),
      Integer.valueOf(standardProps.getProperty("zookeeper.connection.timeout.ms")), new ZooKeeperStringSerializer());
  return ZkUtils.apply(creator, false);
}

Code example from: apache/flink

public ZkUtils getZkUtils() {
  ZkClient creator = new ZkClient(zookeeperConnectionString, Integer.valueOf(standardProps.getProperty("zookeeper.session.timeout.ms")),
      Integer.valueOf(standardProps.getProperty("zookeeper.connection.timeout.ms")), new ZooKeeperStringSerializer());
  return ZkUtils.apply(creator, false);
}

Code example from: OryxProject/oryx

/**
 * @param zkServers Zookeeper server string: host1:port1[,host2:port2,...]
 * @param groupID consumer group to update
 * @param offsets mapping of (topic and) partition to offset to push to Zookeeper
 */
public static void setOffsets(String zkServers,
               String groupID,
               Map<Pair<String,Integer>,Long> offsets) {
 ZkUtils zkUtils = ZkUtils.apply(zkServers, ZK_TIMEOUT_MSEC, ZK_TIMEOUT_MSEC, false);
 try {
  offsets.forEach((topicAndPartition, offset) -> {
   String topic = topicAndPartition.getFirst();
   int partition = topicAndPartition.getSecond();
   String partitionOffsetPath = "/consumers/" + groupID + "/offsets/" + topic + "/" + partition;
   zkUtils.updatePersistentPath(partitionOffsetPath,
                  Long.toString(offset),
                  ZkUtils$.MODULE$.defaultAcls(false, ""));
  });
 } finally {
  zkUtils.close();
 }
}

Code example from: linkedin/kafka-monitor

/**
 * @param zkUrl zookeeper connection url
 * @return      number of brokers in this cluster
 */
public static int getBrokerCount(String zkUrl) {
 ZkUtils zkUtils = ZkUtils.apply(zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
 try {
  return zkUtils.getAllBrokersInCluster().size();
 } finally {
  zkUtils.close();
 }
}

Code example from: apache/incubator-gobblin

public void stopClients() throws IOException {
 for (Map.Entry<String, KafkaConsumerSuite> consumerSuiteEntry: _topicConsumerMap.entrySet())
 {
  consumerSuiteEntry.getValue().shutdown();
  AdminUtils.deleteTopic(ZkUtils.apply(_kafkaServerSuite.getZkClient(), false),
    consumerSuiteEntry.getKey());
 }
}

Code example from: linkedin/kafka-monitor

/**
 * Read number of partitions for the given topic on the specified zookeeper
 * @param zkUrl zookeeper connection url
 * @param topic topic name
 *
 * @return the number of partitions of the given topic
 */
public static int getPartitionNumForTopic(String zkUrl, String topic) {
 ZkUtils zkUtils = ZkUtils.apply(zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
 try {
  Seq<String> topics = scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(topic));
  return zkUtils.getPartitionsForTopics(topics).apply(topic).size();
 } catch (NoSuchElementException e) {
  return 0;
 } finally {
  zkUtils.close();
 }
}

Code example from: linkedin/kafka-monitor

ZkUtils zkUtils = ZkUtils.apply(zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
try {
 if (AdminUtils.topicExists(zkUtils, topic)) {

Code example from: apache/incubator-gobblin

public void provisionTopic(String topic) {
 if (_topicConsumerMap.containsKey(topic)) {
  // nothing to do: return
 } else {
  // provision topic
  AdminUtils.createTopic(ZkUtils.apply(_kafkaServerSuite.getZkClient(), false),
    topic, 1, 1, new Properties());
  List<KafkaServer> servers = new ArrayList<>();
  servers.add(_kafkaServerSuite.getKafkaServer());
  kafka.utils.TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
  KafkaConsumerSuite consumerSuite = new KafkaConsumerSuite(_kafkaServerSuite.getZkConnectString(), topic);
  _topicConsumerMap.put(topic, consumerSuite);
 }
}

Code example from: apache/metron

/**
 * Bean for ZooKeeper
 */
@Bean
public ZkUtils zkUtils() {
 return ZkUtils.apply(zkClient, false);
}

Code example from: uber/chaperone

private static void putOffsetInfoIntoZk(String groupId, Map<String, Map<Integer, Long>> topicOffsetsMap) {
 ZkUtils zkUtils =
   ZkUtils.apply(AuditConfig.INGESTER_ZK_CONNECT, Integer.valueOf(AuditConfig.INGESTER_ZK_SESSION_TIMEOUT_MS),
     Integer.valueOf(AuditConfig.INGESTER_ZK_SESSION_TIMEOUT_MS), false);
 try {
  for (Map.Entry<String, Map<Integer, Long>> topicEntry : topicOffsetsMap.entrySet()) {
   String zkPath = String.format("%s/%s/offsets/%s/", ZkUtils.ConsumersPath(), groupId, topicEntry.getKey());
   for (Map.Entry<Integer, Long> offsetEntry : topicEntry.getValue().entrySet()) {
    logger.info("Put offset={} to partition={} with znode path={}", offsetEntry.getValue(), offsetEntry.getKey(),
      zkPath + offsetEntry.getKey());
    zkUtils.updatePersistentPath(zkPath + offsetEntry.getKey(), offsetEntry.getValue().toString(),
      zkUtils.DefaultAcls());
   }
  }
 } catch (Exception e) {
  logger.error("Got exception to put offset, with zkPathPrefix={}",
    String.format("%s/%s/offsets", ZkUtils.ConsumersPath(), groupId));
  throw e;
 } finally {
  zkUtils.close();
 }
}

Code example from: uber/chaperone

public KafkaBrokerTopicObserver(String brokerClusterName, String zkString) {
 LOGGER.info("Trying to init KafkaBrokerTopicObserver {} with ZK: {}", brokerClusterName,
   zkString);
 _kakfaClusterName = brokerClusterName;
 _zkUtils = ZkUtils.apply(zkString, 30000, 30000, false);
 _zkClient = ZkUtils.createZkClient(zkString, 30000, 30000);
 _zkClient.subscribeChildChanges(KAFKA_TOPICS_PATH, this);
 registerMetric();
 executorService.scheduleAtFixedRate(new Runnable() {
  @Override
  public void run() {
   tryToRefreshCache();
  }
 }, 0, 600, TimeUnit.SECONDS);
}

Code example from: uber/chaperone

private static void removeOffsetInfoFromZk(final String groupId) {
 ZkUtils zkUtils =
   ZkUtils.apply(AuditConfig.INGESTER_ZK_CONNECT, Integer.valueOf(AuditConfig.INGESTER_ZK_SESSION_TIMEOUT_MS),
     Integer.valueOf(AuditConfig.INGESTER_ZK_SESSION_TIMEOUT_MS), false);
 try {
  String[] targets = new String[] {"offsets", "owners"};
  for (String target : targets) {
   String zkPath = String.format("%s/%s/%s", ZkUtils.ConsumersPath(), groupId, target);
   logger.info("Remove {} with znode path={}", target, zkPath);
   zkUtils.deletePathRecursive(zkPath);
  }
 } catch (Exception e) {
  logger.error("Got exception to remove offsets or owners from zookeeper, with zkPathPrefix={}",
    String.format("%s/%s/", ZkUtils.ConsumersPath(), groupId));
  throw e;
 } finally {
  zkUtils.close();
 }
}

Code example from: uber/chaperone

public AutoTopicWhitelistingManager(KafkaBrokerTopicObserver srcKafkaTopicObserver,
                  KafkaBrokerTopicObserver destKafkaTopicObserver,
                  HelixMirrorMakerManager helixMirrorMakerManager,
                  String patternToExcludeTopics,
                  int refreshTimeInSec,
                  int initWaitTimeInSec) {
 _srcKafkaTopicObserver = srcKafkaTopicObserver;
 _destKafkaTopicObserver = destKafkaTopicObserver;
 _helixMirrorMakerManager = helixMirrorMakerManager;
 _patternToExcludeTopics = patternToExcludeTopics;
 _refreshTimeInSec = refreshTimeInSec;
 _initWaitTimeInSec = initWaitTimeInSec;
 _zkUtils = ZkUtils.apply(_helixMirrorMakerManager.getHelixZkURL(), 30000, 30000, false);
 _zkClient = ZkUtils.createZkClient(_helixMirrorMakerManager.getHelixZkURL(), 30000, 30000);
 _blacklistedTopicsZPath =
     String.format("/%s/BLACKLISTED_TOPICS", _helixMirrorMakerManager.getHelixClusterName());
}

Code example from: uber/AthenaX

public static boolean createKafkaTopicIfNecessary(String brokerUri, int replFactor, int numPartitions, String topic)
   throws IOException {
  URI zkUri = URI.create(brokerUri);
  Preconditions.checkArgument("zk".equals(zkUri.getScheme()));
  String zkServerList = zkUri.getAuthority() + zkUri.getPath();

  ZkUtils zkUtils = ZkUtils.apply(zkServerList, ZK_SESSION_TIMEOUT_MS,
    ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
  try {
   if (AdminUtils.topicExists(zkUtils, topic)) {
    return false;
   }

   try {
    AdminUtils.createTopic(zkUtils, topic, numPartitions, replFactor, new Properties());
   } catch (TopicExistsException ignored) {
    return false;
   } catch (RuntimeException e) {
    throw new IOException(e);
   }
  } finally {
   if (zkUtils != null) {
    zkUtils.close();
   }
  }
  return true;
 }
}

Code example from: apache/phoenix

@Before
public void setUp() throws IOException, SQLException {
  // setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  String zkConnect = ZKHOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
  // setup Broker
  Properties brokerProps = new Properties();
  brokerProps.setProperty("zookeeper.connect", zkConnect);
  brokerProps.setProperty("broker.id", "0");
  brokerProps.setProperty("log.dirs",
    Files.createTempDirectory("kafka-").toAbsolutePath().toString());
  brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
  KafkaConfig config = new KafkaConfig(brokerProps);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);
  kafkaServer.startup();
  // create topic
  AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties());
  pConsumer = new PhoenixConsumer();
  
  Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
  conn = DriverManager.getConnection(getUrl(), props);
}
