This article collects code examples of the Java method com.github.sakserv.minicluster.util.WindowsLibsUtils.setHadoopHome() and shows how WindowsLibsUtils.setHadoopHome() is used in practice. The examples were extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the WindowsLibsUtils.setHadoopHome() method are as follows:
Package path: com.github.sakserv.minicluster.util
Class name: WindowsLibsUtils
Method name: setHadoopHome
Description: not available
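
Before the collected examples, here is a minimal usage sketch. Every example below calls WindowsLibsUtils.setHadoopHome() once inside its configure() (or buildHiveConf()) method; as the recurring "// Handle Windows" comments suggest, the call is there so that Hadoop's Windows-specific native helpers can be resolved before the mini cluster starts. The class name SetupExample is only an illustrative placeholder, not part of the library:

import com.github.sakserv.minicluster.util.WindowsLibsUtils;

public class SetupExample {
    public static void main(String[] args) {
        // Handle Windows: make sure Hadoop's home directory is usable
        // before any mini cluster component is configured or started.
        WindowsLibsUtils.setHadoopHome();

        // ... configure and start the desired mini cluster here ...
    }
}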
Code example source: origin: sakserv/hadoop-mini-clusters
@Override
public void configure() throws Exception {
    if (null != hdfsEnableRunningUserAsProxyUser && hdfsEnableRunningUserAsProxyUser) {
        hdfsConfig.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
        hdfsConfig.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    }

    hdfsConfig.setBoolean("dfs.permissions", hdfsEnablePermissions);
    System.setProperty("test.build.data", hdfsTempDir);

    // Handle Windows
    WindowsLibsUtils.setHadoopHome();
}
Code example source: origin: com.github.sakserv/hadoop-mini-clusters-hbase
@Override
public void configure() throws Exception {
    configure(hbaseConfiguration);

    // Handle Windows
    WindowsLibsUtils.setHadoopHome();
}
Code example source: origin: sakserv/hadoop-mini-clusters
@Override
public void configure() throws Exception {
    // Handle Windows
    WindowsLibsUtils.setHadoopHome();

    configuration.set(YarnConfiguration.RM_ADDRESS, resourceManagerAddress);
    configuration.set(YarnConfiguration.RM_HOSTNAME, resourceManagerHostname);
    configuration.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, resourceManagerSchedulerAddress);
    configuration.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, resourceManagerResourceTrackerAddress);
    configuration.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resourceManagerWebappAddress);
    configuration.set(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, "true");

    if (getUseInJvmContainerExecutor()) {
        configuration.set(YarnConfiguration.NM_CONTAINER_EXECUTOR, inJvmContainerExecutorClass);
        configuration.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
        configuration.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());
    }
}
Code example source: origin: jetoile/hadoop-unit
@Override
public void configure() throws Exception {
    // Handle Windows
    WindowsLibsUtils.setHadoopHome();

    // Oozie has very particular naming conventions for these directories, don't change
    fullOozieHomeDir = oozieTestDir + "/" + oozieHomeDir;
    fullOozieConfDir = fullOozieHomeDir + "/conf";
    fullOozieHadoopConfDir = fullOozieConfDir + "/hadoop-conf";
    fullOozieActionDir = fullOozieConfDir + "/action-conf";

    // Set system properties
    System.setProperty(Services.OOZIE_HOME_DIR, new File(fullOozieHomeDir).getAbsolutePath());
    System.setProperty(ConfigurationService.OOZIE_CONFIG_DIR, fullOozieConfDir);
    System.setProperty("oozielocal.log", fullOozieHomeDir + "/oozielocal.log");
    System.setProperty(XTestCase.OOZIE_TEST_JOB_TRACKER, oozieYarnResourceManagerAddress);
    System.setProperty(XTestCase.OOZIE_TEST_NAME_NODE, oozieHdfsDefaultFs);
    System.setProperty("oozie.test.db.host", "localhost");
    System.setProperty(ConfigurationService.OOZIE_DATA_DIR, fullOozieHomeDir);
    System.setProperty(HadoopAccessorService.SUPPORTED_FILESYSTEMS, "*");

    if (oozieShareLibCreate) {
        oozieConf.set("oozie.service.WorkflowAppService.system.libpath",
                oozieHdfsDefaultFs + oozieHdfsShareLibDir);
        oozieConf.set("use.system.libpath.for.mapreduce.and.pig.jobs", "true");
    }

    oozieConf.set("oozie.service.JPAService.jdbc.driver", "org.hsqldb.jdbcDriver");
    oozieConf.set("oozie.service.JPAService.jdbc.url", "jdbc:hsqldb:mem:oozie-db;create=true");
    oozieConf.set(JPAService.CONF_CREATE_DB_SCHEMA, "true");
}
Code example source: origin: fr.jetoile.hadoop/hadoop-unit-hive (identical code also appears in jetoile/hadoop-unit)
private HiveConf buildHiveConf() {
    // Handle Windows
    WindowsLibsUtils.setHadoopHome();

    HiveConf hiveConf = new HiveConf();
    hiveConf.set("fs.defaultFS", "hdfs://" + configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_KEY)
            + ":" + configuration.getInt(HdfsConfig.HDFS_NAMENODE_PORT_KEY));
    // hiveConf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
    // hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON.varname, "true");
    // hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS.varname, "5");
    // hiveConf.set("hive.root.logger", "DEBUG,console");
    // hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    // hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    // hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    // System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    // System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
    return hiveConf;
}
Code example source: origin: fr.jetoile.hadoop/hadoop-unit-hive (identical code also appears in jetoile/hadoop-unit)
private HiveConf buildHiveConf() {
    // Handle Windows
    WindowsLibsUtils.setHadoopHome();

    HiveConf hiveConf = new HiveConf();
    hiveConf.set("fs.defaultFS", hdfsUri);
    // hiveConf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
    // hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON.varname, "true");
    // hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS.varname, "5");
    // hiveConf.set("hive.root.logger", "DEBUG,console");
    // hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    // hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    // hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    // System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    // System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
    return hiveConf;
}
Code example source: origin: sakserv/hadoop-mini-clusters
@Override
public void configure() throws Exception {
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS,
            "thrift://" + hiveMetastoreHostname + ":" + hiveMetastorePort);
    hiveConf.setVar(HiveConf.ConfVars.SCRATCHDIR, hiveScratchDir);
    hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
            "jdbc:derby:;databaseName=" + hiveMetastoreDerbyDbDir + ";create=true");
    hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, new File(hiveWarehouseDir).getAbsolutePath());
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true);
    hiveConf.set("datanucleus.schema.autoCreateTables", "true");
    hiveConf.set("hive.metastore.schema.verification", "false");

    // Handle Windows
    WindowsLibsUtils.setHadoopHome();
}
Code example source: origin: com.github.sakserv/hadoop-mini-clusters-hiveserver2
@Override
public void configure() throws Exception {
    // Handle Windows
    WindowsLibsUtils.setHadoopHome();

    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS,
            "thrift://" + hiveMetastoreHostname + ":" + hiveMetastorePort);
    hiveConf.setVar(HiveConf.ConfVars.SCRATCHDIR, hiveScratchDir);
    hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
            "jdbc:derby:;databaseName=" + hiveMetastoreDerbyDbDir + ";create=true");
    hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, new File(hiveWarehouseDir).getAbsolutePath());
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true);
    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, String.valueOf(hiveServer2Hostname));
    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, hiveServer2Port);
    hiveConf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_QUORUM, zookeeperConnectionString);
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, Boolean.TRUE);
}
Code example source: origin: fr.jetoile.hadoop/hadoop-unit-hive (identical code also appears in jetoile/hadoop-unit)
@Override
public void configure() throws Exception {
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS,
            "thrift://" + hiveMetastoreHostname + ":" + hiveMetastorePort);
    hiveConf.setVar(HiveConf.ConfVars.SCRATCHDIR, hiveScratchDir);
    hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
            "jdbc:derby:;databaseName=" + hiveMetastoreDerbyDbDir + ";create=true");
    // hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, new File(hiveWarehouseDir).getAbsolutePath());
    hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, hiveWarehouseDir);
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true);
    hiveConf.set("datanucleus.schema.autoCreateTables", "true");
    hiveConf.set("hive.metastore.schema.verification", "false");

    // Handle Windows
    WindowsLibsUtils.setHadoopHome();
}
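
The pattern across all of the examples is the same: WindowsLibsUtils.setHadoopHome() is called exactly once, at either the beginning or the end of the component's configure step, before the mini cluster is actually started. As a closing illustration, the hypothetical helper below sketches the mechanism such a method typically relies on when running Hadoop tests on Windows; it is an assumption for illustration only, not the actual implementation of WindowsLibsUtils:

import java.io.File;

// Hypothetical sketch: shows the common mechanism only; the real
// WindowsLibsUtils.setHadoopHome() in hadoop-mini-clusters may differ.
public class HadoopHomeSketch {
    public static void setHadoopHomeIfNeeded() {
        String os = System.getProperty("os.name").toLowerCase();
        if (os.contains("windows") && System.getProperty("hadoop.home.dir") == null) {
            // Point Hadoop at a directory whose bin/ folder contains winutils.exe,
            // so that the native file-system calls used by the mini clusters work.
            System.setProperty("hadoop.home.dir", new File("windows_libs").getAbsolutePath());
        }
    }
}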