gpt4 book ai didi

java - Cloudera Hbase 1.0.0 集成依赖冲突

转载 作者:行者123 更新时间:2023-11-30 06:59:28 25 4
gpt4 key购买 nike

我尝试将我的 Play Framework (2.4.2) Web 应用程序连接到 cloudera hbase 集群。我在我的 build.sbt 文件中包含了 hbase 依赖项,并使用 hbase 示例代码将一个单元格插入到一个表中。但是,我得到了这个异常,这似乎是 play framework 和 Hbase 之间的依赖冲突。我还附上了示例代码和 build.sbt 文件。如果您能帮助解决此错误,我将不胜感激。

    [ERROR] [07/21/2015 12:03:05.919] [application-akka.actor.default-dispatcher-5] [ActorSystem(application)] Uncaught fatal error from thread [application-akka.actor.default-dispatcher-5] shutting down ActorSystem [application]
java.lang.IllegalAccessError: tried to access method com.google.common.base.Stopwatch.<init>()V from class org.apache.hadoop.hbase.zookeeper.MetaTableLocator
at org.apache.hadoop.hbase.zookeeper.MetaTableLocator.blockUntilAvailable(MetaTableLocator.java:434)
at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getMetaRegionLocation(ZooKeeperRegistry.java:60)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1123)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1110)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegionInMeta(ConnectionManager.java:1262)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1126)
at org.apache.hadoop.hbase.client.AsyncProcess.submit(AsyncProcess.java:369)
at org.apache.hadoop.hbase.client.AsyncProcess.submit(AsyncProcess.java:320)
at org.apache.hadoop.hbase.client.BufferedMutatorImpl.backgroundFlushCommits(BufferedMutatorImpl.java:206)
at org.apache.hadoop.hbase.client.BufferedMutatorImpl.flush(BufferedMutatorImpl.java:183)
at org.apache.hadoop.hbase.client.HTable.flushCommits(HTable.java:1496)
at org.apache.hadoop.hbase.client.HTable.put(HTable.java:1107)
at controllers.Application.index(Application.java:44)
at router.Routes$$anonfun$routes$1$$anonfun$applyOrElse$1$$anonfun$apply$1.apply(Routes.scala:95)
at router.Routes$$anonfun$routes$1$$anonfun$applyOrElse$1$$anonfun$apply$1.apply(Routes.scala:95)
at play.core.routing.HandlerInvokerFactory$$anon$4.resultCall(HandlerInvoker.scala:136)
at play.core.routing.HandlerInvokerFactory$JavaActionInvokerFactory$$anon$14$$anon$3$$anon$1.invocation(HandlerInvoker.scala:127)
at play.core.j.JavaAction$$anon$1.call(JavaAction.scala:70)
at play.http.DefaultHttpRequestHandler$1.call(DefaultHttpRequestHandler.java:20)
at play.core.j.JavaAction$$anonfun$7.apply(JavaAction.scala:94)
at play.core.j.JavaAction$$anonfun$7.apply(JavaAction.scala:94)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
at play.core.j.HttpExecutionContext$$anon$2.run(HttpExecutionContext.scala:40)
at play.api.libs.iteratee.Execution$trampoline$.execute(Execution.scala:70)
at play.core.j.HttpExecutionContext.execute(HttpExecutionContext.scala:32)
at scala.concurrent.impl.Future$.apply(Future.scala:31)
at scala.concurrent.Future$.apply(Future.scala:492)
at play.core.j.JavaAction.apply(JavaAction.scala:94)
at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4$$anonfun$apply$5.apply(Action.scala:105)
at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4$$anonfun$apply$5.apply(Action.scala:105)
at play.utils.Threads$.withContextClassLoader(Threads.scala:21)
at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4.apply(Action.scala:104)
at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4.apply(Action.scala:103)
at scala.Option.map(Option.scala:146)
at play.api.mvc.Action$$anonfun$apply$1.apply(Action.scala:103)
at play.api.mvc.Action$$anonfun$apply$1.apply(Action.scala:96)
at play.api.libs.iteratee.Iteratee$$anonfun$mapM$1.apply(Iteratee.scala:524)
at play.api.libs.iteratee.Iteratee$$anonfun$mapM$1.apply(Iteratee.scala:524)
at play.api.libs.iteratee.Iteratee$$anonfun$flatMapM$1.apply(Iteratee.scala:560)
at play.api.libs.iteratee.Iteratee$$anonfun$flatMapM$1.apply(Iteratee.scala:560)
at play.api.libs.iteratee.Iteratee$$anonfun$flatMap$1$$anonfun$apply$13.apply(Iteratee.scala:536)
at play.api.libs.iteratee.Iteratee$$anonfun$flatMap$1$$anonfun$apply$13.apply(Iteratee.scala:536)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
at akka.dispatch.TaskInvocation.run(AbstractDispatcher.scala:40)
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)

这是我的 build.sbt 文件:

name := """HbaseTest"""

version := "1.0-SNAPSHOT"

lazy val root = (project in file(".")).enablePlugins(PlayJava)

scalaVersion := "2.11.6"

// CDH artifact versions shared by the HBase client stack and its Hadoop runtime.
val cdhHBaseVersion  = "1.0.0-cdh5.4.4"
val cdhHadoopVersion = "2.6.0-cdh5.4.4"

libraryDependencies ++= Seq(
  javaJdbc,
  cache,
  javaWs,
  // HBase client stack
  "org.apache.hbase" % "hbase-client"      % cdhHBaseVersion,
  "org.apache.hbase" % "hbase-annotations" % cdhHBaseVersion,
  "org.apache.hbase" % "hbase-common"      % cdhHBaseVersion,
  "org.apache.hbase" % "hbase-protocol"    % cdhHBaseVersion,
  // Hadoop runtime required by the HBase client
  "org.apache.hadoop" % "hadoop-common"      % cdhHadoopVersion,
  "org.apache.hadoop" % "hadoop-annotations" % cdhHadoopVersion,
  "org.apache.hadoop" % "hadoop-auth"        % cdhHadoopVersion
)

// Play provides two styles of routers, one expects its actions to be injected, the
// other, legacy style, accesses its actions statically.
routesGenerator := InjectedRoutesGenerator

这是我的代码:

package controllers;

import java.io.IOException;
import java.util.HashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.util.Bytes;

import play.*;
import play.mvc.*;
import views.html.*;
public class Application extends Controller {

    /**
     * Writes a single demo cell (row "1", family "a", qualifier "b", value "4")
     * into the HBase table "sample", then renders the default index page.
     *
     * Connection parameters are hard-coded for the test cluster. Any failure
     * during the HBase round-trip is logged and the page is rendered anyway,
     * preserving the original best-effort behaviour.
     *
     * @return 200 OK with the rendered index template
     */
    public Result index() {
        String zooKeeperIp = "10.12.7.43";
        String zooKeeperPort = "2181";
        String hBaseMaster = "10.12.7.43:60000";

        Configuration hBaseConfig = HBaseConfiguration.create();
        hBaseConfig.set("hbase.zookeeper.quorum", zooKeeperIp);
        hBaseConfig.set("hbase.zookeeper.property.clientPort", zooKeeperPort);
        hBaseConfig.set("hbase.master", hBaseMaster);

        // try-with-resources closes both the table and the connection even on
        // failure; the original leaked both (neither was ever closed), and used
        // the deprecated HTable(Configuration, String) constructor instead of
        // obtaining the table from the Connection.
        try (Connection connection = ConnectionFactory.createConnection(hBaseConfig);
             Table table = connection.getTable(TableName.valueOf("sample"))) {
            Put p = new Put(Bytes.toBytes("1"));
            // Put#add is deprecated in HBase 1.0 but kept for exact parity
            // with the original snippet's cell layout.
            p.add(Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("4"));
            table.put(p);
        } catch (Exception e) {
            // Best-effort: log and continue so the page still renders.
            e.printStackTrace();
            System.out.println(e.getMessage());
        }
        return ok(index.render("Your new application is ready."));
    }

}

最佳答案

如我所见,问题出在依赖项上。
特别是 guava 库(这是 hadoop 的常见问题)。
如我所见,Play 使用较新版本的 guava。在新版 guava 中,Stopwatch 的无参构造函数不再是 public(变成了包私有),所以 hbase 访问它时抛出了 IllegalAccessError。

您可以通过多种方式解决这个问题(不幸的是,我知道的所有方式都是“hacky”)。

简单的方法是使用类似 zipkin 项目采用的 hack:在自己的代码中补充一个具有 public 构造函数的兼容 Stopwatch 类,让它优先于 guava 中的版本被加载。

另一种方法是以某种方式分离 HBase 操作。 (这需要大量的工作和设计更改)

如果 sbt 原生支持 'shading'(依赖重定位)会容易得多,但据我所知目前还不支持。
你仍然可以借助 sbt 插件(例如 sbt-assembly 的 shading 功能)来解决,就像 Spark 处理类似的 guava 冲突那样。

关于java - Cloudera Hbase 1.0.0 集成依赖冲突,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/31547463/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com