- android - RelativeLayout 背景可绘制重叠内容
- android - 如何链接 cpufeatures lib 以获取 native android 库?
- java - OnItemClickListener 不起作用,但 OnLongItemClickListener 在自定义 ListView 中起作用
- java - Android 文件转字符串
相关问题:@Testing multiple outputs with MRUnit,但其中的答案不适用于较新的 1.1.0 版本。
问题是如何设置多个命名输出,以便底层的模拟实现能够识别这些命名路径。我的目的是把相同的 reducer 记录写入 2 条路径。在常规 MR 作业中,我可以通过调用 MultipleOutputs.addNamedOutput(job, "mos", ...) 做到同样的事情。
当我尝试运行 mrunit 时,出现以下异常
Named output 'mos' not defined
java.lang.IllegalArgumentException: Named output 'mos' not defined
at org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.checkNamedOutputName(MultipleOutputs.java:256)
at org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.write(MultipleOutputs.java:426)
at TestMultipleOutputsAction$TestReducer$$anonfun$reduce$1.apply(TestMultipleOutputs.scala:48)
at TestMultipleOutputsAction$TestReducer$$anonfun$reduce$1.apply(TestMultipleOutputs.scala:47)
at scala.collection.Iterator$class.foreach(Iterator.scala:727)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
at TestMultipleOutputsAction$TestReducer.reduce(TestMultipleOutputs.scala:47)
at TestMultipleOutputsAction$TestReducer.reduce(TestMultipleOutputs.scala:35)
Scala 代码贴在下面。代码较为冗长,敬请见谅——我尽量把所有部分都包含进来,以便这段代码可以独立运行。
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver
import org.apache.hadoop.io._
import org.apache.hadoop.mapreduce.{Counters, TaskInputOutputContext, Reducer, Mapper}
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import org.scalatest.FunSuite
import org.apache.hadoop.io.SequenceFile.{Writer, Reader}
import java.nio.file.{Path, Paths, Files}
import com.typesafe.scalalogging.slf4j.Logging
import org.apache.hadoop.fs.{Path => HadoopFSPath}
object TestMultipleOutputsAction {

  /** Identity mapper: forwards every (key, value) pair unchanged to the shuffle. */
  class TestMapper extends Mapper[LongWritable, MapWritable, LongWritable, MapWritable] with Logging {
    override def setup(context: Mapper[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
    }
    override def cleanup(context: Mapper[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
    }
    override def map(key: LongWritable, value: MapWritable, context: Mapper[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      context.write(key, value)
    }
  }

  /**
   * Reducer that duplicates every incoming record to two base paths
   * ("outputPath1" and "outputPath2") under the named output "mos".
   */
  class TestReducer extends Reducer[LongWritable, MapWritable, LongWritable, MapWritable] with Logging {
    // Initialized in setup(); MultipleOutputs needs the live task context.
    var multipleOutputs: MultipleOutputs[LongWritable, MapWritable] = null

    override def setup(context: Reducer[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      multipleOutputs = new MultipleOutputs[LongWritable, MapWritable](
        context.asInstanceOf[TaskInputOutputContext[_, _, LongWritable, MapWritable]])
      super.setup(context)
    }

    override def cleanup(context: Reducer[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      // BUG FIX: MultipleOutputs manages its own record writers; without close()
      // here the named-output files are never flushed and data can be lost.
      if (multipleOutputs != null) {
        multipleOutputs.close()
      }
      super.cleanup(context)
    }

    override def reduce(key: LongWritable, values: java.lang.Iterable[MapWritable], context: Reducer[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      // Each value is written twice: once per target base path.
      values.foreach { value =>
        multipleOutputs.write("mos", key, value, "outputPath1")
        multipleOutputs.write("mos", key, value, "outputPath2")
      }
    }
  }
}
object TestHelper extends Logging {

  /**
   * Writes 10 records (key i -> {"mod2": i % 2}) into a temp SequenceFile and
   * returns the absolute path of the containing temp directory.
   */
  def generateInput(conf: Configuration, deleteOnExit: Boolean): String = {
    val dirPath = Files.createTempDirectory(Paths.get("/tmp"), "multiple_outputs")
    val filePath = Files.createTempFile(dirPath, "part-m-", ".0001")
    if (deleteOnExit) {
      // BUG FIX: register the directory too; previously only the file was cleaned up.
      dirPath.toFile.deleteOnExit()
      filePath.toFile.deleteOnExit()
    }
    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] ...")
    val seqFilePath = new HadoopFSPath(filePath.toFile.getAbsolutePath)
    val writer = SequenceFile.createWriter(conf,
      Writer.file(seqFilePath), Writer.keyClass(classOf[LongWritable]),
      Writer.valueClass(classOf[MapWritable]))
    try {
      for (i <- 1 to 10) {
        val mapWritable = new MapWritable()
        mapWritable.put(new Text("mod2"), new LongWritable(i % 2))
        writer.append(new LongWritable(i), mapWritable)
      }
    } finally {
      // BUG FIX: close in finally so a failed append does not leak the writer.
      writer.close()
    }
    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] completed")
    dirPath.toFile.getAbsolutePath
  }

  /**
   * Reads every part-m-* SequenceFile under `path` and feeds each
   * (LongWritable, MapWritable) record into the MRUnit driver as input.
   */
  def readInput(conf: Configuration
                , path: String
                , mapReduceDriver: MapReduceDriver[LongWritable, MapWritable, LongWritable, MapWritable, LongWritable, MapWritable]) {
    // BUG FIX: DirectoryStream is a closeable resource; it previously leaked.
    val entries = Files.newDirectoryStream(Paths.get(path), "part-m-*")
    var numRecords = 0
    try {
      entries.asScala.foreach(entry => {
        val entryName = entry.toFile.getName
        val absolutePath = entry.toFile.getAbsolutePath
        logger.debug(s"entry name : [${entryName}], absolute path : [${absolutePath}]")
        if (entryName.startsWith("part-m-")) {
          logger.debug(s"adding inputs from path : [${absolutePath}] ...")
          val hadoopPath = new HadoopFSPath(absolutePath)
          // BUG FIX: the SequenceFile.Reader was never closed before.
          val reader = new SequenceFile.Reader(conf, Reader.file(hadoopPath))
          try {
            // reader.next() deserializes into these reusable instances.
            val key = new LongWritable()
            val mapWritable = new MapWritable()
            var numFileRecords = 0
            while (reader.next(key, mapWritable)) {
              logger.debug(key + "\t" + mapWritable)
              mapReduceDriver.addInput(key, mapWritable)
              numFileRecords = numFileRecords + 1
              numRecords = numRecords + 1
            }
            logger.debug(s"adding inputs from path : [${absolutePath}] completed. num file records : [${numFileRecords}]")
          } finally {
            reader.close()
          }
        }
      })
    } finally {
      entries.close()
    }
    logger.debug(s"adding inputs from path : [${path}] completed. num records : [${numRecords}]")
  }

  /**
   * Serializes the MRUnit output pairs into a temp SequenceFile under `dirPath`.
   */
  def writeOutput(conf: Configuration, dirPath: Path, outputPairs: java.util.List[org.apache.hadoop.mrunit.types.Pair[LongWritable, MapWritable]], deleteOnExit: Boolean): Unit = {
    val filePath = Files.createTempFile(dirPath, "part-m-", ".0001")
    if (deleteOnExit) {
      filePath.toFile.deleteOnExit()
    }
    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] ...")
    val seqFilePath = new HadoopFSPath(filePath.toFile.getAbsolutePath)
    val writer = SequenceFile.createWriter(conf,
      Writer.file(seqFilePath), Writer.keyClass(classOf[LongWritable]),
      Writer.valueClass(classOf[MapWritable]))
    try {
      outputPairs.asScala.foreach(outputPair => {
        logger.debug(s"key : [${outputPair.getFirst}], value : [${outputPair.getSecond}]")
        writer.append(outputPair.getFirst, outputPair.getSecond)
      })
    } finally {
      // BUG FIX: close in finally so a failed append does not leak the writer.
      writer.close()
    }
    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] completed")
  }

  /** Logs every counter in every group at debug level (diagnostic aid only). */
  def checkCounters(counters: Counters): Unit = {
    counters.getGroupNames.asScala.foreach(groupName => {
      counters.getGroup(groupName).iterator().asScala.foreach(counter => {
        logger.debug(s"groupName: [${groupName}], counterName: [${counter.getName}], counterValue : [${counter.getValue}]")
      })
    })
  }
}
object TestMultipleOutputs extends FunSuite with Logging {

  /**
   * Drives the full mapper/reducer pipeline through MRUnit: registers the
   * named output "mos", loads the input SequenceFiles, runs the driver, and
   * persists + logs the results.
   */
  def testMultipleOutputs(conf: Configuration, inputPath: String, deleteOnExit: Boolean) {
    logger.info(s"TESTINPUT : input path : [${inputPath}] ...")
    val driver =
      new MapReduceDriver[LongWritable, MapWritable, LongWritable, MapWritable, LongWritable, MapWritable]()
        .withMapper(new TestMultipleOutputsAction.TestMapper)
        .withReducer(new TestMultipleOutputsAction.TestReducer)
    // Register the named output so MultipleOutputs.write("mos", ...) is recognized.
    driver.addMultiOutput("mos", classOf[LongWritable], classOf[MapWritable])
    val outputDir = Files.createTempDirectory(Paths.get("/tmp"), "pr_output")
    if (deleteOnExit) {
      outputDir.toFile.deleteOnExit
    }
    TestHelper.readInput(conf, inputPath, driver)
    val results = driver.run()
    TestHelper.writeOutput(conf, outputDir, results, deleteOnExit)
    TestHelper.checkCounters(driver.getCounters())
    logger.info(s"TESTINPUT : input path : [${inputPath}] completed")
  }
}
class TestMultipleOutputs extends FunSuite with Logging {

  /** End-to-end test: generate temp SequenceFile input, then run the MRUnit pipeline on it. */
  test("multiple outputs action") {
    val conf = new Configuration()
    val deleteOnExit = true
    val inputPath = TestHelper.generateInput(conf, deleteOnExit)
    TestMultipleOutputs.testMultipleOutputs(conf, inputPath, deleteOnExit)
  }
}
最佳答案
我在 Java 中遇到了同样的问题,并用
注释了我的单元测试@RunWith(PowerMockRunner.class)
@PrepareForTest(PricePerPlacementReducer.class)
在完成正确的导入(基本上是 PowerMock 1.5.1 及其 JUnit 绑定库)之后,这个问题就为我解决了。
关于scala - 使用 MRUnit 1.1.0 测试多个输出,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/24331793/
我正在使用 OUTFILE 命令,但由于权限问题和安全风险,我想将 shell 的输出转储到文件中,但出现了一些错误。我试过的 #This is a simple shell to connect t
我刚刚开始学习 Java,我想克服在尝试为这个“问题”创建 Java 程序时出现的障碍。这是我必须创建一个程序来解决的问题: Tandy 喜欢分发糖果,但只有 n 颗糖果。对于她给第 i 个糖果的人,
你好,我想知道我是否可以得到一些帮助来解决我在 C++ 中打印出 vector 内容的问题 我试图以特定顺序在一个或两个函数调用中输出一个类的所有变量。但是我在遍历 vector 时收到一个奇怪的错误
我正在将 intellij (2019.1.1) 用于 java gradle (5.4.1) 项目,并使用 lombok (1.18.6) 来自动生成代码。 Intellij 将生成的源放在 out
编辑:在与 guest271314 交流后,我意识到问题的措辞(在我的问题正文中)可能具有误导性。我保留了旧版本并更好地改写了新版本 背景: 从远程服务器获取 JSON 时,响应 header 包含一
我的问题可能有点令人困惑。我遇到的问题是我正在使用来自 Java 的 StoredProcedureCall 调用过程,例如: StoredProcedureCall call = new Store
在我使用的一些IDL中,我注意到在方法中标记返回值有2个约定-[in, out]和[out, retval]。 当存在多个返回值时,似乎使用了[in, out],例如: HRESULT MyMetho
当我查看 gar -h 的帮助输出时,它告诉我: [...] gar: supported targets: elf64-x86-64 elf32-i386 a.out-i386-linux [...
我想循环遍历一个列表,并以 HTML 格式打印其中的一部分,以代码格式打印其中的一部分。所以更准确地说:我想产生与这相同的输出 1 is a great number 2 is a great
我有下面的tekton管道,并尝试在Google Cloud上运行。集群角色绑定。集群角色。该服务帐户具有以下权限。。例外。不确定需要为服务帐户设置什么权限。
当尝试从 make 过滤非常长的输出以获取特定警告或错误消息时,第一个想法是这样的: $ make | grep -i 'warning: someone set up us the bomb' 然而
我正在创建一个抽象工具类,该类对另一组外部类(不受我控制)进行操作。外部类在某些接口(interface)点概念上相似,但访问它们相似属性的语法不同。它们还具有不同的语法来应用工具操作的结果。我创建了
这个问题已经有答案了: What do numbers starting with 0 mean in python? (9 个回答) 已关闭 7 年前。 在我的代码中使用按位与运算符 (&) 时,我
我写了这段代码来解析输入文件中的行输入格式:电影 ID 可以有多个条目,所以我们应该计算平均值输出:**没有重复(这是问题所在) import re f = open("ratings2.txt",
我需要处理超过 1000 万个光谱数据集。数据结构如下:大约有 1000 个 .fits(.fits 是某种数据存储格式)文件,每个文件包含大约 600-1000 个光谱,其中每个光谱中有大约 450
我编写了一个简单的 C 程序,它读取一个文件并生成一个包含每个单词及其出现频率的表格。 该程序有效,我已经能够在 Linux 上运行的终端中获得显示的输出,但是,我不确定如何获得生成的显示以生成包含词
很难说出这里要问什么。这个问题模棱两可、含糊不清、不完整、过于宽泛或夸夸其谈,无法以目前的形式得到合理的回答。如需帮助澄清此问题以便重新打开,visit the help center . 关闭 1
1.普通的输出: print(str)#str是任意一个字符串,数字··· 2.格式化输出: ?
我无法让 logstash 正常工作。 Basic logstash Example作品。但后来我与 Advanced Pipeline Example 作斗争.也许这也可能是 Elasticsear
这是我想要做的: 我想让用户给我的程序一些声音数据(通过麦克风输入),然后保持 250 毫秒,然后通过扬声器输出。 我已经使用 Java Sound API 做到了这一点。问题是它有点慢。从发出声音到
我是一名优秀的程序员,十分优秀!