gpt4 book ai didi

scala - 使用 MRUnit 1.1.0 测试多个输出

转载 作者:可可西里 更新时间:2023-11-01 17:00:45 25 4
gpt4 key购买 nike

相关问题：@Testing multiple outputs with MRUnit，但其答案不适用于较新的版本 1.1.0。

问题是如何设置多个命名输出，以便底层模拟实现能够识别命名路径。我希望把相同的 reducer 记录写入 2 条路径。在常规 MR 作业中，我可以通过调用 MultipleOutputs.addNamedOutput(job, "mos", ...) 做到同样的事情。

当我尝试运行 mrunit 时,出现以下异常

Named output 'mos' not defined
java.lang.IllegalArgumentException: Named output 'mos' not defined
at org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.checkNamedOutputName(MultipleOutputs.java:256)
at org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.write(MultipleOutputs.java:426)
at TestMultipleOutputsAction$TestReducer$$anonfun$reduce$1.apply(TestMultipleOutputs.scala:48)
at TestMultipleOutputsAction$TestReducer$$anonfun$reduce$1.apply(TestMultipleOutputs.scala:47)
at scala.collection.Iterator$class.foreach(Iterator.scala:727)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
at TestMultipleOutputsAction$TestReducer.reduce(TestMultipleOutputs.scala:47)
at TestMultipleOutputsAction$TestReducer.reduce(TestMultipleOutputs.scala:35)

scala代码贴在这里。为代码的冗长道歉。我尝试拉入所有部分,以便于独立运行代码。

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver
import org.apache.hadoop.io._
import org.apache.hadoop.mapreduce.{Counters, TaskInputOutputContext, Reducer, Mapper}
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import org.scalatest.FunSuite
import org.apache.hadoop.io.SequenceFile.{Writer, Reader}
import java.nio.file.{Path, Paths, Files}
import com.typesafe.scalalogging.slf4j.Logging
import org.apache.hadoop.fs.{Path => HadoopFSPath}

object TestMultipleOutputsAction {
  /** Identity mapper: forwards every (key, value) pair unchanged to the reducer. */
  class TestMapper extends Mapper[LongWritable, MapWritable, LongWritable, MapWritable] with Logging {
    override def setup(context: Mapper[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
    }

    override def cleanup(context: Mapper[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
    }

    override def map(key: LongWritable, value: MapWritable, context: Mapper[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      context.write(key, value)
    }
  }

  /** Reducer that writes every incoming value twice through the named output
   *  "mos" — once under "outputPath1" and once under "outputPath2". */
  class TestReducer extends Reducer[LongWritable, MapWritable, LongWritable, MapWritable] with Logging {
    var multipleOutputs: MultipleOutputs[LongWritable, MapWritable] = null

    override def setup(context: Reducer[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      // Cast is required: the MultipleOutputs constructor takes a
      // TaskInputOutputContext, not the Reducer#Context path-dependent type.
      multipleOutputs = new MultipleOutputs[LongWritable, MapWritable](context.asInstanceOf[TaskInputOutputContext[_, _, LongWritable, MapWritable]])
      super.setup(context)
    }

    override def cleanup(context: Reducer[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      // fix: MultipleOutputs must be closed in cleanup, otherwise the named
      // outputs' record writers are never flushed/closed.
      if (multipleOutputs != null) {
        multipleOutputs.close()
      }
    }

    override def reduce(key: LongWritable, values: java.lang.Iterable[MapWritable], context: Reducer[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      // fix: use explicit .asScala (JavaConverters) instead of relying on the
      // deprecated implicit JavaConversions wildcard import.
      values.asScala.foreach { value =>
        multipleOutputs.write("mos", key, value, "outputPath1")
        multipleOutputs.write("mos", key, value, "outputPath2")
      }
    }
  }
}

object TestHelper extends Logging {
  /** Creates a temp directory under /tmp containing one SequenceFile
   *  ("part-m-*") of 10 (LongWritable, MapWritable) records, where each value
   *  maps "mod2" -> i % 2. Returns the directory's absolute path.
   *
   *  @param deleteOnExit if true, the generated file is registered for JVM-exit deletion
   */
  def generateInput(conf: Configuration, deleteOnExit: Boolean): String = {
    val dirPath = Files.createTempDirectory(Paths.get("/tmp"), "multiple_outputs")

    val filePath = Files.createTempFile(dirPath, "part-m-", ".0001")

    if (deleteOnExit) {
      // NOTE(review): only the file is registered for deletion; the containing
      // temp directory itself is left behind — confirm whether that is intended.
      filePath.toFile.deleteOnExit()
    }

    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] ...")

    val seqFilePath = new HadoopFSPath(filePath.toFile.getAbsolutePath)
    val writer = SequenceFile.createWriter(conf,
      Writer.file(seqFilePath), Writer.keyClass(classOf[LongWritable]),
      Writer.valueClass(classOf[MapWritable]))

    try {
      for (i <- 1 to 10) {
        val mapWritable = new MapWritable()
        mapWritable.put(new Text("mod2"), new LongWritable(i % 2))

        writer.append(new LongWritable(i), mapWritable)
      }
    } finally {
      // fix: close the writer even if append throws
      writer.close()
    }

    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] completed")

    dirPath.toFile.getAbsolutePath
  }

  /** Streams every record of each "part-m-*" SequenceFile under `path` into
   *  the MRUnit driver as test input. */
  def readInput(conf: Configuration
                , path: String
                , mapReduceDriver: MapReduceDriver[LongWritable, MapWritable, LongWritable, MapWritable, LongWritable, MapWritable]) {
    val entries = Files.newDirectoryStream(Paths.get(path), "part-m-*")
    var numRecords = 0

    entries.asScala.foreach(entry => {
      val entryName = entry.toFile.getName
      val absolutePath = entry.toFile.getAbsolutePath

      logger.debug(s"entry name : [${entryName}], absolute path : [${absolutePath}]")

      if (entryName.startsWith("part-m-")) {
        logger.debug(s"adding inputs from path : [${absolutePath}] ...")

        val hadoopPath = new HadoopFSPath(absolutePath)
        val reader = new SequenceFile.Reader(conf, Reader.file(hadoopPath))

        // Reusable buffers: SequenceFile.Reader.next() fills these in place.
        val key = new LongWritable()
        val mapWritable = new MapWritable()
        var numFileRecords = 0

        try {
          while (reader.next(key, mapWritable)) {
            logger.debug(key + "\t" + mapWritable)

            // fix: copy key/value before handing them to the driver. The
            // original passed the same mutable instances on every iteration,
            // so every stored driver input aliased the last record read.
            mapReduceDriver.addInput(new LongWritable(key.get), new MapWritable(mapWritable))

            numFileRecords = numFileRecords + 1
            numRecords = numRecords + 1
          }
        } finally {
          // fix: the reader was never closed (file-handle leak)
          reader.close()
        }

        logger.debug(s"adding inputs from path : [${absolutePath}] completed. num file records : [${numFileRecords}]")
      }
    })

    logger.debug(s"adding inputs from path : [${path}] completed. num records : [${numRecords}]")
  }

  /** Writes the driver's output pairs to a temp SequenceFile under `dirPath`. */
  def writeOutput(conf: Configuration, dirPath: Path, outputPairs: java.util.List[org.apache.hadoop.mrunit.types.Pair[LongWritable, MapWritable]], deleteOnExit: Boolean): Unit = {
    val filePath = Files.createTempFile(dirPath, "part-m-", ".0001")

    if (deleteOnExit) {
      filePath.toFile.deleteOnExit()
    }

    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] ...")

    val seqFilePath = new HadoopFSPath(filePath.toFile.getAbsolutePath)
    val writer = SequenceFile.createWriter(conf,
      Writer.file(seqFilePath), Writer.keyClass(classOf[LongWritable]),
      Writer.valueClass(classOf[MapWritable]))

    try {
      outputPairs.asScala.foreach(outputPair => {
        logger.debug(s"key : [${outputPair.getFirst}], value : [${outputPair.getSecond}]")
        writer.append(outputPair.getFirst, outputPair.getSecond)
      })
    } finally {
      // fix: close the writer even if append throws
      writer.close()
    }

    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] completed")
  }

  /** Logs every counter in every counter group at debug level (diagnostics only;
   *  performs no assertions despite the name). */
  def checkCounters(counters: Counters): Unit = {
    counters.getGroupNames.asScala.foreach(groupName => {
      counters.getGroup(groupName).iterator().asScala.foreach(counter => {
        logger.debug(s"groupName: [${groupName}], counterName: [${counter.getName}], counterValue : [${counter.getValue}]")
      })
    })
  }
}

object TestMultipleOutputs extends FunSuite with Logging {
  /** Builds an MRUnit MapReduceDriver around TestMapper/TestReducer, registers
   *  the named output "mos", feeds it the SequenceFile records found under
   *  `inputPath`, runs the pipeline and persists/logs the results. */
  def testMultipleOutputs(conf: Configuration, inputPath: String, deleteOnExit: Boolean) {
    logger.info(s"TESTINPUT : input path : [${inputPath}] ...")

    val driver =
      new MapReduceDriver[LongWritable, MapWritable, LongWritable, MapWritable, LongWritable, MapWritable]()
        .withMapper(new TestMultipleOutputsAction.TestMapper)
        .withReducer(new TestMultipleOutputsAction.TestReducer)

    // Register the named output so the mock context recognizes "mos".
    driver.addMultiOutput("mos", classOf[LongWritable], classOf[MapWritable])

    val outputDir = Files.createTempDirectory(Paths.get("/tmp"), "pr_output")
    if (deleteOnExit) {
      outputDir.toFile.deleteOnExit
    }

    TestHelper.readInput(conf, inputPath, driver)

    val resultPairs = driver.run()

    TestHelper.writeOutput(conf, outputDir, resultPairs, deleteOnExit)
    TestHelper.checkCounters(driver.getCounters())

    logger.info(s"TESTINPUT : input path : [${inputPath}] completed")
  }
}

class TestMultipleOutputs extends FunSuite with Logging {
  // End-to-end test: generate SequenceFile input, then run the MR pipeline
  // through the MRUnit driver.
  test("multiple outputs action") {
    val cleanupOnExit = true
    val conf = new Configuration()

    val generatedInputPath = TestHelper.generateInput(conf, cleanupOnExit)
    TestMultipleOutputs.testMultipleOutputs(conf, generatedInputPath, cleanupOnExit)
  }
}

最佳答案

我在 Java 中遇到了同样的问题，通过给我的单元测试加上以下注解解决了它：

@RunWith(PowerMockRunner.class)
@PrepareForTest(PricePerPlacementReducer.class)

在添加了正确的导入（基本上是 PowerMock 1.5.1 及其 JUnit 绑定程序）之后，问题就解决了。

关于scala - 使用 MRUnit 1.1.0 测试多个输出,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/24331793/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com