Related question: Testing multiple outputs with MRUnit — but the accepted answer there does not apply to the newer 1.1.0 release.
The question is how to set up multiple named outputs so that the underlying mock implementation recognizes the named paths. My reducer writes the same record to two paths. In a regular MR job I can do this by calling MultipleOutputs.addNamedOutput(job, "mos", ...).
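For reference, here is a minimal sketch of that registration in a regular job driver. The object and method names are placeholders, and the output format is assumed to be SequenceFileOutputFormat to match the test data below:

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.{LongWritable, MapWritable}
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.output.{MultipleOutputs, SequenceFileOutputFormat}

object DriverSketch {
  def configureJob(conf: Configuration): Job = {
    val job = Job.getInstance(conf)
    // Register the named output "mos"; without this registration a call to
    // MultipleOutputs.write("mos", ...) fails with "Named output 'mos' not defined".
    MultipleOutputs.addNamedOutput(job, "mos",
      classOf[SequenceFileOutputFormat[LongWritable, MapWritable]],
      classOf[LongWritable], classOf[MapWritable])
    job
  }
}

In a regular job this registration ends up in the job Configuration, which is what MultipleOutputs consults at write time.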
When I try to run this under MRUnit, I get the following exception:
Named output 'mos' not defined
java.lang.IllegalArgumentException: Named output 'mos' not defined
at org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.checkNamedOutputName(MultipleOutputs.java:256)
at org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.write(MultipleOutputs.java:426)
at TestMultipleOutputsAction$TestReducer$$anonfun$reduce$1.apply(TestMultipleOutputs.scala:48)
at TestMultipleOutputsAction$TestReducer$$anonfun$reduce$1.apply(TestMultipleOutputs.scala:47)
at scala.collection.Iterator$class.foreach(Iterator.scala:727)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
at TestMultipleOutputsAction$TestReducer.reduce(TestMultipleOutputs.scala:47)
at TestMultipleOutputsAction$TestReducer.reduce(TestMultipleOutputs.scala:35)
The Scala code is posted below. Apologies for its verbosity; I have included all the pieces so the code can be run standalone.
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver
import org.apache.hadoop.io._
import org.apache.hadoop.mapreduce.{Counters, TaskInputOutputContext, Reducer, Mapper}
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import org.scalatest.FunSuite
import org.apache.hadoop.io.SequenceFile.{Writer, Reader}
import java.nio.file.{Path, Paths, Files}
import com.typesafe.scalalogging.slf4j.Logging
import org.apache.hadoop.fs.{Path => HadoopFSPath}
object TestMultipleOutputsAction {

  class TestMapper extends Mapper[LongWritable, MapWritable, LongWritable, MapWritable] with Logging {
    override def setup(context: Mapper[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
    }

    override def cleanup(context: Mapper[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
    }

    override def map(key: LongWritable, value: MapWritable, context: Mapper[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      context.write(key, value)
    }
  }

  class TestReducer extends Reducer[LongWritable, MapWritable, LongWritable, MapWritable] with Logging {
    var multipleOutputs: MultipleOutputs[LongWritable, MapWritable] = null

    override def setup(context: Reducer[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      multipleOutputs = new MultipleOutputs[LongWritable, MapWritable](context.asInstanceOf[TaskInputOutputContext[_, _, LongWritable, MapWritable]])
      super.setup(context)
    }

    override def cleanup(context: Reducer[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      multipleOutputs.close()  // flush and close all named outputs
    }

    override def reduce(key: LongWritable, values: java.lang.Iterable[MapWritable], context: Reducer[LongWritable, MapWritable, LongWritable, MapWritable]#Context) {
      values.foreach(value => {
        multipleOutputs.write("mos", key, value, "outputPath1")
        multipleOutputs.write("mos", key, value, "outputPath2")
      })
    }
  }
}
object TestHelper extends Logging {

  def generateInput(conf: Configuration, deleteOnExit: Boolean): String = {
    val dirPath = Files.createTempDirectory(Paths.get("/tmp"), "multiple_outputs")
    val filePath = Files.createTempFile(dirPath, "part-m-", ".0001")
    if (deleteOnExit) {
      filePath.toFile.deleteOnExit()
    }
    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] ...")
    val seqFilePath = new HadoopFSPath(filePath.toFile.getAbsolutePath)
    val writer = SequenceFile.createWriter(conf,
      Writer.file(seqFilePath), Writer.keyClass(classOf[LongWritable]),
      Writer.valueClass(classOf[MapWritable]))
    for (i <- 1 to 10) {
      val mapWritable = new MapWritable()
      mapWritable.put(new Text("mod2"), new LongWritable(i % 2))
      writer.append(new LongWritable(i), mapWritable)
    }
    writer.close()
    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] completed")
    dirPath.toFile.getAbsolutePath
  }

  def readInput(conf: Configuration,
                path: String,
                mapReduceDriver: MapReduceDriver[LongWritable, MapWritable, LongWritable, MapWritable, LongWritable, MapWritable]) {
    val entries = Files.newDirectoryStream(Paths.get(path), "part-m-*")
    var numRecords = 0
    entries.asScala.foreach(entry => {
      val entryName = entry.toFile.getName
      val absolutePath = entry.toFile.getAbsolutePath
      logger.debug(s"entry name : [${entryName}], absolute path : [${absolutePath}]")
      val validEntry = entryName.startsWith("part-m-")
      if (validEntry) {
        logger.debug(s"adding inputs from path : [${absolutePath}] ...")
        val hadoopPath = new HadoopFSPath(absolutePath)
        val reader = new SequenceFile.Reader(conf, Reader.file(hadoopPath))
        val key = new LongWritable()
        val mapWritable = new MapWritable()
        var numFileRecords = 0
        while (reader.next(key, mapWritable)) {
          logger.debug(key + "\t" + mapWritable)
          mapReduceDriver.addInput(key, mapWritable)
          numFileRecords = numFileRecords + 1
          numRecords = numRecords + 1
        }
        logger.debug(s"adding inputs from path : [${absolutePath}] completed. num file records : [${numFileRecords}]")
      }
    })
    logger.debug(s"adding inputs from path : [${path}] completed. num records : [${numRecords}]")
  }

  def writeOutput(conf: Configuration, dirPath: Path, outputPairs: java.util.List[org.apache.hadoop.mrunit.types.Pair[LongWritable, MapWritable]], deleteOnExit: Boolean): Unit = {
    val filePath = Files.createTempFile(dirPath, "part-m-", ".0001")
    if (deleteOnExit) {
      filePath.toFile.deleteOnExit()
    }
    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] ...")
    val seqFilePath = new HadoopFSPath(filePath.toFile.getAbsolutePath)
    val writer = SequenceFile.createWriter(conf,
      Writer.file(seqFilePath), Writer.keyClass(classOf[LongWritable]),
      Writer.valueClass(classOf[MapWritable]))
    outputPairs.asScala.toSeq.foreach(outputPair => {
      logger.debug(s"key : [${outputPair.getFirst}], value : [${outputPair.getSecond}]")
      writer.append(outputPair.getFirst, outputPair.getSecond)
    })
    writer.close()
    logger.info(s"writing to path [${filePath.toFile.getAbsolutePath}] completed")
  }

  def checkCounters(counters: Counters): Unit = {
    counters.getGroupNames.asScala.foreach(groupName => {
      counters.getGroup(groupName).iterator().asScala.foreach(counter => {
        logger.debug(s"groupName: [${groupName}], counterName: [${counter.getName}], counterValue : [${counter.getValue}]")
      })
    })
  }
}
object TestMultipleOutputs extends FunSuite with Logging {
  def testMultipleOutputs(conf: Configuration, inputPath: String, deleteOnExit: Boolean) {
    logger.info(s"TESTINPUT : input path : [${inputPath}] ...")
    val mapReduceDriver = new MapReduceDriver[LongWritable, MapWritable, LongWritable, MapWritable, LongWritable, MapWritable]()
      .withMapper(new TestMultipleOutputsAction.TestMapper)
      .withReducer(new TestMultipleOutputsAction.TestReducer)
    mapReduceDriver.addMultiOutput("mos", classOf[LongWritable], classOf[MapWritable])
    val parentOutputPath = Files.createTempDirectory(Paths.get("/tmp"), "pr_output")
    if (deleteOnExit) {
      parentOutputPath.toFile.deleteOnExit()
    }
    TestHelper.readInput(conf, inputPath, mapReduceDriver)
    val outputPairs = mapReduceDriver.run()
    TestHelper.writeOutput(conf, parentOutputPath, outputPairs, deleteOnExit)
    TestHelper.checkCounters(mapReduceDriver.getCounters())
    logger.info(s"TESTINPUT : input path : [${inputPath}] completed")
  }
}
class TestMultipleOutputs extends FunSuite with Logging {
  test("multiple outputs action") {
    val deleteOnExit = true
    val conf = new Configuration()
    val inputPath = TestHelper.generateInput(conf, deleteOnExit)
    TestMultipleOutputs.testMultipleOutputs(conf, inputPath, deleteOnExit)
  }
}
Answer 0 (score: 1)
I ran into the same problem in Java. Annotating my unit test with

@RunWith(PowerMockRunner.class)
@PrepareForTest(PricePerPlacementReducer.class)

and doing the correct imports (basically, PowerMock version 1.5.1 and its JUnit binder) solved it for me.
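For what it's worth, here is a minimal sketch of that setup adapted to the Scala code above, assuming PowerMock 1.5.1 with its JUnit 4 binder (powermock-module-junit4) is on the test classpath. The class named in @PrepareForTest should be your own reducer under test (PricePerPlacementReducer was the answerer's class), and since it is unclear whether PowerMock plays well with ScalaTest suites, this sketch uses plain JUnit 4:

import org.apache.hadoop.conf.Configuration
import org.junit.Test
import org.junit.runner.RunWith
import org.powermock.core.classloader.annotations.PrepareForTest
import org.powermock.modules.junit4.PowerMockRunner

// Run the test through PowerMock so the reducer class can be instrumented.
@RunWith(classOf[PowerMockRunner])
@PrepareForTest(Array(classOf[TestMultipleOutputsAction.TestReducer]))
class TestMultipleOutputsPowerMock {
  @Test
  def multipleOutputsAction(): Unit = {
    val deleteOnExit = true
    val conf = new Configuration()
    val inputPath = TestHelper.generateInput(conf, deleteOnExit)
    TestMultipleOutputs.testMultipleOutputs(conf, inputPath, deleteOnExit)
  }
}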