java - Cassandra Hadoop MapReduce: java.lang.ClassCastException: java.util.HashMap cannot be cast to java.nio.ByteBuffer


I am trying to create a MapReduce job with Apache Cassandra. The input data comes from Cassandra, and the output goes back to Cassandra as well.

The program selects all data from a table named tweetstore and then inserts, for each user name, the number of matching rows.

Here is the main class of the MapReduce job:

package com.cassandra.hadoop;
import java.io.*;
import java.lang.*;
import java.util.*;
import java.nio.ByteBuffer;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.cassandra.hadoop.ColumnFamilyInputFormat;
import org.apache.cassandra.hadoop.ColumnFamilyOutputFormat;
import org.apache.cassandra.hadoop.ConfigHelper;
import org.apache.cassandra.hadoop.cql3.*;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexOperator;
import org.apache.cassandra.utils.ByteBufferUtil;

public class App
{
    static final String KEYSPACE_NAME = "tweet_cassandra_map_reduce";
    static final String INPUT_COLUMN_FAMILY = "tweetstore";
    static final String OUTPUT_COLUMN_FAMILY = "tweetcount";
    static final String COLUMN_NAME = "user";

    public static void main( String[] args ) throws IOException, InterruptedException, ClassNotFoundException
    {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        Job job = new Job(conf, "tweet count");
        job.setJarByClass(App.class);

        // Mapper configuration.
        job.setMapperClass(TweetMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setInputFormatClass(ColumnFamilyInputFormat.class);

        // Reducer configuration.
        job.setReducerClass(TweetAggregator.class);
        job.setOutputKeyClass(ByteBuffer.class);
        job.setOutputValueClass(List.class);
        job.setOutputFormatClass(ColumnFamilyOutputFormat.class);

        // Cassandra input column family configuration.
        ConfigHelper.setInputRpcPort(job.getConfiguration(), "9160");
        ConfigHelper.setInputInitialAddress(job.getConfiguration(), "localhost");
        ConfigHelper.setInputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
        ConfigHelper.setInputColumnFamily(job.getConfiguration(), KEYSPACE_NAME, INPUT_COLUMN_FAMILY);

        job.setInputFormatClass(ColumnFamilyInputFormat.class);
        SlicePredicate slicePredicate = new SlicePredicate();
        slicePredicate.setSlice_range(new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE));

        // Prepare the index expression.
        IndexExpression ixpr = new IndexExpression();
        ixpr.setColumn_name(ByteBufferUtil.bytes(COLUMN_NAME));
        ixpr.setOp(IndexOperator.EQ);
        ixpr.setValue(ByteBufferUtil.bytes(otherArgs.length > 0 && !StringUtils.isBlank(otherArgs[0]) ? otherArgs[0] : "mevivs"));

        List<IndexExpression> ixpressions = new ArrayList<IndexExpression>();
        ixpressions.add(ixpr);
        ConfigHelper.setInputRange(job.getConfiguration(), ixpressions);
        ConfigHelper.setInputSlicePredicate(job.getConfiguration(), slicePredicate);

        // Cassandra output column family configuration.
        ConfigHelper.setOutputRpcPort(job.getConfiguration(), "9160");
        ConfigHelper.setOutputInitialAddress(job.getConfiguration(), "localhost");
        ConfigHelper.setOutputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
        ConfigHelper.setOutputColumnFamily(job.getConfiguration(), KEYSPACE_NAME, OUTPUT_COLUMN_FAMILY);
        job.setOutputFormatClass(ColumnFamilyOutputFormat.class);
        job.getConfiguration().set("row_key", "key");
        job.waitForCompletion(true);
    }
}

The mapper code:

package com.cassandra.hadoop;
import java.io.*;
import java.lang.*;
import java.util.*;
import java.nio.ByteBuffer;
import java.util.SortedMap;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.cassandra.db.Column;
import org.apache.cassandra.utils.ByteBufferUtil;

public class TweetMapper extends Mapper<ByteBuffer, SortedMap<ByteBuffer, Column>, Text, IntWritable>
{
    static final String COLUMN_NAME = App.COLUMN_NAME;
    private final static IntWritable one = new IntWritable(1);

    /* (non-Javadoc)
     * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, org.apache.hadoop.mapreduce.Mapper.Context)
     */
    public void map(ByteBuffer key, SortedMap<ByteBuffer, Column> columns, Context context) throws IOException, InterruptedException
    {
        Column column = columns.get(ByteBufferUtil.bytes(COLUMN_NAME));
        String value = ByteBufferUtil.string(column.value());
        context.write(new Text(value), one);
    }
}

The reducer code:

package com.cassandra.hadoop;
import java.io.IOException;
import java.util.*;
import java.lang.*;
import java.io.*;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.Mutation;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.db.marshal.Int32Type;

public class TweetAggregator extends Reducer<Text, IntWritable, Map<String, ByteBuffer>, List<ByteBuffer>>
{
    private static Map<String, ByteBuffer> keys = new HashMap<>();

    public void reduce(Text word, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
    {
        int sum = 0;
        for (IntWritable val : values)
            sum += val.get();
        System.out.println("writing");
        keys.put("key", ByteBufferUtil.bytes(word.toString()));
        context.write(keys, getBindVariables(word, sum));
    }

    private List<ByteBuffer> getBindVariables(Text word, int sum)
    {
        List<ByteBuffer> variables = new ArrayList<ByteBuffer>();
        variables.add(Int32Type.instance.decompose(sum));
        return variables;
    }
}

The problem is this error, which appears during the reduce step when I execute the job with the hadoop command:

15/02/14 16:53:13 WARN hadoop.AbstractColumnFamilyInputFormat: ignoring  jobKeyRange specified without start_key
15/02/14 16:53:14 INFO mapred.JobClient: Running job: job_201502141652_0001
15/02/14 16:53:15 INFO mapred.JobClient: map 0% reduce 0%
15/02/14 16:53:20 INFO mapred.JobClient: map 66% reduce 0%
15/02/14 16:53:22 INFO mapred.JobClient: map 100% reduce 0%
15/02/14 16:53:28 INFO mapred.JobClient: map 100% reduce 33%
15/02/14 16:53:30 INFO mapred.JobClient: Task Id : attempt_201502141652_0001_r_000000_0, Status : FAILED
java.lang.ClassCastException: java.util.HashMap cannot be cast to java.nio.ByteBuffer
at org.apache.cassandra.hadoop.ColumnFamilyRecordWriter.write(ColumnFamilyRecordWriter.java:50)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:588)
at org.apache.hadoop.mapreduce.TaskInputOutputContext.write(TaskInputOutputContext.java:80)
at com.cassandra.hadoop.TweetAggregator.reduce(TweetAggregator.java:40)
at com.cassandra.hadoop.TweetAggregator.reduce(TweetAggregator.java:20)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:176)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:650)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:418)
at org.apache.hadoop.mapred.Child$4.run(Child.java:255)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1136)
at org.apache.hadoop.mapred.Child.main(Child.java:249)
attempt_201502141652_0001_r_000000_0: writing

Please help! Thanks.

Best answer

It looks like your job setup declares ByteBuffer as the reducer's output key, rather than a Map<>. Try changing this in your job setup:

job.setOutputKeyClass(ByteBuffer.class);

to this (a class literal cannot carry type parameters, so the raw Map class stands in for Map<String,ByteBuffer>):

job.setOutputKeyClass(Map.class);

In any case, the classes passed to the job.set... calls need to match the generic type parameters of your mapper and reducer, so check that they line up.
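
For illustration, here is a minimal sketch of the alignment the answer suggests, assuming the reducer keeps its Reducer<Text, IntWritable, Map<String,ByteBuffer>, List<ByteBuffer>> signature from the question; the helper class and method names below are made up for the example:

package com.cassandra.hadoop;

import java.util.List;
import java.util.Map;

import org.apache.cassandra.hadoop.ColumnFamilyOutputFormat;
import org.apache.hadoop.mapreduce.Job;

// Hypothetical helper, not part of the original code.
public class ReducerOutputConfig
{
    // Declare output key/value classes that match the reducer's generic type
    // parameters. Class literals cannot be parameterized, so the raw Map and
    // List classes are used here.
    static void configureReducerOutput(Job job)
    {
        job.setReducerClass(TweetAggregator.class);
        job.setOutputKeyClass(Map.class);
        job.setOutputValueClass(List.class);
        job.setOutputFormatClass(ColumnFamilyOutputFormat.class);
    }
}

Even with the declared classes aligned, note that the stack trace points at ColumnFamilyRecordWriter, whose Thrift-based contract is ByteBuffer keys with List<Mutation> values; a Map<String,ByteBuffer> key with List<ByteBuffer> bound values matches the cql3 CqlOutputFormat contract that the code already imports, so it may also be worth checking which output format the reducer was really written against.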

Regarding java - Cassandra Hadoop MapReduce: java.lang.ClassCastException: java.util.HashMap cannot be cast to java.nio.ByteBuffer, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/28517730/
