hadoop - SerDe for a simple sequence file in Hive


I have a sequence file with Text keys and DoubleWritable values. When I load the file as an external table:

Create external table t (id String, data Double) STORED AS SEQUENCEFILE LOCATION '/output';

The table is created successfully. However, when I try to view the data with a select * statement, I get an exception:

"Failed with exception java.io.IOException:org.apache.hadoop.hive.serde2.SerDeException: class org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe: expects either BytesWritable or Text object!"

From the exception I understand that the SerDe is wrong. I tried to implement a SerDe for it but could not make it work. How should I implement a simple SerDe for this?
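For reference, a sequence file with the Text/DoubleWritable layout described above could be produced with something like the following sketch; the output path, class name, and records are placeholders, not from the original question.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class WriteDoubleSequenceFile {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical file under the external table location
        Path path = new Path("/output/part-00000");
        SequenceFile.Writer writer =
                SequenceFile.createWriter(fs, conf, path, Text.class, DoubleWritable.class);
        try {
            // One record per (id, data) pair
            writer.append(new Text("id-1"), new DoubleWritable(1.5));
            writer.append(new Text("id-2"), new DoubleWritable(2.5));
        } finally {
            writer.close();
        }
    }
}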

Best Answer

Solution: after fiddling with the input format for a while, I found one. 1) The exception occurs because Hive by default ignores the key of a sequence file, so only the DoubleWritable value reaches the SerDe, and matching it against the two-column schema fails (LazySimpleSerDe accepts only BytesWritable or Text, per the exception above).
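One quick way to confirm the file layout is to open it directly and inspect the key/value classes recorded in its header; a minimal sketch (the file name and class name are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;

public class InspectSequenceFile {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/output/part-00000"); // hypothetical file name
        FileSystem fs = path.getFileSystem(conf);
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
        // For the file in the question this prints org.apache.hadoop.io.Text
        // (the key Hive silently drops) and org.apache.hadoop.io.DoubleWritable.
        System.out.println(reader.getKeyClass());
        System.out.println(reader.getValueClass());
        reader.close();
    }
}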

I implemented a custom input format, starting with a generic record reader:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.util.ReflectionUtils;

public abstract class PSequenceFileKeyRecordReader<K, V> implements RecordReader<K, BytesWritable> {

    private SequenceFile.Reader in;
    private long start;
    private long end;
    private boolean more = true;
    protected Configuration conf;

    public PSequenceFileKeyRecordReader(Configuration conf, FileSplit split)
            throws IOException {
        Path path = split.getPath();
        FileSystem fs = path.getFileSystem(conf);
        this.in = new SequenceFile.Reader(fs, path, conf);
        this.end = split.getStart() + split.getLength();
        this.conf = conf;

        if (split.getStart() > in.getPosition())
            in.sync(split.getStart()); // sync to the first record after the split start

        this.start = in.getPosition();
        more = start < end;
    }

    public Class getKeyClass() { return in.getKeyClass(); }

    public Class getValueClass() { return in.getValueClass(); }

    @SuppressWarnings("unchecked")
    public K createKey() {
        return (K) ReflectionUtils.newInstance(getKeyClass(), conf);
    }

    public float getProgress() throws IOException {
        if (end == start) {
            return 0.0f;
        } else {
            return Math.min(1.0f, (in.getPosition() - start) / (float) (end - start));
        }
    }

    public synchronized long getPos() throws IOException {
        return in.getPosition();
    }

    protected synchronized void seek(long pos) throws IOException {
        in.seek(pos);
    }

    public synchronized void close() throws IOException {
        in.close();
    }

    @Override
    @SuppressWarnings("unchecked")
    public boolean next(K key, BytesWritable value) throws IOException {
        if (!more) return false;

        long pos = in.getPosition();
        // Read the real value from the file, then let the subclass merge
        // key and value into the single BytesWritable handed to Hive.
        V trueValue = (V) ReflectionUtils.newInstance(in.getValueClass(), conf);
        boolean remaining = in.next((Writable) key, (Writable) trueValue);
        if (remaining) combineKeyValue(key, trueValue, value);
        if (pos >= end && in.syncSeen()) {
            more = false;
        } else {
            more = remaining;
        }
        return more;
    }

    // Subclasses decide how the key and the original value are packed together.
    protected abstract void combineKeyValue(K key, V trueValue, BytesWritable newValue);
}
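Packing each record into a single BytesWritable is the crucial move here: as the exception above shows, the default LazySimpleSerDe accepts only BytesWritable or Text objects, so any reader that hands it the raw DoubleWritable will fail.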

The main reader class, which combines the Text key and the DoubleWritable value into a single BytesWritable:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;

public class DoubleTextReader extends PSequenceFileKeyRecordReader<Text, DoubleWritable> {

    public DoubleTextReader(Configuration conf, FileSplit split)
            throws IOException {
        super(conf, split);
    }

    @Override
    protected void combineKeyValue(Text key, DoubleWritable trueValue,
            BytesWritable newValue) {
        // Join key and value with \001 so the row splits back into two columns.
        StringBuilder builder = new StringBuilder();
        builder.append(key);
        builder.append('\001');
        builder.append(trueValue.get());
        byte[] bytes = builder.toString().getBytes();
        newValue.set(bytes, 0, bytes.length);
    }

    @Override
    public BytesWritable createValue() {
        return new BytesWritable();
    }
}
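The '\001' separator is no accident: it is Hive's default field delimiter (Ctrl-A), so LazySimpleSerDe splits the combined bytes back into the two columns without any explicit ROW FORMAT clause.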

The custom input format class:

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

public class PSequenceFileKeyInputFormat<K, V> extends FileInputFormat<K, V> {

    public PSequenceFileKeyInputFormat() {
        setMinSplitSize(SequenceFile.SYNC_INTERVAL);
    }

    @Override
    protected FileStatus[] listStatus(JobConf job) throws IOException {
        FileStatus[] files = super.listStatus(job);
        for (int i = 0; i < files.length; i++) {
            FileStatus file = files[i];
            if (file.isDir()) { // it's a MapFile
                Path dataFile = new Path(file.getPath(), MapFile.DATA_FILE_NAME);
                FileSystem fs = file.getPath().getFileSystem(job);
                // use the data file
                files[i] = fs.getFileStatus(dataFile);
            }
        }
        return files;
    }

    @SuppressWarnings("unchecked")
    public RecordReader<K, V> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
            throws IOException {
        reporter.setStatus(split.toString());
        return (RecordReader<K, V>) new DoubleTextReader(job, (FileSplit) split);
    }
}
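Note that although PSequenceFileKeyInputFormat is declared with generic K, V parameters, getRecordReader hard-codes DoubleTextReader, so as written it only serves Text/DoubleWritable files; other key/value combinations would need their own combineKeyValue subclass.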

The table can then be created with:

 Create external table t(id String, Bytes Double) STORED AS INPUTFORMAT 'PSequenceFileKeyInputFormat' OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat' location '/output';
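Since the input format and reader live in a custom jar, that jar must be on Hive's classpath before the table is queried; a minimal usage sketch (the jar path is a placeholder):

ADD JAR /path/to/custom-seqfile-reader.jar; -- hypothetical path to the compiled classes
select * from t limit 10;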

Regarding hadoop - SerDe for a simple sequence file in Hive, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/20898674/
