gpt4 book ai didi

org.apache.hadoop.io.compress.zlib.ZlibDecompressor类的使用及代码示例

转载 作者:知者 更新时间:2024-03-14 15:12:49 28 4
gpt4 key购买 nike

本文整理了Java中org.apache.hadoop.io.compress.zlib.ZlibDecompressor类的一些代码示例,展示了ZlibDecompressor类的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ZlibDecompressor类的具体详情如下:
包路径:org.apache.hadoop.io.compress.zlib.ZlibDecompressor
类名称:ZlibDecompressor

ZlibDecompressor介绍

[英]A Decompressor based on the popular zlib compression algorithm. http://www.zlib.net/
[中]基于流行的 zlib 压缩算法的解压器。http://www.zlib.net/

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Picks the zlib decompressor implementation to use: the native
 * zlib-backed one when the native library has been loaded, otherwise the
 * pure-Java inflater bundled with the JDK.
 *
 * @param conf configuration used to probe for the native zlib binding
 * @return the appropriate implementation of the zlib decompressor
 */
public static Decompressor getZlibDecompressor(Configuration conf) {
  if (isNativeZlibLoaded(conf)) {
    return new ZlibDecompressor();
  }
  return new BuiltInZlibInflater();
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Reports how many compressed bytes have been fed into this decompressor
 * so far.
 *
 * @return the total (non-negative) number of compressed bytes input so far
 */
public long getBytesRead() {
  checkStream();  // fail fast if the native stream was already released
  final long compressedSoFar = getBytesRead(stream);
  return compressedSoFar;
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Reports how many uncompressed bytes this decompressor has produced
 * so far.
 *
 * @return the total (non-negative) number of uncompressed bytes output so far
 */
public long getBytesWritten() {
  checkStream();  // fail fast if the native stream was already released
  final long producedSoFar = getBytesWritten(stream);
  return producedSoFar;
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Returns the number of unprocessed input bytes: those still sitting in
 * the caller-supplied buffer plus those buffered on the native side.
 * Normally called once finished() is true, to measure any trailing
 * post-gzip-stream data.
 *
 * @return the total (non-negative) number of unprocessed bytes in input
 */
@Override
public int getRemaining() {
  checkStream();
  final int nativePending = getRemaining(stream);  // compressedDirectBuf side
  return nativePending + userBufLen;               // plus the userBuf side
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

/**
 * Round-trips {@code rawData} through the supplied zlib compressor and
 * decompressor, asserting the byte counters and input-demand flags at
 * each stage.
 *
 * @param rawData the plaintext to compress and then decompress
 * @param zlibCompressor a freshly reset compressor
 * @param zlibDecompressor a freshly reset decompressor
 * @return the decompressed bytes (should equal {@code rawData})
 * @throws IOException if compression or decompression fails
 */
private byte[] compressDecompressZlib(byte[] rawData,
    ZlibCompressor zlibCompressor, ZlibDecompressor zlibDecompressor)
    throws IOException {
  final int rawLen = rawData.length;
  byte[] compressed = new byte[rawLen];
  byte[] roundTripped = new byte[rawLen];
  // Feed all input at once, then drain until the compressor reports done.
  zlibCompressor.setInput(rawData, 0, rawLen);
  zlibCompressor.finish();
  int compressedSize = 0;
  while (!zlibCompressor.finished()) {
    compressedSize = zlibCompressor.compress(compressed, 0, compressed.length);
  }
  zlibCompressor.reset();
  // A fresh decompressor has zeroed counters and wants input.
  assertTrue(zlibDecompressor.getBytesWritten() == 0);
  assertTrue(zlibDecompressor.getBytesRead() == 0);
  assertTrue(zlibDecompressor.needsInput());
  zlibDecompressor.setInput(compressed, 0, compressedSize);
  assertFalse(zlibDecompressor.needsInput());
  while (!zlibDecompressor.finished()) {
    zlibDecompressor.decompress(roundTripped, 0, roundTripped.length);
  }
  // Counters must reflect the full round trip before the reset clears them.
  assertTrue(zlibDecompressor.getBytesWritten() == rawLen);
  assertTrue(zlibDecompressor.getBytesRead() == compressedSize);
  zlibDecompressor.reset();
  assertTrue(zlibDecompressor.getRemaining() == 0);
  assertArrayEquals(
    "testZlibCompressorDecompressorWithConfiguration array equals error",
    rawData, roundTripped);
  return roundTripped;
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Resets everything, including the input buffers (user and direct), so
 * this decompressor can be reused on a new stream.
 */
@Override
public void reset() {
  checkStream();
  reset(stream);  // reset the native inflater first
  // Clear decode-state flags.
  finished = false;
  needDict = false;
  // Drop any buffered compressed input, on both the direct and user sides.
  compressedDirectBufOff = 0;
  compressedDirectBufLen = 0;
  userBufOff = 0;
  userBufLen = 0;
  // Mark the uncompressed buffer fully drained; set the limit before the
  // position so the position assignment is always legal.
  uncompressedDirectBuf.limit(directBufferSize);
  uncompressedDirectBuf.position(directBufferSize);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

try {
 ZlibCompressor compressor = new ZlibCompressor();
 ZlibDecompressor decompressor = new ZlibDecompressor();
 assertFalse("testZlibCompressDecompress finished error",
   compressor.finished());
   "testZlibCompressDecompress compressed size no less then original size",
   cSize < rawDataSize);
 decompressor.setInput(compressedResult, 0, cSize);
 byte[] decompressedBytes = new byte[rawDataSize];
 decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
 assertArrayEquals("testZlibCompressDecompress arrays not equals ",
   rawData, decompressedBytes);
 compressor.reset();
 decompressor.reset();
} catch (IOException ex) {
 fail("testZlibCompressDecompress ex !!!" + ex);

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Releases the native zlib stream, if one is still open. Safe to call
 * repeatedly: the handle is zeroed after the first release.
 */
@Override
public void end() {
  if (stream == 0) {
    return;  // already released
  }
  end(stream);
  stream = 0;
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

int n = 0;
try {
 n = inflateBytesDirect();
 presliced.position(presliced.position() + n);
 if (compressedDirectBufLen > 0) {

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Resets the decompressor state via the superclass, then marks the input
 * as exhausted so callers must supply a new stream before decompressing.
 */
@Override
public void reset() {
 super.reset();
 endOfInput = true;
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Creates a new decompressor backed by a native zlib stream.
 *
 * @param header compression header style; its window bits configure the
 *               native inflater (presumably selects zlib vs. gzip
 *               framing — confirm against CompressionHeader)
 * @param directBufferSize capacity, in bytes, of each direct NIO buffer
 */
public ZlibDecompressor(CompressionHeader header, int directBufferSize) {
  this.directBufferSize = directBufferSize;
  this.header = header;
  this.uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
  this.compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
  // Position at the end marks the uncompressed buffer as fully consumed.
  this.uncompressedDirectBuf.position(directBufferSize);
  // Open the native stream with the header's window settings.
  this.stream = init(this.header.windowBits());
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Reports completion only when the caller has signalled end of input AND
 * the superclass decompressor has finished draining the stream.
 */
@Override
public boolean finished() {
 return (endOfInput && super.finished());
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

/**
 * Compresses {@code rawData} with the given compressor, feeds the result
 * back through the given decompressor, and verifies the decompressor's
 * counters and input-demand flags along the way.
 *
 * @param rawData input bytes to round-trip
 * @param zlibCompressor compressor to exercise (reset on entry)
 * @param zlibDecompressor decompressor to exercise (reset on entry)
 * @return the decompressed output
 * @throws IOException on compression/decompression failure
 */
private byte[] compressDecompressZlib(byte[] rawData,
    ZlibCompressor zlibCompressor, ZlibDecompressor zlibDecompressor)
    throws IOException {
  byte[] packed = new byte[rawData.length];
  byte[] unpacked = new byte[rawData.length];
  int packedLen = 0;
  // Compress: supply everything, signal finish, drain to completion.
  zlibCompressor.setInput(rawData, 0, rawData.length);
  zlibCompressor.finish();
  while (!zlibCompressor.finished()) {
    packedLen = zlibCompressor.compress(packed, 0, packed.length);
  }
  zlibCompressor.reset();
  // Pristine decompressor: no bytes accounted, input wanted.
  assertTrue(zlibDecompressor.getBytesWritten() == 0);
  assertTrue(zlibDecompressor.getBytesRead() == 0);
  assertTrue(zlibDecompressor.needsInput());
  zlibDecompressor.setInput(packed, 0, packedLen);
  assertFalse(zlibDecompressor.needsInput());
  while (!zlibDecompressor.finished()) {
    zlibDecompressor.decompress(unpacked, 0, unpacked.length);
  }
  // Byte counters must match both directions of the round trip.
  assertTrue(zlibDecompressor.getBytesWritten() == rawData.length);
  assertTrue(zlibDecompressor.getBytesRead() == packedLen);
  zlibDecompressor.reset();
  assertTrue(zlibDecompressor.getRemaining() == 0);
  assertArrayEquals(
    "testZlibCompressorDecompressorWithConfiguration array equals error",
    rawData, unpacked);
  return unpacked;
}

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Resets the decompressor for reuse on a fresh stream, clearing both the
 * user-supplied and direct input buffers. Declared synchronized, matching
 * the original API.
 */
public synchronized void reset() {
  checkStream();
  reset(stream);  // reset the native inflater before clearing Java state
  finished = false;
  needDict = false;
  userBufOff = 0;
  userBufLen = 0;
  compressedDirectBufOff = 0;
  compressedDirectBufLen = 0;
  // Leave the uncompressed buffer looking fully drained; limit must be
  // set before position so the position update is always valid.
  uncompressedDirectBuf.limit(directBufferSize);
  uncompressedDirectBuf.position(directBufferSize);
}

代码示例来源:origin: io.hops/hadoop-common

/**
 * Counts the input bytes not yet consumed — the bytes remaining in the
 * caller's buffer plus those held by the native stream. Typically queried
 * after finished() returns true, to size any post-gzip-stream data.
 *
 * @return the total (non-negative) number of unprocessed bytes in input
 */
@Override
public int getRemaining() {
  checkStream();
  int pendingInNative = getRemaining(stream);  // compressedDirectBuf side
  int pendingInUserBuf = userBufLen;           // caller-supplied side
  return pendingInUserBuf + pendingInNative;
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

try {
 ZlibCompressor compressor = new ZlibCompressor();
 ZlibDecompressor decompressor = new ZlibDecompressor();
 assertFalse("testZlibCompressDecompress finished error",
   compressor.finished());
   "testZlibCompressDecompress compressed size no less then original size",
   cSize < rawDataSize);
 decompressor.setInput(compressedResult, 0, cSize);
 byte[] decompressedBytes = new byte[rawDataSize];
 decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
 assertArrayEquals("testZlibCompressDecompress arrays not equals ",
   rawData, decompressedBytes);
 compressor.reset();
 decompressor.reset();
} catch (IOException ex) {
 fail("testZlibCompressDecompress ex !!!" + ex);

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Last-resort release of the native stream via {@link #end()} when the
 * object is garbage collected.
 * NOTE(review): finalize() is deprecated since Java 9; callers should
 * invoke end() explicitly rather than rely on GC timing.
 */
@Override
protected void finalize() {
 end();
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

n = inflateBytesDirect();
uncompressedDirectBuf.limit(n);

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

/**
 * Resets the decompressor state via the superclass, then marks the input
 * as exhausted so callers must supply a new stream before decompressing.
 */
@Override
public void reset() {
 super.reset();
 endOfInput = true;
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

/**
 * Creates a new decompressor.
 *
 * @param header header style whose window bits configure the native
 *               inflater — presumably zlib vs. gzip framing; confirm
 *               against CompressionHeader
 * @param directBufferSize size, in bytes, of each direct buffer
 */
public ZlibDecompressor(CompressionHeader header, int directBufferSize) {
  this.directBufferSize = directBufferSize;
  this.header = header;
  uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
  compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
  // An exhausted position marks the uncompressed buffer as empty.
  uncompressedDirectBuf.position(directBufferSize);
  // Open the native zlib stream with the header's window settings.
  stream = init(this.header.windowBits());
}

28 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com