
Usage of the org.apache.hadoop.io.compress.zlib.ZlibCompressor class, with code examples

Reposted · Author: 知者 · Updated: 2024-03-14 15:46:49

This article collects a number of Java code examples for org.apache.hadoop.io.compress.zlib.ZlibCompressor and shows how the class is used in practice. The examples are drawn from selected open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, and are intended as practical reference material. Details of the class:
Package: org.apache.hadoop.io.compress.zlib
Class name: ZlibCompressor

About ZlibCompressor

A Compressor based on the popular zlib compression algorithm. http://www.zlib.net/
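
Before the extracted examples, here is a minimal round-trip sketch of the class in action. It is illustrative only: it assumes the native libhadoop/zlib binding is loadable (ZlibCompressor is the native-backed implementation), and the class name ZlibRoundTrip is made up for the example.

import java.io.IOException;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;

public class ZlibRoundTrip {
 public static void main(String[] args) throws IOException {
  byte[] raw = "hello hello hello hello".getBytes();

  ZlibCompressor compressor = new ZlibCompressor();  // requires native zlib
  compressor.setInput(raw, 0, raw.length);
  compressor.finish();
  byte[] compressed = new byte[raw.length * 2];
  int cLen = 0;
  while (!compressor.finished()) {
   cLen += compressor.compress(compressed, cLen, compressed.length - cLen);
  }
  compressor.end();  // release the native zlib stream

  ZlibDecompressor decompressor = new ZlibDecompressor();
  decompressor.setInput(compressed, 0, cLen);
  byte[] restored = new byte[raw.length];
  int dLen = 0;
  while (!decompressor.finished()) {
   dLen += decompressor.decompress(restored, dLen, restored.length - dLen);
  }
  decompressor.end();
  System.out.println(new String(restored, 0, dLen));  // prints the input
 }
}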

Code Examples

Code example source: org.apache.hadoop/hadoop-common

/**
 * Return the appropriate implementation of the zlib compressor. 
 * 
 * @param conf configuration
 * @return the appropriate implementation of the zlib compressor.
 */
public static Compressor getZlibCompressor(Configuration conf) {
 return (isNativeZlibLoaded(conf)) ? 
  new ZlibCompressor(conf) :
  new BuiltInZlibDeflater(ZlibFactory.getCompressionLevel(conf).compressionLevel());
}
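
A typical call site looks like the following sketch (Compressor is the generic org.apache.hadoop.io.compress.Compressor interface; the compress loop is elided here and shown in the examples below):

Configuration conf = new Configuration();
// Returns the native-backed ZlibCompressor when libhadoop is loaded,
// otherwise the pure-Java BuiltInZlibDeflater.
Compressor compressor = ZlibFactory.getZlibCompressor(conf);
try {
 // ... setInput / finish / compress loop ...
} finally {
 compressor.end();  // always release any native resources
}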

Code example source: org.apache.hadoop/hadoop-common

/**
 * Returns the total number of uncompressed bytes input so far.
 *
 * @return the total (non-negative) number of uncompressed bytes input so far
 */
@Override
public long getBytesRead() {
 checkStream();
 return getBytesRead(stream);
}

Code example source: org.apache.hadoop/hadoop-common

/**
 * Prepare the compressor to be used in a new stream with settings defined in
 * the given Configuration. It will reset the compressor's compression level
 * and compression strategy.
 * 
 * @param conf Configuration storing new settings
 */
@Override
public void reinit(Configuration conf) {
 reset();
 if (conf == null) {
  return;
 }
 end(stream);
 level = ZlibFactory.getCompressionLevel(conf);
 strategy = ZlibFactory.getCompressionStrategy(conf);
 stream = init(level.compressionLevel(), 
        strategy.compressionStrategy(), 
        windowBits.windowBits());
 if(LOG.isDebugEnabled()) {
  LOG.debug("Reinit compressor with new compression configuration");
 }
}
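
In practice the new settings are placed in the Configuration first. A sketch, assuming ZlibFactory's companion setters setCompressionLevel and setCompressionStrategy (which mirror the getters used above) and an already-constructed compressor:

Configuration conf = new Configuration();
ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
ZlibFactory.setCompressionStrategy(conf, CompressionStrategy.HUFFMAN_ONLY);
// Rebuild the native stream with the new level/strategy instead of
// allocating a whole new compressor.
compressor.reinit(conf);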

Code example source: org.apache.hadoop/hadoop-common

/**
 * Returns the total number of compressed bytes output so far.
 *
 * @return the total (non-negative) number of compressed bytes output so far
 */
@Override
public long getBytesWritten() {
 checkStream();
 return getBytesWritten(stream);
}

Code example source: org.apache.hadoop/hadoop-common

@Override
public void reset() {
 checkStream();
 reset(stream);
 finish = false;
 finished = false;
 uncompressedDirectBuf.rewind();
 uncompressedDirectBufOff = uncompressedDirectBufLen = 0;
 keepUncompressedBuf = false;
 compressedDirectBuf.limit(directBufferSize);
 compressedDirectBuf.position(directBufferSize);
 userBufOff = userBufLen = 0;
}
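
Because reset() restores the initial state without tearing down the native stream, one instance can be reused across many inputs. A sketch (imports and output handling elided; compressAll is a made-up helper name):

// Reuse a single compressor, and its direct buffers, for several inputs.
static void compressAll(List<byte[]> inputs, ZlibCompressor compressor)
  throws IOException {
 byte[] out = new byte[64 * 1024];
 for (byte[] input : inputs) {
  compressor.setInput(input, 0, input.length);
  compressor.finish();
  while (!compressor.finished()) {
   int n = compressor.compress(out, 0, out.length);
   // ... write out[0..n) to the destination ...
  }
  compressor.reset();  // clear buffers and counters for the next input
 }
}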

Code example source: ch.cern.hadoop/hadoop-common

rawData = generate(rawDataSize);  // test helper producing rawDataSize bytes of sample input
try {
 ZlibCompressor compressor = new ZlibCompressor();
 ZlibDecompressor decompressor = new ZlibDecompressor();
 assertFalse("testZlibCompressDecompress finished error",
   compressor.finished());
 compressor.setInput(rawData, 0, rawData.length);
 assertTrue("testZlibCompressDecompress getBytesRead before error",
   compressor.getBytesRead() == 0);
 compressor.finish();
 int cSize = compressor.compress(compressedResult, 0, rawDataSize);
 assertTrue("testZlibCompressDecompress getBytesRead ather error",
   compressor.getBytesRead() == rawDataSize);
 assertTrue(
   "testZlibCompressDecompress compressed size no less then original size",
   cSize < rawDataSize);
 decompressor.setInput(compressedResult, 0, cSize);
 byte[] decompressedBytes = new byte[rawDataSize];
 decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
 assertArrayEquals("testZlibCompressDecompress arrays not equals ",
   rawData, decompressedBytes);
 compressor.reset();
 decompressor.reset();
} catch (IOException ex) {
 fail("testZlibCompressDecompress error: " + ex);
}

Code example source: com.github.jiayuhan-it/hadoop-common

private byte[] compressDecompressZlib(byte[] rawData,
  ZlibCompressor zlibCompressor, ZlibDecompressor zlibDecompressor)
  throws IOException {
 int cSize = 0;
 byte[] compressedByte = new byte[rawData.length];
 byte[] decompressedRawData = new byte[rawData.length];
 zlibCompressor.setInput(rawData, 0, rawData.length);
 zlibCompressor.finish();
 while (!zlibCompressor.finished()) {
  cSize = zlibCompressor.compress(compressedByte, 0, compressedByte.length);
 }
 zlibCompressor.reset();
 assertTrue(zlibDecompressor.getBytesWritten() == 0);
 assertTrue(zlibDecompressor.getBytesRead() == 0);
 assertTrue(zlibDecompressor.needsInput());
 zlibDecompressor.setInput(compressedByte, 0, cSize);
 assertFalse(zlibDecompressor.needsInput());
 while (!zlibDecompressor.finished()) {
  zlibDecompressor.decompress(decompressedRawData, 0,
    decompressedRawData.length);
 }
 assertTrue(zlibDecompressor.getBytesWritten() == rawData.length);
 assertTrue(zlibDecompressor.getBytesRead() == cSize);
 zlibDecompressor.reset();
 assertTrue(zlibDecompressor.getRemaining() == 0);
 assertArrayEquals(
   "testZlibCompressorDecompressorWithConfiguration array equals error",
   rawData, decompressedRawData);
 return decompressedRawData;
}

Code example source: org.apache.hadoop/hadoop-common

@Override
public void setDictionary(byte[] b, int off, int len) {
 if (stream == 0 || b == null) {
  throw new NullPointerException();
 }
 if (off < 0 || len < 0 || off > b.length - len) {
  throw new ArrayIndexOutOfBoundsException();
 }
 setDictionary(stream, b, off, len);
}
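
A preset dictionary can improve compression of short inputs that share common substrings. A sketch (the dictionary contents are made up for illustration; per zlib's rules the matching decompressor must be primed with the same bytes):

// Hypothetical dictionary of strings expected to recur in the input.
byte[] dictionary = "timestamp= level= message=".getBytes();

ZlibCompressor compressor = new ZlibCompressor();
compressor.setDictionary(dictionary, 0, dictionary.length);
// ... setInput / finish / compress as usual; supply the same bytes to the
// matching ZlibDecompressor via its setDictionary, following zlib's
// inflateSetDictionary semantics.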

Code example source: org.apache.hadoop/hadoop-common

@Override
public void end() {
 if (stream != 0) {
  end(stream);
  stream = 0;
 }
}

Code example source: org.apache.hadoop/hadoop-common

/** 
 * Creates a new compressor using the specified compression level.
 * Compressed data will be generated in ZLIB format.
 * 
 * @param level Compression level #CompressionLevel
 * @param strategy Compression strategy #CompressionStrategy
 * @param header Compression header #CompressionHeader
 * @param directBufferSize Size of the direct buffer to be used.
 */
public ZlibCompressor(CompressionLevel level, CompressionStrategy strategy, 
           CompressionHeader header, int directBufferSize) {
 this.level = level;
 this.strategy = strategy;
 this.windowBits = header;
 stream = init(this.level.compressionLevel(), 
        this.strategy.compressionStrategy(), 
        this.windowBits.windowBits());
 this.directBufferSize = directBufferSize;
 uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
 compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
 compressedDirectBuf.position(directBufferSize);
}
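
For example, the following sketch (buffer size chosen arbitrarily) builds a compressor that emits gzip-framed output rather than a bare zlib stream:

ZlibCompressor gzip = new ZlibCompressor(
  ZlibCompressor.CompressionLevel.BEST_SPEED,
  ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY,
  ZlibCompressor.CompressionHeader.GZIP_FORMAT,
  64 * 1024);  // 64 KiB direct buffers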

Code example source: org.apache.hadoop/hadoop-common

/**
 * The type of header for compressed data.
 */
public enum CompressionHeader {
 /**
  * No headers/trailers/checksums.
  */
 NO_HEADER (-15),
 
 /**
  * Default headers/trailers/checksums.
  */
 DEFAULT_HEADER (15),
 
 /**
  * Simple gzip headers/trailers.
  */
 GZIP_FORMAT (31);
 private final int windowBits;
 
 CompressionHeader(int windowBits) {
  this.windowBits = windowBits;
 }
 
 public int windowBits() {
  return windowBits;
 }
}
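
The integer passed to each constant is zlib's windowBits parameter: values 8 through 15 select a zlib-wrapped stream, a negative value selects a raw deflate stream with no header or checksum, and adding 16 (here 15 + 16 = 31) selects gzip framing. For comparison, java.util.zip exposes the raw-deflate case through its nowrap flag:

import java.util.zip.Deflater;

// NO_HEADER(-15) corresponds to raw deflate; in java.util.zip the same
// effect comes from constructing a Deflater with nowrap = true.
Deflater raw = new Deflater(Deflater.DEFAULT_COMPRESSION, true);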

Code example source: org.apache.hadoop/hadoop-common

// Fragment from ZlibCompressor#compress(): run the native deflate over the
// direct buffers, then cap the output buffer at the number of bytes produced.
n = deflateBytesDirect();
compressedDirectBuf.limit(n);

代码示例来源:origin: com.facebook.hadoop/hadoop-core

public synchronized void reset() {
 checkStream();
 reset(stream);
 finish = false;
 finished = false;
 uncompressedDirectBuf.rewind();
 uncompressedDirectBufOff = uncompressedDirectBufLen = 0;
 keepUncompressedBuf = false;
 compressedDirectBuf.limit(directBufferSize);
 compressedDirectBuf.position(directBufferSize);
 userBufOff = userBufLen = 0;
}

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Returns the total number of compressed bytes output so far.
 *
 * @return the total (non-negative) number of compressed bytes output so far
 */
public synchronized long getBytesWritten() {
 checkStream();
 return getBytesWritten(stream);
}
