gpt4 book ai didi

org.apache.hadoop.io.compress.zlib.ZlibCompressor.&lt;init&gt;()方法的使用及代码示例

转载 作者:知者 更新时间:2024-03-14 14:48:49 30 4
gpt4 key购买 nike

本文整理了Java中org.apache.hadoop.io.compress.zlib.ZlibCompressor.&lt;init&gt;()方法的一些代码示例,展示了ZlibCompressor.&lt;init&gt;()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ZlibCompressor.&lt;init&gt;()方法的具体详情如下:
包路径:org.apache.hadoop.io.compress.zlib.ZlibCompressor
类名称:ZlibCompressor
方法名:<init>

ZlibCompressor.<init>介绍

[英]Creates a new compressor with the default compression level. Compressed data will be generated in ZLIB format.
[中]创建具有默认压缩级别的新压缩器。压缩数据将以ZLIB格式生成。

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Selects the zlib {@link Compressor} implementation to use.
 *
 * @param conf configuration consulted to decide between native and pure-Java zlib
 * @return a native {@link ZlibCompressor} when the native library is loaded,
 *         otherwise a {@link BuiltInZlibDeflater} at the configured compression level
 */
public static Compressor getZlibCompressor(Configuration conf) {
  if (isNativeZlibLoaded(conf)) {
    return new ZlibCompressor(conf);
  }
  // Fall back to the JDK Deflater wrapper, honoring the configured level.
  int level = ZlibFactory.getCompressionLevel(conf).compressionLevel();
  return new BuiltInZlibDeflater(level);
}

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Creates the compressor backing this codec.
 *
 * @return a gzip-format native {@link ZlibCompressor} with default level/strategy
 *         and a 64 KB buffer when the native zlib library is loaded,
 *         otherwise {@code null}
 */
public Compressor createCompressor() {
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    return null;
  }
  return new ZlibCompressor(
      ZlibCompressor.CompressionLevel.DEFAULT_COMPRESSION,
      ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY,
      ZlibCompressor.CompressionHeader.GZIP_FORMAT,
      64 * 1024);
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Selects the zlib {@link Compressor} implementation to use.
 *
 * @param conf configuration consulted to decide between native and pure-Java zlib
 * @return a native {@link ZlibCompressor} when the native library is loaded,
 *         otherwise a default {@link BuiltInZlibDeflater}
 */
public static Compressor getZlibCompressor(Configuration conf) {
  if (isNativeZlibLoaded(conf)) {
    return new ZlibCompressor();
  }
  return new BuiltInZlibDeflater();
}

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Selects the zlib {@link Compressor} implementation to use.
 *
 * @param conf configuration consulted to decide between native and pure-Java zlib
 * @return a native {@link ZlibCompressor} when the native library is loaded,
 *         otherwise a default {@link BuiltInZlibDeflater}
 */
public static Compressor getZlibCompressor(Configuration conf) {
  if (isNativeZlibLoaded(conf)) {
    return new ZlibCompressor();
  }
  return new BuiltInZlibDeflater();
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

/**
 * Selects the zlib {@link Compressor} implementation to use.
 *
 * @param conf configuration consulted to decide between native and pure-Java zlib
 * @return a native {@link ZlibCompressor} when the native library is loaded,
 *         otherwise a {@link BuiltInZlibDeflater} at the configured compression level
 */
public static Compressor getZlibCompressor(Configuration conf) {
  if (isNativeZlibLoaded(conf)) {
    return new ZlibCompressor(conf);
  }
  // Fall back to the JDK Deflater wrapper, honoring the configured level.
  int level = ZlibFactory.getCompressionLevel(conf).compressionLevel();
  return new BuiltInZlibDeflater(level);
}

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

/**
 * Selects the zlib {@link Compressor} implementation to use.
 *
 * @param conf configuration consulted to decide between native and pure-Java zlib
 * @return a native {@link ZlibCompressor} when the native library is loaded,
 *         otherwise a {@link BuiltInZlibDeflater} at the configured compression level
 */
public static Compressor getZlibCompressor(Configuration conf) {
  if (isNativeZlibLoaded(conf)) {
    return new ZlibCompressor(conf);
  }
  // Fall back to the JDK Deflater wrapper, honoring the configured level.
  int level = ZlibFactory.getCompressionLevel(conf).compressionLevel();
  return new BuiltInZlibDeflater(level);
}

代码示例来源:origin: io.hops/hadoop-common

/**
 * Selects the zlib {@link Compressor} implementation to use.
 *
 * @param conf configuration consulted to decide between native and pure-Java zlib
 * @return a native {@link ZlibCompressor} when the native library is loaded,
 *         otherwise a {@link BuiltInZlibDeflater} at the configured compression level
 */
public static Compressor getZlibCompressor(Configuration conf) {
  if (isNativeZlibLoaded(conf)) {
    return new ZlibCompressor(conf);
  }
  // Fall back to the JDK Deflater wrapper, honoring the configured level.
  int level = ZlibFactory.getCompressionLevel(conf).compressionLevel();
  return new BuiltInZlibDeflater(level);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

/**
 * Selects the zlib {@link Compressor} implementation to use.
 *
 * @param conf configuration consulted to decide between native and pure-Java zlib
 * @return a native {@link ZlibCompressor} when the native library is loaded,
 *         otherwise a {@link BuiltInZlibDeflater} at the configured compression level
 */
public static Compressor getZlibCompressor(Configuration conf) {
  if (isNativeZlibLoaded(conf)) {
    return new ZlibCompressor(conf);
  }
  // Fall back to the JDK Deflater wrapper, honoring the configured level.
  int level = ZlibFactory.getCompressionLevel(conf).compressionLevel();
  return new BuiltInZlibDeflater(level);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

/**
 * Round-trips 44 KB of generated data through the default native
 * compressor/decompressor pair across the standard test strategies.
 */
@Test
public void testZlibCompressorDecompressor() {
  try {
    int size = 44 * 1024;
    byte[] input = generate(size);

    ImmutableSet<CompressionTestStrategy> strategies = ImmutableSet.of(
        CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
        CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
        CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
        CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM);

    CompressDecompressTester.of(input)
        .withCompressDecompressPair(new ZlibCompressor(), new ZlibDecompressor())
        .withTestCases(strategies)
        .test();
  } catch (Exception ex) {
    fail("testCompressorDecompressor error !!!" + ex);
  }
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

/**
 * Round-trips 44 KB of generated data through the default native
 * compressor/decompressor pair across the standard test strategies.
 */
@Test
public void testZlibCompressorDecompressor() {
  try {
    int size = 44 * 1024;
    byte[] input = generate(size);

    ImmutableSet<CompressionTestStrategy> strategies = ImmutableSet.of(
        CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
        CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
        CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
        CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM);

    CompressDecompressTester.of(input)
        .withCompressDecompressPair(new ZlibCompressor(), new ZlibDecompressor())
        .withTestCases(strategies)
        .test();
  } catch (Exception ex) {
    fail("testCompressorDecompressor error !!!" + ex);
  }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

/**
 * Exercises compress/decompress when the 100 KB input matches the internal
 * buffer size exactly, using best-compression level and default headers.
 * (Method name typo "Exeed" is kept to preserve the public test name.)
 */
@Test
public void testCompressorDecompressorWithExeedBufferLimit() {
  int bufferSize = 100 * 1024;
  byte[] input = generate(bufferSize);
  try {
    ZlibCompressor compressor = new ZlibCompressor(
        org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
        CompressionStrategy.DEFAULT_STRATEGY,
        org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
        bufferSize);
    ZlibDecompressor decompressor = new ZlibDecompressor(
        org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
        bufferSize);

    CompressDecompressTester.of(input)
        .withCompressDecompressPair(compressor, decompressor)
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  } catch (Exception ex) {
    fail("testCompressorDecompressorWithExeedBufferLimit error !!!" + ex);
  }
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

/**
 * Exercises compress/decompress when the 100 KB input matches the internal
 * buffer size exactly, using best-compression level and default headers.
 * (Method name typo "Exeed" is kept to preserve the public test name.)
 */
@Test
public void testCompressorDecompressorWithExeedBufferLimit() {
  int bufferSize = 100 * 1024;
  byte[] input = generate(bufferSize);
  try {
    ZlibCompressor compressor = new ZlibCompressor(
        org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
        CompressionStrategy.DEFAULT_STRATEGY,
        org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
        bufferSize);
    ZlibDecompressor decompressor = new ZlibDecompressor(
        org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
        bufferSize);

    CompressDecompressTester.of(input)
        .withCompressDecompressPair(compressor, decompressor)
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  } catch (Exception ex) {
    fail("testCompressorDecompressorWithExeedBufferLimit error !!!" + ex);
  }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

rawData = generate(rawDataSize);
try {
 ZlibCompressor compressor = new ZlibCompressor();
 ZlibDecompressor decompressor = new ZlibDecompressor();
 assertFalse("testZlibCompressDecompress finished error",

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

rawData = generate(rawDataSize);
try {
 ZlibCompressor compressor = new ZlibCompressor();
 ZlibDecompressor decompressor = new ZlibDecompressor();
 assertFalse("testZlibCompressDecompress finished error",

30 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com