
Usage of the org.apache.hadoop.io.compress.zlib.ZlibDecompressor.<init>() method, with code examples

Repost. Author: 知者. Updated: 2024-03-14 13:30:49

This article collects Java code examples of the org.apache.hadoop.io.compress.zlib.ZlibDecompressor.<init>() method and shows how ZlibDecompressor.<init>() is used in practice. The examples are drawn mainly from selected projects found on GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the ZlibDecompressor.<init>() method:
Package: org.apache.hadoop.io.compress.zlib
Class: ZlibDecompressor
Method: <init>

About ZlibDecompressor.<init>

Creates a new decompressor.
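Besides the no-argument constructor, the class also exposes a two-argument constructor, ZlibDecompressor(CompressionHeader header, int directBufferSize), which several of the examples below use. The following is a minimal stand-alone sketch (not taken from the projects below) that constructs a ZlibDecompressor directly and decompresses a zlib stream produced by the JDK's Deflater; it assumes the Hadoop native zlib bindings are loaded, otherwise the native decompressor cannot be used and ZlibFactory falls back to BuiltInZlibInflater.

import java.nio.charset.StandardCharsets;
import java.util.zip.Deflater;

import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;

public class ZlibDecompressorExample {
  public static void main(String[] args) throws Exception {
    byte[] raw = "hello zlib decompressor".getBytes(StandardCharsets.UTF_8);

    // Produce a standard zlib stream with the JDK's Deflater.
    Deflater deflater = new Deflater();
    deflater.setInput(raw);
    deflater.finish();
    byte[] compressed = new byte[raw.length * 2 + 64];
    int compressedLen = deflater.deflate(compressed);
    deflater.end();

    // DEFAULT_HEADER expects a plain zlib header; 64 * 1024 is the direct-buffer size.
    ZlibDecompressor decompressor =
        new ZlibDecompressor(ZlibDecompressor.CompressionHeader.DEFAULT_HEADER, 64 * 1024);
    decompressor.setInput(compressed, 0, compressedLen);

    byte[] out = new byte[raw.length];
    int total = 0;
    while (!decompressor.finished() && total < out.length) {
      total += decompressor.decompress(out, total, out.length - total);
    }
    decompressor.end();

    System.out.println(new String(out, 0, total, StandardCharsets.UTF_8));
  }
}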

Code examples

Code example source (origin): org.apache.hadoop/hadoop-common (the identical snippet also ships in org.jvnet.hudson.hadoop/hadoop-core, io.prestosql.hadoop/hadoop-apache, com.github.jiayuhan-it/hadoop-common, io.hops/hadoop-common, com.facebook.hadoop/hadoop-core, and ch.cern.hadoop/hadoop-common)

/**
 * Return the appropriate implementation of the zlib decompressor. 
 * 
 * @param conf configuration
 * @return the appropriate implementation of the zlib decompressor.
 */
public static Decompressor getZlibDecompressor(Configuration conf) {
 return (isNativeZlibLoaded(conf)) ? 
  new ZlibDecompressor() : new BuiltInZlibInflater(); 
}
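A rough usage sketch of this factory method (the enclosing class is ZlibFactory in hadoop-common; the example class name here is made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;

public class GetZlibDecompressorExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Native ZlibDecompressor when libhadoop's zlib bindings are loaded,
    // otherwise the pure-Java BuiltInZlibInflater fallback.
    Decompressor decompressor = ZlibFactory.getZlibDecompressor(conf);
    System.out.println("Using: " + decompressor.getClass().getName());
  }
}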

Code example source (origin): org.jvnet.hudson.hadoop/hadoop-core

public Decompressor createDecompressor() {
 return (ZlibFactory.isNativeZlibLoaded(conf)) ?
       new ZlibDecompressor(ZlibDecompressor.CompressionHeader.AUTODETECT_GZIP_ZLIB,
                 64*1024) :
       null;                               
}
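In this snippet the first constructor argument selects how the stream header is interpreted (AUTODETECT_GZIP_ZLIB accepts either a gzip or a zlib header) and the second argument is the native direct-buffer size in bytes (64 KiB here). A minimal sketch of constructing the decompressor the same way outside of a codec (the example class name is made up; it assumes the native zlib bindings are loaded):

import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;

public class AutodetectDecompressorExample {
  public static void main(String[] args) {
    // AUTODETECT_GZIP_ZLIB accepts either a gzip or a zlib header;
    // DEFAULT_HEADER (used by the no-arg constructor) expects a plain zlib header.
    Decompressor decompressor = new ZlibDecompressor(
        ZlibDecompressor.CompressionHeader.AUTODETECT_GZIP_ZLIB, 64 * 1024);
    System.out.println("finished = " + decompressor.finished());
    decompressor.end();
  }
}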

Code example source (origin): ch.cern.hadoop/hadoop-common (the same test also appears in com.github.jiayuhan-it/hadoop-common)

@Test
public void testZlibCompressorDecompressor() {
 try {
  int SIZE = 44 * 1024;
  byte[] rawData = generate(SIZE);
  
  CompressDecompressTester.of(rawData)
   .withCompressDecompressPair(new ZlibCompressor(), new ZlibDecompressor())
   .withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
     CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
     CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
     CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
    .test();
 } catch (Exception ex) {
  fail("testCompressorDecompressor error !!!" + ex);
 }
}
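generate(SIZE) is a helper defined elsewhere in the test class and not shown in this excerpt. A hypothetical stand-in that simply fills a buffer with random bytes would look roughly like this (this is not the original implementation):

import java.util.Random;

// Hypothetical stand-in for the test helper; the real method lives in the
// Hadoop test class and may differ in detail.
static byte[] generate(int size) {
  byte[] data = new byte[size];
  new Random(12345L).nextBytes(data);
  return data;
}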

Code example source (origin): com.github.jiayuhan-it/hadoop-common (the same test also appears in ch.cern.hadoop/hadoop-common)

@Test
public void testCompressorDecompressorWithExeedBufferLimit() {
 int BYTE_SIZE = 100 * 1024;
 byte[] rawData = generate(BYTE_SIZE);
 try {
  CompressDecompressTester.of(rawData)
  .withCompressDecompressPair(
   new ZlibCompressor(
     org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
     CompressionStrategy.DEFAULT_STRATEGY,
     org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
     BYTE_SIZE),
    new ZlibDecompressor(
     org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
     BYTE_SIZE))
    .withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
     CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
     CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
     CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
    .test();
 } catch (Exception ex) {
  fail("testCompressorDecompressorWithExeedBufferLimit error !!!" + ex);
 } 
}

Code example source (origin): ch.cern.hadoop/hadoop-common (the same excerpt also appears in com.github.jiayuhan-it/hadoop-common)

try {
 ZlibCompressor compressor = new ZlibCompressor();
 ZlibDecompressor decompressor = new ZlibDecompressor();
 assertFalse("testZlibCompressDecompress finished error",
   compressor.finished());
 // ... (the rest of this excerpt is truncated in the source)
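The excerpt above is cut off in the source. As a rough, hypothetical continuation inside the same try block, showing how such a round trip can proceed with the Compressor/Decompressor APIs (this is not the original test body; generate() and the JUnit asserts come from the test context shown earlier):

 // Hypothetical continuation, not the original test: compress rawData with the
 // ZlibCompressor, feed the result to the ZlibDecompressor, and compare.
 byte[] rawData = generate(44 * 1024);

 compressor.setInput(rawData, 0, rawData.length);
 compressor.finish();
 byte[] compressed = new byte[rawData.length * 2];
 int compressedLen = compressor.compress(compressed, 0, compressed.length);

 decompressor.setInput(compressed, 0, compressedLen);
 byte[] restored = new byte[rawData.length];
 int restoredLen = decompressor.decompress(restored, 0, restored.length);

 assertEquals(rawData.length, restoredLen);
 assertArrayEquals(rawData, restored);

 compressor.end();
 decompressor.end();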

