gpt4 book ai didi

org.apache.commons.compress.archivers.zip.ZipArchiveEntry.getCompressedSize()方法的使用及代码示例

转载 作者:知者 更新时间:2024-03-17 07:08:40 26 4
gpt4 key购买 nike

本文整理了Java中org.apache.commons.compress.archivers.zip.ZipArchiveEntry.getCompressedSize()方法的一些代码示例,展示了ZipArchiveEntry.getCompressedSize()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ZipArchiveEntry.getCompressedSize()方法的具体详情如下:
包路径:org.apache.commons.compress.archivers.zip.ZipArchiveEntry
类名称:ZipArchiveEntry
方法名:getCompressedSize

ZipArchiveEntry.getCompressedSize介绍

暂无

代码示例

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Tells whether the current entry may still have unread bytes left in
 * the underlying stream.
 *
 * <p>This is only decidable when the entry's header carried the
 * compressed size (i.e. there is no data descriptor); in that case the
 * entry has outstanding bytes as long as the number of bytes consumed
 * so far does not exceed that size.</p>
 *
 * @return true, if current entry is determined to have outstanding bytes, false otherwise
 */
private boolean currentEntryHasOutstandingBytes() {
  final boolean sizeKnownFromHeader = !current.hasDataDescriptor;
  return current.bytesReadFromStream <= current.entry.getCompressedSize()
      && sizeKnownFromHeader;
}

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Whether the entry's sizes exceed what a plain (non-Zip64) ZIP archive
 * can represent.
 *
 * <p>NOTE(review): the method name contains an upstream typo ("Lage"
 * for "Large"); it is kept unchanged for compatibility with callers.</p>
 *
 * @param zipArchiveEntry the entry to inspect
 * @return true if either the uncompressed or the compressed size
 *         reaches the Zip64 magic threshold
 */
private boolean isTooLageForZip32(final ZipArchiveEntry zipArchiveEntry){
  if (zipArchiveEntry.getSize() >= ZIP64_MAGIC) {
    return true;
  }
  return zipArchiveEntry.getCompressedSize() >= ZIP64_MAGIC;
}

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Consumes and discards any not-yet-read payload bytes of the current
 * entry from the underlying stream.
 *
 * @throws EOFException if the stream ends before the entry's compressed
 *         size has been fully consumed
 * @throws IOException if reading from the underlying stream fails
 */
private void drainCurrentEntryData() throws IOException {
  long outstanding = current.entry.getCompressedSize() - current.bytesReadFromStream;
  while (outstanding > 0) {
    // Never read more than one buffer's worth at a time.
    final int chunk = (int) Math.min(buf.capacity(), outstanding);
    final long consumed = in.read(buf.array(), 0, chunk);
    if (consumed < 0) {
      throw new EOFException("Truncated ZIP entry: "
                  + ArchiveUtils.sanitize(current.entry.getName()));
    }
    count(consumed);
    outstanding -= consumed;
  }
}

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Expose the raw stream of the archive entry (compressed form).
 *
 * <p>This method does not relate to how/if we understand the payload in the
 * stream, since we really only intend to move it on to somewhere else.</p>
 *
 * @param ze The entry to get the stream for
 * @return The raw input stream containing (possibly) compressed data,
 *         or null if the entry did not originate from this archive.
 * @since 1.11
 */
public InputStream getRawInputStream(final ZipArchiveEntry ze) {
  if (ze instanceof Entry) {
    // Bound the stream to exactly the entry's stored (compressed) bytes.
    return createBoundedInputStream(ze.getDataOffset(), ze.getCompressedSize());
  }
  return null;
}

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Whether to add a Zip64 extended information extra field to the
 * local file header.
 *
 * <p>Returns true if</p>
 *
 * <ul>
 * <li>mode is Always</li>
 * <li>or we already know it is going to be needed</li>
 * <li>or the size is unknown and we can ensure it won't hurt
 * other implementations if we add it (i.e. we can erase its
 * usage)</li>
 * </ul>
 */
private boolean shouldAddZip64Extra(final ZipArchiveEntry entry, final Zip64Mode mode) {
  if (mode == Zip64Mode.Always) {
    return true;
  }
  if (entry.getSize() >= ZIP64_MAGIC || entry.getCompressedSize() >= ZIP64_MAGIC) {
    return true;
  }
  // Unknown size: only add the extra field if we can later rewrite the
  // header (seekable output) and the mode does not forbid Zip64.
  return entry.getSize() == ArchiveEntry.SIZE_UNKNOWN
    && channel != null
    && mode != Zip64Mode.Never;
}

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Builds the central directory file header record for the given entry.
 *
 * @param ze the entry to create the header for
 * @return the serialized central file header bytes
 * @throws IOException on error
 * @throws Zip64RequiredException if Zip64 support is required but the
 *         mode has been set to Never
 */
private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException {
  final EntryMetaData meta = metaData.get(ze);
  final boolean zip64Required = hasZip64Extra(ze)
      || ze.getCompressedSize() >= ZIP64_MAGIC
      || ze.getSize() >= ZIP64_MAGIC
      || meta.offset >= ZIP64_MAGIC
      || zip64Mode == Zip64Mode.Always;
  if (zip64Required && zip64Mode == Zip64Mode.Never) {
    // must be the offset that is too big, otherwise an
    // exception would have been throw in putArchiveEntry or
    // closeArchiveEntry
    throw new Zip64RequiredException(Zip64RequiredException
        .ARCHIVE_TOO_BIG_MESSAGE);
  }
  handleZip64Extra(ze, meta.offset, zip64Required);
  return createCentralFileHeader(ze, getName(ze), meta, zip64Required);
}

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Whether the compressed size for the entry is either known or
 * not required by the compression method being used.
 */
private boolean supportsCompressedSizeFor(final ZipArchiveEntry entry) {
  if (entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN) {
    // Size is known up front, nothing else to check.
    return true;
  }
  if (entry.getMethod() == ZipEntry.DEFLATED
    || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode()) {
    // Deflate variants signal the end of the data themselves.
    return true;
  }
  // STORED entries are only readable when a data descriptor is present
  // and the caller has explicitly opted in to that mode.
  return entry.getGeneralPurposeBit().usesDataDescriptor()
    && allowStoredEntriesWithDataDescriptor
    && entry.getMethod() == ZipEntry.STORED;
}

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * If the entry needs Zip64 extra information inside the central
 * directory then configure its data.
 */
private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset,
               final boolean needsZip64Extra) {
  if (!needsZip64Extra) {
    return;
  }
  final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze);
  final boolean sizesNeedZip64 = ze.getCompressedSize() >= ZIP64_MAGIC
    || ze.getSize() >= ZIP64_MAGIC
    || zip64Mode == Zip64Mode.Always;
  if (sizesNeedZip64) {
    z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
    z64.setSize(new ZipEightByteInteger(ze.getSize()));
  } else {
    // reset value that may have been set for LFH
    z64.setCompressedSize(null);
    z64.setSize(null);
  }
  if (lfhOffset >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) {
    z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset));
  }
  // Re-serialize the extra fields after modifying them.
  ze.setExtra();
}

代码示例来源:origin: org.apache.commons/commons-compress

new BufferedInputStream(createBoundedInputStream(start, ze.getCompressedSize())); //NOSONAR
switch (ZipMethod.getMethodByCode(ze.getMethod())) {
  case STORED:

代码示例来源:origin: org.apache.commons/commons-compress

&& getSize() == other.getSize()
&& getCrc() == other.getCrc()
&& getCompressedSize() == other.getCompressedSize()
&& Arrays.equals(getCentralDirectoryExtra(),
         other.getCentralDirectoryExtra())

代码示例来源:origin: org.apache.commons/commons-compress

if (ze.getCompressedSize() >= ZIP64_MAGIC
    || ze.getSize() >= ZIP64_MAGIC
    || zip64Mode == Zip64Mode.Always) {
  ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET);
} else {
  putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET);
  putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET);

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Writes the data descriptor entry.
 * @param ze the entry to write
 * @throws IOException on error
 */
protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException {
  if (!usesDataDescriptor(ze.getMethod(), false)) {
    return;
  }
  writeCounted(DD_SIG);
  writeCounted(ZipLong.getBytes(ze.getCrc()));
  // Zip64 entries carry 8-byte size fields; plain entries use 4 bytes.
  if (hasZip64Extra(ze)) {
    writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
    writeCounted(ZipEightByteInteger.getBytes(ze.getSize()));
  } else {
    writeCounted(ZipLong.getBytes(ze.getCompressedSize()));
    writeCounted(ZipLong.getBytes(ze.getSize()));
  }
}

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Adds an archive entry with a raw input stream.
 *
 * <p>If crc, size and compressed size are supplied on the entry, these
 * values will be used as-is. Zip64 status is re-established based on the
 * settings in this stream, and the supplied value is ignored.</p>
 *
 * <p>The entry is put and closed immediately.</p>
 *
 * @param entry The archive entry to add
 * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
 * @throws IOException If copying fails
 */
public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream)
    throws IOException {
  final ZipArchiveEntry copy = new ZipArchiveEntry(entry);
  if (hasZip64Extra(copy)) {
    // Will be re-added as required. this may make the file generated with this method
    // somewhat smaller than standard mode,
    // since standard mode is unable to remove the zip 64 header.
    copy.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
  }
  // All three values known up front -> header sizes can be written directly.
  final boolean sizesAndCrcKnown = copy.getCrc() != ZipArchiveEntry.CRC_UNKNOWN
      && copy.getSize() != ArchiveEntry.SIZE_UNKNOWN
      && copy.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
  putArchiveEntry(copy, sizesAndCrcKnown);
  copyFromZipInputStream(rawStream);
  closeCopiedEntry(sizesAndCrcKnown);
}

代码示例来源:origin: org.apache.commons/commons-compress

ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET);
} else if (phased) {
  putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
  putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
} else if (zipMethod == DEFLATED || channel != null) {

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Throws an exception if the size is unknown for a stored entry
 * that is written to a non-seekable output or the entry is too
 * big to be written without Zip64 extra but the mode has been set
 * to Never.
 *
 * @param effectiveMode the Zip64 mode in effect for the current entry
 * @throws ZipException if required size/CRC information is missing
 * @throws Zip64RequiredException if the entry needs Zip64 but the mode forbids it
 */
private void validateSizeInformation(final Zip64Mode effectiveMode)
  throws ZipException {
  // Size/CRC not required if SeekableByteChannel is used
  if (entry.entry.getMethod() == STORED && channel == null) {
    if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) {
      throw new ZipException("uncompressed size is required for"
                  + " STORED method when not writing to a"
                  + " file");
    }
    if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) {
      throw new ZipException("crc checksum is required for STORED"
                  + " method when not writing to a file");
    }
    // STORED data is written verbatim, so both sizes are identical.
    entry.entry.setCompressedSize(entry.entry.getSize());
  }
  final boolean tooBigForZip32 = entry.entry.getSize() >= ZIP64_MAGIC
     || entry.entry.getCompressedSize() >= ZIP64_MAGIC;
  if (tooBigForZip32 && effectiveMode == Zip64Mode.Never) {
    throw new Zip64RequiredException(Zip64RequiredException
                     .getEntryTooBigMessage(entry.entry));
  }
}

代码示例来源:origin: org.apache.commons/commons-compress

if (z64 != null) {
  final boolean hasUncompressedSize = ze.getSize() == ZIP64_MAGIC;
  final boolean hasCompressedSize = ze.getCompressedSize() == ZIP64_MAGIC;
  final boolean hasRelativeHeaderOffset =
    ze.getLocalHeaderOffset() == ZIP64_MAGIC;
    ze.setCompressedSize(z64.getCompressedSize().getLongValue());
  } else if (hasUncompressedSize) {
    z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));

代码示例来源:origin: org.apache.commons/commons-compress

writeOut(ZipLong.getBytes(entry.entry.getCrc()));
if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
  writeOut(ZipLong.getBytes(entry.entry.getCompressedSize()));
  writeOut(ZipLong.getBytes(entry.entry.getSize()));
} else {
  writeOut(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()));

代码示例来源:origin: org.apache.commons/commons-compress

compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize());
} else if (entry.entry.getMethod() == STORED
    && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {

代码示例来源:origin: org.apache.commons/commons-compress

if (current.entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN) {
  if (ZipUtil.canHandleEntryData(current.entry) && m != ZipMethod.STORED && m != ZipMethod.DEFLATED) {
    InputStream bis = new BoundedInputStream(in, current.entry.getCompressedSize());
    switch (m) {
    case UNSHRINKING:

代码示例来源:origin: com.impetus.fabric/fabric-jdbc-driver-shaded

/**
 * Whether the compressed size for the entry is either known or
 * not required by the compression method being used.
 */
private boolean supportsCompressedSizeFor(final ZipArchiveEntry entry) {
  // A known compressed size always suffices.
  final boolean sizeKnown = entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
  // Deflate variants delimit their own data, so no size is needed.
  final boolean selfTerminating = entry.getMethod() == ZipEntry.DEFLATED
    || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode();
  // STORED entries work only with a data descriptor and explicit opt-in.
  final boolean storedWithDescriptor = entry.getGeneralPurposeBit().usesDataDescriptor()
    && allowStoredEntriesWithDataDescriptor
    && entry.getMethod() == ZipEntry.STORED;
  return sizeKnown || selfTerminating || storedWithDescriptor;
}

26 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com