gpt4 book ai didi

org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream.hasZip64Extra()方法的使用及代码示例

转载 作者:知者 更新时间:2024-03-16 17:02:40 28 4
gpt4 key购买 nike

本文整理了Java中org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream.hasZip64Extra()方法的一些代码示例,展示了ZipArchiveOutputStream.hasZip64Extra()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ZipArchiveOutputStream.hasZip64Extra()方法的具体详情如下:
包路径:org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream
类名称:ZipArchiveOutputStream
方法名:hasZip64Extra

ZipArchiveOutputStream.hasZip64Extra介绍

[英]Is there a ZIP64 extended information extra field for the entry?
[中]条目是否有ZIP64扩展信息额外字段?

代码示例

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Builds the central-directory file header bytes for the given entry.
 *
 * @param ze the entry to write the header for
 * @return the serialized central file header
 * @throws IOException if the archive would require ZIP64 while zip64Mode is Never
 */
private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException {
  final EntryMetaData meta = metaData.get(ze);
  // A ZIP64 extra field is needed when the entry already carries one, any
  // size/offset exceeds the 32-bit limit, or ZIP64 is unconditionally forced.
  final boolean zip64Required = hasZip64Extra(ze)
      || ze.getCompressedSize() >= ZIP64_MAGIC
      || ze.getSize() >= ZIP64_MAGIC
      || meta.offset >= ZIP64_MAGIC
      || zip64Mode == Zip64Mode.Always;
  if (zip64Required && zip64Mode == Zip64Mode.Never) {
    // Entry sizes were validated earlier (putArchiveEntry/closeArchiveEntry),
    // so reaching this point means the central-directory offset is too big.
    throw new Zip64RequiredException(Zip64RequiredException.ARCHIVE_TOO_BIG_MESSAGE);
  }
  handleZip64Extra(ze, meta.offset, zip64Required);
  return createCentralFileHeader(ze, getName(ze), meta, zip64Required);
}

代码示例来源:origin: org.apache.commons/commons-compress

final boolean dataDescriptor = usesDataDescriptor(zipMethod, phased);
putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze), dataDescriptor), buf, LFH_VERSION_NEEDED_OFFSET);
if (hasZip64Extra(entry.entry)){

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Adds an archive entry whose content is supplied as an already-processed
 * (possibly compressed/encrypted) raw input stream.
 *
 * If crc, size and compressed size are supplied on the entry, these values
 * will be used as-is. Zip64 status is re-established based on the settings
 * in this stream, and the supplied value is ignored.
 *
 * The entry is put and closed immediately.
 *
 * @param entry The archive entry to add
 * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
 * @throws IOException If copying fails
 */
public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream)
    throws IOException {
  final ZipArchiveEntry copy = new ZipArchiveEntry(entry);
  if (hasZip64Extra(copy)) {
    // Drop any pre-existing ZIP64 field; it is re-added on demand, which can
    // make the generated file somewhat smaller than standard mode (standard
    // mode cannot remove the zip 64 header once present).
    copy.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
  }
  // All three values known up front means we can write sizes in the local
  // file header instead of relying on a data descriptor.
  final boolean sizesKnownUpFront = copy.getCrc() != ZipArchiveEntry.CRC_UNKNOWN
      && copy.getSize() != ArchiveEntry.SIZE_UNKNOWN
      && copy.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
  putArchiveEntry(copy, sizesKnownUpFront);
  copyFromZipInputStream(rawStream);
  closeCopiedEntry(sizesKnownUpFront);
}

代码示例来源:origin: org.apache.commons/commons-compress

if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
  writeOut(ZipLong.getBytes(entry.entry.getCompressedSize()));
  writeOut(ZipLong.getBytes(entry.entry.getSize()));
if (hasZip64Extra(entry.entry)) {
  final ByteBuffer name = getName(entry.entry);
  final int nameLen = name.limit() - name.position();

代码示例来源:origin: org.apache.commons/commons-compress

/**
 * Writes the data descriptor record for the given entry, if the entry's
 * compression method uses one.
 *
 * @param ze the entry to write
 * @throws IOException on error
 */
protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException {
  if (!usesDataDescriptor(ze.getMethod(), false)) {
    return;
  }
  writeCounted(DD_SIG);
  writeCounted(ZipLong.getBytes(ze.getCrc()));
  // ZIP64 entries record sizes as 8-byte integers, all others as 4-byte.
  final boolean zip64 = hasZip64Extra(ze);
  if (zip64) {
    writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
    writeCounted(ZipEightByteInteger.getBytes(ze.getSize()));
  } else {
    writeCounted(ZipLong.getBytes(ze.getCompressedSize()));
    writeCounted(ZipLong.getBytes(ze.getSize()));
  }
}

代码示例来源:origin: com.impetus.fabric/fabric-jdbc-driver-shaded

/**
 * Builds the central-directory file header bytes for the given entry.
 *
 * @param ze the entry to write the header for
 * @return the serialized central file header
 * @throws Zip64RequiredException if ZIP64 is needed but zip64Mode is Never
 */
private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException {
  final EntryMetaData entryMetaData = metaData.get(ze);
  // ZIP64 extra is needed if the entry already has one, any size/offset
  // exceeds the 32-bit limit, or ZIP64 mode is forced to Always.
  final boolean needsZip64Extra = hasZip64Extra(ze)
      || ze.getCompressedSize() >= ZIP64_MAGIC
      || ze.getSize() >= ZIP64_MAGIC
      || entryMetaData.offset >= ZIP64_MAGIC
      || zip64Mode == Zip64Mode.Always;
  if (needsZip64Extra && zip64Mode == Zip64Mode.Never) {
    // must be the offset that is too big, otherwise an
    // exception would have been throw in putArchiveEntry or
    // closeArchiveEntry
    throw new Zip64RequiredException(Zip64RequiredException
        .ARCHIVE_TOO_BIG_MESSAGE);
  }
  handleZip64Extra(ze, entryMetaData.offset, needsZip64Extra);
  return createCentralFileHeader(ze, getName(ze), entryMetaData, needsZip64Extra);
}

代码示例来源:origin: com.impetus.fabric/fabric-jdbc-driver-shaded

final boolean dataDescriptor = usesDataDescriptor(zipMethod, phased);
putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze), dataDescriptor), buf, LFH_VERSION_NEEDED_OFFSET);
if (hasZip64Extra(entry.entry)){

代码示例来源:origin: com.impetus.fabric/fabric-jdbc-driver-shaded

/**
 * Adds an archive entry with a raw input stream.
 *
 * If crc, size and compressed size are supplied on the entry, these values will be used as-is.
 * Zip64 status is re-established based on the settings in this stream, and the supplied value
 * is ignored.
 *
 * The entry is put and closed immediately.
 *
 * @param entry The archive entry to add
 * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
 * @throws IOException If copying fails
 */
public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream)
    throws IOException {
  // Work on a copy so the caller's entry is never mutated.
  final ZipArchiveEntry ae = new ZipArchiveEntry(entry);
  if (hasZip64Extra(ae)) {
    // Will be re-added as required. this may make the file generated with this method
    // somewhat smaller than standard mode,
    // since standard mode is unable to remove the zip 64 header.
    ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
  }
  // When crc and both sizes are already known, the entry can be written
  // without a data descriptor (two-phase source).
  final boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN
      && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN
      && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
  putArchiveEntry(ae, is2PhaseSource);
  copyFromZipInputStream(rawStream);
  closeCopiedEntry(is2PhaseSource);
}

代码示例来源:origin: com.impetus.fabric/fabric-jdbc-driver-shaded

if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
  writeOut(ZipLong.getBytes(entry.entry.getCompressedSize()));
  writeOut(ZipLong.getBytes(entry.entry.getSize()));
if (hasZip64Extra(entry.entry)) {
  final ByteBuffer name = getName(entry.entry);
  final int nameLen = name.limit() - name.position();

代码示例来源:origin: com.impetus.fabric/fabric-jdbc-driver-shaded

/**
 * Writes the data descriptor entry.
 *
 * No-op when the entry's compression method does not use a data descriptor.
 *
 * @param ze the entry to write
 * @throws IOException on error
 */
protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException {
  if (!usesDataDescriptor(ze.getMethod(), false)) {
    return;
  }
  writeCounted(DD_SIG);
  writeCounted(ZipLong.getBytes(ze.getCrc()));
  // ZIP64 entries store sizes as 8-byte values, all others as 4-byte.
  if (!hasZip64Extra(ze)) {
    writeCounted(ZipLong.getBytes(ze.getCompressedSize()));
    writeCounted(ZipLong.getBytes(ze.getSize()));
  } else {
    writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
    writeCounted(ZipEightByteInteger.getBytes(ze.getSize()));
  }
}

28 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com