- 使用 Spring Initializr 创建 Spring Boot 应用程序
- 在 Spring Boot 中配置 Cassandra
- 在 Spring Boot 上配置 Tomcat 连接池
- 将Camel消息路由到嵌入WildFly的Artemis上
本文整理了Java中org.apache.commons.compress.archivers.zip.ZipArchiveEntry.getCompressedSize()
方法的一些代码示例,展示了ZipArchiveEntry.getCompressedSize()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ZipArchiveEntry.getCompressedSize()
方法的具体详情如下:
包路径:org.apache.commons.compress.archivers.zip.ZipArchiveEntry
类名称:ZipArchiveEntry
方法名:getCompressedSize
暂无
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * Tells whether the current entry still has unread compressed bytes
 * in the underlying stream.
 *
 * <p>Only meaningful when the compressed size was included in the
 * local file header, i.e. the entry does not use a data descriptor.</p>
 *
 * @return true if the entry's compressed size is known up front and
 *         fewer than that many bytes have been consumed so far
 */
private boolean currentEntryHasOutstandingBytes() {
    final boolean sizeKnownUpFront = !current.hasDataDescriptor;
    return sizeKnownUpFront
        && current.bytesReadFromStream <= current.entry.getCompressedSize();
}
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * Tells whether either size of the given entry exceeds the largest
 * value a 32-bit ZIP field can represent.
 *
 * <p>NOTE: the method name keeps the upstream spelling ("Lage") so the
 * interface seen by callers is unchanged.</p>
 *
 * @param zipArchiveEntry the entry to inspect
 * @return true if the entry requires Zip64 size fields
 */
private boolean isTooLageForZip32(final ZipArchiveEntry zipArchiveEntry){
    final boolean rawSizeExceeds = zipArchiveEntry.getSize() >= ZIP64_MAGIC;
    final boolean packedSizeExceeds = zipArchiveEntry.getCompressedSize() >= ZIP64_MAGIC;
    return rawSizeExceeds || packedSizeExceeds;
}
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * Consumes and discards any compressed bytes of the current entry that
 * have not been read yet, counting them as read.
 *
 * @throws IOException if reading fails
 * @throws EOFException if the stream ends before the declared
 *         compressed size has been reached (truncated archive)
 */
private void drainCurrentEntryData() throws IOException {
    long outstanding = current.entry.getCompressedSize() - current.bytesReadFromStream;
    while (outstanding > 0) {
        // never ask for more than the scratch buffer can hold
        final int chunk = (int) Math.min(buf.capacity(), outstanding);
        final long consumed = in.read(buf.array(), 0, chunk);
        if (consumed < 0) {
            throw new EOFException("Truncated ZIP entry: "
                + ArchiveUtils.sanitize(current.entry.getName()));
        }
        count(consumed);
        outstanding -= consumed;
    }
}
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * Expose the raw stream of the archive entry (compressed form).
 *
 * <p>This method does not relate to how/if we understand the payload in the
 * stream, since we really only intend to move it on to somewhere else.</p>
 *
 * @param ze The entry to get the stream for
 * @return The raw input stream containing (possibly) compressed data,
 *         or null if the entry was not read from this archive
 * @since 1.11
 */
public InputStream getRawInputStream(final ZipArchiveEntry ze) {
    if (ze instanceof Entry) {
        return createBoundedInputStream(ze.getDataOffset(), ze.getCompressedSize());
    }
    return null;
}
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * Whether to add a Zip64 extended information extra field to the
 * local file header.
 *
 * <p>Returns true if</p>
 *
 * <ul>
 * <li>mode is Always</li>
 * <li>or we already know it is going to be needed</li>
 * <li>or the size is unknown and we can ensure it won't hurt
 * other implementations if we add it (i.e. we can erase its
 * usage later, since output goes to a seekable channel)</li>
 * </ul>
 *
 * @param entry the entry being written
 * @param mode the effective Zip64 mode for this entry
 * @return true if the local file header needs the Zip64 extra field
 */
private boolean shouldAddZip64Extra(final ZipArchiveEntry entry, final Zip64Mode mode) {
    if (mode == Zip64Mode.Always) {
        return true;
    }
    if (entry.getSize() >= ZIP64_MAGIC || entry.getCompressedSize() >= ZIP64_MAGIC) {
        return true;
    }
    final boolean sizeUnknown = entry.getSize() == ArchiveEntry.SIZE_UNKNOWN;
    return sizeUnknown && channel != null && mode != Zip64Mode.Never;
}
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * Builds the central directory record for the given entry.
 *
 * @param ze the entry whose central file header should be created
 * @return the bytes of the central file header
 * @throws IOException on write error
 * @throws Zip64RequiredException if Zip64 data is required but the mode
 *         has been set to Never
 */
private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException {
    final EntryMetaData entryMetaData = metaData.get(ze);
    final boolean zip64Required = zip64Mode == Zip64Mode.Always
        || hasZip64Extra(ze)
        || entryMetaData.offset >= ZIP64_MAGIC
        || ze.getSize() >= ZIP64_MAGIC
        || ze.getCompressedSize() >= ZIP64_MAGIC;
    if (zip64Required && zip64Mode == Zip64Mode.Never) {
        // must be the offset that is too big, otherwise an
        // exception would have been thrown in putArchiveEntry or
        // closeArchiveEntry
        throw new Zip64RequiredException(Zip64RequiredException
            .ARCHIVE_TOO_BIG_MESSAGE);
    }
    handleZip64Extra(ze, entryMetaData.offset, zip64Required);
    return createCentralFileHeader(ze, getName(ze), entryMetaData, zip64Required);
}
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * Whether the compressed size for the entry is either known or
 * not required by the compression method being used.
 *
 * @param entry the entry to check
 * @return true if this stream can process the entry's data without an
 *         up-front compressed size
 */
private boolean supportsCompressedSizeFor(final ZipArchiveEntry entry) {
    if (entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN) {
        return true;
    }
    final int method = entry.getMethod();
    if (method == ZipEntry.DEFLATED || method == ZipMethod.ENHANCED_DEFLATED.getCode()) {
        return true;
    }
    // STORED without a size only works when a data descriptor follows
    // the data and the caller has opted in to that mode
    return method == ZipEntry.STORED
        && allowStoredEntriesWithDataDescriptor
        && entry.getGeneralPurposeBit().usesDataDescriptor();
}
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * If the entry needs Zip64 extra information inside the central
 * directory then configure its data.
 *
 * @param ze the entry whose Zip64 extra field should be configured
 * @param lfhOffset offset of the entry's local file header
 * @param needsZip64Extra whether a Zip64 extra field is required at all
 */
private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset,
    final boolean needsZip64Extra) {
    if (!needsZip64Extra) {
        return;
    }
    final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze);
    final boolean sizesNeedZip64 = zip64Mode == Zip64Mode.Always
        || ze.getCompressedSize() >= ZIP64_MAGIC
        || ze.getSize() >= ZIP64_MAGIC;
    if (sizesNeedZip64) {
        z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
        z64.setSize(new ZipEightByteInteger(ze.getSize()));
    } else {
        // reset values that may have been set for the LFH
        z64.setCompressedSize(null);
        z64.setSize(null);
    }
    if (lfhOffset >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) {
        z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset));
    }
    ze.setExtra();
}
代码示例来源:origin: org.apache.commons/commons-compress
new BufferedInputStream(createBoundedInputStream(start, ze.getCompressedSize())); //NOSONAR
switch (ZipMethod.getMethodByCode(ze.getMethod())) {
case STORED:
代码示例来源:origin: org.apache.commons/commons-compress
&& getSize() == other.getSize()
&& getCrc() == other.getCrc()
&& getCompressedSize() == other.getCompressedSize()
&& Arrays.equals(getCentralDirectoryExtra(),
other.getCentralDirectoryExtra())
代码示例来源:origin: org.apache.commons/commons-compress
if (ze.getCompressedSize() >= ZIP64_MAGIC
|| ze.getSize() >= ZIP64_MAGIC
|| zip64Mode == Zip64Mode.Always) {
ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET);
} else {
putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET);
putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET);
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * Writes the data descriptor entry.
 *
 * <p>A no-op for methods that don't use a data descriptor.</p>
 *
 * @param ze the entry to write
 * @throws IOException on error
 */
protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException {
    if (!usesDataDescriptor(ze.getMethod(), false)) {
        return;
    }
    writeCounted(DD_SIG);
    writeCounted(ZipLong.getBytes(ze.getCrc()));
    if (hasZip64Extra(ze)) {
        // Zip64 entries carry eight-byte size fields
        writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
        writeCounted(ZipEightByteInteger.getBytes(ze.getSize()));
    } else {
        writeCounted(ZipLong.getBytes(ze.getCompressedSize()));
        writeCounted(ZipLong.getBytes(ze.getSize()));
    }
}
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * Adds an archive entry with a raw input stream.
 *
 * If crc, size and compressed size are supplied on the entry, these values will be used as-is.
 * Zip64 status is re-established based on the settings in this stream, and the supplied value
 * is ignored.
 *
 * The entry is put and closed immediately.
 *
 * @param entry The archive entry to add
 * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
 * @throws IOException If copying fails
 */
public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream)
    throws IOException {
    final ZipArchiveEntry copy = new ZipArchiveEntry(entry);
    if (hasZip64Extra(copy)) {
        // Will be re-added as required; this may make the file generated
        // with this method somewhat smaller than standard mode, since
        // standard mode is unable to remove the zip 64 header.
        copy.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
    }
    // crc + both sizes known up front => the entry can be written in one pass
    final boolean is2PhaseSource = copy.getCrc() != ZipArchiveEntry.CRC_UNKNOWN
        && copy.getSize() != ArchiveEntry.SIZE_UNKNOWN
        && copy.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
    putArchiveEntry(copy, is2PhaseSource);
    copyFromZipInputStream(rawStream);
    closeCopiedEntry(is2PhaseSource);
}
代码示例来源:origin: org.apache.commons/commons-compress
ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET);
} else if (phased) {
putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
} else if (zipMethod == DEFLATED || channel != null) {
代码示例来源:origin: org.apache.commons/commons-compress
/**
 * Throws an exception if the size is unknown for a stored entry
 * that is written to a non-seekable output or the entry is too
 * big to be written without Zip64 extra but the mode has been set
 * to Never.
 *
 * @param effectiveMode the Zip64 mode that applies to the current entry
 * @throws ZipException if required size/CRC information is missing
 * @throws Zip64RequiredException if Zip64 is needed but mode is Never
 */
private void validateSizeInformation(final Zip64Mode effectiveMode)
    throws ZipException {
    final ZipArchiveEntry ae = entry.entry;
    // Size/CRC not required if SeekableByteChannel is used
    if (ae.getMethod() == STORED && channel == null) {
        if (ae.getSize() == ArchiveEntry.SIZE_UNKNOWN) {
            throw new ZipException("uncompressed size is required for"
                + " STORED method when not writing to a"
                + " file");
        }
        if (ae.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) {
            throw new ZipException("crc checksum is required for STORED"
                + " method when not writing to a file");
        }
        // STORED data is written verbatim, so both sizes are equal
        ae.setCompressedSize(ae.getSize());
    }
    final boolean tooBigForZip32 = ae.getSize() >= ZIP64_MAGIC
        || ae.getCompressedSize() >= ZIP64_MAGIC;
    if (tooBigForZip32 && effectiveMode == Zip64Mode.Never) {
        throw new Zip64RequiredException(Zip64RequiredException
            .getEntryTooBigMessage(ae));
    }
}
代码示例来源:origin: org.apache.commons/commons-compress
if (z64 != null) {
final boolean hasUncompressedSize = ze.getSize() == ZIP64_MAGIC;
final boolean hasCompressedSize = ze.getCompressedSize() == ZIP64_MAGIC;
final boolean hasRelativeHeaderOffset =
ze.getLocalHeaderOffset() == ZIP64_MAGIC;
ze.setCompressedSize(z64.getCompressedSize().getLongValue());
} else if (hasUncompressedSize) {
z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
代码示例来源:origin: org.apache.commons/commons-compress
writeOut(ZipLong.getBytes(entry.entry.getCrc()));
if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
writeOut(ZipLong.getBytes(entry.entry.getCompressedSize()));
writeOut(ZipLong.getBytes(entry.entry.getSize()));
} else {
writeOut(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()));
代码示例来源:origin: org.apache.commons/commons-compress
compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize());
} else if (entry.entry.getMethod() == STORED
&& entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
代码示例来源:origin: org.apache.commons/commons-compress
if (current.entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN) {
if (ZipUtil.canHandleEntryData(current.entry) && m != ZipMethod.STORED && m != ZipMethod.DEFLATED) {
InputStream bis = new BoundedInputStream(in, current.entry.getCompressedSize());
switch (m) {
case UNSHRINKING:
代码示例来源:origin: com.impetus.fabric/fabric-jdbc-driver-shaded
/**
 * Whether the compressed size for the entry is either known or
 * not required by the compression method being used.
 *
 * @param entry the entry to check
 * @return true if the entry's data can be handled without an up-front
 *         compressed size
 */
private boolean supportsCompressedSizeFor(final ZipArchiveEntry entry) {
    final boolean sizeKnown = entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
    final boolean deflateVariant = entry.getMethod() == ZipEntry.DEFLATED
        || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode();
    final boolean storedWithDescriptor = entry.getMethod() == ZipEntry.STORED
        && allowStoredEntriesWithDataDescriptor
        && entry.getGeneralPurposeBit().usesDataDescriptor();
    return sizeKnown || deflateVariant || storedWithDescriptor;
}
我在网上搜索但没有找到任何合适的文章解释如何使用 javascript 使用 WCF 服务,尤其是 WebScriptEndpoint。 任何人都可以对此给出任何指导吗? 谢谢 最佳答案 这是一篇关于
我正在编写一个将运行 Linux 命令的 C 程序,例如: cat/etc/passwd | grep 列表 |剪切-c 1-5 我没有任何结果 *这里 parent 等待第一个 child (chi
所以我正在尝试处理文件上传,然后将该文件作为二进制文件存储到数据库中。在我存储它之后,我尝试在给定的 URL 上提供文件。我似乎找不到适合这里的方法。我需要使用数据库,因为我使用 Google 应用引
我正在尝试制作一个宏,将下面的公式添加到单元格中,然后将其拖到整个列中并在 H 列中复制相同的公式 我想在 F 和 H 列中输入公式的数据 Range("F1").formula = "=IF(ISE
问题类似于this one ,但我想使用 OperatorPrecedenceParser 解析带有函数应用程序的表达式在 FParsec . 这是我的 AST: type Expression =
我想通过使用 sequelize 和 node.js 将这个查询更改为代码取决于在哪里 select COUNT(gender) as genderCount from customers where
我正在使用GNU bash,版本5.0.3(1)-发行版(x86_64-pc-linux-gnu),我想知道为什么简单的赋值语句会出现语法错误: #/bin/bash var1=/tmp
这里,为什么我的代码在 IE 中不起作用。我的代码适用于所有浏览器。没有问题。但是当我在 IE 上运行我的项目时,它发现错误。 而且我的 jquery 类和 insertadjacentHTMl 也不
我正在尝试更改标签的innerHTML。我无权访问该表单,因此无法编辑 HTML。标签具有的唯一标识符是“for”属性。 这是输入和标签的结构:
我有一个页面,我可以在其中返回用户帖子,可以使用一些 jquery 代码对这些帖子进行即时评论,在发布新评论后,我在帖子下插入新评论以及删除 按钮。问题是 Delete 按钮在新插入的元素上不起作用,
我有一个大约有 20 列的“管道分隔”文件。我只想使用 sha1sum 散列第一列,它是一个数字,如帐号,并按原样返回其余列。 使用 awk 或 sed 执行此操作的最佳方法是什么? Accounti
我需要将以下内容插入到我的表中...我的用户表有五列 id、用户名、密码、名称、条目。 (我还没有提交任何东西到条目中,我稍后会使用 php 来做)但由于某种原因我不断收到这个错误:#1054 - U
所以我试图有一个输入字段,我可以在其中输入任何字符,但然后将输入的值小写,删除任何非字母数字字符,留下“。”而不是空格。 例如,如果我输入: 地球的 70% 是水,-!*#$^^ & 30% 土地 输
我正在尝试做一些我认为非常简单的事情,但出于某种原因我没有得到想要的结果?我是 javascript 的新手,但对 java 有经验,所以我相信我没有使用某种正确的规则。 这是一个获取输入值、检查选择
我想使用 angularjs 从 mysql 数据库加载数据。 这就是应用程序的工作原理;用户登录,他们的用户名存储在 cookie 中。该用户名显示在主页上 我想获取这个值并通过 angularjs
我正在使用 autoLayout,我想在 UITableViewCell 上放置一个 UIlabel,它应该始终位于单元格的右侧和右侧的中心。 这就是我想要实现的目标 所以在这里你可以看到我正在谈论的
我需要与 MySql 等效的 elasticsearch 查询。我的 sql 查询: SELECT DISTINCT t.product_id AS id FROM tbl_sup_price t
我正在实现代码以使用 JSON。 func setup() { if let flickrURL = NSURL(string: "https://api.flickr.com/
我尝试使用for循环声明变量,然后测试cols和rols是否相同。如果是,它将运行递归函数。但是,我在 javascript 中执行 do 时遇到问题。有人可以帮忙吗? 现在,在比较 col.1 和
我举了一个我正在处理的问题的简短示例。 HTML代码: 1 2 3 CSS 代码: .BB a:hover{ color: #000; } .BB > li:after {
我是一名优秀的程序员,十分优秀!