There is a problem with my code. I am trying to iterate through a Drive folder containing many PDFs and merge them into a single file. When I run my code, it only creates a PDF from the last PDF in the Drive folder instead of merging them all together as intended.
function MergeFiles(){
  var folder = DocsList.getFolderById('myFolderID');
  var files = folder.getFiles();
  var blobs = [];
  for( var i in files )
    blobs.push(files[i].getBlob().getBytes()); // only this statement is inside the loop
  Logger.log(blobs.push(files[i].getBlob().getBytes())); // runs once after the loop, pushing the last file's bytes again
  // pop() takes only the last entry, so only the final PDF's bytes end up in the new file
  var myPDF = Utilities.newBlob(blobs.pop(), "application/pdf", "newPDF.pdf");
  folder.createFile(myPDF);
}
Best Answer
So, it's not as simple as just combining the data from each file. Each file's actual usable data is "packaged" with markup and other structural code (loosely similar to HTML and other document formats). You effectively have to decode each PDF file, combine the necessary parts, and then re-encode them with new "packaging". That takes a working knowledge of the PDF specification and file structure, available free from Adobe here.
I used that information to write a script that does enough for my needs. It does not, however, account for every possibility, so in particular, merging any documents that require PDF 1.4 or later would take quite a bit more work.
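For orientation, here is the structure the script has to parse and rebuild. A minimal PDF is a header, a sequence of numbered objects, a cross-reference (xref) table giving each object's byte offset, and a trailer pointing at the catalog and at the xref table (the offsets below are illustrative, not computed):

%PDF-1.3
1 0 obj
<</Type/Catalog/Pages 2 0 R>>
endobj
2 0 obj
<</Type/Pages/Count 1/Kids [3 0 R]>>
endobj
3 0 obj
<</Type/Page/Parent 2 0 R/MediaBox [0 0 612 792]>>
endobj
xref
0 4
0000000000 65535 f
0000000010 00000 n
0000000058 00000 n
0000000113 00000 n
trailer
<</Size 4 /Root 1 0 R>>
startxref
178
%%EOF

Concatenating two such files byte-for-byte leaves two headers, two xref tables, and colliding object numbers, which is why the script below renumbers every object and rebuilds a single xref table and trailer for the merged document.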
/**
* Merges all given PDF files into one.
*
* @param {Folder} directory the folder to store the output file
* @param {string} name the desired name of the output file
* @param {File} pdf1 the first PDF file
* @param {File} pdf2 the second PDF file
* @param {File} opt_pdf3 [optional] the third PDF file; add as many more as you like
*
* @return {File} the merged file
*/
function mergePdfs(directory, name, pdf1, pdf2, opt_pdf3) {
if (name.slice(-4) != '.pdf') {
name = name + '.pdf';
}
var newObjects = ['1 0 obj\r\n<</Type/Catalog/Pages 2 0 R >>\r\nendobj'];
var pageAddresses = [];
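// Process each input PDF in turn; arguments 0 and 1 are the destination folder and the output name.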
for (var argumentIndex = 2; argumentIndex < arguments.length; argumentIndex++) {
var bytes = arguments[argumentIndex].getBlob().getBytes();
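// Scan backwards from the end of the file until the 'startxref' keyword appears;
// the number that follows it is the byte offset of the cross-reference table.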
var xrefByteOffset = '';
var byteIndex = bytes.length - 1;
while (!/\sstartxref\s/.test(xrefByteOffset)) {
xrefByteOffset = String.fromCharCode(bytes[byteIndex]) + xrefByteOffset;
byteIndex--;
}
xrefByteOffset = +(/\s\d+\s/.exec(xrefByteOffset)[0]);
var objectByteOffsets = [];
var trailerDictionary = '';
var rootAddress = '';
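// Walk the chain of xref sections (incrementally updated PDFs link back to older
// sections via /Prev), collecting every object's byte offset and, on the first
// pass, the address of the /Root (catalog) object.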
do {
var xrefTable = '';
var trailerEndByteOffset = byteIndex;
byteIndex = xrefByteOffset;
for (byteIndex; byteIndex <= trailerEndByteOffset; byteIndex++) {
xrefTable = xrefTable + String.fromCharCode(bytes[byteIndex]);
}
xrefTable = xrefTable.split(/\s*trailer\s*/);
trailerDictionary = xrefTable[1];
if (objectByteOffsets.length < 1) {
rootAddress = /\d+\s+\d+\s+R/.exec(/\/Root\s*\d+\s+\d+\s+R/.exec(trailerDictionary)[0])[0].replace('R', 'obj');
}
xrefTable = xrefTable[0].split('\n');
xrefTable.shift();
while (xrefTable.length > 0) {
var xrefSectionHeader = xrefTable.shift().split(/\s+/);
var objectNumber = +xrefSectionHeader[0];
var numberObjects = +xrefSectionHeader[1];
for (var entryIndex = 0; entryIndex < numberObjects; entryIndex++) {
var entry = xrefTable.shift().split(/\s+/);
objectByteOffsets.push([[objectNumber, +entry[1], 'obj'], +entry[0]]);
objectNumber++;
}
}
if (/\s*\/Prev/.test(trailerDictionary)) {
xrefByteOffset = +(/\s*\d+\s/.exec(/\s*\/Prev\s*\d+\s/.exec(trailerDictionary)[0])[0]);
}
} while (/\s*\/Prev/.test(trailerDictionary));
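// Resolve the document catalog, then its page tree, then every object the page tree references.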
var rootObject = getObject(rootAddress, objectByteOffsets, bytes);
var pagesAddress = /\d+\s+\d+\s+R/.exec(/\/Pages\s*\d+\s+\d+\s+R/.exec(rootObject)[0])[0].replace('R', 'obj');
var pagesObject = getObject(pagesAddress, objectByteOffsets, bytes);
var objects = getDependencies(pagesObject, objectByteOffsets, bytes);
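// Renumber the collected objects; numbers 1 through 3 are reserved for the new
// catalog, page tree, and info dictionary. Page objects get their /Parent
// repointed at the new page tree (2 0 R) and are recorded for the /Kids array.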
var newObjectsInsertionIndex = newObjects.length;
for (var objectIndex = 0; objectIndex < objects.length; objectIndex++) {
var newObjectAddress = [(newObjects.length + 3) + '', 0 + '', 'obj'];
if (!Array.isArray(objects[objectIndex])) {
objects[objectIndex] = [objects[objectIndex]];
}
objects[objectIndex].unshift(newObjectAddress);
var objectAddress = objects[objectIndex][1].match(/\d+\s+\d+\s+obj/)[0].split(/\s+/);
objects[objectIndex].splice(1, 0, objectAddress);
if (/\/Type\s*\/Page[^s]/.test(objects[objectIndex][2])) {
objects[objectIndex][2] = objects[objectIndex][2].replace(/\/Parent\s*\d+\s+\d+\s+R/.exec(objects[objectIndex][2])[0], '/Parent 2 0 R');
pageAddresses.push(newObjectAddress.join(' ').replace('obj', 'R'));
}
var addressRegExp = new RegExp(objectAddress[0] + '\\s+' + objectAddress[1] + '\\s+' + 'obj');
objects[objectIndex][2] = objects[objectIndex][2].replace(addressRegExp.exec(objects[objectIndex][2])[0], newObjectAddress.join(' '));
newObjects.push(objects[objectIndex]);
}
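// Rewrite every indirect reference ('N G R') inside the renumbered objects to
// point at the new object numbers, shifting later match positions whenever a
// replacement changes the string length.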
for (var referencingObjectIndex = newObjectsInsertionIndex; referencingObjectIndex < newObjects.length; referencingObjectIndex++) {
var references = newObjects[referencingObjectIndex][2].match(/\d+\s+\d+\s+R/g);
if (references != null) {
var string = newObjects[referencingObjectIndex][2];
var referenceIndices = [];
var currentIndex = 0;
for (var referenceIndex = 0; referenceIndex < references.length; referenceIndex++) {
referenceIndices.push([]);
referenceIndices[referenceIndex].push(string.slice(currentIndex).indexOf(references[referenceIndex]) + currentIndex);
referenceIndices[referenceIndex].push(references[referenceIndex].length);
currentIndex += string.slice(currentIndex).indexOf(references[referenceIndex]);
}
for (var referenceIndex = 0; referenceIndex < references.length; referenceIndex++) {
var objectAddress = references[referenceIndex].replace('R', 'obj').split(/\s+/);
for (var objectIndex = newObjectsInsertionIndex; objectIndex < newObjects.length; objectIndex++) {
if (arrayEquals(objectAddress, newObjects[objectIndex][1])) {
var length = string.length;
newObjects[referencingObjectIndex][2] = string.slice(0, referenceIndices[referenceIndex][0]) + newObjects[objectIndex][0].join(' ').replace('obj', 'R') +
string.slice(referenceIndices[referenceIndex][0] + referenceIndices[referenceIndex][1]);
string = newObjects[referencingObjectIndex][2];
var newLength = string.length;
if (!(length == newLength)) {
for (var subsequentReferenceIndex = referenceIndex + 1; subsequentReferenceIndex < references.length; subsequentReferenceIndex++) {
referenceIndices[subsequentReferenceIndex][0] += (newLength - length);
}
}
break;
}
}
}
}
}
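// Flatten the bookkeeping: stream objects keep [body string, raw stream bytes],
// everything else collapses back to its body string.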
for (var objectIndex = newObjectsInsertionIndex; objectIndex < newObjects.length; objectIndex++) {
if (Array.isArray(newObjects[objectIndex])) {
if (newObjects[objectIndex][3] != undefined) {
newObjects[objectIndex] = newObjects[objectIndex].slice(2);
} else {
newObjects[objectIndex] = newObjects[objectIndex][2];
}
}
}
}
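// Splice in the new page tree (2 0 obj, listing every merged page in /Kids) and
// an info dictionary (3 0 obj) carrying the title and creation/modification dates.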
newObjects.splice(1, 0, '2 0 obj\r\n<</Type/Pages/Count ' + pageAddresses.length + ' /Kids [' + pageAddresses.join(' ') + ' ]>>\r\nendobj');
newObjects.splice(2, 0, '3 0 obj\r\n<</Title (' + name + ') /CreationDate (D' +
Utilities.formatDate(new Date(), CalendarApp.getDefaultCalendar().getTimeZone(), 'yyyyMMddHHmmssZ').slice(0, -2) + "'00) /ModDate (D" + Utilities.formatDate(new Date(),
CalendarApp.getDefaultCalendar().getTimeZone(), 'yyyyMMddHHmmssZ').slice(0, -2) + "'00)>>\r\nendobj");
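// Serialize the merged document: the PDF header, then a comment line of four
// bytes above 127 that marks the file as binary for transfer tools.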
var byteOffsets = [0];
var bytes = [];
var header = '%PDF-1.3\r\n';
for (var headerIndex = 0; headerIndex < header.length; headerIndex++) {
bytes.push(header.charCodeAt(headerIndex));
}
bytes.push('%'.charCodeAt(0));
for (var characterCode = -127; characterCode < -123; characterCode++) {
bytes.push(characterCode);
}
bytes.push('\r'.charCodeAt(0));
bytes.push('\n'.charCodeAt(0));
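// Write each object, recording its byte offset for the xref table; stream
// objects get their raw bytes spliced in right after the 'stream' keyword.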
while (newObjects.length > 0) {
byteOffsets.push(bytes.length);
var object = newObjects.shift();
if (Array.isArray(object)) {
var streamKeyword = /stream\s*\n/.exec(object[0])[0];
if (streamKeyword.indexOf('\n\n') > streamKeyword.length - 3) {
streamKeyword = streamKeyword.slice(0, -1);
} else if (streamKeyword.indexOf('\r\n\r\n') > streamKeyword.length - 5) {
streamKeyword = streamKeyword.slice(0, -2);
}
var streamIndex = object[0].indexOf(streamKeyword) + streamKeyword.length;
for (var objectIndex = 0; objectIndex < streamIndex; objectIndex++) {
bytes.push(object[0].charCodeAt(objectIndex));
}
bytes = bytes.concat(object[1]);
for (var objectIndex = streamIndex; objectIndex < object[0].length; objectIndex++) {
bytes.push(object[0].charCodeAt(objectIndex));
}
} else {
for (var objectIndex = 0; objectIndex < object.length; objectIndex++) {
bytes.push(object.charCodeAt(objectIndex));
}
}
bytes.push('\r'.charCodeAt(0));
bytes.push('\n'.charCodeAt(0));
}
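// Emit the cross-reference table: one 10-digit offset and 5-digit generation per
// entry. Entry 0 is the conventional free-list head (generation 65535, flag 'f');
// real objects are marked in use (flag 'n').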
var xrefByteOffset = bytes.length;
var xrefHeader = 'xref\r\n';
for (var xrefHeaderIndex = 0; xrefHeaderIndex < xrefHeader.length; xrefHeaderIndex++) {
bytes.push(xrefHeader.charCodeAt(xrefHeaderIndex));
}
var xrefSectionHeader = '0 ' + byteOffsets.length + '\r\n';
for (var xrefSectionHeaderIndex = 0; xrefSectionHeaderIndex < xrefSectionHeader.length; xrefSectionHeaderIndex++) {
bytes.push(xrefSectionHeader.charCodeAt(xrefSectionHeaderIndex));
}
for (var byteOffsetIndex = 0; byteOffsetIndex < byteOffsets.length; byteOffsetIndex++) {
for (var byteOffsetStringIndex = 0; byteOffsetStringIndex < 10; byteOffsetStringIndex++) {
bytes.push(Utilities.formatString('%010d', byteOffsets[byteOffsetIndex]).charCodeAt(byteOffsetStringIndex));
}
bytes.push(' '.charCodeAt(0));
if (byteOffsetIndex == 0) {
for (var generationStringIndex = 0; generationStringIndex < 5; generationStringIndex++) {
bytes.push('65535'.charCodeAt(generationStringIndex));
}
for (var keywordIndex = 0; keywordIndex < 2; keywordIndex++) {
bytes.push(' f'.charCodeAt(keywordIndex));
}
} else {
for (var generationStringIndex = 0; generationStringIndex < 5; generationStringIndex++) {
bytes.push('0'.charCodeAt(0));
}
for (var keywordIndex = 0; keywordIndex < 2; keywordIndex++) {
bytes.push(' n'.charCodeAt(keywordIndex));
}
}
bytes.push('\r'.charCodeAt(0));
bytes.push('\n'.charCodeAt(0));
}
for (var trailerHeaderIndex = 0; trailerHeaderIndex < 9; trailerHeaderIndex++) {
bytes.push('trailer\r\n'.charCodeAt(trailerHeaderIndex));
}
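// Finish with the trailer; the /ID entry is an MD5 digest of the current date,
// which only needs to be reasonably unique, not cryptographically strong.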
var idBytes = Utilities.computeDigest(Utilities.DigestAlgorithm.MD5, (new Date).toString());
var id = '';
for (var idByteIndex = 0; idByteIndex < idBytes.length; idByteIndex++) {
id = id + ('0' + (idBytes[idByteIndex] & 0xFF).toString(16)).slice(-2);
}
var trailer = '<</Size ' + (byteOffsets.length) + ' /Root 1 0 R /Info 3 0 R /ID [<' + id + '> <' + id + '>]>>\r\nstartxref\r\n' + xrefByteOffset + '\r\n%%EOF';
for (var trailerIndex = 0; trailerIndex < trailer.length; trailerIndex++) {
bytes.push(trailer.charCodeAt(trailerIndex));
}
return directory.createFile(Utilities.newBlob(bytes, 'application/pdf', name));
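// getObject: given an address like '12 0 obj', look up the object's byte offset
// and read its text up to 'endobj'. For stream objects, also slice out the raw
// stream bytes (resolving an indirect /Length if needed) and return [text, bytes].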
function getObject(objectAddress, objectByteOffsets, bytes) {
objectAddress = objectAddress.split(/\s+/);
for (var addressIndex = 0; addressIndex < 2; addressIndex++) {
objectAddress[addressIndex] = +objectAddress[addressIndex];
}
var object = [];
var byteIndex = 0;
for (var offsetIndex = 0; offsetIndex < objectByteOffsets.length; offsetIndex++) {
var offset = objectByteOffsets[offsetIndex];
if (arrayEquals(objectAddress, offset[0])) {
byteIndex = offset[1];
break;
}
}
object.push('');
while (object[0].indexOf('endobj') <= -1) {
if (/stream\s*\n/.test(object[0])) {
var streamLength;
var lengthFinder = object[0].slice(object[0].indexOf(/\/Length/.exec(object[0])[0]));
if (/\/Length\s*\d+\s+\d+\s+R/.test(lengthFinder)) {
var lengthObjectAddress = /\d+\s+\d+\s+R/.exec(/\/Length\s*\d+\s+\d+\s+R/.exec(lengthFinder)[0])[0].split(/\s+/);
lengthObjectAddress[2] = 'obj';
for (var addressIndex = 0; addressIndex < 2; addressIndex++) {
lengthObjectAddress[addressIndex] = +lengthObjectAddress[addressIndex];
}
var lengthObject = '';
var lengthByteIndex = 0;
for (var lengthOffsetIndex = 0; lengthOffsetIndex < objectByteOffsets.length; lengthOffsetIndex++) {
var offset = objectByteOffsets[lengthOffsetIndex];
if (arrayEquals(lengthObjectAddress, offset[0])) {
lengthByteIndex = offset[1];
break;
}
}
while (lengthObject.indexOf('endobj') <= -1) {
lengthObject = lengthObject + String.fromCharCode(bytes[lengthByteIndex]);
lengthByteIndex++;
}
streamLength = +(lengthObject.match(/obj\s*\n\s*\d+\s*\n\s*endobj/)[0].match(/\d+/)[0]);
} else {
streamLength = +(/\d+/.exec(lengthFinder)[0]);
}
var streamBytes = bytes.slice(byteIndex, byteIndex + streamLength);
object.push(streamBytes);
byteIndex += streamLength;
while (object[0].indexOf('endobj') <= -1) {
object[0] = object[0] + String.fromCharCode(bytes[byteIndex]);
byteIndex++;
}
return object;
}
object[0] = object[0] + String.fromCharCode(bytes[byteIndex]);
byteIndex++;
}
return object[0];
}
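// arrayEquals: element-wise (recursive) comparison, used to match object addresses.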
function arrayEquals(array1, array2) {
if (array1 == array2) {
return true;
}
if (array1 == null && array2 == null) {
return true;
} else if (array1 == null || array2 == null) {
return false;
}
if (array1.length != array2.length) {
return false;
}
for (var index = 0; index < array1.length; index++) {
if (Array.isArray(array1[index])) {
if (!arrayEquals(array1[index], array2[index])) {
return false;
}
continue;
}
if (array1[index] != array2[index]) {
return false;
}
}
return true;
}
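// getDependencies: recursively collect every object reachable from the given one,
// skipping /Parent references so the walk doesn't climb back up the page tree.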
function getDependencies(objectString, objectByteOffsets, bytes) {
var dependencies = [];
var references = objectString.match(/\d+\s+\d+\s+R/g);
if (references != null) {
while (references.length > 0) {
if (/\/Parent/.test(objectString.slice(objectString.indexOf(references[0]) - 8, objectString.indexOf(references[0])))) {
references.shift();
continue;
}
var dependency = getObject(references.shift().replace('R', 'obj'), objectByteOffsets, bytes);
var dependencyExists = false;
for (var entryIndex = 0; entryIndex < dependencies.length; entryIndex++) {
var entry = dependencies[entryIndex];
dependencyExists = (arrayEquals(dependency, entry)) ? true : dependencyExists;
}
if (!dependencyExists) {
dependencies.push(dependency);
}
if (Array.isArray(dependency)) {
dependencies = dependencies.concat(getDependencies(dependency[0], objectByteOffsets, bytes));
} else {
dependencies = dependencies.concat(getDependencies(dependency, objectByteOffsets, bytes));
}
}
}
return dependencies;
}
}
To aim the function at a folder and merge everything in it (as in the original question) instead of passing each file as an argument, replace this header and argument loop:

function mergePdfs(directory, name, pdf1, pdf2, opt_pdf3) {
  if (name.slice(-4) != '.pdf') {
    name = name + '.pdf';
  }
  var newObjects = ['1 0 obj\r\n<</Type/Catalog/Pages 2 0 R >>\r\nendobj'];
  var pageAddresses = [];
  for (var argumentIndex = 2; argumentIndex < arguments.length; argumentIndex++) {
    var bytes = arguments[argumentIndex].getBlob().getBytes();

with this:

function mergePdfs(directory, name) {
  if (name.slice(-4) != '.pdf') {
    name = name + '.pdf';
  }
  var newObjects = ['1 0 obj\r\n<</Type/Catalog/Pages 2 0 R >>\r\nendobj'];
  var pageAddresses = [];
  var files = directory.getFiles();
  for (var fileIndex = 0; fileIndex < files.length; fileIndex++) {
    var bytes = files[fileIndex].getBlob().getBytes();
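DocsList, which both the question and this script rely on, has long since been deprecated. As a sketch only, assuming the modern DriveApp service (the folder ID and output name below are placeholders), a driver could collect a folder's PDFs and call the argument-list version of mergePdfs:

function mergeFolder() {
  // Hypothetical folder ID; replace with your own.
  var folder = DriveApp.getFolderById('myFolderID');
  var iterator = folder.getFilesByType(MimeType.PDF);
  var pdfs = [];
  while (iterator.hasNext()) {
    pdfs.push(iterator.next());
  }
  // mergePdfs takes its input files as trailing arguments, so pass the
  // collected files with Function.prototype.apply.
  var merged = mergePdfs.apply(null, [folder, 'merged.pdf'].concat(pdfs));
  Logger.log('Merged PDF: ' + merged.getUrl());
}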
Regarding pdf - merging multiple PDFs into a single PDF, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/15414077/