
java - Does a Wikipedia dump grow this large in Lucene?


Indexing the Wikipedia dump, a 35 GB XML file, is turning out to be huge in Lucene (see the code below for an overview). After 100 articles, with no stored fields, I am already using about 35 MB of index space, which means that for roughly 3.5 million articles I would need more than 1.2 TB! How can that be, when the original file is only a fraction of that size? Is the parsing package inefficient, or does Lucene simply have very large index storage requirements? Am I overlooking something?

The only explanation I can think of is that the first 100 Wikipedia articles happen to be very large and are not representative of a typical article, in which case I would be vastly overestimating. Can anyone tell me how large their dump turned out when indexed with Lucene, and how long it took?

Here is the code:

package example;

import edu.jhu.nlp.wikipedia.*;
import edu.jhu.nlp.language.Language;

import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Date;
import java.util.Vector;

public class WikipediaTextFacetIndexer {

    // log4j logger used throughout this class (assumed; its declaration is not part of the original excerpt)
    private static final Logger logger = Logger.getLogger(WikipediaTextFacetIndexer.class);

    protected String facetName = null;
    protected ConfigurationManager configManager = null;
    protected String contentFolder = null;
    protected String indexFolder = null;
    private static int counter = 0;

    public void index() {

        // true creates a new index / false updates the existing index
        boolean create = false;

        // check if data directory exists
        logger.debug("wikipedia dump file = " + contentFolder);
        final File wikipediaDumpFile = new File(contentFolder);
        if (!wikipediaDumpFile.exists() || !wikipediaDumpFile.canRead()) {
            logger.error("Wikipedia dump file '" + wikipediaDumpFile.getAbsolutePath()
                    + "' does not exist or is not readable, please check the path. ");
            System.exit(1);
        }

        // to calculate indexing time as a performance measure
        Date start = new Date();

        try {
            logger.debug("Indexing to directory '" + this.indexFolder + "'...");

            Directory dir = FSDirectory.open(new File(this.indexFolder));
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_42);
            IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_42, analyzer);

            if (create) {
                // Create new index, remove previous index
                logger.debug("Creating a new index in directory: '" + this.indexFolder + "'...");
                iwc.setOpenMode(OpenMode.CREATE);
            } else {
                // Add new documents to existing index
                logger.debug("Updating the index in directory: '" + this.indexFolder + "'...");
                iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
            }

            // index
            IndexWriter writer = new IndexWriter(dir, iwc);
            indexDocuments(writer, wikipediaDumpFile);
            writer.close();

            // time stamping
            Date end = new Date();
            logger.debug("Indexing time: " + (end.getTime() - start.getTime()) + " total milliseconds for " + WikipediaTextFacetIndexer.counter + " articles.");

        } catch (IOException e) {
            logger.error("Exception: " + e.getMessage());
        }
    }

    /**
     * Indexes individual pages from the wikipedia dump with a set of configured IndexFieldGenerators.
     *
     * @param writer A writing handle to the index
     * @param file   The file to be indexed
     * @throws IOException
     */
    private void indexDocuments(final IndexWriter writer, File file) throws IOException {

        // reset the file counter
        WikipediaTextFacetIndexer.counter = 0;

        // do not try to index files that cannot be read
        if (file.canRead()) {

            if (file.isDirectory()) {
                String[] files = file.list();

                // an IO error could occur
                if (files != null) {
                    for (int i = 0; i < files.length; i++) {
                        indexDocuments(writer, new File(file, files[i]));
                    }
                }

            } else {

                FileInputStream fis;
                try {
                    fis = new FileInputStream(file);
                } catch (FileNotFoundException fnfe) {
                    // at least on windows, some temporary files raise this exception with an "access denied" message
                    // checking if the file can be read doesn't help
                    return;
                }

                try {

                    // create a new, empty document
                    final Document doc = new Document();

                    // access wikipedia dump file
                    WikiXMLParser wxsp = WikiXMLParserFactory.getSAXParser(file.getAbsolutePath());

                    try {
                        wxsp.setPageCallback(new PageCallbackHandler() {
                            public void process(WikiPage page) {

                                if (page.isRedirect() || page.isDisambiguationPage() || page.isSpecialPage() || page.isStub()) {
                                    logger.info("- Excluding Redirection / Disambiguation / Special / Stub Wikipedia page id " + page.getID() + " about '" + page.getTitle().trim() + "'");
                                    return;
                                }

                                // facetid is wikipedia ID
                                // check if docId was read successfully, stop if not
                                if (page.getID() == null || page.getID().length() == 0) {
                                    logger.error("Facet Id unknown for wikipedia article '" + page.getTitle() + "'. Nothing done.");
                                    return;
                                }

                                // id
                                doc.add(new IntField("id", new Integer(page.getID()), Field.Store.NO));
                                logger.info("id: " + page.getID());

                                // title
                                doc.add(new TextField("title", page.getTitle().trim(), Field.Store.NO));
                                logger.info("title: " + page.getTitle());

                                // text
                                doc.add(new TextField("text", page.getText().trim(), Field.Store.NO));
                                logger.info("TEXT: " + page.getText());

                                // original wikipedia text --- excludes some Wiki markup
                                //doc.add(new TextField("wikitext", page.getWikiText(), Field.Store.YES));

                                // infobox
                                InfoBox infoBox = page.getInfoBox();
                                if (infoBox != null) {
                                    doc.add(new TextField("infobox", page.getInfoBox().dumpRaw(), Field.Store.NO));
                                    logger.info("infobox: " + page.getInfoBox().dumpRaw());
                                }

                                // links
                                Vector links = page.getLinks();
                                String linksString = "";
                                for (int i = 0; i < links.size(); i++) {
                                    linksString = linksString + links.get(i);
                                    if (i < (links.size() - 1)) {
                                        linksString = linksString + ";";
                                    }
                                }
                                doc.add(new TextField("links", linksString.trim(), Field.Store.NO));
                                logger.info("links: " + linksString.trim());

                                // categories
                                Vector categories = page.getCategories();
                                String categoriesString = "";
                                for (int i = 0; i < categories.size(); i++) {
                                    categoriesString = categoriesString + categories.get(i);
                                    if (i < (categories.size() - 1)) {
                                        categoriesString = categoriesString + ";";
                                    }
                                }
                                doc.add(new TextField("categories", categoriesString.trim(), Field.Store.NO));
                                logger.info("cat: " + categoriesString.trim());

                                // redirect page
                                if (page.getRedirectPage() != null) {
                                    doc.add(new TextField("redirectPage", page.getRedirectPage(), Field.Store.NO));
                                    logger.info("redirect: " + page.getRedirectPage());
                                }

                                // translated titles for French, German and Spanish
                                if (page.getTranslatedTitle(Language.FRENCH) != null) {
                                    doc.add(new TextField("translatedTitleFR", page.getTranslatedTitle(Language.FRENCH), Field.Store.NO));
                                    logger.info("translate: " + page.getTranslatedTitle(Language.FRENCH));
                                }
                                if (page.getTranslatedTitle(Language.GERMAN) != null) {
                                    doc.add(new TextField("translatedTitleFR", page.getTranslatedTitle(Language.GERMAN), Field.Store.NO));
                                    logger.info("translate: " + page.getTranslatedTitle(Language.GERMAN));
                                }
                                if (page.getTranslatedTitle(Language.SPANISH) != null) {
                                    doc.add(new TextField("translatedTitleFR", page.getTranslatedTitle(Language.SPANISH), Field.Store.NO));
                                    logger.info("translate: " + page.getTranslatedTitle(Language.SPANISH));
                                }

                                // write document to index
                                try {
                                    logger.debug("[" + WikipediaTextFacetIndexer.counter + "] + Adding Wikipedia page id " + page.getID() + " about '" + page.getTitle().trim() + "'");
                                    writer.addDocument(doc);
                                    WikipediaTextFacetIndexer.counter++;

                                    // just build a small index with 5000 concepts first!!! Remove later !!!
                                    if (WikipediaTextFacetIndexer.counter == 100) {
                                        writer.commit();
                                        writer.close();
                                        System.exit(0);
                                    }

                                } catch (Exception e) {
                                    logger.error("Exception while writing index: " + e.getMessage());
                                }
                            }

                        });

                        wxsp.parse();
                    } catch (Exception e) {
                        e.printStackTrace();
                    }

                } finally {
                    fis.close();
                }
            }
        }

        return;
    }

}

Best Answer

I spotted a bug in your code. When you write a document, you never remove the fields from it, so each time you add a document it still contains the fields of all previous articles in addition to the new one, because nothing is ever removed. That is why all your results look the same and each indexed document carries an enormous number of fields (number of fields per article × number of articles).
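
A minimal sketch of the effect (assuming the Lucene 4.x API used in the question and an already-open IndexWriter named writer): addDocument never clears the Document it is given, so a reused instance keeps accumulating fields:

Document doc = new Document();
doc.add(new TextField("title", "Article 1", Field.Store.NO));
writer.addDocument(doc);                      // indexed with one title field
doc.add(new TextField("title", "Article 2", Field.Store.NO));
writer.addDocument(doc);                      // indexed with BOTH title fields
System.out.println(doc.getFields().size());   // prints 2 -- the first field was never removed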

To fix it, I added

doc.removeField("id");
doc.removeField("title");
doc.removeField("text");
doc.removeField("wikitext");

after

writer.addDocument(doc);
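
An alternative fix (a sketch under the same Lucene 4.x assumptions, not the exact change above) is to create a fresh Document inside the page callback, so every article starts from an empty document; the writer parameter is already final in the question's code, so it is visible inside the anonymous handler:

public void process(WikiPage page) {
    Document doc = new Document();   // new, empty document for each article
    doc.add(new IntField("id", Integer.parseInt(page.getID()), Field.Store.NO));
    doc.add(new TextField("title", page.getTitle().trim(), Field.Store.NO));
    doc.add(new TextField("text", page.getText().trim(), Field.Store.NO));
    try {
        writer.addDocument(doc);     // only this article's fields are written
    } catch (Exception e) {
        logger.error("Exception while writing index: " + e.getMessage());
    }
}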

Edit: I just finished indexing the full 8 GB Spanish dump with only the id, title (stored), wikitext (stored) and infobox fields. It is only 4.8 GB.
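
For reference, the difference between an indexed-only field and a stored field in that setup looks like this (a sketch; storing the original text makes it retrievable from search results at the cost of a larger index):

// indexed only: searchable, but the original text is not kept in the index
doc.add(new TextField("text", page.getText().trim(), Field.Store.NO));

// indexed and stored: searchable AND retrievable from hits (larger index)
doc.add(new TextField("wikitext", page.getWikiText(), Field.Store.YES));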

Regarding "java - Does a Wikipedia dump grow this large in Lucene?", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/24163230/
