
java - Making multi-valued field scoring in Lucene aware of the count of field values


I am trying to associate a list of (multi-word) tags with each document, so for each document I add several StringField entries, all with "tag" as the field name.

When searching, I expect the score to be proportional to the fraction of tags I match, for example:

  • 0.5 if I match half of the tags
  • 1.0 if I match all of them.

But the score does not seem to take the number of tags into account.

Testing with these four documents:

 - tags.put("doc1", "piano, electric guitar, violon");

- tags.put("doc2", "piano, electric guitar");

- tags.put("doc3", "piano");

- tags.put("doc4", "electric guitar");

Here is what I get:

- Score : 1.0
  Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc4> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
- Score : 1.0
  Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc2> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
- Score : 1.0
  Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc1> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:violon>>

How can I change this behavior? Am I missing the proper way to do this?

My test code is below.

Best regards,

Renaud

public class LuceneQueryTest {

    Analyzer analyzer;
    BasicIndex basicIndex;
    LinkedList<String> phrases;
    Query query;
    Map<Document, Float> results;

    @Test
    public void testListOfTags() throws Exception {

        analyzer = new StandardAnalyzer();
        basicIndex = new BasicIndex(analyzer);

        Map<String, String> tags = new HashMap<>();
        tags.put("doc1", "piano, electric guitar, violon");
        tags.put("doc2", "piano, electric guitar");
        tags.put("doc3", "piano");
        tags.put("doc4", "electric guitar");

        Queue<String> queue = new LinkedList<>();
        queue.addAll(tags.keySet());

        basicIndex.index(new Supplier<Document>() {

            @Override
            public Document get() {
                Document doc = new Document();

                if (queue.isEmpty()) {
                    return null;
                }

                String docName = queue.poll();
                System.out.println("**** " + docName);

                String tag = tags.get(docName);
                doc.add(new StringField("id", docName, Field.Store.YES));

                for (String tagItem : tag.split("\\,")) {
                    System.out.println(tagItem);
                    Field tagField = new StringField("tag", tagItem, Field.Store.YES);
                    System.out.println(tagField);
                    doc.add(tagField);
                }
                return doc;
            }
        });

        BooleanQuery booleanQuery = new BooleanQuery();
        //booleanQuery.add(new TermQuery(new Term("tag", "piano")), BooleanClause.Occur.SHOULD);
        booleanQuery.add(new TermQuery(new Term("tag", "electric guitar")), BooleanClause.Occur.SHOULD);

        //Query parsedQuery = new QueryParser("tag", analyzer).parse("tag:\"electric guitar\"");
        query = booleanQuery;
        //query = parsedQuery;

        System.out.println(query);

        results = basicIndex.search(query);
        displayResults(results);

        System.out.println(Arrays.toString(basicIndex.document(3).getValues("tag")));
    }

    private void displayResults(Map<Document, Float> results) {
        results.forEach((Document doc, Float score) -> {
            System.out.println("Score : " + score + " \n Doc : " + doc);
        });
    }
}
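
As a diagnostic aside (my addition, not part of the original post): the document dumps above show that the tag field is indexed with omitNorms and DOCS_ONLY, so Lucene's length normalization never sees how many tag values a document carries, which is why every matching document gets the same score. Below is a minimal sketch of how the scoring could be inspected, assuming access to the Directory that BasicIndex writes to; IndexSearcher.explain is standard Lucene API.

// Hypothetical diagnostic fragment (requires org.apache.lucene.search.TopDocs in addition
// to the imports already used): dump Lucene's scoring explanation for each hit.
// Assumes 'directory' is the Directory backing the index built above and 'query' is the query under test.
IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));
TopDocs topDocs = searcher.search(query, 10);
for (ScoreDoc hit : topDocs.scoreDocs) {
    // Each Explanation breaks the hit's score into its term-weight components.
    System.out.println(searcher.explain(query, hit.doc));
}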

Code for the BasicIndex (test utility) class:

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

/**
 * Small in-memory (RAMDirectory) index helper used by the test.
 * Note: ExceptionUtils and IOUtils below are the author's own utility classes (not shown in the post).
 *
 * @author renaud
 */
public class BasicIndex {

    final Directory directory = new RAMDirectory();
    final IndexWriter indexWriter;
    final Analyzer analyzer;

    public BasicIndex(Analyzer analyzer) {
        this.analyzer = analyzer;
        this.indexWriter = newIndexWriter();
    }

    public Analyzer getAnalyzer() {
        return analyzer;
    }

    private IndexWriter newIndexWriter() {
        IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, analyzer);
        try {
            return new IndexWriter(directory, config);
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }
    }

    public IndexSearcher newIndexSearcher() {
        return new IndexSearcher(newIndexReader());
    }

    public IndexReader newIndexReader() {
        IndexReader reader;
        try {
            reader = DirectoryReader.open(directory);
        } catch (IOException ex) {
            throw ExceptionUtils.asRuntimeException(ex);
        }
        return reader;
    }

    /** Indexes each phrase as a document holding a single TextField. */
    public void index(LinkedList<String> phrases, final String fieldName) {
        index(phrases, (String phrase) -> {
            Document doc = new Document();
            Field workField = new TextField(fieldName, phrase, Field.Store.YES);
            doc.add(workField);
            return doc;
        });
    }

    /** Indexes documents produced by the supplier until it returns null, then closes the writer. */
    public void index(Supplier<Document> documents) {
        Document document;
        while ((document = documents.get()) != null) {
            try {
                indexWriter.addDocument(document);
            } catch (IOException e) {
                throw ExceptionUtils.asRuntimeException(e);
            }
        }
        close();
    }

    public void index(LinkedList<String> phrases, Function<String, Document> docBuilder) {
        for (String phrase : phrases) {
            try {
                indexWriter.addDocument(docBuilder.apply(phrase));
            } catch (IOException e) {
                throw ExceptionUtils.asRuntimeException(e);
            }
        }
        close();
    }

    private void close() {
        IOUtils.closeSilently(indexWriter);
    }

    /** Runs the query and returns the top hits mapped to their scores, preserving rank order. */
    public Map<Document, Float> search(Query query) {
        final IndexSearcher indexSearcher = newIndexSearcher();
        int hitsPerPage = 10;
        TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true);
        try {
            indexSearcher.search(query, collector);
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }

        ScoreDoc[] hits = collector.topDocs().scoreDocs;

        Map<Document, Float> results = new LinkedHashMap<>();
        for (int i = 0; i < hits.length; ++i) {
            ScoreDoc scoreDoc = hits[i];
            int docId = scoreDoc.doc;
            float score = scoreDoc.score;
            Document doc;
            try {
                doc = indexSearcher.doc(docId);
            } catch (IOException ex) {
                throw new RuntimeException(ex);
            }
            results.put(doc, score);
        }

        return results;
    }

    public Document document(int i) {
        try {
            return newIndexSearcher().doc(i);
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }
    }
}

Best Answer

Well, the solution I finally ended up with was to:

  1. add an IntField holding the number of tags in each document;
  2. use a BoostedQuery with a ReciprocalFloatFunction over an IntFieldSource on that count.

I also found that SOLR is a good keyword when searching the web for Lucene information, since it is much better documented while staying close to the Java code.

I am very happy with the results:

Score : 0.5
  Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc4> stored<count:1> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
Score : 0.33333334
  Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc2> stored<count:2> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
Score : 0.25
  Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc1> stored<count:3> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:violon>>
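
For reference (my note, not part of the original answer): ReciprocalFloatFunction(source, m, a, b) computes a / (m * x + b), and BoostedQuery multiplies the wrapped query's score by that value. With m = a = b = 1, the boost applied to each document is therefore

    boost = 1 / (count + 1)

which is where the numbers above come from: doc4 (count 1) gets 1/2 = 0.5, doc2 (count 2) gets 1/3 ≈ 0.33333334, and doc1 (count 3) gets 1/4 = 0.25.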

Updated code:

    @Test
    public void testListOfTags() throws Exception {

        analyzer = new StandardAnalyzer();
        basicIndex = new BasicIndex(analyzer);

        Map<String, String> tags = new HashMap<>();
        tags.put("doc1", "piano, electric guitar, violon");
        tags.put("doc2", "piano, electric guitar");
        tags.put("doc3", "piano");
        tags.put("doc4", "electric guitar");

        Queue<String> queue = new LinkedList<>();
        queue.addAll(tags.keySet());

        basicIndex.index(new Supplier<Document>() {

            @Override
            public Document get() {
                Document doc = new Document();

                if (queue.isEmpty()) {
                    return null;
                }

                String docName = queue.poll();
                System.out.println("**** " + docName);

                String tag = tags.get(docName);
                doc.add(new StringField("id", docName, Field.Store.YES));

                // Store the number of tags so it can be used as a scoring factor.
                // (Renamed from the shadowing local "tags" to "tagValues" for clarity.)
                String[] tagValues = tag.split("\\,");
                Field tagCountField = new IntField("count", tagValues.length, Field.Store.YES);
                doc.add(tagCountField);

                for (String tagItem : tagValues) {
                    System.out.println(tagItem);
                    Field tagField = new StringField("tag", tagItem.trim(), Field.Store.YES);
                    System.out.println(tagField);
                    doc.add(tagField);
                }
                return doc;
            }
        });

        BooleanQuery booleanQuery = new BooleanQuery();
        //booleanQuery.add(new TermQuery(new Term("tag", "piano")), BooleanClause.Occur.SHOULD);
        booleanQuery.add(new TermQuery(new Term("tag", "electric guitar")), BooleanClause.Occur.SHOULD);

        //Query parsedQuery = new QueryParser("tag", analyzer).parse("tag:\"electric guitar\"");
        query = booleanQuery;
        //query = parsedQuery;

        // Multiply the query score by 1 / (count + 1) using the stored tag count.
        ValueSource boostSource = new ReciprocalFloatFunction(new IntFieldSource("count"), 1, 1, 1);
        query = new BoostedQuery(query, boostSource);

        System.out.println(query);

        results = basicIndex.search(query);
        displayResults(results);

        System.out.println(Arrays.toString(basicIndex.document(3).getValues("tag")));
    }

Regarding "java - Making multi-valued field scoring in Lucene aware of the count of field values", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/27919702/
