gpt4 book ai didi

python - 在 Python 中聚类文本

转载 作者:IT老高 更新时间:2023-10-28 20:31:24 27 4
gpt4 key购买 nike

我需要对一些文本文档进行聚类,并且一直在研究各种选项。看起来 LingPipe 可以在没有事先转换(到向量空间等)的情况下对纯文本进行聚类,但它是我见过的唯一明确声称可以处理字符串的工具。

有没有可以直接聚类文本的 Python 工具?如果没有,最好的处理方法是什么?

最佳答案

文本聚类的质量主要取决于两个因素:

  1. 您想要聚类的文档之间的一些相似性概念。例如,通过 tfidf-cosine-distance 很容易区分向量空间中有关体育和政治的新闻文章。根据此衡量标准将产品评论归类为“好”或“坏”要困难得多。

  2. 聚类方法本身。你知道最终会有多少个簇吗?知道的话,使用 kmeans。您不关心准确性,但想显示一个漂亮的树结构来导航搜索结果?使用某种层次聚类。

没有在任何情况下都能正常工作的文本聚类解决方案。因此,仅仅拿一个现成的聚类软件、把数据直接扔给它,很可能是不够的。

话虽如此,这是我前段时间用来玩文本聚类的一些实验性代码。文档被表示为归一化的 tfidf 向量,相似度被测量为余弦距离。聚类方法本身就是 majorclust

import sys
from collections import Counter
from itertools import combinations
from math import log, sqrt

def cosine_distance(a, b):
    """Return the cosine similarity between two documents.

    NOTE: despite the name this is a *similarity* (higher = closer), not a
    distance: it is the dot product of the two ``"tfidf"`` vectors, which
    equals the cosine because the vectors are unit-normalized by
    ``normalize`` beforehand.  The name is kept for compatibility with
    existing callers.

    Args:
        a, b: document dicts carrying a ``"tfidf"`` mapping of token -> weight.

    Returns:
        float: sum of products over the tokens the two vectors share
        (0.0 when they share none).
    """
    a_tfidf = a["tfidf"]
    # Iterate the (usually sparse) vector of b; only shared tokens contribute.
    return sum(weight * a_tfidf[token]
               for token, weight in b["tfidf"].items()
               if token in a_tfidf)

def normalize(features):
    """Scale a sparse feature vector in-place to unit Euclidean length.

    Args:
        features: dict mapping token -> weight; mutated in place.

    Returns:
        The same dict, for call-chaining convenience.

    An empty or all-zero vector is returned unchanged (the original code
    raised ZeroDivisionError here, reachable for documents whose every
    token occurs in all documents and therefore gets tfidf 0).
    """
    norm_sq = sum(v ** 2 for v in features.values())
    if norm_sq == 0.0:
        return features
    scale = 1.0 / sqrt(norm_sq)
    for token in features:
        features[token] *= scale
    return features

def add_tfidf_to(documents):
    """Attach a unit-normalized tfidf vector to every document, in place.

    Each document dict gets a new ``"tfidf"`` key mapping token -> weight,
    computed from its ``"tokens"`` list (missing/empty token lists yield an
    empty vector).  tf is the within-document relative frequency, idf is
    ``log(N / df)``; tokens occurring in every document (idf == 0) are
    dropped, matching the original ``tfidf > 0`` filter.

    Args:
        documents: list of dicts, each optionally carrying ``"tokens"``.

    Returns:
        None; ``documents`` is mutated.
    """
    # Inverted index: token -> [(doc index, relative term frequency), ...]
    postings = {}
    for doc_id, doc in enumerate(documents):
        doc["tfidf"] = {}
        doc_tokens = doc.get("tokens", [])
        num_tokens = len(doc_tokens)
        if num_tokens == 0:
            continue
        for token, freq in Counter(doc_tokens).items():
            postings.setdefault(token, []).append((doc_id, freq / num_tokens))

    doc_count = float(len(documents))
    for token, docs in postings.items():
        idf = log(doc_count / len(docs))
        if idf <= 0.0:
            # Token appears in every document: carries no information.
            continue
        for doc_id, tf in docs:
            documents[doc_id]["tfidf"][token] = tf * idf

    for doc in documents:
        doc["tfidf"] = normalize(doc["tfidf"])

def choose_cluster(node, cluster_lookup, edges):
    """Return the cluster label attracting ``node`` most strongly.

    For every neighbor of ``node``, the incident edge weight is credited to
    that neighbor's current cluster; the cluster with the largest total wins
    (MajorClust's "majority vote").  A node with no edges keeps its own
    current cluster.

    Args:
        node: graph node id.
        cluster_lookup: dict node -> current cluster label.
        edges: dict node -> list of (neighbor, weight) pairs.

    Returns:
        The winning cluster label.
    """
    best = cluster_lookup[node]
    if node in edges:
        # Total edge weight pulled in by each neighboring cluster.
        weight_by_cluster = {}
        for target, weight in edges[node]:
            cluster = cluster_lookup[target]
            weight_by_cluster[cluster] = weight_by_cluster.get(cluster, 0.0) + weight
        # Invert to weight -> [clusters]; pick (a) cluster with the max weight.
        clusters_by_weight = {}
        for cluster, total in weight_by_cluster.items():
            clusters_by_weight.setdefault(total, []).append(cluster)
        best = clusters_by_weight[max(clusters_by_weight)][0]
    return best

def majorclust(graph):
    """Cluster graph nodes with the MajorClust label-propagation algorithm.

    Every node starts in its own cluster; nodes then repeatedly adopt the
    cluster that the majority of their edge weight points to (see
    ``choose_cluster``) until no node moves.  Each exact (node, from, to)
    move is performed at most once, which breaks oscillation cycles and
    guarantees termination.

    Args:
        graph: object with ``nodes`` (iterable of node ids) and ``edges``
            (dict node -> list of (neighbor, weight)).

    Returns:
        list of clusters, each a list of node ids.
    """
    cluster_lookup = {node: i for i, node in enumerate(graph.nodes)}

    seen_moves = set()
    finished = False
    while not finished:
        finished = True
        for node in graph.nodes:
            new = choose_cluster(node, cluster_lookup, graph.edges)
            move = (node, cluster_lookup[node], new)
            if new != cluster_lookup[node] and move not in seen_moves:
                # Remember this move so a node cannot flip back and forth forever.
                seen_moves.add(move)
                cluster_lookup[node] = new
                finished = False

    # Group nodes by their final cluster label.
    clusters = {}
    for node, label in cluster_lookup.items():
        clusters.setdefault(label, []).append(node)

    return list(clusters.values())

def get_distance_graph(documents):
    """Build a complete weighted graph over documents for ``majorclust``.

    Nodes are document indices; every unordered pair gets an undirected
    edge weighted by ``cosine_distance`` (actually cosine similarity) of
    the two documents' tfidf vectors.  Note this is O(n^2) in the number
    of documents — fine for toy data only.

    Args:
        documents: list of document dicts carrying ``"tfidf"``.

    Returns:
        a Graph instance with ``nodes`` (set of int) and ``edges``
        (dict node -> list of (neighbor, weight)).
    """
    class Graph(object):
        """Minimal undirected weighted graph: adjacency lists + node set."""

        def __init__(self):
            self.edges = {}

        def add_edge(self, n1, n2, w):
            # Undirected: record the edge in both adjacency lists.
            self.edges.setdefault(n1, []).append((n2, w))
            self.edges.setdefault(n2, []).append((n1, w))

    graph = Graph()
    doc_ids = range(len(documents))
    graph.nodes = set(doc_ids)
    for a, b in combinations(doc_ids, 2):
        graph.add_edge(a, b, cosine_distance(documents[a], documents[b]))
    return graph

def get_documents():
    """Return the toy corpus used by the demo.

    Returns:
        list of 8 document dicts, each with the raw ``"text"`` and its
        whitespace-split ``"tokens"``.
    """
    texts = [
        "foo blub baz",
        "foo bar baz",
        "asdf bsdf csdf",
        "foo bab blub",
        "csdf hddf kjtz",
        "123 456 890",
        "321 890 456 foo",
        "123 890 uiop",
    ]
    return [{"text": text, "tokens": text.split()} for text in texts]

def main(args):
    """Demo entry point: tfidf-vectorize the toy corpus, cluster, print.

    Args:
        args: command-line argument list (unused; kept for the existing
            ``main(sys.argv)`` call site).
    """
    documents = get_documents()
    add_tfidf_to(documents)
    dist_graph = get_distance_graph(documents)

    # One "=========" banner per cluster, followed by its documents' texts.
    for cluster in majorclust(dist_graph):
        print("=========")
        for doc_id in cluster:
            print(documents[doc_id]["text"])

if __name__ == '__main__':
    main(sys.argv)

对于实际应用,你会使用一个像样的标记器,使用整数而不是标记字符串,并且不要计算 O(n^2) 距离矩阵...

关于python - 在 Python 中聚类文本,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/1789254/

27 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com