I fine-tuned BERT for sequence classification on a specific task, and I want to apply LIME explanations to see how each token contributes to the classification of a particular label, since LIME treats the classifier as a black box. I put together the following code based on code available online:
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is a modification of the original run_examples.py script from the repository
# 'https://github.com/wanghm92/pytorch-pretrained-BERT'. Modified for research purposes by Andraž P.
import logging
import csv
import argparse
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from tqdm import tqdm
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs an InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
                sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
                Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r", encoding='utf-8') as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)
            return lines
class SemEvalProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(data_dir))
        return self._create_examples(
            self._read_tsv(data_dir), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(data_dir), "dev")

    def get_test_examples(self, data_dir):
        logger.info("LOOKING AT {}".format(data_dir))
        return self._create_examples(
            self._read_tsv(data_dir), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def create_examples(self, line, set_type='test'):
        examples = []
        if set_type == 'test':
            guid = "%s" % (set_type)
            text_a = line
            text_b = None
            label = "0"
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples[0]
class Predictions:
    def convert_examples_to_features(self, example, label_list, max_seq_length, tokenizer):
        """Loads a data file into a list of `InputBatch`s."""
        if label_list:
            label_map = {label: i for i, label in enumerate(label_list)}
        else:
            label_map = {"0": i for i in range(1)}
        features = []
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]
        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        label_id = label_map[example.label]
        logger.info("*** Example ***")
        logger.info("guid: %s" % (example.guid))
        logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
        logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        logger.info("label: %s (id = %d)" % (example.label, label_id))
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id))
        return features
    def _truncate_seq_pair(tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    # def accuracy(out, labels):
    #     outputs = np.argmax(out, axis=1)
    #     return np.sum(outputs == labels)

    # def warmup_linear(x, warmup=0.002):
    #     if x < warmup:
    #         return x / warmup
    #     return 1.0 - x
    def predict(self, text):
        examples = []
        print(text)
        for example in text:
            test_example = processor.create_examples(example)
            test_features = self.convert_examples_to_features(test_example, None, args.max_seq_length, tokenizer)
            examples.append(test_features)
        logger.info("***** Running prediction *****")
        # logger.info(" test_example = %", test_example)
        logger.info(" Batch size = %d", args.predict_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
        test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # Run prediction for full data
        test_sampler = SequentialSampler(test_data)
        test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.predict_batch_size)
        model.eval()
        all_preds = []
        all_ids = []
        results = []
        for input_ids, input_mask, segment_ids, label_ids in tqdm(test_dataloader, desc="Predicting"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            with torch.no_grad():
                outputs = model(input_ids, segment_ids, input_mask)
                logits = outputs[0]
            print("logits,logits.shape", logits)
            # preds = np.argmax(logits).tolist()
            logits = F.softmax(logits, dim=1)
            results.append(logits.cpu().detach().numpy()[0])
            # logits = logits.detach().cpu().numpy()[0]
            input_ids = input_ids.to('cpu').numpy().tolist()
            # all_preds.extend(preds)
            all_ids.extend(input_ids)
        results_array = np.array(results)
        logger.info("results_array", results_array.shape)
        return np.array(logits)
if __name__ == "__main__":
    import pandas as pd
    import numpy as np
    import torch
    import torch.nn.functional as F
    from lime.lime_text import LimeTextExplainer
    import logging

    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--data_path",
                        type=str,
                        help="The path of the input data file. The data should be in the .tsv format.")
    parser.add_argument("--bert_model", default='bert-base-uncased', type=str,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                             "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default='SEMEVAL',
                        type=str,
                        help="The name of the task.")
    parser.add_argument("--output_dir",
                        default='results/bert_output.tsv',
                        type=str,
                        help="The output results file.")
    parser.add_argument("--model_path",
                        default='results/semeval_output/pytorch_model_task.bin',
                        type=str,
                        help="The path to the trained model file.")
    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        default=True,
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--predict_batch_size",
                        default=16,
                        type=int,
                        help="Total batch size for prediction.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    args = parser.parse_args()

    processors = {
        "semeval": SemEvalProcessor,
    }
    num_labels_task = {
        "semeval": 2,
    }
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))
    processor = processors[task_name]()
    num_labels = num_labels_task[task_name]
    # label_list = processor.get_labels()

    # Loading a tokenizer
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    # Loading a model
    model_state_dict = torch.load(args.model_path)
    model = BertForSequenceClassification.from_pretrained(args.bert_model, state_dict=model_state_dict,
                                                          num_labels=num_labels)
    model.to(device)
    predictions = Predictions()
    # model_path = "models/mrpc"
    # bert_model_class = "bert"
    # prediction = Prediction(bert_model_class, model_path,
    #                         lower_case=True, seq_length=512)
    label_names = [0, 1]
    train_df = pd.read_csv(args.data_path, sep='\t')
    # test_examples = processor.get_test_examples(texts)
    # guids = [example.guid for example in test_examples]
    # resultpredict = predict(test_examples)
    logger.info("Applying LIME")
    explainer = LimeTextExplainer(class_names=label_names)
    train_ls = train_df['text'].tolist()
    train_ls = train_ls[0]
    example = train_ls
    print(type(predictions.predict))
    for example in train_ls:
        print('example train_ls', example)
        exp = explainer.explain_instance(example, predictions.predict)
        words = exp.as_list()
    logger.info("Finished applying LIME")
When I run the code it never gives me any output; because of the input to predictions.predict there is always a dimension error I cannot understand. What, in its simplest form, should I feed to explainer.explain_instance? Should I pass ('text', [[0.9867 0.1243]])? Or ('text', [[0.9867 0.1243] [0.7651 0.3459] [0.3254 0.775]])? Or something else? I get this error:
Traceback (most recent call last):
  File "/content/drive/My Drive/BERT CODE/Try_LIME.py", line 442, in <module>
    exp = explainer.explain_instance(example, predictions.predict)
  File "/usr/local/lib/python3.6/dist-packages/lime/lime_text.py", line 415, in explain_instance
    distance_metric=distance_metric)
  File "/usr/local/lib/python3.6/dist-packages/lime/lime_text.py", line 482, in __data_labels_distances
    labels = classifier_fn(inverse_data)
  File "/content/drive/My Drive/BERT CODE/Try_LIME.py", line 273, in predict
    logits = F.softmax(logits, dim = 1)
  File "/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py", line 1498, in softmax
    ret = input.softmax(dim)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
Thanks in advance.
Best Answer
Your example is far too complicated. You can compare against the simple approach below (run in jupyter-lab) to see why yours doesn't work:
import numpy as np
import lime
import torch
import torch.nn.functional as F
from lime.lime_text import LimeTextExplainer
from transformers import AutoTokenizer, AutoModelForSequenceClassification
filename_model = 'ProsusAI/finbert'
tokenizer = AutoTokenizer.from_pretrained(filename_model)
model = AutoModelForSequenceClassification.from_pretrained(filename_model)
class_names = ['positive','negative', 'neutral']
def predictor(texts):
    # Tokenize the batch of texts LIME passes in and run the model.
    outputs = model(**tokenizer(texts, return_tensors="pt", padding=True))
    tensor_logits = outputs[0]
    # Softmax over the class dimension; LIME expects an (n_samples, n_classes) array.
    probas = F.softmax(tensor_logits, dim=1).detach().numpy()
    return probas
text = 'Building more bypasses will help the environment by reducing pollution and traffic jams in towns and cities.'
print(tokenizer(text, return_tensors='pt', padding=True))
explainer = LimeTextExplainer(class_names=class_names)
exp = explainer.explain_instance(text, predictor, num_features=20, num_samples=2000)
exp.show_in_notebook(text=text)
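To the original question about what explain_instance expects: the classifier_fn (predictions.predict in the question) receives a plain list of perturbed strings and must return a single NumPy array of shape (n_samples, n_classes), one probability row per string. The question's predict does not satisfy that contract, and the IndexError itself comes from the model call: the old pytorch_pretrained_bert BertForSequenceClassification returns the logits tensor directly (not a tuple), so outputs[0] slices off the first row, leaving a 1-D tensor that F.softmax(..., dim=1) rejects. Below is a minimal sketch of the same predictor pattern adapted to a locally fine-tuned checkpoint. It assumes the model was saved with save_pretrained so it loads through the transformers API; the checkpoint directory, max length, and batch size are illustrative assumptions, not values from the original post.

import numpy as np
import torch
import torch.nn.functional as F
from transformers import BertForSequenceClassification, BertTokenizerFast

# Illustrative assumptions: a checkpoint directory saved via save_pretrained(),
# max_length=128 and batch size 32. Substitute your own values.
checkpoint_dir = 'results/semeval_output'
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained(checkpoint_dir, num_labels=2)
model.eval()

def bert_predictor(texts):
    """classifier_fn for LIME: list of strings -> (n_samples, n_classes) probabilities."""
    all_probas = []
    with torch.no_grad():
        # LIME passes num_samples perturbed strings at once (5000 by default),
        # so run them through the model in manageable batches.
        for i in range(0, len(texts), 32):
            enc = tokenizer(texts[i:i + 32], return_tensors='pt',
                            padding=True, truncation=True, max_length=128)
            logits = model(**enc)[0]
            all_probas.append(F.softmax(logits, dim=1).cpu().numpy())
    # One probability row per input string, accumulated over all batches.
    return np.concatenate(all_probas, axis=0)

The essential difference from the question's predict is the return value: the probabilities for every batch are accumulated and concatenated, instead of returning np.array(logits) from the last batch only.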
Regarding python - Applying LIME explanations to my fine-tuned BERT sequence classification model?, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/64484738/