gpt4 book ai didi

python - 仅在按下特定按钮时打开文件目录,而不是自动打开

转载 作者:太空宇宙 更新时间:2023-11-03 20:34:59 26 4
gpt4 key购买 nike

我试图在 tkinter GUI 中按下某个按钮时打开文件目录,但当我运行程序时该目录会自动打开。另外,如果我在文件目录中按取消,我的程序就会卡住,我必须关闭程序,我不确定这是为什么。

我尝试将所有 tkinter 相关编码放在一个单独的文件中,但是当我尝试从该文件调用方法时,它会打开 tkinter GUI 两次,所以这不起作用,我无法为了解决这个问题,所以我认为将两者结合起来会更容易一些。我能够让 tkinter GUI 停止出现两次,但现在我陷入了困境。我尝试使用spyder附带的调试器,但除了向我展示为什么 tkinter GUI 不断出现两次之外,它没有多大帮助。

import os
import PyPDF2
import pandas
import webbrowser
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize

#Creates the GUI that will be used to select inputs#
# NOTE(review): this runs at import time, before any callbacks are wired up.
window = tk.Tk()
window.geometry("300x300")
window.resizable(0, 0)
window.title("Word Frequency Program")

#Allows user to select PDF to use in program#
def select_PDF():
    """Open a file dialog and return the chosen path.

    Returns:
        str: the selected file path, or '' if the user pressed Cancel
        (askopenfilename returns an empty string on Cancel).
    """
    # FIX: the pasted listing had lost its indentation; restored here.
    filename = filedialog.askopenfilename(initialdir = "/", title = "Select file", filetypes = (("pdf files", "*.pdf"), ("all files", "*.*")))
    return filename

button1 = ttk.Button(window, text = "Select File", command = select_PDF)
button1.grid()

#Quits out of the program when certain button clicked#
# window.quit ends mainloop(); the script then continues after the mainloop() call.
button3 = ttk.Button(window, text = "Quit", command = window.quit)
button3.grid()

#Loads in PDF into program#
# NOTE(review): this runs at module level, *before* mainloop(), which is why the
# file dialog opens automatically when the program starts.
filepath = select_PDF()

# Use a context manager so the file handle is closed when extraction is done
# (the original open() was never closed).
with open(filepath, 'rb') as PDF_file:
    read_pdf = PyPDF2.PdfFileReader(PDF_file)

    #Determines number of pages in PDF file and sets the document content to 'null'#
    number_of_pages = read_pdf.getNumPages()
    doc_content = ""

    #Extract text from the PDF file#
    for i in range(number_of_pages):
        # BUG FIX: the original called getPage(0) inside the loop, extracting
        # the first page number_of_pages times instead of every page.
        page = read_pdf.getPage(i)
        page_content = page.extractText()
        doc_content += page_content

#Method that a pdf that is read into the program goes through to eliminate any unwanted words or symbols#
def preprocess(text):
    """Clean raw PDF text and return a list of lemmatized word tokens.

    Parameters:
        text (str): raw text extracted from the PDF.

    Returns:
        list[str]: lowercase, alphabetic, stop-word-free, lemmatized tokens.
    """
    #Filters out punctuation from paragraph which becomes tokenized to words and punctuation#
    tokenizer = RegexpTokenizer(r'\w+')
    result = tokenizer.tokenize(text)

    #Makes all words lowercase#
    words = [item.lower() for item in result]

    #Removes all remaining tokens that are not alphabetic#
    result = [word for word in words if word.isalpha()]

    #Imports stopwords to be removed from paragraph#
    stop_words = set(stopwords.words("english"))

    #Removes the stop words from the paragraph#
    filtered_sent = [w for w in result if w not in stop_words]

    # BUG FIX: the original built `stemmed_words` with PorterStemmer but never
    # used the result anywhere -- dead code removed.
    # BUG FIX: the original used `lem.lemmatize(w,'n') and lem.lemmatize(w,'v')`;
    # `and` between two non-empty strings always evaluates to the second operand,
    # so the noun-lemmatization pass was silently discarded.  Chain the two
    # passes instead (noun first, then verb).
    lem = WordNetLemmatizer()
    lemmatized_words = ' '.join(lem.lemmatize(lem.lemmatize(w, 'n'), 'v') for w in filtered_sent)

    #Re-tokenize lemmatized words string#
    tokenized_word = word_tokenize(lemmatized_words)
    return tokenized_word

#Turns the text drawn from the PDF file into data the remaining code can understand#
tokenized_words = preprocess(doc_content)

#Determine frequency of words tokenized + lemmatized text#
# NOTE(review): mid-script import; conventionally this belongs at the top of the file.
from nltk.probability import FreqDist
fdist = FreqDist(tokenized_words)
final_list = fdist.most_common(len(fdist))

#Organize data into two columns and export the data to an html that automatically opens#
df = pandas.DataFrame(final_list, columns = ["Word", "Frequency"])
df.to_html('word_frequency.html')
webbrowser.open('file://' + os.path.realpath('word_frequency.html'))

# NOTE(review): everything above runs *before* the GUI appears, which is why the
# file dialog opens automatically instead of waiting for the button press.
window.mainloop()
window.destroy()

tkinter GUI 应该会自行弹出,而不会出现文件目录,直到您按下 GUI 中的按钮。当您在文件目录中按“取消”时,程序也不应该崩溃。

最佳答案

如果你想在按下按钮后运行,那么你必须运行select_PDF内的所有代码

def select_PDF():
    """Button callback: ask for a PDF and run the processing pipeline on it."""
    filename = filedialog.askopenfilename(initialdir = "/", title = "Select file", filetypes = (("pdf files", "*.pdf"), ("all files", "*.*")))
    # Guard: askopenfilename returns '' when the user presses Cancel.
    if not filename:
        return

    #Loads in PDF into program#
    PDF_file = open(filename, 'rb')
    read_pdf = PyPDF2.PdfFileReader(PDF_file)

    #Determines number of pages in PDF file and sets the document content to 'null'#
    number_of_pages = read_pdf.getNumPages()
    doc_content = ""

    #Extract text from the PDF file#

    # ... rest of code ...

# FIX: the button must be created *after* select_PDF is defined, otherwise the
# `command=select_PDF` reference raises NameError at creation time.
button1 = ttk.Button(window, text="Select File", command=select_PDF)

Button 的工作方式与 input() 不同——它不会停止代码，也不会等待您的点击。它仅仅定义按钮，mainloop() 才会把它显示出来。因此你应该把需要在点击后运行的代码全部放进按钮的回调函数里。

你的代码应该是这样的:

import os
import PyPDF2
import pandas
import webbrowser
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize

# --- functions ---

def preprocess(text):
    '''Method that a pdf that is read into the program goes through to eliminate any unwanted words or symbols.

    Parameters:
        text (str): raw text extracted from the PDF.

    Returns:
        list[str]: lowercase, alphabetic, stop-word-free, lemmatized tokens.
    '''
    #Filters out punctuation from paragraph which becomes tokenized to words and punctuation#
    tokenizer = RegexpTokenizer(r'\w+')
    result = tokenizer.tokenize(text)

    #Makes all words lowercase#
    words = [item.lower() for item in result]

    #Removes all remaining tokens that are not alphabetic#
    result = [word for word in words if word.isalpha()]

    #Imports stopwords to be removed from paragraph#
    stop_words = set(stopwords.words("english"))

    #Removes the stop words from the paragraph#
    filtered_sent = [w for w in result if w not in stop_words]

    # BUG FIX: the original built `stemmed_words` with PorterStemmer but never
    # used the result -- dead code removed.
    # BUG FIX: `lem.lemmatize(w,'n') and lem.lemmatize(w,'v')` always yields the
    # second operand (non-empty strings are truthy), discarding the noun pass;
    # chain the two passes instead.
    lem = WordNetLemmatizer()
    lemmatized_words = ' '.join(lem.lemmatize(lem.lemmatize(w, 'n'), 'v') for w in filtered_sent)

    #Re-tokenize lemmatized words string#
    tokenized_word = word_tokenize(lemmatized_words)
    return tokenized_word

def select_PDF():
    """Button callback: pick a PDF, extract its text, and export word frequencies to HTML."""
    filename = filedialog.askopenfilename(initialdir = "/", title = "Select file", filetypes = (("pdf files", "*.pdf"), ("all files", "*.*")))

    # BUG FIX: askopenfilename returns '' when the user presses Cancel;
    # open('') raised and appeared to freeze the original program.
    if not filename:
        return

    # Context manager closes the file when extraction is done (the original
    # open() was never closed).
    with open(filename, 'rb') as PDF_file:
        read_pdf = PyPDF2.PdfFileReader(PDF_file)

        #Determines number of pages in PDF file and sets the document content to 'null'#
        number_of_pages = read_pdf.getNumPages()
        doc_content = ""

        #Extract text from the PDF file#
        for i in range(number_of_pages):
            # BUG FIX: was getPage(0), which re-read the first page every iteration.
            page = read_pdf.getPage(i)
            page_content = page.extractText()
            doc_content += page_content

    #Turns the text drawn from the PDF file into data the remaining code can understand#
    tokenized_words = preprocess(doc_content)

    #Determine frequency of words tokenized + lemmatized text#
    from nltk.probability import FreqDist
    fdist = FreqDist(tokenized_words)
    final_list = fdist.most_common(len(fdist))

    #Organize data into two columns and export the data to an html that automatically opens#
    df = pandas.DataFrame(final_list, columns = ["Word", "Frequency"])
    df.to_html('word_frequency.html')
    webbrowser.open('file://' + os.path.realpath('word_frequency.html'))

# --- main ---

#Creates the GUI that will be used to select inputs#
window = tk.Tk()
window.geometry("300x300")
window.resizable(0, 0)
window.title("Word Frequency Program")

# All processing now happens inside select_PDF, so nothing runs until the button is pressed.
button1 = ttk.Button(window, text = "Select File", command=select_PDF)
button1.grid()

#Quits out of the program when certain button clicked#
button3 = ttk.Button(window, text="Quit", command=window.quit)
button3.grid()

window.mainloop()
window.destroy()
---

或者您可以使用按钮选择文件名,将其保存在全局变量中并关闭窗口(window.quit()),并将其余代码放在mainloop()之后。 mainloop() 将等到您关闭窗口,并且 mainloop() 之后的所有代码将在您选择文件(并关闭窗口)后执行

import os
import PyPDF2
import pandas
import webbrowser
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize

# --- functions ---

def preprocess(text):
    '''Method that a pdf that is read into the program goes through to eliminate any unwanted words or symbols.

    Parameters:
        text (str): raw text extracted from the PDF.

    Returns:
        list[str]: lowercase, alphabetic, stop-word-free, lemmatized tokens.
    '''
    #Filters out punctuation from paragraph which becomes tokenized to words and punctuation#
    tokenizer = RegexpTokenizer(r'\w+')
    result = tokenizer.tokenize(text)

    #Makes all words lowercase#
    words = [item.lower() for item in result]

    #Removes all remaining tokens that are not alphabetic#
    result = [word for word in words if word.isalpha()]

    #Imports stopwords to be removed from paragraph#
    stop_words = set(stopwords.words("english"))

    #Removes the stop words from the paragraph#
    filtered_sent = [w for w in result if w not in stop_words]

    # BUG FIX: the original built `stemmed_words` with PorterStemmer but never
    # used the result -- dead code removed.
    # BUG FIX: `lem.lemmatize(w,'n') and lem.lemmatize(w,'v')` always yields the
    # second operand (non-empty strings are truthy), discarding the noun pass;
    # chain the two passes instead.
    lem = WordNetLemmatizer()
    lemmatized_words = ' '.join(lem.lemmatize(lem.lemmatize(w, 'n'), 'v') for w in filtered_sent)

    #Re-tokenize lemmatized words string#
    tokenized_word = word_tokenize(lemmatized_words)
    return tokenized_word

def select_PDF():
    """Button callback: store the chosen filename in the global and end the mainloop."""
    global filename # to assign to global variable

    filename = filedialog.askopenfilename(initialdir = "/", title = "Select file", filetypes = (("pdf files", "*.pdf"), ("all files", "*.*")))

    # BUG FIX: tkinter windows have no .close() method -- the original
    # `window.close()` would raise AttributeError.  quit() ends mainloop()
    # so the processing stage after it can run; the surrounding script then
    # calls window.destroy().
    window.quit()

# --- main ---

filename = None # create global variable with default value at start

#Creates the GUI that will be used to select inputs#
window = tk.Tk()
window.geometry("300x300")
window.resizable(0, 0)
window.title("Word Frequency Program")

# select_PDF stores the chosen path in the global `filename` and ends mainloop().
button1 = ttk.Button(window, text = "Select File", command=select_PDF)
button1.grid()

#Quits out of the program when certain button clicked#
button3 = ttk.Button(window, text="Quit", command=window.quit)
button3.grid()

window.mainloop()
window.destroy()

# --- executed after closing window ---

# Guard against Cancel/Quit: `filename` stays None (or '') unless a file was picked,
# so the processing below is skipped instead of crashing on open('').
if filename: # check if filename was selected

    # Context manager closes the file when extraction is done (the original
    # open() was never closed).
    with open(filename, 'rb') as PDF_file:
        read_pdf = PyPDF2.PdfFileReader(PDF_file)

        #Determines number of pages in PDF file and sets the document content to 'null'#
        number_of_pages = read_pdf.getNumPages()
        doc_content = ""

        #Extract text from the PDF file#
        for i in range(number_of_pages):
            # BUG FIX: was getPage(0), which re-read the first page every iteration.
            page = read_pdf.getPage(i)
            page_content = page.extractText()
            doc_content += page_content

    #Turns the text drawn from the PDF file into data the remaining code can understand#
    tokenized_words = preprocess(doc_content)

    #Determine frequency of words tokenized + lemmatized text#
    from nltk.probability import FreqDist
    fdist = FreqDist(tokenized_words)
    final_list = fdist.most_common(len(fdist))

    #Organize data into two columns and export the data to an html that automatically opens#
    df = pandas.DataFrame(final_list, columns = ["Word", "Frequency"])
    df.to_html('word_frequency.html')
    webbrowser.open('file://' + os.path.realpath('word_frequency.html'))

关于python - 仅在按下特定按钮时打开文件目录,而不是自动打开,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/57225888/

26 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com