gpt4 book ai didi

python - 减少 Python 脚本中的 RAM 使用

转载 作者:太空宇宙 更新时间:2023-11-04 08:13:56 24 4
gpt4 key购买 nike

我编写了一个快速的小程序,用于从包含有关书籍翻译信息的联合国教科文组织网站上抓取书籍数据。该代码正在执行我想要的操作,但是当它处理大约 20 个国家/地区时,它正在使用 ~6GB 的 RAM。因为我需要处理大约 200 个,所以这对我不起作用。

我不确定所有 RAM 使用量从何而来,所以我不确定如何减少它。我假设它是包含所有书籍信息的字典,但我并不肯定。我不确定我是否应该简单地让程序为每个国家运行一次,而不是处理很多国家?或者是否有更好的方法?

这是我第一次写这样的东西,我是一个非常新手、自学成才的程序员,所以请指出代码中的任何重大缺陷;如果你有任何改进建议——即使与手头的问题不直接相关——也请一并指出。

这是我的代码,在此先感谢您的帮助。

from __future__ import print_function
import urllib2, os
from bs4 import BeautifulSoup, SoupStrainer

''' Set list of countries and their code for niceness in explaining what
is actually going on as the program runs. '''
# ISO 3166-1 alpha-3 code -> human-readable country name (used for progress logging).
countries = {"AFG":"Afghanistan","ALA":"Aland Islands","DZA":"Algeria"}

'''List of country codes since dictionaries aren't sorted in any
way, this makes processing easier to deal with if it fails at
some point, mid run.'''
# Fixed processing order; lets an interrupted run be resumed from a known position.
country_code_list = ["AFG","ALA","DZA"]

# UNESCO Index Translationum search endpoint; the country code is appended to it.
base_url = "http://www.unesco.org/xtrans/bsresult.aspx?lg=0&c="
# Directory the per-country CSV files are appended to.
destination_directory = "/Users/robbie/Test/"
# Restricts BeautifulSoup to the results table only, reducing parse-time memory.
only_restable = SoupStrainer(class_="restable")

class Book(object):
    '''One row of the UNESCO Index Translationum result table.

    Each set_* method parses the BeautifulSoup row element (``book``)
    and stores a plain-string attribute; export() appends the record as
    one " & "-separated line to the per-country CSV file.
    '''

    def _first_text(self, book, css_class):
        '''Return the first content of the first <span> with *css_class*
        in the row, or a single space when the row has none.'''
        matches = book.find_all('span', class_=css_class)
        if not matches:
            return " "
        return matches[0].contents[0]

    def _paired_names(self, book, last_class, first_class, separator):
        '''Join "last<separator>first" name pairs in document order.

        Last names without a matching first name are emitted alone.
        Fix: the original popped first names from the END of the list,
        so when a row listed several people the first surname was paired
        with the LAST given name; pairing by index keeps them aligned.'''
        last_names = book.find_all('span', class_=last_class)
        first_names = book.find_all('span', class_=first_class)
        names = ""
        for index, last in enumerate(last_names):
            names += last.getText()
            if index < len(first_names):
                names += separator + first_names[index].getText()
        return names

    def set_author(self, book):
        '''Set self.author to "Last, First" for each original author.'''
        authors = self._paired_names(book, "sn_auth_name",
                                     "sn_auth_first_name", ", ")
        # Fix: the original set a dead, differently-cased attribute
        # (self.Author = [" "]) on the empty case and then overwrote
        # self.author with ""; use the same single-space placeholder
        # every other field uses.
        self.author = authors if authors else " "

    def set_quality(self, book):
        '''Record the "quality" annotation when the row carries one.'''
        self.quality = self._first_text(book, "sn_auth_quality")

    def set_target_title(self, book):
        self.target_title = self._first_text(book, "sn_target_title")

    def set_target_language(self, book):
        self.target_language = self._first_text(book, "sn_target_lang")

    def set_translator_name(self, book):
        '''Set self.translators to "Last,First" for each translator.'''
        translators = self._paired_names(book, "sn_transl_name",
                                         "sn_transl_first_name", ",")
        self.translators = translators if translators else " "

    def set_published_city(self, book):
        self.published_city = self._first_text(book, "place")

    def set_publisher(self, book):
        # NOTE(review): this looks up class "place" — the same class as
        # set_published_city — almost certainly a copy/paste bug, but the
        # correct class name (perhaps "sn_publisher") must be confirmed
        # against the live page before changing it.
        self.publisher = self._first_text(book, "place")

    def set_published_country(self, book):
        self.published_country = self._first_text(book, "sn_country")

    def set_year(self, book):
        self.year = self._first_text(book, "sn_year")

    def set_pages(self, book):
        self.pages = self._first_text(book, "sn_pagination")

    def set_edition(self, book):
        self.edition = self._first_text(book, "sn_editionstat")

    def set_original_title(self, book):
        self.original_title = self._first_text(book, "sn_orig_title")

    def set_original_language(self, book):
        '''Join every original-language span into one string (the
        trailing ", " of the original output format is preserved).'''
        languages = ''
        for language in book.find_all('span', class_="sn_orig_lang"):
            languages += language.getText() + ', '
        self.original_languages = languages

    def export(self, country):
        '''Append this book's fields, " & "-separated and UTF-8 encoded,
        to the CSV file of the country it was scraped from.'''
        # Fix: join the directory and file name as two arguments so a
        # missing trailing slash on destination_directory cannot corrupt
        # the path.
        file_name = os.path.join(destination_directory, country + ".csv")
        fields = [self.author, self.quality, self.target_title,
                  self.target_language, self.translators,
                  self.published_city, self.publisher,
                  self.published_country, self.year, self.pages,
                  self.edition, self.original_title,
                  self.original_languages]
        with open(file_name, "a") as by_country_csv:
            # The with-block closes the file; the original's extra
            # close() call was redundant and has been dropped.
            print(" & ".join(f.encode('UTF-8') for f in fields),
                  file=by_country_csv)

    def __init__(self, book, country):
        '''Populate every attribute from the row's HTML element.

        ``country`` is accepted for backward compatibility but unused
        here; export() takes the country code explicitly.'''
        self.set_author(book)
        self.set_quality(book)
        self.set_target_title(book)
        self.set_target_language(book)
        self.set_translator_name(book)
        self.set_published_city(book)
        self.set_publisher(book)
        self.set_published_country(book)
        self.set_year(book)
        self.set_pages(book)
        self.set_edition(book)
        self.set_original_title(book)
        self.set_original_language(book)


def get_all_pages(country, base_url):
    '''Fetch the first result page for *country* and return the total
    number of results as an int (the old docstring wrongly said a
    string is returned). Returns 0 when the page has no result counter.
    '''
    base_page = urllib2.urlopen(base_url + country)
    # parse_only keeps BeautifulSoup from building a tree for the whole
    # document -- only the "restable" element is materialized.
    page = BeautifulSoup(base_page, parse_only=only_restable)

    result_number = page.find_all('td', class_="res1", limit=1)
    if not result_number:
        # Fix: the original leaked the tree on this path; break its
        # reference cycles here too so refcounting can free it promptly.
        page.decompose()
        return 0

    # The counter reads like "1/1234"; the part after '/' is the total.
    results_total = int(str(result_number[0].getText()).split('/')[1])

    page.decompose()
    return results_total


def build_list(country_code_list, countries):
    '''Scrape every country's result pages and export each book to that
    country's CSV file.

    Fix: the original accumulated Book objects in a list and exported
    them afterwards, which is what drove RAM usage into gigabytes; each
    book is now exported the moment it is parsed, so at most one result
    page of books is alive at a time. Returns None (the old docstring's
    claim of returning a list was never true).
    '''
    for country in country_code_list:
        print("Processing %s now..." % countries[country])
        results_total = get_all_pages(country, base_url)

        # Results come 10 per page, so step through offsets 0, 10, 20...
        # (equivalent to the original "if url % 10 == 0" skip-loop).
        for offset in range(0, results_total, 10):
            target_page = urllib2.urlopen(base_url + country
                                          + "&fr=" + str(offset))
            page = BeautifulSoup(target_page, parse_only=only_restable)
            for book in page.find_all('td', class_="res2"):
                # Export immediately instead of collecting in a list.
                Book(book, country).export(country)
            # Break reference cycles so the tree is freed right away.
            page.decompose()
    return

# Script entry point: scrape every configured country and write its CSV.
if __name__ == "__main__":
    build_list(country_code_list,countries)
    print("Completed.")

最佳答案

我想我会不分先后顺序列出一些问题或可能的改进:

  1. 关注PEP 8 .

    现在,您有很多使用驼峰命名法命名的变量和函数,例如 setAuthor。这不是 Python 的传统风格; Python 通常会将其命名为 set_author(和 published_country 而不是 PublishedCountry 等)。您甚至可以更改您正在调用的某些事物的名称:首先,BeautifulSoup 支持 findAll 以实现兼容性,但建议使用 find_all

    除了命名,PEP 8 还指定了一些其他的东西;例如,你想重写这个:

    if len(resultNumber) == 0 : return 0

    像这样:

    if len(result_number) == 0:
    return 0

    甚至考虑到空列表是虚假的这一事实:

    if not result_number:
    return 0
  2. 将 SoupStrainer 传递给 BeautifulSoup

    您要查找的信息可能只在文档的一部分中;你不需要把整个东西解析成一棵树。 Pass a SoupStrainer as the parse_only argument to BeautifulSoup.这应该通过尽早丢弃不必要的部分来减少内存使用。

  3. 用完汤(soup)之后调用 decompose。

    Python 主要 使用引用计数,因此删除所有循环引用(如 decompose 所做的那样)应该让它的主要垃圾收集机制,引用计数,释放大量内存。 Python 也有一个半传统的垃圾收集器来处理循环引用,但引用计数要快得多。

  4. 不要让 Book.__init__ 将东西写入磁盘。

    在大多数情况下,我不希望只创建一个类的实例来将某些内容写入磁盘。删除对 export 的调用;让用户调用 export 如果他们想把它放在磁盘上。

  5. 停止在内存中保留这么多数据。

    您将所有这些数据累积到一个字典中只是为了之后导出它。减少内存的明显做法是尽快将其转储到磁盘。您的评论表明您将其放入字典中是为了保持灵活性;但这并不意味着您必须将所有内容都收集在一个列表中:使用生成器,在您抓取它们时逐个产出。然后用户可以像列表一样对其进行迭代:

    for book in scrape_books():
    book.export()

    …但优点是一次最多可以将一本书保存在内存中。

  6. 使用os.path中的函数而不是自己修改路径。

    当涉及到路径名时,您现在的代码相当脆弱。如果我不小心从 destinationDirectory 中删除了结尾的斜杠,就会发生一些意想不到的事情。使用 os.path.join防止这种情况发生并处理跨平台差异:

    >>> os.path.join("/Users/robbie/Test/", "USA")
    '/Users/robbie/Test/USA'
    >>> os.path.join("/Users/robbie/Test", "USA") # still works!
    '/Users/robbie/Test/USA'
    >>> # or say we were on Windows:
    >>> os.path.join(r"C:\Documents and Settings\robbie\Test", "USA")
    'C:\\Documents and Settings\\robbie\\Test\\USA'
  7. attrs={"class":...} 缩写为 class_=...

    BeautifulSoup 4.1.2 引入了使用 class_ 进行搜索,这消除了对冗长的 attrs={"class":...} 的需要。

我想您可以更改的内容甚至更多,但从一开始就很少。

关于python - 减少 Python 脚本中的 RAM 使用,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/17768973/

24 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com