I have a multiprocessing job where I'm queuing read-only numpy arrays as part of a producer-consumer pipeline.
Right now they're being pickled, because that's the default behaviour of multiprocessing.Queue, and that hurts performance.
Is there a pythonic way to pass references to shared memory instead of pickling the arrays?
Unfortunately, the arrays are generated after the consumer has started, and there's no easy way around that (so a global-variable approach would be ugly...).
[Note that in the following code we do not expect h(x0) and h(x1) to be computed in parallel. Instead, h(x0) and g(h(x1)) are computed in parallel (like pipelining in a CPU).]
from multiprocessing import Process, Queue
import numpy as np

class __EndToken(object):
    pass

def parrallel_pipeline(buffer_size=50):
    def parrallel_pipeline_with_args(f):
        def consumer(xs, q):
            for x in xs:
                q.put(x)
            q.put(__EndToken())

        def parallel_generator(f_xs):
            q = Queue(buffer_size)
            consumer_process = Process(target=consumer, args=(f_xs, q,))
            consumer_process.start()
            while True:
                x = q.get()
                if isinstance(x, __EndToken):
                    break
                yield x

        def f_wrapper(xs):
            return parallel_generator(f(xs))

        return f_wrapper
    return parrallel_pipeline_with_args

@parrallel_pipeline(3)
def f(xs):
    for x in xs:
        yield x + 1.0

@parrallel_pipeline(3)
def g(xs):
    for x in xs:
        yield x * 3

@parrallel_pipeline(3)
def h(xs):
    for x in xs:
        yield x * x

def xs():
    for i in range(1000):
        yield np.random.uniform(0, 1, (500, 2000))

if __name__ == "__main__":
    rs = f(g(h(xs())))
    for r in rs:
        print r
Best answer
Share memory between threads or processes
Use threading instead of multiprocessing
Since you're using numpy, you can take advantage of the fact that the global interpreter lock is released during numpy computations. That means you can do parallel processing with standard threads and shared memory, instead of multiprocessing and inter-process communication. Below is a version of your code, adjusted to use threading.Thread and Queue.Queue instead of multiprocessing.Process and multiprocessing.Queue. It passes numpy ndarrays through a queue without pickling them. On my computer it runs about 3 times faster than your code. (However, it's only about 20% faster than a serial version of your code; I've suggested some other approaches further down.)
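(As a quick aside that is not part of the original answer: if you want to convince yourself that numpy element-wise operations really do release the GIL, a minimal sketch like the one below is enough. It times the same work run serially and on several threads; the function and array sizes are arbitrary, and the actual speed-up depends on how memory-bound the operations are.)

import threading, time
import numpy as np

def work():
    # large element-wise numpy operations release the GIL while the C loop runs,
    # so several of these calls can overlap when run on separate threads
    a = np.random.uniform(0, 1, (500, 2000))
    for _ in range(20):
        a = (a * a + 1.0) / (a + 2.0)   # arbitrary bounded element-wise work

t0 = time.time()
for _ in range(4):
    work()
print "serial:   %.2fs" % (time.time() - t0)

t0 = time.time()
threads = [threading.Thread(target=work) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print "threaded: %.2fs" % (time.time() - t0)

The adapted version of the pipeline follows.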
from threading import Thread
from Queue import Queue
import numpy as np

class __EndToken(object):
    pass

def parallel_pipeline(buffer_size=50):
    def parallel_pipeline_with_args(f):
        def consumer(xs, q):
            for x in xs:
                q.put(x)
            q.put(__EndToken())

        def parallel_generator(f_xs):
            q = Queue(buffer_size)
            consumer_process = Thread(target=consumer, args=(f_xs, q,))
            consumer_process.start()
            while True:
                x = q.get()
                if isinstance(x, __EndToken):
                    break
                yield x

        def f_wrapper(xs):
            return parallel_generator(f(xs))

        return f_wrapper
    return parallel_pipeline_with_args

@parallel_pipeline(3)
def f(xs):
    for x in xs:
        yield x + 1.0

@parallel_pipeline(3)
def g(xs):
    for x in xs:
        yield x * 3

@parallel_pipeline(3)
def h(xs):
    for x in xs:
        yield x * x

def xs():
    for i in range(1000):
        yield np.random.uniform(0, 1, (500, 2000))

rs = f(g(h(xs())))
%time print sum(r.sum() for r in rs)   # 12.2s
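Store the arrays in shared memory
If you would rather keep using multiprocessing, you can avoid pickling by pre-allocating a pool of shared-memory buffers. The ArrayQueue class below copies each ndarray into a free buffer on put() and passes only the buffer's index through the queue; get() copies the data back out and returns the buffer to the pool.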
from multiprocessing import Process, Queue, Array
import numpy as np

class ArrayQueue(object):
    def __init__(self, template, maxsize=0):
        if type(template) is not np.ndarray:
            raise ValueError('ArrayQueue(template, maxsize) must use a numpy.ndarray as the template.')
        if maxsize == 0:
            # this queue cannot be infinite, because it will be backed by real objects
            raise ValueError('ArrayQueue(template, maxsize) must use a finite value for maxsize.')

        # find the size and data type for the arrays
        # note: every ndarray put on the queue must be this size
        self.dtype = template.dtype
        self.shape = template.shape
        self.byte_count = len(template.data)

        # make a pool of numpy arrays, each backed by shared memory,
        # and create a queue to keep track of which ones are free
        self.array_pool = [None] * maxsize
        self.free_arrays = Queue(maxsize)
        for i in range(maxsize):
            buf = Array('c', self.byte_count, lock=False)
            self.array_pool[i] = np.frombuffer(buf, dtype=self.dtype).reshape(self.shape)
            self.free_arrays.put(i)

        self.q = Queue(maxsize)

    def put(self, item, *args, **kwargs):
        if type(item) is np.ndarray:
            if item.dtype == self.dtype and item.shape == self.shape and len(item.data) == self.byte_count:
                # get the ID of an available shared-memory array
                id = self.free_arrays.get()
                # copy item to the shared-memory array
                self.array_pool[id][:] = item
                # put the array's id (not the whole array) onto the queue
                new_item = id
            else:
                raise ValueError(
                    'ndarray does not match type or shape of template used to initialize ArrayQueue'
                )
        else:
            # not an ndarray
            # put the original item on the queue (as a tuple, so we know it's not an ID)
            new_item = (item,)
        self.q.put(new_item, *args, **kwargs)

    def get(self, *args, **kwargs):
        item = self.q.get(*args, **kwargs)
        if type(item) is tuple:
            # unpack the original item
            return item[0]
        else:
            # item is the id of a shared-memory array
            # copy the array
            arr = self.array_pool[item].copy()
            # put the shared-memory array back into the pool
            self.free_arrays.put(item)
            return arr
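# Illustrative, not part of the original answer: a single-process round trip
# through an ArrayQueue, to show the copy-in / copy-out semantics.
#
#   template = np.zeros((500, 2000))          # fixes dtype and shape for every array
#   aq = ArrayQueue(template, maxsize=3)      # pool of 3 shared-memory buffers
#   a = np.random.uniform(0, 1, (500, 2000))
#   aq.put(a)    # copies a into a free shared buffer; only its index is queued
#   b = aq.get() # copies the data back out and returns the buffer to the pool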
class __EndToken(object):
    pass

def parallel_pipeline(buffer_size=50):
    def parallel_pipeline_with_args(f):
        def consumer(xs, q):
            for x in xs:
                q.put(x)
            q.put(__EndToken())

        def parallel_generator(f_xs):
            q = ArrayQueue(template=np.zeros((500, 2000)), maxsize=buffer_size)
            consumer_process = Process(target=consumer, args=(f_xs, q,))
            consumer_process.start()
            while True:
                x = q.get()
                if isinstance(x, __EndToken):
                    break
                yield x

        def f_wrapper(xs):
            return parallel_generator(f(xs))

        return f_wrapper
    return parallel_pipeline_with_args

@parallel_pipeline(3)
def f(xs):
    for x in xs:
        yield x + 1.0

@parallel_pipeline(3)
def g(xs):
    for x in xs:
        yield x * 3

@parallel_pipeline(3)
def h(xs):
    for x in xs:
        yield x * x

def xs():
    for i in range(1000):
        yield np.random.uniform(0, 1, (500, 2000))

print "multiprocessing with shared-memory arrays:"
%time print sum(r.sum() for r in f(g(h(xs()))))   # 13.5s
# generating the arrays by themselves takes about the same amount of time:
%time print sum(1 for x in xs())
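Move the computation into the worker processes
Passing large arrays between processes is expensive, so the biggest gains come from doing the reduction in the workers and sending back only small results. The comparison below times the same computation run serially, with a pool of threads, with a process pool returning full arrays, and with a process pool returning only each array's sum.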
import multiprocessing
import threading, Queue
import numpy as np

def f(x):
    return x + 1.0

def g(x):
    return x * 3

def h(x):
    return x * x

def final(i):
    return f(g(h(x(i))))

def final_sum(i):
    return f(g(h(x(i)))).sum()

def x(i):
    # produce sample number i
    return np.random.uniform(0, 1, (500, 2000))

def rs_serial(func, n):
    for i in range(n):
        yield func(i)

def rs_parallel_threaded(func, n):
    todo = range(n)
    q = Queue.Queue(2*n_workers)

    def worker():
        while True:
            try:
                # the global interpreter lock ensures only one thread does this at a time
                i = todo.pop()
                q.put(func(i))
            except IndexError:
                # none left to do
                q.put(None)
                break

    threads = []
    for j in range(n_workers):
        t = threading.Thread(target=worker)
        t.daemon = False
        threads.append(t)   # in case it's needed later
        t.start()

    # wait for a sentinel from every worker, so results queued by slower
    # workers after the first sentinel are not dropped
    finished = 0
    while finished < n_workers:
        x = q.get()
        if x is None:
            finished += 1
        else:
            yield x

def rs_parallel_mp(func, n):
    pool = multiprocessing.Pool(n_workers)
    return pool.imap_unordered(func, range(n))

n_workers = 4
n_samples = 1000

print "serial:" # 14.8s
%time print sum(r.sum() for r in rs_serial(final, n_samples))
print "threaded:" # 10.1s
%time print sum(r.sum() for r in rs_parallel_threaded(final, n_samples))
print "mp return arrays:" # 19.6s
%time print sum(r.sum() for r in rs_parallel_mp(final, n_samples))
print "mp return results:" # 8.4s
%time print sum(r_sum for r_sum in rs_parallel_mp(final_sum, n_samples))
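Parallelize the generator pipeline itself
If the code has to stay written as chained generators, another option is to run independent copies of the whole chain in a pool of worker processes and pull the results back in chunks, as sketched below. The same pattern holds: returning whole arrays through the pool is slower than a single process, while returning only each array's sum is more than twice as fast.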
import multiprocessing, itertools, math
import numpy as np

def f(xs):
    for x in xs:
        yield x + 1.0

def g(xs):
    for x in xs:
        yield x * 3

def h(xs):
    for x in xs:
        yield x * x

def xs():
    for i in range(1000):
        yield np.random.uniform(0, 1, (500, 2000))

def final():
    return f(g(h(xs())))

def final_sum():
    for x in f(g(h(xs()))):
        yield x.sum()

def get_chunk(args):
    """Retrieve n values (n=args[1]) from a generator function (f=args[0]) and return them as a list.
    This runs in a worker process and does all the computation."""
    return list(itertools.islice(args[0](), args[1]))

def parallelize(gen_func, max_items, n_workers=4, chunk_size=50):
    """Pull up to max_items items from several copies of gen_func, in small groups in parallel processes.
    chunk_size should be big enough to improve efficiency (one copy of gen_func will be run for each chunk)
    but small enough to avoid exhausting memory (each worker will keep chunk_size items in memory)."""
    pool = multiprocessing.Pool(n_workers)
    # how many chunks will be needed to yield at least max_items items?
    n_chunks = int(math.ceil(float(max_items)/float(chunk_size)))
    # generate a suitable series of arguments for get_chunk()
    args_list = itertools.repeat((gen_func, chunk_size), n_chunks)
    # chunk_gen will yield a series of chunks (lists of results) from the generator function,
    # totaling n_chunks * chunk_size items (which is >= max_items)
    chunk_gen = pool.imap_unordered(get_chunk, args_list)
    # parallel_gen flattens the chunks, and yields individual items
    parallel_gen = itertools.chain.from_iterable(chunk_gen)
    # limit the output to max_items items
    return itertools.islice(parallel_gen, max_items)

# in this case, the parallel version is slower than a single process, probably
# due to overhead of gathering numpy arrays in imap_unordered (via pickle?)
print "serial, return arrays:" # 15.3s
%time print sum(r.sum() for r in final())
print "parallel, return arrays:" # 24.2s
%time print sum(r.sum() for r in parallelize(final, max_items=1000))

# in this case, the parallel version is more than twice as fast as the single-thread version
print "serial, return result:" # 15.1s
%time print sum(r for r in final_sum())
print "parallel, return result:" # 6.8s
%time print sum(r for r in parallelize(final_sum, max_items=1000))
For more on "python - Fast queue of read-only numpy arrays", see the similar question on Stack Overflow: https://stackoverflow.com/questions/38666078/