## 如何在共享内存中使用numpy数组进行多处理？内容来源于 Stack Overflow，并遵循CC BY-SA 3.0许可协议进行翻译与使用

• 回答 (2)
• 关注 (0)
• 查看 (82)

```from multiprocessing import Process, Array
import scipy

def f(a):
a[0] = -a[0]

if __name__ == '__main__':
# Create the array
N = int(10)
unshared_arr = scipy.rand(N)
a = Array('d', unshared_arr)
print "Originally, the first two elements of arr = %s"%(arr[:2])

# Create, start, and finish the child process
p = Process(target=f, args=(a,))
p.start()
p.join()

# Print out the changed values
print "Now, the first two elements of arr = %s"%arr[:2]```

```Originally, the first two elements of arr = [0.3518653236697369, 0.517794725524976]
Now, the first two elements of arr = [-0.3518653236697369, 0.517794725524976]```

### 2 个回答

`Array`对象具有`get_obj()`方法，该方法返回底层的 ctypes 数组；该数组提供缓冲区接口，因此可以直接被 numpy 包装而无需复制数据：

```from multiprocessing import Process, Array
import scipy
import numpy

def f(a):
a[0] = -a[0]

if __name__ == '__main__':
# Create the array
N = int(10)
unshared_arr = scipy.rand(N)
a = Array('d', unshared_arr)
print "Originally, the first two elements of arr = %s"%(a[:2])

# Create, start, and finish the child process
p = Process(target=f, args=(a,))
p.start()
p.join()

# Print out the changed values
print "Now, the first two elements of arr = %s"%a[:2]

b = numpy.frombuffer(a.get_obj())

b[0] = 10.0
print a[0]```

```shared_arr = mp.Array(ctypes.c_double, N)
# ...
def f(i): # could be anything numpy accepts as an index such another numpy array
with shared_arr.get_lock(): # synchronize access
arr = np.frombuffer(shared_arr.get_obj()) # no data copying
arr[i] = -arr[i]```

### 例

```import ctypes
import logging
import multiprocessing as mp

from contextlib import closing

import numpy as np

info = mp.get_logger().info

def main():
logger = mp.log_to_stderr()
logger.setLevel(logging.INFO)

# create shared array
N, M = 100, 11
shared_arr = mp.Array(ctypes.c_double, N)
arr = tonumpyarray(shared_arr)

# fill with random values
arr[:] = np.random.uniform(size=N)
arr_orig = arr.copy()

# write to arr from different processes
with closing(mp.Pool(initializer=init, initargs=(shared_arr,))) as p:
# many processes access the same slice
stop_f = N // 10
p.map_async(f, [slice(stop_f)]*M)

# many processes access different slices of the same array
assert M % 2 # odd
step = N // 10
p.map_async(g, [slice(i, i + step) for i in range(stop_f, N, step)])
p.join()
assert np.allclose(((-1)**M)*tonumpyarray(shared_arr), arr_orig)

def init(shared_arr_):
global shared_arr
shared_arr = shared_arr_ # must be inhereted, not passed as an argument

def tonumpyarray(mp_arr):
return np.frombuffer(mp_arr.get_obj())

def f(i):
"""synchronized."""
with shared_arr.get_lock(): # synchronize access
g(i)

def g(i):
"""no synchronization."""
info("start %s" % (i,))
arr = tonumpyarray(shared_arr)
arr[i] = -1 * arr[i]
info("end   %s" % (i,))

if __name__ == '__main__':
mp.freeze_support()
main()```