I am trying to speed up how new panes are created in my ~/.zshrc setup. My thinking at the moment is that zsh sources ~/.zshrc every time I create a new pane. What I would like to know is whether it is possible to carry the current pane's environment (functions, environment variables, aliases, and so on) over into the new pane. One thing I have noticed is that when I spawn a subshell from within ~/.zshrc, it inherits the parent's configuration. Can the same behavior be achieved when creating a tmux pane?

Alternatively, it would also work for me if ~/.zshrc were only sourced once, when the tmux session is first created, and then served as the default environment for every pane created afterwards. (I have seen tmux start new pane with my current environment, but the accepted answer there does not answer my question.)
import math
import time
import numpy as np
from numba import cuda

def random_choice_noreplace(m, n, axis=-1):
    # m, n are the number of rows, cols of the output;
    # each row of the argsort result is a random permutation of 0..n-1,
    # i.e. column indices drawn without replacement within a row.
    return np.random.rand(m, n).argsort(axis=axis)
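To make the helper's behavior concrete, here is a small usage sketch (the shapes shown are illustrative choices, not part of the original script): every row of the returned array is a permutation of 0..n-1, so an index never repeats within a row.

idx = random_choice_noreplace(3, 5)   # 3 rows of 5 column indices (illustrative shapes)
print(idx.shape)                      # (3, 5)
print(np.sort(idx, axis=1))           # each row sorts to [0 1 2 3 4], i.e. no repeated index per row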
@cuda.jit
def cuda_kernel(d_npart, d_npts, d_data, d_data_index, d_coef, d_tmp):
    # Each thread fills one element of the (npart, npts) work matrix:
    # gather the indexed data value and scale it by the matching coefficient.
    row, col = cuda.grid(2)
    if row < d_npart and col < d_npts:
        d_tmp[row, col] = d_data[d_data_index[row, col]] * d_coef[row, col]
    # np.sum() is not available inside an @cuda.jit kernel, and rebinding a
    # parameter (d_datasum = ...) would not write to the device array anyway,
    # so the reduction over the rows is done after the kernel returns.
def calculate_cuda(data, data_index, coef):
    npart, npts = data_index.shape
    # Arrays to copy to GPU memory; plain scalars such as npart and npts are
    # passed to the kernel by value and do not need cuda.to_device().
    d_data = cuda.to_device(data)
    d_data_index = cuda.to_device(data_index)
    d_coef = cuda.to_device(coef)
    d_tmp = cuda.device_array((npart, npts), np.complex64)
    threadsperblock = (16, 16)
    # Grid dimension x covers the rows (npart), dimension y covers the columns (npts).
    blockspergrid_x = int(math.ceil(npart / threadsperblock[0]))
    blockspergrid_y = int(math.ceil(npts / threadsperblock[1]))
    blockspergrid = (blockspergrid_x, blockspergrid_y)
    cuda_kernel[blockspergrid, threadsperblock](npart, npts, d_data, d_data_index, d_coef, d_tmp)
    # Copy the per-element products back and finish the sum over the rows on the host.
    final_data_sum = d_tmp.copy_to_host().sum(axis=0)
    return final_data_sum
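If the reduction should stay on the GPU rather than being finished on the host as in calculate_cuda above, one option is a second, very small kernel in which each thread owns one output column and walks down its rows. This is only a sketch under that assumption; the name sum_columns and the launch configuration below are mine, not part of the original code.

@cuda.jit
def sum_columns(d_tmp, d_datasum, npart, npts):
    # One thread per column: accumulate the npart row entries of that column.
    col = cuda.grid(1)
    if col < npts:
        acc = 0j
        for r in range(npart):
            acc = acc + d_tmp[r, col]
        d_datasum[col] = acc

# Illustrative call, assuming d_tmp was filled by cuda_kernel:
#   d_datasum = cuda.device_array(npts, np.complex64)
#   sum_columns[math.ceil(npts / 128), 128](d_tmp, d_datasum, npart, npts)
#   final_data_sum = d_datasum.copy_to_host()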
def calculate_python(data, data_index, coef):
    npart, npts = data_index.shape
    data_sum = np.zeros(npts, dtype=np.complex64)
    tmp = np.zeros(npts, dtype=np.complex64)
    print(" Calling python function...")
    for i in range(npart):
        # Gather the indexed samples for row i, scale by that row's coefficients, accumulate.
        tmp[:] = data[data_index[i]]
        data_sum += tmp * coef[i]
    return data_sum
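For reference, the row loop above can also be written as a single vectorized NumPy expression; the name calculate_numpy is my own, added only to make the intended computation explicit and to serve as an easy ground truth.

def calculate_numpy(data, data_index, coef):
    # Gather the indexed samples, scale by the coefficients, then sum over the rows.
    return (data[data_index] * coef).sum(axis=0).astype(np.complex64)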
if __name__ == '__main__':
    data_size = 1200
    rows = 31
    cols = 1000
    rand_float1 = np.random.randn(data_size)
    rand_float2 = np.random.randn(data_size)
    data = rand_float1 + 1j * rand_float2
    coef = np.random.randn(rows, cols)
    data_index = random_choice_noreplace(rows, cols)

    start_time = time.time()
    gpu_data_sum_python = calculate_python(data, data_index, coef)
    python_time = time.time() - start_time
    print("---- %s seconds for python ----:" % (python_time))

    start_time = time.time()
    gpu_data_sum = calculate_cuda(data, data_index, coef)
    gpu_time = time.time() - start_time
    print("---- %s seconds for gpu ----:" % (gpu_time))