My code uses the segyio module. The .sgy file I need to process contains 107591 traces with 2501 samples per trace, in 'float32' format. I need to read the input .sgy file, apply a sliding-window average to each trace, and write the result to a new .sgy file. How can I speed up my code? Maybe I should use JIT or Cython, but I don't know how to use them. The script currently takes about 4-5 hours to run, which is far too long.
import segyio
import statistics
import numpy as np
import time

start_time = time.time()

z = int(input('How many traces would you have to change? '))
T = int(input('Choose the size of window for averaging '))
t1 = T // 4          # number of zero samples padded on each side of a trace
t = T - 2 * t1       # number of samples actually averaged per window
print('Taken into calculation', t, 'values')

t0 = [0.0] * t1                 # zero padding for the start and end of a trace
c = np.arange(0, 2502 - t)      # window start indices
d = np.arange(t, 2502)          # window end indices
traces = []
print('Precycle', "--- %s seconds ---" % (time.time() - start_time))

with segyio.open('2M.sgy', 'r') as f:
    f.mmap()
    for z3 in range(z):
        a = f.trace[z3]
        e = []
        # Mean of every sliding window a[c00:d00] of length t
        for c00, d00 in zip(c, d):
            e.append(statistics.mean(a[c00:d00]))
        full = t0 + e + t0      # pad the averaged values back out to trace length
        traces.append(np.array(full, dtype='float32'))
        print('Changed trace', z3, "--- %s seconds ---" % (time.time() - start_time))

    print('Calculation before writing', "--- %s seconds ---" % (time.time() - start_time))

    # Copy the geometry and metadata of the input file into the output file
    spec = segyio.spec()
    spec.sorting = f.sorting
    spec.format = f.format
    spec.samples = f.samples
    spec.ilines = f.ilines
    spec.xlines = f.xlines
    spec.tracecount = f.tracecount

    with segyio.create('2M2.sgy', spec) as dst:
        dst.mmap()
        dst.text[0] = f.text[0]
        dst.bin = f.bin
        dst.header = f.header
        dst.trace = traces

print('End', 'Full calculation time', "--- %s seconds ---" % (time.time() - start_time))
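
For reference, the inner loop is just a sliding-window mean over each trace. Below is a minimal NumPy sketch of that same per-trace step. The helper name sliding_mean is hypothetical; it assumes the same scheme as the code above (average every window of t = T - 2*(T//4) samples, pad T//4 zeros on each side) and should match the loop up to floating-point rounding, since statistics.mean works in double precision while this stays in float32.

import numpy as np

def sliding_mean(trace, T):
    # Hypothetical helper reproducing the windowing used above:
    # average every length-t window, then pad t1 zeros on each side.
    t1 = T // 4
    t = T - 2 * t1
    kernel = np.ones(t, dtype='float32') / t
    smoothed = np.convolve(trace, kernel, mode='valid')   # one mean per window
    pad = np.zeros(t1, dtype='float32')
    return np.concatenate([pad, smoothed, pad]).astype('float32')

Called once per trace (for example, traces.append(sliding_mean(f.trace[z3], T))), it would replace the Python-level inner loop with a single convolution call.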