I'm new to Python and trying to recursively upload the gz contents of every tar file in a directory (and its subdirectories) to S3. There are about 1,350,000 tar files in total.
I don't have the space to extract them all at once, so I extract one tar at a time.
Initially my script worked, but as soon as it hit an error (a corrupted tar file) the script would exit. I added a bunch of try/except clauses to log those errors and keep going, and now the script doesn't seem to upload any files at all, even though I get output like:
('iter: ', '/srv/nfs/storage/bucket/2015/11/20/KFWS/NWS_NEXRAD_NXL2DP_KFWS_20151120130000_20151120135959.tar')
KFWS20151120_130030_V06.gz
('singlepart', <Bucket: bucket>, '/srv/nfs/storage/bucket/2015/11/20/KFWS/KFWS20151120_130030_V06.gz', '2015/11/20/KFWS/KFWS20151120_130030_V06.gz')
('single_part: ', '2015/11/20/KFWS/KFWS20151120_130030_V06.gz', '/srv/nfs/storage/bucket/2015/11/20/KFWS/KFWS20151120_130030_V06.gz')
KFWS20151120_131000_V06.gz
('iter: ', '/srv/nfs/storage/bucket/2015/11/20/KFWS/NWS_NEXRAD_NXL2DP_KFWS_20151120110000_20151120115959.tar')
KFWS20151120_110630_V06.gz
('singlepart', <Bucket: bucket>, '/srv/nfs/storage/bucket/2015/11/20/KFWS/KFWS20151120_110630_V06.gz', '2015/11/20/KFWS/KFWS20151120_110630_V06.gz')
('single_part: ', '2015/11/20/KFWS/KFWS20151120_110630_V06.gz', '/srv/nfs/storage/bucket/2015/11/20/KFWS/KFWS20151120_110630_V06.gz')
KFWS20151120_111601_V06.gz
It shows that it's getting into single_part, which to me means it's at least running the singlept function and trying to upload an object, but neither Zimport_errors.list nor Znoaa_nexrad_files.list is being created, and I don't see any new objects in the bucket.
The code is below. (Apologies in advance for how rough it is; I'm trying to teach myself Python and have only been at it for a few weeks.)
Here is the main block:
def singlept(bucket, keyname, local_file):
    retries = 0
    key_size = 0
    local_size = os.path.getsize(local_file)
    while retries <= 4 and local_size != key_size:
        local_md5 = md5file(local_file=local_file)
        print('single_part: ', keyname, local_file)
        try:
            key = bucket.new_key(keyname)
        except Exception:
            print('couldn\'t create key: ', keyname)
            pass
        try:
            key.set_contents_from_filename(local_file)
            key_size = key.size
            with open(successfile, 'ab') as f:
                f.write('\n')
                f.write(str(local_file + ',' + keyname + ',' + str(key_size) + ',' + str(local_size)))
        except Exception:
            print('couldn\'t upload file: ', local_file, ' as key: ', keyname)
            with open(errorfile, 'ab') as f:
                f.write('\n')
                f.write(str(local_file + ',' + keyname + ',' + str(key_size) + ',' + str(local_size)))
            pass
for dir, subdir, files in os.walk(local_bucket):
    s3path = "/".join(str(dir).split('/')[5:])
    local_path = str(local_bucket + '/' + s3path)
    for fname in files:
        if fname.endswith("tar"):
            fullpath = local_path + '/' + fname
            if (debug):
                print('iter: ', fullpath)
            with tarfile.open(fullpath, 'r') as tarball:
                zips = tarball.getmembers()
                try:
                    tarball.extractall(path=local_path)
                except Exception:
                    with open(errorfile, 'ab') as f:
                        f.write('\n')
                        f.write(str(fullpath + ',' + str(os.path.getsize(fullpath))))
                    continue
            for zip in zips:
                if (debug):
                    print(zip.name)
                local_file = local_path + '/' + zip.name
                keyname = s3path + '/' + zip.name
                try:
                    if zip.size >= 1073741824:
                        if (debug):
                            print('multipart', bucket, local_file, keyname)
                        multipt(bucket, local_file, keyname)
                    else:
                        if (debug):
                            print('singlepart', bucket, local_file, keyname)
                        singlept(bucket, keyname, local_file)
                except Exception:
                    with open(errorfile, 'ab') as f:
                        f.write('\n')
                        f.write(str(local_file + "," + keyname))
                    continue
                if local_file.endswith("gz"):
                    try:
                        os.remove(local_file)
                    except Exception:
                        print('couldn\'t remove file: ', local_file)
                        continue
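One thing I'm considering (not sure if it's the right approach) is changing the upload except block in singlept so it actually prints the error it swallowed, roughly like this minimal sketch using the standard-library traceback module (same key/local_file/keyname variables as above):

import traceback

try:
    key.set_contents_from_filename(local_file)
    key_size = key.size
except Exception:
    # print the real error instead of silently moving on
    print('couldn\'t upload file: ', local_file, ' as key: ', keyname)
    traceback.print_exc()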
Thanks a lot in advance for the help! I'm pulling my hair out!
Edit - added the code directly and hopefully fixed the indentation! It didn't look like it pasted correctly from Atom. :-/
Answer 0 (score: 0):
except Exception only catches exceptions of type Exception - see https://stackoverflow.com/a/18982726/264822. You should try:
try:
    key = bucket.new_key(keyname)
except:
    print('couldn\'t create key: ', keyname)
    pass
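For context on why that can matter here (your print output looks like Python 2): in Python 2 an exception does not have to derive from Exception, so a bare except can catch things that except Exception misses. A small illustrative sketch of the case the linked answer describes (the Legacy class is made up just for the demo):

class Legacy:              # old-style class, not derived from Exception
    pass

try:
    raise Legacy()
except Exception:
    print('caught by "except Exception"')   # not reached for Legacy
except:
    print('caught only by a bare except')   # this branch runs in Python 2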