We hit a syntax error while running a shell script that starts deep-learning training. Environment: Anaconda, Python 3.7.0, Ubuntu 18.04.
We checked the paths and related settings, and we have run similar scripts before, but this is the first time we have seen this kind of error.
Terminal output:
sudo '/home/labor/pytorch_fnet-master/scripts/train_model.sh'
+ DATASET=dna
+ N_ITER=50000
+ RUN_DIR=saved_models/dna
+ PATH_DATASET_ALL_CSV=data/csvs/dna.csv
+ PATH_DATASET_TRAIN_CSV=data/csvs/dna/train.csv
+ GPU_IDS=0
+ DATASET_KWARGS='{"transform_signal":["fnet.transforms.Normalize()", "fnet.transforms.Resizer((1, 0.37241, 0.37241))"], "transform_target": ["fnet.transforms.Normalize()", "fnet.transforms.Resizer((1, 0.37241, 0.37241))"] }'
+ BPDS_KWARGS='{"buffer_size": 1 }'
+++ dirname /home/labor/pytorch_fnet-master/scripts/train_model.sh
++ cd /home/labor/pytorch_fnet-master/scripts
++ pwd
+ cd /home/labor/pytorch_fnet-master/scripts/..
+ python3 scripts/python/split_dataset.py data/csvs/dna.csv data/csvs --train_size 0.75 -v
Traceback (most recent call last):
File "scripts/python/split_dataset.py", line 61, in <module>
main()
File "scripts/python/split_dataset.py", line 36, in main
df_all = pd.read_csv(opts.src_csv)
File "/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py", line 702, in parser_f
return _read(filepath_or_buffer, kwds)
File "/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py", line 429, in _read
parser = TextFileReader(filepath_or_buffer, **kwds)
File "/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py", line 895, in __init__
self._make_engine(self.engine)
File "/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py", line 1122, in _make_engine
self._engine = CParserWrapper(self.f, **self.options)
File "/usr/local/lib/python3.6/dist-packages/pandas/io/parsers.py", line 1853, in __init__
self._reader = parsers.TextReader(src, **kwds)
File "pandas/_libs/parsers.pyx", line 387, in pandas._libs.parsers.TextReader.__cinit__
File "pandas/_libs/parsers.pyx", line 705, in pandas._libs.parsers.TextReader._setup_parser_source
FileNotFoundError: [Errno 2] File b'data/csvs/dna.csv' does not exist: b'data/csvs/dna.csv'
+ python3 train_model.py --n_iter 50000 --path_dataset_csv data/csvs/dna/train.csv --dataset_kwargs '{"transform_signal": ["fnet.transforms.Normalize()", "fnet.transforms.Resizer((1, 0.37241, 0.37241))"], "transform_target": ["fnet.transforms.Normalize()", "fnet.transforms.Resizer((1, 0.37241, 0.37241))"] }' --bpds_kwargs '{"buffer_size": 1 }' --path_run_dir saved_models/dna --gpu_ids 0
File "train_model.py", line 3
DATASET=${1:dna}
^
SyntaxError: invalid syntax
The corresponding script:
#!/bin/bash -x
DATASET=${1:-dna}
N_ITER=50000
RUN_DIR="saved_models/${DATASET}"
PATH_DATASET_ALL_CSV="data/csvs/${DATASET}.csv"
PATH_DATASET_TRAIN_CSV="data/csvs/${DATASET}/train.csv"
GPU_IDS=${2:-0}
DATASET_KWARGS="{\
\"transform_signal\": [\"fnet.transforms.Normalize()\", \"fnet.transforms.Resizer((1, 0.37241, 0.37241))\"], \
\"transform_target\": [\"fnet.transforms.Normalize()\", \"fnet.transforms.Resizer((1, 0.37241, 0.37241))\"] \
}"
BPDS_KWARGS="{\
\"buffer_size\": 1 \
}"
cd $(cd "$(dirname ${BASH_SOURCE})" && pwd)/..
python3 scripts/python/split_dataset.py ${PATH_DATASET_ALL_CSV} "data/csvs" --train_size 0.75 -v
python3 train_model.py \
--n_iter ${N_ITER} \
--path_dataset_csv ${PATH_DATASET_TRAIN_CSV} \
--dataset_kwargs "${DATASET_KWARGS}" \
--bpds_kwargs "${BPDS_KWARGS}" \
--path_run_dir ${RUN_DIR} \
--gpu_ids ${GPU_IDS}
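
For context, here is a minimal sanity check of the two things the log complains about, run from the repository root. This is only a diagnostic sketch based on the paths shown in the script and traceback above; adjust it if your layout differs:

cd /home/labor/pytorch_fnet-master
# The FileNotFoundError means this relative path does not resolve from the repo root:
ls -l data/csvs/dna.csv
# The SyntaxError traceback shows shell syntax at line 3 of train_model.py,
# so confirm that file still contains Python source rather than shell script content:
head -n 5 train_model.py
file train_model.py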