processUsageFile()
{
    #sdate=`pin_virtual_time | awk -F" " '{print $3}'`;
    # Get all new files to be loaded into the BRM staging data.
    count=`ls ${PRE_STAGING}/TWN* 2>/dev/null | grep -v reprocess | wc -l`
    if [ $count -ne 0 ]; then
        # Fork a detached subshell
        (./efx_omc_brm_rpt_process.sh -t TWN & ) &
        exitOnError
    fi
    # Process Rapid Report files
    count=`ls ${PRE_STAGING}/RR* 2>/dev/null | grep -v reprocess | wc -l`
    if [ $count -ne 0 ]; then
        (./efx_omc_brm_rpt_process.sh -t RR & ) &
        exitOnError
    fi
    ...
    ...
}
# Reprocessing. Process the reprocessed files.
# This method updates the records in the BRM staging table.
reprocessingUsageFile()
{
    # Process TWN fulfillment reprocess files
    count=`ls ${PRE_STAGING}/TWN*reprocess* 2>/dev/null | wc -l`
    if [ $count -ne 0 ]; then
        # Fork a detached subshell
        (./efx_omc_brm_rpt_reprocess.sh -t TWN & ) &
    fi
    # Process Rapid Report files
    count=`ls ${PRE_STAGING}/RR*reprocess* 2>/dev/null | wc -l`
    if [ $count -ne 0 ]; then
        (./efx_omc_brm_rpt_reprocess.sh -t RR & ) &
    fi
    ...
    ...
}
# Pre-processing
PreProcessing
# Start processing usage files.
processUsageFile
processErrFile
The idea behind the code above is to do parallel processing: each method forks multiple subshells and detaches them from the tty. I would like to know whether there is a way to wait for the first two methods to finish executing before running the last one.
Waiting on PIDs has turned out to be somewhat unreliable. Still working on it...
# Polls the global pids array, dropping each PID as its process exits;
# returns once the array is empty.
waitPids() {
    echo "Testing $pids -- ${#pids[@]}"
    while [ ${#pids[@]} -ne 0 ]; do
        local range=$(eval echo {0..$((${#pids[@]}-1))})
        local i
        for i in $range; do
            if ! kill -0 ${pids[$i]} 2> /dev/null; then
                echo "Done -- ${pids[$i]}"
                unset pids[$i]
            fi
        done
        pids=("${pids[@]}")   # re-pack the array after removing finished entries
        sleep 1
    done
}
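For context, a rough sketch of how waitPids is presumably meant to be driven: the global pids array has to be filled with $! after each background launch. The runInBackground helper below is purely illustrative and is not part of the original scripts.

# Illustrative driver for waitPids: start each script as a plain background
# child (single &, no detaching subshell) and record its PID in the global array.
pids=()

runInBackground() {
    "$@" &
    pids+=($!)
}

runInBackground ./efx_omc_brm_rpt_process.sh -t TWN
runInBackground ./efx_omc_brm_rpt_process.sh -t RR

waitPids          # returns only once both scripts have exited
processErrFile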
Answer 0 (score: 4)
It seems like the main problem is that you are using detached subshells.
Perhaps the simplest solution is to use a different mechanism to detach the subshells, so that you can still use wait, e.g. via nohup:
nohup ./process1 &
nohup ./process2 &
wait
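A rough sketch of how this could map onto the question's functions, assuming the inner detaching & is dropped so that the launched scripts stay children of the calling shell (the script names come from the question, the simplified body of processUsageFile is mine):

processUsageFile()
{
    count=`ls ${PRE_STAGING}/TWN* 2>/dev/null | grep -v reprocess | wc -l`
    if [ $count -ne 0 ]; then
        nohup ./efx_omc_brm_rpt_process.sh -t TWN &    # child of this shell, visible to wait
    fi
    count=`ls ${PRE_STAGING}/RR* 2>/dev/null | grep -v reprocess | wc -l`
    if [ $count -ne 0 ]; then
        nohup ./efx_omc_brm_rpt_process.sh -t RR &
    fi
}

processUsageFile
reprocessingUsageFile      # assumed to be adapted the same way

wait                       # blocks until every background child has exited
processErrFile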
Answer 1 (score: 2)
$ help wait
wait: wait [-n] [id ...]
Wait for job completion and return exit status.
Waits for each process identified by an ID, which may be a process ID or a
job specification, and reports its termination status. If ID is not
given, waits for all currently active child processes, and the return
status is zero. If ID is a job specification, waits for all processes
in that job's pipeline.
If the -n option is supplied, waits for the next job to terminate and
returns its exit status.
Exit Status:
Returns the status of the last ID; fails if ID is invalid or an invalid
option is given.
$ wait -n; (sleep 3; false); echo $?
1
Background tasks return control immediately. The trick is to wrap your function in a subshell, so that you wait for the subshell (rather than the background jobs) to complete. For example:
$ wait -n; (processUsageFile); echo $?
If you want to get fancier, you will have to capture the PIDs of the background tasks you spawn in variables, so that you can wait for those specific processes with a construct along the lines of wait $pid.
Wrapping the functions in a subshell is easier, but your specific needs may vary.
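A minimal sketch of that fancier, PID-capturing variant; it assumes the scripts are launched with a single & so they remain children of the current shell and $! reports their PIDs:

pids=()

./efx_omc_brm_rpt_process.sh -t TWN &    # plain background job, not detached
pids+=($!)                               # remember its PID

./efx_omc_brm_rpt_process.sh -t RR &
pids+=($!)

wait "${pids[@]}"                        # block until exactly these processes finish
echo "usage file processing finished"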
Answer 2 (score: 1)
Perhaps a 'wait' command between the processing and the reprocessing would do it?
From: http://www.tldp.org/LDP/abs/html/subshells.html
Example 21-3. Running parallel processes in subshells
(cat list1 list2 list3 | sort | uniq > list123) &
(cat list4 list5 list6 | sort | uniq > list456) &
# Merges and sorts both sets of lists simultaneously.
# Running in background ensures parallel execution.
#
# Same effect as
# cat list1 list2 list3 | sort | uniq > list123 &
# cat list4 list5 list6 | sort | uniq > list456 &
wait # Don't execute the next command until subshells finish.
diff list123 list456
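Mapped onto the question's flow, that suggestion would look roughly like the following, again assuming the functions start their scripts with a single & instead of the detaching ( ... & ) & pattern:

PreProcessing

processUsageFile          # starts the TWN/RR loads as background children
reprocessingUsageFile     # starts the reprocess jobs as background children

wait                      # don't continue until all of those children finish

processErrFile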
Answer 3 (score: 0)
The best way I have found to parallelize and wait is to export a function for use in subshells and drive it with xargs, using -P for the maximum number of parallel processes and -n or -L to feed a specific number of arguments to the worker function at a time.
From: https://man7.org/linux/man-pages/man1/xargs.1.html
-P max-procs, --max-procs=max-procs
Run up to max-procs processes at a time; the default is 1.
If max-procs is 0, xargs will run as many processes as
possible at a time. Use the -n option or the -L option
with -P;
Example code:
# define some work function and export it
function unit_action() {
echo action $*
sleep 5
echo action $* done
}
export -f unit_action
# list all arguments to feed into function
# with 2 parameters at a time in a maximum of 3 parallel threads
echo {1..9} | xargs -t -n 2 -P 3 bash -c 'unit_action $@' --
echo all done
xargs waits implicitly until all of its input has been consumed, so no explicit wait command is needed.
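Applied to the question's scripts, a sketch of the same pattern might look like this (the two-at-a-time limit and the fixed TWN/RR list are illustrative assumptions):

# xargs starts one ./efx_omc_brm_rpt_process.sh per input line, at most two in
# parallel, and only returns once every invocation has exited.
printf '%s\n' TWN RR | xargs -P 2 -I{} ./efx_omc_brm_rpt_process.sh -t {}

processErrFile    # runs only after xargs, and therefore both loads, are done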