I have a nested loop that reads a file and uses awk to pull out the first and second part of each line. The first part is the md5sum of a file and the second part is the file name. However, when I run the script to check for duplicate files, file1 also gets compared against file1 itself, so even though it is the same file, the script reports it as a duplicate. Here is my code:
echo start
for i in $(<dump.txt) ; do
    md=$(echo $i | awk -F'|' '{print $1}')
    file=$(echo $i | awk -F'|' '{print $2}')
    for j in $(<dump.txt) ; do
        m=$(echo $j | awk -F'|' '{print $1}')
        f=$(echo $j | awk -F'|' '{print $2}')
        if [ "$md" == "$m" ]; then
            echo $file and $f are duplicates
        fi
    done
done
echo end
The dump file looks like this:
404460c24654e3d64024851dd0562ff1 *./extest.sh
7a900fdfa67739adcb1b764e240be05f *./test.txt
7a900fdfa67739adcb1b764e240be05f *./test2.txt
88f5a6b83182ce5c34c4cf3b17f21af2 *./dump.txt
c8709e009da4cce3ee2675f2a1ae9d4f *./test3.txt
d41d8cd98f00b204e9800998ecf8427e *./checksums.txt
The whole script is:
#!/bin/sh
func ()
{
    if [ "$1" == "" ]; then
        echo "Default";
        for i in `find` ;
        do
            #if [ -d $i ]; then
            #echo $i "is a directory";
            #fi
            if [ -f $i ]; then
                if [ "$i" != "./ex.sh" ]; then
                    #echo $i "is a file";
                    md5sum $i >> checksums.txt;
                    sort --output=dump.txt checksums.txt;
                fi
            fi
        done
    fi
    if [ "$1" == "--long" ]; then
        echo "--long";
        for i in `find` ;
        do
            #if [ -d $i ]; then
            #echo $i "is a directory";
            #fi
            if [ -f $i ]; then
                echo $i "is a file";
            fi
        done
    fi
    if [ "$1" == "--rm" ]; then
        echo "--rm";
        for i in `find` ;
        do
            #if [ -d $i ]; then
            #echo $i "is a directory";
            #fi
            if [ -f $i ]; then
                echo $i "is a file";
            fi
        done
    fi
}
parse () {
    echo start
    for i in $(<dump.txt) ; do
        md=$(echo $i | awk -F'|' '{print $1}')
        file=$(echo $i | awk -F'|' '{print $2}')
        for j in $(<dump.txt) ; do
            m=$(echo $j | awk -F'|' '{print $1}')
            f=$(echo $j | awk -F'|' '{print $2}')
            #echo $md
            #echo $m
            if [ "$file" != "$f" ] && [ "$md" == "$m" ]; then
                echo Files $file and $f are duplicates.
            fi
        done
    done
    echo end
}
getArgs () {
    if [ "$1" == "--long" ]; then
        echo "got the first param $1";
    else
        if [ "$1" == "--rm" ]; then
            echo "got the second param $1";
        else
            if [ "$1" == "" ]; then
                echo "got default param";
            else
                echo "script.sh: unknown option $1";
                exit;
            fi
        fi
    fi
}
#start script
cat /dev/null > checksums.txt;
cat /dev/null > dump.txt;
getArgs $1;
func $1;
parse;
#end script
Answer 0 (score: 1)
It is quite simple:
if [ "$file" != "$f" ] && [ "$md" = "$m" ]; then
    echo "Files $file and $f are duplicates."
fi
Note that I changed the comparison operator from == to =, which is the portable form. I also surrounded the message with double quotes to make it clear that it is a single string and that I do not want word splitting to happen on the two variables file and f.
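As a small illustration (using a hypothetical file name, not one from the script), word splitting collapses runs of whitespace when the variable is left unquoted, while the quoted form keeps the message exactly as written:

f="my   file.txt"                   # hypothetical name containing several spaces
echo Files $f are duplicates.       # prints: Files my file.txt are duplicates.
echo "Files $f are duplicates."     # prints: Files my   file.txt are duplicates.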
[Update:]
Another way to find the duplicates would be to use awk for the string processing:
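A minimal sketch of such an awk-based check, assuming dump.txt keeps the whitespace-separated "hash *./filename" layout shown above and that the file names contain no spaces, could look like this:

awk '{
    file = $2
    sub(/^\*/, "", file)                 # strip md5sum binary-mode "*" marker
    if ($1 in names)
        names[$1] = names[$1] ", " file  # append to the list for this hash
    else
        names[$1] = file
    count[$1]++
}
END {
    for (md in count)
        if (count[md] > 1)
            print "Duplicates (" md "): " names[md]
}' dump.txt

It groups the file names by hash and, at the end, prints only the hashes that were seen more than once.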
Answer 1 (score: 0)
awk -F'|' '{if($1 in a)print "duplicate found:" $0 " AND "a[$1];else a[$1]=$0 }' yourfile
will give you what you need. Of course, you can customize the message text.
See the test below:
kent$ cat md5chk.txt
abcdefg|/foo/bar/a.txt
bbcdefg|/foo/bar2/ax.txt
cbcdefg|/foo/bar3/ay.txt
abcdefg|/foo/bar4/a.txt
1234567|/seven/7.txt
1234568|/seven/8.txt
1234567|/seven2/7.txt
kent$ awk -F'|' '{if($1 in a)print "duplicate found:" $0 " AND "a[$1];else a[$1]=$0 }' md5chk.txt
duplicate found:abcdefg|/foo/bar4/a.txt AND abcdefg|/foo/bar/a.txt
duplicate found:1234567|/seven2/7.txt AND 1234567|/seven/7.txt
Update
awk # the name of the tool/command
-F'|' # declare delimiter is "|"
'{if($1 in a) # if the first column was already saved
print "duplicate found:" $0 " AND "a[$1]; # print the info
else # else
a[$1]=$0 }' # save in an array named a, index=the 1st column (md5), value is the whole line.
yourfile # your input file
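The one-liner above assumes a |-separated input like the test file; for the whitespace-separated "hash *./filename" lines that md5sum writes into dump.txt in the question (again assuming file names without spaces), a minimal adaptation could be:

awk '{sub(/^\*/, "", $2); if ($1 in a) print "duplicate found: " $2 " AND " a[$1]; else a[$1] = $2}' dump.txt

On the sample dump.txt shown above this reports ./test2.txt and ./test.txt as the only duplicate pair.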