Add the file names to the counted data

Time: 2018-12-17 05:13:20

Tags: unix awk compare comparison string-comparison

Suppose I have files like the following.

File 1

1,144931087,144931087,T,C  
16,89017167,89017167,C,G  
17,7330235,7330235,G,T  
17,10222478,10222478,C,T  

File 2

1,144931087,144931087,T,C
16,89017167,89017167,C,G
17,10222478,10222478,C,T

File 3

17,10222478,10222478,C,T  

I want to find out how many times each value is duplicated across the files, so ideally the output would be:

Output

2 1,144931087,144931087,T,C  
2 16,89017167,89017167,C,G  
3 17,10222478,10222478,C,T  
1 17,7330235,7330235,G,T 

I use the following command to count the duplicate values.

sort Test1.csv Test2.csv Test3.csv | uniq --count

Now I would like to add the file names to the counted output. The output I want should look like this:

Test1 Test2 2 1,144931087,144931087,T,C  
Test1 Test2 2 16,89017167,89017167,C,G  
Test1 Test2 Test3 3 17,10222478,10222478,C,T  
Test1 1 17,7330235,7330235,G,T  

Can someone help me get the desired output? Or can someone suggest a better way to get it?

4 answers:

Answer 0 (score: 3)

In awk. Apologies for my clever file naming scheme:

$ awk '{
    a[$0]++                   # count hits
    b[$0]=b[$0] FILENAME " "  # store filenames
}
END {
    for(i in a)               
        print b[i] a[i],i     # output them
}' foo bar baz
foo bar 2 1,144931087,144931087,T,C
foo bar 2 16,89017167,89017167,C,G
foo bar baz 3 17,10222478,10222478,C,T
foo 1 17,7330235,7330235,G,T
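
Note that b[$0]=b[$0] FILENAME " " appends the file name once per matching line, so if the same line happened to occur more than once within a single file, that file's name would be repeated. If that matters for your data, a minimal variation of the same idea (the seen array below is just an illustration) that records each file at most once per line could look like:

$ awk '{
    a[$0]++                           # count hits
    if (!seen[FILENAME,$0]++)         # record each file at most once per line
        b[$0]=b[$0] FILENAME " "
}
END {
    for(i in a)
        print b[i] a[i],i             # output them
}' foo bar baz
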
Update per the comments:

$ awk 'BEGIN {
    FS=OFS=","
} 
{
    a[$1 OFS $2 OFS $3 OFS $4]++ 
    b[$1 OFS $2 OFS $3 OFS $4]=b[$1 OFS $2 OFS $3 OFS $4] FILENAME "|"
    c[$1 OFS $2 OFS $3 OFS $4]=$0                      # keep the last record with 
}                                                      # specific key combination 
END { 
    for(i in a) 
        print b[i] "," a[i],c[i]  
}' foo  bar baz
foo|bar|,2,16,89017167,89017167,C
foo|,1,17,7330235,7330235,G
foo|bar|,2,1,144931087,144931087,T
foo|bar|baz|,3,17,10222478,10222478,C

Answer 1 (score: 2)

Input:

more Test*.csv
::::::::::::::
Test1.csv
::::::::::::::
1,144931087,144931087,T,C
16,89017167,89017167,C,G
17,7330235,7330235,G,T
17,10222478,10222478,C,T
::::::::::::::
Test2.csv
::::::::::::::
1,144931087,144931087,T,C
16,89017167,89017167,C,G
17,10222478,10222478,C,T
::::::::::::::
Test3.csv
::::::::::::::
17,10222478,10222478,C,T

Command:

awk '{tmp[$0]++;if(length(tmp2[$0])==0){tmp2[$0]=FILENAME;next}tmp2[$0]=tmp2[$0] OFS FILENAME}END{for(elem in tmp){print tmp2[elem] OFS tmp[elem] OFS elem}}' Test*.csv

Output:

Test1.csv Test2.csv 2 1,144931087,144931087,T,C
Test1.csv Test2.csv 2 16,89017167,89017167,C,G
Test1.csv Test2.csv Test3.csv 3 17,10222478,10222478,C,T
Test1.csv 1 17,7330235,7330235,G,T

Explanation:

  # gawk profile, created Mon Dec 17 14:46:47 2018

  # Rule(s)

   {
           tmp[$0]++ #associative array to count the occurrences freq
           if (length(tmp2[$0]) == 0) {  #when you add the first occurrence filename you do not need to add a space
                   tmp2[$0] = FILENAME
                   next
            }
           #append to variable with a space
           tmp2[$0] = tmp2[$0] OFS FILENAME
    }

    # END rule(s)

    END {
           # loop on each element of the associative arrays and print them
           for (elem in tmp) {
                   print tmp2[elem] OFS tmp[elem] OFS elem
            }
    }

The if ... next ... block can be replaced with (length(tmp2[$0]) == 0 ? tmp2[$0] = FILENAME : tmp2[$0] = tmp2[$0] OFS FILENAME), which simplifies the awk script to:

  {
       tmp[$0]++
       (length(tmp2[$0]) == 0 ? tmp2[$0] = FILENAME : tmp2[$0] = tmp2[$0] OFS FILENAME)
  }

  END {
         for (elem in tmp) {
              print tmp2[elem] OFS tmp[elem] OFS elem
         }
  }
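
If the one-liner form gets hard to read, the simplified script above can also be saved to its own file (the name count_dupes.awk below is only an example) and run with awk's -f option against all the CSV files at once; the output is the same as shown earlier.

  awk -f count_dupes.awk Test*.csv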

Answer 2 (score: 1)

Could you please try the following; it will print the output in the same order in which the lines first appear in your Input_file(s). I am using gsub(/[[:space:]]+$/,"") because your Input_file(s) have trailing spaces on some lines, so they are removed here; you could drop that part in case you do not have this situation.

awk '
{
  gsub(/[[:space:]]+$/,"")
}
!a[$0]++{
  b[++count]=$0
}
{
  c[$0]++
  d[$0]=d[$0]?d[$0] OFS FILENAME:FILENAME
}
END{
  for(i=1;i<=count;i++){
    print d[b[i]]"|"c[b[i]],b[i]
  }
}'  test1 test2 test3

The output will be as follows.

test1 test2|2 1,144931087,144931087,T,C
test1 test2|2 16,89017167,89017167,C,G
test1|1 17,7330235,7330235,G,T
test1 test2 test3|3 17,10222478,10222478,C,T
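
The b[++count] array above is what keeps the output in the order the lines first appear in the input. As a side note, if alphabetically sorted output is acceptable and GNU awk (gawk) is available, PROCINFO["sorted_in"] can control the for(i in ...) iteration order instead; a rough sketch of that idea (dropping the order-preserving array and the trailing-space cleanup) would be:

awk '
{
  c[$0]++
  d[$0]=d[$0]?d[$0] OFS FILENAME:FILENAME
}
END{
  PROCINFO["sorted_in"]="@ind_str_asc"   # GNU awk only: iterate keys in string order
  for(i in c){
    print d[i]"|"c[i],i
  }
}' test1 test2 test3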

Answer 3 (score: 1)

Another answer, using Perl.

> cat file1m.csv
1,144931087,144931087,T,C
16,89017167,89017167,C,G
17,7330235,7330235,G,T
17,10222478,10222478,C,T
> cat file2m.csv 
1,144931087,144931087,T,C
16,89017167,89017167,C,G
17,10222478,10222478,C,T
> cat file3m.csv
17,10222478,10222478,C,T
> cat uniq_perl.ksh
perl -lne ' 
@t=@{ $kvf{$_} };
if( not $ARGV ~~ @t ) { push(@t,$ARGV); $kvf{$_}=[ @t ] ;  }
close(ARGV) if eof; 
END { for(keys %kvf) { @x=@{$kvf{$_}};  print join(" ",@x)." ".scalar(@x)." ".$_  } }   
' file*m*csv 
> ./uniq_perl.ksh
file1m.csv file2m.csv file3m.csv 3 17,10222478,10222478,C,T
file1m.csv 1 17,7330235,7330235,G,T
file1m.csv file2m.csv 2 1,144931087,144931087,T,C
file1m.csv file2m.csv 2 16,89017167,89017167,C,G
>
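
One caveat with the Perl version: the ~~ smartmatch operator used to check whether $ARGV is already in @t has been marked experimental in recent Perl releases (under use warnings it warns that smartmatch is experimental), so keeping a hash of the file names already seen for each key would be a more future-proof way to express the same membership test.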