Go Code:
package main
import (
"bufio"
_ "bytes"
"fmt"
_ "io"
"log"
"os"
"os/user"
"path/filepath"
_ "reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// Shared package-level state used by the downloader goroutines.
var (
	// LocalDirectory is the directory-name prefix (combined with the bucket
	// name in main) for the per-bucket download directory under ~/Desktop.
	LocalDirectory = "s3logs" // Into this directory
	// Lock guards concurrent writes to the shared `data` map from the
	// per-region download goroutines.
	Lock             sync.Mutex
	totalImpressions int
)

// data maps taghash (e.g. "salon_t1_728x90_index") -> hour token ("Thh") -> impression count.
var data = make(map[string]map[string]int)
// main downloads today's ELB access logs from each configured regional S3
// bucket in parallel, waits for all downloads/parsing to finish, then prints
// the per-tag, per-hour impression counts accumulated in `data`.
func main() {
	start := time.Now()

	// Run the per-region download goroutines on up to 4 cores in parallel.
	// Shared state (`data`) is guarded by `Lock` inside downloadToFile.
	runtime.GOMAXPROCS(4)
	var wg sync.WaitGroup

	// Build zero-padded YYYY / MM / DD components for the S3 key prefix.
	// %02d replaces the original manual "prepend 0 when < 10" branches.
	year, month, day := time.Now().Date()
	strYear := strconv.Itoa(year)
	strMonth := fmt.Sprintf("%02d", int(month))
	strDay := fmt.Sprintf("%02d", day)

	// region -> bucket holding that region's ELB logs.
	regBuckets := map[string]string{
		"us-west-1": "pubgears-ca",
		"test":      "test",
	}
	for region, bucket := range regBuckets {
		prefix := fmt.Sprintf("tagserver/logs/AWSLogs/978380792767/elasticloadbalancing/%s/%s/%s/%s/", region, strYear, strMonth, strDay)
		wg.Add(1)
		go getLogs(region, bucket, LocalDirectory+bucket, &prefix, &wg)
	}
	wg.Wait()

	// Print each tag once, then its hour buckets beneath it.
	// NOTE: the original used %s for the int `imp`, which prints
	// "%!s(int=N)"; hours are strings (%s) and counts are ints (%d).
	for tag, hours := range data {
		fmt.Printf("tag: %s\n", tag)
		for hour, imp := range hours {
			fmt.Printf(" hour: %s impression %d\n", hour, imp)
		}
	}

	elapsed := time.Since(start)
	fmt.Printf("\nTime took %s\n", elapsed)
}
// getLogs lists every object under prefix in the given bucket/region and
// downloads each one via the downloader's eachPage callback. It always
// signals wg when finished, even on an early error return.
func getLogs(region string, bucket string, directory string, prefix *string, wg *sync.WaitGroup) {
	defer wg.Done() // deferred so the WaitGroup is released on every path

	// session.NewSession replaces the deprecated session.New, which
	// silently swallowed configuration errors.
	sess, err := session.NewSession()
	if err != nil {
		log.Printf("creating AWS session for region %s: %v", region, err)
		return
	}
	client := s3.New(sess, &aws.Config{Region: aws.String(region)})
	params := &s3.ListObjectsInput{Bucket: &bucket, Prefix: prefix}
	manager := s3manager.NewDownloaderWithClient(client, func(d *s3manager.Downloader) {
		d.PartSize = 5 * 1024 * 1024 // 5MB per part (original comment said 6MB)
		d.Concurrency = 4
	})
	d := downloader{bucket: bucket, dir: directory, Downloader: manager}
	// The original ignored this error, hiding listing failures entirely.
	if err := client.ListObjectsPages(params, d.eachPage); err != nil {
		log.Printf("listing s3://%s/%s: %v", bucket, *prefix, err)
	}
}
// downloader bundles an s3manager.Downloader (embedded, so Download is
// promoted) with the bucket it reads from and the local directory name
// (under ~/Desktop) that downloaded objects are written to.
type downloader struct {
	*s3manager.Downloader
	bucket, dir string
}
// eachPage is the ListObjectsPages callback: it downloads every object on
// the current listing page, then returns true to request the next page.
func (d *downloader) eachPage(page *s3.ListObjectsOutput, more bool) bool {
	for i := range page.Contents {
		d.downloadToFile(*page.Contents[i].Key)
	}
	return true
}
// downloadToFile fetches one S3 object to ~/Desktop/<d.dir>/<key>, scans it
// line by line for the 4-part tag path and the "Thh" hour token, accumulates
// impression counts into the shared `data` map (taghash -> hour -> count)
// under Lock, and deletes the local file when done.
func (d *downloader) downloadToFile(key string) {
	usr, err := user.Current()
	if err != nil {
		panic(err)
	}
	// Mirror the S3 key layout under the user's Desktop.
	file := filepath.Join(usr.HomeDir, "Desktop", d.dir, key)
	if err := os.MkdirAll(filepath.Dir(file), 0775); err != nil {
		panic(err)
	}

	fd, err := os.Create(file)
	if err != nil {
		panic(err)
	}
	defer fd.Close()

	// Download the object exactly once. The original called d.Download
	// twice in a row (first result discarded), writing two copies of the
	// object into the same file descriptor.
	params := &s3.GetObjectInput{Bucket: &d.bucket, Key: &key}
	if _, err := d.Download(fd, params); err != nil {
		panic(err)
	}

	f, err := os.Open(file)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// tag matches a 4-component slash path, e.g. /salon/t1/728x90/index;
	// date matches the "Thh" hour portion of an ISO-8601 timestamp.
	tag := regexp.MustCompile("/([a-zA-Z0-9_]+/{1}[a-zA-Z0-9_]+/{1}[a-zA-Z0-9_]+/{1}[a-zA-Z0-9_]+)")
	date := regexp.MustCompile("T([^:]+)")

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		m := tag.FindString(line)
		if m == "" {
			continue
		}
		taghash := strings.TrimLeft(strings.Join(strings.Split(m, "/"), "_"), "_")
		hour := date.FindString(line)
		if hour == "" {
			continue
		}
		Lock.Lock()
		// Create the inner map only if it doesn't exist yet. The original
		// re-made it on every matching line, wiping all previously
		// accumulated hour counts for that taghash.
		if _, ok := data[taghash]; !ok {
			data[taghash] = make(map[string]int)
		}
		data[taghash][hour]++
		Lock.Unlock()
	}
	// Check for scan errors before discarding the file, so a truncated
	// read is reported while the evidence still exists.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}

	fmt.Println(file)
	os.Remove(file)
}
我在第167行开头遇到问题的区域:
for scanner.Scan() {
//dateCollection := make(map[string]int)
m := tag.FindString(scanner.Text())
if m != "" {
// stop races
Lock.Lock()
arr := strings.Split(m, "/")
taghash := strings.Join(arr, "_")
taghash = strings.TrimLeft(taghash, "_")
//data[taghash]++
m = date.FindString(scanner.Text())
if m != "" {
hour := m
data[taghash] = make(map[string]int)
data[taghash][hour]++
}
Lock.Unlock()
}
}
fmt.Println(file)
os.Remove(file)
if err := scanner.Err(); err != nil {
log.Fatal(err)
}
我在主func中打印值时得到的输出
fstvt1_ros_300x600_pp8_1 T07 1
我希望输出看起来像这样:
fstvt1_ros_300x600_pp8_1
T00 213434
T01 23432
T02 3324
T03 324
T04 324
T05 0 ...
(this isn't real data)
我想做的事情:
我有一张数据地图var data = make(map[string]map[string]int)
,其密钥等于taghash,例如fstvt1_ros_300x600_pp8_1
。该值是数据映射。该映射的键值应为字符串和整数。我想要多张地图。每个键一个... T01,T02 ..当前输出的是最后一个项目i中的最后一项,而不是每个taghash的键和值的集合。我如何制作是这样,它不是覆盖数据,而是添加新的键值T01,T02 ..如果taghash和hour是增加该特定对象的那些。
目前使用的代码行:
T01,T02 ..
data[taghash][hour]++
如果taghash和hour存在,那么它应该递增。如果taghash和hour不存在,则创建taghash并添加新密钥并增加。
答案 0 :(得分:1)
错误来自
data[taghash] = make(map[string]int)
每次都会将data[taghash]
设置为新分配的地图。这绝对不是你想要做的。代替:
if _, ok := data[taghash]; !ok {
// data[taghash] does not exist -- create it!
data[taghash] = make(map[string]int)
}
data[taghash][hour]++
这相当于:
# Python
inner = data.setdefault(taghash, {})
inner[hour] = inner.get(hour, 0) + 1
或者
if taghash not in data:
data[taghash] = {}
if hour not in data[taghash]:
data[taghash][hour] = 1
else:
data[taghash][hour] += 1