I am building an endpoint that lets users upload multiple files at once and stores them in S3. I currently have this working with MultipartReader and s3manager, but only synchronously.
I am trying to use goroutines to speed this up and upload several files to S3 concurrently, but I keep running into a data race error. I suspect *s3manager may not be goroutine-safe, despite what the documentation says. (If the go statement is replaced with a plain function call, the code works synchronously.)
Would implementing a mutex fix my error?
func uploadHandler(w http.ResponseWriter, r *http.Request) {
	counter := 0
	switch r.Method {
	// GET to display the upload form.
	case "GET":
		err := templates.Execute(w, nil)
		if err != nil {
			log.Print(err)
		}
	// POST uploads each file and sends them to S3
	case "POST":
		c := make(chan string)
		// grab the request.MultipartReader
		reader, err := r.MultipartReader()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// copy each part to destination.
		for {
			part, err := reader.NextPart()
			if err == io.EOF {
				break
			}
			// if part.FileName() is empty, skip this iteration.
			if part.FileName() == "" {
				continue
			}
			counter++
			go S3Upload(c, part)
		}
		// wait for every upload to report back on the channel.
		for i := 0; i < counter; i++ {
			fmt.Println(<-c)
		}
		// displaying a success message.
		err = templates.Execute(w, "Upload successful.")
		if err != nil {
			log.Print(err)
		}
	default:
		w.WriteHeader(http.StatusMethodNotAllowed)
	}
}

func S3Upload(c chan string, part *multipart.Part) {
	bucket := os.Getenv("BUCKET")
	sess, err := session.NewSession(&aws.Config{
		Region: aws.String(os.Getenv("REGION")),
	})
	if err != nil {
		c <- "error occurred creating session"
		return
	}
	uploader := s3manager.NewUploader(sess)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(part.FileName()),
		Body:   part,
	})
	if err != nil {
		c <- "Error occurred attempting to upload to S3"
		return
	}
	// successful upload
	c <- "successful upload"
}
Answer 0 (score: 0)
^ See all the comments above.
Here is a modified code sample; channels are not useful here.
package main

import (
	"bytes"
	"io"
	"log"
	"net/http"
	"os"
	"strings"
	"sync"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

var (
	setupUploaderOnce sync.Once
	uploader          *s3manager.Uploader
	bucket            string
	region            string
)

// ensure the session and uploader are set up only once, using a singleton pattern
func setupUploader() {
	setupUploaderOnce.Do(func() {
		bucket = os.Getenv("BUCKET")
		region = os.Getenv("REGION")
		sess, err := session.NewSession(&aws.Config{Region: aws.String(region)})
		if err != nil {
			log.Fatal(err)
		}
		// assign to the package-level uploader; using := here would create a
		// shadowing local variable and leave the package-level one nil
		uploader = s3manager.NewUploader(sess)
	})
}

// normally singleton stuff is packaged out and called before starting the server,
// but to keep the example a single file, load it up here
func init() {
	setupUploader()
}
// templates is assumed to be defined elsewhere, as in the question
// (see the stub after this listing for one hypothetical way to define it).
func uploadHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	// GET to display the upload form.
	case "GET":
		err := templates.Execute(w, nil)
		if err != nil {
			log.Print(err)
		}
	// POST uploads each file and sends them to S3
	case "POST":
		var buf bytes.Buffer
		// "file" is defined by the form field, change it to whatever your form sets it to
		file, header, err := r.FormFile("file")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// close the file
		defer file.Close()
		fileName := strings.Split(header.Filename, ".")
		// load the entire file data to the buffer
		_, err = io.Copy(&buf, file)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// hand the buffered file off to S3 in the background.
		go S3Upload(buf, fileName[0])
		// displaying a success message.
		err = templates.Execute(w, "Upload successful.")
		if err != nil {
			log.Print(err)
		}
	default:
		w.WriteHeader(http.StatusMethodNotAllowed)
	}
}
// keeping this simple: do something with the err, like logging it.
// if the uploader fails in the goroutine, there is potential for false
// positive uploads... channels are not really good here either; for that,
// bubble the error up and don't spin up a goroutine (which amounts to the
// same thing as waiting on the channel to return).
func S3Upload(body bytes.Buffer, fileName string) {
	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(fileName),
		Body:   bytes.NewReader(body.Bytes()),
	})
	if err != nil {
		// the response has already been written, so logging is all that is left to do here
		log.Print(err)
	}
}