我按照文章的说明,使用给定的 Origin-Response 和 Viewer-Request 函数创建了项目文件夹,安装了相关依赖项,并使用 CloudFormation 模板部署了 zip 包。
IAM角色,s3存储桶,存储桶策略,具有lambda @ edge函数的分发均已创建,没有任何错误,而且它们似乎都是兼容的。
但是当我尝试在原始存储桶中调整图像大小时,出现以下错误;
“ 503错误 无法满足该请求。 与CloudFront分配关联的Lambda函数无效或没有所需的权限。 “
我也没有在监视中看到任何东西,这意味着我的函数没有被调用。
我使用“ AdministratorAccess”策略和信任关系创建了另一个管理员角色,即“ edgelambda.amazonaws.com”,“ lambda.amazonaws.com”
我将自己的存储桶政策更改为面向所有人。
我可以查看图像,但是在尝试向Cloudfront发行网址中添加querystring来调整大小时仍然出现503错误
“ xxxxxxxxx.net/images/pexels.jpeg?d=100x100”
以下是我的存储桶,存储桶策略,IAM角色和功能。
存储桶名称:image-resize-488052071209-us-east-1
存储桶策略(Bucket Policy):
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "AllowPublicRead",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::image-resize-488052071209-us-east-1/*"
},
{
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::image-resize-488052071209-us-east-1/*"
},
{
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::image-resize-488052071209-us-east-1/*"
}
]
}
IAM角色:
Admin:
AdministratorAccess, "edgelambda.amazonaws.com","lambda.amazonaws.com" trust relationships
ImageFunctionsAndRole-EdgeLambdaRole-1U93T440VWXKT:
AmazonS3FullAccess, CloudFrontFullAccess, AWSLambdaExecute, CloudFrontReadOnlyAccess, AWSLambdaBasicExecutionRole
查看者请求功能
'use strict';
const querystring = require('querystring');
// defines the allowed dimensions, default dimensions and how much variance from allowed
// dimension is allowed.
// Resizing configuration shared by the handler below.
const variables = {
// Whitelist of output dimensions the resizer will produce.
allowedDimension : [ {w:100,h:100}, {w:200,h:200}, {w:300,h:300}, {w:400,h:400} ],
// Fallback used when the requested size matches none of the allowed ones.
defaultDimension : {w:200,h:200},
// Tolerance (percent) when snapping a requested width to an allowed dimension.
variance: 20,
// Extension served to browsers that advertise WebP support via Accept.
webpExtension: 'webp'
};
exports.handler = (event, context, callback) => {
const request = event.Records[0].cf.request;
const headers = request.headers;
// parse the querystrings key-value pairs. In our case it would be d=100x100
const params = querystring.parse(request.querystring);
// fetch the uri of original image
let fwdUri = request.uri;
// if there is no dimension attribute, just pass the request
if(!params.d){
callback(null, request);
return;
}
// read the dimension parameter value = width x height and split it by 'x'
const dimensionMatch = params.d.split("x");
// set the width and height parameters
let width = dimensionMatch[0];
let height = dimensionMatch[1];
// parse the prefix, image name and extension from the uri.
// In our case /images/image.jpg
const match = fwdUri.match(/(.*)\/(.*)\.(.*)/);
let prefix = match[1];
let imageName = match[2];
let extension = match[3];
// define variable to be set to true if requested dimension is allowed.
let matchFound = false;
// calculate the acceptable variance. If image dimension is 105 and is within acceptable
// range, then in our case, the dimension would be corrected to 100.
let variancePercent = (variables.variance/100);
for (let dimension of variables.allowedDimension) {
let minWidth = dimension.w - (dimension.w * variancePercent);
let maxWidth = dimension.w + (dimension.w * variancePercent);
if(width >= minWidth && width <= maxWidth){
width = dimension.w;
height = dimension.h;
matchFound = true;
break;
}
}
// if no match is found from allowed dimension with variance then set to default
//dimensions.
if(!matchFound){
width = variables.defaultDimension.w;
height = variables.defaultDimension.h;
}
// read the accept header to determine if webP is supported.
let accept = headers['accept']?headers['accept'][0].value:"";
let url = [];
// build the new uri to be forwarded upstream
url.push(prefix);
url.push(width+"x"+height);
// check support for webp
if (accept.includes(variables.webpExtension)) {
url.push(variables.webpExtension);
}
else{
url.push(extension);
}
url.push(imageName+"."+extension);
fwdUri = url.join("/");
// final modified url is of format /images/200x200/webp/image.jpg
request.uri = fwdUri;
callback(null, request);
};
原始响应功能:
'use strict';
const http = require('http');
const https = require('https');
const querystring = require('querystring');
const AWS = require('aws-sdk');
const S3 = new AWS.S3({
signatureVersion: 'v4',
});
const Sharp = require('sharp');
// set the S3 and API GW endpoints
// S3 bucket holding both original and resized images.
// BUGFIX: the original value 'image-resize-${AWS::AccountId}-us-east-1' is a
// single-quoted JavaScript string, so the CloudFormation Fn::Sub token was
// never substituted at runtime — the function targeted a nonexistent bucket.
// Hard-code the actual bucket name instead.
const BUCKET = 'image-resize-488052071209-us-east-1';
exports.handler = (event, context, callback) => {
let response = event.Records[0].cf.response;
console.log("Response status code :%s", response.status);
//check if image is not present
if (response.status == 404) {
let request = event.Records[0].cf.request;
let params = querystring.parse(request.querystring);
// if there is no dimension attribute, just pass the response
if (!params.d) {
callback(null, response);
return;
}
// read the dimension parameter value = width x height and split it by 'x'
let dimensionMatch = params.d.split("x");
// read the required path. Ex: uri /images/100x100/webp/image.jpg
let path = request.uri;
// read the S3 key from the path variable.
// Ex: path variable /images/100x100/webp/image.jpg
let key = path.substring(1);
// parse the prefix, width, height and image name
// Ex: key=images/200x200/webp/image.jpg
let prefix, originalKey, match, width, height, requiredFormat, imageName;
let startIndex;
try {
match = key.match(/(.*)\/(\d+)x(\d+)\/(.*)\/(.*)/);
prefix = match[1];
width = parseInt(match[2], 10);
height = parseInt(match[3], 10);
// correction for jpg required for 'Sharp'
requiredFormat = match[4] == "jpg" ? "jpeg" : match[4];
imageName = match[5];
originalKey = prefix + "/" + imageName;
}
catch (err) {
// no prefix exist for image..
console.log("no prefix present..");
match = key.match(/(\d+)x(\d+)\/(.*)\/(.*)/);
width = parseInt(match[1], 10);
height = parseInt(match[2], 10);
// correction for jpg required for 'Sharp'
requiredFormat = match[3] == "jpg" ? "jpeg" : match[3];
imageName = match[4];
originalKey = imageName;
}
// get the source image file
S3.getObject({ Bucket: BUCKET, Key: originalKey }).promise()
// perform the resize operation
.then(data => Sharp(data.Body)
.resize(width, height)
.toFormat(requiredFormat)
.toBuffer()
)
.then(buffer => {
// save the resized object to S3 bucket with appropriate object key.
S3.putObject({
Body: buffer,
Bucket: BUCKET,
ContentType: 'image/' + requiredFormat,
CacheControl: 'max-age=31536000',
Key: key,
StorageClass: 'STANDARD'
}).promise()
// even if there is exception in saving the object we send back the generated
// image back to viewer below
.catch(() => { console.log("Exception while writing resized image to bucket")});
// generate a binary response with resized image
response.status = 200;
response.body = buffer.toString('base64');
response.bodyEncoding = 'base64';
response.headers['content-type'] = [{ key: 'Content-Type', value: 'image/' + requiredFormat }];
callback(null, response);
})
.catch( err => {
console.log("Exception while reading source image :%j",err);
});
} // end of if block checking response statusCode
else {
// allow the response to pass through
callback(null, response);
}
};
答案 0 :(得分:0)
我按照同一篇博客文章操作,也遇到了同样的问题,为此挣扎了好几个小时。我现在有了一个可行的解决方案,因此想在这里分享我的设置。
我没有使用CloudFormation,而是手动创建了资源。
对文章代码的第一处修改是在 origin-response/index.js
脚本中。对于不存在的文件,AWS 会返回 403
状态码,因此需要将检查 if (response.status == 404) {
的那一行改为如下形式:
if (response.status == 404 || response.status == 403) {
我的下一个更改是在AWSLambdaBasicExecutionRole
策略上。由于Lambda函数可以在多个区域中运行,因此它会将日志写入多个区域中的CloudWatch。因此,我将Resource ARN更改为通配区域。以下是策略JSON:
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": [
"arn:aws:logs:*:*:*"
]
}
]
}
接下来,我确保存储桶策略允许访问Lambda角色和CloudFront:
{
"Version": "2008-10-17",
"Id": "PolicyForCloudFrontPrivateContent",
"Statement": [
{
"Sid": "1",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::cloudfront:user/CloudFront Origin Access Identity XXXXXXXXXXXXX"
},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::BUCKET_NAME/*"
},
{
"Sid": "2",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::XXXXXXXXXXXXX:role/service-role/image-resize-origin-response-role-XXXXXXXXX"
},
"Action": [
"s3:PutObject",
"s3:GetObject",
],
"Resource": "arn:aws:s3:::BUCKET_NAME/*"
}
]
}
最后一个难题是为相同的Lambda角色创建策略以使其与S3 Bucket一起使用:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "1",
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": "arn:aws:s3:::BUCKET_NAME/*"
}
]
}