From 7abcab2a867c66f45ca20d486b3f372e6c1910b5 Mon Sep 17 00:00:00 2001 From: Shawn Kim Date: Sun, 9 Apr 2023 20:26:05 -0700 Subject: [PATCH] feat: payload hash function and remote s3 gateway --- .../00-check-for-required-env.sh | 3 +- common/etc/nginx/include/awscredentials.js | 5 +- common/etc/nginx/include/awssig4.js | 40 +- common/etc/nginx/include/lambdagateway.js | 1 - common/etc/nginx/include/s3gateway.js | 478 ------------------ common/etc/nginx/nginx.conf | 1 - .../etc/nginx/templates/default.conf.template | 125 +---- .../gateway/v2_headers.conf.template | 3 - .../gateway/v2_js_vars.conf.template | 4 - deployments/ecs/cloudformation/s3gateway.yaml | 2 - docker-compose.yml | 2 +- docs/getting_started.md | 3 - .../conf.d/gateway/server_variables.conf | 2 + .../conf.d/gateway/server_variables.conf | 2 + settings.example | 1 - standalone_ubuntu_oss_install.sh | 7 +- test.sh | 2 - test/docker-compose.yaml | 1 - test/unit/awssig2_test.js | 1 - test/unit/awssig4_test.js | 1 - test/unit/s3gateway_test.js | 1 - 21 files changed, 47 insertions(+), 638 deletions(-) delete mode 100644 common/etc/nginx/include/s3gateway.js delete mode 100644 common/etc/nginx/templates/gateway/v2_headers.conf.template delete mode 100644 common/etc/nginx/templates/gateway/v2_js_vars.conf.template diff --git a/common/docker-entrypoint.d/00-check-for-required-env.sh b/common/docker-entrypoint.d/00-check-for-required-env.sh index 0492051..13e0705 100755 --- a/common/docker-entrypoint.d/00-check-for-required-env.sh +++ b/common/docker-entrypoint.d/00-check-for-required-env.sh @@ -22,7 +22,7 @@ set -e failed=0 -required=("S3_BUCKET_NAME" "S3_SERVER" "S3_SERVER_PORT" "S3_SERVER_PROTO" +required=("S3_SERVER" "S3_SERVER_PORT" "S3_SERVER_PROTO" "S3_REGION" "S3_STYLE" "ALLOW_DIRECTORY_LIST" "AWS_SIGS_VERSION" "CORS_ENABLED") @@ -103,7 +103,6 @@ fi echo "S3 Backend Environment" echo "Access Key ID: ${AWS_ACCESS_KEY_ID}" -echo "Origin: 
${S3_SERVER_PROTO}://${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" echo "Region: ${S3_REGION}" echo "Addressing Style: ${S3_STYLE}" echo "AWS Signatures Version: v${AWS_SIGS_VERSION}" diff --git a/common/etc/nginx/include/awscredentials.js b/common/etc/nginx/include/awscredentials.js index 2a708fa..507b630 100644 --- a/common/etc/nginx/include/awscredentials.js +++ b/common/etc/nginx/include/awscredentials.js @@ -223,6 +223,8 @@ function _writeCredentialsToFile(credentials) { * @returns {Promise} */ async function fetchCredentials(r) { + r.log("start fetchCredentials(): ") + r.log(" - request body in fetching credentials: " + r.variables.request_body) /* If we are not using an AWS instance profile to set our credentials we exit quickly and don't write a credentials file. */ if (process.env['AWS_ACCESS_KEY_ID'] && process.env['AWS_SECRET_ACCESS_KEY']) { @@ -289,12 +291,13 @@ async function fetchCredentials(r) { r.return(500); return; } + r.return(200); } /** * Get the credentials needed to generate AWS signatures from the ECS - * (Elastic Container Service) metadata endpoint. + * (Elastic Container Service) metadata endpoint. 
* * @param credentialsUri {string} endpoint to get credentials from * @returns {Promise<{accessKeyId: (string), secretAccessKey: (string), sessionToken: (string), expiration: (string)}>} diff --git a/common/etc/nginx/include/awssig4.js b/common/etc/nginx/include/awssig4.js index 2b1d2e0..5e940cf 100644 --- a/common/etc/nginx/include/awssig4.js +++ b/common/etc/nginx/include/awssig4.js @@ -75,8 +75,7 @@ function signatureV4(r, timestamp, region, service, uri, queryParams, host, cred */ function _buildCanonicalRequest(r, method, uri, queryParams, host, amzDatetime, sessionToken) { - r.log('##### ---------------------------- #####') - r.log(' _buildCanonicalRequest(): ') + r.log('start _buildCanonicalRequest(): ---------------------<<< ') r.log(' - uri : ' + uri) r.log(' - method : ' + method) r.log(' - queryParams : ' + queryParams) @@ -84,7 +83,8 @@ function _buildCanonicalRequest(r, r.log(' - amzDatetime : ' + amzDatetime) r.log(' - request body: ' + r.variables.request_body) r.log(' - content_type: ' + r.variables.content_type) - const payloadHash = awsHeaderPayloadHash(r); + const payloadHash = r.variables.awsPayloadHash; + r.log(' - payloadHash: ' + payloadHash) let canonicalHeaders = ''; if (r.variables.content_type) { canonicalHeaders += 'content-type:' + r.variables.content_type + '\n' @@ -104,6 +104,7 @@ function _buildCanonicalRequest(r, canonicalRequest += _signedHeaders(r, sessionToken) + '\n'; canonicalRequest += payloadHash; + r.log('finish _buildCanonicalRequest(): --------------------->>> ') return canonicalRequest; } @@ -276,19 +277,36 @@ function _splitCachedValues(cached) { * @param r {Request} HTTP request object * @returns {string} payload hash */ -function awsHeaderPayloadHash(r) { - // Empty payload only works with this crypt library. - // TODO: Need to either find the right library or implement the crypto lib. 
- // const reqBodyStr = JSON.stringify(r.variables.request_body); +async function awsHeaderPayloadHash(r) { + r.log('start awsHeaderPayloadHash(): '); const reqBodyStr = r.variables.request_body; r.log(' - req body str: ' + reqBodyStr) - const payloadHash = mod_hmac.createHash('sha256', 'utf8') - .update(reqBodyStr) - .digest('hex'); + + const encoder = new TextEncoder(); + const data = encoder.encode(reqBodyStr); + const hash = await crypto.subtle.digest("SHA-256", data); + const payloadHash = Buffer.from(hash).toString('hex'); r.log(' - payload Hash: ' + payloadHash) - return payloadHash; + r.variables.lambda_payload_hash = payloadHash; + r.setReturnValue(payloadHash); + r.log('finish awsHeaderPayloadHash(): '); } +// function awsHeaderPayloadHash(r) { +// // Empty payload only works with this crypt library. +// // TODO: Need to either find the right library or implement the crypto lib. +// // const reqBodyStr = JSON.stringify(r.variables.request_body); +// const reqBodyStr = r.variables.request_body; +// r.log(' - req body str: ' + reqBodyStr) +// const payloadHash = mod_hmac.createHash('sha256', 'utf8') +// .update(reqBodyStr) +// .digest('hex'); +// r.log(' - payload Hash: ' + payloadHash) +// r.variables.lambda_payload_hash = payloadHash; + +// return payloadHash; +// } + export default { awsHeaderPayloadHash, signatureV4, diff --git a/common/etc/nginx/include/lambdagateway.js b/common/etc/nginx/include/lambdagateway.js index d48682d..e482300 100644 --- a/common/etc/nginx/include/lambdagateway.js +++ b/common/etc/nginx/include/lambdagateway.js @@ -40,7 +40,6 @@ utils.requireEnvVar('LAMBDA_REGION'); function lambdaAuth(r) { const host = process.env['LAMBDA_SERVER']; const region = process.env['LAMBDA_REGION']; - // const uri = '/2015-03-31/' + r.variables.request_uri + '/invocations'; r.log('#### URI for lambdaAuth(): ' + r.variables.request_uri) const queryParams = ''; const credentials = awscred.readCredentials(r); diff --git 
a/common/etc/nginx/include/s3gateway.js b/common/etc/nginx/include/s3gateway.js deleted file mode 100644 index 09e9d72..0000000 --- a/common/etc/nginx/include/s3gateway.js +++ /dev/null @@ -1,478 +0,0 @@ -/* - * Copyright 2020 F5 Networks - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import awscred from "./awscredentials.js"; -import awssig2 from "./awssig2.js"; -import awssig4 from "./awssig4.js"; -import utils from "./utils.js"; - -_require_env_var('S3_BUCKET_NAME'); -_require_env_var('S3_SERVER'); -_require_env_var('S3_SERVER_PROTO'); -_require_env_var('S3_SERVER_PORT'); -_require_env_var('S3_REGION'); -_require_env_var('AWS_SIGS_VERSION'); -_require_env_var('S3_STYLE'); - - -/** - * Flag indicating debug mode operation. If true, additional information - * about signature generation will be logged. - * @type {boolean} - */ -const ALLOW_LISTING = utils.parseBoolean(process.env['ALLOW_DIRECTORY_LIST']); -const PROVIDE_INDEX_PAGE = utils.parseBoolean(process.env['PROVIDE_INDEX_PAGE']); -const APPEND_SLASH = utils.parseBoolean(process.env['APPEND_SLASH_FOR_POSSIBLE_DIRECTORY']); -const FOUR_O_FOUR_ON_EMPTY_BUCKET = utils.parseBoolean(process.env['FOUR_O_FOUR_ON_EMPTY_BUCKET']); -const S3_STYLE = process.env['S3_STYLE']; - -const ADDITIONAL_HEADER_PREFIXES_TO_STRIP = utils.parseArray(process.env['HEADER_PREFIXES_TO_STRIP']); - -/** - * Default filename for index pages to be read off of the backing object store. 
- * @type {string} - */ -const INDEX_PAGE = "index.html"; - -/** - * Constant defining the service requests are being signed for. - * @type {string} - */ -const SERVICE = 's3'; - -/** - * Transform the headers returned from S3 such that there isn't information - * leakage about S3 and do other tasks needed for appropriate gateway output. - * @param r HTTP request - */ -function editHeaders(r) { - const isDirectoryHeadRequest = - ALLOW_LISTING && - r.method === 'HEAD' && - _isDirectory(decodeURIComponent(r.variables.uri_path)); - - /* Strips all x-amz- headers from the output HTTP headers so that the - * requesters to the gateway will not know you are proxying S3. */ - if ('headersOut' in r) { - for (const key in r.headersOut) { - /* We delete all headers when it is a directory head request because - * none of the information is relevant for passing on via a gateway. */ - if (isDirectoryHeadRequest) { - delete r.headersOut[key]; - } else if (_isHeaderToBeStripped(key.toLowerCase(), ADDITIONAL_HEADER_PREFIXES_TO_STRIP)) { - delete r.headersOut[key]; - } - } - - /* Transform content type returned on HEAD requests for directories - * if directory listing is enabled. If you change the output format - * for the XSL stylesheet from HTML to something else, you will - * want to change the content type below. */ - if (isDirectoryHeadRequest) { - r.headersOut['Content-Type'] = 'text/html; charset=utf-8' - } - } -} - -/** - * Determines if a given HTTP header should be removed before being - * sent on to the requesting client. 
- * @param headerName {string} Lowercase HTTP header name - * @param additionalHeadersToStrip {Array[string]} array of additional headers to remove - * @returns {boolean} true if header should be removed - */ -function _isHeaderToBeStripped(headerName, additionalHeadersToStrip) { - if (headerName.indexOf('x-amz-', 0) >= 0) { - return true; - } - - for (let i = 0; i < additionalHeadersToStrip.length; i++) { - const headerToStrip = additionalHeadersToStrip[i]; - if (headerName.indexOf(headerToStrip, 0) >= 0) { - return true; - } - } - - return false; -} - -/** - * Outputs the timestamp used to sign the request, so that it can be added to - * the 'Date' header and sent by NGINX. - * - * @param r {Request} HTTP request object (not used, but required for NGINX configuration) - * @returns {string} RFC2616 timestamp - */ -function s3date(r) { - return awscred.getNow().toUTCString(); -} - -/** - * Outputs the timestamp used to sign the request, so that it can be added to - * the 'x-amz-date' header and sent by NGINX. The output format is - * ISO 8601: YYYYMMDD'T'HHMMSS'Z'. - * @see {@link https://docs.aws.amazon.com/general/latest/gr/sigv4-date-handling.html | Handling dates in Signature Version 4} - * - * @param r {Request} HTTP request object (not used, but required for NGINX configuration) - * @returns {string} ISO 8601 timestamp - */ -function awsHeaderDate(r) { - return utils.getAmzDatetime( - awscred.getNow(), - utils.getEightDigitDate(awscred.getNow()) - ); -} - -/** - * Creates an AWS authentication signature based on the global settings and - * the passed request parameter. 
- * - * @param r {Request} HTTP request object - * @returns {string} AWS authentication signature - */ -function s3auth(r) { - const bucket = process.env['S3_BUCKET_NAME']; - const region = process.env['S3_REGION']; - let server; - if (S3_STYLE === 'path') { - server = process.env['S3_SERVER'] + ':' + process.env['S3_SERVER_PORT']; - } else { - server = process.env['S3_SERVER']; - } - const sigver = process.env['AWS_SIGS_VERSION']; - - let signature; - - const credentials = awscred.readCredentials(r); - if (sigver == '2') { - let req = _s3ReqParamsForSigV2(r, bucket); - signature = awssig2.signatureV2(r, req.uri, req.httpDate, credentials); - } else { - let req = _s3ReqParamsForSigV4(r, bucket, server); - signature = awssig4.signatureV4(r, awscred.getNow(), region, SERVICE, - req.uri, req.queryParams, req.host, credentials); - } - - return signature; -} - -/** - * Generate some of request parameters for AWS signature version 2 - * - * @see {@link https://docs.aws.amazon.com/AmazonS3/latest/userguide/auth-request-sig-v2.html | AWS signature version 2} - * @param r {Request} HTTP request object - * @param bucket {string} S3 bucket associated with request - * @returns s3ReqParams {object} s3ReqParams object (host, method, uri, queryParams) - * @private - */ -function _s3ReqParamsForSigV2(r, bucket) { - /* If the source URI is a directory, we are sending to S3 a query string - * local to the root URI, so this is what we need to encode within the - * string to sign. For example, if we are requesting /bucket/dir1/ from - * nginx, then in S3 we need to request /?delimiter=/&prefix=dir1/ - * Thus, we can't put the path /dir1/ in the string to sign. */ - let uri = _isDirectory(r.variables.uri_path) ? 
'/' : r.variables.uri_path; - // To return index pages + index.html - if (PROVIDE_INDEX_PAGE && _isDirectory(r.variables.uri_path)){ - uri = r.variables.uri_path + INDEX_PAGE - } - - return { - uri: '/' + bucket + uri, - httpDate: s3date(r) - }; -} - -/** - * Generate some of request parameters for AWS signature version 4 - * - * @see {@link https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html | AWS V4 Signing Process} - * @param r {Request} HTTP request object - * @param bucket {string} S3 bucket associated with request - * @param server {string} S3 host associated with request - * @returns s3ReqParams {object} s3ReqParams object (host, uri, queryParams) - * @private - */ -function _s3ReqParamsForSigV4(r, bucket, server) { - let host = server; - if (S3_STYLE === 'virtual' || S3_STYLE === 'default' || S3_STYLE === undefined) { - host = bucket + '.' + host; - } - const baseUri = s3BaseUri(r); - const queryParams = _s3DirQueryParams(r.variables.uri_path, r.method); - let uri; - if (queryParams.length > 0) { - if (baseUri.length > 0) { - uri = baseUri; - } else { - uri = '/'; - } - } else { - uri = s3uri(r); - } - return { - host: host, - uri: uri, - queryParams: queryParams - }; -} - -/** - * Build the base file path for a S3 request URI. This function allows for - * path style S3 URIs to be created that do not use a subdomain to specify - * the bucket name. 
- * - * @param r {Request} HTTP request object (not used, but required for NGINX configuration) - * @returns {string} start of the file path for the S3 object URI - */ -function s3BaseUri(r) { - const bucket = process.env['S3_BUCKET_NAME']; - let basePath; - - if (S3_STYLE === 'path') { - utils.debug_log(r, 'Using path style uri : ' + '/' + bucket); - basePath = '/' + bucket; - } else { - basePath = ''; - } - - return basePath; -} - -/** - * Returns the s3 path given the incoming request - * - * @param r HTTP request - * @returns {string} uri for s3 request - */ -function s3uri(r) { - let uriPath = r.variables.uri_path; - const basePath = s3BaseUri(r); - let path; - - // Create query parameters only if directory listing is enabled. - if (ALLOW_LISTING) { - const queryParams = _s3DirQueryParams(uriPath, r.method); - if (queryParams.length > 0) { - path = basePath + '?' + queryParams; - } else { - path = _escapeURIPath(basePath + uriPath); - } - } else { - // This is a path that will resolve to an index page - if (PROVIDE_INDEX_PAGE && _isDirectory(uriPath) ) { - uriPath += INDEX_PAGE; - } - path = _escapeURIPath(basePath + uriPath); - } - - utils.debug_log(r, 'S3 Request URI: ' + r.method + ' ' + path); - return path; -} - -/** - * Create and encode the query parameters needed to query S3 for an object - * listing. - * - * @param uriPath request URI path - * @param method request HTTP method - * @returns {string} query parameters to use with S3 request - * @private - */ -function _s3DirQueryParams(uriPath, method) { - if (!_isDirectory(uriPath) || method !== 'GET') { - return ''; - } - - /* Return if static website. We don't want to list the files in the - directory, we want to append the index page and get the fil. */ - if (PROVIDE_INDEX_PAGE){ - return ''; - } - - let path = 'delimiter=%2F' - - if (uriPath !== '/') { - let decodedUriPath = decodeURIComponent(uriPath); - let without_leading_slash = decodedUriPath.charAt(0) === '/' ? 
- decodedUriPath.substring(1, decodedUriPath.length) : decodedUriPath; - path += '&prefix=' + _encodeURIComponent(without_leading_slash); - } - - return path; -} - -/** - * Redirects the request to the appropriate location. If the request is not - * a read (GET/HEAD) request, then we reject the request outright by returning - * a HTTP 405 error with a list of allowed methods. - * - * @param r {Request} HTTP request object - */ -function redirectToS3(r) { - // This is a read-only S3 gateway, so we do not support any other methods - if (!(r.method === 'GET' || r.method === 'HEAD')) { - utils.debug_log(r, 'Invalid method requested: ' + r.method); - r.internalRedirect("@error405"); - return; - } - - const uriPath = r.variables.uri_path; - const isDirectoryListing = ALLOW_LISTING && _isDirectory(uriPath); - - if (isDirectoryListing && r.method === 'GET') { - r.internalRedirect("@s3Listing"); - } else if ( PROVIDE_INDEX_PAGE == true ) { - r.internalRedirect("@s3"); - } else if ( !ALLOW_LISTING && !PROVIDE_INDEX_PAGE && uriPath == "/" ) { - r.internalRedirect("@error404"); - } else { - r.internalRedirect("@s3"); - } -} - -function trailslashControl(r) { - if (APPEND_SLASH) { - const hasExtension = /\/[^.\/]+\.[^.]+$/; - if (!hasExtension.test(r.variables.uri_path) && !_isDirectory(r.variables.uri_path)){ - return r.internalRedirect("@trailslash"); - } - } - r.internalRedirect("@error404"); -} - -/** - * Processes the directory listing output as returned from S3. If - * FOUR_O_FOUR_ON_EMPTY_BUCKET is enabled, this function will corrupt the - * XML output by inserting the string 'junk' into the output thereby causing - * nginx to return a 404 for empty directory listings. - * - * If anyone finds a better way to do this, please submit a PR. 
- * - * @param r {Request} HTTP request object (not used, but required for NGINX configuration) - * @param data chunked data buffer - * @param flags contains field that indicates that a chunk is last - */ -function filterListResponse(r, data, flags) { - if (FOUR_O_FOUR_ON_EMPTY_BUCKET) { - let indexIsEmpty = utils.parseBoolean(r.variables.indexIsEmpty); - - if (indexIsEmpty && data.indexOf('= 0) { - r.variables.indexIsEmpty = false; - indexIsEmpty = false; - } - - if (indexIsEmpty && data.indexOf('= 0) { - r.variables.indexIsEmpty = false; - indexIsEmpty = false; - } - - if (flags.last && indexIsEmpty) { - r.sendBuffer('junk', flags); - } else { - r.sendBuffer(data, flags); - } - } else { - r.sendBuffer(data, flags); - } -} - -/** - * Adds additional encoding to a URI component - * - * @param string {string} string to encode - * @returns {string} an encoded string - * @private - */ -function _encodeURIComponent(string) { - return encodeURIComponent(string) - .replace(/[!*'()]/g, (c) => - `%${c.charCodeAt(0).toString(16).toUpperCase()}`); -} - -/** - * Escapes the path portion of a URI without escaping the path separator - * characters (/). - * - * @param uri {string} unescaped URI - * @returns {string} URI with each path component separately escaped - * @private - */ -function _escapeURIPath(uri) { - // Check to see if the URI path was already encoded. If so, we decode it. - let decodedUri = (uri.indexOf('%') >= 0) ? decodeURIComponent(uri) : uri; - let components = []; - - decodedUri.split('/').forEach(function (item, i) { - components[i] = _encodeURIComponent(item); - }); - - return components.join('/'); -} - -/** - * Determines if a given path is a directory based on whether or not the last - * character in the path is a forward slash (/). 
- * - * @param path {string} path to parse - * @returns {boolean} true if path is a directory - * @private - */ -function _isDirectory(path) { - if (path === undefined) { - return false; - } - const len = path.length; - - if (len < 1) { - return false; - } - - return path.charAt(len - 1) === '/'; -} - -/** - * Checks to see if the given environment variable is present. If not, an error - * is thrown. - * @param envVarName {string} environment variable to check for - * @private - */ -function _require_env_var(envVarName) { - const isSet = envVarName in process.env; - - if (!isSet) { - throw('Required environment variable ' + envVarName + ' is missing'); - } -} - - -export default { - awsHeaderDate, - s3date, - s3auth, - s3uri, - trailslashControl, - redirectToS3, - editHeaders, - filterListResponse, - // These functions do not need to be exposed, but they are exposed so that - // unit tests can run against them. - _s3ReqParamsForSigV2, - _s3ReqParamsForSigV4, - _encodeURIComponent, - _escapeURIPath, - _isHeaderToBeStripped -}; diff --git a/common/etc/nginx/nginx.conf b/common/etc/nginx/nginx.conf index 035f909..0c649cf 100644 --- a/common/etc/nginx/nginx.conf +++ b/common/etc/nginx/nginx.conf @@ -12,7 +12,6 @@ load_module modules/ngx_http_xslt_filter_module.so; env AWS_ACCESS_KEY_ID; env AWS_SECRET_ACCESS_KEY; env AWS_SESSION_TOKEN; -env S3_BUCKET_NAME; env S3_SERVER; env S3_SERVER_PORT; env S3_SERVER_PROTO; diff --git a/common/etc/nginx/templates/default.conf.template b/common/etc/nginx/templates/default.conf.template index b473d0c..ebef2a7 100644 --- a/common/etc/nginx/templates/default.conf.template +++ b/common/etc/nginx/templates/default.conf.template @@ -1,6 +1,5 @@ js_import /etc/nginx/include/awscredentials.js; js_import /etc/nginx/include/awssig4.js; -js_import /etc/nginx/include/s3gateway.js; js_import /etc/nginx/include/lambdagateway.js; # We include only the variables needed for the authentication signatures that @@ -15,9 +14,7 @@ map $request_uri 
$uri_path { } map $S3_STYLE $s3_host_hdr { - virtual "${S3_BUCKET_NAME}.${S3_SERVER}"; path "${S3_SERVER}:${S3_SERVER_PORT}"; - default "${S3_BUCKET_NAME}.${S3_SERVER}"; } map $host $lambda_host { @@ -27,9 +24,7 @@ map $host $lambda_host { js_var $indexIsEmpty true; # This creates the HTTP authentication header to be sent to S3 -js_set $s3auth s3gateway.s3auth; js_set $awsSessionToken awscredentials.sessionToken; -js_set $s3uri s3gateway.s3uri; js_set $lambdaAuth lambdagateway.lambdaAuth; js_set $lambdaURI lambdagateway.lambdaURI; @@ -91,9 +86,8 @@ server { auth_request /aws/credentials/retrieve; # Redirect to the proper location based on the client request - either - # @s3, @s3Listing or @error405. + # @lambda or @error405. - #js_content s3gateway.redirectToS3; js_content lambdagateway.redirectToLambda; } @@ -105,10 +99,6 @@ server { } location @lambda { - # We include only the headers needed for the authentication signatures that - # we plan to use. - include /etc/nginx/conf.d/gateway/v${AWS_SIGS_VERSION}_headers.conf; - # The CORS configuration needs to be imported in several places in order for # it to be applied within different contexts. # include /etc/nginx/conf.d/gateway/cors.conf; @@ -136,7 +126,7 @@ server { # We strip off all of the AWS specific headers from the server so that # there is nothing identifying the object as having originated in an # object store. - #js_header_filter s3gateway.editHeaders; + #js_header_filter lambdagateway.editHeaders; # Catch all errors from S3 and sanitize them so that the user can't # gain intelligence about the S3 bucket being proxied. 
@@ -147,116 +137,15 @@ server { #error_page 404 @trailslashControl; - #proxy_pass ${S3_SERVER_PROTO}://storage_urls$s3uri; - #proxy_pass ${LAMBDA_SERVER_PROTO}://lambda_urls$lambdaURI; - proxy_pass $lambda_host$lambdaURI; - #proxy_pass https://lambda.us-east-2.amazonaws.com/2015-03-31/functions/nginx-serverless/invocations; - - #include /etc/nginx/conf.d/gateway/s3_location.conf; - } - - location @s3 { - # We include only the headers needed for the authentication signatures that - # we plan to use. - include /etc/nginx/conf.d/gateway/v${AWS_SIGS_VERSION}_headers.conf; - - # The CORS configuration needs to be imported in several places in order for - # it to be applied within different contexts. - include /etc/nginx/conf.d/gateway/cors.conf; - - # Don't allow any headers from the client - we don't want them messing - # with S3 at all. - proxy_pass_request_headers off; - - # Enable passing of the server name through TLS Server Name Indication extension. - proxy_ssl_server_name on; - proxy_ssl_name ${S3_SERVER}; - - # Set the Authorization header to the AWS Signatures credentials - proxy_set_header Authorization $s3auth; - proxy_set_header X-Amz-Security-Token $awsSessionToken; - - # We set the host as the bucket name to inform the S3 API of the bucket - proxy_set_header Host $s3_host_hdr; - - # Use keep alive connections in order to improve performance - proxy_http_version 1.1; - proxy_set_header Connection ''; - - # We strip off all of the AWS specific headers from the server so that - # there is nothing identifying the object as having originated in an - # object store. - js_header_filter s3gateway.editHeaders; - - # Catch all errors from S3 and sanitize them so that the user can't - # gain intelligence about the S3 bucket being proxied. 
- proxy_intercept_errors on; - - # Comment out this line to receive the error messages returned by S3 - error_page 400 401 402 403 405 406 407 408 409 410 411 412 413 414 415 416 417 418 420 422 423 424 426 428 429 431 444 449 450 451 500 501 502 503 504 505 506 507 508 509 510 511 =404 @error404; - - error_page 404 @trailslashControl; - - proxy_pass ${S3_SERVER_PROTO}://storage_urls$s3uri; - - include /etc/nginx/conf.d/gateway/s3_location.conf; - } - - location @s3Listing { # We include only the headers needed for the authentication signatures that # we plan to use. include /etc/nginx/conf.d/gateway/v${AWS_SIGS_VERSION}_headers.conf; - # The CORS configuration needs to be imported in several places in order for - # it to be applied within different contexts. - include /etc/nginx/conf.d/gateway/cors.conf; - - # Don't allow any headers from the client - we don't want them messing - # with S3 at all. - proxy_pass_request_headers off; - - # Enable passing of the server name through TLS Server Name Indication extension. - proxy_ssl_server_name on; - proxy_ssl_name ${S3_SERVER}; - - # Set the Authorization header to the AWS Signatures credentials - proxy_set_header Authorization $s3auth; - proxy_set_header X-Amz-Security-Token $awsSessionToken; - - # We set the host as the bucket name to inform the S3 API of the bucket - proxy_set_header Host $s3_host_hdr; - - # Use keep alive connections in order to improve performance - proxy_http_version 1.1; - proxy_set_header Connection ''; - - # We strip off all of the AWS specific headers from the server so that - # there is nothing identifying the object as having originated in an - # object store. - js_header_filter s3gateway.editHeaders; - - # Apply XSL transformation to the XML returned from S3 directory listing - # results such that we can output an HTML directory contents list. 
- xslt_stylesheet /etc/nginx/include/listing.xsl; - xslt_types application/xml; - - # We apply an output filter to the XML input received from S3 before it - # is passed to XSLT in order to determine if the resource is not a valid - # S3 directory. If it isn't a valid directory, we do a dirty hack to - # corrupt the contents of the XML causing the XSLT to fail and thus - # nginx to return a 404 to the client. If you don't care about empty - # directory listings for invalid directories, remove this. - js_body_filter s3gateway.filterListResponse; - - # Catch all errors from S3 and sanitize them so that the user can't - # gain intelligence about the S3 bucket being proxied. - proxy_intercept_errors on; - - # Comment out this line to receive the error messages returned by S3 - error_page 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 420 422 423 424 426 428 429 431 444 449 450 451 500 501 502 503 504 505 506 507 508 509 510 511 =404 @error404; + #proxy_pass ${LAMBDA_SERVER_PROTO}://lambda_urls$lambdaURI; + proxy_pass $lambda_host$lambdaURI; + #proxy_pass https://lambda.us-east-2.amazonaws.com/2015-03-31/functions/nginx-serverless/invocations; - proxy_pass ${S3_SERVER_PROTO}://storage_urls$s3Uri; - include /etc/nginx/conf.d/gateway/s3listing_location.conf; + #include /etc/nginx/conf.d/gateway/s3_location.conf; } location @error404 { @@ -266,7 +155,7 @@ server { location @trailslashControl { # Checks if requesting a folder without trailing slash, and return 302 # appending a slash to it when using for static site hosting. 
- js_content s3gateway.trailslashControl; + js_content lambdagateway.trailslashControl; } location @trailslash { diff --git a/common/etc/nginx/templates/gateway/v2_headers.conf.template b/common/etc/nginx/templates/gateway/v2_headers.conf.template deleted file mode 100644 index 5798b25..0000000 --- a/common/etc/nginx/templates/gateway/v2_headers.conf.template +++ /dev/null @@ -1,3 +0,0 @@ -# This header is needed when doing v2 signature authentication. It -# specifies the timestamp in which the signature was generated. -proxy_set_header Date $httpDate; diff --git a/common/etc/nginx/templates/gateway/v2_js_vars.conf.template b/common/etc/nginx/templates/gateway/v2_js_vars.conf.template deleted file mode 100644 index 04e9eda..0000000 --- a/common/etc/nginx/templates/gateway/v2_js_vars.conf.template +++ /dev/null @@ -1,4 +0,0 @@ -# This header is needed when doing v2 signature authentication. It -# specifies the timestamp in which the signature was generated and is used with -# the HTTP Date header. 
-js_set $httpDate s3gateway.s3date; diff --git a/deployments/ecs/cloudformation/s3gateway.yaml b/deployments/ecs/cloudformation/s3gateway.yaml index a64bb2d..0241a9e 100644 --- a/deployments/ecs/cloudformation/s3gateway.yaml +++ b/deployments/ecs/cloudformation/s3gateway.yaml @@ -223,8 +223,6 @@ Resources: Value: 'true' - Name: AWS_SIGS_VERSION Value: '4' - - Name: S3_BUCKET_NAME - Value: !Ref 'S3Bucket' - Name: S3_REGION Value: !Ref 'AWS::Region' - Name: S3_SERVER_PORT diff --git a/docker-compose.yml b/docker-compose.yml index 23873c7..bb678b9 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -7,7 +7,7 @@ services: image: nplus_lambda_gateway build: context: ./ - dockerfile: Dockerfile.plus + dockerfile: Dockerfile.oss env_file: - settings.test volumes: diff --git a/docs/getting_started.md b/docs/getting_started.md index ebb7e6c..6a37b4b 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -20,7 +20,6 @@ running as a Container or as a Systemd service. | `AWS_ACCESS_KEY_ID` | Yes | | | Access key | | `AWS_SECRET_ACCESS_KEY` | Yes | | | Secret access key | | `AWS_SESSION_TOKEN` | No | | | Session token. 
| -| `S3_BUCKET_NAME` | Yes | | | Name of S3 bucket to proxy requests to | | `S3_REGION` | Yes | | | Region associated with API | | `S3_SERVER_PORT` | Yes | | | SSL/TLS port to connect to | | `S3_SERVER_PROTO` | Yes | `http`, `https` | | Protocol to used connect to S3 server | @@ -332,8 +331,6 @@ spec: image: "ghcr.io/nginxinc/nginx-s3-gateway/nginx-oss-s3-gateway:latest-20220916" imagePullPolicy: IfNotPresent env: - - name: S3_BUCKET_NAME - value: "" - name: S3_SERVER value: "s3..amazonaws.com" - name: S3_SERVER_PROTO diff --git a/oss/etc/nginx/conf.d/gateway/server_variables.conf b/oss/etc/nginx/conf.d/gateway/server_variables.conf index cf0077e..e560fab 100644 --- a/oss/etc/nginx/conf.d/gateway/server_variables.conf +++ b/oss/etc/nginx/conf.d/gateway/server_variables.conf @@ -7,3 +7,5 @@ set $cache_signing_key_enabled 0; # caching is turned on. This feature uses the keyval store, so it # is only enabled when using NGINX Plus. set $cache_instance_credentials_enabled 0; + +set $lambda_payload_hash "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; diff --git a/plus/etc/nginx/conf.d/gateway/server_variables.conf b/plus/etc/nginx/conf.d/gateway/server_variables.conf index 851f0a9..ca11ee9 100644 --- a/plus/etc/nginx/conf.d/gateway/server_variables.conf +++ b/plus/etc/nginx/conf.d/gateway/server_variables.conf @@ -7,3 +7,5 @@ set $cache_signing_key_enabled 1; # caching is turned on. This feature uses the keyval store, so it # is only enabled when using NGINX Plus. 
set $cache_instance_credentials_enabled 1; + +set $lambda_payload_hash "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; diff --git a/settings.example b/settings.example index 4cefe71..23c8161 100644 --- a/settings.example +++ b/settings.example @@ -1,4 +1,3 @@ -S3_BUCKET_NAME=my-bucket AWS_ACCESS_KEY_ID=ZZZZZZZZZZZZZZZZZZZZ AWS_SECRET_ACCESS_KEY=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa AWS_SESSION_TOKEN=bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb diff --git a/standalone_ubuntu_oss_install.sh b/standalone_ubuntu_oss_install.sh index 3cccfb3..ad9257c 100644 --- a/standalone_ubuntu_oss_install.sh +++ b/standalone_ubuntu_oss_install.sh @@ -30,7 +30,7 @@ fi failed=0 -required=("S3_BUCKET_NAME" "S3_SERVER" "S3_SERVER_PORT" "S3_SERVER_PROTO" +required=("S3_SERVER" "S3_SERVER_PORT" "S3_SERVER_PROTO" "S3_REGION" "S3_STYLE" "ALLOW_DIRECTORY_LIST" "AWS_SIGS_VERSION") if [ ! -z ${AWS_CONTAINER_CREDENTIALS_RELATIVE_URI+x} ]; then @@ -79,7 +79,6 @@ echo "Installing using github '${branch}' branch" echo "S3 Backend Environment" echo "Access Key ID: ${AWS_ACCESS_KEY_ID}" -echo "Origin: ${S3_SERVER_PROTO}://${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" echo "Region: ${S3_REGION}" echo "Addressing Style: ${S3_STYLE}" echo "AWS Signatures Version: v${AWS_SIGS_VERSION}" @@ -135,8 +134,6 @@ cat > "/etc/nginx/environment" << EOF ALLOW_DIRECTORY_LIST=${ALLOW_DIRECTORY_LIST} # AWS Authentication signature version (2=v2 authentication, 4=v4 authentication) AWS_SIGS_VERSION=${AWS_SIGS_VERSION} -# Name of S3 bucket to proxy requests to -S3_BUCKET_NAME=${S3_BUCKET_NAME} # Region associated with API S3_REGION=${S3_REGION} # SSL/TLS port to connect to @@ -304,7 +301,6 @@ EOF fi cat >> /etc/nginx/nginx.conf << 'EOF' -env S3_BUCKET_NAME; env S3_SERVER; env S3_SERVER_PORT; env S3_SERVER_PROTO; @@ -353,7 +349,6 @@ download "common/etc/nginx/include/awscredentials.js" "/etc/nginx/include/awscre download "common/etc/nginx/include/awssig2.js" "/etc/nginx/include/awssig2.js" download 
"common/etc/nginx/include/awssig4.js" "/etc/nginx/include/awssig4.js" download "common/etc/nginx/include/lambdagateway.js" "/etc/nginx/include/lambdagateway.js" -download "common/etc/nginx/include/s3gateway.js" "/etc/nginx/include/s3gateway.js" download "common/etc/nginx/include/utils.js" "/etc/nginx/include/utils.js" download "common/etc/nginx/templates/default.conf.template" "/etc/nginx/templates/default.conf.template" download "common/etc/nginx/templates/gateway/v2_headers.conf.template" "/etc/nginx/templates/gateway/v2_headers.conf.template" diff --git a/test.sh b/test.sh index 6099163..87ebb29 100755 --- a/test.sh +++ b/test.sh @@ -278,7 +278,6 @@ runUnitTestWithOutSessionToken() { -e "S3_STYLE=virtual" \ -e "AWS_ACCESS_KEY_ID=unit_test" \ -e "AWS_SECRET_ACCESS_KEY=unit_test" \ - -e "S3_BUCKET_NAME=unit_test" \ -e "S3_SERVER=unit_test" \ -e "S3_SERVER_PROTO=https" \ -e "S3_SERVER_PORT=443" \ @@ -302,7 +301,6 @@ runUnitTestWithSessionToken() { -e "AWS_ACCESS_KEY_ID=unit_test" \ -e "AWS_SECRET_ACCESS_KEY=unit_test" \ -e "AWS_SESSION_TOKEN=unit_test" \ - -e "S3_BUCKET_NAME=unit_test" \ -e "S3_SERVER=unit_test" \ -e "S3_SERVER_PROTO=https" \ -e "S3_SERVER_PORT=443" \ diff --git a/test/docker-compose.yaml b/test/docker-compose.yaml index ffe09ff..88e2787 100644 --- a/test/docker-compose.yaml +++ b/test/docker-compose.yaml @@ -14,7 +14,6 @@ services: - "minio" restart: "no" environment: - S3_BUCKET_NAME: "bucket-1" AWS_ACCESS_KEY_ID: "AKIAIOSFODNN7EXAMPLE" AWS_SECRET_ACCESS_KEY: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" S3_SERVER: "minio" diff --git a/test/unit/awssig2_test.js b/test/unit/awssig2_test.js index c61507b..6314375 100644 --- a/test/unit/awssig2_test.js +++ b/test/unit/awssig2_test.js @@ -17,7 +17,6 @@ */ import awssig2 from "include/awssig2.js"; -import s3gateway from "include/s3gateway.js"; function _runSignatureV2(r) { diff --git a/test/unit/awssig4_test.js b/test/unit/awssig4_test.js index 1d8f9ca..9d94d87 100644 --- a/test/unit/awssig4_test.js +++ 
b/test/unit/awssig4_test.js @@ -17,7 +17,6 @@ */ import awssig4 from "include/awssig4.js"; -import s3gateway from "include/s3gateway.js"; import utils from "include/utils.js"; diff --git a/test/unit/s3gateway_test.js b/test/unit/s3gateway_test.js index 8c7e6f0..bcaaafb 100755 --- a/test/unit/s3gateway_test.js +++ b/test/unit/s3gateway_test.js @@ -17,7 +17,6 @@ */ import awssig4 from "include/awssig4.js"; -import s3gateway from "include/s3gateway.js"; var fakeRequest = { "remoteAddress" : "172.17.0.1", pFad - Phonifier reborn
