From ec8553087899dc2db5ab23ec745c3dbe9236bd48 Mon Sep 17 00:00:00 2001 From: Johan Kielbaey Date: Thu, 10 Mar 2022 10:30:16 +0100 Subject: [PATCH 1/3] Allow use of session tokens --- doc/sigv4_post_sample.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/doc/sigv4_post_sample.py b/doc/sigv4_post_sample.py index a864931..a83c2fd 100644 --- a/doc/sigv4_post_sample.py +++ b/doc/sigv4_post_sample.py @@ -62,6 +62,8 @@ def lambda_handler(event, context): # to embed credentials in code. access_key = os.environ.get('AWS_ACCESS_KEY_ID') secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY') + # session_token is None when no session token is set (long-term credentials). + session_token = os.environ.get('AWS_SESSION_TOKEN') if access_key is None or secret_key is None: print('No access key is available.') sys.exit() @@ -89,7 +91,10 @@ def lambda_handler(event, context): # Step 4: Create the canonical headers. Header names must be trimmed # and lowercase, and sorted in code point order from low to high. # Note that there is a trailing \n. - canonical_headers = 'host:' + host + '\n' + 'x-amz-date:' + amz_date + '\n' + canonical_headers = 'host:' + host + '\n' + \ + 'x-amz-date:' + amz_date + '\n' + if session_token is not None: + canonical_headers += 'x-amz-security-token:' + session_token + '\n' # Step 5: Create the list of signed headers. This lists the headers # in the canonical_headers list, delimited with ";" and in alpha order. @@ -97,6 +102,8 @@ def lambda_handler(event, context): # signed_headers include those that you want to be included in the # hash of the request. "Host" and "x-amz-date" are always required. signed_headers = 'host;x-amz-date' + if session_token is not None: + signed_headers += ';x-amz-security-token' # Step 6: Create payload hash. In this example, the payload (body of # the request) contains the request parameters. 
@@ -132,6 +139,8 @@ def lambda_handler(event, context): headers = {'Authorization': authorization_header, 'Content-Type': content_type, 'x-amz-date': amz_date} + if session_token: + headers['x-amz-security-token'] = session_token # ************* SEND THE REQUEST ************* print('\nBEGIN REQUEST++++++++++++++++++++++++++++++++++++') From 0bd9d52dad343f0e0291cfb85a8244a061e03429 Mon Sep 17 00:00:00 2001 From: Johan Kielbaey Date: Thu, 10 Mar 2022 10:39:16 +0100 Subject: [PATCH 2/3] Get details from the Lambda environment variables --- doc/sigv4_post_sample.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/doc/sigv4_post_sample.py b/doc/sigv4_post_sample.py index a83c2fd..de4d907 100644 --- a/doc/sigv4_post_sample.py +++ b/doc/sigv4_post_sample.py @@ -45,15 +45,16 @@ def lambda_handler(event, context): method = 'POST' service = 'execute-api' # Host is the base URL for your REST API, where {restapi_id} is the API identifier, {region} is the Region of the API deployment. - host = '{restapi_id}.execute-api.{region}.amazonaws.com' - region = '{region}' + region = os.environ.get('AWS_REGION') + rest_api_id = os.environ.get('REST_API_ID') + host = f"{rest_api_id}.execute-api.{region}.amazonaws.com" # Endpoint is the endpoint URL for your REST API resource, where {api_name} is the API name, and {method_name} is the name of the method resource of the API deployment. - endpoint = 'https://{restapi_id}.execute-api.us-west-2.amazonaws.com/api/{api_name}/{method_name}' + endpoint = f'https://{host}/api/workflow/execution' # POST requests use a content type header. 
content_type = 'application/json' # Specify the Amazon S3 location for the input media file: - s3_bucket = '{s3_bucket}' + s3_bucket = event["Records"][0]["s3"]["bucket"]["name"] s3_key = event["Records"][0]["s3"]["object"]["key"] # Request parameters for executing the CasImageWorkflow in its default configuration request_parameters = '{"Name":"ContentLocalizationWorkflow","Configuration":{"PreprocessVideo":{"Thumbnail":{"ThumbnailPosition":"2","Enabled":true},"Mediainfo":{"Enabled":true}},"AnalyzeVideo":{"faceDetection":{"Enabled":false},"technicalCueDetection":{"Enabled":false},"shotDetection":{"Enabled":false},"celebrityRecognition":{"MediaType":"Video","Enabled":false},"labelDetection":{"MediaType":"Video","Enabled":true},"personTracking":{"MediaType":"Video","Enabled":false},"faceSearch":{"MediaType":"Video","Enabled":false,"CollectionId":"undefined"},"textDetection":{"MediaType":"Video","Enabled":false},"Mediaconvert":{"MediaType":"Video","Enabled":false},"TranscribeVideo":{"Enabled":true,"TranscribeLanguage":"en-US","MediaType":"Audio"}},"TransformText":{"WebToSRTCaptions":{"MediaType":"MetadataOnly","TargetLanguageCodes":["es","de","en"],"Enabled":true},"WebToVTTCaptions":{"MediaType":"MetadataOnly","TargetLanguageCodes":["es","de","en"],"Enabled":true},"PollyWebCaptions":{"MediaType":"MetadataOnly","Enabled":false,"SourceLanguageCode":"en"}},"WebCaptions":{"WebCaptions":{"MediaType":"MetadataOnly","SourceLanguageCode":"en","Enabled":true}},"Translate":{"Translate":{"MediaType":"Text","Enabled":false},"TranslateWebCaptions":{"MediaType":"MetadataOnly","Enabled":true,"TargetLanguageCodes":["es","de"],"SourceLanguageCode":"en","TerminologyNames":[],"ParallelDataNames":[]}},"AnalyzeText":{"ComprehendEntities":{"MediaType":"Text","Enabled":false},"ComprehendKeyPhrases":{"MediaType":"Text","Enabled":false}}}, "Input":{"Media":{"Video":{"S3Bucket": "' + s3_bucket + '", "S3Key":"' + s3_key + '"}}}}' From 7f9d00006ba83595c739d139763cf9a35d6e5b84 Mon Sep 17 00:00:00 
2001 From: Johan Kielbaey Date: Thu, 10 Mar 2022 10:39:50 +0100 Subject: [PATCH 3/3] Update documentation --- README.md | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index dfbcb9d..709b894 100644 --- a/README.md +++ b/README.md @@ -197,17 +197,8 @@ Workflows can be started automatically when files are copied to a designated S3 Copy and paste the code from [sigv4_post_sample.py](https://github.com/aws-solutions/aws-media-insights-content-localization/blob/development/docs/sigv4_post_sample.py) into a new Lambda function. 3. Under "Layers", click Add a layer 4. Select Custom layers, then select `media-insights-engine-python38` from the drop-down menu, and click Add. - 5. Make the following code changes to the copied code: - -- Put `import json` at the top -- Replace `{restapi_id}` with the value of the `WorkflowApiRestID` key in the MieStack outputs -- Replace `{region}` with the value of the region your stack is running in. -- Replace `{api_name}` with `workflow` -- Replace `{method_name}` with `execution` -- Replace `{s3_bucket}` with the name of the S3 bucket you specified in `ExternalBucketArn`, above. -- Replace `{s3_key}` with `event["Records"][0]["s3"]["object"]["key"]` -- Replace `os.environ.get('AWS_ACCESS_KEY_ID')` with your AWS_ACCESS_KEY_ID -- Replace `os.environ.get('AWS_SECRET_ACCESS_KEY')` with your AWS_SECRET_ACCESS_KEY + 5. Add an environment variable `REST_API_ID` with the value of the `WorkflowApiRestID` key in the MieStack outputs. + 6. Update the IAM role used by the Lambda function to grant it permission to invoke the WorkflowAPI. 3. Setup an S3 trigger for the Lambda function, using the name of the S3 bucket you specified in `ExternalBucketArn`, above.