Reduce likelihood of lambda timeouts
If the lambda times out, all anime in that batch will be retried, even if some have already succeeded. This is wasteful.
YoshiWalsh committed Nov 3, 2022
1 parent 0064577 commit efbb59e
Showing 1 changed file with 3 additions and 3 deletions.
infrastructure/resources.tf (6 changes: 3 additions & 3 deletions)
@@ -190,7 +190,7 @@ resource "aws_lambda_function" "function_limited" {
   architectures = [ "arm64" ]
   runtime = "nodejs16.x"
   memory_size = "128"
-  timeout = "120"
+  timeout = "240" # Usually this function will complete in ~100 seconds, but if it times out it breaks our queue length logic and we really want to avoid that.

   reserved_concurrent_executions = 1

@@ -362,7 +362,7 @@ resource "aws_lambda_permission" "lambda_apigateway_permission_heavy" {
 resource "aws_sqs_queue" "fast_queue" {
   name = join("-", ["wrongopinions", random_id.environment_identifier.hex, "fast"])
   delay_seconds = 0
-  visibility_timeout_seconds = 120
+  visibility_timeout_seconds = 250 # Slightly longer than the lambda timeout
   sqs_managed_sse_enabled = true
   redrive_policy = jsonencode({
     deadLetterTargetArn = aws_sqs_queue.slow_queue.arn
@@ -399,7 +399,7 @@ resource "aws_lambda_event_source_mapping" "slow_queue_lambda" {
   function_name = aws_lambda_function.function_limited.arn

   batch_size = 10
-  maximum_batching_window_in_seconds = 300
+  maximum_batching_window_in_seconds = 15

   depends_on = [
     aws_iam_role_policy.function_role_sqs
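
The change encodes a relationship between two resources by hand: the queue's visibility timeout (250) must stay slightly longer than the function's timeout (240), otherwise a message could be redelivered while an earlier invocation is still working on it. Below is a minimal Terraform sketch of one way that invariant could be made explicit, using hypothetical local values that do not exist in this repository and omitting the unrelated arguments of both resources:

    locals {
      # Hypothetical values for illustration; the real configuration
      # hard-codes 240 and 250 directly in the resources above.
      fast_function_timeout_seconds = 240
      visibility_buffer_seconds     = 10
    }

    resource "aws_lambda_function" "function_limited" {
      # ...other arguments unchanged from the real resource...
      timeout = local.fast_function_timeout_seconds
    }

    resource "aws_sqs_queue" "fast_queue" {
      # ...other arguments unchanged from the real resource...
      # Derived value, so the "slightly longer than the lambda timeout" rule
      # cannot drift if the function timeout is tuned again later.
      visibility_timeout_seconds = local.fast_function_timeout_seconds + local.visibility_buffer_seconds
    }

The inline comment on visibility_timeout_seconds already documents the intent; the sketch only shows how the same dependency could be expressed in code instead of prose.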