File size: 3,703 Bytes
d8ad0fd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import functools
import time
from concurrent.futures import Future, ThreadPoolExecutor
from datetime import datetime
from typing import List, Optional, Tuple

import boto3
import requests
from mypy_boto3_lambda.client import LambdaClient
from mypy_boto3_lambda.type_defs import InvocationResponseTypeDef
from pydantic import BaseModel, ConfigDict
from requests import Request, Response
from requests.exceptions import RequestException
from retry import retry

# Result of one synchronous Lambda invocation: the raw boto3 invoke() response
# plus the (naive UTC) timestamp taken just before the call was made.
GetLambdaResponse = Tuple[InvocationResponseTypeDef, datetime]


class GetLambdaResponseOptions(BaseModel):
    """Keyword arguments for a single ``get_lambda_response`` call.

    Used by ``get_lambda_response_in_parallel``, which expands each instance
    with ``model_dump()`` into ``get_lambda_response(**kwargs)``.
    """

    # ``client`` is a boto3 LambdaClient, which pydantic cannot validate.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    lambda_arn: str
    payload: Optional[str] = None
    client: Optional[LambdaClient] = None
    raise_on_error: bool = True


def get_lambda_response(
    lambda_arn: str,
    payload: Optional[str] = None,
    client: Optional[LambdaClient] = None,
    raise_on_error: bool = True,
) -> GetLambdaResponse:
    """Invoke function synchronously

    Parameters
    ----------
    lambda_arn : str
        Lambda function ARN to invoke
    payload : Optional[str], optional
        JSON payload for Lambda invocation, by default None (sent as "")
    client : Optional[LambdaClient], optional
        Boto3 Lambda SDK client, by default None (a new client is created)
    raise_on_error : bool, optional
        Whether to raise exception upon invocation error, by default True

    Returns
    -------
    Tuple[InvocationResponseTypeDef, datetime]
        Function response and approximate execution time

    Raises
    ------
    RuntimeError
        Function invocation error details
    """
    client = client or boto3.client("lambda")
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12; kept so
    # the returned timestamp stays naive-UTC for backward compatibility.
    execution_time = datetime.utcnow()
    response: InvocationResponseTypeDef = client.invoke(
        FunctionName=lambda_arn,
        InvocationType="RequestResponse",
        Payload=payload or "",
    )

    # boto3 sets FunctionError to "Handled" or "Unhandled" whenever the
    # function errored; comparing against "Unhandled" alone (the previous
    # check) silently ignored handled function errors.
    has_error = bool(response.get("FunctionError"))
    if has_error and raise_on_error:
        error_payload = response["Payload"].read().decode()
        raise RuntimeError(f"Function failed invocation: {error_payload}")

    return response, execution_time


@retry(RequestException, delay=2, jitter=1.5, tries=5)
def get_http_response(request: Request) -> Response:
    """Send *request* and return its response, retrying on request errors.

    ``raise_for_status()`` converts HTTP error statuses into ``HTTPError``
    (a ``RequestException`` subclass), so failed statuses are retried too.
    The session is closed via a context manager; the original leaked it.
    """
    with requests.Session() as session:
        result = session.send(request.prepare())
        result.raise_for_status()
        return result


def get_lambda_response_in_parallel(
    get_lambda_response_options: List[GetLambdaResponseOptions],
) -> List[GetLambdaResponse]:
    """Invoke functions in parallel

    Parameters
    ----------
    get_lambda_response_options : List[GetLambdaResponseOptions]
        List of options to call get_lambda_response with

    Returns
    -------
    List[GetLambdaResponse]
        Function responses and approximate execution time
    """
    with ThreadPoolExecutor() as executor:
        running_tasks: List[Future] = []
        for index, options in enumerate(get_lambda_response_options):
            # Sleep 0, 0.5, 1, ... seconds between submissions. This way the
            # lambdas still run in parallel, but they are started in the same
            # "order" as they were passed in, so callers can assert on the
            # correct output.
            time.sleep(0.5 * index)

            running_tasks.append(
                executor.submit(
                    functools.partial(get_lambda_response, **options.model_dump())
                ),
            )
        # Exiting the with-block calls shutdown(wait=True), so all tasks have
        # finished by the time we collect results; the explicit shutdown call
        # in the original was redundant.

    return [task.result() for task in running_tasks]