[feat] add input parsing and validation with pydantic, refactor code
- events/payload_point.json +3 -2
- src/app.py +10 -6
- src/io/coordinates_pixel_conversion.py +14 -7
- src/io/lambda_helpers.py +44 -42
- src/io/tms2geotiff.py +4 -4
- src/prediction_api/predictors.py +1 -1
- src/utilities/type_hints.py +25 -17
- tests/events/get_parsed_bbox_points.json +4 -4
- tests/io/test_lambda_helpers.py +11 -2
events/payload_point.json
CHANGED
@@ -9,5 +9,6 @@
       "label": 0
     }],
   "zoom": 6,
-  "source_type": "Satellite"
-}
+  "source_type": "Satellite",
+  "debug": true
+}
src/app.py
CHANGED
@@ -2,6 +2,7 @@ import time
 from http import HTTPStatus
 
 from aws_lambda_powertools.utilities.typing import LambdaContext
+from pydantic import ValidationError
 
 from src import app_logger
 from src.io.lambda_helpers import get_parsed_request_body, get_parsed_bbox_points, get_response
@@ -16,19 +17,22 @@ def lambda_handler(event: dict, context: LambdaContext):
     app_logger.info(f"event version: {event['version']}.")
 
     try:
-
+        app_logger.info(f"try get_parsed_event...")
+        request_input = get_parsed_request_body(event)
+        app_logger.info(f"event parsed: ok")
+        body_request = get_parsed_bbox_points(request_input)
 
         try:
-
-            app_logger.debug(f"prompt_latlng:{prompt_latlng}.")
-            body_request = get_parsed_bbox_points(body)
-            app_logger.info(f"body_request=> {type(body_request)}, {body_request}.")
+            app_logger.info(f"body_request => {type(body_request)}, {body_request}.")
             body_response = samexporter_predict(body_request["bbox"], body_request["prompt"], body_request["zoom"])
             app_logger.info(f"output body_response:{body_response}.")
             response = get_response(HTTPStatus.OK.value, start_time, context.aws_request_id, body_response)
         except Exception as ex2:
             app_logger.error(f"exception2:{ex2}.")
-            response = get_response(HTTPStatus.
+            response = get_response(HTTPStatus.BAD_REQUEST.value, start_time, context.aws_request_id, {})
+    except ValidationError as va1:
+        app_logger.error(f"ValidationError:{va1}.")
+        response = get_response(HTTPStatus.UNPROCESSABLE_ENTITY.value, start_time, context.aws_request_id, {})
     except Exception as ex1:
         app_logger.error(f"exception1:{ex1}.")
         response = get_response(HTTPStatus.INTERNAL_SERVER_ERROR.value, start_time, context.aws_request_id, {})
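For local experiments with the refactored handler, the event it consumes can be built from a payload shaped like events/payload_point.json. The sketch below is illustrative and not part of the commit: the coordinate values are made up, the "version"/"body" keys assume an API Gateway-style event, and it relies only on the standard library (get_parsed_request_body, as shown further down in this diff, base64-decodes string bodies before validating them).

import base64
import json

# Illustrative payload matching the fields of RawRequestInput added in this commit
# (bbox.ne / bbox.sw, a list of point prompts, zoom, source_type, debug).
payload = {
    "bbox": {
        "ne": {"lat": 38.03, "lng": 15.36},
        "sw": {"lat": 37.45, "lng": 14.52},
    },
    "prompt": [{"type": "point", "data": {"lat": 37.7, "lng": 14.9}, "label": 0}],
    "zoom": 6,
    "source_type": "Satellite",
    "debug": True,
}

# API Gateway may deliver the request body base64-encoded; the handler's
# get_parsed_request_body decodes string bodies before pydantic validation.
event = {
    "version": "2.0",
    "body": base64.b64encode(json.dumps(payload).encode()).decode(),
}
print(event["body"][:60], "...")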
src/io/coordinates_pixel_conversion.py
CHANGED
@@ -3,6 +3,7 @@ from typing import TypedDict
 
 from src import app_logger
 from src.utilities.constants import TILE_SIZE
+from src.utilities.type_hints import LatLngDict
 
 
 class PixelCoordinate(TypedDict):
@@ -10,16 +11,16 @@ class PixelCoordinate(TypedDict):
     y: int
 
 
-def get_latlng2pixel_projection(latlng) -> PixelCoordinate:
+def get_latlng2pixel_projection(latlng: LatLngDict) -> PixelCoordinate:
     app_logger.info(f"latlng: {type(latlng)}, value:{latlng}.")
-    app_logger.info(f'latlng lat: {type(latlng
-    app_logger.info(f'latlng lng: {type(latlng
+    app_logger.info(f'latlng lat: {type(latlng.lat)}, value:{latlng.lat}.')
+    app_logger.info(f'latlng lng: {type(latlng.lng)}, value:{latlng.lng}.')
     try:
-        sin_y: float = math.sin(latlng
+        sin_y: float = math.sin(latlng.lat * math.pi / 180)
         app_logger.info(f"sin_y, #1:{sin_y}.")
         sin_y = min(max(sin_y, -0.9999), 0.9999)
         app_logger.info(f"sin_y, #2:{sin_y}.")
-        x = TILE_SIZE * (0.5 + latlng
+        x = TILE_SIZE * (0.5 + latlng.lng / 360)
         app_logger.info(f"x:{x}.")
         y = TILE_SIZE * (0.5 - math.log((1 + sin_y) / (1 - sin_y)) / (4 * math.pi))
         app_logger.info(f"y:{y}.")
@@ -30,7 +31,7 @@ def get_latlng2pixel_projection(latlng) -> PixelCoordinate:
         raise e_get_latlng2pixel_projection
 
 
-def get_point_latlng_to_pixel_coordinates(latlng, zoom: int) -> PixelCoordinate:
+def get_point_latlng_to_pixel_coordinates(latlng: LatLngDict, zoom: int | float) -> PixelCoordinate:
     try:
         world_coordinate: PixelCoordinate = get_latlng2pixel_projection(latlng)
         app_logger.info(f"world_coordinate:{world_coordinate}.")
@@ -45,7 +46,13 @@ def get_point_latlng_to_pixel_coordinates(latlng, zoom: int) -> PixelCoordinate:
         raise e_format_latlng_to_pixel_coordinates
 
 
-def get_latlng_to_pixel_coordinates(
+def get_latlng_to_pixel_coordinates(
+        latlng_origin_ne: LatLngDict,
+        latlng_origin_sw: LatLngDict,
+        latlng_current_point: LatLngDict,
+        zoom: int | float,
+        k: str
+):
     app_logger.info(f"latlng_origin - {k}: {type(latlng_origin_ne)}, value:{latlng_origin_ne}.")
     app_logger.info(f"latlng_current_point - {k}: {type(latlng_current_point)}, value:{latlng_current_point}.")
     latlng_map_origin_ne = get_point_latlng_to_pixel_coordinates(latlng_origin_ne, zoom)
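The projection in get_latlng2pixel_projection is the standard Web Mercator world-coordinate formula. The standalone sketch below reproduces it with an assumed TILE_SIZE of 256 (the actual value comes from src.utilities.constants and is not shown in this diff) and scales world coordinates by 2**zoom, which is the usual way pixel coordinates are derived at a zoom level; the repository's own get_point_latlng_to_pixel_coordinates body is only partially visible here, so treat this as an approximation, not the committed helper.

import math

TILE_SIZE = 256  # assumed; the repo imports this from src.utilities.constants


def latlng_to_world(lat: float, lng: float) -> tuple[float, float]:
    # Same formula as get_latlng2pixel_projection in this commit:
    # clamp sin(lat) to avoid infinities at the poles, then project.
    sin_y = min(max(math.sin(lat * math.pi / 180), -0.9999), 0.9999)
    x = TILE_SIZE * (0.5 + lng / 360)
    y = TILE_SIZE * (0.5 - math.log((1 + sin_y) / (1 - sin_y)) / (4 * math.pi))
    return x, y


def world_to_pixel(x: float, y: float, zoom: int) -> tuple[int, int]:
    # World coordinates scaled by 2**zoom give pixel coordinates
    # (standard Web Mercator convention).
    scale = 2 ** zoom
    return int(x * scale), int(y * scale)


if __name__ == "__main__":
    wx, wy = latlng_to_world(38.0393, 15.3630)
    print(world_to_pixel(wx, wy, zoom=6))  # pixel coordinates at zoom 6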
src/io/lambda_helpers.py
CHANGED
@@ -1,14 +1,16 @@
 import json
+import logging
 import time
 from typing import Dict
 
 from aws_lambda_powertools.event_handler import content_types
+from aws_lambda_powertools.utilities.parser import parse
 
 from src import app_logger
 from src.io.coordinates_pixel_conversion import get_latlng_to_pixel_coordinates
-from src.utilities.constants import CUSTOM_RESPONSE_MESSAGES
+from src.utilities.constants import CUSTOM_RESPONSE_MESSAGES
 from src.utilities.type_hints import RawRequestInput
-from src.utilities.utilities import base64_decode
+from src.utilities.utilities import base64_decode
 
 
 def get_response(status: int, start_time: float, request_id: str, response_body: Dict = None) -> str:
@@ -42,59 +44,59 @@ def get_response(status: int, start_time: float, request_id: str, response_body: Dict = None) -> str:
 
 def get_parsed_bbox_points(request_input: RawRequestInput) -> Dict:
     app_logger.info(f"try to parsing input request {request_input}...")
-
+
+    bbox = request_input.bbox
     app_logger.debug(f"request bbox: {type(bbox)}, value:{bbox}.")
-    ne = bbox
-    sw = bbox
+    ne = bbox.ne
+    sw = bbox.sw
     app_logger.debug(f"request ne: {type(ne)}, value:{ne}.")
     app_logger.debug(f"request sw: {type(sw)}, value:{sw}.")
-    ne_latlng = [float(ne
-    sw_latlng = [float(sw
-
-
-    for prompt in request_input
+    ne_latlng = [float(ne.lat), float(ne.lng)]
+    sw_latlng = [float(sw.lat), float(sw.lng)]
+    new_zoom = int(request_input.zoom)
+    new_prompt_list = []
+    for prompt in request_input.prompt:
         app_logger.debug(f"current prompt: {type(prompt)}, value:{prompt}.")
-
-
-
-
-
-
-
-
-
+        current_point = get_latlng_to_pixel_coordinates(ne, sw, prompt.data, new_zoom, prompt.type)
+        app_logger.debug(f"current prompt: {type(current_point)}, value:{current_point}.")
+        new_prompt_data = [current_point['x'], current_point['y']]
+        app_logger.debug(f"new_prompt_data: {type(new_prompt_data)}, value:{new_prompt_data}.")
+        new_prompt = {
+            "type": prompt.type,
+            "data": new_prompt_data
+        }
+        if prompt.label is not None:
+            new_prompt["label"] = prompt.label
+        new_prompt_list.append(new_prompt)
 
     app_logger.debug(f"bbox => {bbox}.")
-    app_logger.debug(f'request_input-prompt updated => {
+    app_logger.debug(f'request_input-prompt updated => {new_prompt_list}.')
 
-    app_logger.info(f"unpacking elaborated
+    app_logger.info(f"unpacking elaborated request...")
     return {
-        "bbox":
-        "prompt":
-        "zoom":
+        "bbox": [ne_latlng, sw_latlng],
+        "prompt": new_prompt_list,
+        "zoom": new_zoom
     }
 
 
-def get_parsed_request_body(event
+def get_parsed_request_body(event):
    app_logger.info(f"event:{json.dumps(event)}...")
-    app_logger.info(f"context:{context}...")
     try:
-
+        raw_body = event["body"]
     except Exception as e_constants1:
         app_logger.error(f"e_constants1:{e_constants1}.")
-
-    app_logger.debug(f"
-    if isinstance(
-        body_decoded_str = base64_decode(
+        raw_body = event
+    app_logger.debug(f"raw_body, #1: {type(raw_body)}, {raw_body}...")
+    if isinstance(raw_body, str):
+        body_decoded_str = base64_decode(raw_body)
         app_logger.debug(f"body_decoded_str: {type(body_decoded_str)}, {body_decoded_str}...")
-
-        app_logger.info(f"body, #2: {type(
-
-
-
-
-
-
-
-    app_logger.warning(f"logger level is {app_logger.log_level}.")
-    return body
+        raw_body = json.loads(body_decoded_str)
+    app_logger.info(f"body, #2: {type(raw_body)}, {raw_body}...")
+
+    parsed_body = parse(event=raw_body, model=RawRequestInput)
+    log_level = "DEBUG" if parsed_body.debug else "INFO"
+    app_logger.setLevel(log_level)
+    app_logger.warning(f"set log level to {logging.getLevelName(app_logger.log_level)}.")
+
+    return parsed_body
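The new get_parsed_request_body delegates validation to the same parse() function from aws_lambda_powertools that the import above brings in. A minimal, self-contained illustration of that call is below; DemoInput is a throwaway stand-in for the project's RawRequestInput, used only to keep the example runnable on its own, and the ValidationError shown is the one app.py now maps to HTTP 422.

from aws_lambda_powertools.utilities.parser import parse
from pydantic import BaseModel, ValidationError


class DemoInput(BaseModel):
    # stand-in for RawRequestInput; only here to keep the example self-contained
    zoom: int
    debug: bool = False


ok = parse(event={"zoom": 6, "debug": True}, model=DemoInput)
print(ok.zoom, ok.debug)  # 6 True

try:
    parse(event={"zoom": "not-a-number"}, model=DemoInput)
except ValidationError as err:
    # invalid input fails pydantic validation before any prediction runs
    print(f"invalid request: {err}")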
src/io/tms2geotiff.py
CHANGED
@@ -128,7 +128,7 @@ def get_tile(url):
 
 def print_progress(progress, total, done=False):
     if done:
-
+        app_logger.info('Downloaded image %d/%d, %.2f%%' % (progress, total, progress * 100 / total))
 
 
 class ProgressBar:
@@ -160,7 +160,7 @@ class ProgressBar:
         if self.tqdm_bar:
             self.tqdm_bar.close()
         else:
-
+            app_logger.info('\nDone.')
 
 
 def mbtiles_save(db, img_data, xy, zoom, img_format):
@@ -643,8 +643,8 @@ def downloader(input_args, input_parser):
         coords1 = tuple(map(float, getattr(input_args, 'to').split(',')))
         print("coords1:", coords1, "#")
         download_args.extend((coords0[0], coords0[1], coords1[0], coords1[1]))
-    except Exception as
-        print(f"
+    except Exception as e_downloader:
+        print(f"e_downloader:", e_downloader, "#")
         input_parser.print_help()
         return 1
     download_args.append(input_args.zoom)
src/prediction_api/predictors.py
CHANGED
@@ -29,7 +29,7 @@ def samexporter_predict(bbox, prompt: list[dict], zoom: float, model_name: str =
     with tempfile.TemporaryDirectory() as input_tmp_dir:
         app_logger.info(f'tile_source: {DEFAULT_TMS}!')
         pt0, pt1 = bbox
-        app_logger.info("downloading
+        app_logger.info(f"downloading geo-referenced raster with bbox {bbox}, zoom {zoom}.")
         img, matrix = download_extent(DEFAULT_TMS, pt0[0], pt0[1], pt1[0], pt1[1], zoom)
         app_logger.debug(f"img type {type(img)} with shape/size:{img.size}, matrix:{matrix}.")
 
src/utilities/type_hints.py
CHANGED
@@ -1,4 +1,6 @@
 """custom type hints"""
+from enum import Enum
+
 from pydantic import BaseModel
 from typing import TypedDict
 
@@ -15,26 +17,13 @@ class LatLngDict(BaseModel):
     lng: float
 
 
-class
-
-
-
-
-class RawPrompt(BaseModel):
-    type: str
-    data: LatLngDict
-    label: int = 0
-
-
-class RawRequestInput(BaseModel):
-    bbox: RawBBox
-    prompt: RawPrompt
-    zoom: int | float
-    source_type: str = "Satellite"
+class PromptType(str, Enum):
+    point = "point"
+    # rectangle = "rectangle"
 
 
 class ParsedPrompt(BaseModel):
-    type:
+    type: PromptType
     data: llist_float
     label: int = 0
 
@@ -48,3 +37,22 @@ class ParsedRequestInput(BaseModel):
 class PixelCoordinate(TypedDict):
     x: int
     y: int
+
+
+class RawBBox(BaseModel):
+    ne: LatLngDict
+    sw: LatLngDict
+
+
+class RawPrompt(BaseModel):
+    type: PromptType
+    data: LatLngDict
+    label: int = 0
+
+
+class RawRequestInput(BaseModel):
+    bbox: RawBBox
+    prompt: list[RawPrompt]
+    zoom: int | float
+    source_type: str = "Satellite"
+    debug: bool = False
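To see the new models in action, a request shaped like events/payload_point.json can be validated directly against RawRequestInput. The snippet below is a usage sketch rather than part of the commit: it assumes it runs from the repository root with pydantic installed, and the coordinate values are illustrative.

# assumes it is run from the repository root, with pydantic installed
from pydantic import ValidationError

from src.utilities.type_hints import RawRequestInput

payload = {
    "bbox": {"ne": {"lat": 38.0, "lng": 15.4}, "sw": {"lat": 37.5, "lng": 14.5}},
    "prompt": [{"type": "point", "data": {"lat": 37.7, "lng": 14.9}, "label": 0}],
    "zoom": 6,
    "source_type": "Satellite",
    "debug": True,
}

validated = RawRequestInput(**payload)  # accepted: matches the new models
print(validated.prompt[0].type)         # PromptType.point

try:
    RawRequestInput(**{**payload, "prompt": [{"type": "rectangle", "data": {"lat": 0, "lng": 0}}]})
except ValidationError as err:
    # "rectangle" is commented out in PromptType, so this prompt type is rejected
    print(f"rejected: {err}")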
tests/events/get_parsed_bbox_points.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "europe": {
     "input": {
-      "
+      "event": {
         "bbox": {
           "ne": {
             "lat": 38.03932961278458,
@@ -53,7 +53,7 @@
   },
   "north_america": {
     "input": {
-      "
+      "event": {
         "bbox": {
           "ne": {
             "lat": 44.918201144476456,
@@ -104,7 +104,7 @@
   },
   "oceania": {
     "input": {
-      "
+      "event": {
        "bbox": {
           "ne": {
             "lat": -1.4939713066293112,
@@ -155,7 +155,7 @@
   },
   "south_america": {
     "input": {
-      "
+      "event": {
         "bbox": {
           "ne": {
             "lat": -24.80663308621806,
tests/io/test_lambda_helpers.py
CHANGED
@@ -1,5 +1,14 @@
-
+import json
+
+from src.io.lambda_helpers import get_parsed_bbox_points, get_parsed_request_body
+from tests import TEST_EVENTS_FOLDER
 
 
 def test_get_parsed_bbox_points():
-
+    with open(TEST_EVENTS_FOLDER / "get_parsed_bbox_points.json") as tst_json:
+        inputs_outputs = json.load(tst_json)
+        for k, input_output in inputs_outputs.items():
+            print(f"k:{k}.")
+            raw_body = get_parsed_request_body(**input_output["input"])
+            output = get_parsed_bbox_points(raw_body)
+            assert output == input_output["output"]