| partition | func_name | docstring | path | code | repo | language | url | sha |
|---|---|---|---|---|---|---|---|---|
valid
|
execute
|
Read, stretch and return raster data.
Inputs:
-------
raster
raster file
Parameters:
-----------
resampling : str
rasterio.Resampling method
scale_method : str
- dtype_scale: use dtype minimum and maximum values
- minmax_scale: use dataset bands minimum and maximum values
- crop: clip data to output dtype
scales_minmax : tuple
tuple of band specific scale values
Output:
-------
np.ndarray
|
mapchete/processes/pyramid/tilify.py
|
def execute(
mp,
resampling="nearest",
scale_method=None,
scales_minmax=None
):
"""
Read, stretch and return raster data.
Inputs:
-------
raster
raster file
Parameters:
-----------
resampling : str
rasterio.Resampling method
scale_method : str
- dtype_scale: use dtype minimum and maximum values
- minmax_scale: use dataset bands minimum and maximum values
- crop: clip data to output dtype
scales_minmax : tuple
tuple of band specific scale values
Output:
-------
np.ndarray
"""
with mp.open("raster", resampling=resampling) as raster_file:
# exit if input tile is empty
if raster_file.is_empty():
return "empty"
# actually read data and iterate through bands
scaled = ()
mask = ()
raster_data = raster_file.read()
if raster_data.ndim == 2:
raster_data = ma.expand_dims(raster_data, axis=0)
if not scale_method:
scales_minmax = [(i, i) for i in range(len(raster_data))]
for band, (scale_min, scale_max) in zip(raster_data, scales_minmax):
if scale_method in ["dtype_scale", "minmax_scale"]:
scaled += (_stretch_array(band, scale_min, scale_max), )
elif scale_method == "crop":
scaled += (np.clip(band, scale_min, scale_max), )
else:
scaled += (band, )
mask += (band.mask, )
return ma.masked_array(np.stack(scaled), np.stack(mask))
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/processes/pyramid/tilify.py#L16-L69
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
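The `execute` process above delegates the actual stretching to a private `_stretch_array()` helper that is not included in this row. Below is a minimal sketch of such a linear stretch, under the assumption that it maps values from `(scale_min, scale_max)` onto the full range of an output dtype; `stretch_array` here is a hypothetical stand-in, not the repo's helper.

```python
import numpy as np
import numpy.ma as ma

def stretch_array(band, scale_min, scale_max, out_dtype="uint8"):
    # map (scale_min, scale_max) linearly onto the full out_dtype range
    info = np.iinfo(out_dtype)
    normalized = (band.astype("float64") - scale_min) / (scale_max - scale_min)
    stretched = np.clip(normalized, 0.0, 1.0) * (info.max - info.min) + info.min
    return ma.masked_array(stretched.astype(out_dtype), mask=ma.getmaskarray(band))

band = ma.masked_array([[0, 500], [1000, 2000]], mask=[[False, False], [False, True]])
print(stretch_array(band, 0, 2000))  # masked cell stays masked
```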
valid
|
OutputData.read
|
Read existing process output.
Parameters
----------
output_tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
NumPy array
|
mapchete/formats/default/gtiff.py
|
def read(self, output_tile, **kwargs):
"""
Read existing process output.
Parameters
----------
output_tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
NumPy array
"""
try:
return read_raster_no_crs(self.get_path(output_tile))
except FileNotFoundError:
return self.empty(output_tile)
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/gtiff.py#L104-L120
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
OutputData.write
|
Write data from process tiles into GeoTIFF file(s).
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
data : ``np.ndarray``
|
mapchete/formats/default/gtiff.py
|
def write(self, process_tile, data):
"""
Write data from process tiles into GeoTIFF file(s).
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
data : ``np.ndarray``
"""
if (
isinstance(data, tuple) and
len(data) == 2 and
isinstance(data[1], dict)
):
data, tags = data
else:
tags = {}
data = prepare_array(
data,
masked=True,
nodata=self.nodata,
dtype=self.profile(process_tile)["dtype"]
)
if data.mask.all():
logger.debug("data empty, nothing to write")
else:
        # in case of S3 output, create a boto3 resource
bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None
# Convert from process_tile to output_tiles and write
for tile in self.pyramid.intersecting(process_tile):
out_path = self.get_path(tile)
self.prepare_path(tile)
out_tile = BufferedTile(tile, self.pixelbuffer)
write_raster_window(
in_tile=process_tile,
in_data=data,
out_profile=self.profile(out_tile),
out_tile=out_tile,
out_path=out_path,
tags=tags,
bucket_resource=bucket_resource
)
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/gtiff.py#L122-L166
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
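`OutputData.write()` above accepts either a bare array or an `(array, tags)` 2-tuple, in which case the dict is written into the GeoTIFF as tags. A hedged sketch of a mapchete process function using that convention (the tag keys are illustrative, not part of the library):

```python
import numpy as np
import numpy.ma as ma

def execute(mp):
    # returning a 2-tuple makes the GTiff driver unpack (data, tags)
    data = ma.masked_array(np.zeros((1, 256, 256), dtype="uint8"))
    tags = {"PROCESSING_SOFTWARE": "my-process"}  # illustrative tag key
    return data, tags
```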
valid
|
OutputData.profile
|
Create a metadata dictionary for rasterio.
Parameters
----------
tile : ``BufferedTile``
Returns
-------
metadata : dictionary
output profile dictionary used for rasterio.
|
mapchete/formats/default/gtiff.py
|
def profile(self, tile=None):
"""
Create a metadata dictionary for rasterio.
Parameters
----------
tile : ``BufferedTile``
Returns
-------
metadata : dictionary
output profile dictionary used for rasterio.
"""
dst_metadata = GTIFF_DEFAULT_PROFILE
dst_metadata.pop("transform", None)
dst_metadata.update(
count=self.output_params["bands"],
dtype=self.output_params["dtype"],
driver="GTiff"
)
if tile is not None:
dst_metadata.update(
crs=tile.crs, width=tile.width, height=tile.height,
affine=tile.affine)
else:
for k in ["crs", "width", "height", "affine"]:
dst_metadata.pop(k, None)
if "nodata" in self.output_params:
dst_metadata.update(nodata=self.output_params["nodata"])
try:
if "compression" in self.output_params:
warnings.warn(
DeprecationWarning("use 'compress' instead of 'compression'")
)
dst_metadata.update(compress=self.output_params["compression"])
else:
dst_metadata.update(compress=self.output_params["compress"])
dst_metadata.update(predictor=self.output_params["predictor"])
except KeyError:
pass
return dst_metadata
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/gtiff.py#L188-L228
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
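One subtlety in `profile()` above: `dst_metadata = GTIFF_DEFAULT_PROFILE` binds a reference to the module-level dict, so the subsequent `pop()` and `update()` calls mutate the shared defaults across calls. A defensive copy avoids that; a simplified standalone sketch (the default dict here is illustrative):

```python
GTIFF_DEFAULT_PROFILE = {"driver": "GTiff", "transform": None, "compress": "lzw"}

def profile(output_params, tile=None):
    # copy first so the module-level defaults stay untouched between calls
    dst_metadata = dict(GTIFF_DEFAULT_PROFILE)
    dst_metadata.pop("transform", None)
    dst_metadata.update(count=output_params.get("bands", 1))
    return dst_metadata

print(profile({"bands": 3}))
print(GTIFF_DEFAULT_PROFILE)  # still contains "transform"
```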
valid
|
OutputData.empty
|
Return empty data.
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
Returns
-------
empty data : array
empty array with data type provided in output profile
|
mapchete/formats/default/gtiff.py
|
def empty(self, process_tile):
"""
Return empty data.
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
Returns
-------
empty data : array
empty array with data type provided in output profile
"""
profile = self.profile(process_tile)
return ma.masked_array(
data=np.full(
(profile["count"], ) + process_tile.shape, profile["nodata"],
dtype=profile["dtype"]),
mask=True
)
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/gtiff.py#L230-L250
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
OutputData.for_web
|
Convert data to web output (raster only).
Parameters
----------
data : array
Returns
-------
web data : array
|
mapchete/formats/default/gtiff.py
|
def for_web(self, data):
"""
Convert data to web output (raster only).
Parameters
----------
data : array
Returns
-------
web data : array
"""
return memory_file(
prepare_array(
data, masked=True, nodata=self.nodata, dtype=self.profile()["dtype"]
),
self.profile()
), "image/tiff"
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/gtiff.py#L252-L269
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
OutputData.open
|
Open process output as input for other process.
Parameters
----------
tile : ``Tile``
process : ``MapcheteProcess``
kwargs : keyword arguments
|
mapchete/formats/default/gtiff.py
|
def open(self, tile, process, **kwargs):
"""
Open process output as input for other process.
Parameters
----------
tile : ``Tile``
process : ``MapcheteProcess``
kwargs : keyword arguments
"""
return InputTile(tile, process, kwargs.get("resampling", None))
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/gtiff.py#L271-L281
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
InputTile.read
|
Read reprojected & resampled input data.
Parameters
----------
indexes : integer or list
band number or list of band numbers
Returns
-------
data : array
|
mapchete/formats/default/gtiff.py
|
def read(self, indexes=None, **kwargs):
"""
Read reprojected & resampled input data.
Parameters
----------
indexes : integer or list
band number or list of band numbers
Returns
-------
data : array
"""
band_indexes = self._get_band_indexes(indexes)
arr = self.process.get_raw_output(self.tile)
if len(band_indexes) == 1:
return arr[band_indexes[0] - 1]
else:
return ma.concatenate([ma.expand_dims(arr[i - 1], 0) for i in band_indexes])
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/gtiff.py#L311-L329
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
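`InputTile.read()` above takes 1-based band numbers (the rasterio convention) and maps them to 0-based array positions via `arr[i - 1]`. A small self-contained sketch of that translation:

```python
import numpy as np
import numpy.ma as ma

def read_bands(arr, indexes):
    # indexes are 1-based band numbers; arr is shaped (bands, height, width)
    if isinstance(indexes, int):
        indexes = [indexes]
    if len(indexes) == 1:
        return arr[indexes[0] - 1]  # single band: 2D array
    return ma.concatenate([ma.expand_dims(arr[i - 1], 0) for i in indexes])

arr = ma.masked_array(np.zeros((3, 4, 4)))
print(read_bands(arr, [1, 3]).shape)  # (2, 4, 4)
print(read_bands(arr, 2).shape)       # (4, 4)
```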
valid
|
InputTile.is_empty
|
Check if there is data within this tile.
Returns
-------
is empty : bool
|
mapchete/formats/default/gtiff.py
|
def is_empty(self, indexes=None):
"""
Check if there is data within this tile.
Returns
-------
is empty : bool
"""
# empty if tile does not intersect with file bounding box
return not self.tile.bbox.intersects(self.process.config.area_at_zoom())
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/gtiff.py#L331-L340
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
InputTile._get_band_indexes
|
Return valid band indexes.
|
mapchete/formats/default/gtiff.py
|
def _get_band_indexes(self, indexes=None):
"""Return valid band indexes."""
if indexes:
if isinstance(indexes, list):
return indexes
else:
return [indexes]
else:
return range(1, self.process.config.output.profile(self.tile)["count"] + 1)
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/gtiff.py#L342-L350
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
OutputData.profile
|
Create a metadata dictionary for rasterio.
Parameters
----------
tile : ``BufferedTile``
Returns
-------
metadata : dictionary
output profile dictionary used for rasterio.
|
mapchete/formats/default/png.py
|
def profile(self, tile=None):
"""
Create a metadata dictionary for rasterio.
Parameters
----------
tile : ``BufferedTile``
Returns
-------
metadata : dictionary
output profile dictionary used for rasterio.
"""
dst_metadata = PNG_DEFAULT_PROFILE
dst_metadata.pop("transform", None)
if tile is not None:
dst_metadata.update(
width=tile.width, height=tile.height, affine=tile.affine,
crs=tile.crs)
try:
dst_metadata.update(count=self.output_params["count"])
except KeyError:
pass
return dst_metadata
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/png.py#L156-L179
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
OutputData.for_web
|
Convert data to web output.
Parameters
----------
data : array
Returns
-------
web data : array
|
mapchete/formats/default/png.py
|
def for_web(self, data):
"""
Convert data to web output.
Parameters
----------
data : array
Returns
-------
web data : array
"""
rgba = self._prepare_array_for_png(data)
data = ma.masked_where(rgba == self.nodata, rgba)
return memory_file(data, self.profile()), 'image/png'
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/png.py#L181-L195
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
OutputData.empty
|
Return empty data.
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
Returns
-------
empty data : array
empty array with data type given in output parameters
|
mapchete/formats/default/png.py
|
def empty(self, process_tile):
"""
Return empty data.
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
Returns
-------
empty data : array
empty array with data type given in output parameters
"""
bands = (
self.output_params["bands"]
if "bands" in self.output_params
else PNG_DEFAULT_PROFILE["count"]
)
return ma.masked_array(
data=ma.zeros((bands, ) + process_tile.shape),
mask=ma.zeros((bands, ) + process_tile.shape),
dtype=PNG_DEFAULT_PROFILE["dtype"]
)
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/png.py#L197-L220
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
serve
|
Serve a Mapchete process.
Creates the Mapchete host and serves both web page with OpenLayers and the
WMTS simple REST endpoint.
|
mapchete/cli/default/serve.py
|
def serve(
mapchete_file,
port=None,
internal_cache=None,
zoom=None,
bounds=None,
overwrite=False,
readonly=False,
memory=False,
input_file=None,
debug=False,
logfile=None
):
"""
Serve a Mapchete process.
Creates the Mapchete host and serves both web page with OpenLayers and the
WMTS simple REST endpoint.
"""
app = create_app(
mapchete_files=[mapchete_file], zoom=zoom,
bounds=bounds, single_input_file=input_file,
mode=_get_mode(memory, readonly, overwrite), debug=debug
)
if os.environ.get("MAPCHETE_TEST") == "TRUE":
logger.debug("don't run flask app, MAPCHETE_TEST environment detected")
else:
app.run(
threaded=True, debug=True, port=port, host='0.0.0.0',
extra_files=[mapchete_file]
)
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/cli/default/serve.py#L30-L60
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
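A minimal invocation sketch for `serve()` above, assuming a local configuration file named `example.mapchete` (the file name is hypothetical). With `MAPCHETE_TEST=TRUE` in the environment, the Flask app is built but not run:

```python
# hypothetical configuration file; serves the OpenLayers page plus the
# WMTS simple endpoint on http://0.0.0.0:5000/
serve("example.mapchete", port=5000, readonly=True)
```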
valid
|
create_app
|
Configure and create Flask app.
|
mapchete/cli/default/serve.py
|
def create_app(
mapchete_files=None, zoom=None, bounds=None, single_input_file=None,
mode="continue", debug=None
):
"""Configure and create Flask app."""
from flask import Flask, render_template_string
app = Flask(__name__)
mapchete_processes = {
os.path.splitext(os.path.basename(mapchete_file))[0]: mapchete.open(
mapchete_file, zoom=zoom, bounds=bounds,
single_input_file=single_input_file, mode=mode, with_cache=True,
debug=debug)
for mapchete_file in mapchete_files
}
mp = next(iter(mapchete_processes.values()))
pyramid_type = mp.config.process_pyramid.grid
pyramid_srid = mp.config.process_pyramid.crs.to_epsg()
process_bounds = ",".join([str(i) for i in mp.config.bounds_at_zoom()])
grid = "g" if pyramid_srid == 3857 else "WGS84"
web_pyramid = BufferedTilePyramid(pyramid_type)
@app.route('/', methods=['GET'])
def index():
"""Render and hosts the appropriate OpenLayers instance."""
return render_template_string(
pkgutil.get_data(
'mapchete.static', 'index.html').decode("utf-8"),
srid=pyramid_srid,
process_bounds=process_bounds,
is_mercator=(pyramid_srid == 3857),
process_names=mapchete_processes.keys()
)
@app.route(
"/".join([
"", "wmts_simple", "1.0.0", "<string:mp_name>", "default",
grid, "<int:zoom>", "<int:row>", "<int:col>.<string:file_ext>"]),
methods=['GET'])
def get(mp_name, zoom, row, col, file_ext):
"""Return processed, empty or error (in pink color) tile."""
logger.debug(
"received tile (%s, %s, %s) for process %s", zoom, row, col,
mp_name)
# convert zoom, row, col into tile object using web pyramid
return _tile_response(
mapchete_processes[mp_name], web_pyramid.tile(zoom, row, col),
debug)
return app
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/cli/default/serve.py#L63-L112
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
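The tile route registered in `create_app()` above follows the WMTS simple REST layout. For a geodetic pyramid (`grid == "WGS84"`) and a process file named `example.mapchete` (hypothetical), a request URL can be assembled the same way the route template is, as sketched here:

```python
mp_name, zoom, row, col, file_ext = "example", 5, 10, 20, "png"
url = "/".join([
    "", "wmts_simple", "1.0.0", mp_name, "default", "WGS84",
    str(zoom), str(row), "%s.%s" % (col, file_ext)
])
print(url)  # /wmts_simple/1.0.0/example/default/WGS84/5/10/20.png
```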
valid
|
read_raster_window
|
Return NumPy arrays from an input raster.
NumPy arrays are reprojected and resampled to tile properties from input
raster. If tile boundaries cross the antimeridian, data on the other side
of the antimeridian will be read and concatenated to the numpy array
accordingly.
Parameters
----------
input_files : string or list
path to a raster file or list of paths to multiple raster files readable by
rasterio.
tile : Tile
a Tile object
indexes : list or int
a list of band numbers; None will read all.
resampling : string
one of "nearest", "average", "bilinear" or "lanczos"
src_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
dst_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
gdal_opts : dict
GDAL options passed on to rasterio.Env()
Returns
-------
raster : MaskedArray
|
mapchete/io/raster.py
|
def read_raster_window(
input_files,
tile,
indexes=None,
resampling="nearest",
src_nodata=None,
dst_nodata=None,
gdal_opts=None
):
"""
Return NumPy arrays from an input raster.
NumPy arrays are reprojected and resampled to tile properties from input
raster. If tile boundaries cross the antimeridian, data on the other side
of the antimeridian will be read and concatenated to the numpy array
accordingly.
Parameters
----------
input_files : string or list
path to a raster file or list of paths to multiple raster files readable by
rasterio.
tile : Tile
a Tile object
indexes : list or int
a list of band numbers; None will read all.
resampling : string
one of "nearest", "average", "bilinear" or "lanczos"
src_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
dst_nodata : int or float, optional
if not set, the nodata value from the source dataset will be used
gdal_opts : dict
GDAL options passed on to rasterio.Env()
Returns
-------
raster : MaskedArray
"""
with rasterio.Env(
**get_gdal_options(
gdal_opts,
is_remote=path_is_remote(
input_files[0] if isinstance(input_files, list) else input_files, s3=True
)
)
) as env:
logger.debug("reading %s with GDAL options %s", input_files, env.options)
return _read_raster_window(
input_files,
tile,
indexes=indexes,
resampling=resampling,
src_nodata=src_nodata,
dst_nodata=dst_nodata
)
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L31-L86
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
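A hedged usage sketch for `read_raster_window()` above, assuming a local `input.tif` exists; the tile comes from the tilematrix package that mapchete builds on:

```python
from tilematrix import TilePyramid
from mapchete.io.raster import read_raster_window

tile = TilePyramid("geodetic").tile(5, 10, 20)  # zoom, row, col
# masked array shaped (bands, tile height, tile width), reprojected to the tile
data = read_raster_window("input.tif", tile, resampling="bilinear")
```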
valid
|
_get_warped_array
|
Extract a numpy array from a raster file.
|
mapchete/io/raster.py
|
def _get_warped_array(
input_file=None,
indexes=None,
dst_bounds=None,
dst_shape=None,
dst_crs=None,
resampling=None,
src_nodata=None,
dst_nodata=None
):
"""Extract a numpy array from a raster file."""
try:
return _rasterio_read(
input_file=input_file,
indexes=indexes,
dst_bounds=dst_bounds,
dst_shape=dst_shape,
dst_crs=dst_crs,
resampling=resampling,
src_nodata=src_nodata,
dst_nodata=dst_nodata
)
except Exception as e:
logger.exception("error while reading file %s: %s", input_file, e)
raise
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L224-L248
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
read_raster_no_crs
|
Wrapper function around rasterio.open().read().
Parameters
----------
input_file : str
Path to file
indexes : int or list
Band index or list of band indexes to be read.
Returns
-------
MaskedArray
Raises
------
FileNotFoundError if file cannot be found.
|
mapchete/io/raster.py
|
def read_raster_no_crs(input_file, indexes=None, gdal_opts=None):
"""
Wrapper function around rasterio.open().read().
Parameters
----------
input_file : str
Path to file
indexes : int or list
Band index or list of band indexes to be read.
Returns
-------
MaskedArray
Raises
------
FileNotFoundError if file cannot be found.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
with rasterio.Env(
**get_gdal_options(
gdal_opts, is_remote=path_is_remote(input_file, s3=True)
)
):
with rasterio.open(input_file, "r") as src:
return src.read(indexes=indexes, masked=True)
except RasterioIOError as e:
for i in ("does not exist in the file system", "No such file or directory"):
if i in str(e):
raise FileNotFoundError("%s not found" % input_file)
else:
raise
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L290-L324
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
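The except branch in `read_raster_no_crs()` translates two driver-specific `RasterioIOError` messages into a plain `FileNotFoundError`; that is what lets `OutputData.read()` further up fall back to `self.empty()`. A usage sketch (the path is hypothetical):

```python
from mapchete.io.raster import read_raster_no_crs

try:
    arr = read_raster_no_crs("missing-tile.tif")  # hypothetical path
except FileNotFoundError:
    arr = None  # caller substitutes empty data, as OutputData.read() does
```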
valid
|
write_raster_window
|
Write a window from a numpy array to an output file.
Parameters
----------
in_tile : ``BufferedTile``
``BufferedTile`` with a data attribute holding NumPy data
in_data : array
out_profile : dictionary
metadata dictionary for rasterio
out_tile : ``Tile``
provides output boundaries; if None, in_tile is used
out_path : string
output path to write to
tags : optional tags to be added to GeoTIFF file
bucket_resource : boto3 bucket resource to write to in case of S3 output
|
mapchete/io/raster.py
|
def write_raster_window(
in_tile=None, in_data=None, out_profile=None, out_tile=None, out_path=None,
tags=None, bucket_resource=None
):
"""
Write a window from a numpy array to an output file.
Parameters
----------
in_tile : ``BufferedTile``
``BufferedTile`` with a data attribute holding NumPy data
in_data : array
out_profile : dictionary
metadata dictionary for rasterio
out_tile : ``Tile``
provides output boundaries; if None, in_tile is used
out_path : string
output path to write to
tags : optional tags to be added to GeoTIFF file
bucket_resource : boto3 bucket resource to write to in case of S3 output
"""
if not isinstance(out_path, str):
raise TypeError("out_path must be a string")
logger.debug("write %s", out_path)
if out_path == "memoryfile":
raise DeprecationWarning(
"Writing to memoryfile with write_raster_window() is deprecated. "
"Please use RasterWindowMemoryFile."
)
out_tile = in_tile if out_tile is None else out_tile
_validate_write_window_params(in_tile, out_tile, in_data, out_profile)
# extract data
window_data = extract_from_array(
in_raster=in_data,
in_affine=in_tile.affine,
out_tile=out_tile
) if in_tile != out_tile else in_data
# use transform instead of affine
if "affine" in out_profile:
out_profile["transform"] = out_profile.pop("affine")
# write if there is any band with non-masked data
if window_data.all() is not ma.masked:
try:
if out_path.startswith("s3://"):
with RasterWindowMemoryFile(
in_tile=out_tile,
in_data=window_data,
out_profile=out_profile,
out_tile=out_tile,
tags=tags
) as memfile:
logger.debug((out_tile.id, "upload tile", out_path))
bucket_resource.put_object(
Key="/".join(out_path.split("/")[3:]),
Body=memfile
)
else:
with rasterio.open(out_path, 'w', **out_profile) as dst:
logger.debug((out_tile.id, "write tile", out_path))
dst.write(window_data.astype(out_profile["dtype"], copy=False))
_write_tags(dst, tags)
except Exception as e:
logger.exception("error while writing file %s: %s", out_path, e)
raise
else:
logger.debug((out_tile.id, "array window empty", out_path))
|
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L360-L429
|
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
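The write guard `window_data.all() is not ma.masked` in `write_raster_window()` relies on a numpy.ma idiom: reducing a fully masked array returns the `ma.masked` singleton, so the identity check is True exactly when at least one element carries data. A short demonstration:

```python
import numpy as np
import numpy.ma as ma

fully_masked = ma.masked_array(np.zeros((1, 2, 2)), mask=True)
partly_masked = ma.masked_array(np.ones((1, 2, 2)), mask=[[[True, False], [True, True]]])

print(fully_masked.all() is ma.masked)   # True  -> tile skipped
print(partly_masked.all() is ma.masked)  # False -> tile written
```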
valid
|
extract_from_array
|
Extract raster data window array.
Parameters
----------
in_raster : array or ReferencedRaster
in_affine : ``Affine`` required if in_raster is an array
out_tile : ``BufferedTile``
Returns
-------
extracted array : array
|
mapchete/io/raster.py
|
def extract_from_array(in_raster=None, in_affine=None, out_tile=None):
"""
Extract raster data window array.
Parameters
----------
in_raster : array or ReferencedRaster
in_affine : ``Affine`` required if in_raster is an array
out_tile : ``BufferedTile``
Returns
-------
extracted array : array
"""
if isinstance(in_raster, ReferencedRaster):
in_affine = in_raster.affine
in_raster = in_raster.data
# get range within array
minrow, maxrow, mincol, maxcol = bounds_to_ranges(
out_bounds=out_tile.bounds, in_affine=in_affine, in_shape=in_raster.shape
)
# if output window is within input window
if (
minrow >= 0 and
mincol >= 0 and
maxrow <= in_raster.shape[-2] and
maxcol <= in_raster.shape[-1]
):
return in_raster[..., minrow:maxrow, mincol:maxcol]
# raise error if output is not fully within input
else:
raise ValueError("extraction fails if output shape is not within input")
|
def extract_from_array(in_raster=None, in_affine=None, out_tile=None):
"""
Extract raster data window array.
Parameters
----------
in_raster : array or ReferencedRaster
in_affine : ``Affine`` required if in_raster is an array
out_tile : ``BufferedTile``
Returns
-------
extracted array : array
"""
if isinstance(in_raster, ReferencedRaster):
in_affine = in_raster.affine
in_raster = in_raster.data
# get range within array
minrow, maxrow, mincol, maxcol = bounds_to_ranges(
out_bounds=out_tile.bounds, in_affine=in_affine, in_shape=in_raster.shape
)
# if output window is within input window
if (
minrow >= 0 and
mincol >= 0 and
maxrow <= in_raster.shape[-2] and
maxcol <= in_raster.shape[-1]
):
return in_raster[..., minrow:maxrow, mincol:maxcol]
# raise error if output is not fully within input
else:
raise ValueError("extraction fails if output shape is not within input")
|
[
"Extract",
"raster",
"data",
"window",
"array",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L452-L484
|
[
"def",
"extract_from_array",
"(",
"in_raster",
"=",
"None",
",",
"in_affine",
"=",
"None",
",",
"out_tile",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"in_raster",
",",
"ReferencedRaster",
")",
":",
"in_affine",
"=",
"in_raster",
".",
"affine",
"in_raster",
"=",
"in_raster",
".",
"data",
"# get range within array",
"minrow",
",",
"maxrow",
",",
"mincol",
",",
"maxcol",
"=",
"bounds_to_ranges",
"(",
"out_bounds",
"=",
"out_tile",
".",
"bounds",
",",
"in_affine",
"=",
"in_affine",
",",
"in_shape",
"=",
"in_raster",
".",
"shape",
")",
"# if output window is within input window",
"if",
"(",
"minrow",
">=",
"0",
"and",
"mincol",
">=",
"0",
"and",
"maxrow",
"<=",
"in_raster",
".",
"shape",
"[",
"-",
"2",
"]",
"and",
"maxcol",
"<=",
"in_raster",
".",
"shape",
"[",
"-",
"1",
"]",
")",
":",
"return",
"in_raster",
"[",
"...",
",",
"minrow",
":",
"maxrow",
",",
"mincol",
":",
"maxcol",
"]",
"# raise error if output is not fully within input",
"else",
":",
"raise",
"ValueError",
"(",
"\"extraction fails if output shape is not within input\"",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
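A minimal sketch of calling extract_from_array() directly; the BufferedTilePyramid import is an assumption. Note the function only slices, it does not resample, so the returned window keeps the parent array's resolution.

import numpy as np
from mapchete.io.raster import extract_from_array
from mapchete.tile import BufferedTilePyramid  # assumed helper

pyramid = BufferedTilePyramid("geodetic")
parent = pyramid.tile(4, 0, 0)
child = pyramid.tile(5, 0, 0)  # upper left quarter of the parent tile
data = np.random.rand(1, *parent.shape)
window = extract_from_array(in_raster=data, in_affine=parent.affine, out_tile=child)
# window covers the child tile bounds at the parent array's resolution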
valid
|
resample_from_array
|
Extract and resample from array to target tile.
Parameters
----------
in_raster : array
in_affine : ``Affine``
out_tile : ``BufferedTile``
resampling : string
one of rasterio's resampling methods (default: nearest)
nodataval : integer or float
raster nodata value (default: 0)
Returns
-------
resampled array : array
|
mapchete/io/raster.py
|
def resample_from_array(
in_raster=None,
in_affine=None,
out_tile=None,
in_crs=None,
resampling="nearest",
nodataval=0
):
"""
Extract and resample from array to target tile.
Parameters
----------
in_raster : array
in_affine : ``Affine``
out_tile : ``BufferedTile``
resampling : string
one of rasterio's resampling methods (default: nearest)
nodataval : integer or float
raster nodata value (default: 0)
Returns
-------
resampled array : array
"""
# TODO rename function
if isinstance(in_raster, ma.MaskedArray):
pass
if isinstance(in_raster, np.ndarray):
in_raster = ma.MaskedArray(in_raster, mask=in_raster == nodataval)
elif isinstance(in_raster, ReferencedRaster):
in_affine = in_raster.affine
in_crs = in_raster.crs
in_raster = in_raster.data
elif isinstance(in_raster, tuple):
in_raster = ma.MaskedArray(
data=np.stack(in_raster),
mask=np.stack([
band.mask
if isinstance(band, ma.masked_array)
else np.where(band == nodataval, True, False)
for band in in_raster
]),
fill_value=nodataval
)
else:
raise TypeError("wrong input data type: %s" % type(in_raster))
if in_raster.ndim == 2:
in_raster = ma.expand_dims(in_raster, axis=0)
elif in_raster.ndim == 3:
pass
else:
raise TypeError("input array must have 2 or 3 dimensions")
if in_raster.fill_value != nodataval:
ma.set_fill_value(in_raster, nodataval)
out_shape = (in_raster.shape[0], ) + out_tile.shape
dst_data = np.empty(out_shape, in_raster.dtype)
in_raster = ma.masked_array(
data=in_raster.filled(), mask=in_raster.mask, fill_value=nodataval
)
reproject(
in_raster,
dst_data,
src_transform=in_affine,
src_crs=in_crs if in_crs else out_tile.crs,
dst_transform=out_tile.affine,
dst_crs=out_tile.crs,
resampling=Resampling[resampling]
)
return ma.MaskedArray(dst_data, mask=dst_data == nodataval)
|
def resample_from_array(
in_raster=None,
in_affine=None,
out_tile=None,
in_crs=None,
resampling="nearest",
nodataval=0
):
"""
Extract and resample from array to target tile.
Parameters
----------
in_raster : array
in_affine : ``Affine``
out_tile : ``BufferedTile``
resampling : string
one of rasterio's resampling methods (default: nearest)
nodataval : integer or float
raster nodata value (default: 0)
Returns
-------
resampled array : array
"""
# TODO rename function
if isinstance(in_raster, ma.MaskedArray):
pass
if isinstance(in_raster, np.ndarray):
in_raster = ma.MaskedArray(in_raster, mask=in_raster == nodataval)
elif isinstance(in_raster, ReferencedRaster):
in_affine = in_raster.affine
in_crs = in_raster.crs
in_raster = in_raster.data
elif isinstance(in_raster, tuple):
in_raster = ma.MaskedArray(
data=np.stack(in_raster),
mask=np.stack([
band.mask
if isinstance(band, ma.masked_array)
else np.where(band == nodataval, True, False)
for band in in_raster
]),
fill_value=nodataval
)
else:
raise TypeError("wrong input data type: %s" % type(in_raster))
if in_raster.ndim == 2:
in_raster = ma.expand_dims(in_raster, axis=0)
elif in_raster.ndim == 3:
pass
else:
raise TypeError("input array must have 2 or 3 dimensions")
if in_raster.fill_value != nodataval:
ma.set_fill_value(in_raster, nodataval)
out_shape = (in_raster.shape[0], ) + out_tile.shape
dst_data = np.empty(out_shape, in_raster.dtype)
in_raster = ma.masked_array(
data=in_raster.filled(), mask=in_raster.mask, fill_value=nodataval
)
reproject(
in_raster,
dst_data,
src_transform=in_affine,
src_crs=in_crs if in_crs else out_tile.crs,
dst_transform=out_tile.affine,
dst_crs=out_tile.crs,
resampling=Resampling[resampling]
)
return ma.MaskedArray(dst_data, mask=dst_data == nodataval)
|
[
"Extract",
"and",
"resample",
"from",
"array",
"to",
"target",
"tile",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L487-L556
|
[
"def",
"resample_from_array",
"(",
"in_raster",
"=",
"None",
",",
"in_affine",
"=",
"None",
",",
"out_tile",
"=",
"None",
",",
"in_crs",
"=",
"None",
",",
"resampling",
"=",
"\"nearest\"",
",",
"nodataval",
"=",
"0",
")",
":",
"# TODO rename function",
"if",
"isinstance",
"(",
"in_raster",
",",
"ma",
".",
"MaskedArray",
")",
":",
"pass",
"if",
"isinstance",
"(",
"in_raster",
",",
"np",
".",
"ndarray",
")",
":",
"in_raster",
"=",
"ma",
".",
"MaskedArray",
"(",
"in_raster",
",",
"mask",
"=",
"in_raster",
"==",
"nodataval",
")",
"elif",
"isinstance",
"(",
"in_raster",
",",
"ReferencedRaster",
")",
":",
"in_affine",
"=",
"in_raster",
".",
"affine",
"in_crs",
"=",
"in_raster",
".",
"crs",
"in_raster",
"=",
"in_raster",
".",
"data",
"elif",
"isinstance",
"(",
"in_raster",
",",
"tuple",
")",
":",
"in_raster",
"=",
"ma",
".",
"MaskedArray",
"(",
"data",
"=",
"np",
".",
"stack",
"(",
"in_raster",
")",
",",
"mask",
"=",
"np",
".",
"stack",
"(",
"[",
"band",
".",
"mask",
"if",
"isinstance",
"(",
"band",
",",
"ma",
".",
"masked_array",
")",
"else",
"np",
".",
"where",
"(",
"band",
"==",
"nodataval",
",",
"True",
",",
"False",
")",
"for",
"band",
"in",
"in_raster",
"]",
")",
",",
"fill_value",
"=",
"nodataval",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"wrong input data type: %s\"",
"%",
"type",
"(",
"in_raster",
")",
")",
"if",
"in_raster",
".",
"ndim",
"==",
"2",
":",
"in_raster",
"=",
"ma",
".",
"expand_dims",
"(",
"in_raster",
",",
"axis",
"=",
"0",
")",
"elif",
"in_raster",
".",
"ndim",
"==",
"3",
":",
"pass",
"else",
":",
"raise",
"TypeError",
"(",
"\"input array must have 2 or 3 dimensions\"",
")",
"if",
"in_raster",
".",
"fill_value",
"!=",
"nodataval",
":",
"ma",
".",
"set_fill_value",
"(",
"in_raster",
",",
"nodataval",
")",
"out_shape",
"=",
"(",
"in_raster",
".",
"shape",
"[",
"0",
"]",
",",
")",
"+",
"out_tile",
".",
"shape",
"dst_data",
"=",
"np",
".",
"empty",
"(",
"out_shape",
",",
"in_raster",
".",
"dtype",
")",
"in_raster",
"=",
"ma",
".",
"masked_array",
"(",
"data",
"=",
"in_raster",
".",
"filled",
"(",
")",
",",
"mask",
"=",
"in_raster",
".",
"mask",
",",
"fill_value",
"=",
"nodataval",
")",
"reproject",
"(",
"in_raster",
",",
"dst_data",
",",
"src_transform",
"=",
"in_affine",
",",
"src_crs",
"=",
"in_crs",
"if",
"in_crs",
"else",
"out_tile",
".",
"crs",
",",
"dst_transform",
"=",
"out_tile",
".",
"affine",
",",
"dst_crs",
"=",
"out_tile",
".",
"crs",
",",
"resampling",
"=",
"Resampling",
"[",
"resampling",
"]",
")",
"return",
"ma",
".",
"MaskedArray",
"(",
"dst_data",
",",
"mask",
"=",
"dst_data",
"==",
"nodataval",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
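In contrast to extract_from_array(), resample_from_array() warps the input onto the target tile grid. A hedged sketch (tile construction again assumed):

import numpy as np
from mapchete.io.raster import resample_from_array
from mapchete.tile import BufferedTilePyramid  # assumed helper

pyramid = BufferedTilePyramid("geodetic")
parent = pyramid.tile(4, 0, 0)
child = pyramid.tile(5, 0, 0)
data = np.random.rand(1, *parent.shape).astype("float32")
resampled = resample_from_array(
    in_raster=data, in_affine=parent.affine, out_tile=child, resampling="bilinear"
)
# resampled is a masked array with shape (1, ) + child.shape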
valid
|
create_mosaic
|
Create a mosaic from tiles. Tiles must be connected (also possible over Antimeridian),
otherwise strange things can happen!
Parameters
----------
tiles : iterable
an iterable containing tuples of a BufferedTile and an array
nodata : integer or float
raster nodata value to initialize the mosaic with (default: 0)
Returns
-------
mosaic : ReferencedRaster
|
mapchete/io/raster.py
|
def create_mosaic(tiles, nodata=0):
"""
Create a mosaic from tiles. Tiles must be connected (also possible over Antimeridian),
otherwise strange things can happen!
Parameters
----------
tiles : iterable
an iterable containing tuples of a BufferedTile and an array
nodata : integer or float
raster nodata value to initialize the mosaic with (default: 0)
Returns
-------
mosaic : ReferencedRaster
"""
if isinstance(tiles, GeneratorType):
tiles = list(tiles)
elif not isinstance(tiles, list):
raise TypeError("tiles must be either a list or generator")
if not all([isinstance(pair, tuple) for pair in tiles]):
raise TypeError("tiles items must be tuples")
if not all([
all([isinstance(tile, BufferedTile), isinstance(data, np.ndarray)])
for tile, data in tiles
]):
raise TypeError("tuples must be pairs of BufferedTile and array")
if len(tiles) == 0:
raise ValueError("tiles list is empty")
logger.debug("create mosaic from %s tile(s)", len(tiles))
# quick return if there is just one tile
if len(tiles) == 1:
tile, data = tiles[0]
return ReferencedRaster(
data=data,
affine=tile.affine,
bounds=tile.bounds,
crs=tile.crs
)
# assert all tiles have same properties
pyramid, resolution, dtype = _get_tiles_properties(tiles)
# just handle antimeridian on global pyramid types
shift = _shift_required(tiles)
# determine mosaic shape and reference
m_left, m_bottom, m_right, m_top = None, None, None, None
for tile, data in tiles:
num_bands = data.shape[0] if data.ndim > 2 else 1
left, bottom, right, top = tile.bounds
if shift:
# shift by half of the grid width
left += pyramid.x_size / 2
right += pyramid.x_size / 2
# if tile is now shifted outside pyramid bounds, move within
if right > pyramid.right:
right -= pyramid.x_size
left -= pyramid.x_size
m_left = min([left, m_left]) if m_left is not None else left
m_bottom = min([bottom, m_bottom]) if m_bottom is not None else bottom
m_right = max([right, m_right]) if m_right is not None else right
m_top = max([top, m_top]) if m_top is not None else top
height = int(round((m_top - m_bottom) / resolution))
width = int(round((m_right - m_left) / resolution))
# initialize empty mosaic
mosaic = ma.MaskedArray(
data=np.full((num_bands, height, width), dtype=dtype, fill_value=nodata),
mask=np.ones((num_bands, height, width))
)
# create Affine
affine = Affine(resolution, 0, m_left, 0, -resolution, m_top)
# fill mosaic array with tile data
for tile, data in tiles:
data = prepare_array(data, nodata=nodata, dtype=dtype)
t_left, t_bottom, t_right, t_top = tile.bounds
if shift:
t_left += pyramid.x_size / 2
t_right += pyramid.x_size / 2
# if tile is now shifted outside pyramid bounds, move within
if t_right > pyramid.right:
t_right -= pyramid.x_size
t_left -= pyramid.x_size
minrow, maxrow, mincol, maxcol = bounds_to_ranges(
out_bounds=(t_left, t_bottom, t_right, t_top),
in_affine=affine,
in_shape=(height, width)
)
mosaic[:, minrow:maxrow, mincol:maxcol] = data
mosaic.mask[:, minrow:maxrow, mincol:maxcol] = data.mask
if shift:
# shift back output mosaic
affine = Affine(resolution, 0, m_left - pyramid.x_size / 2, 0, -resolution, m_top)
return ReferencedRaster(
data=mosaic,
affine=affine,
bounds=Bounds(m_left, m_bottom, m_right, m_top),
crs=tile.crs
)
|
def create_mosaic(tiles, nodata=0):
"""
Create a mosaic from tiles. Tiles must be connected (also possible over Antimeridian),
otherwise strange things can happen!
Parameters
----------
tiles : iterable
an iterable containing tuples of a BufferedTile and an array
nodata : integer or float
raster nodata value to initialize the mosaic with (default: 0)
Returns
-------
mosaic : ReferencedRaster
"""
if isinstance(tiles, GeneratorType):
tiles = list(tiles)
elif not isinstance(tiles, list):
raise TypeError("tiles must be either a list or generator")
if not all([isinstance(pair, tuple) for pair in tiles]):
raise TypeError("tiles items must be tuples")
if not all([
all([isinstance(tile, BufferedTile), isinstance(data, np.ndarray)])
for tile, data in tiles
]):
raise TypeError("tuples must be pairs of BufferedTile and array")
if len(tiles) == 0:
raise ValueError("tiles list is empty")
logger.debug("create mosaic from %s tile(s)", len(tiles))
# quick return if there is just one tile
if len(tiles) == 1:
tile, data = tiles[0]
return ReferencedRaster(
data=data,
affine=tile.affine,
bounds=tile.bounds,
crs=tile.crs
)
# assert all tiles have same properties
pyramid, resolution, dtype = _get_tiles_properties(tiles)
# just handle antimeridian on global pyramid types
shift = _shift_required(tiles)
# determine mosaic shape and reference
m_left, m_bottom, m_right, m_top = None, None, None, None
for tile, data in tiles:
num_bands = data.shape[0] if data.ndim > 2 else 1
left, bottom, right, top = tile.bounds
if shift:
# shift by half of the grid width
left += pyramid.x_size / 2
right += pyramid.x_size / 2
# if tile is now shifted outside pyramid bounds, move within
if right > pyramid.right:
right -= pyramid.x_size
left -= pyramid.x_size
m_left = min([left, m_left]) if m_left is not None else left
m_bottom = min([bottom, m_bottom]) if m_bottom is not None else bottom
m_right = max([right, m_right]) if m_right is not None else right
m_top = max([top, m_top]) if m_top is not None else top
height = int(round((m_top - m_bottom) / resolution))
width = int(round((m_right - m_left) / resolution))
# initialize empty mosaic
mosaic = ma.MaskedArray(
data=np.full((num_bands, height, width), dtype=dtype, fill_value=nodata),
mask=np.ones((num_bands, height, width))
)
# create Affine
affine = Affine(resolution, 0, m_left, 0, -resolution, m_top)
# fill mosaic array with tile data
for tile, data in tiles:
data = prepare_array(data, nodata=nodata, dtype=dtype)
t_left, t_bottom, t_right, t_top = tile.bounds
if shift:
t_left += pyramid.x_size / 2
t_right += pyramid.x_size / 2
# if tile is now shifted outside pyramid bounds, move within
if t_right > pyramid.right:
t_right -= pyramid.x_size
t_left -= pyramid.x_size
minrow, maxrow, mincol, maxcol = bounds_to_ranges(
out_bounds=(t_left, t_bottom, t_right, t_top),
in_affine=affine,
in_shape=(height, width)
)
mosaic[:, minrow:maxrow, mincol:maxcol] = data
mosaic.mask[:, minrow:maxrow, mincol:maxcol] = data.mask
if shift:
# shift back output mosaic
affine = Affine(resolution, 0, m_left - pyramid.x_size / 2, 0, -resolution, m_top)
return ReferencedRaster(
data=mosaic,
affine=affine,
bounds=Bounds(m_left, m_bottom, m_right, m_top),
crs=tile.crs
)
|
[
"Create",
"a",
"mosaic",
"from",
"tiles",
".",
"Tiles",
"must",
"be",
"connected",
"(",
"also",
"possible",
"over",
"Antimeridian",
")",
"otherwise",
"strange",
"things",
"can",
"happen!"
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L559-L656
|
[
"def",
"create_mosaic",
"(",
"tiles",
",",
"nodata",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"tiles",
",",
"GeneratorType",
")",
":",
"tiles",
"=",
"list",
"(",
"tiles",
")",
"elif",
"not",
"isinstance",
"(",
"tiles",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"tiles must be either a list or generator\"",
")",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"pair",
",",
"tuple",
")",
"for",
"pair",
"in",
"tiles",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"tiles items must be tuples\"",
")",
"if",
"not",
"all",
"(",
"[",
"all",
"(",
"[",
"isinstance",
"(",
"tile",
",",
"BufferedTile",
")",
",",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"]",
")",
"for",
"tile",
",",
"data",
"in",
"tiles",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"tuples must be pairs of BufferedTile and array\"",
")",
"if",
"len",
"(",
"tiles",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"tiles list is empty\"",
")",
"logger",
".",
"debug",
"(",
"\"create mosaic from %s tile(s)\"",
",",
"len",
"(",
"tiles",
")",
")",
"# quick return if there is just one tile",
"if",
"len",
"(",
"tiles",
")",
"==",
"1",
":",
"tile",
",",
"data",
"=",
"tiles",
"[",
"0",
"]",
"return",
"ReferencedRaster",
"(",
"data",
"=",
"data",
",",
"affine",
"=",
"tile",
".",
"affine",
",",
"bounds",
"=",
"tile",
".",
"bounds",
",",
"crs",
"=",
"tile",
".",
"crs",
")",
"# assert all tiles have same properties",
"pyramid",
",",
"resolution",
",",
"dtype",
"=",
"_get_tiles_properties",
"(",
"tiles",
")",
"# just handle antimeridian on global pyramid types",
"shift",
"=",
"_shift_required",
"(",
"tiles",
")",
"# determine mosaic shape and reference",
"m_left",
",",
"m_bottom",
",",
"m_right",
",",
"m_top",
"=",
"None",
",",
"None",
",",
"None",
",",
"None",
"for",
"tile",
",",
"data",
"in",
"tiles",
":",
"num_bands",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"if",
"data",
".",
"ndim",
">",
"2",
"else",
"1",
"left",
",",
"bottom",
",",
"right",
",",
"top",
"=",
"tile",
".",
"bounds",
"if",
"shift",
":",
"# shift by half of the grid width",
"left",
"+=",
"pyramid",
".",
"x_size",
"/",
"2",
"right",
"+=",
"pyramid",
".",
"x_size",
"/",
"2",
"# if tile is now shifted outside pyramid bounds, move within",
"if",
"right",
">",
"pyramid",
".",
"right",
":",
"right",
"-=",
"pyramid",
".",
"x_size",
"left",
"-=",
"pyramid",
".",
"x_size",
"m_left",
"=",
"min",
"(",
"[",
"left",
",",
"m_left",
"]",
")",
"if",
"m_left",
"is",
"not",
"None",
"else",
"left",
"m_bottom",
"=",
"min",
"(",
"[",
"bottom",
",",
"m_bottom",
"]",
")",
"if",
"m_bottom",
"is",
"not",
"None",
"else",
"bottom",
"m_right",
"=",
"max",
"(",
"[",
"right",
",",
"m_right",
"]",
")",
"if",
"m_right",
"is",
"not",
"None",
"else",
"right",
"m_top",
"=",
"max",
"(",
"[",
"top",
",",
"m_top",
"]",
")",
"if",
"m_top",
"is",
"not",
"None",
"else",
"top",
"height",
"=",
"int",
"(",
"round",
"(",
"(",
"m_top",
"-",
"m_bottom",
")",
"/",
"resolution",
")",
")",
"width",
"=",
"int",
"(",
"round",
"(",
"(",
"m_right",
"-",
"m_left",
")",
"/",
"resolution",
")",
")",
"# initialize empty mosaic",
"mosaic",
"=",
"ma",
".",
"MaskedArray",
"(",
"data",
"=",
"np",
".",
"full",
"(",
"(",
"num_bands",
",",
"height",
",",
"width",
")",
",",
"dtype",
"=",
"dtype",
",",
"fill_value",
"=",
"nodata",
")",
",",
"mask",
"=",
"np",
".",
"ones",
"(",
"(",
"num_bands",
",",
"height",
",",
"width",
")",
")",
")",
"# create Affine",
"affine",
"=",
"Affine",
"(",
"resolution",
",",
"0",
",",
"m_left",
",",
"0",
",",
"-",
"resolution",
",",
"m_top",
")",
"# fill mosaic array with tile data",
"for",
"tile",
",",
"data",
"in",
"tiles",
":",
"data",
"=",
"prepare_array",
"(",
"data",
",",
"nodata",
"=",
"nodata",
",",
"dtype",
"=",
"dtype",
")",
"t_left",
",",
"t_bottom",
",",
"t_right",
",",
"t_top",
"=",
"tile",
".",
"bounds",
"if",
"shift",
":",
"t_left",
"+=",
"pyramid",
".",
"x_size",
"/",
"2",
"t_right",
"+=",
"pyramid",
".",
"x_size",
"/",
"2",
"# if tile is now shifted outside pyramid bounds, move within",
"if",
"t_right",
">",
"pyramid",
".",
"right",
":",
"t_right",
"-=",
"pyramid",
".",
"x_size",
"t_left",
"-=",
"pyramid",
".",
"x_size",
"minrow",
",",
"maxrow",
",",
"mincol",
",",
"maxcol",
"=",
"bounds_to_ranges",
"(",
"out_bounds",
"=",
"(",
"t_left",
",",
"t_bottom",
",",
"t_right",
",",
"t_top",
")",
",",
"in_affine",
"=",
"affine",
",",
"in_shape",
"=",
"(",
"height",
",",
"width",
")",
")",
"mosaic",
"[",
":",
",",
"minrow",
":",
"maxrow",
",",
"mincol",
":",
"maxcol",
"]",
"=",
"data",
"mosaic",
".",
"mask",
"[",
":",
",",
"minrow",
":",
"maxrow",
",",
"mincol",
":",
"maxcol",
"]",
"=",
"data",
".",
"mask",
"if",
"shift",
":",
"# shift back output mosaic",
"affine",
"=",
"Affine",
"(",
"resolution",
",",
"0",
",",
"m_left",
"-",
"pyramid",
".",
"x_size",
"/",
"2",
",",
"0",
",",
"-",
"resolution",
",",
"m_top",
")",
"return",
"ReferencedRaster",
"(",
"data",
"=",
"mosaic",
",",
"affine",
"=",
"affine",
",",
"bounds",
"=",
"Bounds",
"(",
"m_left",
",",
"m_bottom",
",",
"m_right",
",",
"m_top",
")",
",",
"crs",
"=",
"tile",
".",
"crs",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
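A hedged sketch mosaicking two horizontally adjacent tiles; nothing crosses the antimeridian here, so no shift is applied.

import numpy as np
from mapchete.io.raster import create_mosaic
from mapchete.tile import BufferedTilePyramid  # assumed helper

pyramid = BufferedTilePyramid("geodetic")
tiles = [
    (tile, np.ones((1, ) + tile.shape, dtype="int16"))
    for tile in (pyramid.tile(5, 3, 3), pyramid.tile(5, 3, 4))
]
mosaic = create_mosaic(tiles)
# mosaic.data spans both tiles; mosaic.affine/.bounds/.crs describe the result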
valid
|
bounds_to_ranges
|
Return bounds range values from geolocated input.
Parameters
----------
out_bounds : tuple
left, bottom, right, top
in_affine : Affine
input geolocation
in_shape : tuple
input shape
Returns
-------
minrow, maxrow, mincol, maxcol
|
mapchete/io/raster.py
|
def bounds_to_ranges(out_bounds=None, in_affine=None, in_shape=None):
"""
Return bounds range values from geolocated input.
Parameters
----------
out_bounds : tuple
left, bottom, right, top
in_affine : Affine
input geolocation
in_shape : tuple
input shape
Returns
-------
minrow, maxrow, mincol, maxcol
"""
return itertools.chain(
*from_bounds(
*out_bounds, transform=in_affine, height=in_shape[-2], width=in_shape[-1]
).round_lengths(pixel_precision=0).round_offsets(pixel_precision=0).toranges()
)
|
def bounds_to_ranges(out_bounds=None, in_affine=None, in_shape=None):
"""
Return bounds range values from geolocated input.
Parameters
----------
out_bounds : tuple
left, bottom, right, top
in_affine : Affine
input geolocation
in_shape : tuple
input shape
Returns
-------
minrow, maxrow, mincol, maxcol
"""
return itertools.chain(
*from_bounds(
*out_bounds, transform=in_affine, height=in_shape[-2], width=in_shape[-1]
).round_lengths(pixel_precision=0).round_offsets(pixel_precision=0).toranges()
)
|
[
"Return",
"bounds",
"range",
"values",
"from",
"geolocated",
"input",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L659-L680
|
[
"def",
"bounds_to_ranges",
"(",
"out_bounds",
"=",
"None",
",",
"in_affine",
"=",
"None",
",",
"in_shape",
"=",
"None",
")",
":",
"return",
"itertools",
".",
"chain",
"(",
"*",
"from_bounds",
"(",
"*",
"out_bounds",
",",
"transform",
"=",
"in_affine",
",",
"height",
"=",
"in_shape",
"[",
"-",
"2",
"]",
",",
"width",
"=",
"in_shape",
"[",
"-",
"1",
"]",
")",
".",
"round_lengths",
"(",
"pixel_precision",
"=",
"0",
")",
".",
"round_offsets",
"(",
"pixel_precision",
"=",
"0",
")",
".",
"toranges",
"(",
")",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
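A small worked example: with a 1° pixel grid anchored at the upper left corner (0, 10), the bounds (2, 4, 6, 8) map to the row range (2, 6) and column range (2, 6).

from affine import Affine
from mapchete.io.raster import bounds_to_ranges

transform = Affine(1, 0, 0, 0, -1, 10)  # 1° pixels, origin at (0, 10)
minrow, maxrow, mincol, maxcol = bounds_to_ranges(
    out_bounds=(2, 4, 6, 8), in_affine=transform, in_shape=(10, 10)
)
# (minrow, maxrow, mincol, maxcol) == (2, 6, 2, 6)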
valid
|
tiles_to_affine_shape
|
Return Affine and shape of combined tiles.
Parameters
----------
tiles : iterable
an iterable containing BufferedTiles
Returns
-------
Affine, Shape
|
mapchete/io/raster.py
|
def tiles_to_affine_shape(tiles):
"""
Return Affine and shape of combined tiles.
Parameters
----------
tiles : iterable
an iterable containing BufferedTiles
Returns
-------
Affine, Shape
"""
if not tiles:
raise TypeError("no tiles provided")
pixel_size = tiles[0].pixel_x_size
left, bottom, right, top = (
min([t.left for t in tiles]),
min([t.bottom for t in tiles]),
max([t.right for t in tiles]),
max([t.top for t in tiles]),
)
return (
Affine(pixel_size, 0, left, 0, -pixel_size, top),
Shape(
width=int(round((right - left) / pixel_size, 0)),
height=int(round((top - bottom) / pixel_size, 0)),
)
)
|
def tiles_to_affine_shape(tiles):
"""
Return Affine and shape of combined tiles.
Parameters
----------
tiles : iterable
an iterable containing BufferedTiles
Returns
-------
Affine, Shape
"""
if not tiles:
raise TypeError("no tiles provided")
pixel_size = tiles[0].pixel_x_size
left, bottom, right, top = (
min([t.left for t in tiles]),
min([t.bottom for t in tiles]),
max([t.right for t in tiles]),
max([t.top for t in tiles]),
)
return (
Affine(pixel_size, 0, left, 0, -pixel_size, top),
Shape(
width=int(round((right - left) / pixel_size, 0)),
height=int(round((top - bottom) / pixel_size, 0)),
)
)
|
[
"Return",
"Affine",
"and",
"shape",
"of",
"combined",
"tiles",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L683-L711
|
[
"def",
"tiles_to_affine_shape",
"(",
"tiles",
")",
":",
"if",
"not",
"tiles",
":",
"raise",
"TypeError",
"(",
"\"no tiles provided\"",
")",
"pixel_size",
"=",
"tiles",
"[",
"0",
"]",
".",
"pixel_x_size",
"left",
",",
"bottom",
",",
"right",
",",
"top",
"=",
"(",
"min",
"(",
"[",
"t",
".",
"left",
"for",
"t",
"in",
"tiles",
"]",
")",
",",
"min",
"(",
"[",
"t",
".",
"bottom",
"for",
"t",
"in",
"tiles",
"]",
")",
",",
"max",
"(",
"[",
"t",
".",
"right",
"for",
"t",
"in",
"tiles",
"]",
")",
",",
"max",
"(",
"[",
"t",
".",
"top",
"for",
"t",
"in",
"tiles",
"]",
")",
",",
")",
"return",
"(",
"Affine",
"(",
"pixel_size",
",",
"0",
",",
"left",
",",
"0",
",",
"-",
"pixel_size",
",",
"top",
")",
",",
"Shape",
"(",
"width",
"=",
"int",
"(",
"round",
"(",
"(",
"right",
"-",
"left",
")",
"/",
"pixel_size",
",",
"0",
")",
")",
",",
"height",
"=",
"int",
"(",
"round",
"(",
"(",
"top",
"-",
"bottom",
")",
"/",
"pixel_size",
",",
"0",
")",
")",
",",
")",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
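A hedged sketch combining a 2x2 block of tiles; the resulting Affine is anchored at the block's upper left corner.

from mapchete.io.raster import tiles_to_affine_shape
from mapchete.tile import BufferedTilePyramid  # assumed helper

pyramid = BufferedTilePyramid("geodetic")
tiles = [pyramid.tile(6, r, c) for r in (10, 11) for c in (10, 11)]
affine, shape = tiles_to_affine_shape(tiles)
# shape is twice a single tile's width and height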
valid
|
_shift_required
|
Determine if distance over antimeridian is shorter than normal distance.
|
mapchete/io/raster.py
|
def _shift_required(tiles):
"""Determine if distance over antimeridian is shorter than normal distance."""
if tiles[0][0].tile_pyramid.is_global:
# get set of tile columns
tile_cols = sorted(list(set([t[0].col for t in tiles])))
# if tile columns are an unbroken sequence, tiles are connected and are not
# passing the Antimeridian
if tile_cols == list(range(min(tile_cols), max(tile_cols) + 1)):
return False
else:
# look at column gaps and try to determine the smallest distance
def gen_groups(items):
"""Groups tile columns by sequence."""
j = items[0]
group = [j]
for i in items[1:]:
# item is next in expected sequence
if i == j + 1:
group.append(i)
# gap occurred, so yield existing group and create new one
else:
yield group
group = [i]
j = i
yield group
groups = list(gen_groups(tile_cols))
# in case there is only one group, don't shift
if len(groups) == 1:
return False
# distance between first column of first group and last column of last group
normal_distance = groups[-1][-1] - groups[0][0]
# distance between first column of last group and last column of first group
# but crossing the antimeridian
antimeridian_distance = (
groups[0][-1] + tiles[0][0].tile_pyramid.matrix_width(tiles[0][0].zoom)
) - groups[-1][0]
# return whether distance over antimeridian is shorter
return antimeridian_distance < normal_distance
else:
return False
|
def _shift_required(tiles):
"""Determine if distance over antimeridian is shorter than normal distance."""
if tiles[0][0].tile_pyramid.is_global:
# get set of tile columns
tile_cols = sorted(list(set([t[0].col for t in tiles])))
# if tile columns are an unbroken sequence, tiles are connected and are not
# passing the Antimeridian
if tile_cols == list(range(min(tile_cols), max(tile_cols) + 1)):
return False
else:
# look at column gaps and try to determine the smallest distance
def gen_groups(items):
"""Groups tile columns by sequence."""
j = items[0]
group = [j]
for i in items[1:]:
# item is next in expected sequence
if i == j + 1:
group.append(i)
# gap occurred, so yield existing group and create new one
else:
yield group
group = [i]
j = i
yield group
groups = list(gen_groups(tile_cols))
# in case there is only one group, don't shift
if len(groups) == 1:
return False
# distance between first column of first group and last column of last group
normal_distance = groups[-1][-1] - groups[0][0]
# distance between first column of last group and last column of first group
# but crossing the antimeridian
antimeridian_distance = (
groups[0][-1] + tiles[0][0].tile_pyramid.matrix_width(tiles[0][0].zoom)
) - groups[-1][0]
# return whether distance over antimeridian is shorter
return antimeridian_distance < normal_distance
else:
return False
|
[
"Determine",
"if",
"distance",
"over",
"antimeridian",
"is",
"shorter",
"than",
"normal",
"distance",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L727-L767
|
[
"def",
"_shift_required",
"(",
"tiles",
")",
":",
"if",
"tiles",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"tile_pyramid",
".",
"is_global",
":",
"# get set of tile columns",
"tile_cols",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"[",
"t",
"[",
"0",
"]",
".",
"col",
"for",
"t",
"in",
"tiles",
"]",
")",
")",
")",
"# if tile columns are an unbroken sequence, tiles are connected and are not",
"# passing the Antimeridian",
"if",
"tile_cols",
"==",
"list",
"(",
"range",
"(",
"min",
"(",
"tile_cols",
")",
",",
"max",
"(",
"tile_cols",
")",
"+",
"1",
")",
")",
":",
"return",
"False",
"else",
":",
"# look at column gaps and try to determine the smallest distance",
"def",
"gen_groups",
"(",
"items",
")",
":",
"\"\"\"Groups tile columns by sequence.\"\"\"",
"j",
"=",
"items",
"[",
"0",
"]",
"group",
"=",
"[",
"j",
"]",
"for",
"i",
"in",
"items",
"[",
"1",
":",
"]",
":",
"# item is next in expected sequence",
"if",
"i",
"==",
"j",
"+",
"1",
":",
"group",
".",
"append",
"(",
"i",
")",
"# gap occured, so yield existing group and create new one",
"else",
":",
"yield",
"group",
"group",
"=",
"[",
"i",
"]",
"j",
"=",
"i",
"yield",
"group",
"groups",
"=",
"list",
"(",
"gen_groups",
"(",
"tile_cols",
")",
")",
"# in case there is only one group, don't shift",
"if",
"len",
"(",
"groups",
")",
"==",
"1",
":",
"return",
"False",
"# distance between first column of first group and last column of last group",
"normal_distance",
"=",
"groups",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"-",
"groups",
"[",
"0",
"]",
"[",
"0",
"]",
"# distance between last column of first group and last column of first group",
"# but crossing the antimeridian",
"antimeridian_distance",
"=",
"(",
"groups",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"+",
"tiles",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"tile_pyramid",
".",
"matrix_width",
"(",
"tiles",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"zoom",
")",
")",
"-",
"groups",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"# return whether distance over antimeridian is shorter",
"return",
"antimeridian_distance",
"<",
"normal_distance",
"else",
":",
"return",
"False"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
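To see the column grouping logic in action, take two tiles hugging opposite edges of a global grid: the gap across the antimeridian (one column) is shorter than the normal distance, so a shift is required. Calling the private helper directly is for illustration only.

from mapchete.io.raster import _shift_required
from mapchete.tile import BufferedTilePyramid  # assumed helper

pyramid = BufferedTilePyramid("geodetic")
zoom = 5
last_col = pyramid.matrix_width(zoom) - 1
tiles = [(pyramid.tile(zoom, 0, 0), None), (pyramid.tile(zoom, 0, last_col), None)]
# columns 0 and last_col touch across the antimeridian
assert _shift_required(tiles)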
valid
|
memory_file
|
Return a rasterio.io.MemoryFile instance from input.
Parameters
----------
data : array
array to be written
profile : dict
rasterio profile for MemoryFile
|
mapchete/io/raster.py
|
def memory_file(data=None, profile=None):
"""
Return a rasterio.io.MemoryFile instance from input.
Parameters
----------
data : array
array to be written
profile : dict
rasterio profile for MemoryFile
"""
memfile = MemoryFile()
profile.update(width=data.shape[-1], height=data.shape[-2])
with memfile.open(**profile) as dataset:
dataset.write(data)
return memfile
|
def memory_file(data=None, profile=None):
"""
Return a rasterio.io.MemoryFile instance from input.
Parameters
----------
data : array
array to be written
profile : dict
rasterio profile for MemoryFile
"""
memfile = MemoryFile()
profile.update(width=data.shape[-1], height=data.shape[-2])
with memfile.open(**profile) as dataset:
dataset.write(data)
return memfile
|
[
"Return",
"a",
"rasterio",
".",
"io",
".",
"MemoryFile",
"instance",
"from",
"input",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L770-L785
|
[
"def",
"memory_file",
"(",
"data",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"memfile",
"=",
"MemoryFile",
"(",
")",
"profile",
".",
"update",
"(",
"width",
"=",
"data",
".",
"shape",
"[",
"-",
"2",
"]",
",",
"height",
"=",
"data",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"with",
"memfile",
".",
"open",
"(",
"*",
"*",
"profile",
")",
"as",
"dataset",
":",
"dataset",
".",
"write",
"(",
"data",
")",
"return",
"memfile"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
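A hedged sketch; memory_file() injects width and height from the array shape, so the profile only needs the remaining creation options.

import numpy as np
from affine import Affine
from mapchete.io.raster import memory_file

data = np.ones((1, 256, 256), dtype="uint8")
profile = dict(
    driver="GTiff", count=1, dtype="uint8",
    crs="EPSG:4326", transform=Affine(0.01, 0, 0, 0, -0.01, 0)
)
memfile = memory_file(data=data, profile=profile)
with memfile.open() as src:
    assert src.read().shape == (1, 256, 256)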
valid
|
prepare_array
|
Turn input data into a proper array for further usage.
Output array is always 3-dimensional with the given data type. If the output
is masked, the fill_value corresponds to the given nodata value and the
nodata value will be burned into the data array.
Parameters
----------
data : array or iterable
array (masked or normal) or iterable containing arrays
nodata : integer or float
nodata value (default: 0) used if input is not a masked array and
for output array
masked : bool
return a NumPy Array or a NumPy MaskedArray (default: True)
dtype : string
data type of output array (default: "int16")
Returns
-------
array : array
|
mapchete/io/raster.py
|
def prepare_array(data, masked=True, nodata=0, dtype="int16"):
"""
Turn input data into a proper array for further usage.
Output array is always 3-dimensional with the given data type. If the output
is masked, the fill_value corresponds to the given nodata value and the
nodata value will be burned into the data array.
Parameters
----------
data : array or iterable
array (masked or normal) or iterable containing arrays
nodata : integer or float
nodata value (default: 0) used if input is not a masked array and
for output array
masked : bool
return a NumPy Array or a NumPy MaskedArray (default: True)
dtype : string
data type of output array (default: "int16")
Returns
-------
array : array
"""
# input is iterable
if isinstance(data, (list, tuple)):
return _prepare_iterable(data, masked, nodata, dtype)
# special case if a 2D single band is provided
elif isinstance(data, np.ndarray) and data.ndim == 2:
data = ma.expand_dims(data, axis=0)
# input is a masked array
if isinstance(data, ma.MaskedArray):
return _prepare_masked(data, masked, nodata, dtype)
# input is a NumPy array
elif isinstance(data, np.ndarray):
if masked:
return ma.masked_values(data.astype(dtype, copy=False), nodata, copy=False)
else:
return data.astype(dtype, copy=False)
else:
raise ValueError(
"data must be array, masked array or iterable containing arrays."
)
|
def prepare_array(data, masked=True, nodata=0, dtype="int16"):
"""
Turn input data into a proper array for further usage.
Output array is always 3-dimensional with the given data type. If the output
is masked, the fill_value corresponds to the given nodata value and the
nodata value will be burned into the data array.
Parameters
----------
data : array or iterable
array (masked or normal) or iterable containing arrays
nodata : integer or float
nodata value (default: 0) used if input is not a masked array and
for output array
masked : bool
return a NumPy Array or a NumPy MaskedArray (default: True)
dtype : string
data type of output array (default: "int16")
Returns
-------
array : array
"""
# input is iterable
if isinstance(data, (list, tuple)):
return _prepare_iterable(data, masked, nodata, dtype)
# special case if a 2D single band is provided
elif isinstance(data, np.ndarray) and data.ndim == 2:
data = ma.expand_dims(data, axis=0)
# input is a masked array
if isinstance(data, ma.MaskedArray):
return _prepare_masked(data, masked, nodata, dtype)
# input is a NumPy array
elif isinstance(data, np.ndarray):
if masked:
return ma.masked_values(data.astype(dtype, copy=False), nodata, copy=False)
else:
return data.astype(dtype, copy=False)
else:
raise ValueError(
"data must be array, masked array or iterable containing arrays."
)
|
[
"Turn",
"input",
"data",
"into",
"a",
"proper",
"array",
"for",
"further",
"usage",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L788-L833
|
[
"def",
"prepare_array",
"(",
"data",
",",
"masked",
"=",
"True",
",",
"nodata",
"=",
"0",
",",
"dtype",
"=",
"\"int16\"",
")",
":",
"# input is iterable",
"if",
"isinstance",
"(",
"data",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"_prepare_iterable",
"(",
"data",
",",
"masked",
",",
"nodata",
",",
"dtype",
")",
"# special case if a 2D single band is provided",
"elif",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"and",
"data",
".",
"ndim",
"==",
"2",
":",
"data",
"=",
"ma",
".",
"expand_dims",
"(",
"data",
",",
"axis",
"=",
"0",
")",
"# input is a masked array",
"if",
"isinstance",
"(",
"data",
",",
"ma",
".",
"MaskedArray",
")",
":",
"return",
"_prepare_masked",
"(",
"data",
",",
"masked",
",",
"nodata",
",",
"dtype",
")",
"# input is a NumPy array",
"elif",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"masked",
":",
"return",
"ma",
".",
"masked_values",
"(",
"data",
".",
"astype",
"(",
"dtype",
",",
"copy",
"=",
"False",
")",
",",
"nodata",
",",
"copy",
"=",
"False",
")",
"else",
":",
"return",
"data",
".",
"astype",
"(",
"dtype",
",",
"copy",
"=",
"False",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"data must be array, masked array or iterable containing arrays.\"",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
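A small sketch showing the 2D-to-3D promotion and the nodata masking:

import numpy as np
from mapchete.io.raster import prepare_array

band = np.array([[0, 1], [2, 0]])
arr = prepare_array(band, nodata=0, dtype="uint8")
# arr.shape == (1, 2, 2); the zeros are masked and serve as fill_value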
valid
|
InputData.bbox
|
Return data bounding box.
Parameters
----------
out_crs : ``rasterio.crs.CRS``
rasterio CRS object (default: CRS of process pyramid)
Returns
-------
bounding box : geometry
Shapely geometry object
|
mapchete/formats/default/vector_file.py
|
def bbox(self, out_crs=None):
"""
Return data bounding box.
Parameters
----------
out_crs : ``rasterio.crs.CRS``
rasterio CRS object (default: CRS of process pyramid)
Returns
-------
bounding box : geometry
Shapely geometry object
"""
out_crs = self.pyramid.crs if out_crs is None else out_crs
with fiona.open(self.path) as inp:
inp_crs = CRS(inp.crs)
bbox = box(*inp.bounds)
# TODO find a way to get a good segmentize value in bbox source CRS
return reproject_geometry(bbox, src_crs=inp_crs, dst_crs=out_crs)
|
def bbox(self, out_crs=None):
"""
Return data bounding box.
Parameters
----------
out_crs : ``rasterio.crs.CRS``
rasterio CRS object (default: CRS of process pyramid)
Returns
-------
bounding box : geometry
Shapely geometry object
"""
out_crs = self.pyramid.crs if out_crs is None else out_crs
with fiona.open(self.path) as inp:
inp_crs = CRS(inp.crs)
bbox = box(*inp.bounds)
# TODO find a way to get a good segmentize value in bbox source CRS
return reproject_geometry(bbox, src_crs=inp_crs, dst_crs=out_crs)
|
[
"Return",
"data",
"bounding",
"box",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/vector_file.py#L73-L92
|
[
"def",
"bbox",
"(",
"self",
",",
"out_crs",
"=",
"None",
")",
":",
"out_crs",
"=",
"self",
".",
"pyramid",
".",
"crs",
"if",
"out_crs",
"is",
"None",
"else",
"out_crs",
"with",
"fiona",
".",
"open",
"(",
"self",
".",
"path",
")",
"as",
"inp",
":",
"inp_crs",
"=",
"CRS",
"(",
"inp",
".",
"crs",
")",
"bbox",
"=",
"box",
"(",
"*",
"inp",
".",
"bounds",
")",
"# TODO find a way to get a good segmentize value in bbox source CRS",
"return",
"reproject_geometry",
"(",
"bbox",
",",
"src_crs",
"=",
"inp_crs",
",",
"dst_crs",
"=",
"out_crs",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
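bbox() is normally invoked through a mapchete process configuration; below is an equivalent standalone computation, with the file path as a placeholder.

import fiona
from rasterio.crs import CRS
from shapely.geometry import box
from mapchete.io.vector import reproject_geometry

with fiona.open("input.geojson") as src:  # placeholder path
    bbox = reproject_geometry(
        box(*src.bounds), src_crs=CRS(src.crs), dst_crs=CRS.from_epsg(4326)
    )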
valid
|
InputTile.read
|
Read reprojected & resampled input data.
Parameters
----------
validity_check : bool
also check whether the reprojected geometry is valid, otherwise raise
``RuntimeError`` (default: True)
Returns
-------
data : list
|
mapchete/formats/default/vector_file.py
|
def read(self, validity_check=True, **kwargs):
"""
Read reprojected & resampled input data.
Parameters
----------
validity_check : bool
also check whether the reprojected geometry is valid, otherwise raise
``RuntimeError`` (default: True)
Returns
-------
data : list
"""
return [] if self.is_empty() else self._read_from_cache(validity_check)
|
def read(self, validity_check=True, **kwargs):
"""
Read reprojected & resampled input data.
Parameters
----------
validity_check : bool
also check whether the reprojected geometry is valid, otherwise raise
``RuntimeError`` (default: True)
Returns
-------
data : list
"""
return [] if self.is_empty() else self._read_from_cache(validity_check)
|
[
"Read",
"reprojected",
"&",
"resampled",
"input",
"data",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/vector_file.py#L118-L132
|
[
"def",
"read",
"(",
"self",
",",
"validity_check",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"[",
"]",
"if",
"self",
".",
"is_empty",
"(",
")",
"else",
"self",
".",
"_read_from_cache",
"(",
"validity_check",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
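read() and the is_empty() method in the next entry are usually reached from within a process function; a hedged sketch, where "vector" is an assumed input key of the .mapchete configuration:

def execute(mp):
    with mp.open("vector") as src:
        # is_empty() short-circuits on a cheap bbox intersection test before
        # reading; read() returns reprojected, clipped GeoJSON-like features
        if src.is_empty():
            return "empty"
        return src.read(validity_check=True)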
valid
|
InputTile.is_empty
|
Check if there is data within this tile.
Returns
-------
is empty : bool
|
mapchete/formats/default/vector_file.py
|
def is_empty(self):
"""
Check if there is data within this tile.
Returns
-------
is empty : bool
"""
if not self.tile.bbox.intersects(self.vector_file.bbox()):
return True
return len(self._read_from_cache(True)) == 0
|
def is_empty(self):
"""
Check if there is data within this tile.
Returns
-------
is empty : bool
"""
if not self.tile.bbox.intersects(self.vector_file.bbox()):
return True
return len(self._read_from_cache(True)) == 0
|
[
"Check",
"if",
"there",
"is",
"data",
"within",
"this",
"tile",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/vector_file.py#L134-L144
|
[
"def",
"is_empty",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"tile",
".",
"bbox",
".",
"intersects",
"(",
"self",
".",
"vector_file",
".",
"bbox",
"(",
")",
")",
":",
"return",
"True",
"return",
"len",
"(",
"self",
".",
"_read_from_cache",
"(",
"True",
")",
")",
"==",
"0"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
reproject_geometry
|
Reproject a geometry to target CRS.
Also clips the geometry if it lies outside the destination CRS boundary.
Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical
Mercator) and 3035 (ETRS89 / ETRS-LAEA).
Parameters
----------
geometry : ``shapely.geometry``
src_crs : ``rasterio.crs.CRS`` or EPSG code
CRS of source data
dst_crs : ``rasterio.crs.CRS`` or EPSG code
target CRS
error_on_clip : bool
raises a ``RuntimeError`` if a geometry is outside of CRS bounds
(default: False)
validity_check : bool
checks if reprojected geometry is valid and throws ``TopologicalError``
if invalid (default: True)
antimeridian_cutting : bool
cut geometry at Antimeridian; can result in a multipart output geometry
Returns
-------
geometry : ``shapely.geometry``
|
mapchete/io/vector.py
|
def reproject_geometry(
geometry, src_crs=None, dst_crs=None, error_on_clip=False, validity_check=True,
antimeridian_cutting=False
):
"""
Reproject a geometry to target CRS.
Also clips the geometry if it lies outside the destination CRS boundary.
Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical
Mercator) and 3035 (ETRS89 / ETRS-LAEA).
Parameters
----------
geometry : ``shapely.geometry``
src_crs : ``rasterio.crs.CRS`` or EPSG code
CRS of source data
dst_crs : ``rasterio.crs.CRS`` or EPSG code
target CRS
error_on_clip : bool
raises a ``RuntimeError`` if a geometry is outside of CRS bounds
(default: False)
validity_check : bool
checks if reprojected geometry is valid and throws ``TopologicalError``
if invalid (default: True)
antimeridian_cutting : bool
cut geometry at Antimeridian; can result in a multipart output geometry
Returns
-------
geometry : ``shapely.geometry``
"""
src_crs = _validated_crs(src_crs)
dst_crs = _validated_crs(dst_crs)
def _reproject_geom(geometry, src_crs, dst_crs):
if geometry.is_empty:
return geometry
else:
out_geom = to_shape(
transform_geom(
src_crs.to_dict(),
dst_crs.to_dict(),
mapping(geometry),
antimeridian_cutting=antimeridian_cutting
)
)
return _repair(out_geom) if validity_check else out_geom
# return repaired geometry if no reprojection needed
if src_crs == dst_crs or geometry.is_empty:
return _repair(geometry)
# geometry needs to be clipped to its CRS bounds
elif (
dst_crs.is_epsg_code and  # just in case for a CRS with EPSG code
dst_crs.get("init") in CRS_BOUNDS and # if CRS has defined bounds
dst_crs.get("init") != "epsg:4326" # and is not WGS84 (does not need clipping)
):
wgs84_crs = CRS().from_epsg(4326)
# get dst_crs boundaries
crs_bbox = box(*CRS_BOUNDS[dst_crs.get("init")])
# reproject geometry to WGS84
geometry_4326 = _reproject_geom(geometry, src_crs, wgs84_crs)
# raise error if geometry has to be clipped
if error_on_clip and not geometry_4326.within(crs_bbox):
raise RuntimeError("geometry outside target CRS bounds")
# clip geometry to dst_crs boundaries and return
return _reproject_geom(crs_bbox.intersection(geometry_4326), wgs84_crs, dst_crs)
# return without clipping if destination CRS does not have defined bounds
else:
return _reproject_geom(geometry, src_crs, dst_crs)
|
def reproject_geometry(
geometry, src_crs=None, dst_crs=None, error_on_clip=False, validity_check=True,
antimeridian_cutting=False
):
"""
Reproject a geometry to target CRS.
Also clips the geometry if it lies outside the destination CRS boundary.
Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical
Mercator) and 3035 (ETRS89 / ETRS-LAEA).
Parameters
----------
geometry : ``shapely.geometry``
src_crs : ``rasterio.crs.CRS`` or EPSG code
CRS of source data
dst_crs : ``rasterio.crs.CRS`` or EPSG code
target CRS
error_on_clip : bool
raises a ``RuntimeError`` if a geometry is outside of CRS bounds
(default: False)
validity_check : bool
checks if reprojected geometry is valid and throws ``TopologicalError``
if invalid (default: True)
antimeridian_cutting : bool
cut geometry at Antimeridian; can result in a multipart output geometry
Returns
-------
geometry : ``shapely.geometry``
"""
src_crs = _validated_crs(src_crs)
dst_crs = _validated_crs(dst_crs)
def _reproject_geom(geometry, src_crs, dst_crs):
if geometry.is_empty:
return geometry
else:
out_geom = to_shape(
transform_geom(
src_crs.to_dict(),
dst_crs.to_dict(),
mapping(geometry),
antimeridian_cutting=antimeridian_cutting
)
)
return _repair(out_geom) if validity_check else out_geom
# return repaired geometry if no reprojection needed
if src_crs == dst_crs or geometry.is_empty:
return _repair(geometry)
# geometry needs to be clipped to its CRS bounds
elif (
dst_crs.is_epsg_code and  # just in case for a CRS with EPSG code
dst_crs.get("init") in CRS_BOUNDS and # if CRS has defined bounds
dst_crs.get("init") != "epsg:4326" # and is not WGS84 (does not need clipping)
):
wgs84_crs = CRS().from_epsg(4326)
# get dst_crs boundaries
crs_bbox = box(*CRS_BOUNDS[dst_crs.get("init")])
# reproject geometry to WGS84
geometry_4326 = _reproject_geom(geometry, src_crs, wgs84_crs)
# raise error if geometry has to be clipped
if error_on_clip and not geometry_4326.within(crs_bbox):
raise RuntimeError("geometry outside target CRS bounds")
# clip geometry to dst_crs boundaries and return
return _reproject_geom(crs_bbox.intersection(geometry_4326), wgs84_crs, dst_crs)
# return without clipping if destination CRS does not have defined bounds
else:
return _reproject_geom(geometry, src_crs, dst_crs)
|
[
"Reproject",
"a",
"geometry",
"to",
"target",
"CRS",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/vector.py#L34-L105
|
[
"def",
"reproject_geometry",
"(",
"geometry",
",",
"src_crs",
"=",
"None",
",",
"dst_crs",
"=",
"None",
",",
"error_on_clip",
"=",
"False",
",",
"validity_check",
"=",
"True",
",",
"antimeridian_cutting",
"=",
"False",
")",
":",
"src_crs",
"=",
"_validated_crs",
"(",
"src_crs",
")",
"dst_crs",
"=",
"_validated_crs",
"(",
"dst_crs",
")",
"def",
"_reproject_geom",
"(",
"geometry",
",",
"src_crs",
",",
"dst_crs",
")",
":",
"if",
"geometry",
".",
"is_empty",
":",
"return",
"geometry",
"else",
":",
"out_geom",
"=",
"to_shape",
"(",
"transform_geom",
"(",
"src_crs",
".",
"to_dict",
"(",
")",
",",
"dst_crs",
".",
"to_dict",
"(",
")",
",",
"mapping",
"(",
"geometry",
")",
",",
"antimeridian_cutting",
"=",
"antimeridian_cutting",
")",
")",
"return",
"_repair",
"(",
"out_geom",
")",
"if",
"validity_check",
"else",
"out_geom",
"# return repaired geometry if no reprojection needed",
"if",
"src_crs",
"==",
"dst_crs",
"or",
"geometry",
".",
"is_empty",
":",
"return",
"_repair",
"(",
"geometry",
")",
"# geometry needs to be clipped to its CRS bounds",
"elif",
"(",
"dst_crs",
".",
"is_epsg_code",
"and",
"# just in case for an CRS with EPSG code",
"dst_crs",
".",
"get",
"(",
"\"init\"",
")",
"in",
"CRS_BOUNDS",
"and",
"# if CRS has defined bounds",
"dst_crs",
".",
"get",
"(",
"\"init\"",
")",
"!=",
"\"epsg:4326\"",
"# and is not WGS84 (does not need clipping)",
")",
":",
"wgs84_crs",
"=",
"CRS",
"(",
")",
".",
"from_epsg",
"(",
"4326",
")",
"# get dst_crs boundaries",
"crs_bbox",
"=",
"box",
"(",
"*",
"CRS_BOUNDS",
"[",
"dst_crs",
".",
"get",
"(",
"\"init\"",
")",
"]",
")",
"# reproject geometry to WGS84",
"geometry_4326",
"=",
"_reproject_geom",
"(",
"geometry",
",",
"src_crs",
",",
"wgs84_crs",
")",
"# raise error if geometry has to be clipped",
"if",
"error_on_clip",
"and",
"not",
"geometry_4326",
".",
"within",
"(",
"crs_bbox",
")",
":",
"raise",
"RuntimeError",
"(",
"\"geometry outside target CRS bounds\"",
")",
"# clip geometry dst_crs boundaries and return",
"return",
"_reproject_geom",
"(",
"crs_bbox",
".",
"intersection",
"(",
"geometry_4326",
")",
",",
"wgs84_crs",
",",
"dst_crs",
")",
"# return without clipping if destination CRS does not have defined bounds",
"else",
":",
"return",
"_reproject_geom",
"(",
"geometry",
",",
"src_crs",
",",
"dst_crs",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
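A hedged sketch; EPSG:3857 is one of the destination CRSes with defined bounds, so any part of the geometry reaching beyond them would be clipped.

from rasterio.crs import CRS
from shapely.geometry import box
from mapchete.io.vector import reproject_geometry

geom = box(0, 0, 10, 10)
mercator = reproject_geometry(
    geom, src_crs=CRS.from_epsg(4326), dst_crs=CRS.from_epsg(3857)
)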
valid
|
segmentize_geometry
|
Segmentize Polygon outer ring by segmentize value.
Only the Polygon geometry type is supported.
Parameters
----------
geometry : ``shapely.geometry``
segmentize_value: float
Returns
-------
geometry : ``shapely.geometry``
|
mapchete/io/vector.py
|
def segmentize_geometry(geometry, segmentize_value):
"""
Segmentize Polygon outer ring by segmentize value.
Only the Polygon geometry type is supported.
Parameters
----------
geometry : ``shapely.geometry``
segmentize_value: float
Returns
-------
geometry : ``shapely.geometry``
"""
if geometry.geom_type != "Polygon":
raise TypeError("segmentize geometry type must be Polygon")
return Polygon(
LinearRing([
p
# pick polygon linestrings
for l in map(
lambda x: LineString([x[0], x[1]]),
zip(geometry.exterior.coords[:-1], geometry.exterior.coords[1:])
)
# interpolate additional points in between and don't forget end point
for p in [
l.interpolate(segmentize_value * i).coords[0]
for i in range(int(l.length / segmentize_value))
] + [l.coords[1]]
])
)
|
def segmentize_geometry(geometry, segmentize_value):
"""
Segmentize Polygon outer ring by segmentize value.
Only the Polygon geometry type is supported.
Parameters
----------
geometry : ``shapely.geometry``
segmentize_value: float
Returns
-------
geometry : ``shapely.geometry``
"""
if geometry.geom_type != "Polygon":
raise TypeError("segmentize geometry type must be Polygon")
return Polygon(
LinearRing([
p
# pick polygon linestrings
for l in map(
lambda x: LineString([x[0], x[1]]),
zip(geometry.exterior.coords[:-1], geometry.exterior.coords[1:])
)
# interpolate additional points in between and don't forget end point
for p in [
l.interpolate(segmentize_value * i).coords[0]
for i in range(int(l.length / segmentize_value))
] + [l.coords[1]]
])
)
|
[
"Segmentize",
"Polygon",
"outer",
"ring",
"by",
"segmentize",
"value",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/vector.py#L131-L163
|
[
"def",
"segmentize_geometry",
"(",
"geometry",
",",
"segmentize_value",
")",
":",
"if",
"geometry",
".",
"geom_type",
"!=",
"\"Polygon\"",
":",
"raise",
"TypeError",
"(",
"\"segmentize geometry type must be Polygon\"",
")",
"return",
"Polygon",
"(",
"LinearRing",
"(",
"[",
"p",
"# pick polygon linestrings",
"for",
"l",
"in",
"map",
"(",
"lambda",
"x",
":",
"LineString",
"(",
"[",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"1",
"]",
"]",
")",
",",
"zip",
"(",
"geometry",
".",
"exterior",
".",
"coords",
"[",
":",
"-",
"1",
"]",
",",
"geometry",
".",
"exterior",
".",
"coords",
"[",
"1",
":",
"]",
")",
")",
"# interpolate additional points in between and don't forget end point",
"for",
"p",
"in",
"[",
"l",
".",
"interpolate",
"(",
"segmentize_value",
"*",
"i",
")",
".",
"coords",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"int",
"(",
"l",
".",
"length",
"/",
"segmentize_value",
")",
")",
"]",
"+",
"[",
"l",
".",
"coords",
"[",
"1",
"]",
"]",
"]",
")",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
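A short sketch: each 10-unit edge of the square below gets intermediate vertices every 1 unit, which keeps reprojected edges from cutting corners.

from shapely.geometry import box
from mapchete.io.vector import segmentize_geometry

square = box(0, 0, 10, 10)  # 5 exterior coords
dense = segmentize_geometry(square, 1.0)
# dense.exterior now carries interpolated points along every edge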
valid
|
read_vector_window
|
Read a window of an input vector dataset.
Also clips geometry.
Parameters:
-----------
input_files : string or list
path(s) to vector file(s)
tile : ``Tile``
tile extent to read data from
validity_check : bool
checks if reprojected geometry is valid and throws ``RuntimeError`` if
invalid (default: True)
Returns
-------
features : list
a list of reprojected GeoJSON-like features
|
mapchete/io/vector.py
|
def read_vector_window(input_files, tile, validity_check=True):
"""
Read a window of an input vector dataset.
Also clips geometry.
Parameters:
-----------
input_files : string or list
path(s) to vector file(s)
tile : ``Tile``
tile extent to read data from
validity_check : bool
checks if reprojected geometry is valid and throws ``RuntimeError`` if
invalid (default: True)
Returns
-------
features : list
a list of reprojected GeoJSON-like features
"""
if not isinstance(input_files, list):
input_files = [input_files]
return [
feature
for feature in chain.from_iterable([
_read_vector_window(path, tile, validity_check=validity_check)
for path in input_files
])
]
|
def read_vector_window(input_files, tile, validity_check=True):
"""
Read a window of an input vector dataset.
Also clips geometry.
Parameters:
-----------
input_files : string or list
path(s) to vector file(s)
tile : ``Tile``
tile extent to read data from
validity_check : bool
checks if reprojected geometry is valid and throws ``RuntimeError`` if
invalid (default: True)
Returns
-------
features : list
a list of reprojected GeoJSON-like features
"""
if not isinstance(input_files, list):
input_files = [input_files]
return [
feature
for feature in chain.from_iterable([
_read_vector_window(path, tile, validity_check=validity_check)
for path in input_files
])
]
|
[
"Read",
"a",
"window",
"of",
"an",
"input",
"vector",
"dataset",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/vector.py#L166-L195
|
[
"def",
"read_vector_window",
"(",
"input_files",
",",
"tile",
",",
"validity_check",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"input_files",
",",
"list",
")",
":",
"input_files",
"=",
"[",
"input_files",
"]",
"return",
"[",
"feature",
"for",
"feature",
"in",
"chain",
".",
"from_iterable",
"(",
"[",
"_read_vector_window",
"(",
"path",
",",
"tile",
",",
"validity_check",
"=",
"validity_check",
")",
"for",
"path",
"in",
"input_files",
"]",
")",
"]"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
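A hedged sketch; the path is a placeholder and the tile construction is assumed.

from mapchete.io.vector import read_vector_window
from mapchete.tile import BufferedTilePyramid  # assumed helper

tile = BufferedTilePyramid("geodetic").tile(8, 100, 100)
features = read_vector_window("data.geojson", tile)  # placeholder path
# features are reprojected to the tile CRS and clipped to the tile bounds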
valid
|
write_vector_window
|
Write features to GeoJSON file.
Parameters
----------
in_data : features
out_schema : dictionary
output schema for fiona
out_tile : ``BufferedTile``
tile used for output extent
out_path : string
output path for GeoJSON file
|
mapchete/io/vector.py
|
def write_vector_window(
in_data=None, out_schema=None, out_tile=None, out_path=None, bucket_resource=None
):
"""
Write features to GeoJSON file.
Parameters
----------
in_data : features
out_schema : dictionary
output schema for fiona
out_tile : ``BufferedTile``
tile used for output extent
out_path : string
output path for GeoJSON file
"""
# Delete existing file.
try:
os.remove(out_path)
except OSError:
pass
out_features = []
for feature in in_data:
try:
# clip feature geometry to tile bounding box and append for writing
# if clipped feature still matches the output geometry type
for out_geom in multipart_to_singleparts(
clean_geometry_type(
to_shape(feature["geometry"]).intersection(out_tile.bbox),
out_schema["geometry"]
)
):
out_features.append({
"geometry": mapping(out_geom),
"properties": feature["properties"]
})
except Exception as e:
logger.warning("failed to prepare geometry for writing: %s", e)
continue
# write if there are output features
if out_features:
try:
if out_path.startswith("s3://"):
# write data to remote file
with VectorWindowMemoryFile(
tile=out_tile,
features=out_features,
schema=out_schema,
driver="GeoJSON"
) as memfile:
logger.debug((out_tile.id, "upload tile", out_path))
bucket_resource.put_object(
Key="/".join(out_path.split("/")[3:]),
Body=memfile
)
else:
# write data to local file
with fiona.open(
out_path, 'w', schema=out_schema, driver="GeoJSON",
crs=out_tile.crs.to_dict()
) as dst:
logger.debug((out_tile.id, "write tile", out_path))
dst.writerecords(out_features)
except Exception as e:
logger.error("error while writing file %s: %s", out_path, e)
raise
else:
logger.debug((out_tile.id, "nothing to write", out_path))
|
def write_vector_window(
in_data=None, out_schema=None, out_tile=None, out_path=None, bucket_resource=None
):
"""
Write features to GeoJSON file.
Parameters
----------
in_data : features
out_schema : dictionary
output schema for fiona
out_tile : ``BufferedTile``
tile used for output extent
out_path : string
output path for GeoJSON file
"""
# Delete existing file.
try:
os.remove(out_path)
except OSError:
pass
out_features = []
for feature in in_data:
try:
# clip feature geometry to tile bounding box and append for writing
            # if clipped feature still matches the target geometry type
for out_geom in multipart_to_singleparts(
clean_geometry_type(
to_shape(feature["geometry"]).intersection(out_tile.bbox),
out_schema["geometry"]
)
):
out_features.append({
"geometry": mapping(out_geom),
"properties": feature["properties"]
})
except Exception as e:
logger.warning("failed to prepare geometry for writing: %s", e)
continue
# write if there are output features
if out_features:
try:
if out_path.startswith("s3://"):
# write data to remote file
with VectorWindowMemoryFile(
tile=out_tile,
features=out_features,
schema=out_schema,
driver="GeoJSON"
) as memfile:
logger.debug((out_tile.id, "upload tile", out_path))
bucket_resource.put_object(
Key="/".join(out_path.split("/")[3:]),
Body=memfile
)
else:
# write data to local file
with fiona.open(
out_path, 'w', schema=out_schema, driver="GeoJSON",
crs=out_tile.crs.to_dict()
) as dst:
logger.debug((out_tile.id, "write tile", out_path))
dst.writerecords(out_features)
except Exception as e:
logger.error("error while writing file %s: %s", out_path, e)
raise
else:
logger.debug((out_tile.id, "nothing to write", out_path))
|
[
"Write",
"features",
"to",
"GeoJSON",
"file",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/vector.py#L221-L292
|
[
"def",
"write_vector_window",
"(",
"in_data",
"=",
"None",
",",
"out_schema",
"=",
"None",
",",
"out_tile",
"=",
"None",
",",
"out_path",
"=",
"None",
",",
"bucket_resource",
"=",
"None",
")",
":",
"# Delete existing file.",
"try",
":",
"os",
".",
"remove",
"(",
"out_path",
")",
"except",
"OSError",
":",
"pass",
"out_features",
"=",
"[",
"]",
"for",
"feature",
"in",
"in_data",
":",
"try",
":",
"# clip feature geometry to tile bounding box and append for writing",
"# if clipped feature still",
"for",
"out_geom",
"in",
"multipart_to_singleparts",
"(",
"clean_geometry_type",
"(",
"to_shape",
"(",
"feature",
"[",
"\"geometry\"",
"]",
")",
".",
"intersection",
"(",
"out_tile",
".",
"bbox",
")",
",",
"out_schema",
"[",
"\"geometry\"",
"]",
")",
")",
":",
"out_features",
".",
"append",
"(",
"{",
"\"geometry\"",
":",
"mapping",
"(",
"out_geom",
")",
",",
"\"properties\"",
":",
"feature",
"[",
"\"properties\"",
"]",
"}",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"\"failed to prepare geometry for writing: %s\"",
",",
"e",
")",
"continue",
"# write if there are output features",
"if",
"out_features",
":",
"try",
":",
"if",
"out_path",
".",
"startswith",
"(",
"\"s3://\"",
")",
":",
"# write data to remote file",
"with",
"VectorWindowMemoryFile",
"(",
"tile",
"=",
"out_tile",
",",
"features",
"=",
"out_features",
",",
"schema",
"=",
"out_schema",
",",
"driver",
"=",
"\"GeoJSON\"",
")",
"as",
"memfile",
":",
"logger",
".",
"debug",
"(",
"(",
"out_tile",
".",
"id",
",",
"\"upload tile\"",
",",
"out_path",
")",
")",
"bucket_resource",
".",
"put_object",
"(",
"Key",
"=",
"\"/\"",
".",
"join",
"(",
"out_path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"3",
":",
"]",
")",
",",
"Body",
"=",
"memfile",
")",
"else",
":",
"# write data to local file",
"with",
"fiona",
".",
"open",
"(",
"out_path",
",",
"'w'",
",",
"schema",
"=",
"out_schema",
",",
"driver",
"=",
"\"GeoJSON\"",
",",
"crs",
"=",
"out_tile",
".",
"crs",
".",
"to_dict",
"(",
")",
")",
"as",
"dst",
":",
"logger",
".",
"debug",
"(",
"(",
"out_tile",
".",
"id",
",",
"\"write tile\"",
",",
"out_path",
")",
")",
"dst",
".",
"writerecords",
"(",
"out_features",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"error while writing file %s: %s\"",
",",
"out_path",
",",
"e",
")",
"raise",
"else",
":",
"logger",
".",
"debug",
"(",
"(",
"out_tile",
".",
"id",
",",
"\"nothing to write\"",
",",
"out_path",
")",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
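A hedged sketch of a local write_vector_window call; the output path, schema and feature are illustrative values, not project defaults. The point sits at the tile center, so it survives the clipping step:

from shapely.geometry import mapping
from mapchete.io.vector import write_vector_window
from mapchete.tile import BufferedTilePyramid

tile = BufferedTilePyramid("geodetic").tile(4, 3, 7)
# one point feature placed at the tile center
feature = {"geometry": mapping(tile.bbox.centroid), "properties": {"id": 1}}
write_vector_window(
    in_data=[feature],
    out_schema={"geometry": "Point", "properties": {"id": "int"}},
    out_tile=tile,
    out_path="tile.geojson",  # hypothetical local path; no bucket_resource needed
)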
clean_geometry_type
|
Return geometry of a specific type if possible.
Filters and splits up GeometryCollection into target types. This is
necessary when after clipping and/or reprojecting the geometry types from
source geometries change (i.e. a Polygon becomes a LineString or a
LineString becomes Point) in some edge cases.
Parameters
----------
geometry : ``shapely.geometry``
target_type : string
target geometry type
allow_multipart : bool
allow multipart geometries (default: True)
Returns
-------
cleaned geometry : ``shapely.geometry``
the input geometry, cast to the target multipart type where necessary
Raises
------
GeometryTypeError : if geometry type does not match target_type
|
mapchete/io/vector.py
|
def clean_geometry_type(geometry, target_type, allow_multipart=True):
"""
Return geometry of a specific type if possible.
Filters and splits up GeometryCollection into target types. This is
necessary when after clipping and/or reprojecting the geometry types from
source geometries change (i.e. a Polygon becomes a LineString or a
LineString becomes Point) in some edge cases.
Parameters
----------
geometry : ``shapely.geometry``
target_type : string
target geometry type
allow_multipart : bool
allow multipart geometries (default: True)
Returns
-------
cleaned geometry : ``shapely.geometry``
        the input geometry, cast to the target multipart type where necessary
Raises
------
GeometryTypeError : if geometry type does not match target_type
"""
multipart_geoms = {
"Point": MultiPoint,
"LineString": MultiLineString,
"Polygon": MultiPolygon,
"MultiPoint": MultiPoint,
"MultiLineString": MultiLineString,
"MultiPolygon": MultiPolygon
}
if target_type not in multipart_geoms.keys():
raise TypeError("target type is not supported: %s" % target_type)
if geometry.geom_type == target_type:
return geometry
elif allow_multipart:
target_multipart_type = multipart_geoms[target_type]
if geometry.geom_type == "GeometryCollection":
return target_multipart_type([
clean_geometry_type(g, target_type, allow_multipart)
for g in geometry])
elif any([
isinstance(geometry, target_multipart_type),
multipart_geoms[geometry.geom_type] == target_multipart_type
]):
return geometry
raise GeometryTypeError(
"geometry type does not match: %s, %s" % (geometry.geom_type, target_type)
)
|
def clean_geometry_type(geometry, target_type, allow_multipart=True):
"""
Return geometry of a specific type if possible.
Filters and splits up GeometryCollection into target types. This is
necessary when after clipping and/or reprojecting the geometry types from
source geometries change (i.e. a Polygon becomes a LineString or a
LineString becomes Point) in some edge cases.
Parameters
----------
geometry : ``shapely.geometry``
target_type : string
target geometry type
allow_multipart : bool
allow multipart geometries (default: True)
Returns
-------
cleaned geometry : ``shapely.geometry``
        the input geometry, cast to the target multipart type where necessary
Raises
------
GeometryTypeError : if geometry type does not match target_type
"""
multipart_geoms = {
"Point": MultiPoint,
"LineString": MultiLineString,
"Polygon": MultiPolygon,
"MultiPoint": MultiPoint,
"MultiLineString": MultiLineString,
"MultiPolygon": MultiPolygon
}
if target_type not in multipart_geoms.keys():
raise TypeError("target type is not supported: %s" % target_type)
if geometry.geom_type == target_type:
return geometry
elif allow_multipart:
target_multipart_type = multipart_geoms[target_type]
if geometry.geom_type == "GeometryCollection":
return target_multipart_type([
clean_geometry_type(g, target_type, allow_multipart)
for g in geometry])
elif any([
isinstance(geometry, target_multipart_type),
multipart_geoms[geometry.geom_type] == target_multipart_type
]):
return geometry
raise GeometryTypeError(
"geometry type does not match: %s, %s" % (geometry.geom_type, target_type)
)
|
[
"Return",
"geometry",
"of",
"a",
"specific",
"type",
"if",
"possible",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/vector.py#L373-L428
|
[
"def",
"clean_geometry_type",
"(",
"geometry",
",",
"target_type",
",",
"allow_multipart",
"=",
"True",
")",
":",
"multipart_geoms",
"=",
"{",
"\"Point\"",
":",
"MultiPoint",
",",
"\"LineString\"",
":",
"MultiLineString",
",",
"\"Polygon\"",
":",
"MultiPolygon",
",",
"\"MultiPoint\"",
":",
"MultiPoint",
",",
"\"MultiLineString\"",
":",
"MultiLineString",
",",
"\"MultiPolygon\"",
":",
"MultiPolygon",
"}",
"if",
"target_type",
"not",
"in",
"multipart_geoms",
".",
"keys",
"(",
")",
":",
"raise",
"TypeError",
"(",
"\"target type is not supported: %s\"",
"%",
"target_type",
")",
"if",
"geometry",
".",
"geom_type",
"==",
"target_type",
":",
"return",
"geometry",
"elif",
"allow_multipart",
":",
"target_multipart_type",
"=",
"multipart_geoms",
"[",
"target_type",
"]",
"if",
"geometry",
".",
"geom_type",
"==",
"\"GeometryCollection\"",
":",
"return",
"target_multipart_type",
"(",
"[",
"clean_geometry_type",
"(",
"g",
",",
"target_type",
",",
"allow_multipart",
")",
"for",
"g",
"in",
"geometry",
"]",
")",
"elif",
"any",
"(",
"[",
"isinstance",
"(",
"geometry",
",",
"target_multipart_type",
")",
",",
"multipart_geoms",
"[",
"geometry",
".",
"geom_type",
"]",
"==",
"target_multipart_type",
"]",
")",
":",
"return",
"geometry",
"raise",
"GeometryTypeError",
"(",
"\"geometry type does not match: %s, %s\"",
"%",
"(",
"geometry",
".",
"geom_type",
",",
"target_type",
")",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
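A small self-contained example; with allow_multipart=True a multipart geometry of the matching family passes through unchanged, while a type mismatch raises mapchete's GeometryTypeError:

from shapely.geometry import MultiPolygon, Polygon
from mapchete.io.vector import clean_geometry_type

square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
print(clean_geometry_type(square, "Polygon").geom_type)                   # Polygon
print(clean_geometry_type(MultiPolygon([square]), "Polygon").geom_type)  # MultiPolygon
# clean_geometry_type(square, "Point") would raise GeometryTypeError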
multipart_to_singleparts
|
Yield single part geometries if geom is multipart, otherwise yield geom.
Parameters:
-----------
geom : shapely geometry
Yields:
-------
shapely single part geometries
|
mapchete/io/vector.py
|
def multipart_to_singleparts(geom):
"""
Yield single part geometries if geom is multipart, otherwise yield geom.
Parameters:
-----------
geom : shapely geometry
    Yields:
    -------
shapely single part geometries
"""
if isinstance(geom, base.BaseGeometry):
if hasattr(geom, "geoms"):
for subgeom in geom:
yield subgeom
else:
yield geom
|
def multipart_to_singleparts(geom):
"""
Yield single part geometries if geom is multipart, otherwise yield geom.
Parameters:
-----------
geom : shapely geometry
    Yields:
    -------
shapely single part geometries
"""
if isinstance(geom, base.BaseGeometry):
if hasattr(geom, "geoms"):
for subgeom in geom:
yield subgeom
else:
yield geom
|
[
"Yield",
"single",
"part",
"geometries",
"if",
"geom",
"is",
"multipart",
"otherwise",
"yield",
"geom",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/vector.py#L446-L463
|
[
"def",
"multipart_to_singleparts",
"(",
"geom",
")",
":",
"if",
"isinstance",
"(",
"geom",
",",
"base",
".",
"BaseGeometry",
")",
":",
"if",
"hasattr",
"(",
"geom",
",",
"\"geoms\"",
")",
":",
"for",
"subgeom",
"in",
"geom",
":",
"yield",
"subgeom",
"else",
":",
"yield",
"geom"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
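Quick example; the generator flattens multipart geometries and passes single parts through unchanged:

from shapely.geometry import MultiPoint, Point
from mapchete.io.vector import multipart_to_singleparts

print([g.wkt for g in multipart_to_singleparts(MultiPoint([(0, 0), (1, 1)]))])
# ['POINT (0 0)', 'POINT (1 1)']
print([g.wkt for g in multipart_to_singleparts(Point(5, 5))])
# ['POINT (5 5)']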
execute
|
Convert and optionally clip input raster data.
Inputs:
-------
raster
singleband or multiband data input
clip (optional)
vector data used to clip output
Parameters
----------
td_resampling : str (default: 'nearest')
Resampling used when reading from TileDirectory.
td_matching_method : str ('gdal' or 'min') (default: 'gdal')
gdal: Uses GDAL's standard method. Here, the target resolution is
calculated by averaging the extent's pixel sizes over both x and y
axes. This approach returns a zoom level which may not have the
best quality but will speed up reading significantly.
min: Returns the zoom level which matches the minimum resolution of the
extent's four corner pixels. This approach returns the zoom level
with the best possible quality but with low performance. If the
tile extent is outside of the destination pyramid, a
TopologicalError will be raised.
td_matching_max_zoom : int (optional, default: None)
If set, it will prevent reading from zoom levels above the maximum.
td_matching_precision : int (default: 8)
Round resolutions to n digits before comparing.
td_fallback_to_higher_zoom : bool (default: False)
In case no data is found at zoom level, try to read data from higher
zoom levels. Enabling this setting can lead to many IO requests in
areas with no data.
clip_pixelbuffer : int
Use pixelbuffer when clipping output by geometry. (default: 0)
Output
------
np.ndarray
|
mapchete/processes/convert.py
|
def execute(
mp,
td_resampling="nearest",
td_matching_method="gdal",
td_matching_max_zoom=None,
td_matching_precision=8,
td_fallback_to_higher_zoom=False,
clip_pixelbuffer=0,
**kwargs
):
"""
Convert and optionally clip input raster data.
Inputs:
-------
raster
singleband or multiband data input
clip (optional)
vector data used to clip output
Parameters
----------
td_resampling : str (default: 'nearest')
Resampling used when reading from TileDirectory.
td_matching_method : str ('gdal' or 'min') (default: 'gdal')
gdal: Uses GDAL's standard method. Here, the target resolution is
calculated by averaging the extent's pixel sizes over both x and y
axes. This approach returns a zoom level which may not have the
best quality but will speed up reading significantly.
min: Returns the zoom level which matches the minimum resolution of the
            extent's four corner pixels. This approach returns the zoom level
with the best possible quality but with low performance. If the
tile extent is outside of the destination pyramid, a
TopologicalError will be raised.
td_matching_max_zoom : int (optional, default: None)
If set, it will prevent reading from zoom levels above the maximum.
td_matching_precision : int (default: 8)
Round resolutions to n digits before comparing.
td_fallback_to_higher_zoom : bool (default: False)
In case no data is found at zoom level, try to read data from higher
zoom levels. Enabling this setting can lead to many IO requests in
areas with no data.
clip_pixelbuffer : int
Use pixelbuffer when clipping output by geometry. (default: 0)
Output
------
np.ndarray
"""
# read clip geometry
if "clip" in mp.params["input"]:
clip_geom = mp.open("clip").read()
if not clip_geom:
logger.debug("no clip data over tile")
return "empty"
else:
clip_geom = []
with mp.open(
"raster",
matching_method=td_matching_method,
matching_max_zoom=td_matching_max_zoom,
matching_precision=td_matching_precision,
fallback_to_higher_zoom=td_fallback_to_higher_zoom,
resampling=td_resampling
) as raster:
raster_data = raster.read()
if raster.is_empty() or raster_data[0].mask.all():
logger.debug("raster empty")
return "empty"
if clip_geom:
# apply original nodata mask and clip
clipped = mp.clip(
np.where(raster_data[0].mask, mp.params["output"].nodata, raster_data),
clip_geom,
clip_buffer=clip_pixelbuffer,
inverted=True
)
return np.where(clipped.mask, clipped, mp.params["output"].nodata)
else:
return np.where(raster_data[0].mask, mp.params["output"].nodata, raster_data)
|
def execute(
mp,
td_resampling="nearest",
td_matching_method="gdal",
td_matching_max_zoom=None,
td_matching_precision=8,
td_fallback_to_higher_zoom=False,
clip_pixelbuffer=0,
**kwargs
):
"""
Convert and optionally clip input raster data.
Inputs:
-------
raster
singleband or multiband data input
clip (optional)
vector data used to clip output
Parameters
----------
td_resampling : str (default: 'nearest')
Resampling used when reading from TileDirectory.
td_matching_method : str ('gdal' or 'min') (default: 'gdal')
gdal: Uses GDAL's standard method. Here, the target resolution is
calculated by averaging the extent's pixel sizes over both x and y
axes. This approach returns a zoom level which may not have the
best quality but will speed up reading significantly.
min: Returns the zoom level which matches the minimum resolution of the
            extent's four corner pixels. This approach returns the zoom level
with the best possible quality but with low performance. If the
tile extent is outside of the destination pyramid, a
TopologicalError will be raised.
td_matching_max_zoom : int (optional, default: None)
If set, it will prevent reading from zoom levels above the maximum.
td_matching_precision : int (default: 8)
Round resolutions to n digits before comparing.
td_fallback_to_higher_zoom : bool (default: False)
In case no data is found at zoom level, try to read data from higher
zoom levels. Enabling this setting can lead to many IO requests in
areas with no data.
clip_pixelbuffer : int
Use pixelbuffer when clipping output by geometry. (default: 0)
Output
------
np.ndarray
"""
# read clip geometry
if "clip" in mp.params["input"]:
clip_geom = mp.open("clip").read()
if not clip_geom:
logger.debug("no clip data over tile")
return "empty"
else:
clip_geom = []
with mp.open(
"raster",
matching_method=td_matching_method,
matching_max_zoom=td_matching_max_zoom,
matching_precision=td_matching_precision,
fallback_to_higher_zoom=td_fallback_to_higher_zoom,
resampling=td_resampling
) as raster:
raster_data = raster.read()
if raster.is_empty() or raster_data[0].mask.all():
logger.debug("raster empty")
return "empty"
if clip_geom:
# apply original nodata mask and clip
clipped = mp.clip(
np.where(raster_data[0].mask, mp.params["output"].nodata, raster_data),
clip_geom,
clip_buffer=clip_pixelbuffer,
inverted=True
)
return np.where(clipped.mask, clipped, mp.params["output"].nodata)
else:
return np.where(raster_data[0].mask, mp.params["output"].nodata, raster_data)
|
[
"Convert",
"and",
"optionally",
"clip",
"input",
"raster",
"data",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/processes/convert.py#L7-L88
|
[
"def",
"execute",
"(",
"mp",
",",
"td_resampling",
"=",
"\"nearest\"",
",",
"td_matching_method",
"=",
"\"gdal\"",
",",
"td_matching_max_zoom",
"=",
"None",
",",
"td_matching_precision",
"=",
"8",
",",
"td_fallback_to_higher_zoom",
"=",
"False",
",",
"clip_pixelbuffer",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"# read clip geometry",
"if",
"\"clip\"",
"in",
"mp",
".",
"params",
"[",
"\"input\"",
"]",
":",
"clip_geom",
"=",
"mp",
".",
"open",
"(",
"\"clip\"",
")",
".",
"read",
"(",
")",
"if",
"not",
"clip_geom",
":",
"logger",
".",
"debug",
"(",
"\"no clip data over tile\"",
")",
"return",
"\"empty\"",
"else",
":",
"clip_geom",
"=",
"[",
"]",
"with",
"mp",
".",
"open",
"(",
"\"raster\"",
",",
"matching_method",
"=",
"td_matching_method",
",",
"matching_max_zoom",
"=",
"td_matching_max_zoom",
",",
"matching_precision",
"=",
"td_matching_precision",
",",
"fallback_to_higher_zoom",
"=",
"td_fallback_to_higher_zoom",
",",
"resampling",
"=",
"td_resampling",
")",
"as",
"raster",
":",
"raster_data",
"=",
"raster",
".",
"read",
"(",
")",
"if",
"raster",
".",
"is_empty",
"(",
")",
"or",
"raster_data",
"[",
"0",
"]",
".",
"mask",
".",
"all",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"raster empty\"",
")",
"return",
"\"empty\"",
"if",
"clip_geom",
":",
"# apply original nodata mask and clip",
"clipped",
"=",
"mp",
".",
"clip",
"(",
"np",
".",
"where",
"(",
"raster_data",
"[",
"0",
"]",
".",
"mask",
",",
"mp",
".",
"params",
"[",
"\"output\"",
"]",
".",
"nodata",
",",
"raster_data",
")",
",",
"clip_geom",
",",
"clip_buffer",
"=",
"clip_pixelbuffer",
",",
"inverted",
"=",
"True",
")",
"return",
"np",
".",
"where",
"(",
"clipped",
".",
"mask",
",",
"clipped",
",",
"mp",
".",
"params",
"[",
"\"output\"",
"]",
".",
"nodata",
")",
"else",
":",
"return",
"np",
".",
"where",
"(",
"raster_data",
"[",
"0",
"]",
".",
"mask",
",",
"mp",
".",
"params",
"[",
"\"output\"",
"]",
".",
"nodata",
",",
"raster_data",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
get_best_zoom_level
|
Determine the best base zoom level for a raster.
"Best" means the maximum zoom level where no oversampling has to be done.
Parameters
----------
input_file : path to raster file
tile_pyramid_type : ``TilePyramid`` projection (``geodetic`` or ``mercator``)
Returns
-------
zoom : integer
|
mapchete/io/__init__.py
|
def get_best_zoom_level(input_file, tile_pyramid_type):
"""
Determine the best base zoom level for a raster.
"Best" means the maximum zoom level where no oversampling has to be done.
Parameters
----------
input_file : path to raster file
    tile_pyramid_type : ``TilePyramid`` projection (``geodetic`` or ``mercator``)
Returns
-------
zoom : integer
"""
tile_pyramid = BufferedTilePyramid(tile_pyramid_type)
with rasterio.open(input_file, "r") as src:
xmin, ymin, xmax, ymax = reproject_geometry(
segmentize_geometry(
box(
src.bounds.left, src.bounds.bottom, src.bounds.right,
src.bounds.top
),
get_segmentize_value(input_file, tile_pyramid)
),
src_crs=src.crs, dst_crs=tile_pyramid.crs
).bounds
x_dif = xmax - xmin
y_dif = ymax - ymin
size = float(src.width + src.height)
avg_resolution = (
(x_dif / float(src.width)) * (float(src.width) / size) +
(y_dif / float(src.height)) * (float(src.height) / size)
)
for zoom in range(0, 40):
if tile_pyramid.pixel_x_size(zoom) <= avg_resolution:
return zoom-1
|
def get_best_zoom_level(input_file, tile_pyramid_type):
"""
Determine the best base zoom level for a raster.
"Best" means the maximum zoom level where no oversampling has to be done.
Parameters
----------
input_file : path to raster file
    tile_pyramid_type : ``TilePyramid`` projection (``geodetic`` or ``mercator``)
Returns
-------
zoom : integer
"""
tile_pyramid = BufferedTilePyramid(tile_pyramid_type)
with rasterio.open(input_file, "r") as src:
xmin, ymin, xmax, ymax = reproject_geometry(
segmentize_geometry(
box(
src.bounds.left, src.bounds.bottom, src.bounds.right,
src.bounds.top
),
get_segmentize_value(input_file, tile_pyramid)
),
src_crs=src.crs, dst_crs=tile_pyramid.crs
).bounds
x_dif = xmax - xmin
y_dif = ymax - ymin
size = float(src.width + src.height)
avg_resolution = (
(x_dif / float(src.width)) * (float(src.width) / size) +
(y_dif / float(src.height)) * (float(src.height) / size)
)
for zoom in range(0, 40):
if tile_pyramid.pixel_x_size(zoom) <= avg_resolution:
return zoom-1
|
[
"Determine",
"the",
"best",
"base",
"zoom",
"level",
"for",
"a",
"raster",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/__init__.py#L27-L64
|
[
"def",
"get_best_zoom_level",
"(",
"input_file",
",",
"tile_pyramid_type",
")",
":",
"tile_pyramid",
"=",
"BufferedTilePyramid",
"(",
"tile_pyramid_type",
")",
"with",
"rasterio",
".",
"open",
"(",
"input_file",
",",
"\"r\"",
")",
"as",
"src",
":",
"xmin",
",",
"ymin",
",",
"xmax",
",",
"ymax",
"=",
"reproject_geometry",
"(",
"segmentize_geometry",
"(",
"box",
"(",
"src",
".",
"bounds",
".",
"left",
",",
"src",
".",
"bounds",
".",
"bottom",
",",
"src",
".",
"bounds",
".",
"right",
",",
"src",
".",
"bounds",
".",
"top",
")",
",",
"get_segmentize_value",
"(",
"input_file",
",",
"tile_pyramid",
")",
")",
",",
"src_crs",
"=",
"src",
".",
"crs",
",",
"dst_crs",
"=",
"tile_pyramid",
".",
"crs",
")",
".",
"bounds",
"x_dif",
"=",
"xmax",
"-",
"xmin",
"y_dif",
"=",
"ymax",
"-",
"ymin",
"size",
"=",
"float",
"(",
"src",
".",
"width",
"+",
"src",
".",
"height",
")",
"avg_resolution",
"=",
"(",
"(",
"x_dif",
"/",
"float",
"(",
"src",
".",
"width",
")",
")",
"*",
"(",
"float",
"(",
"src",
".",
"width",
")",
"/",
"size",
")",
"+",
"(",
"y_dif",
"/",
"float",
"(",
"src",
".",
"height",
")",
")",
"*",
"(",
"float",
"(",
"src",
".",
"height",
")",
"/",
"size",
")",
")",
"for",
"zoom",
"in",
"range",
"(",
"0",
",",
"40",
")",
":",
"if",
"tile_pyramid",
".",
"pixel_x_size",
"(",
"zoom",
")",
"<=",
"avg_resolution",
":",
"return",
"zoom",
"-",
"1"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
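A minimal sketch; "dem.tif" stands in for any rasterio-readable file:

from mapchete.io import get_best_zoom_level

# highest geodetic zoom level that does not oversample the source raster
zoom = get_best_zoom_level("dem.tif", "geodetic")
print(zoom)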
get_segmentize_value
|
Return the recommended segmentation value in input file units.
It is calculated by multiplying the raster pixel size with the tile shape in
pixels.
Parameters
----------
input_file : str
location of a file readable by rasterio
tile_pyramid : ``TilePyramid`` or ``BufferedTilePyramid``
tile pyramid to estimate target tile size
Returns
-------
segmentize value : float
suggested length for segmentizing lines when reprojecting file bounds
|
mapchete/io/__init__.py
|
def get_segmentize_value(input_file=None, tile_pyramid=None):
"""
Return the recommended segmentation value in input file units.
    It is calculated by multiplying the raster pixel size with the tile shape in
pixels.
Parameters
----------
input_file : str
location of a file readable by rasterio
    tile_pyramid : ``TilePyramid`` or ``BufferedTilePyramid``
tile pyramid to estimate target tile size
Returns
-------
    segmentize value : float
        suggested length for segmentizing lines when reprojecting file bounds
"""
with rasterio.open(input_file, "r") as input_raster:
pixelsize = input_raster.transform[0]
return pixelsize * tile_pyramid.tile_size
|
def get_segmentize_value(input_file=None, tile_pyramid=None):
"""
Return the recommended segmentation value in input file units.
    It is calculated by multiplying the raster pixel size with the tile shape in
pixels.
Parameters
----------
input_file : str
location of a file readable by rasterio
    tile_pyramid : ``TilePyramid`` or ``BufferedTilePyramid``
tile pyramid to estimate target tile size
Returns
-------
    segmentize value : float
        suggested length for segmentizing lines when reprojecting file bounds
"""
with rasterio.open(input_file, "r") as input_raster:
pixelsize = input_raster.transform[0]
return pixelsize * tile_pyramid.tile_size
|
[
"Return",
"the",
"recommended",
"segmentation",
"value",
"in",
"input",
"file",
"units",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/__init__.py#L67-L88
|
[
"def",
"get_segmentize_value",
"(",
"input_file",
"=",
"None",
",",
"tile_pyramid",
"=",
"None",
")",
":",
"with",
"rasterio",
".",
"open",
"(",
"input_file",
",",
"\"r\"",
")",
"as",
"input_raster",
":",
"pixelsize",
"=",
"input_raster",
".",
"transform",
"[",
"0",
"]",
"return",
"pixelsize",
"*",
"tile_pyramid",
".",
"tile_size"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
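Sketch under the same assumption of a hypothetical "dem.tif"; for a 256 px geodetic pyramid the result is simply pixel size times 256 in the raster's own units:

from mapchete.io import get_segmentize_value
from mapchete.tile import BufferedTilePyramid

print(get_segmentize_value("dem.tif", BufferedTilePyramid("geodetic")))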
tile_to_zoom_level
|
Determine the best zoom level in target TilePyramid from given Tile.
Parameters
----------
tile : BufferedTile
dst_pyramid : BufferedTilePyramid
matching_method : str ('gdal' or 'min')
gdal: Uses GDAL's standard method. Here, the target resolution is calculated by
averaging the extent's pixel sizes over both x and y axes. This approach
returns a zoom level which may not have the best quality but will speed up
reading significantly.
min: Returns the zoom level which matches the minimum resolution of the extent's
four corner pixels. This approach returns the zoom level with the best
possible quality but with low performance. If the tile extent is outside of
the destination pyramid, a TopologicalError will be raised.
precision : int
Round resolutions to n digits before comparing.
Returns
-------
zoom : int
|
mapchete/io/__init__.py
|
def tile_to_zoom_level(tile, dst_pyramid=None, matching_method="gdal", precision=8):
"""
Determine the best zoom level in target TilePyramid from given Tile.
Parameters
----------
tile : BufferedTile
dst_pyramid : BufferedTilePyramid
matching_method : str ('gdal' or 'min')
gdal: Uses GDAL's standard method. Here, the target resolution is calculated by
averaging the extent's pixel sizes over both x and y axes. This approach
returns a zoom level which may not have the best quality but will speed up
reading significantly.
min: Returns the zoom level which matches the minimum resolution of the extent's
four corner pixels. This approach returns the zoom level with the best
possible quality but with low performance. If the tile extent is outside of
the destination pyramid, a TopologicalError will be raised.
precision : int
Round resolutions to n digits before comparing.
Returns
-------
zoom : int
"""
def width_height(bounds):
try:
l, b, r, t = reproject_geometry(
box(*bounds), src_crs=tile.crs, dst_crs=dst_pyramid.crs
).bounds
except ValueError:
raise TopologicalError("bounds cannot be translated into target CRS")
return r - l, t - b
if tile.tp.crs == dst_pyramid.crs:
return tile.zoom
else:
if matching_method == "gdal":
# use rasterio/GDAL method to calculate default warp target properties
transform, width, height = calculate_default_transform(
tile.tp.crs,
dst_pyramid.crs,
tile.width,
tile.height,
*tile.bounds
)
# this is the resolution the tile would have in destination TilePyramid CRS
tile_resolution = round(transform[0], precision)
elif matching_method == "min":
# calculate the minimum pixel size from the four tile corner pixels
l, b, r, t = tile.bounds
x = tile.pixel_x_size
y = tile.pixel_y_size
res = []
for bounds in [
(l, t - y, l + x, t), # left top
(l, b, l + x, b + y), # left bottom
(r - x, b, r, b + y), # right bottom
(r - x, t - y, r, t) # right top
]:
try:
w, h = width_height(bounds)
res.extend([w, h])
except TopologicalError:
logger.debug("pixel outside of destination pyramid")
if res:
tile_resolution = round(min(res), precision)
else:
raise TopologicalError("tile outside of destination pyramid")
else:
            raise ValueError("invalid method given: %s" % matching_method)
logger.debug(
"we are looking for a zoom level interpolating to %s resolution",
tile_resolution
)
zoom = 0
while True:
td_resolution = round(dst_pyramid.pixel_x_size(zoom), precision)
if td_resolution <= tile_resolution:
break
zoom += 1
logger.debug("target zoom for %s: %s (%s)", tile_resolution, zoom, td_resolution)
return zoom
|
def tile_to_zoom_level(tile, dst_pyramid=None, matching_method="gdal", precision=8):
"""
Determine the best zoom level in target TilePyramid from given Tile.
Parameters
----------
tile : BufferedTile
dst_pyramid : BufferedTilePyramid
matching_method : str ('gdal' or 'min')
gdal: Uses GDAL's standard method. Here, the target resolution is calculated by
averaging the extent's pixel sizes over both x and y axes. This approach
returns a zoom level which may not have the best quality but will speed up
reading significantly.
min: Returns the zoom level which matches the minimum resolution of the extent's
four corner pixels. This approach returns the zoom level with the best
possible quality but with low performance. If the tile extent is outside of
the destination pyramid, a TopologicalError will be raised.
precision : int
Round resolutions to n digits before comparing.
Returns
-------
zoom : int
"""
def width_height(bounds):
try:
l, b, r, t = reproject_geometry(
box(*bounds), src_crs=tile.crs, dst_crs=dst_pyramid.crs
).bounds
except ValueError:
raise TopologicalError("bounds cannot be translated into target CRS")
return r - l, t - b
if tile.tp.crs == dst_pyramid.crs:
return tile.zoom
else:
if matching_method == "gdal":
# use rasterio/GDAL method to calculate default warp target properties
transform, width, height = calculate_default_transform(
tile.tp.crs,
dst_pyramid.crs,
tile.width,
tile.height,
*tile.bounds
)
# this is the resolution the tile would have in destination TilePyramid CRS
tile_resolution = round(transform[0], precision)
elif matching_method == "min":
# calculate the minimum pixel size from the four tile corner pixels
l, b, r, t = tile.bounds
x = tile.pixel_x_size
y = tile.pixel_y_size
res = []
for bounds in [
(l, t - y, l + x, t), # left top
(l, b, l + x, b + y), # left bottom
(r - x, b, r, b + y), # right bottom
(r - x, t - y, r, t) # right top
]:
try:
w, h = width_height(bounds)
res.extend([w, h])
except TopologicalError:
logger.debug("pixel outside of destination pyramid")
if res:
tile_resolution = round(min(res), precision)
else:
raise TopologicalError("tile outside of destination pyramid")
else:
            raise ValueError("invalid method given: %s" % matching_method)
logger.debug(
"we are looking for a zoom level interpolating to %s resolution",
tile_resolution
)
zoom = 0
while True:
td_resolution = round(dst_pyramid.pixel_x_size(zoom), precision)
if td_resolution <= tile_resolution:
break
zoom += 1
logger.debug("target zoom for %s: %s (%s)", tile_resolution, zoom, td_resolution)
return zoom
|
[
"Determine",
"the",
"best",
"zoom",
"level",
"in",
"target",
"TilePyramid",
"from",
"given",
"Tile",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/__init__.py#L91-L173
|
[
"def",
"tile_to_zoom_level",
"(",
"tile",
",",
"dst_pyramid",
"=",
"None",
",",
"matching_method",
"=",
"\"gdal\"",
",",
"precision",
"=",
"8",
")",
":",
"def",
"width_height",
"(",
"bounds",
")",
":",
"try",
":",
"l",
",",
"b",
",",
"r",
",",
"t",
"=",
"reproject_geometry",
"(",
"box",
"(",
"*",
"bounds",
")",
",",
"src_crs",
"=",
"tile",
".",
"crs",
",",
"dst_crs",
"=",
"dst_pyramid",
".",
"crs",
")",
".",
"bounds",
"except",
"ValueError",
":",
"raise",
"TopologicalError",
"(",
"\"bounds cannot be translated into target CRS\"",
")",
"return",
"r",
"-",
"l",
",",
"t",
"-",
"b",
"if",
"tile",
".",
"tp",
".",
"crs",
"==",
"dst_pyramid",
".",
"crs",
":",
"return",
"tile",
".",
"zoom",
"else",
":",
"if",
"matching_method",
"==",
"\"gdal\"",
":",
"# use rasterio/GDAL method to calculate default warp target properties",
"transform",
",",
"width",
",",
"height",
"=",
"calculate_default_transform",
"(",
"tile",
".",
"tp",
".",
"crs",
",",
"dst_pyramid",
".",
"crs",
",",
"tile",
".",
"width",
",",
"tile",
".",
"height",
",",
"*",
"tile",
".",
"bounds",
")",
"# this is the resolution the tile would have in destination TilePyramid CRS",
"tile_resolution",
"=",
"round",
"(",
"transform",
"[",
"0",
"]",
",",
"precision",
")",
"elif",
"matching_method",
"==",
"\"min\"",
":",
"# calculate the minimum pixel size from the four tile corner pixels",
"l",
",",
"b",
",",
"r",
",",
"t",
"=",
"tile",
".",
"bounds",
"x",
"=",
"tile",
".",
"pixel_x_size",
"y",
"=",
"tile",
".",
"pixel_y_size",
"res",
"=",
"[",
"]",
"for",
"bounds",
"in",
"[",
"(",
"l",
",",
"t",
"-",
"y",
",",
"l",
"+",
"x",
",",
"t",
")",
",",
"# left top",
"(",
"l",
",",
"b",
",",
"l",
"+",
"x",
",",
"b",
"+",
"y",
")",
",",
"# left bottom",
"(",
"r",
"-",
"x",
",",
"b",
",",
"r",
",",
"b",
"+",
"y",
")",
",",
"# right bottom",
"(",
"r",
"-",
"x",
",",
"t",
"-",
"y",
",",
"r",
",",
"t",
")",
"# right top",
"]",
":",
"try",
":",
"w",
",",
"h",
"=",
"width_height",
"(",
"bounds",
")",
"res",
".",
"extend",
"(",
"[",
"w",
",",
"h",
"]",
")",
"except",
"TopologicalError",
":",
"logger",
".",
"debug",
"(",
"\"pixel outside of destination pyramid\"",
")",
"if",
"res",
":",
"tile_resolution",
"=",
"round",
"(",
"min",
"(",
"res",
")",
",",
"precision",
")",
"else",
":",
"raise",
"TopologicalError",
"(",
"\"tile outside of destination pyramid\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"invalid method given: %s\"",
",",
"matching_method",
")",
"logger",
".",
"debug",
"(",
"\"we are looking for a zoom level interpolating to %s resolution\"",
",",
"tile_resolution",
")",
"zoom",
"=",
"0",
"while",
"True",
":",
"td_resolution",
"=",
"round",
"(",
"dst_pyramid",
".",
"pixel_x_size",
"(",
"zoom",
")",
",",
"precision",
")",
"if",
"td_resolution",
"<=",
"tile_resolution",
":",
"break",
"zoom",
"+=",
"1",
"logger",
".",
"debug",
"(",
"\"target zoom for %s: %s (%s)\"",
",",
"tile_resolution",
",",
"zoom",
",",
"td_resolution",
")",
"return",
"zoom"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
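A sketch matching a mercator tile against a geodetic pyramid; the tile indices are arbitrary valid values:

from mapchete.io import tile_to_zoom_level
from mapchete.tile import BufferedTilePyramid

tile = BufferedTilePyramid("mercator").tile(6, 10, 10)
# fast GDAL-style matching (default) vs. the slower but more precise method
print(tile_to_zoom_level(tile, dst_pyramid=BufferedTilePyramid("geodetic")))
print(tile_to_zoom_level(
    tile, dst_pyramid=BufferedTilePyramid("geodetic"), matching_method="min"
))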
path_is_remote
|
Determine whether file path is remote or local.
Parameters
----------
path : path to file
s3 : bool
also check for ``s3://`` and ``/vsis3/`` prefixes (default: True)
Returns
-------
is_remote : bool
|
mapchete/io/__init__.py
|
def path_is_remote(path, s3=True):
"""
Determine whether file path is remote or local.
Parameters
----------
    path : path to file
    s3 : bool
        also check for ``s3://`` and ``/vsis3/`` prefixes (default: True)
Returns
-------
is_remote : bool
"""
prefixes = ("http://", "https://", "/vsicurl/")
if s3:
prefixes += ("s3://", "/vsis3/")
return path.startswith(prefixes)
|
def path_is_remote(path, s3=True):
"""
Determine whether file path is remote or local.
Parameters
----------
    path : path to file
    s3 : bool
        also check for ``s3://`` and ``/vsis3/`` prefixes (default: True)
Returns
-------
is_remote : bool
"""
prefixes = ("http://", "https://", "/vsicurl/")
if s3:
prefixes += ("s3://", "/vsis3/")
return path.startswith(prefixes)
|
[
"Determine",
"whether",
"file",
"path",
"is",
"remote",
"or",
"local",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/__init__.py#L176-L191
|
[
"def",
"path_is_remote",
"(",
"path",
",",
"s3",
"=",
"True",
")",
":",
"prefixes",
"=",
"(",
"\"http://\"",
",",
"\"https://\"",
",",
"\"/vsicurl/\"",
")",
"if",
"s3",
":",
"prefixes",
"+=",
"(",
"\"s3://\"",
",",
"\"/vsis3/\"",
")",
"return",
"path",
".",
"startswith",
"(",
"prefixes",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
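The prefix check is purely string based, so it can be demonstrated without any I/O:

from mapchete.io import path_is_remote

print(path_is_remote("https://example.com/data.tif"))     # True
print(path_is_remote("s3://bucket/data.tif"))             # True
print(path_is_remote("s3://bucket/data.tif", s3=False))   # False
print(path_is_remote("/tmp/data.tif"))                    # False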
path_exists
|
Check if file exists either remote or local.
Parameters:
-----------
path : path to file
Returns:
--------
exists : bool
|
mapchete/io/__init__.py
|
def path_exists(path):
"""
Check if file exists either remote or local.
Parameters:
-----------
path : path to file
Returns:
--------
exists : bool
"""
if path.startswith(("http://", "https://")):
try:
urlopen(path).info()
return True
except HTTPError as e:
if e.code == 404:
return False
else:
raise
elif path.startswith("s3://"):
bucket = get_boto3_bucket(path.split("/")[2])
key = "/".join(path.split("/")[3:])
for obj in bucket.objects.filter(Prefix=key):
if obj.key == key:
return True
else:
return False
else:
logger.debug("%s exists: %s", path, os.path.exists(path))
return os.path.exists(path)
|
def path_exists(path):
"""
Check if file exists either remote or local.
Parameters:
-----------
path : path to file
Returns:
--------
exists : bool
"""
if path.startswith(("http://", "https://")):
try:
urlopen(path).info()
return True
except HTTPError as e:
if e.code == 404:
return False
else:
raise
elif path.startswith("s3://"):
bucket = get_boto3_bucket(path.split("/")[2])
key = "/".join(path.split("/")[3:])
for obj in bucket.objects.filter(Prefix=key):
if obj.key == key:
return True
else:
return False
else:
logger.debug("%s exists: %s", path, os.path.exists(path))
return os.path.exists(path)
|
[
"Check",
"if",
"file",
"exists",
"either",
"remote",
"or",
"local",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/__init__.py#L194-L225
|
[
"def",
"path_exists",
"(",
"path",
")",
":",
"if",
"path",
".",
"startswith",
"(",
"(",
"\"http://\"",
",",
"\"https://\"",
")",
")",
":",
"try",
":",
"urlopen",
"(",
"path",
")",
".",
"info",
"(",
")",
"return",
"True",
"except",
"HTTPError",
"as",
"e",
":",
"if",
"e",
".",
"code",
"==",
"404",
":",
"return",
"False",
"else",
":",
"raise",
"elif",
"path",
".",
"startswith",
"(",
"\"s3://\"",
")",
":",
"bucket",
"=",
"get_boto3_bucket",
"(",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"2",
"]",
")",
"key",
"=",
"\"/\"",
".",
"join",
"(",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"3",
":",
"]",
")",
"for",
"obj",
"in",
"bucket",
".",
"objects",
".",
"filter",
"(",
"Prefix",
"=",
"key",
")",
":",
"if",
"obj",
".",
"key",
"==",
"key",
":",
"return",
"True",
"else",
":",
"return",
"False",
"else",
":",
"logger",
".",
"debug",
"(",
"\"%s exists: %s\"",
",",
"path",
",",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
")",
"return",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
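Usage sketch; the paths are hypothetical, and the HTTP branch returns False only on a 404 while other HTTP errors propagate:

from mapchete.io import path_exists

print(path_exists("/etc/hostname"))                    # local check via os.path.exists
print(path_exists("https://example.com/missing.tif"))  # False if the server answers 404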
absolute_path
|
Return absolute path if path is local.
Parameters:
-----------
path : path to file
base_dir : base directory used for absolute path
Returns:
--------
absolute path
|
mapchete/io/__init__.py
|
def absolute_path(path=None, base_dir=None):
"""
Return absolute path if path is local.
Parameters:
-----------
path : path to file
base_dir : base directory used for absolute path
Returns:
--------
absolute path
"""
if path_is_remote(path):
return path
else:
if os.path.isabs(path):
return path
else:
if base_dir is None or not os.path.isabs(base_dir):
raise TypeError("base_dir must be an absolute path.")
return os.path.abspath(os.path.join(base_dir, path))
|
def absolute_path(path=None, base_dir=None):
"""
Return absolute path if path is local.
Parameters:
-----------
path : path to file
base_dir : base directory used for absolute path
Returns:
--------
absolute path
"""
if path_is_remote(path):
return path
else:
if os.path.isabs(path):
return path
else:
if base_dir is None or not os.path.isabs(base_dir):
raise TypeError("base_dir must be an absolute path.")
return os.path.abspath(os.path.join(base_dir, path))
|
[
"Return",
"absolute",
"path",
"if",
"path",
"is",
"local",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/__init__.py#L228-L249
|
[
"def",
"absolute_path",
"(",
"path",
"=",
"None",
",",
"base_dir",
"=",
"None",
")",
":",
"if",
"path_is_remote",
"(",
"path",
")",
":",
"return",
"path",
"else",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"return",
"path",
"else",
":",
"if",
"base_dir",
"is",
"None",
"or",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"base_dir",
")",
":",
"raise",
"TypeError",
"(",
"\"base_dir must be an absolute path.\"",
")",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"path",
")",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
relative_path
|
Return relative path if path is local.
Parameters:
-----------
path : path to file
base_dir : directory the path should be relative to
Returns:
--------
relative path
|
mapchete/io/__init__.py
|
def relative_path(path=None, base_dir=None):
"""
Return relative path if path is local.
Parameters:
-----------
path : path to file
    base_dir : directory the path should be relative to
Returns:
--------
relative path
"""
if path_is_remote(path) or not os.path.isabs(path):
return path
else:
return os.path.relpath(path, base_dir)
|
def relative_path(path=None, base_dir=None):
"""
Return relative path if path is local.
Parameters:
-----------
path : path to file
    base_dir : directory the path should be relative to
Returns:
--------
relative path
"""
if path_is_remote(path) or not os.path.isabs(path):
return path
else:
return os.path.relpath(path, base_dir)
|
[
"Return",
"relative",
"path",
"if",
"path",
"is",
"local",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/__init__.py#L252-L268
|
[
"def",
"relative_path",
"(",
"path",
"=",
"None",
",",
"base_dir",
"=",
"None",
")",
":",
"if",
"path_is_remote",
"(",
"path",
")",
"or",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"return",
"path",
"else",
":",
"return",
"os",
".",
"path",
".",
"relpath",
"(",
"path",
",",
"base_dir",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
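The two path helpers above are symmetric, so one sketch covers both; remote paths pass through untouched:

from mapchete.io import absolute_path, relative_path

print(absolute_path(path="data/input.geojson", base_dir="/projects/demo"))
# /projects/demo/data/input.geojson
print(relative_path(path="/projects/demo/data/input.geojson", base_dir="/projects/demo"))
# data/input.geojson
print(absolute_path(path="s3://bucket/input.geojson", base_dir="/projects/demo"))
# s3://bucket/input.geojson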
write_json
|
Write local or remote.
|
mapchete/io/__init__.py
|
def write_json(path, params):
"""Write local or remote."""
logger.debug("write %s to %s", params, path)
if path.startswith("s3://"):
bucket = get_boto3_bucket(path.split("/")[2])
key = "/".join(path.split("/")[3:])
logger.debug("upload %s", key)
bucket.put_object(
Key=key,
Body=json.dumps(params, sort_keys=True, indent=4)
)
else:
makedirs(os.path.dirname(path))
with open(path, 'w') as dst:
json.dump(params, dst, sort_keys=True, indent=4)
|
def write_json(path, params):
"""Write local or remote."""
logger.debug("write %s to %s", params, path)
if path.startswith("s3://"):
bucket = get_boto3_bucket(path.split("/")[2])
key = "/".join(path.split("/")[3:])
logger.debug("upload %s", key)
bucket.put_object(
Key=key,
Body=json.dumps(params, sort_keys=True, indent=4)
)
else:
makedirs(os.path.dirname(path))
with open(path, 'w') as dst:
json.dump(params, dst, sort_keys=True, indent=4)
|
[
"Write",
"local",
"or",
"remote",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/__init__.py#L286-L300
|
[
"def",
"write_json",
"(",
"path",
",",
"params",
")",
":",
"logger",
".",
"debug",
"(",
"\"write %s to %s\"",
",",
"params",
",",
"path",
")",
"if",
"path",
".",
"startswith",
"(",
"\"s3://\"",
")",
":",
"bucket",
"=",
"get_boto3_bucket",
"(",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"2",
"]",
")",
"key",
"=",
"\"/\"",
".",
"join",
"(",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"3",
":",
"]",
")",
"logger",
".",
"debug",
"(",
"\"upload %s\"",
",",
"key",
")",
"bucket",
".",
"put_object",
"(",
"Key",
"=",
"key",
",",
"Body",
"=",
"json",
".",
"dumps",
"(",
"params",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")",
")",
"else",
":",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
")",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"dst",
":",
"json",
".",
"dump",
"(",
"params",
",",
"dst",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
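A local-write sketch; the path is hypothetical, and missing parent directories are created by the makedirs() helper used in the function:

from mapchete.io import write_json

write_json("/tmp/mapchete_demo/params.json", {"zoom_levels": [0, 5], "pixelbuffer": 0})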
read_json
|
Read local or remote.
|
mapchete/io/__init__.py
|
def read_json(path):
"""Read local or remote."""
if path.startswith(("http://", "https://")):
try:
return json.loads(urlopen(path).read().decode())
except HTTPError:
            raise FileNotFoundError("%s not found" % path)
elif path.startswith("s3://"):
bucket = get_boto3_bucket(path.split("/")[2])
key = "/".join(path.split("/")[3:])
for obj in bucket.objects.filter(Prefix=key):
if obj.key == key:
return json.loads(obj.get()['Body'].read().decode())
        raise FileNotFoundError("%s not found" % path)
else:
try:
with open(path, "r") as src:
return json.loads(src.read())
        except Exception:
            raise FileNotFoundError("%s not found" % path)
|
def read_json(path):
"""Read local or remote."""
if path.startswith(("http://", "https://")):
try:
return json.loads(urlopen(path).read().decode())
except HTTPError:
            raise FileNotFoundError("%s not found" % path)
elif path.startswith("s3://"):
bucket = get_boto3_bucket(path.split("/")[2])
key = "/".join(path.split("/")[3:])
for obj in bucket.objects.filter(Prefix=key):
if obj.key == key:
return json.loads(obj.get()['Body'].read().decode())
        raise FileNotFoundError("%s not found" % path)
else:
try:
with open(path, "r") as src:
return json.loads(src.read())
        except Exception:
            raise FileNotFoundError("%s not found" % path)
|
[
"Read",
"local",
"or",
"remote",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/__init__.py#L303-L322
|
[
"def",
"read_json",
"(",
"path",
")",
":",
"if",
"path",
".",
"startswith",
"(",
"(",
"\"http://\"",
",",
"\"https://\"",
")",
")",
":",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"urlopen",
"(",
"path",
")",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
")",
"except",
"HTTPError",
":",
"raise",
"FileNotFoundError",
"(",
"\"%s not found\"",
",",
"path",
")",
"elif",
"path",
".",
"startswith",
"(",
"\"s3://\"",
")",
":",
"bucket",
"=",
"get_boto3_bucket",
"(",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"2",
"]",
")",
"key",
"=",
"\"/\"",
".",
"join",
"(",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"3",
":",
"]",
")",
"for",
"obj",
"in",
"bucket",
".",
"objects",
".",
"filter",
"(",
"Prefix",
"=",
"key",
")",
":",
"if",
"obj",
".",
"key",
"==",
"key",
":",
"return",
"json",
".",
"loads",
"(",
"obj",
".",
"get",
"(",
")",
"[",
"'Body'",
"]",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
")",
"raise",
"FileNotFoundError",
"(",
"\"%s not found\"",
",",
"path",
")",
"else",
":",
"try",
":",
"with",
"open",
"(",
"path",
",",
"\"r\"",
")",
"as",
"src",
":",
"return",
"json",
".",
"loads",
"(",
"src",
".",
"read",
"(",
")",
")",
"except",
":",
"raise",
"FileNotFoundError",
"(",
"\"%s not found\"",
",",
"path",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
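Reading back the file from the write_json sketch above; missing paths raise FileNotFoundError for local, HTTP and S3 locations alike:

from mapchete.io import read_json

params = read_json("/tmp/mapchete_demo/params.json")
print(params["zoom_levels"])  # [0, 5]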
get_gdal_options
|
Return a merged set of custom and default GDAL/rasterio Env options.
If is_remote is set to True, the default GDAL_HTTP_OPTS are appended.
Parameters
----------
opts : dict or None
Explicit GDAL options.
is_remote : bool
Indicate whether Env is for a remote file.
Returns
-------
dictionary
|
mapchete/io/__init__.py
|
def get_gdal_options(opts, is_remote=False):
"""
Return a merged set of custom and default GDAL/rasterio Env options.
If is_remote is set to True, the default GDAL_HTTP_OPTS are appended.
Parameters
----------
opts : dict or None
Explicit GDAL options.
is_remote : bool
Indicate whether Env is for a remote file.
Returns
-------
dictionary
"""
user_opts = {} if opts is None else dict(**opts)
if is_remote:
return dict(GDAL_HTTP_OPTS, **user_opts)
else:
return user_opts
|
def get_gdal_options(opts, is_remote=False):
"""
Return a merged set of custom and default GDAL/rasterio Env options.
If is_remote is set to True, the default GDAL_HTTP_OPTS are appended.
Parameters
----------
opts : dict or None
Explicit GDAL options.
is_remote : bool
Indicate whether Env is for a remote file.
Returns
-------
dictionary
"""
user_opts = {} if opts is None else dict(**opts)
if is_remote:
return dict(GDAL_HTTP_OPTS, **user_opts)
else:
return user_opts
|
[
"Return",
"a",
"merged",
"set",
"of",
"custom",
"and",
"default",
"GDAL",
"/",
"rasterio",
"Env",
"options",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/__init__.py#L338-L359
|
[
"def",
"get_gdal_options",
"(",
"opts",
",",
"is_remote",
"=",
"False",
")",
":",
"user_opts",
"=",
"{",
"}",
"if",
"opts",
"is",
"None",
"else",
"dict",
"(",
"*",
"*",
"opts",
")",
"if",
"is_remote",
":",
"return",
"dict",
"(",
"GDAL_HTTP_OPTS",
",",
"*",
"*",
"user_opts",
")",
"else",
":",
"return",
"user_opts"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
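No I/O is involved, so the merge behavior is easy to show; GDAL_HTTP_TIMEOUT is a real GDAL config option used here as the custom override:

from mapchete.io import get_gdal_options

print(get_gdal_options(None))  # {}
remote = get_gdal_options({"GDAL_HTTP_TIMEOUT": 30}, is_remote=True)
print(remote["GDAL_HTTP_TIMEOUT"])  # 30; user options win over the defaults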
InputData.open
|
Return InputTile object.
Parameters
----------
tile : ``Tile``
Returns
-------
input tile : ``InputTile``
tile view of input data
|
mapchete/formats/default/mapchete_input.py
|
def open(self, tile, **kwargs):
"""
Return InputTile object.
Parameters
----------
tile : ``Tile``
Returns
-------
input tile : ``InputTile``
tile view of input data
"""
return self.process.config.output.open(tile, self.process, **kwargs)
|
def open(self, tile, **kwargs):
"""
Return InputTile object.
Parameters
----------
tile : ``Tile``
Returns
-------
input tile : ``InputTile``
tile view of input data
"""
return self.process.config.output.open(tile, self.process, **kwargs)
|
[
"Return",
"InputTile",
"object",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/mapchete_input.py#L56-L69
|
[
"def",
"open",
"(",
"self",
",",
"tile",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"process",
".",
"config",
".",
"output",
".",
"open",
"(",
"tile",
",",
"self",
".",
"process",
",",
"*",
"*",
"kwargs",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
InputData.bbox
|
Return data bounding box.
Parameters
----------
out_crs : ``rasterio.crs.CRS``
rasterio CRS object (default: CRS of process pyramid)
Returns
-------
bounding box : geometry
Shapely geometry object
|
mapchete/formats/default/mapchete_input.py
|
def bbox(self, out_crs=None):
"""
Return data bounding box.
Parameters
----------
out_crs : ``rasterio.crs.CRS``
rasterio CRS object (default: CRS of process pyramid)
Returns
-------
bounding box : geometry
Shapely geometry object
"""
return reproject_geometry(
self.process.config.area_at_zoom(),
src_crs=self.process.config.process_pyramid.crs,
dst_crs=self.pyramid.crs if out_crs is None else out_crs
)
|
def bbox(self, out_crs=None):
"""
Return data bounding box.
Parameters
----------
out_crs : ``rasterio.crs.CRS``
rasterio CRS object (default: CRS of process pyramid)
Returns
-------
bounding box : geometry
Shapely geometry object
"""
return reproject_geometry(
self.process.config.area_at_zoom(),
src_crs=self.process.config.process_pyramid.crs,
dst_crs=self.pyramid.crs if out_crs is None else out_crs
)
|
[
"Return",
"data",
"bounding",
"box",
"."
] |
ungarj/mapchete
|
python
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/mapchete_input.py#L71-L89
|
[
"def",
"bbox",
"(",
"self",
",",
"out_crs",
"=",
"None",
")",
":",
"return",
"reproject_geometry",
"(",
"self",
".",
"process",
".",
"config",
".",
"area_at_zoom",
"(",
")",
",",
"src_crs",
"=",
"self",
".",
"process",
".",
"config",
".",
"process_pyramid",
".",
"crs",
",",
"dst_crs",
"=",
"self",
".",
"pyramid",
".",
"crs",
"if",
"out_crs",
"is",
"None",
"else",
"out_crs",
")"
] |
d482918d0e66a5b414dff6aa7cc854e01fc60ee4
|
valid
|
win_activate
|
Activates (gives focus to) a window.
:param title:
:param text:
:return:
|
autoit/win.py
|
def win_activate(title, **kwargs):
"""
Activates (gives focus to) a window.
:param title:
:param text:
:return:
"""
text = kwargs.get("text", "")
ret = AUTO_IT.AU3_WinActivate(LPCWSTR(title), LPCWSTR(text))
return ret
|
def win_activate(title, **kwargs):
"""
Activates (gives focus to) a window.
:param title:
:param text:
:return:
"""
text = kwargs.get("text", "")
ret = AUTO_IT.AU3_WinActivate(LPCWSTR(title), LPCWSTR(text))
return ret
|
[
"Activates",
"(",
"gives",
"focus",
"to",
")",
"a",
"window",
".",
":",
"param",
"title",
":",
":",
"param",
"text",
":",
":",
"return",
":"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/win.py#L14-L23
|
[
"def",
"win_activate",
"(",
"title",
",",
"*",
"*",
"kwargs",
")",
":",
"text",
"=",
"kwargs",
".",
"get",
"(",
"\"text\"",
",",
"\"\"",
")",
"ret",
"=",
"AUTO_IT",
".",
"AU3_WinActivate",
"(",
"LPCWSTR",
"(",
"title",
")",
",",
"LPCWSTR",
"(",
"text",
")",
")",
"return",
"ret"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
win_exists
|
Checks to see if a specified window exists.
:param title: The title of the window to check.
:param text: The text of the window to check.
:return: Returns 1 if the window exists, otherwise returns 0.
|
autoit/win.py
|
def win_exists(title, **kwargs):
"""
Checks to see if a specified window exists.
:param title: The title of the window to check.
:param text: The text of the window to check.
:return: Returns 1 if the window exists, otherwise returns 0.
"""
text = kwargs.get("text", "")
ret = AUTO_IT.AU3_WinExists(LPCWSTR(title), LPCWSTR(text))
return ret
|
def win_exists(title, **kwargs):
"""
Checks to see if a specified window exists.
:param title: The title of the window to check.
:param text: The text of the window to check.
:return: Returns 1 if the window exists, otherwise returns 0.
"""
text = kwargs.get("text", "")
ret = AUTO_IT.AU3_WinExists(LPCWSTR(title), LPCWSTR(text))
return ret
|
[
"Checks",
"to",
"see",
"if",
"a",
"specified",
"window",
"exists",
".",
":",
"param",
"title",
":",
"The",
"title",
"of",
"the",
"window",
"to",
"check",
".",
":",
"param",
"text",
":",
"The",
"text",
"of",
"the",
"window",
"to",
"check",
".",
":",
"return",
":",
"Returns",
"1",
"if",
"the",
"window",
"exists",
"otherwise",
"returns",
"0",
"."
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/win.py#L83-L92
|
[
"def",
"win_exists",
"(",
"title",
",",
"*",
"*",
"kwargs",
")",
":",
"text",
"=",
"kwargs",
".",
"get",
"(",
"\"text\"",
",",
"\"\"",
")",
"ret",
"=",
"AUTO_IT",
".",
"AU3_WinExists",
"(",
"LPCWSTR",
"(",
"title",
")",
",",
"LPCWSTR",
"(",
"text",
")",
")",
"return",
"ret"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
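A Windows-only sketch combining win_exists with win_activate from above, assuming the usual pyautoit top-level re-export of the win module:

import autoit

# bring an open Notepad window to the foreground, if there is one
if autoit.win_exists("[CLASS:Notepad]"):
    autoit.win_activate("[CLASS:Notepad]")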
win_get_caret_pos
|
Returns the coordinates of the caret in the foreground window
:return:
|
autoit/win.py
|
def win_get_caret_pos():
"""
Returns the coordinates of the caret in the foreground window
:return:
"""
p = POINT()
AUTO_IT.AU3_WinGetCaretPos(byref(p))
return p.x, p.y
|
def win_get_caret_pos():
"""
Returns the coordinates of the caret in the foreground window
:return:
"""
p = POINT()
AUTO_IT.AU3_WinGetCaretPos(byref(p))
return p.x, p.y
|
[
"Returns",
"the",
"coordinates",
"of",
"the",
"caret",
"in",
"the",
"foreground",
"window",
":",
"return",
":"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/win.py#L106-L113
|
[
"def",
"win_get_caret_pos",
"(",
")",
":",
"p",
"=",
"POINT",
"(",
")",
"AUTO_IT",
".",
"AU3_WinGetCaretPos",
"(",
"byref",
"(",
"p",
")",
")",
"return",
"p",
".",
"x",
",",
"p",
".",
"y"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
win_get_state
|
Retrieves the state of a given window.
:param title:
:param text:
:return:
1 = Window exists
2 = Window is visible
4 = Window is enabled
8 = Window is active
16 = Window is minimized
32 = Window is maximized
|
autoit/win.py
|
def win_get_state(title, **kwargs):
"""
Retrieves the state of a given window.
:param title:
:param text:
:return:
1 = Window exists
2 = Window is visible
    4 = Window is enabled
8 = Window is active
16 = Window is minimized
    32 = Window is maximized
"""
text = kwargs.get("text", "")
res = AUTO_IT.AU3_WinGetState(LPCWSTR(title), LPCWSTR(text))
return res
|
def win_get_state(title, **kwargs):
"""
Retrieves the state of a given window.
:param title:
:param text:
:return:
1 = Window exists
2 = Window is visible
    4 = Window is enabled
8 = Window is active
16 = Window is minimized
    32 = Window is maximized
"""
text = kwargs.get("text", "")
res = AUTO_IT.AU3_WinGetState(LPCWSTR(title), LPCWSTR(text))
return res
|
[
"Retrieves",
"the",
"state",
"of",
"a",
"given",
"window",
".",
":",
"param",
"title",
":",
":",
"param",
"text",
":",
":",
"return",
":",
"1",
"=",
"Window",
"exists",
"2",
"=",
"Window",
"is",
"visible",
"4",
"=",
"Windows",
"is",
"enabled",
"8",
"=",
"Window",
"is",
"active",
"16",
"=",
"Window",
"is",
"minimized",
"32",
"=",
"Windows",
"is",
"maximized"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/win.py#L262-L277
|
[
"def",
"win_get_state",
"(",
"title",
",",
"*",
"*",
"kwargs",
")",
":",
"text",
"=",
"kwargs",
".",
"get",
"(",
"\"text\"",
",",
"\"\"",
")",
"res",
"=",
"AUTO_IT",
".",
"AU3_WinGetState",
"(",
"LPCWSTR",
"(",
"title",
")",
",",
"LPCWSTR",
"(",
"text",
")",
")",
"return",
"res"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
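Because `win_get_state` returns the states OR-ed together as a bit field, callers test individual bits with bitwise AND; a small sketch (the `autoit` import path is assumed, as above):

import autoit  # assumed import path

state = autoit.win_get_state("[CLASS:Notepad]")
if state & 1:
    print("window exists")
if state & 16:
    print("window is minimized")
if state & 32:
    print("window is maximized")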
win_menu_select_item
|
Usage:
win_menu_select_item("[CLASS:Notepad]", "", u"文件(&F)", u"退出(&X)")
:param title:
:param text:
:param items:
:return:
|
autoit/win.py
|
def win_menu_select_item(title, *items, **kwargs):
"""
Usage:
win_menu_select_item("[CLASS:Notepad]", "", u"文件(&F)", u"退出(&X)")
:param title:
:param text:
:param items:
:return:
"""
text = kwargs.get("text", "")
if not (0 < len(items) < 8):
        raise ValueError("expected between one and seven menu items")
f_items = [LPCWSTR(item) for item in items]
for i in xrange(8 - len(f_items)):
f_items.append(LPCWSTR(""))
ret = AUTO_IT.AU3_WinMenuSelectItem(LPCWSTR(title), LPCWSTR(text),
*f_items)
return ret
|
def win_menu_select_item(title, *items, **kwargs):
"""
Usage:
win_menu_select_item("[CLASS:Notepad]", "", u"文件(&F)", u"退出(&X)")
:param title:
:param text:
:param items:
:return:
"""
text = kwargs.get("text", "")
if not (0 < len(items) < 8):
        raise ValueError("expected between one and seven menu items")
f_items = [LPCWSTR(item) for item in items]
for i in xrange(8 - len(f_items)):
f_items.append(LPCWSTR(""))
ret = AUTO_IT.AU3_WinMenuSelectItem(LPCWSTR(title), LPCWSTR(text),
*f_items)
return ret
|
[
"Usage",
":",
"win_menu_select_item",
"(",
"[",
"CLASS",
":",
"Notepad",
"]",
"u",
"文件",
"(",
"&F",
")",
"u",
"退出",
"(",
"&X",
")",
")",
":",
"param",
"title",
":",
":",
"param",
"text",
":",
":",
"param",
"items",
":",
":",
"return",
":"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/win.py#L371-L389
|
[
"def",
"win_menu_select_item",
"(",
"title",
",",
"*",
"items",
",",
"*",
"*",
"kwargs",
")",
":",
"text",
"=",
"kwargs",
".",
"get",
"(",
"\"text\"",
",",
"\"\"",
")",
"if",
"not",
"(",
"0",
"<",
"len",
"(",
"items",
")",
"<",
"8",
")",
":",
"raise",
"ValueError",
"(",
"\"accepted none item or number of items exceed eight\"",
")",
"f_items",
"=",
"[",
"LPCWSTR",
"(",
"item",
")",
"for",
"item",
"in",
"items",
"]",
"for",
"i",
"in",
"xrange",
"(",
"8",
"-",
"len",
"(",
"f_items",
")",
")",
":",
"f_items",
".",
"append",
"(",
"LPCWSTR",
"(",
"\"\"",
")",
")",
"ret",
"=",
"AUTO_IT",
".",
"AU3_WinMenuSelectItem",
"(",
"LPCWSTR",
"(",
"title",
")",
",",
"LPCWSTR",
"(",
"text",
")",
",",
"*",
"f_items",
")",
"return",
"ret"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
win_set_trans
|
Sets the transparency of a window.
:param title:
:param trans: A number in the range 0 - 255. The larger the number,
the more transparent the window will become.
:param kwargs:
:return:
|
autoit/win.py
|
def win_set_trans(title, trans, **kwargs):
"""
Sets the transparency of a window.
:param title:
:param trans: A number in the range 0 - 255. The larger the number,
the more transparent the window will become.
:param kwargs:
:return:
"""
text = kwargs.get("text", "")
ret = AUTO_IT.AU3_WinSetTrans(LPCWSTR(title), LPCWSTR(text), INT(trans))
return ret
|
def win_set_trans(title, trans, **kwargs):
"""
Sets the transparency of a window.
:param title:
:param trans: A number in the range 0 - 255. The larger the number,
the more transparent the window will become.
:param kwargs:
:return:
"""
text = kwargs.get("text", "")
ret = AUTO_IT.AU3_WinSetTrans(LPCWSTR(title), LPCWSTR(text), INT(trans))
return ret
|
[
"Sets",
"the",
"transparency",
"of",
"a",
"window",
".",
":",
"param",
"title",
":",
":",
"param",
"trans",
":",
"A",
"number",
"in",
"the",
"range",
"0",
"-",
"255",
".",
"The",
"larger",
"the",
"number",
"the",
"more",
"transparent",
"the",
"window",
"will",
"become",
".",
":",
"param",
"kwargs",
":",
":",
"return",
":"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/win.py#L547-L559
|
[
"def",
"win_set_trans",
"(",
"title",
",",
"trans",
",",
"*",
"*",
"kwargs",
")",
":",
"text",
"=",
"kwargs",
".",
"get",
"(",
"\"text\"",
",",
"\"\"",
")",
"ret",
"=",
"AUTO_IT",
".",
"AU3_WinSetTrans",
"(",
"LPCWSTR",
"(",
"title",
")",
",",
"LPCWSTR",
"(",
"text",
")",
",",
"INT",
"(",
"trans",
")",
")",
"return",
"ret"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
auto_it_set_option
|
Changes the operation of various AutoIt functions/parameters
:param option: The option to change
:param param: The parameter (varies by option).
:return:
|
autoit/autoit.py
|
def auto_it_set_option(option, param):
"""
Changes the operation of various AutoIt functions/parameters
:param option: The option to change
:param param: The parameter (varies by option).
:return:
"""
pre_value = AUTO_IT.AU3_AutoItSetOption(LPCWSTR(option), INT(param))
return pre_value
|
def auto_it_set_option(option, param):
"""
Changes the operation of various AutoIt functions/parameters
:param option: The option to change
:param param: The parameter (varies by option).
:return:
"""
pre_value = AUTO_IT.AU3_AutoItSetOption(LPCWSTR(option), INT(param))
return pre_value
|
[
"Changes",
"the",
"operation",
"of",
"various",
"AutoIt",
"functions",
"/",
"parameters",
":",
"param",
"option",
":",
"The",
"option",
"to",
"change",
":",
"param",
"param",
":",
"The",
"parameter",
"(",
"varies",
"by",
"option",
")",
".",
":",
"return",
":"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/autoit.py#L95-L103
|
[
"def",
"auto_it_set_option",
"(",
"option",
",",
"param",
")",
":",
"pre_value",
"=",
"AUTO_IT",
".",
"AU3_AutoItSetOption",
"(",
"LPCWSTR",
"(",
"option",
")",
",",
"INT",
"(",
"param",
")",
")",
"return",
"pre_value"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
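A usage sketch for the option setter above. "WinTitleMatchMode" is a standard AutoIt option name, used here as an illustrative assumption; the previous value is returned, matching `pre_value` in the record:

import autoit  # assumed import path

# Switch title matching to substring mode (AutoIt mode 2); keep the old value
# so it can be restored afterwards.
previous = autoit.auto_it_set_option("WinTitleMatchMode", 2)
# ... do work with substring matching ...
autoit.auto_it_set_option("WinTitleMatchMode", previous)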
AutoItAPI.check
|
:param mark:
0 - do not check return value or error()
1 - check error()
2 - check return value
|
autoit/autoit.py
|
def check(self, mark=0, err_msg="", **kwds):
"""
:param mark:
        0 - do not check return value or error()
1 - check error()
2 - check return value
"""
unexpected_ret = kwds.get("unexpected_ret", (0,))
def _check(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
ret = fn(*args, **kwargs)
flags = reduce(
self._parser, [dict(num=mark, flags=[]), 2, 1])["flags"]
if 1 in flags:
if self._has_error():
raise AutoItError(err_msg)
if 2 in flags:
if self._has_unexpected_ret(ret, unexpected_ret):
raise AutoItError(err_msg)
return ret
return wrapper
return _check
|
def check(self, mark=0, err_msg="", **kwds):
"""
:param mark:
        0 - do not check return value or error()
1 - check error()
2 - check return value
"""
unexpected_ret = kwds.get("unexpected_ret", (0,))
def _check(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
ret = fn(*args, **kwargs)
flags = reduce(
self._parser, [dict(num=mark, flags=[]), 2, 1])["flags"]
if 1 in flags:
if self._has_error():
raise AutoItError(err_msg)
if 2 in flags:
if self._has_unexpected_ret(ret, unexpected_ret):
raise AutoItError(err_msg)
return ret
return wrapper
return _check
|
[
":",
"param",
"mark",
":",
"0",
"-",
"do",
"not",
"need",
"check",
"return",
"value",
"or",
"error",
"()",
"1",
"-",
"check",
"error",
"()",
"2",
"-",
"check",
"return",
"value"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/autoit.py#L61-L88
|
[
"def",
"check",
"(",
"self",
",",
"mark",
"=",
"0",
",",
"err_msg",
"=",
"\"\"",
",",
"*",
"*",
"kwds",
")",
":",
"unexpected_ret",
"=",
"kwds",
".",
"get",
"(",
"\"unexpected_ret\"",
",",
"(",
"0",
",",
")",
")",
"def",
"_check",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"flags",
"=",
"reduce",
"(",
"self",
".",
"_parser",
",",
"[",
"dict",
"(",
"num",
"=",
"mark",
",",
"flags",
"=",
"[",
"]",
")",
",",
"2",
",",
"1",
"]",
")",
"[",
"\"flags\"",
"]",
"if",
"1",
"in",
"flags",
":",
"if",
"self",
".",
"_has_error",
"(",
")",
":",
"raise",
"AutoItError",
"(",
"err_msg",
")",
"if",
"2",
"in",
"flags",
":",
"if",
"self",
".",
"_has_unexpected_ret",
"(",
"ret",
",",
"unexpected_ret",
")",
":",
"raise",
"AutoItError",
"(",
"err_msg",
")",
"return",
"ret",
"return",
"wrapper",
"return",
"_check"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
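The `reduce(self._parser, [dict(num=mark, flags=[]), 2, 1])` call above evidently decodes `mark` as a bit field over the flags 2 and 1 (so `mark=3` enables both checks). A standalone reconstruction of that decoding, offered as my sketch rather than the library's `_parser`:

def decode_mark(mark):
    """Hypothetical equivalent of the reduce()-based flag parsing above."""
    flags = []
    for bit in (2, 1):           # same order as the reduce() seed [state, 2, 1]
        if mark >= bit:
            mark -= bit
            flags.append(bit)
    return flags

assert decode_mark(0) == []      # check nothing
assert decode_mark(1) == [1]     # check error()
assert decode_mark(2) == [2]     # check the return value
assert decode_mark(3) == [2, 1]  # check both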
process_set_priority
|
Changes the priority of a process
:param process: The name or PID of the process to check.
:param priority:A flag which determines what priority to set
0 - Idle/Low
1 - Below Normal (Not supported on Windows 95/98/ME)
2 - Normal
3 - Above Normal (Not supported on Windows 95/98/ME)
4 - High
5 - Realtime (Use with caution, may make the system unstable)
:return:
|
autoit/process.py
|
def process_set_priority(process, priority):
"""
Changes the priority of a process
:param process: The name or PID of the process to check.
:param priority:A flag which determines what priority to set
0 - Idle/Low
1 - Below Normal (Not supported on Windows 95/98/ME)
2 - Normal
3 - Above Normal (Not supported on Windows 95/98/ME)
4 - High
5 - Realtime (Use with caution, may make the system unstable)
:return:
"""
ret = AUTO_IT.AU3_ProcessSetPriority(LPCWSTR(process), INT(priority))
if ret == 0:
if error() == 1:
raise AutoItError("set priority failed")
elif error() == 2:
            raise AutoItError("unsupported priority class was used")
return ret
|
def process_set_priority(process, priority):
"""
Changes the priority of a process
:param process: The name or PID of the process to check.
:param priority:A flag which determines what priority to set
0 - Idle/Low
1 - Below Normal (Not supported on Windows 95/98/ME)
2 - Normal
3 - Above Normal (Not supported on Windows 95/98/ME)
4 - High
5 - Realtime (Use with caution, may make the system unstable)
:return:
"""
ret = AUTO_IT.AU3_ProcessSetPriority(LPCWSTR(process), INT(priority))
if ret == 0:
if error() == 1:
raise AutoItError("set priority failed")
elif error() == 2:
            raise AutoItError("unsupported priority class was used")
return ret
|
[
"Changes",
"the",
"priority",
"of",
"a",
"process",
":",
"param",
"process",
":",
"The",
"name",
"or",
"PID",
"of",
"the",
"process",
"to",
"check",
".",
":",
"param",
"priority",
":",
"A",
"flag",
"which",
"determines",
"what",
"priority",
"to",
"set",
"0",
"-",
"Idle",
"/",
"Low",
"1",
"-",
"Below",
"Normal",
"(",
"Not",
"supported",
"on",
"Windows",
"95",
"/",
"98",
"/",
"ME",
")",
"2",
"-",
"Normal",
"3",
"-",
"Above",
"Normal",
"(",
"Not",
"supported",
"on",
"Windows",
"95",
"/",
"98",
"/",
"ME",
")",
"4",
"-",
"High",
"5",
"-",
"Realtime",
"(",
"Use",
"with",
"caution",
"may",
"make",
"the",
"system",
"unstable",
")",
":",
"return",
":"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/process.py#L58-L77
|
[
"def",
"process_set_priority",
"(",
"process",
",",
"priority",
")",
":",
"ret",
"=",
"AUTO_IT",
".",
"AU3_ProcessSetPriority",
"(",
"LPCWSTR",
"(",
"process",
")",
",",
"INT",
"(",
"priority",
")",
")",
"if",
"ret",
"==",
"0",
":",
"if",
"error",
"(",
")",
"==",
"1",
":",
"raise",
"AutoItError",
"(",
"\"set priority failed\"",
")",
"elif",
"error",
"(",
")",
"==",
"2",
":",
"raise",
"AutoItError",
"(",
"\"unsupported priority class be used\"",
")",
"return",
"ret"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
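A usage sketch (import path and process name are assumptions); note the wrapper raises `AutoItError` on failure rather than returning an error code:

import autoit  # assumed import path

# Drop notepad.exe to Below Normal priority (flag 1).
autoit.process_set_priority("notepad.exe", 1)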
process_wait
|
Pauses script execution until a given process exists.
:param process:
:param timeout:
:return:
|
autoit/process.py
|
def process_wait(process, timeout=0):
"""
Pauses script execution until a given process exists.
:param process:
:param timeout:
:return:
"""
ret = AUTO_IT.AU3_ProcessWait(LPCWSTR(process), INT(timeout))
return ret
|
def process_wait(process, timeout=0):
"""
Pauses script execution until a given process exists.
:param process:
:param timeout:
:return:
"""
ret = AUTO_IT.AU3_ProcessWait(LPCWSTR(process), INT(timeout))
return ret
|
[
"Pauses",
"script",
"execution",
"until",
"a",
"given",
"process",
"exists",
".",
":",
"param",
"process",
":",
":",
"param",
"timeout",
":",
":",
"return",
":"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/process.py#L81-L89
|
[
"def",
"process_wait",
"(",
"process",
",",
"timeout",
"=",
"0",
")",
":",
"ret",
"=",
"AUTO_IT",
".",
"AU3_ProcessWait",
"(",
"LPCWSTR",
"(",
"process",
")",
",",
"INT",
"(",
"timeout",
")",
")",
"return",
"ret"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
process_wait_close
|
Pauses script execution until a given process does not exist.
:param process:
:param timeout:
:return:
|
autoit/process.py
|
def process_wait_close(process, timeout=0):
"""
Pauses script execution until a given process does not exist.
:param process:
:param timeout:
:return:
"""
ret = AUTO_IT.AU3_ProcessWaitClose(LPCWSTR(process), INT(timeout))
return ret
|
def process_wait_close(process, timeout=0):
"""
Pauses script execution until a given process does not exist.
:param process:
:param timeout:
:return:
"""
ret = AUTO_IT.AU3_ProcessWaitClose(LPCWSTR(process), INT(timeout))
return ret
|
[
"Pauses",
"script",
"execution",
"until",
"a",
"given",
"process",
"does",
"not",
"exist",
".",
":",
"param",
"process",
":",
":",
"param",
"timeout",
":",
":",
"return",
":"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/process.py#L93-L101
|
[
"def",
"process_wait_close",
"(",
"process",
",",
"timeout",
"=",
"0",
")",
":",
"ret",
"=",
"AUTO_IT",
".",
"AU3_ProcessWaitClose",
"(",
"LPCWSTR",
"(",
"process",
")",
",",
"INT",
"(",
"timeout",
")",
")",
"return",
"ret"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
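A combined usage sketch for the two wait wrappers above (import path and process name assumed):

import autoit  # assumed import path

# Block until notepad.exe appears, giving up after 10 seconds ...
autoit.process_wait("notepad.exe", timeout=10)
# ... then block until it has exited, giving up after 60 seconds.
autoit.process_wait_close("notepad.exe", timeout=60)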
run_as
|
Runs an external program.
:param user: The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return:
|
autoit/process.py
|
def run_as(user, domain, password, filename, logon_flag=1, work_dir="",
show_flag=Properties.SW_SHOWNORMAL):
"""
Runs an external program.
    :param user: The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return:
"""
ret = AUTO_IT.AU3_RunAs(
LPCWSTR(user), LPCWSTR(domain), LPCWSTR(password), INT(logon_flag),
LPCWSTR(filename), LPCWSTR(work_dir), INT(show_flag)
)
return ret
|
def run_as(user, domain, password, filename, logon_flag=1, work_dir="",
show_flag=Properties.SW_SHOWNORMAL):
"""
Runs an external program.
    :param user: The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return:
"""
ret = AUTO_IT.AU3_RunAs(
LPCWSTR(user), LPCWSTR(domain), LPCWSTR(password), INT(logon_flag),
LPCWSTR(filename), LPCWSTR(work_dir), INT(show_flag)
)
return ret
|
[
"Runs",
"an",
"external",
"program",
".",
":",
"param",
"user",
":",
"username",
"The",
"user",
"name",
"to",
"use",
".",
":",
"param",
"domain",
":",
"The",
"domain",
"name",
"to",
"use",
".",
":",
"param",
"password",
":",
"The",
"password",
"to",
"use",
".",
":",
"param",
"logon_flag",
":",
"0",
"=",
"do",
"not",
"load",
"the",
"user",
"profile",
"1",
"=",
"(",
"default",
")",
"load",
"the",
"user",
"profile",
"2",
"=",
"use",
"for",
"net",
"credentials",
"only",
":",
"param",
"filename",
":",
"The",
"name",
"of",
"the",
"executable",
"(",
"EXE",
"BAT",
"COM",
"or",
"PIF",
")",
"to",
"run",
".",
":",
"param",
"work_dir",
":",
"The",
"working",
"directory",
".",
":",
"param",
"show_flag",
":",
"The",
"show",
"flag",
"of",
"the",
"executed",
"program",
":",
"SW_HIDE",
"=",
"Hidden",
"window",
"SW_MINIMIZE",
"=",
"Minimized",
"window",
"SW_MAXIMIZE",
"=",
"Maximized",
"window",
":",
"return",
":"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/process.py#L105-L126
|
[
"def",
"run_as",
"(",
"user",
",",
"domain",
",",
"password",
",",
"filename",
",",
"logon_flag",
"=",
"1",
",",
"work_dir",
"=",
"\"\"",
",",
"show_flag",
"=",
"Properties",
".",
"SW_SHOWNORMAL",
")",
":",
"ret",
"=",
"AUTO_IT",
".",
"AU3_RunAs",
"(",
"LPCWSTR",
"(",
"user",
")",
",",
"LPCWSTR",
"(",
"domain",
")",
",",
"LPCWSTR",
"(",
"password",
")",
",",
"INT",
"(",
"logon_flag",
")",
",",
"LPCWSTR",
"(",
"filename",
")",
",",
"LPCWSTR",
"(",
"work_dir",
")",
",",
"INT",
"(",
"show_flag",
")",
")",
"return",
"ret"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
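A usage sketch; the account name, domain, and password are placeholders, and `Properties.SW_HIDE` is assumed by analogy with the `Properties.SW_SHOWNORMAL` default in the record:

import autoit                          # assumed import path
from autoit.autoit import Properties   # assumed module location of Properties

autoit.run_as(
    "some_user", "SOME_DOMAIN", "s3cret",   # placeholder credentials
    "notepad.exe",
    logon_flag=1,                           # load the user profile (default)
    show_flag=Properties.SW_HIDE,           # assumed constant; window hidden
)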
run_as_wait
|
Runs an external program.
:param user: The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return:
|
autoit/process.py
|
def run_as_wait(user, domain, password, filename, logon_flag=1, work_dir="",
show_flag=Properties.SW_SHOWNORMAL):
"""
Runs an external program.
    :param user: The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return:
"""
ret = AUTO_IT.AU3_RunAsWait(
LPCWSTR(user), LPCWSTR(domain), LPCWSTR(password), INT(logon_flag),
LPCWSTR(filename), LPCWSTR(work_dir), INT(show_flag)
)
return ret
|
def run_as_wait(user, domain, password, filename, logon_flag=1, work_dir="",
show_flag=Properties.SW_SHOWNORMAL):
"""
Runs an external program.
    :param user: The user name to use.
:param domain: The domain name to use.
:param password: The password to use.
:param logon_flag: 0 = do not load the user profile, 1 = (default) load
the user profile, 2 = use for net credentials only
:param filename: The name of the executable (EXE, BAT, COM, or PIF) to run.
:param work_dir: The working directory.
:param show_flag: The "show" flag of the executed program:
SW_HIDE = Hidden window
SW_MINIMIZE = Minimized window
SW_MAXIMIZE = Maximized window
:return:
"""
ret = AUTO_IT.AU3_RunAsWait(
LPCWSTR(user), LPCWSTR(domain), LPCWSTR(password), INT(logon_flag),
LPCWSTR(filename), LPCWSTR(work_dir), INT(show_flag)
)
return ret
|
[
"Runs",
"an",
"external",
"program",
".",
":",
"param",
"user",
":",
"username",
"The",
"user",
"name",
"to",
"use",
".",
":",
"param",
"domain",
":",
"The",
"domain",
"name",
"to",
"use",
".",
":",
"param",
"password",
":",
"The",
"password",
"to",
"use",
".",
":",
"param",
"logon_flag",
":",
"0",
"=",
"do",
"not",
"load",
"the",
"user",
"profile",
"1",
"=",
"(",
"default",
")",
"load",
"the",
"user",
"profile",
"2",
"=",
"use",
"for",
"net",
"credentials",
"only",
":",
"param",
"filename",
":",
"The",
"name",
"of",
"the",
"executable",
"(",
"EXE",
"BAT",
"COM",
"or",
"PIF",
")",
"to",
"run",
".",
":",
"param",
"work_dir",
":",
"The",
"working",
"directory",
".",
":",
"param",
"show_flag",
":",
"The",
"show",
"flag",
"of",
"the",
"executed",
"program",
":",
"SW_HIDE",
"=",
"Hidden",
"window",
"SW_MINIMIZE",
"=",
"Minimized",
"window",
"SW_MAXIMIZE",
"=",
"Maximized",
"window",
":",
"return",
":"
] |
jacexh/pyautoit
|
python
|
https://github.com/jacexh/pyautoit/blob/598314c3eed0639c701c8cb2366acb015e04b161/autoit/process.py#L130-L151
|
[
"def",
"run_as_wait",
"(",
"user",
",",
"domain",
",",
"password",
",",
"filename",
",",
"logon_flag",
"=",
"1",
",",
"work_dir",
"=",
"\"\"",
",",
"show_flag",
"=",
"Properties",
".",
"SW_SHOWNORMAL",
")",
":",
"ret",
"=",
"AUTO_IT",
".",
"AU3_RunAsWait",
"(",
"LPCWSTR",
"(",
"user",
")",
",",
"LPCWSTR",
"(",
"domain",
")",
",",
"LPCWSTR",
"(",
"password",
")",
",",
"INT",
"(",
"logon_flag",
")",
",",
"LPCWSTR",
"(",
"filename",
")",
",",
"LPCWSTR",
"(",
"work_dir",
")",
",",
"INT",
"(",
"show_flag",
")",
")",
"return",
"ret"
] |
598314c3eed0639c701c8cb2366acb015e04b161
|
valid
|
Webhook.hook
|
Registers a function as a hook. Multiple hooks can be registered for a given type, but the
order in which they are invoked is unspecified.
:param event_type: The event type this hook will be invoked for.
|
github_webhook/webhook.py
|
def hook(self, event_type='push'):
"""
Registers a function as a hook. Multiple hooks can be registered for a given type, but the
    order in which they are invoked is unspecified.
:param event_type: The event type this hook will be invoked for.
"""
def decorator(func):
self._hooks[event_type].append(func)
return func
return decorator
|
def hook(self, event_type='push'):
"""
Registers a function as a hook. Multiple hooks can be registered for a given type, but the
    order in which they are invoked is unspecified.
:param event_type: The event type this hook will be invoked for.
"""
def decorator(func):
self._hooks[event_type].append(func)
return func
return decorator
|
[
"Registers",
"a",
"function",
"as",
"a",
"hook",
".",
"Multiple",
"hooks",
"can",
"be",
"registered",
"for",
"a",
"given",
"type",
"but",
"the",
"order",
"in",
"which",
"they",
"are",
"invoke",
"is",
"unspecified",
"."
] |
bloomberg/python-github-webhook
|
python
|
https://github.com/bloomberg/python-github-webhook/blob/e9a70dd3a907f5c1a8f4cee190c59e4e775af37f/github_webhook/webhook.py#L29-L41
|
[
"def",
"hook",
"(",
"self",
",",
"event_type",
"=",
"'push'",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"self",
".",
"_hooks",
"[",
"event_type",
"]",
".",
"append",
"(",
"func",
")",
"return",
"func",
"return",
"decorator"
] |
e9a70dd3a907f5c1a8f4cee190c59e4e775af37f
|
valid
|
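A registration sketch for the decorator above. The `Webhook(app)` constructor and its Flask wiring are assumptions based on the rest of this module (`_postreceive` appears later in this section):

from flask import Flask
from github_webhook import Webhook   # assumed import path

app = Flask(__name__)
webhook = Webhook(app)               # assumed constructor wiring the Flask route

@webhook.hook(event_type="push")
def on_push(data):
    # `data` is the JSON payload parsed in _postreceive().
    print("push received for", data["repository"]["full_name"])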
Webhook._get_digest
|
Return message digest if a secret key was provided
|
github_webhook/webhook.py
|
def _get_digest(self):
"""Return message digest if a secret key was provided"""
return hmac.new(
self._secret, request.data, hashlib.sha1).hexdigest() if self._secret else None
|
def _get_digest(self):
"""Return message digest if a secret key was provided"""
return hmac.new(
self._secret, request.data, hashlib.sha1).hexdigest() if self._secret else None
|
[
"Return",
"message",
"digest",
"if",
"a",
"secret",
"key",
"was",
"provided"
] |
bloomberg/python-github-webhook
|
python
|
https://github.com/bloomberg/python-github-webhook/blob/e9a70dd3a907f5c1a8f4cee190c59e4e775af37f/github_webhook/webhook.py#L43-L47
|
[
"def",
"_get_digest",
"(",
"self",
")",
":",
"return",
"hmac",
".",
"new",
"(",
"self",
".",
"_secret",
",",
"request",
".",
"data",
",",
"hashlib",
".",
"sha1",
")",
".",
"hexdigest",
"(",
")",
"if",
"self",
".",
"_secret",
"else",
"None"
] |
e9a70dd3a907f5c1a8f4cee190c59e4e775af37f
|
valid
|
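The digest above is a hex SHA-1 HMAC over the raw request body. A sender-side sketch that produces the matching `X-Hub-Signature` header value (the `sha1=` prefix mirrors the split performed in `_postreceive` below):

import hashlib
import hmac

def sign_payload(secret: bytes, body: bytes) -> str:
    # Same construction as _get_digest(): HMAC-SHA1 of the body, hex encoded.
    return "sha1=" + hmac.new(secret, body, hashlib.sha1).hexdigest()

header = sign_payload(b"my-webhook-secret", b'{"zen": "Keep it simple."}')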
Webhook._postreceive
|
Callback from Flask
|
github_webhook/webhook.py
|
def _postreceive(self):
"""Callback from Flask"""
digest = self._get_digest()
if digest is not None:
sig_parts = _get_header('X-Hub-Signature').split('=', 1)
if not isinstance(digest, six.text_type):
digest = six.text_type(digest)
if (len(sig_parts) < 2 or sig_parts[0] != 'sha1'
or not hmac.compare_digest(sig_parts[1], digest)):
abort(400, 'Invalid signature')
event_type = _get_header('X-Github-Event')
data = request.get_json()
if data is None:
abort(400, 'Request body must contain json')
self._logger.info(
'%s (%s)', _format_event(event_type, data), _get_header('X-Github-Delivery'))
for hook in self._hooks.get(event_type, []):
hook(data)
return '', 204
|
def _postreceive(self):
"""Callback from Flask"""
digest = self._get_digest()
if digest is not None:
sig_parts = _get_header('X-Hub-Signature').split('=', 1)
if not isinstance(digest, six.text_type):
digest = six.text_type(digest)
if (len(sig_parts) < 2 or sig_parts[0] != 'sha1'
or not hmac.compare_digest(sig_parts[1], digest)):
abort(400, 'Invalid signature')
event_type = _get_header('X-Github-Event')
data = request.get_json()
if data is None:
abort(400, 'Request body must contain json')
self._logger.info(
'%s (%s)', _format_event(event_type, data), _get_header('X-Github-Delivery'))
for hook in self._hooks.get(event_type, []):
hook(data)
return '', 204
|
[
"Callback",
"from",
"Flask"
] |
bloomberg/python-github-webhook
|
python
|
https://github.com/bloomberg/python-github-webhook/blob/e9a70dd3a907f5c1a8f4cee190c59e4e775af37f/github_webhook/webhook.py#L49-L75
|
[
"def",
"_postreceive",
"(",
"self",
")",
":",
"digest",
"=",
"self",
".",
"_get_digest",
"(",
")",
"if",
"digest",
"is",
"not",
"None",
":",
"sig_parts",
"=",
"_get_header",
"(",
"'X-Hub-Signature'",
")",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"not",
"isinstance",
"(",
"digest",
",",
"six",
".",
"text_type",
")",
":",
"digest",
"=",
"six",
".",
"text_type",
"(",
"digest",
")",
"if",
"(",
"len",
"(",
"sig_parts",
")",
"<",
"2",
"or",
"sig_parts",
"[",
"0",
"]",
"!=",
"'sha1'",
"or",
"not",
"hmac",
".",
"compare_digest",
"(",
"sig_parts",
"[",
"1",
"]",
",",
"digest",
")",
")",
":",
"abort",
"(",
"400",
",",
"'Invalid signature'",
")",
"event_type",
"=",
"_get_header",
"(",
"'X-Github-Event'",
")",
"data",
"=",
"request",
".",
"get_json",
"(",
")",
"if",
"data",
"is",
"None",
":",
"abort",
"(",
"400",
",",
"'Request body must contain json'",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"'%s (%s)'",
",",
"_format_event",
"(",
"event_type",
",",
"data",
")",
",",
"_get_header",
"(",
"'X-Github-Delivery'",
")",
")",
"for",
"hook",
"in",
"self",
".",
"_hooks",
".",
"get",
"(",
"event_type",
",",
"[",
"]",
")",
":",
"hook",
"(",
"data",
")",
"return",
"''",
",",
"204"
] |
e9a70dd3a907f5c1a8f4cee190c59e4e775af37f
|
valid
|
long_description
|
Generate .rst document for PyPI.
|
setup.py
|
def long_description():
"""Generate .rst document for PyPi."""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--doc', dest="doc",
action="store_true", default=False)
args, sys.argv = parser.parse_known_args(sys.argv)
if args.doc:
import doc2md, pypandoc
md = doc2md.doc2md(doc2md.__doc__, "doc2md", toc=False)
        long_description = pypandoc.convert(md, 'rst', format='md')
        return long_description
else:
return None
|
def long_description():
"""Generate .rst document for PyPi."""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--doc', dest="doc",
action="store_true", default=False)
args, sys.argv = parser.parse_known_args(sys.argv)
if args.doc:
import doc2md, pypandoc
md = doc2md.doc2md(doc2md.__doc__, "doc2md", toc=False)
        long_description = pypandoc.convert(md, 'rst', format='md')
        return long_description
else:
return None
|
[
"Generate",
".",
"rst",
"document",
"for",
"PyPi",
"."
] |
coldfix/doc2md
|
python
|
https://github.com/coldfix/doc2md/blob/afd2876316a715d3401adb442d46c9a07cd7e806/setup.py#L5-L17
|
[
"def",
"long_description",
"(",
")",
":",
"import",
"argparse",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--doc'",
",",
"dest",
"=",
"\"doc\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
")",
"args",
",",
"sys",
".",
"argv",
"=",
"parser",
".",
"parse_known_args",
"(",
"sys",
".",
"argv",
")",
"if",
"args",
".",
"doc",
":",
"import",
"doc2md",
",",
"pypandoc",
"md",
"=",
"doc2md",
".",
"doc2md",
"(",
"doc2md",
".",
"__doc__",
",",
"\"doc2md\"",
",",
"toc",
"=",
"False",
")",
"long_description",
"=",
"pypandoc",
".",
"convert",
"(",
"md",
",",
"'rst'",
",",
"format",
"=",
"'md'",
")",
"else",
":",
"return",
"None"
] |
afd2876316a715d3401adb442d46c9a07cd7e806
|
valid
|
unindent
|
Remove common indentation from lines.
Unlike doctrim there is no special treatment of the first line.
|
doc2md.py
|
def unindent(lines):
"""
    Remove common indentation from lines.
Unlike doctrim there is no special treatment of the first line.
"""
try:
# Determine minimum indentation:
indent = min(len(line) - len(line.lstrip())
for line in lines if line)
except ValueError:
return lines
else:
return [line[indent:] for line in lines]
|
def unindent(lines):
"""
    Remove common indentation from lines.
Unlike doctrim there is no special treatment of the first line.
"""
try:
# Determine minimum indentation:
indent = min(len(line) - len(line.lstrip())
for line in lines if line)
except ValueError:
return lines
else:
return [line[indent:] for line in lines]
|
[
"Remove",
"common",
"indentation",
"from",
"string",
"."
] |
coldfix/doc2md
|
python
|
https://github.com/coldfix/doc2md/blob/afd2876316a715d3401adb442d46c9a07cd7e806/doc2md.py#L64-L78
|
[
"def",
"unindent",
"(",
"lines",
")",
":",
"try",
":",
"# Determine minimum indentation:",
"indent",
"=",
"min",
"(",
"len",
"(",
"line",
")",
"-",
"len",
"(",
"line",
".",
"lstrip",
"(",
")",
")",
"for",
"line",
"in",
"lines",
"if",
"line",
")",
"except",
"ValueError",
":",
"return",
"lines",
"else",
":",
"return",
"[",
"line",
"[",
"indent",
":",
"]",
"for",
"line",
"in",
"lines",
"]"
] |
afd2876316a715d3401adb442d46c9a07cd7e806
|
valid
|
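A worked example of the helper above; the empty string is skipped by the `if line` guard when computing the minimum indent, so the common four-space prefix is removed:

lines = ["    def f():", "        pass", ""]
assert unindent(lines) == ["def f():", "    pass", ""]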
find_sections
|
Find all section names and return a list with their names.
|
doc2md.py
|
def find_sections(lines):
"""
Find all section names and return a list with their names.
"""
sections = []
for line in lines:
if is_heading(line):
sections.append(get_heading(line))
return sections
|
def find_sections(lines):
"""
Find all section names and return a list with their names.
"""
sections = []
for line in lines:
if is_heading(line):
sections.append(get_heading(line))
return sections
|
[
"Find",
"all",
"section",
"names",
"and",
"return",
"a",
"list",
"with",
"their",
"names",
"."
] |
coldfix/doc2md
|
python
|
https://github.com/coldfix/doc2md/blob/afd2876316a715d3401adb442d46c9a07cd7e806/doc2md.py#L120-L128
|
[
"def",
"find_sections",
"(",
"lines",
")",
":",
"sections",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"if",
"is_heading",
"(",
"line",
")",
":",
"sections",
".",
"append",
"(",
"get_heading",
"(",
"line",
")",
")",
"return",
"sections"
] |
afd2876316a715d3401adb442d46c9a07cd7e806
|
valid
|
make_toc
|
Generate table of contents for array of section names.
|
doc2md.py
|
def make_toc(sections, maxdepth=0):
"""
Generate table of contents for array of section names.
"""
if not sections:
return []
outer = min(n for n,t in sections)
refs = []
for ind,sec in sections:
if maxdepth and ind-outer+1 > maxdepth:
continue
ref = sec.lower()
ref = ref.replace('`', '')
ref = ref.replace(' ', '-')
ref = ref.replace('?', '')
refs.append(" "*(ind-outer) + "- [%s](#%s)" % (sec, ref))
return refs
|
def make_toc(sections, maxdepth=0):
"""
Generate table of contents for array of section names.
"""
if not sections:
return []
outer = min(n for n,t in sections)
refs = []
for ind,sec in sections:
if maxdepth and ind-outer+1 > maxdepth:
continue
ref = sec.lower()
ref = ref.replace('`', '')
ref = ref.replace(' ', '-')
ref = ref.replace('?', '')
refs.append(" "*(ind-outer) + "- [%s](#%s)" % (sec, ref))
return refs
|
[
"Generate",
"table",
"of",
"contents",
"for",
"array",
"of",
"section",
"names",
"."
] |
coldfix/doc2md
|
python
|
https://github.com/coldfix/doc2md/blob/afd2876316a715d3401adb442d46c9a07cd7e806/doc2md.py#L130-L146
|
[
"def",
"make_toc",
"(",
"sections",
",",
"maxdepth",
"=",
"0",
")",
":",
"if",
"not",
"sections",
":",
"return",
"[",
"]",
"outer",
"=",
"min",
"(",
"n",
"for",
"n",
",",
"t",
"in",
"sections",
")",
"refs",
"=",
"[",
"]",
"for",
"ind",
",",
"sec",
"in",
"sections",
":",
"if",
"maxdepth",
"and",
"ind",
"-",
"outer",
"+",
"1",
">",
"maxdepth",
":",
"continue",
"ref",
"=",
"sec",
".",
"lower",
"(",
")",
"ref",
"=",
"ref",
".",
"replace",
"(",
"'`'",
",",
"''",
")",
"ref",
"=",
"ref",
".",
"replace",
"(",
"' '",
",",
"'-'",
")",
"ref",
"=",
"ref",
".",
"replace",
"(",
"'?'",
",",
"''",
")",
"refs",
".",
"append",
"(",
"\" \"",
"*",
"(",
"ind",
"-",
"outer",
")",
"+",
"\"- [%s](#%s)\"",
"%",
"(",
"sec",
",",
"ref",
")",
")",
"return",
"refs"
] |
afd2876316a715d3401adb442d46c9a07cd7e806
|
valid
|
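A worked example of the anchor generation above: references are lowercased, backticks and question marks are dropped, and spaces become dashes, while deeper sections are indented relative to the outermost level:

sections = [(1, "Overview"), (2, "How it works?")]
toc = make_toc(sections)
assert toc[0] == "- [Overview](#overview)"
assert toc[1].lstrip() == "- [How it works?](#how-it-works)"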
doc2md
|
Convert a docstring to a markdown text.
|
doc2md.py
|
def doc2md(docstr, title, min_level=1, more_info=False, toc=True, maxdepth=0):
"""
Convert a docstring to a markdown text.
"""
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
shiftlevel = 0
if level < min_level:
shiftlevel = min_level - level
level = min_level
sections = [(lev+shiftlevel, tit) for lev,tit in sections]
head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
md = [
make_heading(level, title),
"",
] + lines[:head]
if toc:
md += make_toc(sections, maxdepth)
md += ['']
md += _doc2md(lines[head:], shiftlevel)
if more_info:
return (md, sections)
else:
return "\n".join(md)
|
def doc2md(docstr, title, min_level=1, more_info=False, toc=True, maxdepth=0):
"""
Convert a docstring to a markdown text.
"""
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
shiftlevel = 0
if level < min_level:
shiftlevel = min_level - level
level = min_level
sections = [(lev+shiftlevel, tit) for lev,tit in sections]
head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
md = [
make_heading(level, title),
"",
] + lines[:head]
if toc:
md += make_toc(sections, maxdepth)
md += ['']
md += _doc2md(lines[head:], shiftlevel)
if more_info:
return (md, sections)
else:
return "\n".join(md)
|
[
"Convert",
"a",
"docstring",
"to",
"a",
"markdown",
"text",
"."
] |
coldfix/doc2md
|
python
|
https://github.com/coldfix/doc2md/blob/afd2876316a715d3401adb442d46c9a07cd7e806/doc2md.py#L177-L208
|
[
"def",
"doc2md",
"(",
"docstr",
",",
"title",
",",
"min_level",
"=",
"1",
",",
"more_info",
"=",
"False",
",",
"toc",
"=",
"True",
",",
"maxdepth",
"=",
"0",
")",
":",
"text",
"=",
"doctrim",
"(",
"docstr",
")",
"lines",
"=",
"text",
".",
"split",
"(",
"'\\n'",
")",
"sections",
"=",
"find_sections",
"(",
"lines",
")",
"if",
"sections",
":",
"level",
"=",
"min",
"(",
"n",
"for",
"n",
",",
"t",
"in",
"sections",
")",
"-",
"1",
"else",
":",
"level",
"=",
"1",
"shiftlevel",
"=",
"0",
"if",
"level",
"<",
"min_level",
":",
"shiftlevel",
"=",
"min_level",
"-",
"level",
"level",
"=",
"min_level",
"sections",
"=",
"[",
"(",
"lev",
"+",
"shiftlevel",
",",
"tit",
")",
"for",
"lev",
",",
"tit",
"in",
"sections",
"]",
"head",
"=",
"next",
"(",
"(",
"i",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"lines",
")",
"if",
"is_heading",
"(",
"l",
")",
")",
",",
"0",
")",
"md",
"=",
"[",
"make_heading",
"(",
"level",
",",
"title",
")",
",",
"\"\"",
",",
"]",
"+",
"lines",
"[",
":",
"head",
"]",
"if",
"toc",
":",
"md",
"+=",
"make_toc",
"(",
"sections",
",",
"maxdepth",
")",
"md",
"+=",
"[",
"''",
"]",
"md",
"+=",
"_doc2md",
"(",
"lines",
"[",
"head",
":",
"]",
",",
"shiftlevel",
")",
"if",
"more_info",
":",
"return",
"(",
"md",
",",
"sections",
")",
"else",
":",
"return",
"\"\\n\"",
".",
"join",
"(",
"md",
")"
] |
afd2876316a715d3401adb442d46c9a07cd7e806
|
valid
|
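A minimal usage sketch for the converter above, mirroring the `doc2md.doc2md(...)` call seen in the setup.py record earlier in this section. The exact heading syntax recognized by `is_heading()` is not shown here, so the input is deliberately heading-free; with no sections, `level` defaults to 1 and the output is the title heading followed by the docstring body:

import doc2md

text = doc2md.doc2md("Frobnicate things.\n\nDetails follow.", "frob", toc=False)
print(text)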
mod2md
|
Generate markdown document from module, including API section.
|
doc2md.py
|
def mod2md(module, title, title_api_section, toc=True, maxdepth=0):
"""
Generate markdown document from module, including API section.
"""
docstr = module.__doc__
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
api_md = []
api_sec = []
if title_api_section and module.__all__:
sections.append((level+1, title_api_section))
for name in module.__all__:
api_sec.append((level+2, "`" + name + "`"))
api_md += ['', '']
entry = module.__dict__[name]
if entry.__doc__:
md, sec = doc2md(entry.__doc__, "`" + name + "`",
min_level=level+2, more_info=True, toc=False)
api_sec += sec
api_md += md
sections += api_sec
# headline
head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
md = [
make_heading(level, title),
"",
] + lines[:head]
# main sections
if toc:
md += make_toc(sections, maxdepth)
md += ['']
md += _doc2md(lines[head:])
# API section
md += [
'',
'',
make_heading(level+1, title_api_section),
]
if toc:
md += ['']
md += make_toc(api_sec, 1)
md += api_md
return "\n".join(md)
|
def mod2md(module, title, title_api_section, toc=True, maxdepth=0):
"""
Generate markdown document from module, including API section.
"""
docstr = module.__doc__
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
api_md = []
api_sec = []
if title_api_section and module.__all__:
sections.append((level+1, title_api_section))
for name in module.__all__:
api_sec.append((level+2, "`" + name + "`"))
api_md += ['', '']
entry = module.__dict__[name]
if entry.__doc__:
md, sec = doc2md(entry.__doc__, "`" + name + "`",
min_level=level+2, more_info=True, toc=False)
api_sec += sec
api_md += md
sections += api_sec
# headline
head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
md = [
make_heading(level, title),
"",
] + lines[:head]
# main sections
if toc:
md += make_toc(sections, maxdepth)
md += ['']
md += _doc2md(lines[head:])
# API section
md += [
'',
'',
make_heading(level+1, title_api_section),
]
if toc:
md += ['']
md += make_toc(api_sec, 1)
md += api_md
return "\n".join(md)
|
[
"Generate",
"markdown",
"document",
"from",
"module",
"including",
"API",
"section",
"."
] |
coldfix/doc2md
|
python
|
https://github.com/coldfix/doc2md/blob/afd2876316a715d3401adb442d46c9a07cd7e806/doc2md.py#L210-L265
|
[
"def",
"mod2md",
"(",
"module",
",",
"title",
",",
"title_api_section",
",",
"toc",
"=",
"True",
",",
"maxdepth",
"=",
"0",
")",
":",
"docstr",
"=",
"module",
".",
"__doc__",
"text",
"=",
"doctrim",
"(",
"docstr",
")",
"lines",
"=",
"text",
".",
"split",
"(",
"'\\n'",
")",
"sections",
"=",
"find_sections",
"(",
"lines",
")",
"if",
"sections",
":",
"level",
"=",
"min",
"(",
"n",
"for",
"n",
",",
"t",
"in",
"sections",
")",
"-",
"1",
"else",
":",
"level",
"=",
"1",
"api_md",
"=",
"[",
"]",
"api_sec",
"=",
"[",
"]",
"if",
"title_api_section",
"and",
"module",
".",
"__all__",
":",
"sections",
".",
"append",
"(",
"(",
"level",
"+",
"1",
",",
"title_api_section",
")",
")",
"for",
"name",
"in",
"module",
".",
"__all__",
":",
"api_sec",
".",
"append",
"(",
"(",
"level",
"+",
"2",
",",
"\"`\"",
"+",
"name",
"+",
"\"`\"",
")",
")",
"api_md",
"+=",
"[",
"''",
",",
"''",
"]",
"entry",
"=",
"module",
".",
"__dict__",
"[",
"name",
"]",
"if",
"entry",
".",
"__doc__",
":",
"md",
",",
"sec",
"=",
"doc2md",
"(",
"entry",
".",
"__doc__",
",",
"\"`\"",
"+",
"name",
"+",
"\"`\"",
",",
"min_level",
"=",
"level",
"+",
"2",
",",
"more_info",
"=",
"True",
",",
"toc",
"=",
"False",
")",
"api_sec",
"+=",
"sec",
"api_md",
"+=",
"md",
"sections",
"+=",
"api_sec",
"# headline",
"head",
"=",
"next",
"(",
"(",
"i",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"lines",
")",
"if",
"is_heading",
"(",
"l",
")",
")",
",",
"0",
")",
"md",
"=",
"[",
"make_heading",
"(",
"level",
",",
"title",
")",
",",
"\"\"",
",",
"]",
"+",
"lines",
"[",
":",
"head",
"]",
"# main sections",
"if",
"toc",
":",
"md",
"+=",
"make_toc",
"(",
"sections",
",",
"maxdepth",
")",
"md",
"+=",
"[",
"''",
"]",
"md",
"+=",
"_doc2md",
"(",
"lines",
"[",
"head",
":",
"]",
")",
"# API section",
"md",
"+=",
"[",
"''",
",",
"''",
",",
"make_heading",
"(",
"level",
"+",
"1",
",",
"title_api_section",
")",
",",
"]",
"if",
"toc",
":",
"md",
"+=",
"[",
"''",
"]",
"md",
"+=",
"make_toc",
"(",
"api_sec",
",",
"1",
")",
"md",
"+=",
"api_md",
"return",
"\"\\n\"",
".",
"join",
"(",
"md",
")"
] |
afd2876316a715d3401adb442d46c9a07cd7e806
|
valid
|
ProfileBlockAnalyzer.largest_finite_distance
|
Compute the maximum temporal distance.
Returns
-------
max_temporal_distance : float or None
|
gtfspy/routing/profile_block_analyzer.py
|
def largest_finite_distance(self):
"""
Compute the maximum temporal distance.
Returns
-------
    max_temporal_distance : float or None
"""
block_start_distances = [block.distance_start for block in self._profile_blocks if
block.distance_start < float('inf')]
block_end_distances = [block.distance_end for block in self._profile_blocks if
block.distance_end < float('inf')]
distances = block_start_distances + block_end_distances
if len(distances) > 0:
return max(distances)
else:
return None
|
def largest_finite_distance(self):
"""
Compute the maximum temporal distance.
Returns
-------
    max_temporal_distance : float or None
"""
block_start_distances = [block.distance_start for block in self._profile_blocks if
block.distance_start < float('inf')]
block_end_distances = [block.distance_end for block in self._profile_blocks if
block.distance_end < float('inf')]
distances = block_start_distances + block_end_distances
if len(distances) > 0:
return max(distances)
else:
return None
|
[
"Compute",
"the",
"maximum",
"temporal",
"distance",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/profile_block_analyzer.py#L104-L120
|
[
"def",
"largest_finite_distance",
"(",
"self",
")",
":",
"block_start_distances",
"=",
"[",
"block",
".",
"distance_start",
"for",
"block",
"in",
"self",
".",
"_profile_blocks",
"if",
"block",
".",
"distance_start",
"<",
"float",
"(",
"'inf'",
")",
"]",
"block_end_distances",
"=",
"[",
"block",
".",
"distance_end",
"for",
"block",
"in",
"self",
".",
"_profile_blocks",
"if",
"block",
".",
"distance_end",
"<",
"float",
"(",
"'inf'",
")",
"]",
"distances",
"=",
"block_start_distances",
"+",
"block_end_distances",
"if",
"len",
"(",
"distances",
")",
">",
"0",
":",
"return",
"max",
"(",
"distances",
")",
"else",
":",
"return",
"None"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
valid
|
ProfileBlockAnalyzer._temporal_distance_cdf
|
Temporal distance cumulative distribution function.
Returns
-------
x_values: numpy.array
values for the x-axis
cdf: numpy.array
cdf values
|
gtfspy/routing/profile_block_analyzer.py
|
def _temporal_distance_cdf(self):
"""
    Temporal distance cumulative distribution function.
Returns
-------
x_values: numpy.array
values for the x-axis
cdf: numpy.array
cdf values
"""
distance_split_points = set()
for block in self._profile_blocks:
if block.distance_start != float('inf'):
distance_split_points.add(block.distance_end)
distance_split_points.add(block.distance_start)
distance_split_points_ordered = numpy.array(sorted(list(distance_split_points)))
temporal_distance_split_widths = distance_split_points_ordered[1:] - distance_split_points_ordered[:-1]
trip_counts = numpy.zeros(len(temporal_distance_split_widths))
delta_peaks = defaultdict(lambda: 0)
for block in self._profile_blocks:
if block.distance_start == block.distance_end:
delta_peaks[block.distance_end] += block.width()
else:
start_index = numpy.searchsorted(distance_split_points_ordered, block.distance_end)
end_index = numpy.searchsorted(distance_split_points_ordered, block.distance_start)
trip_counts[start_index:end_index] += 1
unnormalized_cdf = numpy.array([0] + list(numpy.cumsum(temporal_distance_split_widths * trip_counts)))
if not (numpy.isclose(
[unnormalized_cdf[-1]],
[self._end_time - self._start_time - sum(delta_peaks.values())], atol=1E-4
).all()):
print(unnormalized_cdf[-1], self._end_time - self._start_time - sum(delta_peaks.values()))
raise RuntimeError("Something went wrong with cdf computation!")
if len(delta_peaks) > 0:
for peak in delta_peaks.keys():
if peak == float('inf'):
continue
index = numpy.nonzero(distance_split_points_ordered == peak)[0][0]
unnormalized_cdf = numpy.insert(unnormalized_cdf, index, unnormalized_cdf[index])
distance_split_points_ordered = numpy.insert(distance_split_points_ordered, index,
distance_split_points_ordered[index])
# walk_waiting_time_fraction = walk_total_time / (self.end_time_dep - self.start_time_dep)
unnormalized_cdf[(index + 1):] = unnormalized_cdf[(index + 1):] + delta_peaks[peak]
norm_cdf = unnormalized_cdf / (unnormalized_cdf[-1] + delta_peaks[float('inf')])
return distance_split_points_ordered, norm_cdf
|
def _temporal_distance_cdf(self):
"""
    Temporal distance cumulative distribution function.
Returns
-------
x_values: numpy.array
values for the x-axis
cdf: numpy.array
cdf values
"""
distance_split_points = set()
for block in self._profile_blocks:
if block.distance_start != float('inf'):
distance_split_points.add(block.distance_end)
distance_split_points.add(block.distance_start)
distance_split_points_ordered = numpy.array(sorted(list(distance_split_points)))
temporal_distance_split_widths = distance_split_points_ordered[1:] - distance_split_points_ordered[:-1]
trip_counts = numpy.zeros(len(temporal_distance_split_widths))
delta_peaks = defaultdict(lambda: 0)
for block in self._profile_blocks:
if block.distance_start == block.distance_end:
delta_peaks[block.distance_end] += block.width()
else:
start_index = numpy.searchsorted(distance_split_points_ordered, block.distance_end)
end_index = numpy.searchsorted(distance_split_points_ordered, block.distance_start)
trip_counts[start_index:end_index] += 1
unnormalized_cdf = numpy.array([0] + list(numpy.cumsum(temporal_distance_split_widths * trip_counts)))
if not (numpy.isclose(
[unnormalized_cdf[-1]],
[self._end_time - self._start_time - sum(delta_peaks.values())], atol=1E-4
).all()):
print(unnormalized_cdf[-1], self._end_time - self._start_time - sum(delta_peaks.values()))
raise RuntimeError("Something went wrong with cdf computation!")
if len(delta_peaks) > 0:
for peak in delta_peaks.keys():
if peak == float('inf'):
continue
index = numpy.nonzero(distance_split_points_ordered == peak)[0][0]
unnormalized_cdf = numpy.insert(unnormalized_cdf, index, unnormalized_cdf[index])
distance_split_points_ordered = numpy.insert(distance_split_points_ordered, index,
distance_split_points_ordered[index])
# walk_waiting_time_fraction = walk_total_time / (self.end_time_dep - self.start_time_dep)
unnormalized_cdf[(index + 1):] = unnormalized_cdf[(index + 1):] + delta_peaks[peak]
norm_cdf = unnormalized_cdf / (unnormalized_cdf[-1] + delta_peaks[float('inf')])
return distance_split_points_ordered, norm_cdf
|
[
"Temporal",
"distance",
"cumulative",
"density",
"function",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/profile_block_analyzer.py#L133-L183
|
[
"def",
"_temporal_distance_cdf",
"(",
"self",
")",
":",
"distance_split_points",
"=",
"set",
"(",
")",
"for",
"block",
"in",
"self",
".",
"_profile_blocks",
":",
"if",
"block",
".",
"distance_start",
"!=",
"float",
"(",
"'inf'",
")",
":",
"distance_split_points",
".",
"add",
"(",
"block",
".",
"distance_end",
")",
"distance_split_points",
".",
"add",
"(",
"block",
".",
"distance_start",
")",
"distance_split_points_ordered",
"=",
"numpy",
".",
"array",
"(",
"sorted",
"(",
"list",
"(",
"distance_split_points",
")",
")",
")",
"temporal_distance_split_widths",
"=",
"distance_split_points_ordered",
"[",
"1",
":",
"]",
"-",
"distance_split_points_ordered",
"[",
":",
"-",
"1",
"]",
"trip_counts",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"temporal_distance_split_widths",
")",
")",
"delta_peaks",
"=",
"defaultdict",
"(",
"lambda",
":",
"0",
")",
"for",
"block",
"in",
"self",
".",
"_profile_blocks",
":",
"if",
"block",
".",
"distance_start",
"==",
"block",
".",
"distance_end",
":",
"delta_peaks",
"[",
"block",
".",
"distance_end",
"]",
"+=",
"block",
".",
"width",
"(",
")",
"else",
":",
"start_index",
"=",
"numpy",
".",
"searchsorted",
"(",
"distance_split_points_ordered",
",",
"block",
".",
"distance_end",
")",
"end_index",
"=",
"numpy",
".",
"searchsorted",
"(",
"distance_split_points_ordered",
",",
"block",
".",
"distance_start",
")",
"trip_counts",
"[",
"start_index",
":",
"end_index",
"]",
"+=",
"1",
"unnormalized_cdf",
"=",
"numpy",
".",
"array",
"(",
"[",
"0",
"]",
"+",
"list",
"(",
"numpy",
".",
"cumsum",
"(",
"temporal_distance_split_widths",
"*",
"trip_counts",
")",
")",
")",
"if",
"not",
"(",
"numpy",
".",
"isclose",
"(",
"[",
"unnormalized_cdf",
"[",
"-",
"1",
"]",
"]",
",",
"[",
"self",
".",
"_end_time",
"-",
"self",
".",
"_start_time",
"-",
"sum",
"(",
"delta_peaks",
".",
"values",
"(",
")",
")",
"]",
",",
"atol",
"=",
"1E-4",
")",
".",
"all",
"(",
")",
")",
":",
"print",
"(",
"unnormalized_cdf",
"[",
"-",
"1",
"]",
",",
"self",
".",
"_end_time",
"-",
"self",
".",
"_start_time",
"-",
"sum",
"(",
"delta_peaks",
".",
"values",
"(",
")",
")",
")",
"raise",
"RuntimeError",
"(",
"\"Something went wrong with cdf computation!\"",
")",
"if",
"len",
"(",
"delta_peaks",
")",
">",
"0",
":",
"for",
"peak",
"in",
"delta_peaks",
".",
"keys",
"(",
")",
":",
"if",
"peak",
"==",
"float",
"(",
"'inf'",
")",
":",
"continue",
"index",
"=",
"numpy",
".",
"nonzero",
"(",
"distance_split_points_ordered",
"==",
"peak",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"unnormalized_cdf",
"=",
"numpy",
".",
"insert",
"(",
"unnormalized_cdf",
",",
"index",
",",
"unnormalized_cdf",
"[",
"index",
"]",
")",
"distance_split_points_ordered",
"=",
"numpy",
".",
"insert",
"(",
"distance_split_points_ordered",
",",
"index",
",",
"distance_split_points_ordered",
"[",
"index",
"]",
")",
"# walk_waiting_time_fraction = walk_total_time / (self.end_time_dep - self.start_time_dep)",
"unnormalized_cdf",
"[",
"(",
"index",
"+",
"1",
")",
":",
"]",
"=",
"unnormalized_cdf",
"[",
"(",
"index",
"+",
"1",
")",
":",
"]",
"+",
"delta_peaks",
"[",
"peak",
"]",
"norm_cdf",
"=",
"unnormalized_cdf",
"/",
"(",
"unnormalized_cdf",
"[",
"-",
"1",
"]",
"+",
"delta_peaks",
"[",
"float",
"(",
"'inf'",
")",
"]",
")",
"return",
"distance_split_points_ordered",
",",
"norm_cdf"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
valid
|
ProfileBlockAnalyzer._temporal_distance_pdf
|
Temporal distance probability density function.
Returns
-------
non_delta_peak_split_points: numpy.array
non_delta_peak_densities: numpy.array
len(density) == len(temporal_distance_split_points_ordered) -1
delta_peak_loc_to_probability_mass : dict
|
gtfspy/routing/profile_block_analyzer.py
|
def _temporal_distance_pdf(self):
"""
Temporal distance probability density function.
Returns
-------
non_delta_peak_split_points: numpy.array
non_delta_peak_densities: numpy.array
len(density) == len(temporal_distance_split_points_ordered) -1
delta_peak_loc_to_probability_mass : dict
"""
temporal_distance_split_points_ordered, norm_cdf = self._temporal_distance_cdf()
delta_peak_loc_to_probability_mass = {}
non_delta_peak_split_points = [temporal_distance_split_points_ordered[0]]
non_delta_peak_densities = []
for i in range(0, len(temporal_distance_split_points_ordered) - 1):
left = temporal_distance_split_points_ordered[i]
right = temporal_distance_split_points_ordered[i + 1]
width = right - left
prob_mass = norm_cdf[i + 1] - norm_cdf[i]
if width == 0.0:
delta_peak_loc_to_probability_mass[left] = prob_mass
else:
non_delta_peak_split_points.append(right)
non_delta_peak_densities.append(prob_mass / float(width))
assert (len(non_delta_peak_densities) == len(non_delta_peak_split_points) - 1)
return numpy.array(non_delta_peak_split_points), \
numpy.array(non_delta_peak_densities), delta_peak_loc_to_probability_mass
|
def _temporal_distance_pdf(self):
"""
Temporal distance probability density function.
Returns
-------
non_delta_peak_split_points: numpy.array
non_delta_peak_densities: numpy.array
len(density) == len(temporal_distance_split_points_ordered) -1
delta_peak_loc_to_probability_mass : dict
"""
temporal_distance_split_points_ordered, norm_cdf = self._temporal_distance_cdf()
delta_peak_loc_to_probability_mass = {}
non_delta_peak_split_points = [temporal_distance_split_points_ordered[0]]
non_delta_peak_densities = []
for i in range(0, len(temporal_distance_split_points_ordered) - 1):
left = temporal_distance_split_points_ordered[i]
right = temporal_distance_split_points_ordered[i + 1]
width = right - left
prob_mass = norm_cdf[i + 1] - norm_cdf[i]
if width == 0.0:
delta_peak_loc_to_probability_mass[left] = prob_mass
else:
non_delta_peak_split_points.append(right)
non_delta_peak_densities.append(prob_mass / float(width))
assert (len(non_delta_peak_densities) == len(non_delta_peak_split_points) - 1)
return numpy.array(non_delta_peak_split_points), \
numpy.array(non_delta_peak_densities), delta_peak_loc_to_probability_mass
|
[
"Temporal",
"distance",
"probability",
"density",
"function",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/profile_block_analyzer.py#L185-L213
|
[
"def",
"_temporal_distance_pdf",
"(",
"self",
")",
":",
"temporal_distance_split_points_ordered",
",",
"norm_cdf",
"=",
"self",
".",
"_temporal_distance_cdf",
"(",
")",
"delta_peak_loc_to_probability_mass",
"=",
"{",
"}",
"non_delta_peak_split_points",
"=",
"[",
"temporal_distance_split_points_ordered",
"[",
"0",
"]",
"]",
"non_delta_peak_densities",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"temporal_distance_split_points_ordered",
")",
"-",
"1",
")",
":",
"left",
"=",
"temporal_distance_split_points_ordered",
"[",
"i",
"]",
"right",
"=",
"temporal_distance_split_points_ordered",
"[",
"i",
"+",
"1",
"]",
"width",
"=",
"right",
"-",
"left",
"prob_mass",
"=",
"norm_cdf",
"[",
"i",
"+",
"1",
"]",
"-",
"norm_cdf",
"[",
"i",
"]",
"if",
"width",
"==",
"0.0",
":",
"delta_peak_loc_to_probability_mass",
"[",
"left",
"]",
"=",
"prob_mass",
"else",
":",
"non_delta_peak_split_points",
".",
"append",
"(",
"right",
")",
"non_delta_peak_densities",
".",
"append",
"(",
"prob_mass",
"/",
"float",
"(",
"width",
")",
")",
"assert",
"(",
"len",
"(",
"non_delta_peak_densities",
")",
"==",
"len",
"(",
"non_delta_peak_split_points",
")",
"-",
"1",
")",
"return",
"numpy",
".",
"array",
"(",
"non_delta_peak_split_points",
")",
",",
"numpy",
".",
"array",
"(",
"non_delta_peak_densities",
")",
",",
"delta_peak_loc_to_probability_mass"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
valid
|
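A numeric illustration of the split-point handling above, with invented CDF values: a zero-width interval becomes a delta peak carrying its probability mass, while finite-width intervals yield mass divided by width:

xs  = [0.0, 60.0, 60.0, 120.0]   # split points; the repeated 60.0 marks a delta peak
cdf = [0.0, 0.25, 0.50, 1.00]    # invented CDF values for illustration
delta_peaks, densities = {}, []
for i in range(len(xs) - 1):
    width, mass = xs[i + 1] - xs[i], cdf[i + 1] - cdf[i]
    if width == 0.0:
        delta_peaks[xs[i]] = mass        # point mass, as in the method above
    else:
        densities.append(mass / width)   # uniform density over the interval
assert delta_peaks == {60.0: 0.25}
assert densities == [0.25 / 60, 0.5 / 60]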
remove_all_trips_fully_outside_buffer
|
Not used in the regular filter process for the time being.
Parameters
----------
db_conn: sqlite3.Connection
connection to the GTFS object
center_lat: float
center_lon: float
buffer_km: float
|
gtfspy/filter.py
|
def remove_all_trips_fully_outside_buffer(db_conn, center_lat, center_lon, buffer_km, update_secondary_data=True):
"""
Not used in the regular filter process for the time being.
Parameters
----------
db_conn: sqlite3.Connection
connection to the GTFS object
center_lat: float
center_lon: float
buffer_km: float
"""
distance_function_str = add_wgs84_distance_function_to_db(db_conn)
stops_within_buffer_query_sql = "SELECT stop_I FROM stops WHERE CAST(" + distance_function_str + \
"(lat, lon, {lat} , {lon}) AS INT) < {d_m}"\
.format(lat=float(center_lat), lon=float(center_lon), d_m=int(1000*buffer_km))
select_all_trip_Is_where_stop_I_is_within_buffer_sql = "SELECT distinct(trip_I) FROM stop_times WHERE stop_I IN (" + stops_within_buffer_query_sql + ")"
trip_Is_to_remove_sql = "SELECT trip_I FROM trips WHERE trip_I NOT IN ( " + select_all_trip_Is_where_stop_I_is_within_buffer_sql + ")"
trip_Is_to_remove = pandas.read_sql(trip_Is_to_remove_sql, db_conn)["trip_I"].values
trip_Is_to_remove_string = ",".join([str(trip_I) for trip_I in trip_Is_to_remove])
remove_all_trips_fully_outside_buffer_sql = "DELETE FROM trips WHERE trip_I IN (" + trip_Is_to_remove_string + ")"
remove_all_stop_times_where_trip_I_fully_outside_buffer_sql = "DELETE FROM stop_times WHERE trip_I IN (" + trip_Is_to_remove_string + ")"
db_conn.execute(remove_all_trips_fully_outside_buffer_sql)
db_conn.execute(remove_all_stop_times_where_trip_I_fully_outside_buffer_sql)
delete_stops_not_in_stop_times_and_not_as_parent_stop(db_conn)
db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
db_conn.execute(DELETE_DAYS_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_DAY_TRIPS2_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_CALENDAR_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)
db_conn.execute(DELETE_CALENDAR_DATES_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)
db_conn.execute(DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS)
db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)
if update_secondary_data:
update_secondary_data_copies(db_conn)
|
def remove_all_trips_fully_outside_buffer(db_conn, center_lat, center_lon, buffer_km, update_secondary_data=True):
"""
Not used in the regular filter process for the time being.
Parameters
----------
db_conn: sqlite3.Connection
connection to the GTFS object
center_lat: float
center_lon: float
buffer_km: float
"""
distance_function_str = add_wgs84_distance_function_to_db(db_conn)
stops_within_buffer_query_sql = "SELECT stop_I FROM stops WHERE CAST(" + distance_function_str + \
"(lat, lon, {lat} , {lon}) AS INT) < {d_m}"\
.format(lat=float(center_lat), lon=float(center_lon), d_m=int(1000*buffer_km))
select_all_trip_Is_where_stop_I_is_within_buffer_sql = "SELECT distinct(trip_I) FROM stop_times WHERE stop_I IN (" + stops_within_buffer_query_sql + ")"
trip_Is_to_remove_sql = "SELECT trip_I FROM trips WHERE trip_I NOT IN ( " + select_all_trip_Is_where_stop_I_is_within_buffer_sql + ")"
trip_Is_to_remove = pandas.read_sql(trip_Is_to_remove_sql, db_conn)["trip_I"].values
trip_Is_to_remove_string = ",".join([str(trip_I) for trip_I in trip_Is_to_remove])
remove_all_trips_fully_outside_buffer_sql = "DELETE FROM trips WHERE trip_I IN (" + trip_Is_to_remove_string + ")"
remove_all_stop_times_where_trip_I_fully_outside_buffer_sql = "DELETE FROM stop_times WHERE trip_I IN (" + trip_Is_to_remove_string + ")"
db_conn.execute(remove_all_trips_fully_outside_buffer_sql)
db_conn.execute(remove_all_stop_times_where_trip_I_fully_outside_buffer_sql)
delete_stops_not_in_stop_times_and_not_as_parent_stop(db_conn)
db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
db_conn.execute(DELETE_DAYS_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_DAY_TRIPS2_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_CALENDAR_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)
db_conn.execute(DELETE_CALENDAR_DATES_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)
db_conn.execute(DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS)
db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)
if update_secondary_data:
update_secondary_data_copies(db_conn)
|
[
"Not",
"used",
"in",
"the",
"regular",
"filter",
"process",
"for",
"the",
"time",
"being",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/filter.py#L495-L529
|
[
"def",
"remove_all_trips_fully_outside_buffer",
"(",
"db_conn",
",",
"center_lat",
",",
"center_lon",
",",
"buffer_km",
",",
"update_secondary_data",
"=",
"True",
")",
":",
"distance_function_str",
"=",
"add_wgs84_distance_function_to_db",
"(",
"db_conn",
")",
"stops_within_buffer_query_sql",
"=",
"\"SELECT stop_I FROM stops WHERE CAST(\"",
"+",
"distance_function_str",
"+",
"\"(lat, lon, {lat} , {lon}) AS INT) < {d_m}\"",
".",
"format",
"(",
"lat",
"=",
"float",
"(",
"center_lat",
")",
",",
"lon",
"=",
"float",
"(",
"center_lon",
")",
",",
"d_m",
"=",
"int",
"(",
"1000",
"*",
"buffer_km",
")",
")",
"select_all_trip_Is_where_stop_I_is_within_buffer_sql",
"=",
"\"SELECT distinct(trip_I) FROM stop_times WHERE stop_I IN (\"",
"+",
"stops_within_buffer_query_sql",
"+",
"\")\"",
"trip_Is_to_remove_sql",
"=",
"\"SELECT trip_I FROM trips WHERE trip_I NOT IN ( \"",
"+",
"select_all_trip_Is_where_stop_I_is_within_buffer_sql",
"+",
"\")\"",
"trip_Is_to_remove",
"=",
"pandas",
".",
"read_sql",
"(",
"trip_Is_to_remove_sql",
",",
"db_conn",
")",
"[",
"\"trip_I\"",
"]",
".",
"values",
"trip_Is_to_remove_string",
"=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"trip_I",
")",
"for",
"trip_I",
"in",
"trip_Is_to_remove",
"]",
")",
"remove_all_trips_fully_outside_buffer_sql",
"=",
"\"DELETE FROM trips WHERE trip_I IN (\"",
"+",
"trip_Is_to_remove_string",
"+",
"\")\"",
"remove_all_stop_times_where_trip_I_fully_outside_buffer_sql",
"=",
"\"DELETE FROM stop_times WHERE trip_I IN (\"",
"+",
"trip_Is_to_remove_string",
"+",
"\")\"",
"db_conn",
".",
"execute",
"(",
"remove_all_trips_fully_outside_buffer_sql",
")",
"db_conn",
".",
"execute",
"(",
"remove_all_stop_times_where_trip_I_fully_outside_buffer_sql",
")",
"delete_stops_not_in_stop_times_and_not_as_parent_stop",
"(",
"db_conn",
")",
"db_conn",
".",
"execute",
"(",
"DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL",
")",
"db_conn",
".",
"execute",
"(",
"DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL",
")",
"db_conn",
".",
"execute",
"(",
"DELETE_DAYS_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL",
")",
"db_conn",
".",
"execute",
"(",
"DELETE_DAY_TRIPS2_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL",
")",
"db_conn",
".",
"execute",
"(",
"DELETE_CALENDAR_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL",
")",
"db_conn",
".",
"execute",
"(",
"DELETE_CALENDAR_DATES_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL",
")",
"db_conn",
".",
"execute",
"(",
"DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS",
")",
"db_conn",
".",
"execute",
"(",
"DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL",
")",
"if",
"update_secondary_data",
":",
"update_secondary_data_copies",
"(",
"db_conn",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
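A hedged usage sketch for remove_all_trips_fully_outside_buffer above: it mutates the database it is given, so it should be run on a copy. The file paths and the coordinates (roughly central Helsinki) are illustrative assumptions only.

import shutil
import sqlite3

from gtfspy.filter import remove_all_trips_fully_outside_buffer

shutil.copy("helsinki.sqlite", "helsinki_10km.sqlite")  # keep the original intact
conn = sqlite3.connect("helsinki_10km.sqlite")
remove_all_trips_fully_outside_buffer(conn, center_lat=60.17, center_lon=24.94,
                                      buffer_km=10.0, update_secondary_data=True)
conn.commit()
conn.close()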
valid
|
remove_dangling_shapes
|
Remove dangling entries from the shapes directory.
Parameters
----------
db_conn: sqlite3.Connection
connection to the GTFS object
|
gtfspy/filter.py
|
def remove_dangling_shapes(db_conn):
"""
Remove dangling entries from the shapes directory.
Parameters
----------
db_conn: sqlite3.Connection
connection to the GTFS object
"""
db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL = \
"SELECT trips.trip_I, shape_id, min(shape_break) as min_shape_break, max(shape_break) as max_shape_break FROM trips, stop_times WHERE trips.trip_I=stop_times.trip_I GROUP BY trips.trip_I"
trip_min_max_shape_seqs = pandas.read_sql(SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL, db_conn)
rows = []
for row in trip_min_max_shape_seqs.itertuples():
shape_id, min_shape_break, max_shape_break = row.shape_id, row.min_shape_break, row.max_shape_break
if min_shape_break is None or max_shape_break is None:
min_shape_break = float('-inf')
max_shape_break = float('-inf')
rows.append((shape_id, min_shape_break, max_shape_break))
DELETE_SQL_BASE = "DELETE FROM shapes WHERE shape_id=? AND (seq<? OR seq>?)"
db_conn.executemany(DELETE_SQL_BASE, rows)
remove_dangling_shapes_references(db_conn)
|
def remove_dangling_shapes(db_conn):
"""
Remove dangling entries from the shapes directory.
Parameters
----------
db_conn: sqlite3.Connection
connection to the GTFS object
"""
db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL = \
"SELECT trips.trip_I, shape_id, min(shape_break) as min_shape_break, max(shape_break) as max_shape_break FROM trips, stop_times WHERE trips.trip_I=stop_times.trip_I GROUP BY trips.trip_I"
trip_min_max_shape_seqs = pandas.read_sql(SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL, db_conn)
rows = []
for row in trip_min_max_shape_seqs.itertuples():
shape_id, min_shape_break, max_shape_break = row.shape_id, row.min_shape_break, row.max_shape_break
if min_shape_break is None or max_shape_break is None:
min_shape_break = float('-inf')
max_shape_break = float('-inf')
rows.append((shape_id, min_shape_break, max_shape_break))
DELETE_SQL_BASE = "DELETE FROM shapes WHERE shape_id=? AND (seq<? OR seq>?)"
db_conn.executemany(DELETE_SQL_BASE, rows)
remove_dangling_shapes_references(db_conn)
|
[
"Remove",
"dangling",
"entries",
"from",
"the",
"shapes",
"directory",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/filter.py#L532-L555
|
[
"def",
"remove_dangling_shapes",
"(",
"db_conn",
")",
":",
"db_conn",
".",
"execute",
"(",
"DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL",
")",
"SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL",
"=",
"\"SELECT trips.trip_I, shape_id, min(shape_break) as min_shape_break, max(shape_break) as max_shape_break FROM trips, stop_times WHERE trips.trip_I=stop_times.trip_I GROUP BY trips.trip_I\"",
"trip_min_max_shape_seqs",
"=",
"pandas",
".",
"read_sql",
"(",
"SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL",
",",
"db_conn",
")",
"rows",
"=",
"[",
"]",
"for",
"row",
"in",
"trip_min_max_shape_seqs",
".",
"itertuples",
"(",
")",
":",
"shape_id",
",",
"min_shape_break",
",",
"max_shape_break",
"=",
"row",
".",
"shape_id",
",",
"row",
".",
"min_shape_break",
",",
"row",
".",
"max_shape_break",
"if",
"min_shape_break",
"is",
"None",
"or",
"max_shape_break",
"is",
"None",
":",
"min_shape_break",
"=",
"float",
"(",
"'-inf'",
")",
"max_shape_break",
"=",
"float",
"(",
"'-inf'",
")",
"rows",
".",
"append",
"(",
"(",
"shape_id",
",",
"min_shape_break",
",",
"max_shape_break",
")",
")",
"DELETE_SQL_BASE",
"=",
"\"DELETE FROM shapes WHERE shape_id=? AND (seq<? OR seq>?)\"",
"db_conn",
".",
"executemany",
"(",
"DELETE_SQL_BASE",
",",
"rows",
")",
"remove_dangling_shapes_references",
"(",
"db_conn",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
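The core of remove_dangling_shapes above is the parameterized seq-trimming DELETE. Here is a throwaway in-memory demonstration of that statement; the schema is simplified, the real shapes table has more columns.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE shapes (shape_id TEXT, seq INT)")
conn.executemany("INSERT INTO shapes VALUES (?, ?)",
                 [("s1", seq) for seq in range(10)])
# Keep only seq 2..7 for shape s1, mirroring DELETE_SQL_BASE above.
conn.execute("DELETE FROM shapes WHERE shape_id=? AND (seq<? OR seq>?)", ("s1", 2, 7))
print([seq for (seq,) in conn.execute("SELECT seq FROM shapes ORDER BY seq")])
# -> [2, 3, 4, 5, 6, 7]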
valid
|
FilterExtract._delete_rows_by_start_and_end_date
|
Removes rows from the sqlite database copy that are out of the time span defined by start_date and end_date
:param gtfs: GTFS object
:param copy_db_conn: sqlite database connection
:param start_date:
:param end_date:
:return:
|
gtfspy/filter.py
|
def _delete_rows_by_start_and_end_date(self):
"""
Removes rows from the sqlite database copy that are out of the time span defined by start_date and end_date
:param gtfs: GTFS object
:param copy_db_conn: sqlite database connection
:param start_date:
:param end_date:
:return:
"""
# filter by start_time_ut and end_date_ut:
if (self.start_date is not None) and (self.end_date is not None):
start_date_ut = self.gtfs.get_day_start_ut(self.start_date)
end_date_ut = self.gtfs.get_day_start_ut(self.end_date)
if self.copy_db_conn.execute("SELECT count(*) FROM day_trips2 WHERE start_time_ut IS null "
"OR end_time_ut IS null").fetchone() != (0,):
raise ValueError("Missing information in day_trips2 (start_time_ut and/or end_time_ut), "
"check trips.start_time_ds and trips.end_time_ds.")
logging.info("Filtering based on start_time_ut and end_time_ut")
table_to_preserve_map = {
"calendar": "start_date < date({filter_end_ut}, 'unixepoch', 'localtime') "
"AND "
"end_date >= date({filter_start_ut}, 'unixepoch', 'localtime') ",
"calendar_dates": "date >= date({filter_start_ut}, 'unixepoch', 'localtime') "
"AND "
"date < date({filter_end_ut}, 'unixepoch', 'localtime') ",
"day_trips2": 'start_time_ut < {filter_end_ut} '
'AND '
'end_time_ut > {filter_start_ut} ',
"days": "day_start_ut >= {filter_start_ut} "
"AND "
"day_start_ut < {filter_end_ut} "
}
table_to_remove_map = {key: "WHERE NOT ( " + to_preserve + " );"
for key, to_preserve in table_to_preserve_map.items() }
# Ensure that process timezone is correct as we rely on 'localtime' in the SQL statements.
GTFS(self.copy_db_conn).set_current_process_time_zone()
# remove the 'source' entries from tables
for table, query_template in table_to_remove_map.items():
param_dict = {"filter_start_ut": str(start_date_ut),
"filter_end_ut": str(end_date_ut)}
query = "DELETE FROM " + table + " " + \
query_template.format(**param_dict)
self.copy_db_conn.execute(query)
self.copy_db_conn.commit()
return FILTERED
else:
return NOT_FILTERED
|
def _delete_rows_by_start_and_end_date(self):
"""
Removes rows from the sqlite database copy that are out of the time span defined by start_date and end_date
:param gtfs: GTFS object
:param copy_db_conn: sqlite database connection
:param start_date:
:param end_date:
:return:
"""
# filter by start_time_ut and end_date_ut:
if (self.start_date is not None) and (self.end_date is not None):
start_date_ut = self.gtfs.get_day_start_ut(self.start_date)
end_date_ut = self.gtfs.get_day_start_ut(self.end_date)
if self.copy_db_conn.execute("SELECT count(*) FROM day_trips2 WHERE start_time_ut IS null "
"OR end_time_ut IS null").fetchone() != (0,):
raise ValueError("Missing information in day_trips2 (start_time_ut and/or end_time_ut), "
"check trips.start_time_ds and trips.end_time_ds.")
logging.info("Filtering based on start_time_ut and end_time_ut")
table_to_preserve_map = {
"calendar": "start_date < date({filter_end_ut}, 'unixepoch', 'localtime') "
"AND "
"end_date >= date({filter_start_ut}, 'unixepoch', 'localtime') ",
"calendar_dates": "date >= date({filter_start_ut}, 'unixepoch', 'localtime') "
"AND "
"date < date({filter_end_ut}, 'unixepoch', 'localtime') ",
"day_trips2": 'start_time_ut < {filter_end_ut} '
'AND '
'end_time_ut > {filter_start_ut} ',
"days": "day_start_ut >= {filter_start_ut} "
"AND "
"day_start_ut < {filter_end_ut} "
}
table_to_remove_map = {key: "WHERE NOT ( " + to_preserve + " );"
for key, to_preserve in table_to_preserve_map.items() }
# Ensure that process timezone is correct as we rely on 'localtime' in the SQL statements.
GTFS(self.copy_db_conn).set_current_process_time_zone()
# remove the 'source' entries from tables
for table, query_template in table_to_remove_map.items():
param_dict = {"filter_start_ut": str(start_date_ut),
"filter_end_ut": str(end_date_ut)}
query = "DELETE FROM " + table + " " + \
query_template.format(**param_dict)
self.copy_db_conn.execute(query)
self.copy_db_conn.commit()
return FILTERED
else:
return NOT_FILTERED
|
[
"Removes",
"rows",
"from",
"the",
"sqlite",
"database",
"copy",
"that",
"are",
"out",
"of",
"the",
"time",
"span",
"defined",
"by",
"start_date",
"and",
"end_date",
":",
"param",
"gtfs",
":",
"GTFS",
"object",
":",
"param",
"copy_db_conn",
":",
"sqlite",
"database",
"connection",
":",
"param",
"start_date",
":",
":",
"param",
"end_date",
":",
":",
"return",
":"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/filter.py#L152-L200
|
[
"def",
"_delete_rows_by_start_and_end_date",
"(",
"self",
")",
":",
"# filter by start_time_ut and end_date_ut:",
"if",
"(",
"self",
".",
"start_date",
"is",
"not",
"None",
")",
"and",
"(",
"self",
".",
"end_date",
"is",
"not",
"None",
")",
":",
"start_date_ut",
"=",
"self",
".",
"gtfs",
".",
"get_day_start_ut",
"(",
"self",
".",
"start_date",
")",
"end_date_ut",
"=",
"self",
".",
"gtfs",
".",
"get_day_start_ut",
"(",
"self",
".",
"end_date",
")",
"if",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"\"SELECT count(*) FROM day_trips2 WHERE start_time_ut IS null \"",
"\"OR end_time_ut IS null\"",
")",
".",
"fetchone",
"(",
")",
"!=",
"(",
"0",
",",
")",
":",
"raise",
"ValueError",
"(",
"\"Missing information in day_trips2 (start_time_ut and/or end_time_ut), \"",
"\"check trips.start_time_ds and trips.end_time_ds.\"",
")",
"logging",
".",
"info",
"(",
"\"Filtering based on start_time_ut and end_time_ut\"",
")",
"table_to_preserve_map",
"=",
"{",
"\"calendar\"",
":",
"\"start_date < date({filter_end_ut}, 'unixepoch', 'localtime') \"",
"\"AND \"",
"\"end_date >= date({filter_start_ut}, 'unixepoch', 'localtime') \"",
",",
"\"calendar_dates\"",
":",
"\"date >= date({filter_start_ut}, 'unixepoch', 'localtime') \"",
"\"AND \"",
"\"date < date({filter_end_ut}, 'unixepoch', 'localtime') \"",
",",
"\"day_trips2\"",
":",
"'start_time_ut < {filter_end_ut} '",
"'AND '",
"'end_time_ut > {filter_start_ut} '",
",",
"\"days\"",
":",
"\"day_start_ut >= {filter_start_ut} \"",
"\"AND \"",
"\"day_start_ut < {filter_end_ut} \"",
"}",
"table_to_remove_map",
"=",
"{",
"key",
":",
"\"WHERE NOT ( \"",
"+",
"to_preserve",
"+",
"\" );\"",
"for",
"key",
",",
"to_preserve",
"in",
"table_to_preserve_map",
".",
"items",
"(",
")",
"}",
"# Ensure that process timezone is correct as we rely on 'localtime' in the SQL statements.",
"GTFS",
"(",
"self",
".",
"copy_db_conn",
")",
".",
"set_current_process_time_zone",
"(",
")",
"# remove the 'source' entries from tables",
"for",
"table",
",",
"query_template",
"in",
"table_to_remove_map",
".",
"items",
"(",
")",
":",
"param_dict",
"=",
"{",
"\"filter_start_ut\"",
":",
"str",
"(",
"start_date_ut",
")",
",",
"\"filter_end_ut\"",
":",
"str",
"(",
"end_date_ut",
")",
"}",
"query",
"=",
"\"DELETE FROM \"",
"+",
"table",
"+",
"\" \"",
"+",
"query_template",
".",
"format",
"(",
"*",
"*",
"param_dict",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"query",
")",
"self",
".",
"copy_db_conn",
".",
"commit",
"(",
")",
"return",
"FILTERED",
"else",
":",
"return",
"NOT_FILTERED"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
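A small sketch of the preserve-to-remove inversion used in _delete_rows_by_start_and_end_date above: each table's keep-predicate is negated into a DELETE statement. The unixtime values are arbitrary examples.

table_to_preserve_map = {
    "days": "day_start_ut >= {filter_start_ut} AND day_start_ut < {filter_end_ut}",
}
table_to_remove_map = {table: "WHERE NOT ( " + predicate + " );"
                       for table, predicate in table_to_preserve_map.items()}
params = {"filter_start_ut": "1500000000", "filter_end_ut": "1500604800"}
for table, template in table_to_remove_map.items():
    print("DELETE FROM " + table + " " + template.format(**params))
# -> DELETE FROM days WHERE NOT ( day_start_ut >= 1500000000 AND day_start_ut < 1500604800 );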
valid
|
FilterExtract._filter_by_calendar
|
update calendar table's services
:param copy_db_conn:
:param start_date:
:param end_date:
:return:
|
gtfspy/filter.py
|
def _filter_by_calendar(self):
"""
update calendar table's services
:param copy_db_conn:
:param start_date:
:param end_date:
:return:
"""
if (self.start_date is not None) and (self.end_date is not None):
logging.info("Making date extract")
start_date_query = "UPDATE calendar " \
"SET start_date='{start_date}' " \
"WHERE start_date<'{start_date}' ".format(start_date=self.start_date)
self.copy_db_conn.execute(start_date_query)
end_date_query = "UPDATE calendar " \
"SET end_date='{end_date_to_include}' " \
"WHERE end_date>'{end_date_to_include}' " \
.format(end_date_to_include=self.end_date_to_include_str)
self.copy_db_conn.execute(end_date_query)
# then recursively delete further data:
self.copy_db_conn.execute(DELETE_TRIPS_NOT_IN_DAYS_SQL)
self.copy_db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
self.copy_db_conn.execute(DELETE_STOP_TIMES_NOT_REFERENCED_IN_TRIPS_SQL)
delete_stops_not_in_stop_times_and_not_as_parent_stop(self.copy_db_conn)
self.copy_db_conn.execute(DELETE_STOP_DISTANCE_ENTRIES_WITH_NONEXISTENT_STOPS_SQL)
self.copy_db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)
self.copy_db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)
self.copy_db_conn.commit()
return FILTERED
else:
return NOT_FILTERED
|
def _filter_by_calendar(self):
"""
update calendar table's services
:param copy_db_conn:
:param start_date:
:param end_date:
:return:
"""
if (self.start_date is not None) and (self.end_date is not None):
logging.info("Making date extract")
start_date_query = "UPDATE calendar " \
"SET start_date='{start_date}' " \
"WHERE start_date<'{start_date}' ".format(start_date=self.start_date)
self.copy_db_conn.execute(start_date_query)
end_date_query = "UPDATE calendar " \
"SET end_date='{end_date_to_include}' " \
"WHERE end_date>'{end_date_to_include}' " \
.format(end_date_to_include=self.end_date_to_include_str)
self.copy_db_conn.execute(end_date_query)
# then recursively delete further data:
self.copy_db_conn.execute(DELETE_TRIPS_NOT_IN_DAYS_SQL)
self.copy_db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
self.copy_db_conn.execute(DELETE_STOP_TIMES_NOT_REFERENCED_IN_TRIPS_SQL)
delete_stops_not_in_stop_times_and_not_as_parent_stop(self.copy_db_conn)
self.copy_db_conn.execute(DELETE_STOP_DISTANCE_ENTRIES_WITH_NONEXISTENT_STOPS_SQL)
self.copy_db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)
self.copy_db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)
self.copy_db_conn.commit()
return FILTERED
else:
return NOT_FILTERED
|
[
"update",
"calendar",
"table",
"s",
"services",
":",
"param",
"copy_db_conn",
":",
":",
"param",
"start_date",
":",
":",
"param",
"end_date",
":",
":",
"return",
":"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/filter.py#L234-L267
|
[
"def",
"_filter_by_calendar",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"start_date",
"is",
"not",
"None",
")",
"and",
"(",
"self",
".",
"end_date",
"is",
"not",
"None",
")",
":",
"logging",
".",
"info",
"(",
"\"Making date extract\"",
")",
"start_date_query",
"=",
"\"UPDATE calendar \"",
"\"SET start_date='{start_date}' \"",
"\"WHERE start_date<'{start_date}' \"",
".",
"format",
"(",
"start_date",
"=",
"self",
".",
"start_date",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"start_date_query",
")",
"end_date_query",
"=",
"\"UPDATE calendar \"",
"\"SET end_date='{end_date_to_include}' \"",
"\"WHERE end_date>'{end_date_to_include}' \"",
".",
"format",
"(",
"end_date_to_include",
"=",
"self",
".",
"end_date_to_include_str",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"end_date_query",
")",
"# then recursively delete further data:",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_TRIPS_NOT_IN_DAYS_SQL",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_STOP_TIMES_NOT_REFERENCED_IN_TRIPS_SQL",
")",
"delete_stops_not_in_stop_times_and_not_as_parent_stop",
"(",
"self",
".",
"copy_db_conn",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_STOP_DISTANCE_ENTRIES_WITH_NONEXISTENT_STOPS_SQL",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL",
")",
"self",
".",
"copy_db_conn",
".",
"commit",
"(",
")",
"return",
"FILTERED",
"else",
":",
"return",
"NOT_FILTERED"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
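A toy demonstration of the calendar clamping performed by _filter_by_calendar above: start_date is raised to the filter start and end_date lowered to the last included day, with ISO date strings comparing correctly as text. The schema is heavily simplified.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE calendar (service_I INT, start_date TEXT, end_date TEXT)")
conn.execute("INSERT INTO calendar VALUES (1, '2017-01-01', '2017-12-31')")
start, end_inclusive = "2017-09-04", "2017-09-10"
conn.execute("UPDATE calendar SET start_date=? WHERE start_date<?", (start, start))
conn.execute("UPDATE calendar SET end_date=? WHERE end_date>?", (end_inclusive, end_inclusive))
print(conn.execute("SELECT start_date, end_date FROM calendar").fetchone())
# -> ('2017-09-04', '2017-09-10')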
valid
|
FilterExtract._filter_by_agency
|
filter by agency ids
:param copy_db_conn:
:param agency_ids_to_preserve:
:return:
|
gtfspy/filter.py
|
def _filter_by_agency(self):
"""
filter by agency ids
:param copy_db_conn:
:param agency_ids_to_preserve:
:return:
"""
if self.agency_ids_to_preserve is not None:
logging.info("Filtering based on agency_ids")
agency_ids_to_preserve = list(self.agency_ids_to_preserve)
agencies = pandas.read_sql("SELECT * FROM agencies", self.copy_db_conn)
agencies_to_remove = []
for idx, row in agencies.iterrows():
if row['agency_id'] not in agency_ids_to_preserve:
agencies_to_remove.append(row['agency_id'])
for agency_id in agencies_to_remove:
self.copy_db_conn.execute('DELETE FROM agencies WHERE agency_id=?', (agency_id,))
# and remove recursively related to the agencies:
self.copy_db_conn.execute('DELETE FROM routes WHERE '
'agency_I NOT IN (SELECT agency_I FROM agencies)')
self.copy_db_conn.execute('DELETE FROM trips WHERE '
'route_I NOT IN (SELECT route_I FROM routes)')
self.copy_db_conn.execute('DELETE FROM calendar WHERE '
'service_I NOT IN (SELECT service_I FROM trips)')
self.copy_db_conn.execute('DELETE FROM calendar_dates WHERE '
'service_I NOT IN (SELECT service_I FROM trips)')
self.copy_db_conn.execute('DELETE FROM days WHERE '
'trip_I NOT IN (SELECT trip_I FROM trips)')
self.copy_db_conn.execute('DELETE FROM stop_times WHERE '
'trip_I NOT IN (SELECT trip_I FROM trips)')
self.copy_db_conn.execute('DELETE FROM shapes WHERE '
'shape_id NOT IN (SELECT shape_id FROM trips)')
self.copy_db_conn.execute('DELETE FROM day_trips2 WHERE '
'trip_I NOT IN (SELECT trip_I FROM trips)')
self.copy_db_conn.commit()
return FILTERED
else:
return NOT_FILTERED
|
def _filter_by_agency(self):
"""
filter by agency ids
:param copy_db_conn:
:param agency_ids_to_preserve:
:return:
"""
if self.agency_ids_to_preserve is not None:
logging.info("Filtering based on agency_ids")
agency_ids_to_preserve = list(self.agency_ids_to_preserve)
agencies = pandas.read_sql("SELECT * FROM agencies", self.copy_db_conn)
agencies_to_remove = []
for idx, row in agencies.iterrows():
if row['agency_id'] not in agency_ids_to_preserve:
agencies_to_remove.append(row['agency_id'])
for agency_id in agencies_to_remove:
self.copy_db_conn.execute('DELETE FROM agencies WHERE agency_id=?', (agency_id,))
# and remove recursively related to the agencies:
self.copy_db_conn.execute('DELETE FROM routes WHERE '
'agency_I NOT IN (SELECT agency_I FROM agencies)')
self.copy_db_conn.execute('DELETE FROM trips WHERE '
'route_I NOT IN (SELECT route_I FROM routes)')
self.copy_db_conn.execute('DELETE FROM calendar WHERE '
'service_I NOT IN (SELECT service_I FROM trips)')
self.copy_db_conn.execute('DELETE FROM calendar_dates WHERE '
'service_I NOT IN (SELECT service_I FROM trips)')
self.copy_db_conn.execute('DELETE FROM days WHERE '
'trip_I NOT IN (SELECT trip_I FROM trips)')
self.copy_db_conn.execute('DELETE FROM stop_times WHERE '
'trip_I NOT IN (SELECT trip_I FROM trips)')
self.copy_db_conn.execute('DELETE FROM shapes WHERE '
'shape_id NOT IN (SELECT shape_id FROM trips)')
self.copy_db_conn.execute('DELETE FROM day_trips2 WHERE '
'trip_I NOT IN (SELECT trip_I FROM trips)')
self.copy_db_conn.commit()
return FILTERED
else:
return NOT_FILTERED
|
[
"filter",
"by",
"agency",
"ids",
":",
"param",
"copy_db_conn",
":",
":",
"param",
"agency_ids_to_preserve",
":",
":",
"return",
":"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/filter.py#L269-L308
|
[
"def",
"_filter_by_agency",
"(",
"self",
")",
":",
"if",
"self",
".",
"agency_ids_to_preserve",
"is",
"not",
"None",
":",
"logging",
".",
"info",
"(",
"\"Filtering based on agency_ids\"",
")",
"agency_ids_to_preserve",
"=",
"list",
"(",
"self",
".",
"agency_ids_to_preserve",
")",
"agencies",
"=",
"pandas",
".",
"read_sql",
"(",
"\"SELECT * FROM agencies\"",
",",
"self",
".",
"copy_db_conn",
")",
"agencies_to_remove",
"=",
"[",
"]",
"for",
"idx",
",",
"row",
"in",
"agencies",
".",
"iterrows",
"(",
")",
":",
"if",
"row",
"[",
"'agency_id'",
"]",
"not",
"in",
"agency_ids_to_preserve",
":",
"agencies_to_remove",
".",
"append",
"(",
"row",
"[",
"'agency_id'",
"]",
")",
"for",
"agency_id",
"in",
"agencies_to_remove",
":",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM agencies WHERE agency_id=?'",
",",
"(",
"agency_id",
",",
")",
")",
"# and remove recursively related to the agencies:",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM routes WHERE '",
"'agency_I NOT IN (SELECT agency_I FROM agencies)'",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM trips WHERE '",
"'route_I NOT IN (SELECT route_I FROM routes)'",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM calendar WHERE '",
"'service_I NOT IN (SELECT service_I FROM trips)'",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM calendar_dates WHERE '",
"'service_I NOT IN (SELECT service_I FROM trips)'",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM days WHERE '",
"'trip_I NOT IN (SELECT trip_I FROM trips)'",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM stop_times WHERE '",
"'trip_I NOT IN (SELECT trip_I FROM trips)'",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM stop_times WHERE '",
"'trip_I NOT IN (SELECT trip_I FROM trips)'",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM shapes WHERE '",
"'shape_id NOT IN (SELECT shape_id FROM trips)'",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM day_trips2 WHERE '",
"'trip_I NOT IN (SELECT trip_I FROM trips)'",
")",
"self",
".",
"copy_db_conn",
".",
"commit",
"(",
")",
"return",
"FILTERED",
"else",
":",
"return",
"NOT_FILTERED"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
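A hedged sketch of how these private filtering steps are normally driven through the public FilterExtract interface. The agency id "HSL", the file names, and the create_filtered_copy() call are assumptions that may differ between gtfspy versions.

from gtfspy.gtfs import GTFS
from gtfspy.filter import FilterExtract

g = GTFS("fullfeed.sqlite")
FilterExtract(g, "hsl_only.sqlite",
              agency_ids_to_preserve=["HSL"]).create_filtered_copy()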
valid
|
FilterExtract._filter_spatially
|
Filter the feed based on self.buffer_distance_km from self.buffer_lon and self.buffer_lat.
1. First include all stops that are within self.buffer_distance_km from self.buffer_lon and self.buffer_lat.
2. Then include all intermediate stops that are between any of the included stop pairs with some PT trip.
3. Repeat step 2 until no more stops are to be included.
As a summary this process should get rid of PT network tendrils, but should preserve the PT network intact
at its core.
|
gtfspy/filter.py
|
def _filter_spatially(self):
"""
Filter the feed based on self.buffer_distance_km from self.buffer_lon and self.buffer_lat.
1. First include all stops that are within self.buffer_distance_km from self.buffer_lon and self.buffer_lat.
2. Then include all intermediate stops that are between any of the included stop pairs with some PT trip.
3. Repeat step 2 until no more stops are to be included.
As a summary this process should get rid of PT network tendrils, but should preserve the PT network intact
at its core.
"""
if self.buffer_lat is None or self.buffer_lon is None or self.buffer_distance_km is None:
return NOT_FILTERED
print("filtering with lat: " + str(self.buffer_lat) +
" lon: " + str(self.buffer_lon) +
" buffer distance: " + str(self.buffer_distance_km))
remove_all_trips_fully_outside_buffer(self.copy_db_conn,
self.buffer_lat,
self.buffer_lon,
self.buffer_distance_km,
update_secondary_data=False)
logging.info("Making spatial extract")
find_distance_func_name = add_wgs84_distance_function_to_db(self.copy_db_conn)
assert find_distance_func_name == "find_distance"
# select all stops that are within the buffer and have some stop_times assigned.
stop_distance_filter_sql_base = (
"SELECT DISTINCT stops.stop_I FROM stops, stop_times" +
" WHERE CAST(find_distance(lat, lon, {buffer_lat}, {buffer_lon}) AS INT) < {buffer_distance_meters}" +
" AND stops.stop_I=stop_times.stop_I"
)
stops_within_buffer_sql = stop_distance_filter_sql_base.format(
buffer_lat=float(self.buffer_lat),
buffer_lon=float(self.buffer_lon),
buffer_distance_meters=int(self.buffer_distance_km * 1000)
)
stops_within_buffer = set(row[0] for row in self.copy_db_conn.execute(stops_within_buffer_sql))
# For each trip_I, find smallest (min_seq) and largest (max_seq) stop sequence numbers that
# are within the soft buffer_distance from the buffer_lon and buffer_lat, and add them into the
# list of stops to preserve.
# Note that if a trip is OUT-IN-OUT-IN-OUT, this process preserves (at least) the part IN-OUT-IN of the trip.
# Repeat until no more stops are found.
stops_within_buffer_string = "(" +",".join(str(stop_I) for stop_I in stops_within_buffer) + ")"
trip_min_max_include_seq_sql = (
'SELECT trip_I, min(seq) AS min_seq, max(seq) AS max_seq FROM stop_times, stops '
'WHERE stop_times.stop_I = stops.stop_I '
' AND stops.stop_I IN {stop_I_list}'
' GROUP BY trip_I'
).format(stop_I_list=stops_within_buffer_string)
trip_I_min_seq_max_seq_df = pandas.read_sql(trip_min_max_include_seq_sql, self.copy_db_conn)
for trip_I_seq_row in trip_I_min_seq_max_seq_df.itertuples():
trip_I = trip_I_seq_row.trip_I
min_seq = trip_I_seq_row.min_seq
max_seq = trip_I_seq_row.max_seq
# DELETE FROM STOP_TIMES
if min_seq == max_seq:
# Only one entry in stop_times to be left, remove whole trip.
self.copy_db_conn.execute("DELETE FROM stop_times WHERE trip_I={trip_I}".format(trip_I=trip_I))
self.copy_db_conn.execute("DELETE FROM trips WHERE trip_i={trip_I}".format(trip_I=trip_I))
else:
# DELETE STOP_TIME ENTRIES BEFORE ENTERING AND AFTER DEPARTING THE BUFFER AREA
DELETE_STOP_TIME_ENTRIES_SQL = \
"DELETE FROM stop_times WHERE trip_I={trip_I} AND (seq<{min_seq} OR seq>{max_seq})"\
.format(trip_I=trip_I, max_seq=max_seq, min_seq=min_seq)
self.copy_db_conn.execute(DELETE_STOP_TIME_ENTRIES_SQL)
STOPS_NOT_WITHIN_BUFFER__FOR_TRIP_SQL = \
"SELECT seq, stop_I IN {stops_within_hard_buffer} AS within FROM stop_times WHERE trip_I={trip_I} ORDER BY seq"\
.format(stops_within_hard_buffer=stops_within_buffer_string, trip_I=trip_I)
stop_times_within_buffer_df = pandas.read_sql(STOPS_NOT_WITHIN_BUFFER__FOR_TRIP_SQL, self.copy_db_conn)
if stop_times_within_buffer_df['within'].all():
continue
else:
_split_trip(self.copy_db_conn, trip_I, stop_times_within_buffer_df)
# Delete all shapes that are not fully within the buffer to avoid shapes going outside
# the buffer area in some cases.
# This could probably be done in some more sophisticated way though (per trip)
SHAPE_IDS_NOT_WITHIN_BUFFER_SQL = \
"SELECT DISTINCT shape_id FROM SHAPES " \
"WHERE CAST(find_distance(lat, lon, {buffer_lat}, {buffer_lon}) AS INT) > {buffer_distance_meters}" \
.format(buffer_lat=self.buffer_lat,
buffer_lon=self.buffer_lon,
buffer_distance_meters=self.buffer_distance_km * 1000)
DELETE_ALL_SHAPE_IDS_NOT_WITHIN_BUFFER_SQL = "DELETE FROM shapes WHERE shape_id IN (" \
+ SHAPE_IDS_NOT_WITHIN_BUFFER_SQL + ")"
self.copy_db_conn.execute(DELETE_ALL_SHAPE_IDS_NOT_WITHIN_BUFFER_SQL)
SET_SHAPE_ID_TO_NULL_FOR_HARD_BUFFER_FILTERED_SHAPE_IDS = \
"UPDATE trips SET shape_id=NULL WHERE trips.shape_id IN (" + SHAPE_IDS_NOT_WITHIN_BUFFER_SQL + ")"
self.copy_db_conn.execute(SET_SHAPE_ID_TO_NULL_FOR_HARD_BUFFER_FILTERED_SHAPE_IDS)
# Delete trips with only one stop
self.copy_db_conn.execute('DELETE FROM stop_times WHERE '
'trip_I IN (SELECT trip_I FROM '
'(SELECT trip_I, count(*) AS N_stops from stop_times '
'GROUP BY trip_I) q1 '
'WHERE N_stops = 1)')
# Delete trips with only one stop but several instances in stop_times
self.copy_db_conn.execute('DELETE FROM stop_times WHERE '
'trip_I IN (SELECT q1.trip_I AS trip_I FROM '
'(SELECT trip_I, stop_I, count(*) AS stops_per_stop FROM stop_times '
'GROUP BY trip_I, stop_I) q1, '
'(SELECT trip_I, count(*) as n_stops FROM stop_times '
'GROUP BY trip_I) q2 '
'WHERE q1.trip_I = q2.trip_I AND n_stops = stops_per_stop)')
# Delete all stop_times for uncovered stops
delete_stops_not_in_stop_times_and_not_as_parent_stop(self.copy_db_conn)
# Consecutively delete all the rest remaining.
self.copy_db_conn.execute(DELETE_TRIPS_NOT_REFERENCED_IN_STOP_TIMES)
self.copy_db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)
self.copy_db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)
self.copy_db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
self.copy_db_conn.execute(DELETE_STOP_DISTANCE_ENTRIES_WITH_NONEXISTENT_STOPS_SQL)
self.copy_db_conn.execute(DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS)
remove_dangling_shapes(self.copy_db_conn)
self.copy_db_conn.commit()
return FILTERED
|
def _filter_spatially(self):
"""
Filter the feed based on self.buffer_distance_km from self.buffer_lon and self.buffer_lat.
1. First include all stops that are within self.buffer_distance_km from self.buffer_lon and self.buffer_lat.
2. Then include all intermediate stops that are between any of the included stop pairs with some PT trip.
3. Repeat step 2 until no more stops are to be included.
As a summary this process should get rid of PT network tendrils, but should preserve the PT network intact
at its core.
"""
if self.buffer_lat is None or self.buffer_lon is None or self.buffer_distance_km is None:
return NOT_FILTERED
print("filtering with lat: " + str(self.buffer_lat) +
" lon: " + str(self.buffer_lon) +
" buffer distance: " + str(self.buffer_distance_km))
remove_all_trips_fully_outside_buffer(self.copy_db_conn,
self.buffer_lat,
self.buffer_lon,
self.buffer_distance_km,
update_secondary_data=False)
logging.info("Making spatial extract")
find_distance_func_name = add_wgs84_distance_function_to_db(self.copy_db_conn)
assert find_distance_func_name == "find_distance"
# select all stops that are within the buffer and have some stop_times assigned.
stop_distance_filter_sql_base = (
"SELECT DISTINCT stops.stop_I FROM stops, stop_times" +
" WHERE CAST(find_distance(lat, lon, {buffer_lat}, {buffer_lon}) AS INT) < {buffer_distance_meters}" +
" AND stops.stop_I=stop_times.stop_I"
)
stops_within_buffer_sql = stop_distance_filter_sql_base.format(
buffer_lat=float(self.buffer_lat),
buffer_lon=float(self.buffer_lon),
buffer_distance_meters=int(self.buffer_distance_km * 1000)
)
stops_within_buffer = set(row[0] for row in self.copy_db_conn.execute(stops_within_buffer_sql))
# For each trip_I, find smallest (min_seq) and largest (max_seq) stop sequence numbers that
# are within the soft buffer_distance from the buffer_lon and buffer_lat, and add them into the
# list of stops to preserve.
# Note that if a trip is OUT-IN-OUT-IN-OUT, this process preserves (at least) the part IN-OUT-IN of the trip.
# Repeat until no more stops are found.
stops_within_buffer_string = "(" +",".join(str(stop_I) for stop_I in stops_within_buffer) + ")"
trip_min_max_include_seq_sql = (
'SELECT trip_I, min(seq) AS min_seq, max(seq) AS max_seq FROM stop_times, stops '
'WHERE stop_times.stop_I = stops.stop_I '
' AND stops.stop_I IN {stop_I_list}'
' GROUP BY trip_I'
).format(stop_I_list=stops_within_buffer_string)
trip_I_min_seq_max_seq_df = pandas.read_sql(trip_min_max_include_seq_sql, self.copy_db_conn)
for trip_I_seq_row in trip_I_min_seq_max_seq_df.itertuples():
trip_I = trip_I_seq_row.trip_I
min_seq = trip_I_seq_row.min_seq
max_seq = trip_I_seq_row.max_seq
# DELETE FROM STOP_TIMES
if min_seq == max_seq:
# Only one entry in stop_times to be left, remove whole trip.
self.copy_db_conn.execute("DELETE FROM stop_times WHERE trip_I={trip_I}".format(trip_I=trip_I))
self.copy_db_conn.execute("DELETE FROM trips WHERE trip_i={trip_I}".format(trip_I=trip_I))
else:
# DELETE STOP_TIME ENTRIES BEFORE ENTERING AND AFTER DEPARTING THE BUFFER AREA
DELETE_STOP_TIME_ENTRIES_SQL = \
"DELETE FROM stop_times WHERE trip_I={trip_I} AND (seq<{min_seq} OR seq>{max_seq})"\
.format(trip_I=trip_I, max_seq=max_seq, min_seq=min_seq)
self.copy_db_conn.execute(DELETE_STOP_TIME_ENTRIES_SQL)
STOPS_NOT_WITHIN_BUFFER__FOR_TRIP_SQL = \
"SELECT seq, stop_I IN {stops_within_hard_buffer} AS within FROM stop_times WHERE trip_I={trip_I} ORDER BY seq"\
.format(stops_within_hard_buffer=stops_within_buffer_string, trip_I=trip_I)
stop_times_within_buffer_df = pandas.read_sql(STOPS_NOT_WITHIN_BUFFER__FOR_TRIP_SQL, self.copy_db_conn)
if stop_times_within_buffer_df['within'].all():
continue
else:
_split_trip(self.copy_db_conn, trip_I, stop_times_within_buffer_df)
# Delete all shapes that are not fully within the buffer to avoid shapes going outside
# the buffer area in some cases.
# This could probably be done in some more sophisticated way though (per trip)
SHAPE_IDS_NOT_WITHIN_BUFFER_SQL = \
"SELECT DISTINCT shape_id FROM SHAPES " \
"WHERE CAST(find_distance(lat, lon, {buffer_lat}, {buffer_lon}) AS INT) > {buffer_distance_meters}" \
.format(buffer_lat=self.buffer_lat,
buffer_lon=self.buffer_lon,
buffer_distance_meters=self.buffer_distance_km * 1000)
DELETE_ALL_SHAPE_IDS_NOT_WITHIN_BUFFER_SQL = "DELETE FROM shapes WHERE shape_id IN (" \
+ SHAPE_IDS_NOT_WITHIN_BUFFER_SQL + ")"
self.copy_db_conn.execute(DELETE_ALL_SHAPE_IDS_NOT_WITHIN_BUFFER_SQL)
SET_SHAPE_ID_TO_NULL_FOR_HARD_BUFFER_FILTERED_SHAPE_IDS = \
"UPDATE trips SET shape_id=NULL WHERE trips.shape_id IN (" + SHAPE_IDS_NOT_WITHIN_BUFFER_SQL + ")"
self.copy_db_conn.execute(SET_SHAPE_ID_TO_NULL_FOR_HARD_BUFFER_FILTERED_SHAPE_IDS)
# Delete trips with only one stop
self.copy_db_conn.execute('DELETE FROM stop_times WHERE '
'trip_I IN (SELECT trip_I FROM '
'(SELECT trip_I, count(*) AS N_stops from stop_times '
'GROUP BY trip_I) q1 '
'WHERE N_stops = 1)')
# Delete trips with only one stop but several instances in stop_times
self.copy_db_conn.execute('DELETE FROM stop_times WHERE '
'trip_I IN (SELECT q1.trip_I AS trip_I FROM '
'(SELECT trip_I, stop_I, count(*) AS stops_per_stop FROM stop_times '
'GROUP BY trip_I, stop_I) q1, '
'(SELECT trip_I, count(*) as n_stops FROM stop_times '
'GROUP BY trip_I) q2 '
'WHERE q1.trip_I = q2.trip_I AND n_stops = stops_per_stop)')
# Delete all stop_times for uncovered stops
delete_stops_not_in_stop_times_and_not_as_parent_stop(self.copy_db_conn)
# Consecutively delete all the rest remaining.
self.copy_db_conn.execute(DELETE_TRIPS_NOT_REFERENCED_IN_STOP_TIMES)
self.copy_db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)
self.copy_db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)
self.copy_db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
self.copy_db_conn.execute(DELETE_STOP_DISTANCE_ENTRIES_WITH_NONEXISTENT_STOPS_SQL)
self.copy_db_conn.execute(DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS)
remove_dangling_shapes(self.copy_db_conn)
self.copy_db_conn.commit()
return FILTERED
|
[
"Filter",
"the",
"feed",
"based",
"on",
"self",
".",
"buffer_distance_km",
"from",
"self",
".",
"buffer_lon",
"and",
"self",
".",
"buffer_lat",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/filter.py#L310-L435
|
[
"def",
"_filter_spatially",
"(",
"self",
")",
":",
"if",
"self",
".",
"buffer_lat",
"is",
"None",
"or",
"self",
".",
"buffer_lon",
"is",
"None",
"or",
"self",
".",
"buffer_distance_km",
"is",
"None",
":",
"return",
"NOT_FILTERED",
"print",
"(",
"\"filtering with lat: \"",
"+",
"str",
"(",
"self",
".",
"buffer_lat",
")",
"+",
"\" lon: \"",
"+",
"str",
"(",
"self",
".",
"buffer_lon",
")",
"+",
"\" buffer distance: \"",
"+",
"str",
"(",
"self",
".",
"buffer_distance_km",
")",
")",
"remove_all_trips_fully_outside_buffer",
"(",
"self",
".",
"copy_db_conn",
",",
"self",
".",
"buffer_lat",
",",
"self",
".",
"buffer_lon",
",",
"self",
".",
"buffer_distance_km",
",",
"update_secondary_data",
"=",
"False",
")",
"logging",
".",
"info",
"(",
"\"Making spatial extract\"",
")",
"find_distance_func_name",
"=",
"add_wgs84_distance_function_to_db",
"(",
"self",
".",
"copy_db_conn",
")",
"assert",
"find_distance_func_name",
"==",
"\"find_distance\"",
"# select all stops that are within the buffer and have some stop_times assigned.",
"stop_distance_filter_sql_base",
"=",
"(",
"\"SELECT DISTINCT stops.stop_I FROM stops, stop_times\"",
"+",
"\" WHERE CAST(find_distance(lat, lon, {buffer_lat}, {buffer_lon}) AS INT) < {buffer_distance_meters}\"",
"+",
"\" AND stops.stop_I=stop_times.stop_I\"",
")",
"stops_within_buffer_sql",
"=",
"stop_distance_filter_sql_base",
".",
"format",
"(",
"buffer_lat",
"=",
"float",
"(",
"self",
".",
"buffer_lat",
")",
",",
"buffer_lon",
"=",
"float",
"(",
"self",
".",
"buffer_lon",
")",
",",
"buffer_distance_meters",
"=",
"int",
"(",
"self",
".",
"buffer_distance_km",
"*",
"1000",
")",
")",
"stops_within_buffer",
"=",
"set",
"(",
"row",
"[",
"0",
"]",
"for",
"row",
"in",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"stops_within_buffer_sql",
")",
")",
"# For each trip_I, find smallest (min_seq) and largest (max_seq) stop sequence numbers that",
"# are within the soft buffer_distance from the buffer_lon and buffer_lat, and add them into the",
"# list of stops to preserve.",
"# Note that if a trip is OUT-IN-OUT-IN-OUT, this process preserves (at least) the part IN-OUT-IN of the trip.",
"# Repeat until no more stops are found.",
"stops_within_buffer_string",
"=",
"\"(\"",
"+",
"\",\"",
".",
"join",
"(",
"str",
"(",
"stop_I",
")",
"for",
"stop_I",
"in",
"stops_within_buffer",
")",
"+",
"\")\"",
"trip_min_max_include_seq_sql",
"=",
"(",
"'SELECT trip_I, min(seq) AS min_seq, max(seq) AS max_seq FROM stop_times, stops '",
"'WHERE stop_times.stop_I = stops.stop_I '",
"' AND stops.stop_I IN {stop_I_list}'",
"' GROUP BY trip_I'",
")",
".",
"format",
"(",
"stop_I_list",
"=",
"stops_within_buffer_string",
")",
"trip_I_min_seq_max_seq_df",
"=",
"pandas",
".",
"read_sql",
"(",
"trip_min_max_include_seq_sql",
",",
"self",
".",
"copy_db_conn",
")",
"for",
"trip_I_seq_row",
"in",
"trip_I_min_seq_max_seq_df",
".",
"itertuples",
"(",
")",
":",
"trip_I",
"=",
"trip_I_seq_row",
".",
"trip_I",
"min_seq",
"=",
"trip_I_seq_row",
".",
"min_seq",
"max_seq",
"=",
"trip_I_seq_row",
".",
"max_seq",
"# DELETE FROM STOP_TIMES",
"if",
"min_seq",
"==",
"max_seq",
":",
"# Only one entry in stop_times to be left, remove whole trip.",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"\"DELETE FROM stop_times WHERE trip_I={trip_I}\"",
".",
"format",
"(",
"trip_I",
"=",
"trip_I",
")",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"\"DELETE FROM trips WHERE trip_i={trip_I}\"",
".",
"format",
"(",
"trip_I",
"=",
"trip_I",
")",
")",
"else",
":",
"# DELETE STOP_TIME ENTRIES BEFORE ENTERING AND AFTER DEPARTING THE BUFFER AREA",
"DELETE_STOP_TIME_ENTRIES_SQL",
"=",
"\"DELETE FROM stop_times WHERE trip_I={trip_I} AND (seq<{min_seq} OR seq>{max_seq})\"",
".",
"format",
"(",
"trip_I",
"=",
"trip_I",
",",
"max_seq",
"=",
"max_seq",
",",
"min_seq",
"=",
"min_seq",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_STOP_TIME_ENTRIES_SQL",
")",
"STOPS_NOT_WITHIN_BUFFER__FOR_TRIP_SQL",
"=",
"\"SELECT seq, stop_I IN {stops_within_hard_buffer} AS within FROM stop_times WHERE trip_I={trip_I} ORDER BY seq\"",
".",
"format",
"(",
"stops_within_hard_buffer",
"=",
"stops_within_buffer_string",
",",
"trip_I",
"=",
"trip_I",
")",
"stop_times_within_buffer_df",
"=",
"pandas",
".",
"read_sql",
"(",
"STOPS_NOT_WITHIN_BUFFER__FOR_TRIP_SQL",
",",
"self",
".",
"copy_db_conn",
")",
"if",
"stop_times_within_buffer_df",
"[",
"'within'",
"]",
".",
"all",
"(",
")",
":",
"continue",
"else",
":",
"_split_trip",
"(",
"self",
".",
"copy_db_conn",
",",
"trip_I",
",",
"stop_times_within_buffer_df",
")",
"# Delete all shapes that are not fully within the buffer to avoid shapes going outside",
"# the buffer area in a some cases.",
"# This could probably be done in some more sophisticated way though (per trip)",
"SHAPE_IDS_NOT_WITHIN_BUFFER_SQL",
"=",
"\"SELECT DISTINCT shape_id FROM SHAPES \"",
"\"WHERE CAST(find_distance(lat, lon, {buffer_lat}, {buffer_lon}) AS INT) > {buffer_distance_meters}\"",
".",
"format",
"(",
"buffer_lat",
"=",
"self",
".",
"buffer_lat",
",",
"buffer_lon",
"=",
"self",
".",
"buffer_lon",
",",
"buffer_distance_meters",
"=",
"self",
".",
"buffer_distance_km",
"*",
"1000",
")",
"DELETE_ALL_SHAPE_IDS_NOT_WITHIN_BUFFER_SQL",
"=",
"\"DELETE FROM shapes WHERE shape_id IN (\"",
"+",
"SHAPE_IDS_NOT_WITHIN_BUFFER_SQL",
"+",
"\")\"",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_ALL_SHAPE_IDS_NOT_WITHIN_BUFFER_SQL",
")",
"SET_SHAPE_ID_TO_NULL_FOR_HARD_BUFFER_FILTERED_SHAPE_IDS",
"=",
"\"UPDATE trips SET shape_id=NULL WHERE trips.shape_id IN (\"",
"+",
"SHAPE_IDS_NOT_WITHIN_BUFFER_SQL",
"+",
"\")\"",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"SET_SHAPE_ID_TO_NULL_FOR_HARD_BUFFER_FILTERED_SHAPE_IDS",
")",
"# Delete trips with only one stop",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM stop_times WHERE '",
"'trip_I IN (SELECT trip_I FROM '",
"'(SELECT trip_I, count(*) AS N_stops from stop_times '",
"'GROUP BY trip_I) q1 '",
"'WHERE N_stops = 1)'",
")",
"# Delete trips with only one stop but several instances in stop_times",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"'DELETE FROM stop_times WHERE '",
"'trip_I IN (SELECT q1.trip_I AS trip_I FROM '",
"'(SELECT trip_I, stop_I, count(*) AS stops_per_stop FROM stop_times '",
"'GROUP BY trip_I, stop_I) q1, '",
"'(SELECT trip_I, count(*) as n_stops FROM stop_times '",
"'GROUP BY trip_I) q2 '",
"'WHERE q1.trip_I = q2.trip_I AND n_stops = stops_per_stop)'",
")",
"# Delete all stop_times for uncovered stops",
"delete_stops_not_in_stop_times_and_not_as_parent_stop",
"(",
"self",
".",
"copy_db_conn",
")",
"# Consecutively delete all the rest remaining.",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_TRIPS_NOT_REFERENCED_IN_STOP_TIMES",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_STOP_DISTANCE_ENTRIES_WITH_NONEXISTENT_STOPS_SQL",
")",
"self",
".",
"copy_db_conn",
".",
"execute",
"(",
"DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS",
")",
"remove_dangling_shapes",
"(",
"self",
".",
"copy_db_conn",
")",
"self",
".",
"copy_db_conn",
".",
"commit",
"(",
")",
"return",
"FILTERED"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
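A toy walk-through of the per-trip logic in _filter_spatially above: each trip is trimmed to the sequence range between its first and last in-buffer stop, and trips that still contain out-of-buffer stops inside that range are flagged for splitting. Stop ids and sequences are made up.

stops_within_buffer = {2, 3, 5}
trip_stop_seq = [(1, 1), (2, 2), (3, 4), (4, 3), (5, 5)]  # (seq, stop_I) pairs

in_buffer_seqs = [seq for seq, stop in trip_stop_seq if stop in stops_within_buffer]
min_seq, max_seq = min(in_buffer_seqs), max(in_buffer_seqs)
kept = [(seq, stop) for seq, stop in trip_stop_seq if min_seq <= seq <= max_seq]
needs_split = any(stop not in stops_within_buffer for _, stop in kept)
print(min_seq, max_seq, needs_split)
# -> 2 5 True  (stop 4 lies outside the buffer mid-trip, so the trip gets split)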
valid
|
compute_pseudo_connections
|
Given a set of transit events and the static walk network,
"transform" the static walking network into a set of "pseudo-connections".
As a first approximation, we add pseudo-connections to depart after each arrival of a transit connection
to its arrival stop.
Parameters
----------
transit_connections: list[Connection]
start_time_dep : int
start time in unixtime seconds
end_time_dep: int
end time in unixtime seconds (no new connections will be scanned after this time)
transfer_margin: int
extra margin required for transfers in seconds
walk_speed: float
walking speed between stops in meters / second
walk_network: networkx.Graph
each edge should have the walking distance as a data attribute ("d_walk") expressed in meters
Returns
-------
pseudo_connections: set[Connection]
|
gtfspy/routing/pseudo_connections.py
|
def compute_pseudo_connections(transit_connections, start_time_dep,
end_time_dep, transfer_margin,
walk_network, walk_speed):
"""
Given a set of transit events and the static walk network,
"transform" the static walking network into a set of "pseudo-connections".
As a first approximation, we add pseudo-connections to depart after each arrival of a transit connection
to its arrival stop.
Parameters
----------
transit_connections: list[Connection]
start_time_dep : int
start time in unixtime seconds
end_time_dep: int
end time in unixtime seconds (no new connections will be scanned after this time)
transfer_margin: int
extra margin required for transfers in seconds
walk_speed: float
walking speed between stops in meters / second
walk_network: networkx.Graph
each edge should have the walking distance as a data attribute ("d_walk") expressed in meters
Returns
-------
pseudo_connections: set[Connection]
"""
# A pseudo-connection should be created after (each) arrival to a transit_connection's arrival stop.
pseudo_connection_set = set() # use a set to ignore possible duplicates
for c in transit_connections:
if start_time_dep <= c.departure_time <= end_time_dep:
walk_arr_stop = c.departure_stop
walk_arr_time = c.departure_time - transfer_margin
for _, walk_dep_stop, data in walk_network.edges(nbunch=[walk_arr_stop], data=True):
walk_dep_time = walk_arr_time - data['d_walk'] / float(walk_speed)
if walk_dep_time > end_time_dep or walk_dep_time < start_time_dep:
continue
pseudo_connection = Connection(walk_dep_stop,
walk_arr_stop,
walk_dep_time,
walk_arr_time,
Connection.WALK_TRIP_ID,
Connection.WALK_SEQ,
is_walk=True)
pseudo_connection_set.add(pseudo_connection)
return pseudo_connection_set
|
def compute_pseudo_connections(transit_connections, start_time_dep,
end_time_dep, transfer_margin,
walk_network, walk_speed):
"""
Given a set of transit events and the static walk network,
"transform" the static walking network into a set of "pseudo-connections".
As a first approximation, we add pseudo-connections to depart after each arrival of a transit connection
to its arrival stop.
Parameters
----------
transit_connections: list[Connection]
start_time_dep : int
start time in unixtime seconds
end_time_dep: int
end time in unixtime seconds (no new connections will be scanned after this time)
transfer_margin: int
extra margin required for transfers in seconds
walk_speed: float
walking speed between stops in meters / second
walk_network: networkx.Graph
each edge should have the walking distance as a data attribute ("d_walk") expressed in meters
Returns
-------
pseudo_connections: set[Connection]
"""
# A pseudo-connection should be created after (each) arrival to a transit_connection's arrival stop.
pseudo_connection_set = set() # use a set to ignore possible duplicates
for c in transit_connections:
if start_time_dep <= c.departure_time <= end_time_dep:
walk_arr_stop = c.departure_stop
walk_arr_time = c.departure_time - transfer_margin
for _, walk_dep_stop, data in walk_network.edges(nbunch=[walk_arr_stop], data=True):
walk_dep_time = walk_arr_time - data['d_walk'] / float(walk_speed)
if walk_dep_time > end_time_dep or walk_dep_time < start_time_dep:
continue
pseudo_connection = Connection(walk_dep_stop,
walk_arr_stop,
walk_dep_time,
walk_arr_time,
Connection.WALK_TRIP_ID,
Connection.WALK_SEQ,
is_walk=True)
pseudo_connection_set.add(pseudo_connection)
return pseudo_connection_set
|
[
"Given",
"a",
"set",
"of",
"transit",
"events",
"and",
"the",
"static",
"walk",
"network",
"transform",
"the",
"static",
"walking",
"network",
"into",
"a",
"set",
"of",
"pseudo",
"-",
"connections",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/pseudo_connections.py#L4-L50
|
[
"def",
"compute_pseudo_connections",
"(",
"transit_connections",
",",
"start_time_dep",
",",
"end_time_dep",
",",
"transfer_margin",
",",
"walk_network",
",",
"walk_speed",
")",
":",
"# A pseudo-connection should be created after (each) arrival to a transit_connection's arrival stop.",
"pseudo_connection_set",
"=",
"set",
"(",
")",
"# use a set to ignore possible duplicates",
"for",
"c",
"in",
"transit_connections",
":",
"if",
"start_time_dep",
"<=",
"c",
".",
"departure_time",
"<=",
"end_time_dep",
":",
"walk_arr_stop",
"=",
"c",
".",
"departure_stop",
"walk_arr_time",
"=",
"c",
".",
"departure_time",
"-",
"transfer_margin",
"for",
"_",
",",
"walk_dep_stop",
",",
"data",
"in",
"walk_network",
".",
"edges",
"(",
"nbunch",
"=",
"[",
"walk_arr_stop",
"]",
",",
"data",
"=",
"True",
")",
":",
"walk_dep_time",
"=",
"walk_arr_time",
"-",
"data",
"[",
"'d_walk'",
"]",
"/",
"float",
"(",
"walk_speed",
")",
"if",
"walk_dep_time",
">",
"end_time_dep",
"or",
"walk_dep_time",
"<",
"start_time_dep",
":",
"continue",
"pseudo_connection",
"=",
"Connection",
"(",
"walk_dep_stop",
",",
"walk_arr_stop",
",",
"walk_dep_time",
",",
"walk_arr_time",
",",
"Connection",
".",
"WALK_TRIP_ID",
",",
"Connection",
".",
"WALK_SEQ",
",",
"is_walk",
"=",
"True",
")",
"pseudo_connection_set",
".",
"add",
"(",
"pseudo_connection",
")",
"return",
"pseudo_connection_set"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
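A toy recomputation of the walk timing inside compute_pseudo_connections above: a pseudo-connection must arrive transfer_margin seconds before the transit departure, so its own departure is backed off by the walking time d_walk / walk_speed. All numbers are illustrative.

transfer_margin = 120               # seconds
walk_speed = 70.0 / 60.0            # meters per second
d_walk = 350.0                      # meters between the two stops
transit_departure_time = 1_000_000  # unixtime, arbitrary

walk_arr_time = transit_departure_time - transfer_margin
walk_dep_time = walk_arr_time - d_walk / walk_speed
print(walk_dep_time, walk_arr_time)  # -> 999580.0 999880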
valid
|
SpreadingStop.get_min_visit_time
|
Get the earliest visit time of the stop.
|
gtfspy/spreading/spreading_stop.py
|
def get_min_visit_time(self):
"""
Get the earliest visit time of the stop.
"""
if not self.visit_events:
return float('inf')
else:
return min(self.visit_events, key=lambda event: event.arr_time_ut).arr_time_ut
|
def get_min_visit_time(self):
"""
Get the earliest visit time of the stop.
"""
if not self.visit_events:
return float('inf')
else:
return min(self.visit_events, key=lambda event: event.arr_time_ut).arr_time_ut
|
[
"Get",
"the",
"earliest",
"visit",
"time",
"of",
"the",
"stop",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/spreading/spreading_stop.py#L8-L15
|
[
"def",
"get_min_visit_time",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"visit_events",
":",
"return",
"float",
"(",
"'inf'",
")",
"else",
":",
"return",
"min",
"(",
"self",
".",
"visit_events",
",",
"key",
"=",
"lambda",
"event",
":",
"event",
".",
"arr_time_ut",
")",
".",
"arr_time_ut"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
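A quick check of the earliest-visit rule in get_min_visit_time above, using a stand-in Event namedtuple; the field name arr_time_ut matches what this class consumes, but the values are made up.

from collections import namedtuple

Event = namedtuple("Event", ["arr_time_ut", "trip_I"])
visit_events = [Event(180, 1), Event(120, 2), Event(240, 1)]
print(min(visit_events, key=lambda e: e.arr_time_ut).arr_time_ut)  # -> 120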
valid
|
SpreadingStop.visit
|
Visit the stop if it has not been visited already by an event with
earlier arr_time_ut (or with another trip that does not require a transfer)
Parameters
----------
event : Event
an instance of the Event (namedtuple)
Returns
-------
visited : bool
if visit is stored, returns True, otherwise False
|
gtfspy/spreading/spreading_stop.py
|
def visit(self, event):
"""
Visit the stop if it has not been visited already by an event with
earlier arr_time_ut (or with other trip that does not require a transfer)
Parameters
----------
event : Event
an instance of the Event (namedtuple)
Returns
-------
visited : bool
if visit is stored, returns True, otherwise False
"""
to_visit = False
if event.arr_time_ut <= self.min_transfer_time+self.get_min_visit_time():
to_visit = True
else:
for ve in self.visit_events:
if (event.trip_I == ve.trip_I) and event.arr_time_ut < ve.arr_time_ut:
to_visit = True
if to_visit:
self.visit_events.append(event)
min_time = self.get_min_visit_time()
# remove any visits that are 'too old'
self.visit_events = [v for v in self.visit_events if v.arr_time_ut <= min_time+self.min_transfer_time]
return to_visit
|
def visit(self, event):
"""
Visit the stop if it has not been visited already by an event with
earlier arr_time_ut (or with other trip that does not require a transfer)
Parameters
----------
event : Event
an instance of the Event (namedtuple)
Returns
-------
visited : bool
if visit is stored, returns True, otherwise False
"""
to_visit = False
if event.arr_time_ut <= self.min_transfer_time+self.get_min_visit_time():
to_visit = True
else:
for ve in self.visit_events:
if (event.trip_I == ve.trip_I) and event.arr_time_ut < ve.arr_time_ut:
to_visit = True
if to_visit:
self.visit_events.append(event)
min_time = self.get_min_visit_time()
# remove any visits that are 'too old'
self.visit_events = [v for v in self.visit_events if v.arr_time_ut <= min_time+self.min_transfer_time]
return to_visit
|
[
"Visit",
"the",
"stop",
"if",
"it",
"has",
"not",
"been",
"visited",
"already",
"by",
"an",
"event",
"with",
"earlier",
"arr_time_ut",
"(",
"or",
"with",
"other",
"trip",
"that",
"does",
"not",
"require",
"a",
"transfer",
")"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/spreading/spreading_stop.py#L23-L51
|
[
"def",
"visit",
"(",
"self",
",",
"event",
")",
":",
"to_visit",
"=",
"False",
"if",
"event",
".",
"arr_time_ut",
"<=",
"self",
".",
"min_transfer_time",
"+",
"self",
".",
"get_min_visit_time",
"(",
")",
":",
"to_visit",
"=",
"True",
"else",
":",
"for",
"ve",
"in",
"self",
".",
"visit_events",
":",
"if",
"(",
"event",
".",
"trip_I",
"==",
"ve",
".",
"trip_I",
")",
"and",
"event",
".",
"arr_time_ut",
"<",
"ve",
".",
"arr_time_ut",
":",
"to_visit",
"=",
"True",
"if",
"to_visit",
":",
"self",
".",
"visit_events",
".",
"append",
"(",
"event",
")",
"min_time",
"=",
"self",
".",
"get_min_visit_time",
"(",
")",
"# remove any visits that are 'too old'",
"self",
".",
"visit_events",
"=",
"[",
"v",
"for",
"v",
"in",
"self",
".",
"visit_events",
"if",
"v",
".",
"arr_time_ut",
"<=",
"min_time",
"+",
"self",
".",
"min_transfer_time",
"]",
"return",
"to_visit"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
valid
|
SpreadingStop.can_infect
|
Whether the spreading stop can infect using this event.
|
gtfspy/spreading/spreading_stop.py
|
def can_infect(self, event):
"""
Whether the spreading stop can infect using this event.
"""
if event.from_stop_I != self.stop_I:
return False
if not self.has_been_visited():
return False
else:
time_sep = event.dep_time_ut-self.get_min_visit_time()
# if the gap between the earliest visit_time and current time is
# smaller than the min. transfer time, the stop can pass the spreading
# forward
if (time_sep >= self.min_transfer_time) or (event.trip_I == -1 and time_sep >= 0):
return True
else:
for visit in self.visit_events:
# if no transfer, please hop-on
if (event.trip_I == visit.trip_I) and (time_sep >= 0):
return True
return False
|
def can_infect(self, event):
"""
Whether the spreading stop can infect using this event.
"""
if event.from_stop_I != self.stop_I:
return False
if not self.has_been_visited():
return False
else:
time_sep = event.dep_time_ut-self.get_min_visit_time()
# if the gap between the earliest visit_time and current time is
# smaller than the min. transfer time, the stop can pass the spreading
# forward
if (time_sep >= self.min_transfer_time) or (event.trip_I == -1 and time_sep >= 0):
return True
else:
for visit in self.visit_events:
# if no transfer, please hop-on
if (event.trip_I == visit.trip_I) and (time_sep >= 0):
return True
return False
|
[
"Whether",
"the",
"spreading",
"stop",
"can",
"infect",
"using",
"this",
"event",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/spreading/spreading_stop.py#L56-L77
|
[
"def",
"can_infect",
"(",
"self",
",",
"event",
")",
":",
"if",
"event",
".",
"from_stop_I",
"!=",
"self",
".",
"stop_I",
":",
"return",
"False",
"if",
"not",
"self",
".",
"has_been_visited",
"(",
")",
":",
"return",
"False",
"else",
":",
"time_sep",
"=",
"event",
".",
"dep_time_ut",
"-",
"self",
".",
"get_min_visit_time",
"(",
")",
"# if the gap between the earliest visit_time and current time is",
"# smaller than the min. transfer time, the stop can pass the spreading",
"# forward",
"if",
"(",
"time_sep",
">=",
"self",
".",
"min_transfer_time",
")",
"or",
"(",
"event",
".",
"trip_I",
"==",
"-",
"1",
"and",
"time_sep",
">=",
"0",
")",
":",
"return",
"True",
"else",
":",
"for",
"visit",
"in",
"self",
".",
"visit_events",
":",
"# if no transfer, please hop-on",
"if",
"(",
"event",
".",
"trip_I",
"==",
"visit",
".",
"trip_I",
")",
"and",
"(",
"time_sep",
">=",
"0",
")",
":",
"return",
"True",
"return",
"False"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
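SpreadingStop.visit accepts an event if it arrives within min_transfer_time of the earliest stored arrival, or if it improves on a stored arrival of the same trip; can_infect then gates outgoing events against that same window. Below is a self-contained sketch of just the acceptance rule, using a hypothetical Event namedtuple carrying the attributes the two methods read (the real SpreadingStop constructor is not shown in this file, so this is a stand-in, not the library class):

from collections import namedtuple

# hypothetical event record with the attributes visit()/can_infect() use
Event = namedtuple("Event", "from_stop_I to_stop_I dep_time_ut arr_time_ut trip_I")

def accepts(visit_events, min_transfer_time, event):
    # same acceptance rule as SpreadingStop.visit() above
    min_time = min((e.arr_time_ut for e in visit_events), default=float("inf"))
    if event.arr_time_ut <= min_transfer_time + min_time:
        return True
    # a strictly earlier arrival on an already-seen trip also passes
    return any(event.trip_I == e.trip_I and event.arr_time_ut < e.arr_time_ut
               for e in visit_events)

stored = [Event(1, 2, 90, 100, 7)]                      # one arrival at t=100 on trip 7
print(accepts(stored, 180, Event(3, 1, 240, 250, 8)))   # True: 250 <= 100 + 180
print(accepts(stored, 180, Event(3, 1, 290, 300, 8)))   # False: outside window, different trip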
valid
|
get_transit_connections
|
Parameters
----------
gtfs: gtfspy.GTFS
end_time_ut: int
start_time_ut: int
Returns
-------
list[Connection]
|
gtfspy/routing/helpers.py
|
def get_transit_connections(gtfs, start_time_ut, end_time_ut):
"""
Parameters
----------
gtfs: gtfspy.GTFS
end_time_ut: int
start_time_ut: int
Returns
-------
list[Connection]
"""
if start_time_ut + 20 * 3600 < end_time_ut:
warn("Note that it is possible that same trip_I's can take place during multiple days, "
"which could (potentially) affect the outcomes of the CSA routing!")
assert (isinstance(gtfs, GTFS))
events_df = temporal_network(gtfs, start_time_ut=start_time_ut, end_time_ut=end_time_ut)
assert (isinstance(events_df, pandas.DataFrame))
return list(map(lambda e: Connection(e.from_stop_I, e.to_stop_I, e.dep_time_ut, e.arr_time_ut, e.trip_I, e.seq),
events_df.itertuples()
)
)
|
def get_transit_connections(gtfs, start_time_ut, end_time_ut):
"""
Parameters
----------
gtfs: gtfspy.GTFS
end_time_ut: int
start_time_ut: int
Returns
-------
list[Connection]
"""
if start_time_ut + 20 * 3600 < end_time_ut:
warn("Note that it is possible that same trip_I's can take place during multiple days, "
"which could (potentially) affect the outcomes of the CSA routing!")
assert (isinstance(gtfs, GTFS))
events_df = temporal_network(gtfs, start_time_ut=start_time_ut, end_time_ut=end_time_ut)
assert (isinstance(events_df, pandas.DataFrame))
return list(map(lambda e: Connection(e.from_stop_I, e.to_stop_I, e.dep_time_ut, e.arr_time_ut, e.trip_I, e.seq),
events_df.itertuples()
)
)
|
[
"Parameters",
"----------",
"gtfs",
":",
"gtfspy",
".",
"GTFS",
"end_time_ut",
":",
"int",
"start_time_ut",
":",
"int"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/helpers.py#L8-L29
|
[
"def",
"get_transit_connections",
"(",
"gtfs",
",",
"start_time_ut",
",",
"end_time_ut",
")",
":",
"if",
"start_time_ut",
"+",
"20",
"*",
"3600",
"<",
"end_time_ut",
":",
"warn",
"(",
"\"Note that it is possible that same trip_I's can take place during multiple days, \"",
"\"which could (potentially) affect the outcomes of the CSA routing!\"",
")",
"assert",
"(",
"isinstance",
"(",
"gtfs",
",",
"GTFS",
")",
")",
"events_df",
"=",
"temporal_network",
"(",
"gtfs",
",",
"start_time_ut",
"=",
"start_time_ut",
",",
"end_time_ut",
"=",
"end_time_ut",
")",
"assert",
"(",
"isinstance",
"(",
"events_df",
",",
"pandas",
".",
"DataFrame",
")",
")",
"return",
"list",
"(",
"map",
"(",
"lambda",
"e",
":",
"Connection",
"(",
"e",
".",
"from_stop_I",
",",
"e",
".",
"to_stop_I",
",",
"e",
".",
"dep_time_ut",
",",
"e",
".",
"arr_time_ut",
",",
"e",
".",
"trip_I",
",",
"e",
".",
"seq",
")",
",",
"events_df",
".",
"itertuples",
"(",
")",
")",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
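A minimal usage sketch for get_transit_connections, assuming gtfspy's GTFS class lives in gtfspy.gtfs and a hypothetical sqlite database path; the 20-hour warning above only fires for longer extraction windows:

from gtfspy.gtfs import GTFS
from gtfspy.routing.helpers import get_transit_connections

g = GTFS("my_city.sqlite")            # hypothetical database path
day_start = 1475485200                # hypothetical unix timestamp
connections = get_transit_connections(g, day_start, day_start + 2 * 3600)
print(len(connections), "Connection objects in a two-hour window")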
valid
|
get_walk_network
|
Parameters
----------
gtfs: gtfspy.GTFS
Returns
-------
walk_network: networkx.Graph:
|
gtfspy/routing/helpers.py
|
def get_walk_network(gtfs, max_link_distance_m=1000):
"""
Parameters
----------
gtfs: gtfspy.GTFS
Returns
-------
walk_network: networkx.Graph:
"""
assert (isinstance(gtfs, GTFS))
return walk_transfer_stop_to_stop_network(gtfs, max_link_distance=max_link_distance_m)
|
def get_walk_network(gtfs, max_link_distance_m=1000):
"""
Parameters
----------
gtfs: gtfspy.GTFS
Returns
-------
walk_network: networkx.Graph:
"""
assert (isinstance(gtfs, GTFS))
return walk_transfer_stop_to_stop_network(gtfs, max_link_distance=max_link_distance_m)
|
[
"Parameters",
"----------",
"gtfs",
":",
"gtfspy",
".",
"GTFS"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/helpers.py#L32-L43
|
[
"def",
"get_walk_network",
"(",
"gtfs",
",",
"max_link_distance_m",
"=",
"1000",
")",
":",
"assert",
"(",
"isinstance",
"(",
"gtfs",
",",
"GTFS",
")",
")",
"return",
"walk_transfer_stop_to_stop_network",
"(",
"gtfs",
",",
"max_link_distance",
"=",
"max_link_distance_m",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
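Together with compute_pseudo_connections from gtfspy/routing/pseudo_connections.py shown earlier, these two helpers produce the full event list for a connection-scan style router. A sketch reusing g and day_start from the previous example:

from gtfspy.routing.helpers import get_transit_connections, get_walk_network
from gtfspy.routing.pseudo_connections import compute_pseudo_connections

transit = get_transit_connections(g, day_start, day_start + 2 * 3600)
walk_net = get_walk_network(g, max_link_distance_m=600)   # tighter than the 1000 m default
pseudo = compute_pseudo_connections(transit, day_start, day_start + 2 * 3600,
                                    transfer_margin=120,  # seconds
                                    walk_network=walk_net,
                                    walk_speed=1.3)       # metres per second
all_connections = sorted(set(transit) | pseudo, key=lambda c: c.departure_time)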
valid
|
calculate_trip_shape_breakpoints
|
Pre-compute the shape points corresponding to each trip's stop.
Depends: shapes
|
gtfspy/import_loaders/stop_times_loader.py
|
def calculate_trip_shape_breakpoints(conn):
"""Pre-compute the shape points corresponding to each trip's stop.
Depends: shapes"""
from gtfspy import shapes
cur = conn.cursor()
breakpoints_cache = {}
# Counters for problems - don't print every problem.
count_bad_shape_ordering = 0
count_bad_shape_fit = 0
count_no_shape_fit = 0
trip_Is = [x[0] for x in
cur.execute('SELECT DISTINCT trip_I FROM stop_times').fetchall()]
for trip_I in trip_Is:
# Get the shape points
row = cur.execute('''SELECT shape_id
FROM trips WHERE trip_I=?''', (trip_I,)).fetchone()
if row is None:
continue
shape_id = row[0]
if shape_id is None or shape_id == '':
continue
# Get the stop points
cur.execute('''SELECT seq, lat, lon, stop_id
FROM stop_times LEFT JOIN stops USING (stop_I)
WHERE trip_I=?
ORDER BY seq''',
(trip_I,))
#print '%20s, %s'%(run_code, datetime.fromtimestamp(run_sch_starttime))
stop_points = [dict(seq=row[0],
lat=row[1],
lon=row[2],
stop_I=row[3])
for row in cur if row[1] and row[2]]
# Calculate a cache key for this sequence.
# If both shape_id, and all stop_Is are same, then we can re-use existing breakpoints:
cache_key = (shape_id, tuple(x['stop_I'] for x in stop_points))
if cache_key in breakpoints_cache:
breakpoints = breakpoints_cache[cache_key]
else:
# Must re-calculate breakpoints:
shape_points = shapes.get_shape_points(cur, shape_id)
breakpoints, badness \
= shapes.find_segments(stop_points, shape_points)
if breakpoints != sorted(breakpoints):
# route_name, route_id, route_I, trip_id, trip_I = \
# cur.execute('''SELECT name, route_id, route_I, trip_id, trip_I
# FROM trips LEFT JOIN routes USING (route_I)
# WHERE trip_I=? LIMIT 1''', (trip_I,)).fetchone()
# print "Ignoring: Route with bad shape ordering:", route_name, route_id, route_I, trip_id, trip_I
count_bad_shape_ordering += 1
# select * from stop_times where trip_I=NNNN order by shape_break;
breakpoints_cache[cache_key] = None
continue # Do not set shape_break for this trip.
# Add it to cache
breakpoints_cache[cache_key] = breakpoints
if badness > 30 * len(breakpoints):
#print "bad shape fit: %s (%s, %s, %s)" % (badness, trip_I, shape_id, len(breakpoints))
count_bad_shape_fit += 1
if breakpoints is None:
continue
if len(breakpoints) == 0:
# No valid route could be identified.
#print "Ignoring: No shape identified for trip_I=%s, shape_id=%s" % (trip_I, shape_id)
count_no_shape_fit += 1
continue
# breakpoints is the corresponding points for each stop
assert len(breakpoints) == len(stop_points)
cur.executemany('UPDATE stop_times SET shape_break=? '
'WHERE trip_I=? AND seq=? ',
((int(bkpt), int(trip_I), int(stpt['seq']))
for bkpt, stpt in zip(breakpoints, stop_points)))
if count_bad_shape_fit > 0:
print(" Shape trip breakpoints: %s bad fits" % count_bad_shape_fit)
if count_bad_shape_ordering > 0:
print(" Shape trip breakpoints: %s bad shape orderings" % count_bad_shape_ordering)
if count_no_shape_fit > 0:
print(" Shape trip breakpoints: %s no shape fits" % count_no_shape_fit)
conn.commit()
|
def calculate_trip_shape_breakpoints(conn):
"""Pre-compute the shape points corresponding to each trip's stop.
Depends: shapes"""
from gtfspy import shapes
cur = conn.cursor()
breakpoints_cache = {}
# Counters for problems - don't print every problem.
count_bad_shape_ordering = 0
count_bad_shape_fit = 0
count_no_shape_fit = 0
trip_Is = [x[0] for x in
cur.execute('SELECT DISTINCT trip_I FROM stop_times').fetchall()]
for trip_I in trip_Is:
# Get the shape points
row = cur.execute('''SELECT shape_id
FROM trips WHERE trip_I=?''', (trip_I,)).fetchone()
if row is None:
continue
shape_id = row[0]
if shape_id is None or shape_id == '':
continue
# Get the stop points
cur.execute('''SELECT seq, lat, lon, stop_id
FROM stop_times LEFT JOIN stops USING (stop_I)
WHERE trip_I=?
ORDER BY seq''',
(trip_I,))
#print '%20s, %s'%(run_code, datetime.fromtimestamp(run_sch_starttime))
stop_points = [dict(seq=row[0],
lat=row[1],
lon=row[2],
stop_I=row[3])
for row in cur if row[1] and row[2]]
# Calculate a cache key for this sequence.
# If both shape_id, and all stop_Is are same, then we can re-use existing breakpoints:
cache_key = (shape_id, tuple(x['stop_I'] for x in stop_points))
if cache_key in breakpoints_cache:
breakpoints = breakpoints_cache[cache_key]
else:
# Must re-calculate breakpoints:
shape_points = shapes.get_shape_points(cur, shape_id)
breakpoints, badness \
= shapes.find_segments(stop_points, shape_points)
if breakpoints != sorted(breakpoints):
# route_name, route_id, route_I, trip_id, trip_I = \
# cur.execute('''SELECT name, route_id, route_I, trip_id, trip_I
# FROM trips LEFT JOIN routes USING (route_I)
# WHERE trip_I=? LIMIT 1''', (trip_I,)).fetchone()
# print "Ignoring: Route with bad shape ordering:", route_name, route_id, route_I, trip_id, trip_I
count_bad_shape_ordering += 1
# select * from stop_times where trip_I=NNNN order by shape_break;
breakpoints_cache[cache_key] = None
continue # Do not set shape_break for this trip.
# Add it to cache
breakpoints_cache[cache_key] = breakpoints
if badness > 30 * len(breakpoints):
#print "bad shape fit: %s (%s, %s, %s)" % (badness, trip_I, shape_id, len(breakpoints))
count_bad_shape_fit += 1
if breakpoints is None:
continue
if len(breakpoints) == 0:
# No valid route could be identified.
#print "Ignoring: No shape identified for trip_I=%s, shape_id=%s" % (trip_I, shape_id)
count_no_shape_fit += 1
continue
# breakpoints is the corresponding points for each stop
assert len(breakpoints) == len(stop_points)
cur.executemany('UPDATE stop_times SET shape_break=? '
'WHERE trip_I=? AND seq=? ',
((int(bkpt), int(trip_I), int(stpt['seq']))
for bkpt, stpt in zip(breakpoints, stop_points)))
if count_bad_shape_fit > 0:
print(" Shape trip breakpoints: %s bad fits" % count_bad_shape_fit)
if count_bad_shape_ordering > 0:
print(" Shape trip breakpoints: %s bad shape orderings" % count_bad_shape_ordering)
if count_no_shape_fit > 0:
print(" Shape trip breakpoints: %s no shape fits" % count_no_shape_fit)
conn.commit()
|
[
"Pre",
"-",
"compute",
"the",
"shape",
"points",
"corresponding",
"to",
"each",
"trip",
"s",
"stop",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_loaders/stop_times_loader.py#L97-L184
|
[
"def",
"calculate_trip_shape_breakpoints",
"(",
"conn",
")",
":",
"from",
"gtfspy",
"import",
"shapes",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"breakpoints_cache",
"=",
"{",
"}",
"# Counters for problems - don't print every problem.",
"count_bad_shape_ordering",
"=",
"0",
"count_bad_shape_fit",
"=",
"0",
"count_no_shape_fit",
"=",
"0",
"trip_Is",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"cur",
".",
"execute",
"(",
"'SELECT DISTINCT trip_I FROM stop_times'",
")",
".",
"fetchall",
"(",
")",
"]",
"for",
"trip_I",
"in",
"trip_Is",
":",
"# Get the shape points",
"row",
"=",
"cur",
".",
"execute",
"(",
"'''SELECT shape_id\n FROM trips WHERE trip_I=?'''",
",",
"(",
"trip_I",
",",
")",
")",
".",
"fetchone",
"(",
")",
"if",
"row",
"is",
"None",
":",
"continue",
"shape_id",
"=",
"row",
"[",
"0",
"]",
"if",
"shape_id",
"is",
"None",
"or",
"shape_id",
"==",
"''",
":",
"continue",
"# Get the stop points",
"cur",
".",
"execute",
"(",
"'''SELECT seq, lat, lon, stop_id\n FROM stop_times LEFT JOIN stops USING (stop_I)\n WHERE trip_I=?\n ORDER BY seq'''",
",",
"(",
"trip_I",
",",
")",
")",
"#print '%20s, %s'%(run_code, datetime.fromtimestamp(run_sch_starttime))",
"stop_points",
"=",
"[",
"dict",
"(",
"seq",
"=",
"row",
"[",
"0",
"]",
",",
"lat",
"=",
"row",
"[",
"1",
"]",
",",
"lon",
"=",
"row",
"[",
"2",
"]",
",",
"stop_I",
"=",
"row",
"[",
"3",
"]",
")",
"for",
"row",
"in",
"cur",
"if",
"row",
"[",
"1",
"]",
"and",
"row",
"[",
"2",
"]",
"]",
"# Calculate a cache key for this sequence.",
"# If both shape_id, and all stop_Is are same, then we can re-use existing breakpoints:",
"cache_key",
"=",
"(",
"shape_id",
",",
"tuple",
"(",
"x",
"[",
"'stop_I'",
"]",
"for",
"x",
"in",
"stop_points",
")",
")",
"if",
"cache_key",
"in",
"breakpoints_cache",
":",
"breakpoints",
"=",
"breakpoints_cache",
"[",
"cache_key",
"]",
"else",
":",
"# Must re-calculate breakpoints:",
"shape_points",
"=",
"shapes",
".",
"get_shape_points",
"(",
"cur",
",",
"shape_id",
")",
"breakpoints",
",",
"badness",
"=",
"shapes",
".",
"find_segments",
"(",
"stop_points",
",",
"shape_points",
")",
"if",
"breakpoints",
"!=",
"sorted",
"(",
"breakpoints",
")",
":",
"# route_name, route_id, route_I, trip_id, trip_I = \\",
"# cur.execute('''SELECT name, route_id, route_I, trip_id, trip_I",
"# FROM trips LEFT JOIN routes USING (route_I)",
"# WHERE trip_I=? LIMIT 1''', (trip_I,)).fetchone()",
"# print \"Ignoring: Route with bad shape ordering:\", route_name, route_id, route_I, trip_id, trip_I",
"count_bad_shape_ordering",
"+=",
"1",
"# select * from stop_times where trip_I=NNNN order by shape_break;",
"breakpoints_cache",
"[",
"cache_key",
"]",
"=",
"None",
"continue",
"# Do not set shape_break for this trip.",
"# Add it to cache",
"breakpoints_cache",
"[",
"cache_key",
"]",
"=",
"breakpoints",
"if",
"badness",
">",
"30",
"*",
"len",
"(",
"breakpoints",
")",
":",
"#print \"bad shape fit: %s (%s, %s, %s)\" % (badness, trip_I, shape_id, len(breakpoints))",
"count_bad_shape_fit",
"+=",
"1",
"if",
"breakpoints",
"is",
"None",
":",
"continue",
"if",
"len",
"(",
"breakpoints",
")",
"==",
"0",
":",
"# No valid route could be identified.",
"#print \"Ignoring: No shape identified for trip_I=%s, shape_id=%s\" % (trip_I, shape_id)",
"count_no_shape_fit",
"+=",
"1",
"continue",
"# breakpoints is the corresponding points for each stop",
"assert",
"len",
"(",
"breakpoints",
")",
"==",
"len",
"(",
"stop_points",
")",
"cur",
".",
"executemany",
"(",
"'UPDATE stop_times SET shape_break=? '",
"'WHERE trip_I=? AND seq=? '",
",",
"(",
"(",
"int",
"(",
"bkpt",
")",
",",
"int",
"(",
"trip_I",
")",
",",
"int",
"(",
"stpt",
"[",
"'seq'",
"]",
")",
")",
"for",
"bkpt",
",",
"stpt",
"in",
"zip",
"(",
"breakpoints",
",",
"stop_points",
")",
")",
")",
"if",
"count_bad_shape_fit",
">",
"0",
":",
"print",
"(",
"\" Shape trip breakpoints: %s bad fits\"",
"%",
"count_bad_shape_fit",
")",
"if",
"count_bad_shape_ordering",
">",
"0",
":",
"print",
"(",
"\" Shape trip breakpoints: %s bad shape orderings\"",
"%",
"count_bad_shape_ordering",
")",
"if",
"count_no_shape_fit",
">",
"0",
":",
"print",
"(",
"\" Shape trip breakpoints: %s no shape fits\"",
"%",
"count_no_shape_fit",
")",
"conn",
".",
"commit",
"(",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
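Because the loader operates on a plain sqlite3 connection to an already-imported gtfspy database and commits by itself, it can be re-run in isolation after fixing shape data; a sketch with a hypothetical database path:

import sqlite3
from gtfspy.import_loaders.stop_times_loader import calculate_trip_shape_breakpoints

conn = sqlite3.connect("my_city.sqlite")   # hypothetical; must contain trips, stop_times, shapes
calculate_trip_shape_breakpoints(conn)     # fills stop_times.shape_break and commits
conn.close()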
valid
|
JourneyDataManager.import_journey_data_for_target_stop
|
Parameters
----------
origin_stop_I_to_journey_labels: dict
key: origin_stop_Is
value: list of labels
target_stop_I: int
|
gtfspy/routing/journey_data.py
|
def import_journey_data_for_target_stop(self, target_stop_I, origin_stop_I_to_journey_labels, enforce_synchronous_writes=False):
"""
Parameters
----------
origin_stop_I_to_journey_labels: dict
key: origin_stop_Is
value: list of labels
target_stop_I: int
"""
cur = self.conn.cursor()
self.conn.isolation_level = 'EXCLUSIVE'
# if not enforce_synchronous_writes:
cur.execute('PRAGMA synchronous = 0;')
if self.track_route:
self._insert_journeys_with_route_into_db(origin_stop_I_to_journey_labels, target_stop=int(target_stop_I))
else:
self._insert_journeys_into_db_no_route(origin_stop_I_to_journey_labels, target_stop=int(target_stop_I))
print("Finished import process")
self.conn.commit()
|
def import_journey_data_for_target_stop(self, target_stop_I, origin_stop_I_to_journey_labels, enforce_synchronous_writes=False):
"""
Parameters
----------
origin_stop_I_to_journey_labels: dict
key: origin_stop_Is
value: list of labels
target_stop_I: int
"""
cur = self.conn.cursor()
self.conn.isolation_level = 'EXCLUSIVE'
# if not enforce_synchronous_writes:
cur.execute('PRAGMA synchronous = 0;')
if self.track_route:
self._insert_journeys_with_route_into_db(origin_stop_I_to_journey_labels, target_stop=int(target_stop_I))
else:
self._insert_journeys_into_db_no_route(origin_stop_I_to_journey_labels, target_stop=int(target_stop_I))
print("Finished import process")
self.conn.commit()
|
[
"Parameters",
"----------",
"origin_stop_I_to_journey_labels",
":",
"dict",
"key",
":",
"origin_stop_Is",
"value",
":",
"list",
"of",
"labels",
"target_stop_I",
":",
"int"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/journey_data.py#L82-L101
|
[
"def",
"import_journey_data_for_target_stop",
"(",
"self",
",",
"target_stop_I",
",",
"origin_stop_I_to_journey_labels",
",",
"enforce_synchronous_writes",
"=",
"False",
")",
":",
"cur",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"self",
".",
"conn",
".",
"isolation_level",
"=",
"'EXCLUSIVE'",
"# if not enforce_synchronous_writes:",
"cur",
".",
"execute",
"(",
"'PRAGMA synchronous = 0;'",
")",
"if",
"self",
".",
"track_route",
":",
"self",
".",
"_insert_journeys_with_route_into_db",
"(",
"origin_stop_I_to_journey_labels",
",",
"target_stop",
"=",
"int",
"(",
"target_stop_I",
")",
")",
"else",
":",
"self",
".",
"_insert_journeys_into_db_no_route",
"(",
"origin_stop_I_to_journey_labels",
",",
"target_stop",
"=",
"int",
"(",
"target_stop_I",
")",
")",
"print",
"(",
"\"Finished import process\"",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
valid
|
JourneyDataManager._insert_journeys_into_db_no_route
|
con.isolation_level = 'EXCLUSIVE'
con.execute('BEGIN EXCLUSIVE')
#exclusive access starts here. Nothing else can r/w the db, do your magic here.
con.commit()
|
gtfspy/routing/journey_data.py
|
def _insert_journeys_into_db_no_route(self, stop_profiles, target_stop=None):
# TODO: Change the insertion so that the check last journey id and insertions are in the same transaction block
"""
con.isolation_level = 'EXCLUSIVE'
con.execute('BEGIN EXCLUSIVE')
#exclusive access starts here. Nothing else can r/w the db, do your magic here.
con.commit()
"""
print("Collecting journey data")
journey_id = 1
journey_list = []
tot = len(stop_profiles)
for i, (origin_stop, labels) in enumerate(stop_profiles.items(), start=1):
#print("\r Stop " + str(i) + " of " + str(tot), end='', flush=True)
for label in labels:
assert (isinstance(label, LabelTimeWithBoardingsCount))
if self.multitarget_routing:
target_stop = None
else:
target_stop = int(target_stop)
values = [int(journey_id),
int(origin_stop),
target_stop,
int(label.departure_time),
int(label.arrival_time_target),
int(label.n_boardings)]
journey_list.append(values)
journey_id += 1
print("Inserting journeys without route into database")
insert_journeys_stmt = '''INSERT INTO journeys(
journey_id,
from_stop_I,
to_stop_I,
departure_time,
arrival_time_target,
n_boardings) VALUES (%s) ''' % (", ".join(["?" for x in range(6)]))
#self.conn.executemany(insert_journeys_stmt, journey_list)
self._executemany_exclusive(insert_journeys_stmt, journey_list)
self.conn.commit()
|
def _insert_journeys_into_db_no_route(self, stop_profiles, target_stop=None):
# TODO: Change the insertion so that the check last journey id and insertions are in the same transaction block
"""
con.isolation_level = 'EXCLUSIVE'
con.execute('BEGIN EXCLUSIVE')
#exclusive access starts here. Nothing else can r/w the db, do your magic here.
con.commit()
"""
print("Collecting journey data")
journey_id = 1
journey_list = []
tot = len(stop_profiles)
for i, (origin_stop, labels) in enumerate(stop_profiles.items(), start=1):
#print("\r Stop " + str(i) + " of " + str(tot), end='', flush=True)
for label in labels:
assert (isinstance(label, LabelTimeWithBoardingsCount))
if self.multitarget_routing:
target_stop = None
else:
target_stop = int(target_stop)
values = [int(journey_id),
int(origin_stop),
target_stop,
int(label.departure_time),
int(label.arrival_time_target),
int(label.n_boardings)]
journey_list.append(values)
journey_id += 1
print("Inserting journeys without route into database")
insert_journeys_stmt = '''INSERT INTO journeys(
journey_id,
from_stop_I,
to_stop_I,
departure_time,
arrival_time_target,
n_boardings) VALUES (%s) ''' % (", ".join(["?" for x in range(6)]))
#self.conn.executemany(insert_journeys_stmt, journey_list)
self._executemany_exclusive(insert_journeys_stmt, journey_list)
self.conn.commit()
|
[
"con",
".",
"isolation_level",
"=",
"EXCLUSIVE",
"con",
".",
"execute",
"(",
"BEGIN",
"EXCLUSIVE",
")",
"#exclusive",
"access",
"starts",
"here",
".",
"Nothing",
"else",
"can",
"r",
"/",
"w",
"the",
"db",
"do",
"your",
"magic",
"here",
".",
"con",
".",
"commit",
"()"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/journey_data.py#L113-L153
|
[
"def",
"_insert_journeys_into_db_no_route",
"(",
"self",
",",
"stop_profiles",
",",
"target_stop",
"=",
"None",
")",
":",
"# TODO: Change the insertion so that the check last journey id and insertions are in the same transaction block",
"print",
"(",
"\"Collecting journey data\"",
")",
"journey_id",
"=",
"1",
"journey_list",
"=",
"[",
"]",
"tot",
"=",
"len",
"(",
"stop_profiles",
")",
"for",
"i",
",",
"(",
"origin_stop",
",",
"labels",
")",
"in",
"enumerate",
"(",
"stop_profiles",
".",
"items",
"(",
")",
",",
"start",
"=",
"1",
")",
":",
"#print(\"\\r Stop \" + str(i) + \" of \" + str(tot), end='', flush=True)",
"for",
"label",
"in",
"labels",
":",
"assert",
"(",
"isinstance",
"(",
"label",
",",
"LabelTimeWithBoardingsCount",
")",
")",
"if",
"self",
".",
"multitarget_routing",
":",
"target_stop",
"=",
"None",
"else",
":",
"target_stop",
"=",
"int",
"(",
"target_stop",
")",
"values",
"=",
"[",
"int",
"(",
"journey_id",
")",
",",
"int",
"(",
"origin_stop",
")",
",",
"target_stop",
",",
"int",
"(",
"label",
".",
"departure_time",
")",
",",
"int",
"(",
"label",
".",
"arrival_time_target",
")",
",",
"int",
"(",
"label",
".",
"n_boardings",
")",
"]",
"journey_list",
".",
"append",
"(",
"values",
")",
"journey_id",
"+=",
"1",
"print",
"(",
"\"Inserting journeys without route into database\"",
")",
"insert_journeys_stmt",
"=",
"'''INSERT INTO journeys(\n journey_id,\n from_stop_I,\n to_stop_I,\n departure_time,\n arrival_time_target,\n n_boardings) VALUES (%s) '''",
"%",
"(",
"\", \"",
".",
"join",
"(",
"[",
"\"?\"",
"for",
"x",
"in",
"range",
"(",
"6",
")",
"]",
")",
")",
"#self.conn.executemany(insert_journeys_stmt, journey_list)",
"self",
".",
"_executemany_exclusive",
"(",
"insert_journeys_stmt",
",",
"journey_list",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
valid
|
JourneyDataManager._journey_label_generator
|
Parameters
----------
destination_stop_Is: list-like
origin_stop_Is: list-like
Yields
------
(origin_stop_I, destination_stop_I, journey_labels) : tuple
|
gtfspy/routing/journey_data.py
|
def _journey_label_generator(self, destination_stop_Is=None, origin_stop_Is=None):
"""
Parameters
----------
destination_stop_Is: list-like
origin_stop_Is: list-like
Yields
------
(origin_stop_I, destination_stop_I, journey_labels) : tuple
"""
conn = self.conn
conn.row_factory = sqlite3.Row
if destination_stop_Is is None:
destination_stop_Is = self.get_targets_having_journeys()
if origin_stop_Is is None:
origin_stop_Is = self.get_origins_having_journeys()
for destination_stop_I in destination_stop_Is:
if self.track_route:
label_features = "journey_id, from_stop_I, to_stop_I, n_boardings, movement_duration, " \
"journey_duration, in_vehicle_duration, transfer_wait_duration, walking_duration, " \
"departure_time, arrival_time_target"""
else:
label_features = "journey_id, from_stop_I, to_stop_I, n_boardings, departure_time, " \
"arrival_time_target"
sql = "SELECT " + label_features + " FROM journeys WHERE to_stop_I = %s" % destination_stop_I
df = pd.read_sql_query(sql, self.conn)
for origin_stop_I in origin_stop_Is:
selection = df.loc[df['from_stop_I'] == origin_stop_I]
journey_labels = []
for journey in selection.to_dict(orient='records'):
journey["pre_journey_wait_fp"] = -1
try:
journey_labels.append(LabelGeneric(journey))
except Exception as e:
print(journey)
raise e
yield origin_stop_I, destination_stop_I, journey_labels
|
def _journey_label_generator(self, destination_stop_Is=None, origin_stop_Is=None):
"""
Parameters
----------
destination_stop_Is: list-like
origin_stop_Is: list-like
Yields
------
(origin_stop_I, destination_stop_I, journey_labels) : tuple
"""
conn = self.conn
conn.row_factory = sqlite3.Row
if destination_stop_Is is None:
destination_stop_Is = self.get_targets_having_journeys()
if origin_stop_Is is None:
origin_stop_Is = self.get_origins_having_journeys()
for destination_stop_I in destination_stop_Is:
if self.track_route:
label_features = "journey_id, from_stop_I, to_stop_I, n_boardings, movement_duration, " \
"journey_duration, in_vehicle_duration, transfer_wait_duration, walking_duration, " \
"departure_time, arrival_time_target"""
else:
label_features = "journey_id, from_stop_I, to_stop_I, n_boardings, departure_time, " \
"arrival_time_target"
sql = "SELECT " + label_features + " FROM journeys WHERE to_stop_I = %s" % destination_stop_I
df = pd.read_sql_query(sql, self.conn)
for origin_stop_I in origin_stop_Is:
selection = df.loc[df['from_stop_I'] == origin_stop_I]
journey_labels = []
for journey in selection.to_dict(orient='records'):
journey["pre_journey_wait_fp"] = -1
try:
journey_labels.append(LabelGeneric(journey))
except Exception as e:
print(journey)
raise e
yield origin_stop_I, destination_stop_I, journey_labels
|
[
"Parameters",
"----------",
"destination_stop_Is",
":",
"list",
"-",
"like",
"origin_stop_Is",
":",
"list",
"-",
"like"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/journey_data.py#L427-L466
|
[
"def",
"_journey_label_generator",
"(",
"self",
",",
"destination_stop_Is",
"=",
"None",
",",
"origin_stop_Is",
"=",
"None",
")",
":",
"conn",
"=",
"self",
".",
"conn",
"conn",
".",
"row_factory",
"=",
"sqlite3",
".",
"Row",
"if",
"destination_stop_Is",
"is",
"None",
":",
"destination_stop_Is",
"=",
"self",
".",
"get_targets_having_journeys",
"(",
")",
"if",
"origin_stop_Is",
"is",
"None",
":",
"origin_stop_Is",
"=",
"self",
".",
"get_origins_having_journeys",
"(",
")",
"for",
"destination_stop_I",
"in",
"destination_stop_Is",
":",
"if",
"self",
".",
"track_route",
":",
"label_features",
"=",
"\"journey_id, from_stop_I, to_stop_I, n_boardings, movement_duration, \"",
"\"journey_duration, in_vehicle_duration, transfer_wait_duration, walking_duration, \"",
"\"departure_time, arrival_time_target\"",
"\"\"",
"else",
":",
"label_features",
"=",
"\"journey_id, from_stop_I, to_stop_I, n_boardings, departure_time, \"",
"\"arrival_time_target\"",
"sql",
"=",
"\"SELECT \"",
"+",
"label_features",
"+",
"\" FROM journeys WHERE to_stop_I = %s\"",
"%",
"destination_stop_I",
"df",
"=",
"pd",
".",
"read_sql_query",
"(",
"sql",
",",
"self",
".",
"conn",
")",
"for",
"origin_stop_I",
"in",
"origin_stop_Is",
":",
"selection",
"=",
"df",
".",
"loc",
"[",
"df",
"[",
"'from_stop_I'",
"]",
"==",
"origin_stop_I",
"]",
"journey_labels",
"=",
"[",
"]",
"for",
"journey",
"in",
"selection",
".",
"to_dict",
"(",
"orient",
"=",
"'records'",
")",
":",
"journey",
"[",
"\"pre_journey_wait_fp\"",
"]",
"=",
"-",
"1",
"try",
":",
"journey_labels",
".",
"append",
"(",
"LabelGeneric",
"(",
"journey",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"journey",
")",
"raise",
"e",
"yield",
"origin_stop_I",
",",
"destination_stop_I",
",",
"journey_labels"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
valid
|
JourneyDataManager._insert_travel_impedance_data_to_db
|
Parameters
----------
travel_impedance_measure_name: str
data: list[dict]
Each list element must contain keys:
"from_stop_I", "to_stop_I", "min", "max", "median" and "mean"
|
gtfspy/routing/journey_data.py
|
def _insert_travel_impedance_data_to_db(self, travel_impedance_measure_name, data):
"""
Parameters
----------
travel_impedance_measure_name: str
data: list[dict]
Each list element must contain keys:
"from_stop_I", "to_stop_I", "min", "max", "median" and "mean"
"""
f = float
data_tuple = [(x["from_stop_I"], x["to_stop_I"], f(x["min"]), f(x["max"]), f(x["median"]), f(x["mean"])) for x in data]
insert_stmt = '''INSERT OR REPLACE INTO ''' + travel_impedance_measure_name + ''' (
from_stop_I,
to_stop_I,
min,
max,
median,
mean) VALUES (?, ?, ?, ?, ?, ?) '''
self.conn.executemany(insert_stmt, data_tuple)
self.conn.commit()
|
def _insert_travel_impedance_data_to_db(self, travel_impedance_measure_name, data):
"""
Parameters
----------
travel_impedance_measure_name: str
data: list[dict]
Each list element must contain keys:
"from_stop_I", "to_stop_I", "min", "max", "median" and "mean"
"""
f = float
data_tuple = [(x["from_stop_I"], x["to_stop_I"], f(x["min"]), f(x["max"]), f(x["median"]), f(x["mean"])) for x in data]
insert_stmt = '''INSERT OR REPLACE INTO ''' + travel_impedance_measure_name + ''' (
from_stop_I,
to_stop_I,
min,
max,
median,
mean) VALUES (?, ?, ?, ?, ?, ?) '''
self.conn.executemany(insert_stmt, data_tuple)
self.conn.commit()
|
[
"Parameters",
"----------",
"travel_impedance_measure_name",
":",
"str",
"data",
":",
"list",
"[",
"dict",
"]",
"Each",
"list",
"element",
"must",
"contain",
"keys",
":",
"from_stop_I",
"to_stop_I",
"min",
"max",
"median",
"and",
"mean"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/journey_data.py#L640-L659
|
[
"def",
"_insert_travel_impedance_data_to_db",
"(",
"self",
",",
"travel_impedance_measure_name",
",",
"data",
")",
":",
"f",
"=",
"float",
"data_tuple",
"=",
"[",
"(",
"x",
"[",
"\"from_stop_I\"",
"]",
",",
"x",
"[",
"\"to_stop_I\"",
"]",
",",
"f",
"(",
"x",
"[",
"\"min\"",
"]",
")",
",",
"f",
"(",
"x",
"[",
"\"max\"",
"]",
")",
",",
"f",
"(",
"x",
"[",
"\"median\"",
"]",
")",
",",
"f",
"(",
"x",
"[",
"\"mean\"",
"]",
")",
")",
"for",
"x",
"in",
"data",
"]",
"insert_stmt",
"=",
"'''INSERT OR REPLACE INTO '''",
"+",
"travel_impedance_measure_name",
"+",
"''' (\n from_stop_I,\n to_stop_I,\n min,\n max,\n median,\n mean) VALUES (?, ?, ?, ?, ?, ?) '''",
"self",
".",
"conn",
".",
"executemany",
"(",
"insert_stmt",
",",
"data_tuple",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
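The expected shape of the data argument follows directly from the docstring; a sketch assuming jdm is an already-constructed JourneyDataManager (its constructor is not shown here) and that a table with the given hypothetical name exists:

rows = [
    {"from_stop_I": 1, "to_stop_I": 42,
     "min": 540, "max": 900, "median": 660, "mean": 684.5},
]
jdm._insert_travel_impedance_data_to_db("temporal_distance", rows)  # hypothetical table name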
valid
|
plot_trip_counts_per_day
|
Parameters
----------
G: gtfspy.GTFS
ax: maptlotlib.Axes, optional
highlight_dates: list[str|datetime.datetime]
The values of highlight dates should represent dates, and or datetime objects.
highlight_date_labels: list
The labels for each highlight dates.
show: bool, optional
whether or not to immediately show the results
Returns
-------
ax: maptlotlib.Axes object
|
gtfspy/plots.py
|
def plot_trip_counts_per_day(G, ax=None, highlight_dates=None, highlight_date_labels=None, show=False):
"""
Parameters
----------
G: gtfspy.GTFS
ax: maptlotlib.Axes, optional
highlight_dates: list[str|datetime.datetime]
The values of highlight dates should represent dates, and or datetime objects.
highlight_date_labels: list
The labels for each highlight dates.
show: bool, optional
whether or not to immediately show the results
Returns
-------
ax: maptlotlib.Axes object
"""
daily_trip_counts = G.get_trip_counts_per_day()
if ax is None:
_fig, ax = plt.subplots()
daily_trip_counts["datetime"] = pandas.to_datetime(daily_trip_counts["date_str"])
daily_trip_counts.plot("datetime", "trip_counts", kind="line", ax=ax, marker="o", color="C0", ls=":",
label="Trip counts")
ax.set_xlabel("Date")
ax.set_ylabel("Trip counts per day")
if highlight_dates is not None:
assert isinstance(highlight_dates, list)
if highlight_date_labels is not None:
assert isinstance(highlight_date_labels, list)
assert len(highlight_dates) == len(highlight_date_labels), "Number of highlight date labels do not match"
else:
highlight_date_labels = [None] * len(highlight_dates)
for i, (highlight_date, label) in enumerate(zip(highlight_dates, highlight_date_labels)):
color = "C" + str(int(i % 8 + 1))
highlight_date = pandas.to_datetime(highlight_date)
ax.axvline(highlight_date, color=color, label=label)
ax.legend(loc="best")
ax.grid()
if show:
plt.show()
return ax
|
def plot_trip_counts_per_day(G, ax=None, highlight_dates=None, highlight_date_labels=None, show=False):
"""
Parameters
----------
G: gtfspy.GTFS
ax: maptlotlib.Axes, optional
highlight_dates: list[str|datetime.datetime]
The values of highlight dates should represent dates, and or datetime objects.
highlight_date_labels: list
The labels for each highlight dates.
show: bool, optional
whether or not to immediately show the results
Returns
-------
ax: maptlotlib.Axes object
"""
daily_trip_counts = G.get_trip_counts_per_day()
if ax is None:
_fig, ax = plt.subplots()
daily_trip_counts["datetime"] = pandas.to_datetime(daily_trip_counts["date_str"])
daily_trip_counts.plot("datetime", "trip_counts", kind="line", ax=ax, marker="o", color="C0", ls=":",
label="Trip counts")
ax.set_xlabel("Date")
ax.set_ylabel("Trip counts per day")
if highlight_dates is not None:
assert isinstance(highlight_dates, list)
if highlight_date_labels is not None:
assert isinstance(highlight_date_labels, list)
assert len(highlight_dates) == len(highlight_date_labels), "Number of highlight date labels do not match"
else:
highlight_date_labels = [None] * len(highlight_dates)
for i, (highlight_date, label) in enumerate(zip(highlight_dates, highlight_date_labels)):
color = "C" + str(int(i % 8 + 1))
highlight_date = pandas.to_datetime(highlight_date)
ax.axvline(highlight_date, color=color, label=label)
ax.legend(loc="best")
ax.grid()
if show:
plt.show()
return ax
|
[
"Parameters",
"----------",
"G",
":",
"gtfspy",
".",
"GTFS",
"ax",
":",
"maptlotlib",
".",
"Axes",
"optional",
"highlight_dates",
":",
"list",
"[",
"str|datetime",
".",
"datetime",
"]",
"The",
"values",
"of",
"highlight",
"dates",
"should",
"represent",
"dates",
"and",
"or",
"datetime",
"objects",
".",
"highlight_date_labels",
":",
"list",
"The",
"labels",
"for",
"each",
"highlight",
"dates",
".",
"show",
":",
"bool",
"optional",
"whether",
"or",
"not",
"to",
"immediately",
"show",
"the",
"results"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/plots.py#L9-L49
|
[
"def",
"plot_trip_counts_per_day",
"(",
"G",
",",
"ax",
"=",
"None",
",",
"highlight_dates",
"=",
"None",
",",
"highlight_date_labels",
"=",
"None",
",",
"show",
"=",
"False",
")",
":",
"daily_trip_counts",
"=",
"G",
".",
"get_trip_counts_per_day",
"(",
")",
"if",
"ax",
"is",
"None",
":",
"_fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
")",
"daily_trip_counts",
"[",
"\"datetime\"",
"]",
"=",
"pandas",
".",
"to_datetime",
"(",
"daily_trip_counts",
"[",
"\"date_str\"",
"]",
")",
"daily_trip_counts",
".",
"plot",
"(",
"\"datetime\"",
",",
"\"trip_counts\"",
",",
"kind",
"=",
"\"line\"",
",",
"ax",
"=",
"ax",
",",
"marker",
"=",
"\"o\"",
",",
"color",
"=",
"\"C0\"",
",",
"ls",
"=",
"\":\"",
",",
"label",
"=",
"\"Trip counts\"",
")",
"ax",
".",
"set_xlabel",
"(",
"\"Date\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"Trip counts per day\"",
")",
"if",
"highlight_dates",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"highlight_dates",
",",
"list",
")",
"if",
"highlight_date_labels",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"highlight_date_labels",
",",
"list",
")",
"assert",
"len",
"(",
"highlight_dates",
")",
"==",
"len",
"(",
"highlight_date_labels",
")",
",",
"\"Number of highlight date labels do not match\"",
"else",
":",
"highlight_date_labels",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"highlight_dates",
")",
"for",
"i",
",",
"(",
"highlight_date",
",",
"label",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"highlight_dates",
",",
"highlight_date_labels",
")",
")",
":",
"color",
"=",
"\"C\"",
"+",
"str",
"(",
"int",
"(",
"i",
"%",
"8",
"+",
"1",
")",
")",
"highlight_date",
"=",
"pandas",
".",
"to_datetime",
"(",
"highlight_date",
")",
"ax",
".",
"axvline",
"(",
"highlight_date",
",",
"color",
"=",
"color",
",",
"label",
"=",
"label",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"\"best\"",
")",
"ax",
".",
"grid",
"(",
")",
"if",
"show",
":",
"plt",
".",
"show",
"(",
")",
"return",
"ax"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
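A usage sketch, again with a hypothetical database path; note that highlight_dates and highlight_date_labels must be lists of equal length, as the assertions above enforce:

import matplotlib.pyplot as plt
from gtfspy.gtfs import GTFS
from gtfspy.plots import plot_trip_counts_per_day

g = GTFS("my_city.sqlite")                          # hypothetical path
ax = plot_trip_counts_per_day(
    g,
    highlight_dates=["2016-10-03", "2016-12-24"],   # hypothetical dates
    highlight_date_labels=["first monday", "christmas eve"],
)
ax.figure.savefig("trip_counts.png")
plt.close(ax.figure)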
valid
|
DayTripsMaterializer.make_views
|
Create day_trips and day_stop_times views.
day_trips: day_trips2 x trips = days x trips
day_stop_times: day_trips2 x trips x stop_times = days x trips x stop_times
|
gtfspy/import_loaders/day_trips_materializer.py
|
def make_views(cls, conn):
"""Create day_trips and day_stop_times views.
day_trips: day_trips2 x trips = days x trips
day_stop_times: day_trips2 x trips x stop_times = days x trips x stop_times
"""
conn.execute('DROP VIEW IF EXISTS main.day_trips')
conn.execute('CREATE VIEW day_trips AS '
'SELECT day_trips2.*, trips.* '
#'days.day_start_ut+trips.start_time_ds AS start_time_ut, '
#'days.day_start_ut+trips.end_time_ds AS end_time_ut '
'FROM day_trips2 JOIN trips USING (trip_I);')
conn.commit()
conn.execute('DROP VIEW IF EXISTS main.day_stop_times')
conn.execute('CREATE VIEW day_stop_times AS '
'SELECT day_trips2.*, trips.*, stop_times.*, '
#'days.day_start_ut+trips.start_time_ds AS start_time_ut, '
#'days.day_start_ut+trips.end_time_ds AS end_time_ut, '
'day_trips2.day_start_ut+stop_times.arr_time_ds AS arr_time_ut, '
'day_trips2.day_start_ut+stop_times.dep_time_ds AS dep_time_ut '
'FROM day_trips2 '
'JOIN trips USING (trip_I) '
'JOIN stop_times USING (trip_I)')
conn.commit()
|
def make_views(cls, conn):
"""Create day_trips and day_stop_times views.
day_trips: day_trips2 x trips = days x trips
day_stop_times: day_trips2 x trips x stop_times = days x trips x stop_times
"""
conn.execute('DROP VIEW IF EXISTS main.day_trips')
conn.execute('CREATE VIEW day_trips AS '
'SELECT day_trips2.*, trips.* '
#'days.day_start_ut+trips.start_time_ds AS start_time_ut, '
#'days.day_start_ut+trips.end_time_ds AS end_time_ut '
'FROM day_trips2 JOIN trips USING (trip_I);')
conn.commit()
conn.execute('DROP VIEW IF EXISTS main.day_stop_times')
conn.execute('CREATE VIEW day_stop_times AS '
'SELECT day_trips2.*, trips.*, stop_times.*, '
#'days.day_start_ut+trips.start_time_ds AS start_time_ut, '
#'days.day_start_ut+trips.end_time_ds AS end_time_ut, '
'day_trips2.day_start_ut+stop_times.arr_time_ds AS arr_time_ut, '
'day_trips2.day_start_ut+stop_times.dep_time_ds AS dep_time_ut '
'FROM day_trips2 '
'JOIN trips USING (trip_I) '
'JOIN stop_times USING (trip_I)')
conn.commit()
|
[
"Create",
"day_trips",
"and",
"day_stop_times",
"views",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_loaders/day_trips_materializer.py#L33-L57
|
[
"def",
"make_views",
"(",
"cls",
",",
"conn",
")",
":",
"conn",
".",
"execute",
"(",
"'DROP VIEW IF EXISTS main.day_trips'",
")",
"conn",
".",
"execute",
"(",
"'CREATE VIEW day_trips AS '",
"'SELECT day_trips2.*, trips.* '",
"#'days.day_start_ut+trips.start_time_ds AS start_time_ut, '",
"#'days.day_start_ut+trips.end_time_ds AS end_time_ut '",
"'FROM day_trips2 JOIN trips USING (trip_I);'",
")",
"conn",
".",
"commit",
"(",
")",
"conn",
".",
"execute",
"(",
"'DROP VIEW IF EXISTS main.day_stop_times'",
")",
"conn",
".",
"execute",
"(",
"'CREATE VIEW day_stop_times AS '",
"'SELECT day_trips2.*, trips.*, stop_times.*, '",
"#'days.day_start_ut+trips.start_time_ds AS start_time_ut, '",
"#'days.day_start_ut+trips.end_time_ds AS end_time_ut, '",
"'day_trips2.day_start_ut+stop_times.arr_time_ds AS arr_time_ut, '",
"'day_trips2.day_start_ut+stop_times.dep_time_ds AS dep_time_ut '",
"'FROM day_trips2 '",
"'JOIN trips USING (trip_I) '",
"'JOIN stop_times USING (trip_I)'",
")",
"conn",
".",
"commit",
"(",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
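Since make_views takes a bare connection and the view names are fixed, the views can be recreated after any manual change to day_trips2; a sketch assuming make_views is exposed as a classmethod of DayTripsMaterializer:

import sqlite3
from gtfspy.import_loaders.day_trips_materializer import DayTripsMaterializer

conn = sqlite3.connect("my_city.sqlite")   # hypothetical; must contain day_trips2, trips, stop_times
DayTripsMaterializer.make_views(conn)      # recreates day_trips and day_stop_times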
valid
|
createcolorbar
|
Create a colourbar with limits of lwr and upr
|
gtfspy/colormaps.py
|
def createcolorbar(cmap, norm):
"""Create a colourbar with limits of lwr and upr"""
cax, kw = matplotlib.colorbar.make_axes(matplotlib.pyplot.gca())
c = matplotlib.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
return c
|
def createcolorbar(cmap, norm):
"""Create a colourbar with limits of lwr and upr"""
cax, kw = matplotlib.colorbar.make_axes(matplotlib.pyplot.gca())
c = matplotlib.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
return c
|
[
"Create",
"a",
"colourbar",
"with",
"limits",
"of",
"lwr",
"and",
"upr"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/colormaps.py#L71-L75
|
[
"def",
"createcolorbar",
"(",
"cmap",
",",
"norm",
")",
":",
"cax",
",",
"kw",
"=",
"matplotlib",
".",
"colorbar",
".",
"make_axes",
"(",
"matplotlib",
".",
"pyplot",
".",
"gca",
"(",
")",
")",
"c",
"=",
"matplotlib",
".",
"colorbar",
".",
"ColorbarBase",
"(",
"cax",
",",
"cmap",
"=",
"cmap",
",",
"norm",
"=",
"norm",
")",
"return",
"c"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
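createcolorbar only wraps matplotlib's ColorbarBase around the current axes, so any cmap/norm pair works; a minimal sketch:

import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from gtfspy.colormaps import createcolorbar

norm = Normalize(vmin=0, vmax=3600)                    # e.g. travel times in seconds
fig, ax = plt.subplots()
ax.scatter([0, 1], [0, 1], c=[600, 3000], cmap="viridis", norm=norm)
cbar = createcolorbar(plt.get_cmap("viridis"), norm)   # steals space from the current axes
fig.savefig("colorbar_demo.png")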
valid
|
write_walk_transfer_edges
|
Parameters
----------
gtfs: gtfspy.GTFS
output_file_name: str
|
gtfspy/exports.py
|
def write_walk_transfer_edges(gtfs, output_file_name):
"""
Parameters
----------
gtfs: gtfspy.GTFS
output_file_name: str
"""
transfers = gtfs.get_table("stop_distances")
transfers.drop([u"min_transfer_time", u"timed_transfer"], 1, inplace=True)
with util.create_file(output_file_name, tmpdir=True, keepext=True) as tmpfile:
transfers.to_csv(tmpfile, encoding='utf-8', index=False)
|
def write_walk_transfer_edges(gtfs, output_file_name):
"""
Parameters
----------
gtfs: gtfspy.GTFS
output_file_name: str
"""
transfers = gtfs.get_table("stop_distances")
transfers.drop([u"min_transfer_time", u"timed_transfer"], 1, inplace=True)
with util.create_file(output_file_name, tmpdir=True, keepext=True) as tmpfile:
transfers.to_csv(tmpfile, encoding='utf-8', index=False)
|
[
"Parameters",
"----------",
"gtfs",
":",
"gtfspy",
".",
"GTFS",
"output_file_name",
":",
"str"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/exports.py#L17-L27
|
[
"def",
"write_walk_transfer_edges",
"(",
"gtfs",
",",
"output_file_name",
")",
":",
"transfers",
"=",
"gtfs",
".",
"get_table",
"(",
"\"stop_distances\"",
")",
"transfers",
".",
"drop",
"(",
"[",
"u\"min_transfer_time\"",
",",
"u\"timed_transfer\"",
"]",
",",
"1",
",",
"inplace",
"=",
"True",
")",
"with",
"util",
".",
"create_file",
"(",
"output_file_name",
",",
"tmpdir",
"=",
"True",
",",
"keepext",
"=",
"True",
")",
"as",
"tmpfile",
":",
"transfers",
".",
"to_csv",
"(",
"tmpfile",
",",
"encoding",
"=",
"'utf-8'",
",",
"index",
"=",
"False",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
valid
|
write_nodes
|
Parameters
----------
gtfs: gtfspy.GTFS
output: str
Path to the output file
fields: list, optional
which pieces of information to provide
|
gtfspy/exports.py
|
def write_nodes(gtfs, output, fields=None):
"""
Parameters
----------
gtfs: gtfspy.GTFS
output: str
Path to the output file
fields: list, optional
which pieces of information to provide
"""
nodes = gtfs.get_table("stops")
if fields is not None:
nodes = nodes[fields]
with util.create_file(output, tmpdir=True, keepext=True) as tmpfile:
nodes.to_csv(tmpfile, encoding='utf-8', index=False, sep=";")
|
def write_nodes(gtfs, output, fields=None):
"""
Parameters
----------
gtfs: gtfspy.GTFS
output: str
Path to the output file
fields: list, optional
which pieces of information to provide
"""
nodes = gtfs.get_table("stops")
if fields is not None:
nodes = nodes[fields]
with util.create_file(output, tmpdir=True, keepext=True) as tmpfile:
nodes.to_csv(tmpfile, encoding='utf-8', index=False, sep=";")
|
[
"Parameters",
"----------",
"gtfs",
":",
"gtfspy",
".",
"GTFS",
"output",
":",
"str",
"Path",
"to",
"the",
"output",
"file",
"fields",
":",
"list",
"optional",
"which",
"pieces",
"of",
"information",
"to",
"provide"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/exports.py#L30-L44
|
[
"def",
"write_nodes",
"(",
"gtfs",
",",
"output",
",",
"fields",
"=",
"None",
")",
":",
"nodes",
"=",
"gtfs",
".",
"get_table",
"(",
"\"stops\"",
")",
"if",
"fields",
"is",
"not",
"None",
":",
"nodes",
"=",
"nodes",
"[",
"fields",
"]",
"with",
"util",
".",
"create_file",
"(",
"output",
",",
"tmpdir",
"=",
"True",
",",
"keepext",
"=",
"True",
")",
"as",
"tmpfile",
":",
"nodes",
".",
"to_csv",
"(",
"tmpfile",
",",
"encoding",
"=",
"'utf-8'",
",",
"index",
"=",
"False",
",",
"sep",
"=",
"\";\"",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
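Both CSV exporters above take the GTFS object and an output path; write_walk_transfer_edges uses the pandas default ',' separator while write_nodes writes ';'-separated files. A sketch, with column names assumed from the stops table:

from gtfspy.gtfs import GTFS
from gtfspy.exports import write_walk_transfer_edges, write_nodes

g = GTFS("my_city.sqlite")                                 # hypothetical path
write_walk_transfer_edges(g, "walk_edges.csv")
write_nodes(g, "stops.csv", fields=["stop_I", "lat", "lon"])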
valid
|
write_stops_geojson
|
Parameters
----------
gtfs: gtfspy.GTFS
out_file: file-like or path to file
fields: dict
simultaneously map each original_name to the new_name
Returns
-------
|
gtfspy/exports.py
|
def write_stops_geojson(gtfs, out_file, fields=None):
"""
Parameters
----------
gtfs: gtfspy.GTFS
out_file: file-like or path to file
fields: dict
simultaneously map each original_name to the new_name
Returns
-------
"""
geojson = create_stops_geojson_dict(gtfs, fields)
if hasattr(out_file, "write"):
out_file.write(json.dumps(geojson))
else:
with util.create_file(out_file, tmpdir=True, keepext=True) as tmpfile_path:
tmpfile = open(tmpfile_path, 'w')
tmpfile.write(json.dumps(geojson))
|
def write_stops_geojson(gtfs, out_file, fields=None):
"""
Parameters
----------
gtfs: gtfspy.GTFS
out_file: file-like or path to file
fields: dict
simultaneously map each original_name to the new_name
Returns
-------
"""
geojson = create_stops_geojson_dict(gtfs, fields)
if hasattr(out_file, "write"):
out_file.write(json.dumps(geojson))
else:
with util.create_file(out_file, tmpdir=True, keepext=True) as tmpfile_path:
tmpfile = open(tmpfile_path, 'w')
tmpfile.write(json.dumps(geojson))
|
[
"Parameters",
"----------",
"gtfs",
":",
"gtfspy",
".",
"GTFS",
"out_file",
":",
"file",
"-",
"like",
"or",
"path",
"to",
"file",
"fields",
":",
"dict",
"simultaneously",
"map",
"each",
"original_name",
"to",
"the",
"new_name",
"Returns",
"-------"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/exports.py#L81-L98
|
[
"def",
"write_stops_geojson",
"(",
"gtfs",
",",
"out_file",
",",
"fields",
"=",
"None",
")",
":",
"geojson",
"=",
"create_stops_geojson_dict",
"(",
"gtfs",
",",
"fields",
")",
"if",
"hasattr",
"(",
"out_file",
",",
"\"write\"",
")",
":",
"out_file",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"geojson",
")",
")",
"else",
":",
"with",
"util",
".",
"create_file",
"(",
"out_file",
",",
"tmpdir",
"=",
"True",
",",
"keepext",
"=",
"True",
")",
"as",
"tmpfile_path",
":",
"tmpfile",
"=",
"open",
"(",
"tmpfile_path",
",",
"'w'",
")",
"tmpfile",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"geojson",
")",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
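Because write_stops_geojson checks hasattr(out_file, 'write'), both a filesystem path and an open file-like object work; a sketch writing to an in-memory buffer, reusing g from the sketch above:

import io
from gtfspy.exports import write_stops_geojson

buf = io.StringIO()
write_stops_geojson(g, buf)
geojson_text = buf.getvalue()   # serialized GeoJSON as a string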
valid
|
write_combined_transit_stop_to_stop_network
|
Parameters
----------
gtfs : gtfspy.GTFS
output_path : str
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead
|
gtfspy/exports.py
|
def write_combined_transit_stop_to_stop_network(gtfs, output_path, fmt=None):
"""
Parameters
----------
gtfs : gtfspy.GTFS
output_path : str
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead """
if fmt is None:
fmt = "edg"
multi_di_graph = combined_stop_to_stop_transit_network(gtfs)
_write_stop_to_stop_network_edges(multi_di_graph, output_path, fmt=fmt)
|
def write_combined_transit_stop_to_stop_network(gtfs, output_path, fmt=None):
"""
Parameters
----------
gtfs : gtfspy.GTFS
output_path : str
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead """
if fmt is None:
fmt = "edg"
multi_di_graph = combined_stop_to_stop_transit_network(gtfs)
_write_stop_to_stop_network_edges(multi_di_graph, output_path, fmt=fmt)
|
[
"Parameters",
"----------",
"gtfs",
":",
"gtfspy",
".",
"GTFS",
"output_path",
":",
"str",
"fmt",
":",
"None",
"optional",
"defaulting",
"to",
"edg",
"and",
"writing",
"results",
"as",
".",
"edg",
"files",
"If",
"csv",
"csv",
"files",
"are",
"produced",
"instead"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/exports.py#L101-L113
|
[
"def",
"write_combined_transit_stop_to_stop_network",
"(",
"gtfs",
",",
"output_path",
",",
"fmt",
"=",
"None",
")",
":",
"if",
"fmt",
"is",
"None",
":",
"fmt",
"=",
"\"edg\"",
"multi_di_graph",
"=",
"combined_stop_to_stop_transit_network",
"(",
"gtfs",
")",
"_write_stop_to_stop_network_edges",
"(",
"multi_di_graph",
",",
"output_path",
",",
"fmt",
"=",
"fmt",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
valid
|
write_static_networks
|
Parameters
----------
gtfs: gtfspy.GTFS
output_dir: (str, unicode)
a path where to write
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead
|
gtfspy/exports.py
|
def write_static_networks(gtfs, output_dir, fmt=None):
"""
Parameters
----------
gtfs: gtfspy.GTFS
output_dir: (str, unicode)
a path where to write
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead
"""
if fmt is None:
fmt = "edg"
single_layer_networks = stop_to_stop_networks_by_type(gtfs)
util.makedirs(output_dir)
for route_type, net in single_layer_networks.items():
tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]
file_name = os.path.join(output_dir, "network_" + tag + "." + fmt)
if len(net.edges()) > 0:
_write_stop_to_stop_network_edges(net, file_name, fmt=fmt)
|
def write_static_networks(gtfs, output_dir, fmt=None):
"""
Parameters
----------
gtfs: gtfspy.GTFS
output_dir: (str, unicode)
a path where to write
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead
"""
if fmt is None:
fmt = "edg"
single_layer_networks = stop_to_stop_networks_by_type(gtfs)
util.makedirs(output_dir)
for route_type, net in single_layer_networks.items():
tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]
file_name = os.path.join(output_dir, "network_" + tag + "." + fmt)
if len(net.edges()) > 0:
_write_stop_to_stop_network_edges(net, file_name, fmt=fmt)
|
[
"Parameters",
"----------",
"gtfs",
":",
"gtfspy",
".",
"GTFS",
"output_dir",
":",
"(",
"str",
"unicode",
")",
"a",
"path",
"where",
"to",
"write",
"fmt",
":",
"None",
"optional",
"defaulting",
"to",
"edg",
"and",
"writing",
"results",
"as",
".",
"edg",
"files",
"If",
"csv",
"csv",
"files",
"are",
"produced",
"instead"
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/exports.py#L116-L135
|
[
"def",
"write_static_networks",
"(",
"gtfs",
",",
"output_dir",
",",
"fmt",
"=",
"None",
")",
":",
"if",
"fmt",
"is",
"None",
":",
"fmt",
"=",
"\"edg\"",
"single_layer_networks",
"=",
"stop_to_stop_networks_by_type",
"(",
"gtfs",
")",
"util",
".",
"makedirs",
"(",
"output_dir",
")",
"for",
"route_type",
",",
"net",
"in",
"single_layer_networks",
".",
"items",
"(",
")",
":",
"tag",
"=",
"route_types",
".",
"ROUTE_TYPE_TO_LOWERCASE_TAG",
"[",
"route_type",
"]",
"file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"\"network_\"",
"+",
"tag",
"+",
"\".\"",
"+",
"fmt",
")",
"if",
"len",
"(",
"net",
".",
"edges",
"(",
")",
")",
">",
"0",
":",
"_write_stop_to_stop_network_edges",
"(",
"net",
",",
"file_name",
",",
"fmt",
"=",
"fmt",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
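The two stop-to-stop network exporters share the fmt convention ('edg' by default, 'csv' optionally): write_static_networks writes one file per route type into a directory, while write_combined_transit_stop_to_stop_network writes a single file. A sketch, reusing g:

from gtfspy.exports import (write_combined_transit_stop_to_stop_network,
                            write_static_networks)

write_static_networks(g, "networks_by_type", fmt="csv")
write_combined_transit_stop_to_stop_network(g, "combined_network.edg")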
valid
|
write_temporal_networks_by_route_type
|
Write temporal networks by route type to disk.
Parameters
----------
gtfs: gtfspy.GTFS
extract_output_dir: str
|
gtfspy/exports.py
|
def write_temporal_networks_by_route_type(gtfs, extract_output_dir):
"""
Write temporal networks by route type to disk.
Parameters
----------
gtfs: gtfspy.GTFS
extract_output_dir: str
"""
util.makedirs(extract_output_dir)
for route_type in route_types.TRANSIT_ROUTE_TYPES:
pandas_data_frame = temporal_network(gtfs, start_time_ut=None, end_time_ut=None, route_type=route_type)
tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]
out_file_name = os.path.join(extract_output_dir, tag + ".tnet")
pandas_data_frame.to_csv(out_file_name, encoding='utf-8', index=False)
|
def write_temporal_networks_by_route_type(gtfs, extract_output_dir):
"""
Write temporal networks by route type to disk.
Parameters
----------
gtfs: gtfspy.GTFS
extract_output_dir: str
"""
util.makedirs(extract_output_dir)
for route_type in route_types.TRANSIT_ROUTE_TYPES:
pandas_data_frame = temporal_network(gtfs, start_time_ut=None, end_time_ut=None, route_type=route_type)
tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]
out_file_name = os.path.join(extract_output_dir, tag + ".tnet")
pandas_data_frame.to_csv(out_file_name, encoding='utf-8', index=False)
|
[
"Write",
"temporal",
"networks",
"by",
"route",
"type",
"to",
"disk",
"."
] |
CxAalto/gtfspy
|
python
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/exports.py#L138-L152
|
[
"def",
"write_temporal_networks_by_route_type",
"(",
"gtfs",
",",
"extract_output_dir",
")",
":",
"util",
".",
"makedirs",
"(",
"extract_output_dir",
")",
"for",
"route_type",
"in",
"route_types",
".",
"TRANSIT_ROUTE_TYPES",
":",
"pandas_data_frame",
"=",
"temporal_network",
"(",
"gtfs",
",",
"start_time_ut",
"=",
"None",
",",
"end_time_ut",
"=",
"None",
",",
"route_type",
"=",
"route_type",
")",
"tag",
"=",
"route_types",
".",
"ROUTE_TYPE_TO_LOWERCASE_TAG",
"[",
"route_type",
"]",
"out_file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"extract_output_dir",
",",
"tag",
"+",
"\".tnet\"",
")",
"pandas_data_frame",
".",
"to_csv",
"(",
"out_file_name",
",",
"encoding",
"=",
"'utf-8'",
",",
"index",
"=",
"False",
")"
] |
bddba4b74faae6c1b91202f19184811e326547e5
|
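And the temporal counterpart, which writes one .tnet file per transit route type into the given directory; a sketch, reusing g:

from gtfspy.exports import write_temporal_networks_by_route_type

write_temporal_networks_by_route_type(g, "temporal_networks")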
No community queries yet
The top public SQL queries from the community will appear here once available.